[nGraph][IE] Enable FP64 data type in IE and review nGraph Python tests (#2563)

parent 95cae09566
commit e1a7f7e5af
@@ -120,6 +120,10 @@ enum precision_e{
FP16 = 11, /**< 16bit floating point value */
BF16 = 12, /**< 16bit floating point value, 8 bit for exponent, 7 bit for mantisa*/
FP64 = 13, /**< 64bit floating point value */
Q78 = 20, /**< 16bit specific signed fixed point precision */
I16 = 30, /**< 16bit signed integer value */

@@ -174,6 +174,7 @@ typedef enum {
MIXED = 0, /**< Mixed value. Can be received from network. No applicable for tensors */
FP32 = 10, /**< 32bit floating point value */
FP16 = 11, /**< 16bit floating point value */
FP64 = 13, /**< 64bit floating point value */
Q78 = 20, /**< 16bit specific signed fixed point precision */
I16 = 30, /**< 16bit signed integer value */
U8 = 40, /**< 8bit unsigned integer value */
@@ -80,6 +80,7 @@ std::map<IE::Precision, precision_e> precision_map = {{IE::Precision::UNSPECIFIE
{IE::Precision::MIXED, precision_e::MIXED},
{IE::Precision::FP32, precision_e::FP32},
{IE::Precision::FP16, precision_e::FP16},
{IE::Precision::FP64, precision_e::FP64},
{IE::Precision::Q78, precision_e::Q78},
{IE::Precision::I16, precision_e::I16},
{IE::Precision::U8, precision_e::U8},

@@ -1433,6 +1434,8 @@ IEStatusCode ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **bl
_blob->object = IE::make_shared_blob<uint64_t>(tensor);
} else if (prec == IE::Precision::FP32) {
_blob->object = IE::make_shared_blob<float>(tensor);
} else if (prec == IE::Precision::FP64) {
_blob->object = IE::make_shared_blob<double>(tensor);
} else {
_blob->object = IE::make_shared_blob<uint8_t>(tensor);
}

@@ -1505,6 +1508,9 @@ IEStatusCode ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDe
} else if (prec == IE::Precision::FP32) {
float *p = reinterpret_cast<float *>(ptr);
_blob->object = IE::make_shared_blob(tensor, p, size);
} else if (prec == IE::Precision::FP64) {
double *p = reinterpret_cast<double *>(ptr);
_blob->object = IE::make_shared_blob(tensor, p, size);
} else {
uint8_t *p = reinterpret_cast<uint8_t *>(ptr);
_blob->object = IE::make_shared_blob(tensor, p, size);
@@ -18,7 +18,7 @@ from .cimport ie_api_impl_defs as C
import numpy as np
from enum import Enum

supported_precisions = ["FP32", "FP16", "I64", "U64", "I32", "U32", "I16", "I8", "U16", "U8"]
supported_precisions = ["FP32", "FP64", "FP16", "I64", "U64", "I32", "U32", "I16", "I8", "U16", "U8"]

known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL', 'MULTI']

@@ -27,6 +27,7 @@ layout_int_to_str_map = {0: "ANY", 1: "NCHW", 2: "NHWC", 3: "NCDHW", 4: "NDHWC",
format_map = {
'FP32' : np.float32,
'FP64' : np.float64,
'I32' : np.int32,
'FP16' : np.float16,
'I16' : np.int16,

@@ -114,6 +114,7 @@ cdef class Blob:
def __cinit__(self, TensorDesc tensor_desc = None, array : np.ndarray = None):
cdef CTensorDesc c_tensor_desc
cdef float[::1] fp32_array_memview
cdef double[::1] fp64_array_memview
cdef int16_t[::1] I16_array_memview
cdef uint16_t[::1] U16_array_memview
cdef uint8_t[::1] U8_array_memview

@@ -137,6 +138,8 @@ cdef class Blob:
precision = tensor_desc.precision
if precision == "FP32":
self._ptr = C.make_shared_blob[float](c_tensor_desc)
elif precision == "FP64":
self._ptr = C.make_shared_blob[double](c_tensor_desc)
elif precision == "FP16" or precision == "I16":
self._ptr = C.make_shared_blob[int16_t](c_tensor_desc)
elif precision == "Q78" or precision == "U16":

@@ -168,6 +171,9 @@ cdef class Blob:
if precision == "FP32":
fp32_array_memview = self._array_data
self._ptr = C.make_shared_blob[float](c_tensor_desc, &fp32_array_memview[0], fp32_array_memview.shape[0])
elif precision == "FP64":
fp64_array_memview = self._array_data
self._ptr = C.make_shared_blob[double](c_tensor_desc, &fp64_array_memview[0], fp64_array_memview.shape[0])
elif precision == "FP16":
raise RuntimeError("Currently, it's impossible to set_blob with FP16 precision")
elif precision == "I16":

@@ -1485,6 +1491,7 @@ cdef class BlobBuffer:
# todo: half floats
precision_to_format = {
'FP32': 'f', # float
'FP64': 'd', # double
'FP16': 'h', # signed short
'U8': 'B', # unsigned char
'U16': 'H', # unsigned short
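Taken together, the ie_api.pyx changes above let a double-precision NumPy array back a Blob end to end in the Python bindings. A minimal usage sketch, not part of this commit, assuming the usual openvino.inference_engine import path:

```python
import numpy as np
from openvino.inference_engine import TensorDesc, Blob

# "FP64" is now accepted by supported_precisions and maps to np.float64 in format_map
tensor_desc = TensorDesc("FP64", [1, 3, 2, 2], "NCHW")
data = np.zeros(shape=(1, 3, 2, 2), dtype=np.float64)

blob = Blob(tensor_desc, data)        # backed by make_shared_blob[double] in __cinit__
blob.buffer[:] = np.ones_like(data)   # BlobBuffer exposes the memory with struct format 'd'
assert np.array_equal(blob.buffer, np.ones_like(data))
```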
@@ -8,6 +8,7 @@
const std::string EXPORTED_NETWORK_NAME = "undefined";
std::map <std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
{"FP64", InferenceEngine::Precision::FP64},
{"FP16", InferenceEngine::Precision::FP16},
{"I8", InferenceEngine::Precision::I8},
{"I16", InferenceEngine::Precision::I16},
@@ -46,6 +46,15 @@ def test_write_to_buffer_fp32():
assert np.array_equal(blob.buffer, ones_arr)


def test_write_to_buffer_fp64():
tensor_desc = TensorDesc("FP64", [1, 3, 127, 127], "NCHW")
array = np.zeros(shape=(1, 3, 127, 127), dtype=np.float64)
blob = Blob(tensor_desc, array)
ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=np.float64)
blob.buffer[:] = ones_arr
assert np.array_equal(blob.buffer, ones_arr)


@pytest.mark.skip(reason="Need to figure out how to implement right conversion")
def test_write_to_buffer_fp16():
tensor_desc = TensorDesc("FP16", [1, 3, 127, 127], "NCHW")
@@ -29,6 +29,7 @@ public:
FP32 = 10, /**< 32bit floating point value */
FP16 = 11, /**< 16bit floating point value, 5 bit for exponent, 10 bit for mantisa */
BF16 = 12, /**< 16bit floating point value, 8 bit for exponent, 7 bit for mantisa*/
FP64 = 13, /**< 64bit floating point value */
Q78 = 20, /**< 16bit specific signed fixed point precision */
I16 = 30, /**< 16bit signed integer value */
U8 = 40, /**< 8bit unsigned integer value */

@@ -111,6 +112,7 @@ public:
switch (precisionInfo.value) {
CASE(FP32, float);
CASE(FP64, double);
CASE2(FP16, int16_t, uint16_t);
CASE2(BF16, int16_t, uint16_t);
CASE(I8, int8_t);

@@ -223,7 +225,8 @@ public:
PRECISION_NAME(Q78), PRECISION_NAME(BOOL), PRECISION_NAME(BF16),
PRECISION_NAME(I8), PRECISION_NAME(I16), PRECISION_NAME(I32), PRECISION_NAME(I64),
PRECISION_NAME(U8), PRECISION_NAME(U16), PRECISION_NAME(U32), PRECISION_NAME(U64),
PRECISION_NAME(FP32), PRECISION_NAME(FP16), PRECISION_NAME(MIXED), PRECISION_NAME(BIN),
PRECISION_NAME(FP32), PRECISION_NAME(FP64), PRECISION_NAME(FP16), PRECISION_NAME(MIXED),
PRECISION_NAME(BIN),
#undef PRECISION_NAME
};
auto i = names.find(str);

@@ -255,11 +258,12 @@ public:
*/
bool isSigned() const noexcept {
return (precisionInfo.value == Precision::UNSPECIFIED) || (precisionInfo.value == Precision::MIXED) ||
(precisionInfo.value == Precision::FP32) || (precisionInfo.value == Precision::FP16) ||
(precisionInfo.value == Precision::Q78) || (precisionInfo.value == Precision::I16) ||
(precisionInfo.value == Precision::I8) || (precisionInfo.value == Precision::I32) ||
(precisionInfo.value == Precision::I64) || (precisionInfo.value == Precision::BIN) ||
(precisionInfo.value == Precision::BF16) || (precisionInfo.value == Precision::CUSTOM);
(precisionInfo.value == Precision::FP32) || (precisionInfo.value == Precision::FP64) ||
(precisionInfo.value == Precision::FP16) || (precisionInfo.value == Precision::Q78) ||
(precisionInfo.value == Precision::I16) || (precisionInfo.value == Precision::I8) ||
(precisionInfo.value == Precision::I32) || (precisionInfo.value == Precision::I64) ||
(precisionInfo.value == Precision::BIN) || (precisionInfo.value == Precision::BF16) ||
(precisionInfo.value == Precision::CUSTOM);
}

protected:

@@ -301,6 +305,7 @@ protected:
return makePrecisionInfo<x>(#x);
switch (v) {
CASE(FP32);
CASE(FP64);
CASE(FP16);
CASE(BF16);
CASE(I8);

@@ -334,6 +339,11 @@ struct PrecisionTrait<Precision::FP32> {
using value_type = float;
};

template <>
struct PrecisionTrait<Precision::FP64> {
using value_type = double;
};

template <>
struct PrecisionTrait<Precision::FP16> {
using value_type = int16_t;
@@ -97,6 +97,7 @@ private:
switch (input.getTensorDesc().getPrecision()) {
TBLOB_TOP_RESULT(FP32);
TBLOB_TOP_RESULT(FP64);
TBLOB_TOP_RESULT(FP16);
TBLOB_TOP_RESULT(Q78);
TBLOB_TOP_RESULT(I16);

@@ -87,6 +87,7 @@ Parameter::Any::~Any() {}
template struct InferenceEngine::Parameter::RealData<int>;
template struct InferenceEngine::Parameter::RealData<bool>;
template struct InferenceEngine::Parameter::RealData<float>;
template struct InferenceEngine::Parameter::RealData<double>;
template struct InferenceEngine::Parameter::RealData<uint32_t>;
template struct InferenceEngine::Parameter::RealData<std::string>;
template struct InferenceEngine::Parameter::RealData<unsigned long>;
@@ -354,6 +354,9 @@ CNNLayer::Ptr NodeConverter<ngraph::op::Convert>::createLayer(const std::shared_
case Precision::FP32:
precision_str = "FP32";
break;
case Precision::FP64:
precision_str = "FP64";
break;
case Precision::I8:
precision_str = "I8";
break;

@@ -1527,6 +1527,9 @@ void ConvertPrecision(ICNNNetwork& net, Precision from, Precision to) {
case getPrecisionMask(Precision::FP16, Precision::FP32):
convertPrecisionForAll<Precision::FP16, Precision::FP32>(net);
break;
case getPrecisionMask(Precision::FP64, Precision::FP32):
convertPrecisionForAll<Precision::FP64, Precision::FP32>(net);
break;
case getPrecisionMask(Precision::U8, Precision::I32):
convertPrecisionForAll<Precision::U8, Precision::I32>(net);
break;

@@ -107,6 +107,7 @@ InferenceEngine::Blob::Ptr make_blob_with_precision(InferenceEngine::Precision p
switch (precision) {
USE_FACTORY(FP32);
USE_FACTORY(FP64);
USE_FACTORY(FP16);
USE_FACTORY(Q78);
USE_FACTORY(I8);
@@ -19,6 +19,8 @@ inline ::ngraph::element::Type convertPrecision(const Precision& precision) {
return ::ngraph::element::Type(::ngraph::element::Type_t::undefined);
case Precision::FP32:
return ::ngraph::element::Type(::ngraph::element::Type_t::f32);
case Precision::FP64:
return ::ngraph::element::Type(::ngraph::element::Type_t::f64);
case Precision::FP16:
return ::ngraph::element::Type(::ngraph::element::Type_t::f16);
case Precision::BF16:

@@ -95,6 +97,8 @@ inline Precision convertPrecision(const ::ngraph::element::Type& precision) {
return Precision(Precision::FP16);
case ::ngraph::element::Type_t::f32:
return Precision(Precision::FP32);
case ::ngraph::element::Type_t::f64:
return Precision(Precision::FP64);
case ::ngraph::element::Type_t::bf16:
return Precision(Precision::BF16);
case ::ngraph::element::Type_t::i8:
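The two convertPrecision overloads above give FP64 a round trip between InferenceEngine::Precision and nGraph's element::Type_t::f64, which is what lets float64 graphs keep their element type in the nGraph Python API. A small sketch of the expected behaviour; this is not code from the commit, and the API names (ng.parameter, ng.convert, Type.f64) are the ngraph Python package of this era as assumed here:

```python
import numpy as np
import ngraph as ng
from ngraph.impl import Type

# A float64 parameter should now carry nGraph element type f64
param = ng.parameter([2, 2], dtype=np.float64, name="x")
assert param.get_element_type() == Type.f64

# Convert targeting float64 relies on the same f64 <-> FP64 mapping,
# as the reviewed test_convert_to_float cases further down exercise
node = ng.convert(ng.parameter([2, 2], dtype=np.int64, name="i"), destination_type=np.float64)
assert node.get_element_type() == Type.f64
```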
@@ -44,8 +44,10 @@ SizeVector SetDimVector(BatchNum batchNum, ChannelNum channelNum, Dims dims) {
InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision, SizeVector dimsVector, InferenceEngine::Layout layout) {
InferenceEngine::TensorDesc tensorDesc(precision, dimsVector, layout);
switch (precision) {
case InferenceEngine::Precision::FP32 :
case InferenceEngine::Precision::FP32:
return make_shared_blob<float>(tensorDesc);
case InferenceEngine::Precision::FP64:
return make_shared_blob<double>(tensorDesc);
case InferenceEngine::Precision::FP16:
case InferenceEngine::Precision::I16:
case InferenceEngine::Precision::Q78:

@@ -115,8 +117,10 @@ void FillBlobRandom(Blob::Ptr& inputBlob) {
void FillBlob(Blob::Ptr& inputBlob) {
auto precision = inputBlob->getTensorDesc().getPrecision();
switch (precision) {
case InferenceEngine::Precision::FP32 :
case InferenceEngine::Precision::FP32:
return FillBlobRandom<float>(inputBlob);
case InferenceEngine::Precision::FP64:
return FillBlobRandom<double>(inputBlob);
case InferenceEngine::Precision::FP16:
case InferenceEngine::Precision::I16:
case InferenceEngine::Precision::Q78:

@@ -203,8 +207,10 @@ bool IsCorrectBlobCopy_Impl(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
bool IsCorrectBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
switch (srcBlob->getTensorDesc().getPrecision()) {
case InferenceEngine::Precision::FP32 :
case InferenceEngine::Precision::FP32:
return IsCorrectBlobCopy_Impl<float>(srcBlob, dstBlob);
case InferenceEngine::Precision::FP64:
return IsCorrectBlobCopy_Impl<double>(srcBlob, dstBlob);
case InferenceEngine::Precision::FP16:
case InferenceEngine::Precision::I16:
case InferenceEngine::Precision::Q78:

@@ -264,7 +270,7 @@ TEST_P(BlobCopyTest, BlobCopy) {
std::cout << "Blob_copy execution time : " << std::chrono::duration_cast<std::chrono::microseconds>(finish - start).count() << " micros" << std::endl;

ASSERT_TRUE(IsCorrectBlobCopy(srcBlob, dstBlob)) << "'blob_copy' function is't correct";
ASSERT_TRUE(IsCorrectBlobCopy(srcBlob, dstBlob)) << "'blob_copy' function is not correct";
}

namespace {

@@ -332,6 +338,8 @@ bool IsEqualBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
switch (srcBlob->getTensorDesc().getPrecision()) {
case InferenceEngine::Precision::FP32:
return IsEqualBlobCopy_Impl<float>(srcBlob, dstBlob);
case InferenceEngine::Precision::FP64:
return IsEqualBlobCopy_Impl<double>(srcBlob, dstBlob);
case InferenceEngine::Precision::FP16:
case InferenceEngine::Precision::I16:
case InferenceEngine::Precision::Q78:

@@ -381,6 +389,8 @@ void copy3DBlobsAllBytesWithReLayoutWrapper(const Blob::Ptr& srcLayoutBlob, Blob
switch (precision) {
case InferenceEngine::Precision::FP32:
return copy3DBlobsAllBytesWithReLayout<float>(srcLayoutBlob, trgLayoutBlob);
case InferenceEngine::Precision::FP64:
return copy3DBlobsAllBytesWithReLayout<double>(srcLayoutBlob, trgLayoutBlob);
case InferenceEngine::Precision::FP16:
case InferenceEngine::Precision::I16:
case InferenceEngine::Precision::Q78:
@@ -17,6 +17,7 @@ TEST_F(PrecisionTests, ShowsCorrectPrecisionNames) {
ASSERT_STREQ(Precision(Precision::U64).name(), "U64");
ASSERT_STREQ(Precision(Precision::FP16).name(), "FP16");
ASSERT_STREQ(Precision(Precision::FP32).name(), "FP32");
ASSERT_STREQ(Precision(Precision::FP64).name(), "FP64");
ASSERT_STREQ(Precision(Precision::I16).name(), "I16");
ASSERT_STREQ(Precision(Precision::I32).name(), "I32");
ASSERT_STREQ(Precision(Precision::U32).name(), "U32");

@@ -35,6 +36,7 @@ TEST_F(PrecisionTests, sizeIsCorrect) {
ASSERT_EQ(Precision(Precision::U64).size(), 8);
ASSERT_EQ(Precision(Precision::FP16).size(), 2);
ASSERT_EQ(Precision(Precision::FP32).size(), 4);
ASSERT_EQ(Precision(Precision::FP64).size(), 8);
ASSERT_EQ(Precision(Precision::I32).size(), 4);
ASSERT_EQ(Precision(Precision::U32).size(), 4);
ASSERT_EQ(Precision(Precision::I16).size(), 2);

@@ -50,6 +52,7 @@ TEST_F(PrecisionTests, sizeIsCorrect) {
TEST_F(PrecisionTests, is_float) {
ASSERT_TRUE(Precision(Precision::FP16).is_float());
ASSERT_TRUE(Precision(Precision::FP32).is_float());
ASSERT_TRUE(Precision(Precision::FP64).is_float());
ASSERT_FALSE(Precision(Precision::I64).is_float());
ASSERT_FALSE(Precision(Precision::U64).is_float());
ASSERT_FALSE(Precision(Precision::I32).is_float());

@@ -70,6 +73,7 @@ TEST_F(PrecisionTests, constructFromSTR) {
ASSERT_EQ(Precision(Precision::U64), Precision::FromStr("U64"));
ASSERT_EQ(Precision(Precision::FP16), Precision::FromStr("FP16"));
ASSERT_EQ(Precision(Precision::FP32), Precision::FromStr("FP32"));
ASSERT_EQ(Precision(Precision::FP64), Precision::FromStr("FP64"));
ASSERT_EQ(Precision(Precision::I32), Precision::FromStr("I32"));
ASSERT_EQ(Precision(Precision::U32), Precision::FromStr("U32"));
ASSERT_EQ(Precision(Precision::I16), Precision::FromStr("I16"));
@@ -88,7 +88,8 @@ xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is no
xfail_issue_35924 = xfail_test(reason="Assertion error - elu results mismatch")
xfail_issue_35925 = xfail_test(reason="Assertion error - reduction ops results mismatch")
xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable")
xfail_issue_35929 = xfail_test(reason="RuntimeError: Incorrect precision f64!")
xfail_issue_40319 = xfail_test(reason="RuntimeError: [NOT_IMPLEMENTED] Input image format FP64 is not "
"supported yet...")
xfail_issue_35930 = xfail_test(reason="onnx.onnx_cpp2py_export.checker.ValidationError: "
"Required attribute 'to' is missing.")
xfail_issue_40485 = xfail_test(reason="Computation missmatch")
@@ -26,9 +26,9 @@ from ngraph.impl import Function, PartialShape, Shape, Type
from ngraph.impl.op import Parameter
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
from tests import (xfail_issue_35929,
xfail_issue_36476,
xfail_issue_36480)
from tests import (xfail_issue_36476,
xfail_issue_36480,
xfail_issue_40319)


def test_ngraph_function_api():

@@ -59,7 +59,7 @@ def test_ngraph_function_api():
"dtype",
[
np.float32,
pytest.param(np.float64, marks=xfail_issue_35929),
pytest.param(np.float64, marks=xfail_issue_40319),
np.int8,
np.int16,
np.int32,
@@ -155,9 +155,9 @@ def test_convert_to_bool(destination_type, input_data):
"destination_type, rand_range, in_dtype, expected_type",
[
pytest.param(np.float32, (-8, 8), np.int32, np.float32),
pytest.param(np.float64, (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
pytest.param(np.float64, (-16383, 16383), np.int64, np.float64),
pytest.param("f32", (-8, 8), np.int32, np.float32),
pytest.param("f64", (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
pytest.param("f64", (-16383, 16383), np.int64, np.float64),
],
)
def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type):

@@ -169,7 +169,6 @@ def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type)
assert np.array(result).dtype == expected_type


@xfail_issue_35929
@pytest.mark.parametrize(
"destination_type, expected_type",
[

@@ -185,14 +184,13 @@ def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type)
)
def test_convert_to_int(destination_type, expected_type):
np.random.seed(133391)
input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16)
input_data = (np.ceil(-8 + np.random.rand(2, 3, 4) * 16)).astype(np.float32)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == expected_type


@xfail_issue_35929
@pytest.mark.parametrize(
"destination_type, expected_type",
[
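A recurring detail in the test review above and below: arrays built from Python floats default to numpy's float64, which after this commit would route these cases through the FP64 path, so the reviewed tests pin their inputs to float32 with an explicit astype and keep exercising the precision they were written for. A plain-numpy illustration of the default promotion (not code from the commit):

```python
import numpy as np

data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16)
print(data.dtype)    # float64: numpy's default dtype for Python-float arithmetic

data32 = data.astype(np.float32)
print(data32.dtype)  # float32: what the reviewed tests now request explicitly
```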
@@ -208,7 +206,7 @@ def test_convert_to_int(destination_type, expected_type):
)
def test_convert_to_uint(destination_type, expected_type):
np.random.seed(133391)
input_data = np.ceil(np.random.rand(2, 3, 4) * 16)
input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(np.float32)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
@@ -19,7 +19,7 @@ import numpy as np
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
from tests import xfail_issue_40957, xfail_issue_35929
from tests import xfail_issue_40957


@xfail_issue_40957

@@ -103,15 +103,14 @@ def test_lrn_factory():
assert np.allclose(result, excepted)


@xfail_issue_35929
def test_batch_norm_inference():
data = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]])
gamma = np.array([2.0, 3.0, 4.0])
beta = np.array([0.0, 0.0, 0.0])
mean = np.array([0.0, 0.0, 0.0])
variance = np.array([1.0, 1.0, 1.0])
data = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float32)
gamma = np.array([2.0, 3.0, 4.0], dtype=np.float32)
beta = np.array([0.0, 0.0, 0.0], dtype=np.float32)
mean = np.array([0.0, 0.0, 0.0], dtype=np.float32)
variance = np.array([1.0, 1.0, 1.0], dtype=np.float32)
epsilon = 9.99e-06
excepted = np.array([[2.0, 6.0, 12.0], [-2.0, -6.0, -12.0]])
excepted = np.array([[2.0, 6.0, 12.0], [-2.0, -6.0, -12.0]], dtype=np.float32)

result = run_op_node([data, gamma, beta, mean, variance], ng.batch_norm_inference, epsilon)
@@ -19,7 +19,7 @@ import pytest
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node, run_op_numeric_data
from tests import xfail_issue_40957, xfail_issue_35929
from tests import xfail_issue_40957


def test_concat():

@@ -51,7 +51,7 @@ def test_constant_from_bool(val_type, value):
"val_type, value",
[
pytest.param(np.float32, np.float32(0.1234), marks=xfail_issue_40957),
pytest.param(np.float64, np.float64(0.1234), marks=xfail_issue_35929),
pytest.param(np.float64, np.float64(0.1234), marks=xfail_issue_40957),
pytest.param(np.int8, np.int8(-63), marks=xfail_issue_40957),
pytest.param(np.int16, np.int16(-12345), marks=xfail_issue_40957),
pytest.param(np.int32, np.int32(-123456), marks=xfail_issue_40957),

@@ -72,7 +72,7 @@ def test_constant_from_scalar(val_type, value):
"val_type",
[
pytest.param(np.float32, marks=xfail_issue_40957),
pytest.param(np.float64, marks=xfail_issue_35929),
pytest.param(np.float64, marks=xfail_issue_40957),
],
)
def test_constant_from_float_array(val_type):
@@ -19,21 +19,19 @@ import pytest
import ngraph as ng
from ngraph.impl import Shape, Type
from tests.test_ngraph.util import run_op_node
from tests import xfail_issue_35929


@xfail_issue_35929
@pytest.mark.parametrize(
"ng_api_fn, numpy_fn, range_start, range_end",
[
(ng.absolute, np.abs, -1, 1),
(ng.abs, np.abs, -1, 1),
(ng.acos, np.arccos, -1, 1),
(ng.acosh, np.arccosh, -1, 1),
(ng.acosh, np.arccosh, 1, 2),
(ng.asin, np.arcsin, -1, 1),
(ng.asinh, np.arcsinh, -1, 1),
(ng.atan, np.arctan, -100.0, 100.0),
(ng.atanh, np.arctanh, -100.0, 100.0),
(ng.atanh, np.arctanh, 0.0, 1.0),
(ng.ceiling, np.ceil, -100.0, 100.0),
(ng.ceil, np.ceil, -100.0, 100.0),
(ng.cos, np.cos, -100.0, 100.0),

@@ -52,7 +50,7 @@ from tests import xfail_issue_35929
)
def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
np.random.seed(133391)
input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)
input_data = (range_start + np.random.rand(2, 3, 4) * (range_end - range_start)).astype(np.float32)
expected = numpy_fn(input_data)

result = run_op_node([input_data], ng_api_fn)
@@ -44,7 +44,6 @@ from tests import (BACKEND_NAME,
xfail_issue_36476,
xfail_issue_36478,
xfail_issue_38091,
xfail_issue_35929,
xfail_issue_38699,
xfail_issue_33596,
xfail_issue_38701,

@@ -76,6 +75,7 @@ from tests import (BACKEND_NAME,
xfail_issue_38732,
xfail_issue_38734,
xfail_issue_38735,
xfail_issue_40319,
xfail_issue_40485,
xfail_issue_41894)
@@ -213,7 +213,8 @@ tests_expected_to_fail = [
"OnnxBackendNodeModelTest.test_dropout_default_ratio_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_default_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_zero_ratio_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_cpu"),
"OnnxBackendNodeModelTest.test_training_dropout_cpu",
"OnnxBackendNodeModelTest.test_eyelike_with_dtype_cpu"),
(xfail_issue_35915,
"OnnxBackendNodeModelTest.test_min_int16_cpu",
"OnnxBackendNodeModelTest.test_min_uint8_cpu"),

@@ -242,24 +243,21 @@ tests_expected_to_fail = [
(xfail_issue_38091,
"OnnxBackendNodeModelTest.test_gather_negative_indices_cpu",
"OnnxBackendNodeModelTest.test_mvn_cpu",
"OnnxBackendNodeModelTest.test_elu_example_cpu"),
(xfail_issue_35929,
"OnnxBackendNodeModelTest.test_elu_example_cpu",),
(xfail_issue_40319,
"OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_broadcast_cpu",
"OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_singleton_broadcast_cpu",
"OnnxBackendPyTorchOperatorModelTest.test_operator_add_broadcast_cpu",
"OnnxBackendPyTorchOperatorModelTest.test_operator_addconstant_cpu",
"OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_right_broadcast_cpu",
"OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT_cpu",
"OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu",
"OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT16_cpu",
"OnnxBackendNodeModelTest.test_cumsum_1d_cpu",
"OnnxBackendNodeModelTest.test_cast_FLOAT_to_DOUBLE_cpu",
"OnnxBackendNodeModelTest.test_cumsum_1d_reverse_cpu",
"OnnxBackendNodeModelTest.test_cumsum_1d_exclusive_cpu",
"OnnxBackendNodeModelTest.test_cumsum_1d_reverse_exclusive_cpu",
"OnnxBackendNodeModelTest.test_cumsum_2d_axis_0_cpu",
"OnnxBackendNodeModelTest.test_cumsum_2d_negative_axis_cpu",
"OnnxBackendNodeModelTest.test_eyelike_with_dtype_cpu",
"OnnxBackendNodeModelTest.test_cumsum_2d_axis_1_cpu",
"OnnxBackendNodeModelTest.test_mod_mixed_sign_float64_cpu",
"OnnxBackendNodeModelTest.test_max_float64_cpu",
@@ -22,8 +22,7 @@ from onnx.helper import make_graph, make_model, make_node, make_tensor_value_inf
from ngraph.exceptions import NgraphTypeError
from tests.runtime import get_runtime
from tests.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node
from tests import (xfail_issue_35929,
xfail_issue_40957,
from tests import (xfail_issue_40957,
xfail_issue_35930)

@@ -105,7 +104,6 @@ def test_neg(input_data):
assert np.array_equal(ng_results, [expected_output])


@xfail_issue_35929
@pytest.mark.parametrize(
"input_data",
[

@@ -115,13 +113,13 @@ def test_neg(input_data):
],
)
def test_floor(input_data):
input_data = input_data.astype(np.float32)
expected_output = np.floor(input_data)
node = onnx.helper.make_node("Floor", inputs=["x"], outputs=["y"])
ng_results = run_node(node, [input_data])
assert np.array_equal(ng_results, [expected_output])


@xfail_issue_35929
@pytest.mark.parametrize(
"input_data",
[

@@ -131,6 +129,7 @@ def test_floor(input_data):
],
)
def test_ceil(input_data):
input_data = input_data.astype(np.float32)
expected_output = np.ceil(input_data)
node = onnx.helper.make_node("Ceil", inputs=["x"], outputs=["y"])
ng_results = run_node(node, [input_data])

@@ -165,7 +164,6 @@ def test_clip_default():
assert np.allclose(result, [expected])


@xfail_issue_35929
@pytest.mark.parametrize(
"input_data",
[

@@ -175,6 +173,7 @@ def test_clip_default():
],
)
def test_reciprocal(input_data):
input_data = input_data.astype(np.float32)
expected_output = np.reciprocal(input_data)
node = onnx.helper.make_node("Reciprocal", inputs=["x"], outputs=["y"])
ng_results = run_node(node, [input_data])

@@ -389,7 +388,8 @@ def test_cast_to_bool(val_type, input_data):
"val_type, range_start, range_end, in_dtype",
[
(np.dtype(np.float32), -8, 8, np.dtype(np.int32)),
pytest.param(np.dtype(np.float64), -16383, 16383, np.dtype(np.int64), marks=xfail_issue_35929),
pytest.param(np.dtype(np.float64), -16383, 16383, np.dtype(np.int64),
marks=pytest.mark.xfail(reason="RuntimeError: Unsupported type")),
],
)
def test_cast_to_float(val_type, range_start, range_end, in_dtype):

@@ -503,7 +503,7 @@ def test_cast_errors():
@pytest.mark.parametrize("value_type",
[pytest.param(np.float32, marks=xfail_issue_40957),
pytest.param(np.float64, marks=xfail_issue_35929)])
pytest.param(np.float64, marks=xfail_issue_40957)])
def test_constant(value_type):
values = np.random.randn(5, 5).astype(value_type)
node = onnx.helper.make_node(
@@ -56,6 +56,7 @@ namespace
switch (elem_type)
{
case element::Type_t::f32: blob = MAKE_IE_TBLOB(float, FP32, shape, layout); break;
case element::Type_t::f64: blob = MAKE_IE_TBLOB(double, FP64, shape, layout); break;
case element::Type_t::i16: blob = MAKE_IE_TBLOB(int16_t, I16, shape, layout); break;
case element::Type_t::u8: blob = MAKE_IE_TBLOB(uint8_t, U8, shape, layout); break;
case element::Type_t::i8: blob = MAKE_IE_TBLOB(int8_t, I8, shape, layout); break;

@@ -88,6 +88,9 @@ namespace
case InferenceEngine::Precision::FP32:
return compare_blobs<float>(computed, expected, tolerance_bits);
break;
case InferenceEngine::Precision::FP64:
return compare_blobs<double>(computed, expected, tolerance_bits);
break;
case InferenceEngine::Precision::I8:
return compare_blobs<int8_t>(computed, expected, tolerance_bits);
break;