From 43f18da41312c147a4e8a1c291f8d861eb22767c Mon Sep 17 00:00:00 2001 From: Edward Shogulin Date: Fri, 6 Aug 2021 08:27:34 +0100 Subject: [PATCH] [LPT] Legacy compliance restrictions removal all: Reshape (#6870) * [LPT] Reshape: legacy compliance restrictions removal * [LPT] comment fixes --- .../include/low_precision/network_helper.hpp | 2 +- .../src/concat.cpp | 11 - .../src/network_helper.cpp | 4 +- .../src/reshape.cpp | 307 ++++++------------ .../concat_transformation.cpp | 14 +- .../reshape_transformation.cpp | 164 +++++++++- .../reshape_transformation.cpp | 74 ++++- .../mat_mul_with_constant_transformation.cpp | 18 +- .../reshape_transformation.cpp | 68 +++- .../reshape_transformation.hpp | 4 +- .../reshape_transformation.cpp | 16 +- .../base/layer_test_utils.hpp | 4 + .../src/base/layer_test_utils.cpp | 18 + .../low_precision/reshape_test.cpp | 1 - 14 files changed, 430 insertions(+), 275 deletions(-) diff --git a/inference-engine/src/low_precision_transformations/include/low_precision/network_helper.hpp b/inference-engine/src/low_precision_transformations/include/low_precision/network_helper.hpp index 4e5a93fe070..3229c9814f0 100644 --- a/inference-engine/src/low_precision_transformations/include/low_precision/network_helper.hpp +++ b/inference-engine/src/low_precision_transformations/include/low_precision/network_helper.hpp @@ -151,7 +151,7 @@ public: static bool isQuantizeSupported(const std::shared_ptr& fakeQuantize); - static FakeQuantizeDequantization getDequantization(const std::shared_ptr& node, const size_t parentIndex = 0ul, const bool inPlace = false); + static FakeQuantizeDequantization getDequantization(const std::shared_ptr& node, const size_t parentIndex = 0ul, const bool inPlace = false); static FakeQuantizeDequantization getDequantizationBelow(const std::shared_ptr& node, const bool convertIsMandatory = false); diff --git a/inference-engine/src/low_precision_transformations/src/concat.cpp b/inference-engine/src/low_precision_transformations/src/concat.cpp index 0863dcb3f09..6adeb1f413c 100644 --- a/inference-engine/src/low_precision_transformations/src/concat.cpp +++ b/inference-engine/src/low_precision_transformations/src/concat.cpp @@ -5,9 +5,7 @@ #include "low_precision/concat.hpp" #include -#include #include -#include #include #include @@ -189,7 +187,6 @@ bool ConcatTransformation::canBeTransformed(const TransformationContext& context const auto outPShape = concat->get_output_partial_shape(0); const size_t normalizedAxis = ngraph::normalize_axis(concat->get_friendly_name(), axis, outPShape.rank()); - // TODO: LPT: to support current flow: #58269 if (normalizedAxis != 1ul) { return false; } @@ -198,8 +195,6 @@ bool ConcatTransformation::canBeTransformed(const TransformationContext& context return false; } - const bool perTensorQuantizationIsRequired = normalizedAxis != 1ul; - element::Type precision; for (size_t i = 0ul; i < concat->get_input_size(); i++) { const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(concat, i); @@ -212,12 +207,6 @@ bool ConcatTransformation::canBeTransformed(const TransformationContext& context } else if (precision != dequantization.data.get_element_type()) { return false; } - - if (perTensorQuantizationIsRequired && - (((dequantization.subtractConstant != nullptr) && !NetworkHelper::isScalarLike(dequantization.subtractConstant)) || - ((dequantization.multiplyConstant != nullptr) && !NetworkHelper::isScalarLike(dequantization.multiplyConstant)))) { - return false; - } } return true; } diff --git 
a/inference-engine/src/low_precision_transformations/src/network_helper.cpp b/inference-engine/src/low_precision_transformations/src/network_helper.cpp index a0ee6d9114e..879bd24dc04 100644 --- a/inference-engine/src/low_precision_transformations/src/network_helper.cpp +++ b/inference-engine/src/low_precision_transformations/src/network_helper.cpp @@ -1268,7 +1268,7 @@ bool NetworkHelper::isQuantizeSupported(const std::shared_ptrget_levels()); } -FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_ptr& node, const size_t parentIndex, const bool inPlace) { +FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_ptr& node, const size_t parentIndex, const bool inPlace) { auto getDataIndex = [](const std::shared_ptr& node) { if (is_type(node->get_input_node_ptr(1))) { return 0ul; @@ -1285,7 +1285,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt return 1ul; }; - Output dataNode = inPlace ? node->output(0) : node->input_value(parentIndex); + Output dataNode = inPlace ? std::const_pointer_cast(node)->output(0) : node->input_value(parentIndex); const std::shared_ptr multiply = as_type_ptr(dataNode.get_node_shared_ptr()); std::shared_ptr multiplyConstant; diff --git a/inference-engine/src/low_precision_transformations/src/reshape.cpp b/inference-engine/src/low_precision_transformations/src/reshape.cpp index f478928537e..b94e62320e4 100644 --- a/inference-engine/src/low_precision_transformations/src/reshape.cpp +++ b/inference-engine/src/low_precision_transformations/src/reshape.cpp @@ -38,131 +38,80 @@ ReshapeTransformation::ReshapeTransformation(const Params& params) : LayerTransf } void reshapeDequantizationConstant(const std::shared_ptr& reshape) { + // Reshape dequantization operation Constant. + // 1. Calculate result dequantization Constant shape for broadcast based on original dequantization Constant shape and Reshape output. + // For example: dequantization shape {1, 3, 1, 1}, output Reshape shape {1, 12, 3, 3}, result for broadcast: {1, 3, 4, 1}, + // where '4' calculated for temporary broadcast before reshape. + // 2. Broadcast dequantization Constant, if channels are changed + // 3. 
Reshape and replace + auto replaceConstant = [](const std::shared_ptr& reshape, const std::shared_ptr& originalConstant) { + // reshape for element-wise constant is not required + auto constantShape = originalConstant->get_shape(); + if (shape_size(constantShape) == 1ul) { + if (!constantShape.empty()) { + const auto newConstant = NetworkHelper::toScalar(originalConstant); + replace_node(originalConstant, newConstant); + } + return; + } + + auto const reshapeInputRank = reshape->get_input_partial_shape(0).rank(); + assert(reshapeInputRank.is_static()); + if (constantShape.size() > 1ul) { + while (constantShape.size() < static_cast(reshapeInputRank.get_length())) { + constantShape.insert(constantShape.begin(), 1ul); + } + } + + const auto reshapeOutputPShape = reshape->output(0).get_partial_shape(); + const auto reshapeOutputRank = reshapeOutputPShape.rank(); + assert(reshapeOutputRank.is_static()); + assert(reshapeOutputRank.get_length() >= 2); + assert(reshapeOutputPShape[1].is_static()); + assert(static_cast(reshapeOutputPShape[1].get_length()) >= constantShape[1]); + assert(reshapeOutputPShape[1].get_length() % constantShape[1] == 0); + const size_t dimensionsToBroadcast = reshapeOutputPShape[1].get_length() / constantShape[1]; + if (dimensionsToBroadcast == 0ul) { + return; + } + + Shape newOperationConstantBroadcastedShape = originalConstant->output(0).get_shape(); + // add dimensions to broadcast values + if (newOperationConstantBroadcastedShape.size() == 2ul) { + newOperationConstantBroadcastedShape.push_back(dimensionsToBroadcast); + } else { + newOperationConstantBroadcastedShape[2] = dimensionsToBroadcast; + } + const std::shared_ptr broadcastedConstant = fold( + originalConstant, + std::make_shared( + element::i32, + Shape({ newOperationConstantBroadcastedShape.size() }), + newOperationConstantBroadcastedShape)); + + std::vector newReshapeConstValues(reshapeOutputRank.get_length(), 1ul); + newReshapeConstValues[1] = reshapeOutputPShape[1].get_length(); + const std::shared_ptr newReshapeConstant = std::make_shared( + element::i32, + Shape({ newReshapeConstValues.size() }), + newReshapeConstValues); + + const std::shared_ptr resultConstant = fold( + broadcastedConstant, + newReshapeConstant, + reshape->get_special_zero()); + + replace_node(originalConstant, resultConstant); + }; + const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(reshape, 0); - if (dequantization.multiplyConstant->get_shape().size() > 1ul) { - // Reshape Subtract or Multiply operation Constant. - // 1. modify reshape parameters to avoid reshape by spatial dimensions - // 2. broadcast element-wise constant if channels are changed - // 3. reshape element-wise constant with modified reshape parameters - auto replaceConstant = [](const std::shared_ptr& reshape, const std::shared_ptr& op) { - const size_t constantIndex = as_type(op->get_input_node_ptr(1)) ? 
1 : 0; - const auto originalConstant = as_type_ptr(op->get_input_node_shared_ptr(constantIndex)); - const auto constantShape = originalConstant->get_shape(); - // reshape for element-wise constant is not required - if (shape_size(constantShape) == 1ul) { - if (constantShape.size() > 1ul) { - const Shape newConstShape = Shape(reshape->get_output_partial_shape(0).rank().get_length(), 1ul); - const auto newConstant = opset1::Constant::create( - originalConstant->get_element_type(), newConstShape, originalConstant->cast_vector()); - replace_node(op->get_input_node_shared_ptr(constantIndex), newConstant); - } + if (dequantization.subtract != nullptr) { + replaceConstant(reshape, dequantization.subtractConstant); + } - return; - } - - // simple broadcast operation Constant shape to shape on activations - auto newOperationConstantShape = constantShape; - auto const reshapeInputPShape = reshape->get_input_partial_shape(0); - PartialShape newOperationConstantBroadcastedShape(reshapeInputPShape); - newOperationConstantBroadcastedShape[0] = 1ul; - - if ((reshapeInputPShape.rank().get_length() - newOperationConstantShape.size()) == 1ul) { - newOperationConstantShape.insert(newOperationConstantShape.begin(), 1ul); - } - const std::shared_ptr newOperationConstant = std::make_shared( - op->input(constantIndex).get_element_type(), - newOperationConstantShape, - originalConstant->cast_vector()); - - // reshape -1 value handling - auto getOverallValue = [](const Shape& shape, const std::vector& reshapeValues, const bool specialZero) -> size_t { - size_t overallValue = shape_size(shape); - for (size_t i = 0; i < reshapeValues.size(); ++i) { - auto reshapeValue = reshapeValues[i]; - if ((reshapeValue == 1ul) || (reshapeValue == -1) || ((reshapeValue == 0ul) && !specialZero)) { - continue; - } - - if ((reshapeValue == 0ul) && specialZero) { - reshapeValue = shape[i]; - } - - overallValue = overallValue / reshapeValue; - } - return overallValue; - }; - - // modify reshape constant for element-wise constant reshape - // element-wise constant doesn't have spatial dimensions, as result we should remove spatial dimensions from reshape parameters - const std::vector reshapeConstValues = as_type_ptr(reshape->get_input_node_shared_ptr(1))->cast_vector(); - - size_t overallValue = 0; - for (size_t i = 0; i < reshapeConstValues.size(); ++i) { - if (reshapeConstValues[i] == -1) { - overallValue = getOverallValue( - reshapeInputPShape.to_shape(), - reshapeConstValues, - as_type_ptr(reshape)->get_special_zero()); - break; - } - } - - std::vector newReshapeConstValues(reshapeConstValues); - for (int i = static_cast(newReshapeConstValues.size() - 1); i >= 0; --i) { - if (static_cast(newOperationConstantShape.size()) <= i) { - // new dimension was added - newReshapeConstValues[i] = 1; - } else if (newOperationConstantShape[i] == 1ul) { - // keep the same - newReshapeConstValues[i] = 1; - } else if (newReshapeConstValues[i] == -1) { - // modified reshape parameters are different, but value instead '-1' has to be equal as original reshape - newReshapeConstValues[i] = overallValue; - } - } - - const std::shared_ptr newReshapeConstant = std::make_shared( - reshape->input(1).get_element_type(), - Shape({ newReshapeConstValues.size() }), - newReshapeConstValues); - - // if channels are different then broadcast spatial dimensions to reshape channels correctly - // limitation which has to be covered by canBeTransformed: - // 1. spatial dimensions have to be absent or equal to 1 after reshape - // 2. 
only second dimension can be changed - - const bool shouldBroadcast = (shape_size(newReshapeConstValues) != 1ul) && (reshapeConstValues[1] != 0) && - (((reshapeConstValues[1] != -1) && - (static_cast(newOperationConstantShape[1]) != reshapeConstValues[1])) || - ((reshapeConstValues[1] == -1) && - (newOperationConstantShape[1] != overallValue))); - - const std::shared_ptr broadcastedConstant = shouldBroadcast ? - fold( - newOperationConstant, - std::make_shared( - element::i32, - Shape({static_cast(newOperationConstantBroadcastedShape.rank().get_length())}), - // TODO: investigate behaviour - newOperationConstantBroadcastedShape.to_shape())) : - newOperationConstant; - - const std::shared_ptr resultConstant = fold( - broadcastedConstant, - newReshapeConstant, - reshape->get_special_zero()); - - replace_node(op->get_input_node_shared_ptr(constantIndex), resultConstant); - }; - - if (dequantization.subtract != nullptr) { - replaceConstant(reshape, dequantization.subtract); - } - - if (dequantization.multiply != nullptr) { - replaceConstant(reshape, dequantization.multiply); - } + if (dequantization.multiply != nullptr) { + replaceConstant(reshape, dequantization.multiplyConstant); } } @@ -186,7 +135,7 @@ bool ReshapeTransformation::isPrecisionPreserved(std::shared_ptr op) const return true; } -size_t getLastNotBroadcastedChannel(const Shape& shape) { +size_t getLastNotBroadcastedDimension(const Shape& shape) { for (int i = static_cast(shape.size()) - 1; i >= 0; --i) { if (shape[i] != 1ul) { return i; @@ -195,7 +144,7 @@ size_t getLastNotBroadcastedChannel(const Shape& shape) { return 0; } -size_t getFirstChangedChannel(const PartialShape& shape1, const PartialShape& shape2) { +size_t getFirstChangedDimension(const PartialShape& shape1, const PartialShape& shape2) { const size_t minSize = std::min(shape1.rank().get_length(), shape2.rank().get_length()); size_t i = 0; for (; i < minSize; ++i) { @@ -216,11 +165,15 @@ bool ReshapeTransformation::canBeTransformed(const TransformationContext& contex return false; } - // TODO: LPT: to support current flow: #58269 - //if (((dequantization.subtractConstant != nullptr) && NetworkHelper::isScalarLike(dequantization.subtractConstant)) || - // ((dequantization.multiplyConstant != nullptr) && NetworkHelper::isScalarLike(dequantization.multiplyConstant))) { - // return true; - //} + if (((dequantization.subtract == nullptr) || NetworkHelper::isScalarLike(dequantization.subtractConstant)) && + ((dequantization.multiply == nullptr) || NetworkHelper::isScalarLike(dequantization.multiplyConstant))) { + return true; + } + + const PartialShape outputPShape = op->get_output_partial_shape(0); + if (outputPShape[1].is_dynamic()) { + return false; + } const Shape subtractShape = dequantization.subtract == nullptr ? 
Shape{} : dequantization.subtractConstant->get_shape(); Shape subtractShapeWithBatch = subtractShape; @@ -245,26 +198,23 @@ bool ReshapeTransformation::canBeTransformed(const TransformationContext& contex multiplyShapeWithBatch.insert(multiplyShapeWithBatch.begin(), 1ul); } - const PartialShape outputPShape = op->get_output_partial_shape(0); - // if we have per-channel dq, dynamic shape, and "-1" reshape value - don't transform - if (outputPShape.is_dynamic() && (shape_size(subtractShape) > 1ul || shape_size(multiplyShape) > 1ul)) { - const auto reshapeConstant = as_type_ptr(op->get_input_node_shared_ptr(1))->cast_vector(); - if (std::any_of(reshapeConstant.cbegin(), reshapeConstant.cend(), [](const int value) { return value == -1; })) { - return false; - } + const size_t outputChannel = static_cast(outputPShape[1].get_length()); + if (!subtractShapeWithBatch.empty() && (outputChannel < subtractShapeWithBatch[1])) { + return false; + } + if (!multiplyShapeWithBatch.empty() && (outputChannel < multiplyShapeWithBatch[1])) { + return false; + } + + if (outputPShape.is_static() && + ((!subtractShapeWithBatch.empty() && ((outputChannel % subtractShapeWithBatch[1]) != 0)) || + (!multiplyShapeWithBatch.empty() && (outputChannel % multiplyShapeWithBatch[1] != 0)))) { + return false; } return canBeTransformed(subtractShapeWithBatch, multiplyShapeWithBatch, inputPShape, outputPShape); } -size_t getChannelVolume(const PartialShape& shape) { - size_t volume = 1ul; - for (int i = 2; i < shape.rank().get_length(); ++i) { - volume = volume * shape[i].get_length(); - } - return volume; -} - bool ReshapeTransformation::canBeTransformed( const ngraph::Shape& subtractShape, const ngraph::Shape& multiplyShape, @@ -277,68 +227,15 @@ bool ReshapeTransformation::canBeTransformed( return false; } - // TODO: story 38439 - if ((inputRank == 4ul) && (outputRank == 2ul)) { - auto checkSpatialDimensions = [](const Shape& dequantizationConstShape) { - for (size_t i = (dequantizationConstShape.size() - 2); i < dequantizationConstShape.size(); ++i) { - if (dequantizationConstShape[i] != 1ul) { - return false; - } - } - return true; - }; + const size_t lastNotBroadcastedDimension = std::max(getLastNotBroadcastedDimension(subtractShape), getLastNotBroadcastedDimension(multiplyShape)); + const size_t firstChangedDimension = getFirstChangedDimension(inputShape, outputShape); + // LPT supports channel on the second dimension natively <= reshape transformation supports more shapes for this case + if ((lastNotBroadcastedDimension == 1ul) && (firstChangedDimension == 1ul)) { + return true; + } - if (((subtractShape.size() >= 3ul) && (!checkSpatialDimensions(subtractShape))) || - ((multiplyShape.size() >= 3ul) && (!checkSpatialDimensions(multiplyShape)))) { - return false; - } - - if (inputRank > 1ul) { - if (inputShape[1].is_dynamic()) { - return false; - } - } else { - if (inputShape[0].is_dynamic()) { - return false; - } - } - - if (outputRank > 1ul) { - if (outputShape[1].is_dynamic()) { - return false; - } - } else { - if (outputShape[0].is_dynamic()) { - return false; - } - } - - // custom validation for Layout::NCHW => Layout::NC - const size_t inputChannelsCount = inputRank > 1ul ? inputShape[1].get_length() : inputShape[0].get_length(); - const size_t outputChannelsCount = outputRank > 1ul ? 
outputShape[1].get_length() : outputShape[0].get_length(); - for (size_t i = 2; i < inputRank; ++i) { - if (inputShape[i].is_dynamic()) { - return false; - } - } - - if ((inputShape[0] != outputShape[0]) || ((inputChannelsCount * getChannelVolume(inputShape)) != outputChannelsCount)) { - return false; - } - } else { - if (ngraph::shape_size(subtractShape) > 1 || ngraph::shape_size(multiplyShape) > 1) { - for (size_t i = 0; i < 2ul; ++i) { - if (inputShape[i] != outputShape[i]) { - return false; - } - } - } - - const size_t lastNotBroadcastedChannel = std::max(getLastNotBroadcastedChannel(subtractShape), getLastNotBroadcastedChannel(multiplyShape)); - const size_t firstChangedChannel = getFirstChangedChannel(inputShape, outputShape); - if (lastNotBroadcastedChannel >= firstChangedChannel) { - return false; - } + if (lastNotBroadcastedDimension >= firstChangedDimension) { + return false; } return true; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp index 8b1c65fc439..9c539b7504a 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp @@ -7,22 +7,18 @@ #include #include #include - #include -#include -#include - -#include +#include +#include +#include +#include +#include #include #include -#include -#include #include #include -#include -#include #include "common_test_utils/ngraph_test_utils.hpp" #include "lpt_ngraph_functions/concat_function.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp index 8383c79267a..377c8ca043b 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp @@ -140,7 +140,7 @@ const std::vector testValues = { {{ngraph::element::f32}, {}, {0.1f}} } }, - // U8: 3D -> 4D: dynamic rank + // U8: 3D -> 4D: dynamic rank: per tensor quantization { PartialShape::dynamic(), { 0, 384, 16, 64 }, @@ -151,7 +151,39 @@ const std::vector testValues = { }, { ngraph::element::u8, - {{ngraph::element::f32}, {}, {0.1f}}, + {}, + ngraph::element::u8, + {{ngraph::element::f32}, {}, {0.1f}} + } + }, + // U8: 3D -> 4D: dynamic rank: per tensor quantization + { + PartialShape::dynamic(), + { 0, 384, 16, 64 }, + LayerTransformation::createParamsU8I8(), + { + ngraph::element::u8, + {{ngraph::element::f32}, {128}, {0.1f}} + }, + { + ngraph::element::u8, + {}, + ngraph::element::u8, + {{ngraph::element::f32}, {128}, {0.1f}} + } + }, + // U8: 3D -> 4D: dynamic rank + { + PartialShape::dynamic(), + { 0, 3, 16, 64 }, + LayerTransformation::createParamsU8I8(), + { + ngraph::element::u8, + {{ngraph::element::f32}, {}, {{0.1f, 0.2f, 0.3f}, element::f32, {1, 3, 1, 1}}} + }, + { + ngraph::element::u8, + {{ngraph::element::f32}, {}, {{0.1f, 0.2f, 0.3f}, element::f32, {1, 3, 1, 1}}}, ngraph::element::f32, {} } @@ -340,8 +372,7 @@ const std::vector testValues = { {} } }, - // U8: no subtract 2D -> 4D: channels are affected: per tensor quantization - // TODO: story 38439 + // U8: no subtract 4D -> 2D: channels are affected: per tensor quantization { { 1, 16, 384, 384 }, { 6144, -1 }, @@ 
-352,12 +383,12 @@ const std::vector testValues = { }, { ngraph::element::u8, - {{ngraph::element::f32}, {}, {0.1f}}, - ngraph::element::f32, - {} + {}, + ngraph::element::u8, + {{ngraph::element::f32}, {}, {0.1f}} } }, - // U8: no subtract 2D -> 4D: channels are affected: per channel quantization + // U8: no subtract 4D -> 2D: channels are affected: per channel quantization { { 1, 3, 4, 5 }, { 12, -1 }, @@ -437,8 +468,83 @@ const std::vector testValues = { {} } }, + // U8: no subtract 4D -> 5D: channels are not affected: no subtract + { + { 1, 3, 4, 5 }, + { 1, 3, 20, 1, 1}, + LayerTransformation::createParamsU8I8(), + { + ngraph::element::u8, + {{ngraph::element::f32}, {}, {{0.1f, 0.2f, 0.3f}, ngraph::element::f32, {1, 3, 1, 1}}} + }, + { + ngraph::element::u8, + {}, + ngraph::element::u8, + {{ngraph::element::f32}, {}, {{0.1f, 0.2f, 0.3f}, ngraph::element::f32, {1, 3, 1, 1, 1}}}, + } + }, + // U8: no subtract 4D -> 5D: channels are affected: no subtract + { + { 1, 3, 2, 3 }, + { 1, 18, 1, 1, 1}, + LayerTransformation::createParamsU8I8(), + { + ngraph::element::u8, + {{ngraph::element::f32}, {}, {{0.1f, 0.2f, 0.3f}, ngraph::element::f32, {1, 3, 1, 1}}} + }, + { + ngraph::element::u8, + {}, + ngraph::element::u8, + { + {ngraph::element::f32}, + {}, + { + {0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.2f, 0.2f, 0.2f, 0.2f, 0.2f, 0.2f, 0.3f, 0.3f, 0.3f, 0.3f, 0.3f, 0.3f}, + ngraph::element::f32, + {1, 18, 1, 1, 1} + } + }, + } + }, + // U8: no subtract 4D -> 5D: channels are affected: no subtract + { + { 1, 3, 4, 5 }, + { 1, 12, 1, 1, 5}, + LayerTransformation::createParamsU8I8(), + { + ngraph::element::u8, + {{ngraph::element::f32}, {}, {{0.1f}, ngraph::element::f32, {}}} + }, + { + ngraph::element::u8, + {}, + ngraph::element::u8, + {{ngraph::element::f32}, {}, {{0.1f}, ngraph::element::f32, {}}}, + } + }, + // U8: no subtract 4D -> 5D: channels are affected: no subtract + { + { 1, 3, 4, 5 }, + { 1, 12, 1, 1, 5}, + LayerTransformation::createParamsU8I8(), + { + ngraph::element::u8, + {{ngraph::element::f32}, {}, {{0.1f, 0.2f, 0.3f}, ngraph::element::f32, {1, 3, 1, 1}}} + }, + { + ngraph::element::u8, + {}, + ngraph::element::u8, + { + {ngraph::element::f32}, + {}, + {{0.1f, 0.1f, 0.1f, 0.1f, 0.2f, 0.2f, 0.2f, 0.2f, 0.3f, 0.3f, 0.3f, 0.3f}, ngraph::element::f32, {1, 12, 1, 1, 1}} + } + } + }, // U8: no subtract 4D -> 2D: channels are not affected: per tensor quantization - // TODO: story 38439 { { 1, 3, 4, 5 }, { 0, -1 }, @@ -454,7 +560,7 @@ const std::vector testValues = { {{ngraph::element::f32}, {{128.f}, ngraph::element::f32, {}}, {{0.1f}, ngraph::element::f32, {}}} } }, - // U8: no subtract 4D -> 2D: channels are not affected: per tensor quantization + // U8: no subtract 4D -> 2D: channels are affected: per channel quantization { { 1, 3, 2, 2 }, { 0, -1 }, @@ -474,6 +580,26 @@ const std::vector testValues = { } } }, + // U8: no subtract 4D -> 2D: channels are affected: per channel quantization + { + { 1, 3, 2, 2 }, + { 0, -1 }, + LayerTransformation::createParamsU8I8(), + { + ngraph::element::u8, + {{ngraph::element::f32}, {{0.f, 128.f, 255.f}, ngraph::element::f32, {3, 1, 1}}, {{0.1f, 0.2f, 0.3f}, ngraph::element::f32, {3, 1, 1}}} + }, + { + ngraph::element::u8, + {}, + ngraph::element::u8, + { + {ngraph::element::f32}, + {{0.f, 0.f, 0.f, 0.f, 128.f, 128.f, 128.f, 128.f, 255.f, 255.f, 255.f, 255.f}, ngraph::element::f32, {1, 12}}, + {{0.1f, 0.1f, 0.1f, 0.1f, 0.2f, 0.2f, 0.2f, 0.2f, 0.3f, 0.3f, 0.3f, 0.3f}, ngraph::element::f32, {1, 12}} + } + } + }, // U8: 4D -> 2D: per channel dq and 
dynamic batch { { Dimension::dynamic(), 3, 2, 2 }, @@ -485,9 +611,13 @@ const std::vector testValues = { }, { ngraph::element::u8, - {{ngraph::element::f32}, {{0.f, 128.f, 255.f}, ngraph::element::f32, {1, 3, 1, 1}}, {{0.1f, 0.2f, 0.3f}, ngraph::element::f32, {1, 3, 1, 1}}}, - ngraph::element::f32, - {} + {}, + ngraph::element::u8, + { + {ngraph::element::f32}, + {{0.f, 0.f, 0.f, 0.f, 128.f, 128.f, 128.f, 128.f, 255.f, 255.f, 255.f, 255.f}, ngraph::element::f32, {1, 12}}, + {{0.1f, 0.1f, 0.1f, 0.1f, 0.2f, 0.2f, 0.2f, 0.2f, 0.3f, 0.3f, 0.3f, 0.3f}, ngraph::element::f32, {1, 12}} + } } }, // U8: no subtract 4D -> 2D: channels are not affected: per tensor quantization @@ -603,7 +733,7 @@ const std::vector testValues = { ngraph::element::u8, {{}, {}, {}}, ngraph::element::u8, - {{ngraph::element::f32}, {}, {{0.1f}, ngraph::element::f32, {1ul}}} + {{ngraph::element::f32}, {}, {{0.1f}, ngraph::element::f32, {}}} } }, // U8: no subtract 4D -> 2D @@ -619,7 +749,7 @@ const std::vector testValues = { ngraph::element::u8, {{}, {}, {}}, ngraph::element::u8, - {{ngraph::element::f32}, {}, {{0.1f}, ngraph::element::f32, {1, 1}}} + {{ngraph::element::f32}, {}, {{0.1f}, ngraph::element::f32, {}}} } }, // U8: no subtract 4D -> 2D: channels are not affected @@ -635,7 +765,7 @@ const std::vector testValues = { ngraph::element::u8, {{}, {}, {}}, ngraph::element::u8, - {{ngraph::element::f32}, {}, {{0.1f}, ngraph::element::f32, {1, 1}}} + {{ngraph::element::f32}, {}, {{0.1f}, ngraph::element::f32, {}}} } }, // U8: no subtract 4D -> 2D: channels are not affected, dynamic batch @@ -651,7 +781,7 @@ const std::vector testValues = { ngraph::element::u8, {{}, {}, {}}, ngraph::element::u8, - {{ngraph::element::f32}, {}, {{0.1f}, ngraph::element::f32, {1, 1}}} + {{ngraph::element::f32}, {}, {{0.1f}, ngraph::element::f32, {}}} } }, // U8: no subtract 4D -> 4D: channels are affected diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp index 7353032f4a0..c4b2466bc21 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp @@ -11,14 +11,12 @@ using namespace LayerTestsDefinitions; namespace { const std::vector netPrecisions = { - ngraph::element::f32 - // ngraph::element::f16 + ngraph::element::f32, + ngraph::element::f16 }; const std::vector trasformationParamValues = { - LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams(), - // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams().setUpdatePrecisions(false), - // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8() + LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams() }; const std::vector params = { @@ -27,29 +25,87 @@ const std::vector params = { { 1, 3, 32 }, { 1, 3, 4, 8 }, { 256ul, ngraph::Shape{ 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - true + "Reshape", + "U8" + }, + // 3D -> 1D + { + { 1, 3, 32 }, + { -1 }, + { 256ul, ngraph::Shape{}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + "Reshape", + "FP32" }, // 4D -> 3D { { 1, 3, 16, 16 }, { 1, 3, 256 }, { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - true + "Reshape", + "U8" }, 
// 4D -> 3D { { 1, 3, 16, 16 }, { 0, 3, -1 }, { 256ul, ngraph::Shape{ 1, 3, 1, 1 }, { 0.f }, { 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, - true + "Reshape", + "U8" }, // 4D -> 2D { { 1, 3, 4, 8 }, { 1, -1 }, { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - true + "Reshape", + "U8" }, + // 4D -> 2D + { + { 1, 3, 4, 8 }, + { 1, -1 }, + { + 256ul, + ngraph::Shape{ 1, 3, 1, 1 }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f/2.f, 255.f/3.f }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f/2.f, 255.f/3.f }, + }, + "Reshape", + "U8" + }, + // 4D -> 3D + { + { 1, 3, 4, 8 }, + { 1, 3, -1 }, + { + 256ul, + ngraph::Shape{ 1, 3, 1, 1 }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f/2.f, 255.f/3.f }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f/2.f, 255.f/3.f }, + }, + "Reshape", + "U8" + }, + // per-channel + // 4D -> 3D + { + { 1, 3, 4, 8 }, + { 1, -1, 8 }, + { + 256ul, + ngraph::Shape{ 1, 3, 1, 1 }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f/2.f, 255.f/3.f }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f/2.f, 255.f/3.f }, + }, + "Reshape", + "U8" + } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReshapeTransformation, diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp index 919f2dd9388..7a2c8ec2b8f 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp @@ -19,16 +19,16 @@ const std::vector precisions = { std::vector testValues = { { { 2, 3, 4 }, - { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} }, + { 256ul, {{1, 3, 1}, {1, 3, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 2.55f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 2.55f} }, { std::vector(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } }, - { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-128.f}, {127.f}, {-128.f, -12.8f}, {127.f, 12.7f} }, + { 256ul, {{2, 1}, {2, 1}, {2, 1}, {2, 1}}, {-128.f, -12.8f}, {127.f, 12.7f}, {-128.f, -12.8f}, {127.f, 12.7f} }, { {}, {}, {} }, "FullyConnected", "FP32" }, { { 2, 3, 4 }, - { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} }, + { 256ul, {{1, 3, 1}, {1, 3, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 2.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 2.f} }, { std::vector(4 * 2, 2.f), ngraph::element::i8, ngraph::Shape{ 2, 4 } }, {}, { ngraph::element::f32, {}, {0.1f} }, @@ -39,23 +39,23 @@ std::vector testValues = { { 1, 3, 4 }, { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}}, {-10.5f}, {4.5f}, {-10.5f}, {4.5f} }, { std::vector(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } }, - { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-128.f}, {127.f}, {-128.f, -12.8f}, {127.f, 12.7f} }, + { 256ul, {{2, 1}, {2, 1}, {2, 1}, {2, 1}}, {-128.f, -12.8f}, {127.f, 12.7f}, {-128.f, -12.8f}, {127.f, 12.7f} }, { {}, {}, {} }, "FullyConnected", "FP32" }, { { 1, 1, 3, 4 }, - { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} }, + { 256ul, {{1, 3, 1}, {1, 3, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f, 0.f, 0.f}, {25.f, 24.f, 25.f}, {0.f, 0.f, 0.f}, {25.f, 24.f, 25.f} }, { 
std::vector(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } }, - { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-128.f}, {127.f}, {-128.f, -12.8f}, {127.f, 12.7f} }, + { 256ul, {{2, 1}, {2, 1}, {2, 1}, {2, 1}}, {-128.f, -12.8f}, {127.f, 12.7f}, {-128.f, -12.8f}, {127.f, 12.7f} }, { {}, {}, {} }, "FullyConnected", "U8" }, { { 1, 1, 3, 4 }, - { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} }, + { 256ul, {{1, 3, 1}, {1, 3, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f, 0.f, 0.f}, {25.f, 24.f, 25.f}, {0.f, 0.f, 0.f}, {25.f, 24.f, 25.f} }, { std::vector(4 * 2, 2.f), ngraph::element::i8, ngraph::Shape{ 2, 4 } }, {}, { ngraph::element::f32, {}, {{0.1f, 0.01}, ngraph::element::f32, ngraph::Shape{ 2, 1 }} }, @@ -73,7 +73,7 @@ std::vector testValues = { }, { { 2, 3 }, - { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-10.f}, {5.f}, {-10.f, -5.f}, {5.f, 5.f} }, + { 256ul, {{2, 1}, {2, 1}, {2, 1}, {2, 1}}, {-10.f, -5.f}, {5.f, 5.f}, {-10.f, -5.f}, {5.f, 5.f} }, { std::vector{1, 2, 3, 4, 5, 6}, ngraph::element::f32, ngraph::Shape{ 2, 3 } }, { 256ul, {{1}, {1}, {1}, {1}}, {-128.f}, {127.f}, {-12.8f}, {12.7f} }, { {}, {}, {} }, @@ -82,7 +82,7 @@ std::vector testValues = { }, { { 2, 3 }, - { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-10.f}, {5.f}, {-10.f, -5.f}, {5.f, 5.f} }, + { 256ul, {{2, 1}, {2, 1}, {2, 1}, {2, 1}}, {-10.f, -5.f}, {5.f, 5.f}, {-10.f, -5.f}, {5.f, 5.f} }, { std::vector{1, 2, 3, 4, 5, 6}, ngraph::element::i8, ngraph::Shape{ 2, 3 } }, {}, { ngraph::element::f32, {}, {0.1f} }, diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp index b74f1d2769e..2621dec4dba 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp @@ -17,8 +17,6 @@ const std::vector netPrecisions = { const std::vector trasformationParamValues = { LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams(), - // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams().setUpdatePrecisions(false), - // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8() }; const std::vector params = { @@ -27,29 +25,87 @@ const std::vector params = { { 1, 3, 32 }, { 1, 3, 4, 8 }, { 256ul, ngraph::Shape{ 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - true + "Reshape", + "U8" + }, + // 3D -> 1D + { + { 1, 3, 32 }, + { -1 }, + { 256ul, ngraph::Shape{}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + "Reshape", + "FP32" }, // 4D -> 3D { { 1, 3, 16, 16 }, { 1, 3, 256 }, { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - true + "Reshape", + "U8" }, // 4D -> 3D { { 1, 3, 16, 16 }, { 0, 3, -1 }, { 256ul, ngraph::Shape{ 1, 3, 1, 1 }, { 0.f }, { 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, - true + "Reshape", + "U8" }, // 4D -> 2D { { 1, 3, 4, 8 }, { 1, -1 }, { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - true + "Reshape", + "U8" }, + // 4D -> 2D + { + { 1, 3, 4, 8 }, + { 1, -1 }, + { + 256ul, + ngraph::Shape{ 1, 3, 1, 1 }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f / 2.f, 255.f / 3.f }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f / 2.f, 255.f / 3.f }, + }, + "Reshape", + "U8" + }, + // 
4D -> 3D + { + { 1, 3, 4, 8 }, + { 1, 3, -1 }, + { + 256ul, + ngraph::Shape{ 1, 3, 1, 1 }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f / 2.f, 255.f / 3.f }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f / 2.f, 255.f / 3.f }, + }, + "Reshape", + "U8" + }, + // per-channel + // 4D -> 3D + { + { 1, 3, 4, 8 }, + { 1, -1, 8 }, + { + 256ul, + ngraph::Shape{ 1, 3, 1, 1 }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f / 2.f, 255.f / 3.f }, + { 0.f, 0.f, 0.f }, + { 255.f, 255.f / 2.f, 255.f / 3.f }, + }, + "Reshape", + "U8" + } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReshapeTransformation, diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp index 29175cf77ee..9275538b8cf 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp @@ -17,7 +17,8 @@ public: ngraph::PartialShape inputShape; std::vector reshapeConstValues; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize; - bool isTransformed; + std::string layerType; + std::string expectedKernelType; }; typedef std::tuple< @@ -35,6 +36,7 @@ public: protected: void SetUp() override; + void Run() override; }; } // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp index 2d5141c6800..1c227ce27a3 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp @@ -6,15 +6,11 @@ #include #include -#include -#include #include -#include "ngraph_functions/builders.hpp" #include #include "lpt_ngraph_functions/reshape_function.hpp" - namespace LayerTestsDefinitions { std::string ReshapeTransformation::getTestCaseName(testing::TestParamInfo obj) { @@ -50,6 +46,18 @@ void ReshapeTransformation::SetUp() { param.fakeQuantize); } +void ReshapeTransformation::Run() { + LayerTestsCommon::Run(); + + const auto params = std::get<3>(GetParam()); + auto actualPrecision = getRuntimePrecisionByType(params.layerType); + const auto expectedPrecision = params.expectedKernelType; + if ((expectedPrecision == "FP32") && (actualPrecision == "FP16")) { + actualPrecision = "FP32"; + } + EXPECT_EQ(actualPrecision, expectedPrecision); +} + TEST_P(ReshapeTransformation, CompareWithRefImpl) { Run(); }; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp index 72172e69924..bde6ba57578 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp @@ -83,6 +83,10 @@ public: std::string getRuntimePrecision(const std::string& layerName); std::string getRuntimePrecisionByType(const std::string& layerType); +#ifndef NDEBUG + void showRuntimePrecisions(); +#endif + template static void Compare(const T_NGRAPH 
*expected, const T_IE *actual, std::size_t size, float threshold) { for (std::size_t i = 0; i < size; ++i) { diff --git a/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp b/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp index 056826aff86..3c1639b978f 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp @@ -474,6 +474,24 @@ std::string LayerTestsCommon::getRuntimePrecisionByType(const std::string& layer return ""; } +#ifndef NDEBUG +void LayerTestsCommon::showRuntimePrecisions() { + const auto execGraph = executableNetwork.GetExecGraphInfo(); + const auto function = execGraph.getFunction(); + + for (const auto& op : function->get_ops()) { + const auto& rtInfo = op->get_rt_info(); + const auto& typeIt = rtInfo.find("layerType"); + + const auto type = ngraph::as_type_ptr>(typeIt->second)->get(); + const auto& it = rtInfo.find("runtimePrecision"); + + const auto rtPrecisionPtr = ngraph::as_type_ptr>(it->second); + std::cout << type << ": " << rtPrecisionPtr->get() << std::endl; + } +} +#endif + void LayerTestsCommon::SetRefMode(RefMode mode) { refMode = mode; } diff --git a/inference-engine/tests/unit/inference_engine/transformations/low_precision/reshape_test.cpp b/inference-engine/tests/unit/inference_engine/transformations/low_precision/reshape_test.cpp index 403f73ae14c..8377d12d029 100644 --- a/inference-engine/tests/unit/inference_engine/transformations/low_precision/reshape_test.cpp +++ b/inference-engine/tests/unit/inference_engine/transformations/low_precision/reshape_test.cpp @@ -152,7 +152,6 @@ TEST(LPT_ReshapeTransformation, canBeTransformed_4D_to_2D_perSpacial_TRUE) { ngraph::Shape({ 1, 9216 }))); } -// TODO: story 38439 TEST(LPT_ReshapeTransformation, canBeTransformed_5D_to_5D_perBatch) { ASSERT_FALSE(ngraph::pass::low_precision::ReshapeTransformation::canBeTransformed( ngraph::Shape({ 1, 16, 1, 1, 1 }),
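Editor's note: the comment added at the top of reshapeDequantizationConstant describes a three-step handling of the per-channel dequantization Constant (derive a broadcast shape from the original Constant and the Reshape output, broadcast when the channel count changes, then reshape and replace). The following is a minimal standalone sketch of that arithmetic only, using plain std::vector instead of the ngraph Constant nodes the pass actually rewrites; the shapes and values mirror the {1, 3, 2, 3} -> {1, 18, 1, 1, 1} test case added in this patch.

// Simplified sketch of the constant handling described in reshapeDequantizationConstant.
// Assumption: static shapes and a std::vector stand-in for opset1::Constant.
#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    // Per-channel multiply constant with shape {1, 3, 1, 1}.
    const std::vector<float> constantValues{0.1f, 0.2f, 0.3f};
    const std::size_t constantChannels = 3;

    // Reshape output shape {1, 18, 1, 1, 1}: the channel count grows from 3 to 18.
    const std::size_t outputChannels = 18;
    assert(outputChannels % constantChannels == 0);

    // Steps 1 and 2: broadcast each original channel value across the merged
    // dimensions (18 / 3 = 6 copies per channel).
    const std::size_t dimensionsToBroadcast = outputChannels / constantChannels;
    std::vector<float> broadcasted;
    for (const float value : constantValues) {
        for (std::size_t i = 0; i < dimensionsToBroadcast; ++i) {
            broadcasted.push_back(value);
        }
    }

    // Step 3: the broadcasted buffer is reshaped to {1, 18, 1, 1, 1},
    // i.e. all ones except the channel dimension.
    std::cout << "resulting constant shape: {1, " << broadcasted.size() << ", 1, 1, 1}\n";
    for (const float value : broadcasted) {
        std::cout << value << ' ';
    }
    std::cout << '\n';  // 0.1 x6, 0.2 x6, 0.3 x6 - matches the expected dequantization in the test
    return 0;
}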
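Editor's note: the rewritten four-argument ReshapeTransformation::canBeTransformed keeps only the generic rule that the last non-broadcast dimension of the dequantization constants must precede the first dimension the Reshape changes, with an explicit allowance when both are the channel dimension. A small sketch of those checks on plain integer shapes follows; it covers only this overload (the caller in the patch performs additional channel-divisibility and dynamic-shape checks), and uses std::vector<size_t> in place of ngraph::Shape / PartialShape.

// Sketch of the dimension checks used by ReshapeTransformation::canBeTransformed.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

using Shape = std::vector<std::size_t>;

std::size_t getLastNotBroadcastedDimension(const Shape& shape) {
    for (int i = static_cast<int>(shape.size()) - 1; i >= 0; --i) {
        if (shape[i] != 1ul) {
            return static_cast<std::size_t>(i);
        }
    }
    return 0;
}

std::size_t getFirstChangedDimension(const Shape& shape1, const Shape& shape2) {
    const std::size_t minSize = std::min(shape1.size(), shape2.size());
    std::size_t i = 0;
    for (; i < minSize; ++i) {
        if (shape1[i] != shape2[i]) {
            return i;
        }
    }
    return i;
}

bool canBeTransformed(const Shape& subtractShape, const Shape& multiplyShape,
                      const Shape& inputShape, const Shape& outputShape) {
    const std::size_t lastNotBroadcasted = std::max(getLastNotBroadcastedDimension(subtractShape),
                                                    getLastNotBroadcastedDimension(multiplyShape));
    const std::size_t firstChanged = getFirstChangedDimension(inputShape, outputShape);
    // The channel dimension (index 1) is handled natively by LPT, so it is allowed to change.
    if ((lastNotBroadcasted == 1ul) && (firstChanged == 1ul)) {
        return true;
    }
    return lastNotBroadcasted < firstChanged;
}

int main() {
    std::cout << std::boolalpha;
    // Per-channel multiply {1, 3, 1, 1}; 4D -> 2D reshape {1, 3, 4, 5} -> {1, 12}:
    // only the channel dimension changes, so the transformation is allowed.
    std::cout << canBeTransformed({}, {1, 3, 1, 1}, {1, 3, 4, 5}, {1, 12}) << '\n';  // true
    // The batch dimension changes as well ({2, 3, 4, 5} -> {1, 24}): rejected.
    std::cout << canBeTransformed({}, {1, 3, 1, 1}, {2, 3, 4, 5}, {1, 24}) << '\n';  // false
    return 0;
}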