diff --git a/inference-engine/cmake/vpu_dependencies.cmake b/inference-engine/cmake/vpu_dependencies.cmake
index 6db2057e8fd..10744c60942 100644
--- a/inference-engine/cmake/vpu_dependencies.cmake
+++ b/inference-engine/cmake/vpu_dependencies.cmake
@@ -19,7 +19,7 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
 # Default packages
 #
 
-set(FIRMWARE_PACKAGE_VERSION 1169)
+set(FIRMWARE_PACKAGE_VERSION 1176)
 set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.02.0")
 
 #
diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/operations/out_shape_of_reshape.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/operations/out_shape_of_reshape.hpp
index 6bb56c16f02..89cc9f0fd55 100644
--- a/inference-engine/src/vpu/common/include/vpu/ngraph/operations/out_shape_of_reshape.hpp
+++ b/inference-engine/src/vpu/common/include/vpu/ngraph/operations/out_shape_of_reshape.hpp
@@ -6,6 +6,7 @@
 
 #include <ngraph/node.hpp>
 #include <ngraph/op/op.hpp>
+#include "ngraph/runtime/host_tensor.hpp"
 
 namespace ngraph { namespace vpu { namespace op {
 
@@ -28,6 +29,8 @@ public:
     bool getSpecialZero() const { return m_specialZero; }
     void setSpecialZero(bool special_zero) { m_specialZero = special_zero; }
 
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) override;
+
 private:
     bool m_specialZero;
 };
diff --git a/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp b/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
index 4688e7c1852..14a7c279030 100644
--- a/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
+++ b/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include <algorithm>
 #include "vpu/ngraph/operations/out_shape_of_reshape.hpp"
 
 namespace ngraph { namespace vpu { namespace op {
@@ -66,6 +67,196 @@ bool OutShapeOfReshape::visit_attributes(ngraph::AttributeVisitor& visitor) {
     return true;
 }
 
+namespace {
+
+// Reads a shape (list of dimension extents) out of a 1-D host tensor of
+// integral element type ET and appends it to `result`.
+template <element::Type_t ET>
+bool getShapeFromHostTensorData(const HostTensorPtr& data, Shape& result) {
+    using T = typename element_type_traits<ET>::value_type;
+    T* dataPtr = data->get_data_ptr<ET>();
+    if (!dataPtr) {
+        return false;
+    }
+    size_t outputRank = data->get_shape()[0];
+
+    for (size_t i = 0; i < outputRank; i++) {
+        result.push_back(dataPtr[i]);
+    }
+
+    return true;
+}
+
+// Writes `shape` into a 1-D host tensor of integral element type ET.
+// Fails if the tensor's declared rank does not match shape.size().
+template <element::Type_t ET>
+bool setShapeToHostTensorData(const HostTensorPtr& data, const Shape& shape) {
+    using T = typename element_type_traits<ET>::value_type;
+    T* dataPtr = data->get_data_ptr<ET>();
+    if (!dataPtr) {
+        return false;
+    }
+    size_t outputRank = data->get_shape()[0];
+    if (shape.size() != outputRank) {
+        return false;
+    }
+
+    for (size_t i = 0; i < outputRank; i++) {
+        dataPtr[i] = shape[i];
+    }
+    return true;
+}
+
+// Runtime dispatch over the tensor's element type for reading a shape.
+bool getShapeFromHostTensorData(const HostTensorPtr& data, Shape& shape) {
+    bool rc = false;
+    switch (data->get_element_type()) {
+        case element::Type_t::i8:
+            rc = getShapeFromHostTensorData<element::Type_t::i8>(data, shape);
+            break;
+        case element::Type_t::i16:
+            rc = getShapeFromHostTensorData<element::Type_t::i16>(data, shape);
+            break;
+        case element::Type_t::i32:
+            rc = getShapeFromHostTensorData<element::Type_t::i32>(data, shape);
+            break;
+        case element::Type_t::i64:
+            rc = getShapeFromHostTensorData<element::Type_t::i64>(data, shape);
+            break;
+        case element::Type_t::u8:
+            rc = getShapeFromHostTensorData<element::Type_t::u8>(data, shape);
+            break;
+        case element::Type_t::u16:
+            rc = getShapeFromHostTensorData<element::Type_t::u16>(data, shape);
+            break;
+        case element::Type_t::u32:
+            rc = getShapeFromHostTensorData<element::Type_t::u32>(data, shape);
+            break;
+        case element::Type_t::u64:
+            rc = getShapeFromHostTensorData<element::Type_t::u64>(data, shape);
+            break;
+        default: rc = false;
+    }
+    return rc;
+}
+
+// Runtime dispatch over the tensor's element type for writing a shape.
+bool setShapeToHostTensorData(const HostTensorPtr& data, const Shape& shape) {
+    bool rc = false;
+    switch (data->get_element_type()) {
+        case element::Type_t::i8:
+            rc = setShapeToHostTensorData<element::Type_t::i8>(data, shape);
+            break;
+        case element::Type_t::i16:
+            rc = setShapeToHostTensorData<element::Type_t::i16>(data, shape);
+            break;
+        case element::Type_t::i32:
+            rc = setShapeToHostTensorData<element::Type_t::i32>(data, shape);
+            break;
+        case element::Type_t::i64:
+            rc = setShapeToHostTensorData<element::Type_t::i64>(data, shape);
+            break;
+        case element::Type_t::u8:
+            rc = setShapeToHostTensorData<element::Type_t::u8>(data, shape);
+            break;
+        case element::Type_t::u16:
+            rc = setShapeToHostTensorData<element::Type_t::u16>(data, shape);
+            break;
+        case element::Type_t::u32:
+            rc = setShapeToHostTensorData<element::Type_t::u32>(data, shape);
+            break;
+        case element::Type_t::u64:
+            rc = setShapeToHostTensorData<element::Type_t::u64>(data, shape);
+            break;
+        default: rc = false;
+    }
+    return rc;
+}
+
+// Computes the effective output shape of a Reshape given the input-data
+// shape tensor and the out-shape descriptor (which may contain 0 with
+// special-zero semantics and a single -1 "infer this dim" marker), and
+// writes the result into outShapeTensor.
+bool evaluateOutShapeOfReshape(
+        const HostTensorPtr& inDataShapeTensor,
+        const HostTensorPtr& outShapeDescriptorTensor,
+        bool specialZero,
+        const HostTensorPtr& outShapeTensor) {
+    if (!inDataShapeTensor || !outShapeDescriptorTensor || !outShapeTensor) {
+        return false;
+    }
+    Shape inputShape;
+    Shape outputShape;
+
+    if (!getShapeFromHostTensorData(inDataShapeTensor, inputShape)) {
+        return false;
+    }
+    if (!getShapeFromHostTensorData(outShapeDescriptorTensor, outputShape)) {
+        return false;
+    }
+
+    // Only -1 (infer) and non-negative extents are legal in the descriptor.
+    if (std::any_of(outputShape.begin(), outputShape.end(), [](int64_t value) { return value < -1; })) {
+        return false;
+    }
+
+    int zeroDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
+                                      [](int64_t value) { return value == 0; });
+    int negativeDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
+                                          [](int64_t value) { return value == -1; });
+    if (negativeDimsCount > 1) {
+        return false;
+    }
+
+    size_t outputRank = outputShape.size();
+
+    if (!(zeroDimsCount && specialZero) && !negativeDimsCount) {
+        // No special markers: element counts must already agree.
+        if (shape_size(inputShape) != shape_size(outputShape)) {
+            return false;
+        }
+    } else {
+        int negativeDimIdx = -1;
+
+        size_t inputTotalDimCount = shape_size(inputShape);
+        size_t outputTotalDimCount = 1;
+
+        // compute the output shape
+        for (size_t i = 0; i < outputRank; i++) {
+            if (outputShape[i] == 0 && specialZero) {
+                // Copy input_shape[i] for zero values
+                if (i >= inputShape.size()) {
+                    return false;
+                }
+                outputShape[i] = inputShape[i];
+                outputTotalDimCount *= inputShape[i];
+            } else if (outputShape[i] == -1) {
+                negativeDimIdx = static_cast<int>(i);
+            } else {
+                outputTotalDimCount *= outputShape[i];
+            }
+        }
+
+        if (negativeDimIdx != -1) {
+            // Infer size such that number of output elements matches
+            // input elements
+            if (outputTotalDimCount == 0) {
+                if (inputTotalDimCount != 0) {
+                    return false;
+                }
+                outputShape[negativeDimIdx] = 0;
+            } else {
+                if (inputTotalDimCount % outputTotalDimCount != 0) {
+                    return false;
+                }
+                outputShape[negativeDimIdx] = inputTotalDimCount / outputTotalDimCount;
+            }
+        }
+    }
+
+    if (!setShapeToHostTensorData(outShapeTensor, outputShape)) {
+        return false;
+    }
+
+    return true;
+}
+
+}  // namespace
+
+bool OutShapeOfReshape::evaluate(const HostTensorVector& outputs,
+                                 const HostTensorVector& inputs) {
+    return evaluateOutShapeOfReshape(inputs[0], inputs[1], m_specialZero, outputs[0]);
+}
+
 }  // namespace op
 }  // namespace vpu
diff --git a/inference-engine/tests/functional/plugin/myriad/single_layer_tests/out_shape_of_reshape.cpp b/inference-engine/tests/functional/plugin/myriad/single_layer_tests/out_shape_of_reshape.cpp
new file mode 100644
index 00000000000..30d45ab25d6
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/myriad/single_layer_tests/out_shape_of_reshape.cpp
@@ -0,0 +1,120 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/ngraph/operations/out_shape_of_reshape.hpp"
+
+#include "vpu/private_plugin_config.hpp"
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <functional_test_utils/blob_utils.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <ngraph/function.hpp>
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+using InputShape = InferenceEngine::SizeVector;
+using ShapeDescriptor = std::vector<int64_t>;
+
+using OutShapeOfReshapeParam = std::tuple<
+        InputShape,           // Input shape
+        ShapeDescriptor,      // out shape descriptor
+        bool>;                // Special zero
+
+using OutShapeOfReshapeTestParam = std::tuple<
+        OutShapeOfReshapeParam,             // Shape params
+        LayerTestsUtils::TargetDevice>;     // Device name
+
+
+namespace LayerTestsDefinitions {
+
+class OutShapeOfReshapeLayerTest : public testing::WithParamInterface<OutShapeOfReshapeTestParam>,
+                                   public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<OutShapeOfReshapeTestParam>& obj) {
+        OutShapeOfReshapeParam shapesParam;
+        std::string targetDevice;
+        std::tie(shapesParam, targetDevice) = obj.param;
+
+        const auto& inputShape = std::get<0>(shapesParam);
+        const auto& outShapeDescriptor = std::get<1>(shapesParam);
+        const auto& specialZero = std::get<2>(shapesParam);
+
+        std::ostringstream result;
+        result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
+        result << "OSD=" << CommonTestUtils::vec2str(outShapeDescriptor) << "_";
+        result << "SZ=" << std::to_string(specialZero) << "_";
+        result << "targetDevice=" << targetDevice;
+        return result.str();
+    }
+
+protected:
+    void SetUp() override {
+        SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
+        configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+        configuration[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+
+        OutShapeOfReshapeParam shapesParam;
+        std::tie(shapesParam, targetDevice) = this->GetParam();
+        inPrc = InferenceEngine::Precision::I32;
+        outPrc = InferenceEngine::Precision::I32;
+
+        const auto& inputShape = std::get<0>(shapesParam);
+        const auto& outShapeDescriptor = std::get<1>(shapesParam);
+        const auto& specialZero = std::get<2>(shapesParam);
+
+        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc);
+
+        const auto inputShapeParam = std::make_shared<ngraph::opset3::Parameter>(
+                ngPrc, ngraph::Shape{inputShape.size()});
+        const auto outShapeDescriptorConst = std::make_shared<ngraph::opset3::Constant>(
+                ngPrc, ngraph::Shape{outShapeDescriptor.size()}, outShapeDescriptor);
+
+        const auto outShapeOfReshape = std::make_shared<ngraph::vpu::op::OutShapeOfReshape>(
+                inputShapeParam, outShapeDescriptorConst, specialZero);
+        ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(outShapeOfReshape)};
+        function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{inputShapeParam});
+    }
+
+    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override {
+        OutShapeOfReshapeParam shapesParam;
+        std::string targetDevice;
+        std::tie(shapesParam, targetDevice) = this->GetParam();
+        const auto& inputShape = std::get<0>(shapesParam);
+
+        InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc());
+        blob->allocate();
+
+        auto dataPtr = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob)->rwmap().as<int32_t*>();
+        for (size_t i = 0; i < blob->size(); ++i) {
+            dataPtr[i] = inputShape[i];
+        }
+
+        return blob;
+    }
+};
+
+TEST_P(OutShapeOfReshapeLayerTest, accuracy) {
+    Run();
+}
+
+std::vector<OutShapeOfReshapeParam> shapeParams = {
+        std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 0, 0, 64, 512 }, true),
+        std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 3, 2, 64, 512 }, false),
+        std::make_tuple(InputShape{ 2, 3, 0, 256 }, ShapeDescriptor{ 3, 8, 0, 512 }, false),
+        std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 2, 3, -1, 64 }, false),
+        std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 2, -1, 0 }, true),
+        std::make_tuple(InputShape{ 2, 5, 5, 24 }, ShapeDescriptor{ 0, -1, 4 }, true),
+        std::make_tuple(InputShape{ 2, 5, 5, 0 }, ShapeDescriptor{ 0, 4 }, false),
+};
+
+INSTANTIATE_TEST_CASE_P(accuracy, OutShapeOfReshapeLayerTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(shapeParams),
+                                ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
+                        OutShapeOfReshapeLayerTest::getTestCaseName);
+
+}  // namespace LayerTestsDefinitions