Removed legacy IE shape infer (#4211)

* Removed legacy IE shape infer

* Removed legacy shape infer tests

* Updated tests and made the IR Reader load old experimental and extension ops as opset6

* Change the opset of an op only if it is currently marked experimental/extension, to avoid remappings like opset1::Proposal -> opset6::Proposal (see the sketch below)
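
As context for the last bullet, a hedged sketch of the reader-side check (simplified types; the helper name resolve_opset is hypothetical — the real logic lives in V10Parser::XmlDeserializer::createNode, shown in the diff below). Only ops whose IR version attribute is "experimental" or "extension" are remapped, so a node already serialized as opset1::Proposal keeps its recorded opset:

    #include <string>
    #include <unordered_set>

    // Return the opset the IR reader should use when instantiating a node.
    std::string resolve_opset(const std::string& type, const std::string& version) {
        static const std::unordered_set<std::string> added_to_opset = {
            "ExperimentalDetectronDetectionOutput", "GRUCell", "RNNCell", "Proposal" /* ... */};
        // Remap only legacy "experimental"/"extension" ops; opset1, opset6, etc. stay as written.
        if (added_to_opset.count(type) && (version == "experimental" || version == "extension"))
            return "opset6";
        return version;
    }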

Co-authored-by: Evgeny Lazarev <elazarev.nnov@gmail.com>
Evgeny Lazarev 2021-02-10 07:55:48 +03:00 committed by GitHub
parent 929fa26e2e
commit 48aa1c35b3
15 changed files with 85 additions and 654 deletions


@@ -9,7 +9,6 @@ file (GLOB LIBRARY_SRC
${CMAKE_CURRENT_SOURCE_DIR}/cpp/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/threading/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/cpp/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/shape_infer/ie_built_in_holder.cpp
)
# TODO: WA for OneHot pass usage in reshape


@@ -33,7 +33,6 @@
#include "exec_graph_info.hpp"
#include "ie_itt.hpp"
#include "generic_ie.hpp"
#include "shape_infer/ie_built_in_holder.hpp"
using namespace std;
using namespace InferenceEngine;
@@ -114,9 +113,6 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(
network.setInputInfo(info);
};
// Add shape infer method for old operations which are not included in opset1, opset2 and opset3
::ngraph::op::GenericIE::addExtension(_ngraph_function, std::make_shared<ShapeInfer::BuiltInShapeInferHolder>());
reshape();
for (const auto& layer : _ngraph_function->get_parameters()) {
std::string outName = layer->get_friendly_name();


@@ -15,7 +15,6 @@
#include "blob_factory.hpp"
#include <ie_ngraph_utils.hpp>
#include "shape_infer/ie_ishape_infer_extension.hpp"
#include "ngraph/util.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/validation_util.hpp"
@@ -85,63 +84,6 @@ void ngraph::op::GenericIE::validate_and_infer_types() {
}
return get_output_element_type(index);
};
// Try to find an extension with a shape inference implementation and apply it
for (const auto& ext : extensions) {
IE_SUPPRESS_DEPRECATED_START
InferenceEngine::IShapeInferImpl::Ptr impl;
InferenceEngine::StatusCode ret = ext->getShapeInferImpl(impl, type.c_str(), nullptr);
if (ret != InferenceEngine::StatusCode::OK || !impl) continue;
std::vector<InferenceEngine::Blob::CPtr> inputs;
std::map<std::string, std::string> parameters;
std::map<std::string, InferenceEngine::Blob::Ptr> blobs;
std::vector<InferenceEngine::SizeVector> outShapes;
for (uint64_t i = 0; i < get_input_size(); i++) {
PartialShape this_input_shape = get_input_partial_shape(i);
if (!this_input_shape.is_static()) {
// Set dynamic output shapes if input shapes are not defined
for (size_t output_index = 0; output_index < outputs.size(); output_index++) {
set_output_type(output_index, get_precision(output_index), PartialShape::dynamic());
}
return;
}
Shape this_ishape = get_input_shape(i);
InferenceEngine::SizeVector dims = this_ishape;
InferenceEngine::Blob::Ptr input = make_blob_with_precision(InferenceEngine::TensorDesc(
InferenceEngine::details::convertPrecision(get_input_element_type(i)), dims,
InferenceEngine::TensorDesc::getLayoutByDims(dims)));
inputs.emplace_back(input);
}
for (const auto& attr : params) {
if (attr.second.is<std::string>()) {
parameters[attr.first] = attr.second.as<std::string>();
} else if (attr.second.is<InferenceEngine::Blob::CPtr>()) {
auto cBlob = attr.second.as<InferenceEngine::Blob::CPtr>();
auto wBlob = std::const_pointer_cast<InferenceEngine::Blob>(cBlob);
blobs[attr.first] = wBlob;
} else if (attr.second.is<InferenceEngine::Blob::Ptr>()) {
auto wBlob = attr.second.as<InferenceEngine::Blob::Ptr>();
blobs[attr.first] = wBlob;
} else {
THROW_IE_EXCEPTION << "Generic node for layer " << get_friendly_name() << " with type " << type
<< " has incorrect parameter " << attr.first << "!";
}
}
ret = impl->inferShapes(inputs, parameters, blobs, outShapes, nullptr);
IE_SUPPRESS_DEPRECATED_END
if (ret != InferenceEngine::StatusCode::OK || outShapes.size() != outputs.size()) continue;
for (size_t output_index = 0; output_index < outputs.size(); output_index++) {
set_output_type(output_index, get_precision(output_index), Shape(outShapes[output_index]));
}
return;
}
// Extensions are not loaded when we create nGraph function
// First call: create node


@@ -1,74 +0,0 @@
// Copyright (C) 2017-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <memory>
#include <string>
#include "shape_infer/ie_built_in_holder.hpp"
#include "shape_infer/ie_proposal_shape_infer.hpp"
#include "shape_infer/ie_rnn_cell_shape_infer.hpp"
#include "shape_infer/ie_simpler_nms_shape_infer.hpp"
#include "shape_infer/ie_sparse_to_dense_shape_infer.hpp"
#include "shape_infer/ie_unique_shape_infer.hpp"
#include "shape_infer/ie_sparse_to_dense_shape_infer.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
BuiltInShapeInferHolder::ImplsHolder::Ptr BuiltInShapeInferHolder::GetImplsHolder() {
static ImplsHolder::Ptr localHolder;
if (localHolder == nullptr) {
localHolder = std::make_shared<ImplsHolder>();
}
return localHolder;
}
void BuiltInShapeInferHolder::AddImpl(const std::string& name, const IShapeInferImpl::Ptr& impl) {
GetImplsHolder()->list[name] = impl;
}
StatusCode BuiltInShapeInferHolder::getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
auto& factories = GetImplsHolder()->list;
types = new char*[factories.size()];
size = 0;
for (auto it = factories.begin(); it != factories.end(); it++, size++) {
types[size] = new char[it->first.size() + 1];
std::copy(it->first.begin(), it->first.end(), types[size]);
types[size][it->first.size()] = '\0';
}
return OK;
}
StatusCode BuiltInShapeInferHolder::getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type,
ResponseDesc* resp) noexcept {
auto& impls = BuiltInShapeInferHolder::GetImplsHolder()->list;
if (impls.find(type) != impls.end()) {
impl = impls[type];
return OK;
}
impl.reset();
return NOT_FOUND;
}
template <typename Impl>
class ImplRegisterBase {
public:
explicit ImplRegisterBase(const std::string& type) {
BuiltInShapeInferHolder::AddImpl(type, std::make_shared<Impl>(type));
}
};
#define REG_SHAPE_INFER_FOR_TYPE(__prim, __type) \
static ImplRegisterBase<__prim> __bi_reg__##__type(#__type)
REG_SHAPE_INFER_FOR_TYPE(SimplerNMSShapeProp, SimplerNMS);
REG_SHAPE_INFER_FOR_TYPE(SparseToDenseShapeProp, SparseToDense);
REG_SHAPE_INFER_FOR_TYPE(ProposalShapeProp, Proposal);
REG_SHAPE_INFER_FOR_TYPE(RNNCellShapeProp, RNNCell);
REG_SHAPE_INFER_FOR_TYPE(GRUCellShapeProp, GRUCell);
REG_SHAPE_INFER_FOR_TYPE(UniqueShapeProp, Unique);
} // namespace ShapeInfer
} // namespace InferenceEngine
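
For context, the removed holder relied on the classic static-registrar idiom: a file-scope object whose constructor inserts an implementation into a type-keyed map before main() runs, which is what REG_SHAPE_INFER_FOR_TYPE expanded to above. A standalone sketch of that pattern, with hypothetical names:

    #include <map>
    #include <memory>
    #include <string>

    struct ShapeInferBase {
        virtual ~ShapeInferBase() = default;
    };

    // Hypothetical registry mirroring BuiltInShapeInferHolder::AddImpl.
    inline std::map<std::string, std::shared_ptr<ShapeInferBase>>& registry() {
        static std::map<std::string, std::shared_ptr<ShapeInferBase>> impls;
        return impls;
    }

    template <typename Impl>
    struct ImplRegister {
        explicit ImplRegister(const std::string& type) {
            registry()[type] = std::make_shared<Impl>();
        }
    };

    struct MyShapeInfer : ShapeInferBase {};
    // The static object's constructor runs at program start and fills the registry.
    static ImplRegister<MyShapeInfer> reg_my_shape_infer("MyOp");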


@@ -1,50 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <list>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include <description_buffer.hpp>
#include "caseless.hpp"
#include "shape_infer/ie_ishape_infer_extension.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Holder of shape infer implementations for built-in IE layers that plugins support out-of-the-box
*/
class BuiltInShapeInferHolder : public IShapeInferExtension {
struct ImplsHolder {
using Ptr = std::shared_ptr<ImplsHolder>;
InferenceEngine::details::caseless_map<std::string, IShapeInferImpl::Ptr> list;
};
public:
StatusCode getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept override;
StatusCode getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type, ResponseDesc* resp) noexcept override;
void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override {}
void Release() noexcept override {
delete this;
}
void Unload() noexcept override {}
static void AddImpl(const std::string& name, const IShapeInferImpl::Ptr& impl);
private:
static ImplsHolder::Ptr GetImplsHolder();
};
} // namespace ShapeInfer
} // namespace InferenceEngine


@@ -1,145 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <shape_infer/ie_ishape_infer_extension.hpp>
#include <description_buffer.hpp>
#include <list>
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
inline std::string GetParamAsString(const char* param, const std::map<std::string, std::string> & params) {
auto it = params.find(param);
if (it == params.end()) {
THROW_IE_EXCEPTION << "No such parameter name '" << param << "'";
}
return (*it).second;
}
inline int GetParamAsInt(const char* param, const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, params);
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer. Value "
<< val << " cannot be casted to int.";
}
}
inline bool GetParamAsBool(const char* param, const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, params);
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return static_cast<char>(std::tolower(value));
});
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return (GetParamAsInt(param, params) != 0);
}
return result;
}
std::string GetParamAsString(const char* param, const char* def,
const std::map<std::string, std::string> & params) {
auto it = params.find(param);
if (it == params.end() || it->second.empty()) {
return def;
}
return (*it).second;
}
int GetParamAsInt(const char* param, int def,
const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, std::to_string(def).c_str(), params);
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer. Value "
<< val << " cannot be casted to int.";
}
}
bool GetParamAsBool(const char* param, bool def,
const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, std::to_string(def).c_str(), params);
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return static_cast<char>(std::tolower(value));
});
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return (GetParamAsInt(param, def, params) != 0);
}
return result;
}
inline unsigned int GetParamAsUInt(const char* param, const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, params);
std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer" +
". Value " + val + " cannot be casted to unsigned int.";
try {
int value = std::stoi(val);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
return static_cast<unsigned int>(value);
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
namespace ShapeInfer {
/**
 * @brief Base class for all built-in shape infer implementations. Contains common validation and
 * error-handling logic
*/
class BuiltInShapeInferImpl : public IShapeInferImpl {
public:
explicit BuiltInShapeInferImpl(const std::string& type): _type(type) { }
virtual void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) = 0;
StatusCode inferShapes(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes,
ResponseDesc* resp) noexcept override {
inShapes.clear();
for (const auto& blob : inBlobs) {
inShapes.push_back(blob->getTensorDesc().getDims());
}
outShapes.clear();
try {
inferShapesImpl(inBlobs, params, blobs, outShapes);
return OK;
} catch (const std::exception& ex) {
return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
} catch (...) {
return InferenceEngine::DescriptionBuffer(UNEXPECTED) << "Unknown error";
}
}
protected:
std::string _type;
std::vector<SizeVector> inShapes;
};
} // namespace ShapeInfer
} // namespace InferenceEngine
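
For reference, the GetParamAs* helpers above operated on the layer's string attribute map from the IR; a hedged usage sketch with hypothetical values, assuming the helpers are visible in the same translation unit:

    #include <map>
    #include <string>

    void example_usage() {
        const std::map<std::string, std::string> params = {
            {"post_nms_topn", "300"}, {"return_inverse", "true"}};
        int topn    = GetParamAsInt("post_nms_topn", params);    // parses "300" -> 300
        bool inv    = GetParamAsBool("return_inverse", params);  // parses "true" -> true
        int missing = GetParamAsInt("num_outputs", 2, params);   // absent -> default 2
    }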


@@ -1,86 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <map>
#include "details/ie_irelease.hpp"
#include "ie_version.hpp"
#include "ie_common.h"
#include "ie_blob.h"
namespace InferenceEngine {
/**
* @class IShapeInferImpl
 * @brief This class provides an interface for implementations with custom execution code
*/
class IShapeInferImpl {
public:
/**
* @brief A shared pointer to a IShapeInferImpl object
*/
using Ptr = std::shared_ptr<IShapeInferImpl>;
virtual ~IShapeInferImpl() = default;
/**
 * @brief Checks that reshape can be applied and that the parameters and shapes are valid
*/
virtual StatusCode inferShapes(const std::vector<Blob::CPtr>& /*inBlobs*/,
const std::map<std::string, std::string>& /*params*/,
const std::map<std::string, Blob::Ptr>& /*blobs*/,
std::vector<SizeVector>& /*outShapes*/, ResponseDesc* /*resp*/) noexcept {
return NOT_IMPLEMENTED;
} // For backward-compatibility
};
/**
* @class IShapeInferExtension
* @brief This class is the reader extension interface to provide implementation for shape propagation
*/
class IShapeInferExtension : public InferenceEngine::details::IRelease {
public:
/**
* @brief Gets extension version information and stores in versionInfo
* @param versionInfo Pointer to version info, will be set by plugin
*/
virtual void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept = 0;
/**
* @brief Cleans resources up
*/
virtual void Unload() noexcept = 0;
/**
 * The method will be removed in the 2021.1 release.
 * @brief Fills the passed array with the types of layers whose shape infer implementations are included in the extension
*
* @param types Array to store the layer types
* @param size Size of the layer types array
* @param resp Response descriptor
* @return Status code
*/
virtual StatusCode getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept = 0;
/**
 * @brief Gets the shape propagation implementation for the given CNNLayer type (passed as a string)
 *
 * @param impl The shape infer implementation for the given layer type
* @param type A type of CNNLayer
* @param resp response descriptor
* @return status code
*/
virtual StatusCode getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type, ResponseDesc* resp) noexcept = 0;
};
/**
 * This API will be removed in the 2021.1 release.
* @brief A shared pointer to a IShapeInferExtension interface
*/
using IShapeInferExtensionPtr = std::shared_ptr<IShapeInferExtension>;
} // namespace InferenceEngine


@@ -1,39 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Implementation of Shape inference for Proposal layer
*/
class ProposalShapeProp : public BuiltInShapeInferImpl {
public:
explicit ProposalShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
size_t post_nms_topn = static_cast<size_t>(GetParamAsInt("post_nms_topn", params));
auto num_outputs = GetParamAsUInt("num_outputs", params);
if (num_outputs > 2)
THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
outShapes.push_back({inShapes[0][0] * post_nms_topn, 5});
if (num_outputs == 2)
outShapes.push_back({inShapes[0][0] * post_nms_topn});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
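
A quick worked example of the rule above, with illustrative values: for inShapes[0] = {1, 3, 40, 40} and post_nms_topn = 300, the first output shape is {1 * 300, 5} = {300, 5} (box coordinates), and when num_outputs == 2 a second output {300} is appended for the scores.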


@@ -1,39 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 * @brief Implementation of shape inference for RNN-based cell layers (RNNCell, GRUCell, LSTMCell)
*/
template <int S>
class RNNBaseCellShapeProp : public BuiltInShapeInferImpl {
public:
explicit RNNBaseCellShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
auto state_dims = inShapes[1];
for (int i = 0; i < S; i++)
outShapes.push_back(state_dims);
}
};
using RNNCellShapeProp = RNNBaseCellShapeProp<1>;
using GRUCellShapeProp = RNNBaseCellShapeProp<1>;
using LSTMCellShapeProp = RNNBaseCellShapeProp<2>;
} // namespace ShapeInfer
} // namespace InferenceEngine


@@ -1,33 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Implementation of Shape inference for SimplerNMS layer
*/
class SimplerNMSShapeProp : public BuiltInShapeInferImpl {
public:
explicit SimplerNMSShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
size_t post_nms_topn = static_cast<size_t>(GetParamAsInt("post_nms_topn", params));
outShapes.push_back({post_nms_topn, 5});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine


@@ -1,44 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ie_built_in_impl.hpp"
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Implementation of Shape inference for SparseToDense layer
*/
class SparseToDenseShapeProp : public BuiltInShapeInferImpl {
public:
explicit SparseToDenseShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs,
std::vector<SizeVector>& outShapes) override {
SizeVector shapes;
if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
auto* buffer = inBlobs[1]->cbuffer().as<int*>();
if (buffer != nullptr) {
shapes.assign(buffer, buffer + inBlobs[1]->size());
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else {
THROW_IE_EXCEPTION << "Second input must have I32 precision";
}
outShapes = { shapes };
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine


@@ -1,48 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "shape_infer/ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Implementation of Shape inference for Unique layer
*/
class UniqueShapeProp : public BuiltInShapeInferImpl {
public:
explicit UniqueShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
bool return_inverse = GetParamAsBool("return_inverse", params);
bool return_counts = GetParamAsBool("return_counts", params);
// compute a number of outputs
size_t num_outputs = 1;
if (return_counts) {
num_outputs++;
}
if (return_inverse) {
num_outputs++;
}
// reshape available outputs
outShapes.resize(num_outputs);
for (size_t i = 0; i < num_outputs; i++) {
outShapes[i].resize(1);
outShapes[i][0] = inShapes[0][0];
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine


@@ -706,13 +706,17 @@ std::shared_ptr<ngraph::Node> V10Parser::XmlDeserializer::createNode(
auto opsetIt = opsets.find(params.version);
// Try to create operation from loaded opsets
static const std::unordered_set<std::string> experimental_detectrons = {"ExperimentalDetectronDetectionOutput",
"ExperimentalDetectronGenerateProposalsSingleImage",
"ExperimentalDetectronPriorGridGenerator",
"ExperimentalDetectronROIFeatureExtractor",
"ExperimentalDetectronTopKROIs"};
static const std::unordered_set<std::string> experimental_ops_added_to_opset = {
"ExperimentalDetectronDetectionOutput",
"ExperimentalDetectronGenerateProposalsSingleImage",
"ExperimentalDetectronPriorGridGenerator",
"ExperimentalDetectronROIFeatureExtractor",
"ExperimentalDetectronTopKROIs",
"GRUCell",
"RNNCell",
"Proposal"};
if (experimental_detectrons.count(params.type)) {
if (experimental_ops_added_to_opset.count(params.type) && (params.version == "experimental" || params.version == "extension")) {
opsetIt = opsets.find("opset6");
}


@@ -4,6 +4,7 @@
#include <string>
#include <generic_ie.hpp>
#include "ngraph/opsets/opset6.hpp"
#include "ngraph_reader_tests.hpp"
TEST_F(NGraphReaderTests, ReadProposalNetwork) {
std::string model_v10 = R"V0G0N(
@@ -308,6 +309,8 @@ TEST_F(NGraphReaderTests, ReadProposalNetwork_2) {
}
TEST_F(NGraphReaderTests, ReadExtensionProposalNetwork) {
// the Proposal with 2 inputs was initially marked as an "extension" operation but was later added to the opset
// the test checks that the IR reader properly instantiates the "extension" Proposal as the "opset6" Proposal
std::string model_v10 = R"V0G0N(
<net name="Network" version="10">
<layers>
@@ -334,9 +337,9 @@ TEST_F(NGraphReaderTests, ReadExtensionProposalNetwork) {
</output>
</layer>
<layer id="2" name="in3" type="Const" version="opset1">
<data element_type="i64" offset="0" shape="3" size="24"/>
<data element_type="f32" offset="0" shape="3" size="12"/>
<output>
<port id="0" precision="I64">
<port id="0" precision="FP32">
<dim>3</dim>
</port>
</output>
@@ -391,15 +394,15 @@
Core ie;
Blob::Ptr weights;
weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {24}, Layout::C));
weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {12}, Layout::C));
weights->allocate();
CommonTestUtils::fill_data(weights->buffer().as<float *>(), weights->size() / sizeof(float));
auto func = ie.ReadNetwork(model_v10, weights).getFunction();
for (auto op : func->get_ordered_ops()) {
if (op->get_friendly_name() == "proposal" && op->get_type_info() == ngraph::op::GenericIE::type_info) {
if (op->get_friendly_name() == "proposal" && op->get_type_info() == ngraph::opset6::Proposal::type_info) {
return;
}
}
FAIL() << "Custom proposal layer is not a Generic operation!";
}
FAIL() << "Custom proposal layer is not an opset6 operation.";
}


@@ -472,66 +472,111 @@ TEST_F(NGraphReshapeTests, TestInterpParameters) {
}
TEST_F(NGraphReshapeTests, ReshapeWithDefaultGenericOps) {
// the RNNCell was initially marked as an "experimental" operation but was later added to the opset
// the test checks that the IR reader properly instantiates the "experimental" RNNCell as the "opset6" RNNCell
std::string model = R"V0G0N(
<net name="Activation" version="10">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data shape="1,256" element_type="f32"/>
<data shape="1,16" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>256</dim>
<dim>16</dim>
</port>
</output>
</layer>
<layer id="1" name="77/GRUCell" type="GRUCell" version="experimental">
<data hidden_size="256" linear_before_reset="1"/>
<layer name="in2" type="Parameter" id="1" version="opset1">
<data shape="1,128" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>128</dim>
</port>
</output>
</layer>
<layer name="in3" type="Parameter" id="2" version="opset1">
<data shape="128,16" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>128</dim>
<dim>16</dim>
</port>
</output>
</layer>
<layer name="in4" type="Parameter" id="3" version="opset1">
<data shape="128,128" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>128</dim>
<dim>128</dim>
</port>
</output>
</layer>
<layer name="in5" type="Parameter" id="4" version="opset1">
<data shape="128" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>128</dim>
</port>
</output>
</layer>
<layer id="5" name="77/RNNCell" type="RNNCell" version="experimental">
<data hidden_size="128" linear_before_reset="1"/>
<input>
<port id="0">
<dim>1</dim>
<dim>256</dim>
<dim>16</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>256</dim>
<dim>128</dim>
</port>
<port id="2">
<dim>128</dim>
<dim>16</dim>
</port>
<port id="3">
<dim>128</dim>
<dim>128</dim>
</port>
<port id="4">
<dim>128</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<port id="5" precision="FP32">
<dim>1</dim>
<dim>256</dim>
<dim>128</dim>
</port>
</output>
<blobs>
<weights offset="0" precision="FP32" size="1572864"/>
<biases offset="1572864" precision="FP32" size="4096"/>
</blobs>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<layer name="output" type="Result" id="6" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>256</dim>
<dim>128</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
<edge from-layer="0" from-port="0" to-layer="5" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="5" to-port="1"/>
<edge from-layer="2" from-port="0" to-layer="5" to-port="2"/>
<edge from-layer="3" from-port="0" to-layer="5" to-port="3"/>
<edge from-layer="4" from-port="0" to-layer="5" to-port="4"/>
<edge from-layer="5" from-port="5" to-layer="6" to-port="0"/>
</edges>
</net>
)V0G0N";
InferenceEngine::Core ie;
Blob::Ptr weights;
weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1576960}, Layout::C));
weights->allocate();
fill_data(weights->buffer(), weights->size() / sizeof(float));
auto network = ie.ReadNetwork(model, weights);
InferenceEngine::ICNNNetwork::InputShapes newShapes;
newShapes["in1"] = {2, 256};
newShapes["in1"] = {2, 16};
newShapes["in2"] = {2, 128};
ASSERT_NO_THROW(network.reshape(newShapes));
}