ExperimentalDetectron* shape infer old-style (#1962)

* ExperimentalDetectron* shape infer old-style

* nGraph reshape tests
Evgenya Stepyreva 2020-08-27 11:52:04 +03:00 committed by GitHub
parent ae8be58701
commit 0182b97980
8 changed files with 701 additions and 2 deletions


@@ -129,8 +129,9 @@ void ngraph::op::GenericIE::validate_and_infer_types() {
}
}
// WA: Proposal shape infer has to know number of outputs
if (type == "Proposal" && parameters.find("num_outputs") == parameters.end()) {
// WA: shape infer has to know number of outputs
if ((type == "Proposal" || type == "ExperimentalDetectronROIFeatureExtractor" || type == "ExperimentalDetectronDetectionOutput")
&& parameters.find("num_outputs") == parameters.end()) {
parameters["num_outputs"] = std::to_string(outputs.size());
}
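As a quick illustration of this workaround (a minimal standalone sketch with hypothetical port names, not the GenericIE class itself): when the IR carries no num_outputs attribute, it is derived from the number of declared output ports, and the old-style shape infer later reads it back via GetParamAsUInt("num_outputs").

#include <cassert>
#include <map>
#include <string>
#include <vector>

int main() {
    // Hypothetical three-output ExperimentalDetectronDetectionOutput node.
    std::vector<std::string> outputs = {"boxes", "classes", "scores"};
    std::map<std::string, std::string> parameters;  // "num_outputs" absent from the IR
    if (parameters.find("num_outputs") == parameters.end())
        parameters["num_outputs"] = std::to_string(outputs.size());
    assert(parameters["num_outputs"] == "3");
    return 0;
}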


@@ -17,6 +17,7 @@
#include "ie_deconv_shape_infer.hpp"
#include "ie_deformable_conv_shape_infer.hpp"
#include "ie_depth_to_space_shape_infer.hpp"
#include "ie_detectionoutput_onnx_shape_infer.hpp"
#include "ie_detection_output_shape_infer.hpp"
#include "ie_eltwise_shape_infer.hpp"
#include "ie_equal_shape_infer.hpp"
@@ -35,6 +36,8 @@
#include "ie_pool_shape_infer.hpp"
#include "ie_priorbox_clustered_shape_infer.hpp"
#include "ie_priorbox_shape_infer.hpp"
#include "ie_priorgridgenerator_onnx_shape_infer.hpp"
#include "ie_proposal_onnx_shape_infer.hpp"
#include "ie_proposal_shape_infer.hpp"
#include "ie_psroi_pooling_shape_infer.hpp"
#include "ie_quantize_shape_infer.hpp"
@@ -48,6 +51,7 @@
#include "ie_rnn_cell_shape_infer.hpp"
#include "ie_rnn_shape_infer.hpp"
#include "ie_roi_pooling_shape_infer.hpp"
#include "ie_roifeatureextractor_onnx_shape_infer.hpp"
#include "ie_scatter_shape_infer.hpp"
#include "ie_select_shape_infer.hpp"
#include "ie_shape_shape_infer.hpp"
@@ -65,6 +69,7 @@
#include "ie_tensor_iterator_shape_infer.hpp"
#include "ie_tile_shape_infer.hpp"
#include "ie_topk_shape_infer.hpp"
#include "ie_topkrois_onnx_shape_infer.hpp"
#include "ie_unique_shape_infer.hpp"
#include "ie_unsqueeze_shape_infer.hpp"
#include "ie_upsampling_shape_infer.hpp"
@@ -157,6 +162,11 @@ REG_SHAPE_INFER_FOR_TYPE(ReshapeShapeProp, Reshape);
REG_SHAPE_INFER_FOR_TYPE(DetectionOutputShapeProp, DetectionOutput);
REG_SHAPE_INFER_FOR_TYPE(PriorBoxClusteredShapeProp, PriorBoxClustered);
REG_SHAPE_INFER_FOR_TYPE(PriorBoxShapeProp, PriorBox);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronDetectionOutputShapeProp, ExperimentalDetectronDetectionOutput);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronPriorGridGeneratorShapeProp, ExperimentalDetectronPriorGridGenerator);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronGenerateProposalsSingleImageShapeProp, ExperimentalDetectronGenerateProposalsSingleImage);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronROIFeatureExtractorShapeProp, ExperimentalDetectronROIFeatureExtractor);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronTopKROIsShapeProp, ExperimentalDetectronTopKROIs);
REG_SHAPE_INFER_FOR_TYPE(RoiPoolingShapeProp, ROIPooling);
REG_SHAPE_INFER_FOR_TYPE(PSRoiPoolingShapeProp, PSROIPooling);
REG_SHAPE_INFER_FOR_TYPE(UpsamplingShapeProp, Upsampling);


@@ -0,0 +1,51 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronDetectionOutput layer
*/
class ExperimentalDetectronDetectionOutputShapeProp : public BuiltInShapeInferImpl {
protected:
const int ROIS = 0;
const int FEATMAPS = 1;
public:
explicit ExperimentalDetectronDetectionOutputShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
auto rois_num = cnnLayer.GetParamAsUInt("max_detections_per_image");
outShapes.push_back({rois_num, 4});
auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
if (num_outputs > 3) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
if (num_outputs >= 2) {
outShapes.push_back({rois_num});
}
if (num_outputs == 3) {
outShapes.push_back({rois_num});
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
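A standalone sketch of the rule implemented above (hypothetical helper, plain std::vector instead of SizeVector): the output shapes depend only on max_detections_per_image and num_outputs, never on the input shapes, so reshaping the inputs leaves the outputs at {100, 4}, {100}, {100} in the test below.

#include <cassert>
#include <cstddef>
#include <vector>

// Plain-vector restatement of the ExperimentalDetectronDetectionOutput shape rule.
std::vector<std::vector<size_t>> eddoOutShapes(size_t max_detections_per_image, unsigned num_outputs) {
    std::vector<std::vector<size_t>> out;
    out.push_back({max_detections_per_image, 4});                     // boxes
    if (num_outputs >= 2) out.push_back({max_detections_per_image});  // classes
    if (num_outputs == 3) out.push_back({max_detections_per_image});  // scores
    return out;
}

int main() {
    auto shapes = eddoOutShapes(100, 3);  // values used in ReshapeEDDetectionOutput below
    assert(shapes.size() == 3);
    assert(shapes[0][0] == 100 && shapes[0][1] == 4);
    return 0;
}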


@@ -0,0 +1,57 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronPriorGridGenerator layer
*/
class ExperimentalDetectronPriorGridGeneratorShapeProp : public BuiltInShapeInferImpl {
protected:
const int PRIORS = 0;
const int FEATMAP = 1;
const int H = 2;
const int W = 3;
public:
explicit ExperimentalDetectronPriorGridGeneratorShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
const auto& priors_shape = inShapes.at(PRIORS);
const auto priors_num = priors_shape.at(0);
const auto& featmap_shape = inShapes.at(FEATMAP);
const auto grid_height = featmap_shape.at(H);
const auto grid_width = featmap_shape.at(W);
const bool flatten = cnnLayer.GetParamAsBool("flatten", true);
if (flatten) {
outShapes.push_back({grid_height * grid_width * priors_num, 4});
} else {
outShapes.push_back({grid_height, grid_width, priors_num, 4});
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
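A standalone sketch of the computation above (hypothetical helper, not Inference Engine API): the grid size is grid_height * grid_width taken from dims 2 and 3 of the feature map, times the number of priors from dim 0 of the priors input; with flatten=true the result is a single [N, 4] tensor.

#include <cassert>
#include <cstddef>
#include <vector>

// Plain-vector restatement of the ExperimentalDetectronPriorGridGenerator shape rule.
std::vector<size_t> priorGridShape(size_t priors_num, size_t grid_height, size_t grid_width, bool flatten) {
    if (flatten) return {grid_height * grid_width * priors_num, 4};
    return {grid_height, grid_width, priors_num, 4};
}

int main() {
    // Values from ReshapeEDPriorGridGenerator below: 3 priors, 200x336 feature map.
    auto shape = priorGridShape(3, 200, 336, /*flatten=*/true);
    assert(shape[0] == 201600 && shape[1] == 4);
    return 0;
}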


@@ -0,0 +1,40 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronGenerateProposalsSingleImage layer
*/
class ExperimentalDetectronGenerateProposalsSingleImageShapeProp : public BuiltInShapeInferImpl {
public:
explicit ExperimentalDetectronGenerateProposalsSingleImageShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t post_nms_count = static_cast<size_t>(cnnLayer.GetParamAsInt("post_nms_count"));
outShapes.push_back({post_nms_count, 4});
outShapes.push_back({post_nms_count});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
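A standalone sketch (hypothetical helper): both outputs are fully determined by the post_nms_count attribute, {post_nms_count, 4} ROIs plus {post_nms_count} scores, so input reshapes do not affect them.

#include <cassert>
#include <cstddef>
#include <vector>

// Plain-vector restatement of the ExperimentalDetectronGenerateProposalsSingleImage shape rule.
std::vector<std::vector<size_t>> proposalsShapes(size_t post_nms_count) {
    return {{post_nms_count, 4}, {post_nms_count}};
}

int main() {
    auto shapes = proposalsShapes(1000);  // post_nms_count from the reshape test below
    assert(shapes[0][0] == 1000 && shapes[0][1] == 4 && shapes[1][0] == 1000);
    return 0;
}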


@@ -0,0 +1,50 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronROIFeatureExtractor layer
*/
class ExperimentalDetectronROIFeatureExtractorShapeProp : public BuiltInShapeInferImpl {
protected:
const int ROIS = 0;
const int FEATMAPS = 1;
public:
explicit ExperimentalDetectronROIFeatureExtractorShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t rois_num = inShapes.at(ROIS).at(0);
size_t channels_num = inShapes.at(FEATMAPS).at(1);
size_t output_size = static_cast<size_t>(cnnLayer.GetParamAsInt("output_size"));
outShapes.push_back({rois_num, channels_num, output_size, output_size});
auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
if (num_outputs > 2) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
if (num_outputs == 2) {
outShapes.push_back({rois_num, 4});
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
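A standalone sketch (hypothetical helper): the ROI count comes from dim 0 of input 0, the channel count from dim 1 of input 1, and the spatial size from the output_size attribute; a second {rois_num, 4} output is added only when num_outputs is 2.

#include <cassert>
#include <cstddef>
#include <vector>

// Plain-vector restatement of the first-output rule of ExperimentalDetectronROIFeatureExtractor.
std::vector<size_t> roiFeatureShape(size_t rois_num, size_t channels_num, size_t output_size) {
    return {rois_num, channels_num, output_size, output_size};
}

int main() {
    // Values from ReshapeEDROIFeatureExtractor below: rois [1000,4], featmap [1,256,200,336], output_size=7.
    auto shape = roiFeatureShape(1000, 256, 7);
    assert(shape[0] == 1000 && shape[1] == 256 && shape[2] == 7 && shape[3] == 7);
    return 0;
}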


@@ -0,0 +1,41 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronTopKROIs layer
*/
class ExperimentalDetectronTopKROIsShapeProp : public BuiltInShapeInferImpl {
public:
explicit ExperimentalDetectronTopKROIsShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
const auto max_rois = static_cast<size_t>(cnnLayer.GetParamAsInt("max_rois"));
outShapes.push_back({max_rois, 4});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
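A standalone sketch (hypothetical helper): the single output is always {max_rois, 4}, regardless of how many input ROIs and scores are supplied, which is exactly what the reshape test below relies on.

#include <cassert>
#include <cstddef>
#include <vector>

// Plain-vector restatement of the ExperimentalDetectronTopKROIs shape rule.
std::vector<size_t> topKRoisShape(size_t max_rois) {
    return {max_rois, 4};
}

int main() {
    auto shape = topKRoisShape(1000);  // max_rois from ReshapeEDTopKROIs below
    assert(shape[0] == 1000 && shape[1] == 4);
    return 0;
}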


@@ -535,3 +535,452 @@ TEST_F(NGraphReshapeTests, ReshapeWithDefaultGenericOps) {
ASSERT_NO_THROW(network.reshape(newShapes));
}
TEST_F(NGraphReshapeTests, ReshapeEDDetectionOutput) {
std::string model = R"V0G0N(
<net name="ExperimentalDetectronDetectionOutput" version="10">
<layers>
<layer name="in0" type="Parameter" id="0" version="opset1">
<data shape="1000,4" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>1000</dim>
<dim>4</dim>
</port>
</output>
</layer>
<layer name="in1" type="Parameter" id="1" version="opset1">
<data shape="1000,324" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>1000</dim>
<dim>324</dim>
</port>
</output>
</layer>
<layer name="in2" type="Parameter" id="2" version="opset1">
<data shape="1000,324" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>1000</dim>
<dim>81</dim>
</port>
</output>
</layer>
<layer name="in3" type="Parameter" id="3" version="opset1">
<data shape="1,3" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="4" name="DO" type="ExperimentalDetectronDetectionOutput" version="experimental">
<data class_agnostic_box_regression="0" deltas_weights="10.0,10.0,5.0,5.0" max_delta_log_wh="4.135166645050049" max_detections_per_image="100" nms_threshold="0.5" num_classes="81" post_nms_count="2000" score_threshold="0.05000000074505806"/>
<input>
<port id="0">
<dim>1000</dim>
<dim>4</dim>
</port>
<port id="1">
<dim>1000</dim>
<dim>324</dim>
</port>
<port id="2">
<dim>1000</dim>
<dim>81</dim>
</port>
<port id="3">
<dim>1</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="4" precision="FP32">
<dim>100</dim>
<dim>4</dim>
</port>
<port id="5" precision="I32">
<dim>100</dim>
</port>
<port id="6" precision="FP32">
<dim>100</dim>
</port>
</output>
</layer>
<layer name="out_0" type="Result" id="5" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>100</dim>
<dim>4</dim>
</port>
</input>
</layer>
<layer name="out_1" type="Result" id="6" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>100</dim>
</port>
</input>
</layer>
<layer name="out_2" type="Result" id="7" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>100</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="4" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="4" to-port="1"/>
<edge from-layer="2" from-port="0" to-layer="4" to-port="2"/>
<edge from-layer="3" from-port="0" to-layer="4" to-port="3"/>
<edge from-layer="4" from-port="4" to-layer="5" to-port="0"/>
<edge from-layer="4" from-port="5" to-layer="6" to-port="0"/>
<edge from-layer="4" from-port="6" to-layer="7" to-port="0"/>
</edges>
</net>
)V0G0N";
InferenceEngine::Core ie;
Blob::Ptr weights;
auto network = ie.ReadNetwork(model, weights);
InferenceEngine::ICNNNetwork::InputShapes newShapes;
newShapes["in0"] = {2000, 4};
newShapes["in1"] = {2000, 324};
newShapes["in2"] = {2000, 81};
ASSERT_NO_THROW(network.reshape(newShapes));
}
TEST_F(NGraphReshapeTests, ReshapeEDPriorGridGenerator) {
std::string model = R"V0G0N(
<net name="PriorGridGenerator" version="10">
<layers>
<layer name="in0" type="Parameter" id="0" version="opset1">
<data shape="3,4" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>3</dim>
<dim>4</dim>
</port>
</output>
</layer>
<layer name="in1" type="Parameter" id="1" version="opset1">
<data shape="1,256,200,336" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>256</dim>
<dim>200</dim>
<dim>336</dim>
</port>
</output>
</layer>
<layer name="in2" type="Parameter" id="2" version="opset1">
<data shape="1,3,800,1344" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>800</dim>
<dim>1344</dim>
</port>
</output>
</layer>
<layer id="3" name="1117" type="ExperimentalDetectronPriorGridGenerator" version="experimental">
<data flatten="1" h="0" stride_x="4.0" stride_y="4.0" w="0"/>
<input>
<port id="0">
<dim>3</dim>
<dim>4</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>256</dim>
<dim>200</dim>
<dim>336</dim>
</port>
<port id="2">
<dim>1</dim>
<dim>3</dim>
<dim>800</dim>
<dim>1344</dim>
</port>
</input>
<output>
<port id="3" precision="FP32">
<dim>201600</dim>
<dim>4</dim>
</port>
</output>
</layer>
<layer name="out_0" type="Result" id="4" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>201600</dim>
<dim>4</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
<edge from-layer="2" from-port="0" to-layer="3" to-port="2"/>
<edge from-layer="3" from-port="3" to-layer="4" to-port="0"/>
</edges>
</net>
)V0G0N";
InferenceEngine::Core ie;
Blob::Ptr weights;
auto network = ie.ReadNetwork(model, weights);
InferenceEngine::ICNNNetwork::InputShapes newShapes;
newShapes["in1"] = {2, 256, 200, 336};
newShapes["in2"] = {2, 3, 800, 1344};
ASSERT_NO_THROW(network.reshape(newShapes));
}
TEST_F(NGraphReshapeTests, ReshapeEDGenerateProposalsSingleImage) {
std::string model = R"V0G0N(
<net name="GenerateProposalsSingleImage" version="10">
<layers>
<layer name="in0" type="Parameter" id="0" version="opset1">
<data shape="3" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>3</dim>
</port>
</output>
</layer>
<layer name="in1" type="Parameter" id="1" version="opset1">
<data shape="1201600000,4" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>201600</dim>
<dim>4</dim>
</port>
</output>
</layer>
<layer name="in2" type="Parameter" id="2" version="opset1">
<data shape="12,200,336" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>12</dim>
<dim>200</dim>
<dim>336</dim>
</port>
</output>
</layer>
<layer name="in3" type="Parameter" id="3" version="opset1">
<data shape="1,3" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>3</dim>
<dim>200</dim>
<dim>336</dim>
</port>
</output>
</layer>
<layer id="4" name="1133" type="ExperimentalDetectronGenerateProposalsSingleImage" version="experimental">
<data min_size="0.0" nms_threshold="0.699999988079071" post_nms_count="1000" pre_nms_count="1000"/>
<input>
<port id="0">
<dim>3</dim>
</port>
<port id="1">
<dim>201600</dim>
<dim>4</dim>
</port>
<port id="2">
<dim>12</dim>
<dim>200</dim>
<dim>336</dim>
</port>
<port id="3">
<dim>3</dim>
<dim>200</dim>
<dim>336</dim>
</port>
</input>
<output>
<port id="4" precision="FP32">
<dim>1000</dim>
<dim>4</dim>
</port>
<port id="5" precision="FP32">
<dim>1000</dim>
</port>
</output>
</layer>
<layer name="out_0" type="Result" id="5" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1000</dim>
<dim>4</dim>
</port>
</input>
</layer>
<layer name="out_1" type="Result" id="6" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1000</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="4" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="4" to-port="1"/>
<edge from-layer="2" from-port="0" to-layer="4" to-port="2"/>
<edge from-layer="3" from-port="0" to-layer="4" to-port="3"/>
<edge from-layer="4" from-port="4" to-layer="5" to-port="0"/>
<edge from-layer="4" from-port="5" to-layer="6" to-port="0"/>
</edges>
</net>
)V0G0N";
InferenceEngine::Core ie;
Blob::Ptr weights;
auto network = ie.ReadNetwork(model, weights);
InferenceEngine::ICNNNetwork::InputShapes newShapes;
newShapes["in2"] = {12, 200, 300};
newShapes["in3"] = {2, 200, 300};
ASSERT_NO_THROW(network.reshape(newShapes));
}
TEST_F(NGraphReshapeTests, ReshapeEDROIFeatureExtractor) {
std::string model = R"V0G0N(
<net name="ExperimentalDetectronROIFeatureExtractor" version="10">
<layers>
<layer name="in0" type="Parameter" id="0" version="opset1">
<data shape="1000,4" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>1000</dim>
<dim>4</dim>
</port>
</output>
</layer>
<layer name="in1" type="Parameter" id="1" version="opset1">
<data shape="1,256,200,336" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>256</dim>
<dim>200</dim>
<dim>336</dim>
</port>
</output>
</layer>
<layer id="2" name="1190" type="ExperimentalDetectronROIFeatureExtractor" version="experimental">
<data aligned="0" distribute_rois_between_levels="1" output_size="7" preserve_rois_order="1" pyramid_scales="4" sampling_ratio="2"/>
<input>
<port id="0">
<dim>1000</dim>
<dim>4</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>256</dim>
<dim>200</dim>
<dim>336</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1000</dim>
<dim>256</dim>
<dim>7</dim>
<dim>7</dim>
</port>
</output>
</layer>
<layer name="out_0" type="Result" id="3" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1000</dim>
<dim>256</dim>
<dim>7</dim>
<dim>7</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
</edges>
</net>
)V0G0N";
InferenceEngine::Core ie;
Blob::Ptr weights;
auto network = ie.ReadNetwork(model, weights);
InferenceEngine::ICNNNetwork::InputShapes newShapes;
newShapes["in0"] = {1, 256, 300, 400};
newShapes["in1"] = {1000, 256, 7, 7};
ASSERT_NO_THROW(network.reshape(newShapes));
}
TEST_F(NGraphReshapeTests, ReshapeEDTopKROIs) {
std::string model = R"V0G0N(
<net name="ExperimentalDetectronTopKROIs" version="10">
<layers>
<layer name="in0" type="Parameter" id="0" version="opset1">
<data shape="5000,4" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>5000</dim>
<dim>4</dim>
</port>
</output>
</layer>
<layer name="in1" type="Parameter" id="1" version="opset1">
<data shape="5000" element_type="f32"/>
<output>
<port id="0" precision="FP32">
<dim>5000</dim>
</port>
</output>
</layer>
<layer id="2" name="1189" type="ExperimentalDetectronTopKROIs" version="experimental">
<data max_rois="1000"/>
<input>
<port id="0">
<dim>5000</dim>
<dim>4</dim>
</port>
<port id="1">
<dim>5000</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1000</dim>
<dim>4</dim>
</port>
</output>
</layer>
<layer name="out_0" type="Result" id="3" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1000</dim>
<dim>4</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
</edges>
</net>
)V0G0N";
InferenceEngine::Core ie;
Blob::Ptr weights;
auto network = ie.ReadNetwork(model, weights);
InferenceEngine::ICNNNetwork::InputShapes newShapes;
newShapes["in0"] = {10000, 4};
newShapes["in1"] = {10000};
ASSERT_NO_THROW(network.reshape(newShapes));
}