Reshape v7: remove (#1379)

* Removed shape inference for IR v7 and older

* Disabled dynamic batch tests which require reshape

* Fixed tests (part 2)

* Disabled MKLDNN tests with convolution reshape

* Fixed GPU tests

* Disabled VPU tests with batch size > 1 for old IRs

* Removed most of the shape infer functions for the old representation

* Removed most of the CNNLayer validators

* Fixed validators and kept only parseParams

* Removed tests on invalid IR v7

* Disabled more VPU tests

* Removed Bucketize validator

* Disabled one more Myriad test case where reshape for old IRs is needed

* Removed useless reshape

* Need to replace GRUCell with Unique

* Moved shape infer functions for experimental layers to Core IE

* Fixed shape inference functions not to depend on legacy

* Added missing SparseToDense

* Added descriptive error message

* Fixed comments
Ilya Lavrenov 2020-09-15 15:08:17 +03:00 committed by GitHub
parent 9e8b42ff95
commit 9ca5fbaf02
163 changed files with 532 additions and 9011 deletions

View File

@@ -29,6 +29,7 @@ set(IE_BASE_SOURCE_FILES
${CMAKE_CURRENT_SOURCE_DIR}/ie_parameter.cpp
${CMAKE_CURRENT_SOURCE_DIR}/ie_rtti.cpp
${CMAKE_CURRENT_SOURCE_DIR}/precision_utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/shape_infer/ie_built_in_holder.cpp
${CMAKE_CURRENT_SOURCE_DIR}/network_serializer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/network_serializer.hpp
${CMAKE_CURRENT_SOURCE_DIR}/system_allocator.cpp
@@ -123,6 +124,7 @@ add_library(${TARGET_NAME}_common_obj OBJECT
target_compile_definitions(${TARGET_NAME}_common_obj PRIVATE IMPLEMENT_INFERENCE_ENGINE_API)
target_include_directories(${TARGET_NAME}_common_obj PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}"
$<TARGET_PROPERTY:${TARGET_NAME}_transformations,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:${TARGET_NAME}_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>)

View File

@@ -30,7 +30,7 @@
#include "ie_itt.hpp"
#include "network_serializer.hpp"
#include "generic_ie.hpp"
#include <legacy/shape_infer/built-in/ie_built_in_holder.hpp>
#include "shape_infer/ie_built_in_holder.hpp"
using namespace std;
using namespace InferenceEngine;

View File

@@ -31,12 +31,6 @@
#include <legacy/cnn_network_impl.hpp>
namespace InferenceEngine {
namespace ShapeInfer {
class Reshaper;
using ReshaperPtr = std::shared_ptr<Reshaper>;
} // namespace ShapeInfer
namespace details {
/**

View File

@@ -14,6 +14,7 @@
#include <vector>
#include "blob_factory.hpp"
#include "shape_infer/ie_ishape_infer_extension.hpp"
#include <legacy/ie_ngraph_utils.hpp>
#include "ngraph/util.hpp"
#include "ngraph/graph_util.hpp"

View File

@@ -0,0 +1,84 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <memory>
#include <string>
#include "shape_infer/ie_built_in_holder.hpp"
#include "shape_infer/ie_detectionoutput_onnx_shape_infer.hpp"
#include "shape_infer/ie_priorgridgenerator_onnx_shape_infer.hpp"
#include "shape_infer/ie_proposal_onnx_shape_infer.hpp"
#include "shape_infer/ie_proposal_shape_infer.hpp"
#include "shape_infer/ie_rnn_cell_shape_infer.hpp"
#include "shape_infer/ie_roifeatureextractor_onnx_shape_infer.hpp"
#include "shape_infer/ie_simpler_nms_shape_infer.hpp"
#include "shape_infer/ie_sparse_to_dense_shape_infer.hpp"
#include "shape_infer/ie_topkrois_onnx_shape_infer.hpp"
#include "shape_infer/ie_unique_shape_infer.hpp"
#include "shape_infer/ie_sparse_to_dense_shape_infer.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
BuiltInShapeInferHolder::ImplsHolder::Ptr BuiltInShapeInferHolder::GetImplsHolder() {
static ImplsHolder::Ptr localHolder;
if (localHolder == nullptr) {
localHolder = std::make_shared<ImplsHolder>();
}
return localHolder;
}
void BuiltInShapeInferHolder::AddImpl(const std::string& name, const IShapeInferImpl::Ptr& impl) {
GetImplsHolder()->list[name] = impl;
}
StatusCode BuiltInShapeInferHolder::getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
auto& factories = GetImplsHolder()->list;
types = new char*[factories.size()];
size = 0;
for (auto it = factories.begin(); it != factories.end(); it++, size++) {
types[size] = new char[it->first.size() + 1];
std::copy(it->first.begin(), it->first.end(), types[size]);
types[size][it->first.size()] = '\0';
}
return OK;
}
StatusCode BuiltInShapeInferHolder::getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type,
ResponseDesc* resp) noexcept {
auto& impls = BuiltInShapeInferHolder::GetImplsHolder()->list;
if (impls.find(type) != impls.end()) {
impl = impls[type];
return OK;
}
impl.reset();
return NOT_FOUND;
}
template <typename Impl>
class ImplRegisterBase {
public:
explicit ImplRegisterBase(const std::string& type) {
BuiltInShapeInferHolder::AddImpl(type, std::make_shared<Impl>(type));
}
};
#define REG_SHAPE_INFER_FOR_TYPE(__prim, __type) \
static ImplRegisterBase<__prim> __bi_reg__##__type(#__type)
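// Each REG_SHAPE_INFER_FOR_TYPE(Impl, Type) below expands to a file-scope
// static ImplRegisterBase<Impl> object; its constructor calls
// BuiltInShapeInferHolder::AddImpl("Type", ...) during static initialization,
// so every listed layer type is registered before first use.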
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronDetectionOutputShapeProp, ExperimentalDetectronDetectionOutput);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronPriorGridGeneratorShapeProp, ExperimentalDetectronPriorGridGenerator);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronGenerateProposalsSingleImageShapeProp, ExperimentalDetectronGenerateProposalsSingleImage);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronROIFeatureExtractorShapeProp, ExperimentalDetectronROIFeatureExtractor);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronTopKROIsShapeProp, ExperimentalDetectronTopKROIs);
REG_SHAPE_INFER_FOR_TYPE(SimplerNMSShapeProp, SimplerNMS);
REG_SHAPE_INFER_FOR_TYPE(SparseToDenseShapeProp, SparseToDense);
REG_SHAPE_INFER_FOR_TYPE(ProposalShapeProp, Proposal);
REG_SHAPE_INFER_FOR_TYPE(RNNCellShapeProp, RNNCell);
REG_SHAPE_INFER_FOR_TYPE(GRUCellShapeProp, GRUCell);
REG_SHAPE_INFER_FOR_TYPE(UniqueShapeProp, Unique);
} // namespace ShapeInfer
} // namespace InferenceEngine
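A minimal caller-side sketch (assumed usage, not part of this commit) of how the holder above can be queried for one of the registered implementations; the input blobs and parameter values here are placeholders a real caller would take from the network:

std::vector<InferenceEngine::Blob::CPtr> inBlobs;        // prepared by the caller
std::map<std::string, std::string> params = {{"post_nms_topn", "300"}, {"num_outputs", "1"}};
std::map<std::string, InferenceEngine::Blob::Ptr> blobs; // constant inputs, if any
InferenceEngine::ShapeInfer::BuiltInShapeInferHolder holder;
InferenceEngine::IShapeInferImpl::Ptr impl;
InferenceEngine::ResponseDesc resp;
if (holder.getShapeInferImpl(impl, "Proposal", &resp) == InferenceEngine::OK) {
    std::vector<InferenceEngine::SizeVector> outShapes;
    impl->inferShapes(inBlobs, params, blobs, outShapes, &resp);
}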

View File

@@ -13,17 +13,15 @@
#include <description_buffer.hpp>
#include "caseless.hpp"
#include <legacy/ie_ishape_infer_extension.hpp>
#include "shape_infer/ie_ishape_infer_extension.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
IE_SUPPRESS_DEPRECATED_START
/**
*@brief Holder of shape infer implementations for built-in IE layers that plugins support out-of-the-box
*/
class INFERENCE_ENGINE_API_CLASS(BuiltInShapeInferHolder) : public IShapeInferExtension {
class BuiltInShapeInferHolder : public IShapeInferExtension {
struct ImplsHolder {
using Ptr = std::shared_ptr<ImplsHolder>;
InferenceEngine::details::caseless_map<std::string, IShapeInferImpl::Ptr> list;
@@ -48,7 +46,5 @@ private:
static ImplsHolder::Ptr GetImplsHolder();
};
IE_SUPPRESS_DEPRECATED_END
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -0,0 +1,145 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <shape_infer/ie_ishape_infer_extension.hpp>
#include <description_buffer.hpp>
#include <algorithm>
#include <cctype>
#include <list>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
namespace InferenceEngine {
inline std::string GetParamAsString(const char* param, const std::map<std::string, std::string> & params) {
auto it = params.find(param);
if (it == params.end()) {
THROW_IE_EXCEPTION << "No such parameter name '" << param << "'";
}
return (*it).second;
}
inline int GetParamAsInt(const char* param, const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, params);
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer. Value "
<< val << " cannot be casted to int.";
}
}
inline bool GetParamAsBool(const char* param, const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, params);
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return static_cast<char>(std::tolower(value));
});
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return (GetParamAsInt(param, params) != 0);
}
return result;
}
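// The boolalpha parse above accepts only "true"/"false"; for any other value
// (e.g. "1" or "0" as emitted by some IRs) the call falls back to
// GetParamAsInt, so any non-zero integer string is treated as true.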
inline std::string GetParamAsString(const char* param, const char* def,
const std::map<std::string, std::string> & params) {
auto it = params.find(param);
if (it == params.end() || it->second.empty()) {
return def;
}
return (*it).second;
}
inline int GetParamAsInt(const char* param, int def,
const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, std::to_string(def).c_str(), params);
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer. Value "
<< val << " cannot be casted to int.";
}
}
inline bool GetParamAsBool(const char* param, bool def,
const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, std::to_string(def).c_str(), params);
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return static_cast<char>(std::tolower(value));
});
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return (GetParamAsInt(param, def, params) != 0);
}
return result;
}
inline unsigned int GetParamAsUInt(const char* param, const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, params);
std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer" +
". Value " + val + " cannot be casted to unsigned int.";
try {
int value = std::stoi(val);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
return static_cast<unsigned int>(value);
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
namespace ShapeInfer {
/**
* @brief Base class for all built-in shape infer implementations. Contains common logic with validators and errors
* handling
*/
class BuiltInShapeInferImpl : public IShapeInferImpl {
public:
explicit BuiltInShapeInferImpl(const std::string& type): _type(type) { }
virtual void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) = 0;
StatusCode inferShapes(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes,
ResponseDesc* resp) noexcept override {
inShapes.clear();
for (const auto& blob : inBlobs) {
inShapes.push_back(blob->getTensorDesc().getDims());
}
outShapes.clear();
try {
inferShapesImpl(inBlobs, params, blobs, outShapes);
return OK;
} catch (const std::exception& ex) {
return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
} catch (...) {
return InferenceEngine::DescriptionBuffer(UNEXPECTED) << "Unknown error";
}
}
protected:
std::string _type;
std::vector<SizeVector> inShapes;
};
} // namespace ShapeInfer
} // namespace InferenceEngine
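As an illustration of the reworked pattern (hypothetical layer and parameter names, not part of this commit), a derived implementation now reads its attributes directly from the params map via the free helpers above, with no CNNLayer or validator involved:

class PassThroughShapeProp : public InferenceEngine::ShapeInfer::BuiltInShapeInferImpl {
public:
    explicit PassThroughShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
    void inferShapesImpl(const std::vector<InferenceEngine::Blob::CPtr>& inBlobs,
                         const std::map<std::string, std::string>& params,
                         const std::map<std::string, InferenceEngine::Blob::Ptr>& blobs,
                         std::vector<InferenceEngine::SizeVector>& outShapes) override {
        // "copies" is a made-up attribute used only for this sketch.
        const int copies = InferenceEngine::GetParamAsInt("copies", 1, params);
        for (int i = 0; i < copies; ++i)
            outShapes.push_back(inShapes[0]);  // inShapes is filled by the base inferShapes()
    }
};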

View File

@@ -15,8 +15,8 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronDetectionOutput layer
*/
* @brief Implementation of Shape inference for ExperimentalDetectronDetectionOutput layer
*/
class ExperimentalDetectronDetectionOutputShapeProp : public BuiltInShapeInferImpl {
protected:
const int ROIS = 0;
@@ -27,17 +27,12 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
auto rois_num = cnnLayer.GetParamAsUInt("max_detections_per_image");
auto rois_num = GetParamAsUInt("max_detections_per_image", params);
outShapes.push_back({rois_num, 4});
auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
if (num_outputs > 3) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
auto num_outputs = GetParamAsUInt("num_outputs", params);
if (num_outputs > 3)
THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
if (num_outputs >= 2) {
outShapes.push_back({rois_num});
}

View File

@@ -18,8 +18,8 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronPriorGridGenerator layer
*/
* @brief Implementation of Shape inference for ExperimentalDetectronPriorGridGenerator layer
*/
class ExperimentalDetectronPriorGridGeneratorShapeProp : public BuiltInShapeInferImpl {
protected:
const int PRIORS = 0;
@@ -32,19 +32,13 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
const auto& priors_shape = inShapes.at(PRIORS);
const auto priors_num = priors_shape.at(0);
const auto& featmap_shape = inShapes.at(FEATMAP);
const auto grid_height = featmap_shape.at(H);
const auto grid_width = featmap_shape.at(W);
const bool flatten = cnnLayer.GetParamAsBool("flatten", true);
const bool flatten = GetParamAsBool("flatten", true, params);
if (flatten) {
outShapes.push_back({grid_height * grid_width * priors_num, 4});
} else {

View File

@@ -15,21 +15,15 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronGenerateProposalsSingleImage layer
*/
* @brief Implementation of Shape inference for ExperimentalDetectronGenerateProposalsSingleImage layer
*/
class ExperimentalDetectronGenerateProposalsSingleImageShapeProp : public BuiltInShapeInferImpl {
public:
explicit ExperimentalDetectronGenerateProposalsSingleImageShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
auto post_nms_count = cnnLayer.GetParamAsUInt("post_nms_count");
auto post_nms_count = GetParamAsUInt("post_nms_count", params);
outShapes.push_back({post_nms_count, 4});
outShapes.push_back({post_nms_count});
}

View File

@@ -15,7 +15,7 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Proposal layer
* @brief Implementation of Shape inference for Proposal layer
*/
class ProposalShapeProp : public BuiltInShapeInferImpl {
public:
@@ -23,14 +23,12 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t post_nms_topn = static_cast<size_t>(cnnLayer.GetParamAsInt("post_nms_topn"));
auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
if (num_outputs > 2) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
size_t post_nms_topn = static_cast<size_t>(GetParamAsInt("post_nms_topn", params));
auto num_outputs = GetParamAsUInt("num_outputs", params);
if (num_outputs > 2)
THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
outShapes.push_back({inShapes[0][0] * post_nms_topn, 5});
if (num_outputs == 2)
outShapes.push_back({inShapes[0][0] * post_nms_topn});

View File

@@ -16,29 +16,24 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for DetectionOutput layer
* @brief Implementation of Shape inference for DetectionOutput layer
*/
template <class CELL, int S>
template <int S>
class RNNBaseCellShapeProp : public BuiltInShapeInferImpl {
public:
explicit RNNBaseCellShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CELL cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
auto state_dims = inShapes[1];
for (int i = 0; i < S; i++) outShapes.push_back(state_dims);
for (int i = 0; i < S; i++)
outShapes.push_back(state_dims);
}
};
using RNNCellShapeProp = RNNBaseCellShapeProp<RNNCell, 1>;
using GRUCellShapeProp = RNNBaseCellShapeProp<GRUCell, 1>;
using LSTMCellShapeProp = RNNBaseCellShapeProp<LSTMCell, 2>;
using RNNCellShapeProp = RNNBaseCellShapeProp<1>;
using GRUCellShapeProp = RNNBaseCellShapeProp<1>;
using LSTMCellShapeProp = RNNBaseCellShapeProp<2>;
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -15,8 +15,8 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronROIFeatureExtractor layer
*/
* @brief Implementation of Shape inference for ExperimentalDetectronROIFeatureExtractor layer
*/
class ExperimentalDetectronROIFeatureExtractorShapeProp : public BuiltInShapeInferImpl {
protected:
const int ROIS = 0;
@@ -27,18 +27,12 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t rois_num = inShapes.at(ROIS).at(0);
size_t channels_num = inShapes.at(FEATMAPS).at(1);
size_t output_size = static_cast<size_t>(cnnLayer.GetParamAsInt("output_size"));
size_t output_size = static_cast<size_t>(GetParamAsInt("output_size", params));
outShapes.push_back({rois_num, channels_num, output_size, output_size});
auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
auto num_outputs = GetParamAsUInt("num_outputs", params);
if (num_outputs > 2) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
if (num_outputs == 2) {
outShapes.push_back({rois_num, 4});

View File

@@ -18,7 +18,7 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for SimplerNMS layer
* @brief Implementation of Shape inference for SimplerNMS layer
*/
class SimplerNMSShapeProp : public BuiltInShapeInferImpl {
public:
@@ -26,13 +26,7 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t post_nms_topn = static_cast<size_t>(cnnLayer.GetParamAsInt("post_nms_topn"));
size_t post_nms_topn = static_cast<size_t>(GetParamAsInt("post_nms_topn", params));
outShapes.push_back({post_nms_topn, 5});
}
};

View File

@@ -14,22 +14,16 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for SparseToDense layer
* @brief Implementation of Shape inference for SparseToDense layer
*/
class SparseToDenseShapeProp : public BuiltInShapeInferImpl {
public:
explicit SparseToDenseShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs,
std::vector<SizeVector>& outShapes) override {
LayerParams lp{};
SparseToDenseLayer sparse_to_dense_layer(lp);
sparse_to_dense_layer.params = params;
sparse_to_dense_layer.type = _type;
validate(&sparse_to_dense_layer, inBlobs, params, blobs);
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs,
std::vector<SizeVector>& outShapes) override {
SizeVector shapes;
if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
auto* buffer = inBlobs[1]->cbuffer().as<int*>();

View File

@@ -4,35 +4,27 @@
#pragma once
#include <ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
#include "shape_infer/ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronTopKROIs layer
*/
* @brief Implementation of Shape inference for ExperimentalDetectronTopKROIs layer
*/
class ExperimentalDetectronTopKROIsShapeProp : public BuiltInShapeInferImpl {
public:
explicit ExperimentalDetectronTopKROIsShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
const auto max_rois = cnnLayer.GetParamAsUInt("max_rois");
const auto max_rois = GetParamAsUInt("max_rois", params);
outShapes.push_back({max_rois, 4});
}
};

View File

@@ -9,13 +9,13 @@
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
#include "shape_infer/ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Unique layer
* @brief Implementation of Shape inference for Unique layer
*/
class UniqueShapeProp : public BuiltInShapeInferImpl {
public:
@@ -23,18 +23,15 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
UniqueLayer unique_layer(lp);
unique_layer.params = params;
unique_layer.type = _type;
validate(&unique_layer, inBlobs, params, blobs);
bool return_inverse = GetParamAsBool("return_inverse", params);
bool return_counts = GetParamAsBool("return_counts", params);
// compute a number of outputs
size_t num_outputs = 1;
if (unique_layer.return_counts) {
if (return_counts) {
num_outputs++;
}
if (unique_layer.return_inverse) {
if (return_inverse) {
num_outputs++;
}

View File

@@ -18,14 +18,12 @@
#include "description_buffer.hpp"
#include <legacy/ie_layers.h>
#include <legacy/ie_ishape_infer_extension.hpp>
namespace InferenceEngine {
namespace ShapeInfer {
class Reshaper;
using ReshaperPtr = std::shared_ptr<Reshaper>;
} // namespace ShapeInfer
class IShapeInferExtension;
using IShapeInferExtensionPtr = std::shared_ptr<IShapeInferExtension>;
namespace details {
class INFERENCE_ENGINE_API_CLASS(CNNNetworkImpl): public ICNNNetwork {
@@ -126,9 +124,6 @@ public:
StatusCode reshape(const std::map<std::string, std::vector<size_t>>& inputShapes,
ResponseDesc* resp) noexcept override;
StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension,
InferenceEngine::ResponseDesc* resp) noexcept;
StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
noexcept override;
@@ -139,7 +134,6 @@ protected:
std::map<std::string, DataPtr> _outputData;
std::string _name;
DataPtr _emptyData;
ShapeInfer::ReshaperPtr _reshaper;
};
typedef std::shared_ptr<CNNNetworkImpl> CNNNetworkImplPtr;

View File

@@ -29,7 +29,6 @@
#include "legacy/details/ie_cnn_network_tools.h"
#include <legacy/cnn_network_impl.hpp>
#include "network_serializer_v7.hpp"
#include <shape_infer/ie_reshaper.hpp>
using namespace std;
using namespace InferenceEngine;
@@ -364,31 +363,24 @@ size_t CNNNetworkImpl::getBatchSize() const noexcept {
StatusCode CNNNetworkImpl::reshape(const std::map<std::string, std::vector<size_t>>& inputShapes,
ResponseDesc* responseDesc) noexcept {
try {
if (!_reshaper) _reshaper = std::make_shared<ShapeInfer::Reshaper>(*this);
_reshaper->run(inputShapes);
} catch (const InferenceEngineException& e) {
return DescriptionBuffer(GENERAL_ERROR, responseDesc) << e.what();
} catch (const std::exception& e) {
return DescriptionBuffer(UNEXPECTED, responseDesc) << e.what();
} catch (...) {
return DescriptionBuffer(UNEXPECTED, responseDesc);
for (const auto& pair : _inputData) {
auto info = pair.second;
if (info) {
auto data = info->getInputData();
auto it = inputShapes.find(pair.first);
if (data && it != inputShapes.end()) {
auto newDims = it->second;
auto currentDims = data->getTensorDesc().getDims();
if (newDims != currentDims) {
return DescriptionBuffer(NOT_IMPLEMENTED, responseDesc) <<
"You have called setBatchSize + reshape for CNNNetwork object. Please, either: \n"
"- [SUGGESTED] Regenerate IR with current version of Model Optimizer\n"
"- [WORKAROUND] Call only reshape method where proper batch is already set\n";
}
}
}
}
return OK;
}
StatusCode CNNNetworkImpl::AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension,
InferenceEngine::ResponseDesc* resp) noexcept {
try {
if (!_reshaper) _reshaper = std::make_shared<ShapeInfer::Reshaper>(*this);
_reshaper->AddExtension(extension);
} catch (const InferenceEngineException& e) {
return DescriptionBuffer(GENERAL_ERROR, resp) << e.what();
} catch (const std::exception& e) {
return DescriptionBuffer(UNEXPECTED, resp) << e.what();
} catch (...) {
return DescriptionBuffer(UNEXPECTED, resp);
}
return OK;
}
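To make the new behavior concrete: with the Reshaper removed, reshape() on a v7 network no longer propagates shapes; it only accepts requests whose dimensions already match the current ones. A hedged illustration, where network is assumed to be a CNNNetworkImpl instance and the input name and shape are placeholders:

std::map<std::string, std::vector<size_t>> shapes = {{"data", {1, 3, 224, 224}}};
InferenceEngine::ResponseDesc resp;
// OK if "data" is already 1x3x224x224; NOT_IMPLEMENTED otherwise, with the
// suggestion to regenerate the IR using a current Model Optimizer.
InferenceEngine::StatusCode sts = network.reshape(shapes, &resp);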

View File

@@ -1105,10 +1105,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
i.second->setLayout(thisInputData.getLayout());
i.second->getPreProcess() = thisInputData.getPreProcess();
}
for (const auto &ext : ::ngraph::op::GenericIE::getExtensions(graph)) {
cnnNetworkImpl->AddExtension(ext, nullptr);
}
}
std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function> &graph,

View File

@@ -1,67 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ArgMax layer
*/
class ArgMaxShapeProp : public BuiltInShapeInferImpl {
public:
explicit ArgMaxShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
auto out_max_val = static_cast<size_t>(cnnLayer.GetParamAsInt("out_max_val", 0));
auto top_k = static_cast<size_t>(cnnLayer.GetParamAsInt("top_k", 0));
int axis = 0;
bool isValidAxis = true;
try {
axis = cnnLayer.GetParamAsInt("axis");
} catch (const details::InferenceEngineException& exception) {
isValidAxis = false;
}
auto firstInputShape = inShapes[0];
size_t num_top_axes = firstInputShape.size();
if (num_top_axes < 3) num_top_axes = 3;
SizeVector outputShape(num_top_axes, 1lu);
if (isValidAxis) {
if (axis < 0) {
axis = static_cast<int>(firstInputShape.size() + axis);
}
outputShape = firstInputShape;
outputShape[axis] = top_k;
} else {
outputShape[0] = firstInputShape[0];
outputShape[2] = top_k;
if (out_max_val) {
outputShape[1] = 2;
}
}
outShapes.push_back(outputShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,78 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <debug.h>
#include <cmath>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for BinaryConvolution layer
*/
class BinConvShapeProp : public BuiltInShapeInferImpl {
public:
explicit BinConvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
BinaryConvolutionLayer binConvLayer(lp);
binConvLayer.params = params;
binConvLayer.type = _type;
validate(&binConvLayer, inBlobs, params, blobs);
auto dims = inShapes[0];
auto computeSpatialShape = [&](size_t inDim, int axis) {
size_t kernel = 0;
if (binConvLayer._dilation[axis])
kernel = (binConvLayer._kernel[axis] - 1) * binConvLayer._dilation[axis] + 1;
else
kernel = binConvLayer._kernel[axis];
size_t stride = binConvLayer._stride[axis];
size_t pad = binConvLayer._padding[axis];
float outDim;
std::string padType = binConvLayer._auto_pad;
if (padType == "valid") {
outDim = std::ceil((inDim - kernel + 1.f) / stride);
} else if (padType == "same_upper") {
outDim = std::ceil(1.f * inDim / stride);
} else if (padType == "same_lower") {
outDim = std::floor(1.f * inDim / stride);
} else {
int padEnd = binConvLayer._pads_end[axis];
outDim = std::floor(1.f * (inDim + pad + padEnd - kernel) / stride) + 1.f;
}
if (outDim < 0)
THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
return static_cast<size_t>(outDim);
};
size_t inputN = dims[0];
size_t OC = binConvLayer._out_depth;
SizeVector shapes;
shapes.push_back(inputN);
shapes.push_back(OC);
if (dims.size() == 5) shapes.push_back(computeSpatialShape(dims[dims.size() - 3], Z_AXIS));
shapes.push_back(computeSpatialShape(dims[dims.size() - 2], Y_AXIS));
shapes.push_back(computeSpatialShape(dims[dims.size() - 1], X_AXIS));
outShapes.push_back(shapes);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,80 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
#include "precision_utils.h"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Broadcast layer
*/
class BroadcastShapeProp : public BuiltInShapeInferImpl {
public:
explicit BroadcastShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
BroadcastLayer broadcastLayer(lp);
broadcastLayer.params = params;
broadcastLayer.type = _type;
validate(&broadcastLayer, inBlobs, params, blobs);
SizeVector shapes;
if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
auto* buffer = inBlobs[1]->cbuffer().as<int*>();
if (buffer != nullptr) {
shapes.assign(buffer, buffer + inBlobs[1]->size());
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP32) {
auto* buffer = inBlobs[1]->cbuffer().as<float*>();
if (buffer != nullptr) {
for (int i = 0; i < inBlobs[1]->size(); i++) {
shapes.push_back(static_cast<int>(buffer[i]));
}
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP16) {
auto* buffer = inBlobs[1]->cbuffer().as<uint16_t*>();
if (buffer != nullptr) {
for (int i = 0; i < inBlobs[1]->size(); i++) {
shapes.push_back(static_cast<int>(PrecisionUtils::f16tof32(buffer[i])));
}
}
} else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I64) {
auto* buffer = inBlobs[1]->cbuffer().as<int64_t*>();
if (buffer != nullptr) {
shapes.assign(buffer, buffer + inBlobs[1]->size());
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::U64) {
auto* buffer = inBlobs[1]->cbuffer().as<uint64_t*>();
if (buffer != nullptr) {
shapes.assign(buffer, buffer + inBlobs[1]->size());
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else {
THROW_IE_EXCEPTION << "Second input must have I32 or FP32 or FP16 precision";
}
outShapes = {shapes};
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,43 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ie_built_in_impl.hpp"
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Bucketize layer
*/
class BucketizeShapeProp : public BuiltInShapeInferImpl {
public:
explicit BucketizeShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs,
std::vector<SizeVector>& outShapes) override {
LayerParams lp{};
BucketizeLayer bucketize_layer(lp);
bucketize_layer.params = params;
bucketize_layer.type = _type;
validate(&bucketize_layer, inBlobs, params, blobs);
// compute a number of outputs
size_t num_outputs = 1;
// reshape available outputs
outShapes.resize(num_outputs);
outShapes[0] = inShapes[0];
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,264 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <memory>
#include <string>
#include "legacy/shape_infer/built-in/ie_built_in_holder.hpp"
#include "ie_argmax_shape_infer.hpp"
#include "ie_bin_conv_shape_infer.hpp"
#include "ie_broadcast_shape_infer.hpp"
#include "ie_concat_shape_infer.hpp"
#include "ie_conv_shape_infer.hpp"
#include "ie_crop_shape_infer.hpp"
#include "ie_ctc_greedy_decoder_shape_infer.hpp"
#include "ie_deconv_shape_infer.hpp"
#include "ie_deformable_conv_shape_infer.hpp"
#include "ie_depth_to_space_shape_infer.hpp"
#include "ie_detectionoutput_onnx_shape_infer.hpp"
#include "ie_detection_output_shape_infer.hpp"
#include "ie_eltwise_shape_infer.hpp"
#include "ie_equal_shape_infer.hpp"
#include "ie_erf_shape_infer.hpp"
#include "ie_fill_shape_infer.hpp"
#include "ie_flatten_shape_infer.hpp"
#include "ie_gather_shape_infer.hpp"
#include "ie_gather_tree_shape_infer.hpp"
#include "ie_gemm_shape_infer.hpp"
#include "ie_inner_product_shape_infer.hpp"
#include "ie_interp_shape_infer.hpp"
#include "ie_non_max_suppression_shape_infer.hpp"
#include "ie_one_hot_shape_infer.hpp"
#include "ie_pad_shape_infer.hpp"
#include "ie_permute_shape_infer.hpp"
#include "ie_pool_shape_infer.hpp"
#include "ie_priorbox_clustered_shape_infer.hpp"
#include "ie_priorbox_shape_infer.hpp"
#include "ie_priorgridgenerator_onnx_shape_infer.hpp"
#include "ie_proposal_onnx_shape_infer.hpp"
#include "ie_proposal_shape_infer.hpp"
#include "ie_psroi_pooling_shape_infer.hpp"
#include "ie_quantize_shape_infer.hpp"
#include "ie_range_shape_infer.hpp"
#include "ie_reduce_shape_infer.hpp"
#include "ie_region_yolo_shape_infer.hpp"
#include "ie_reorg_yolo_shape_infer.hpp"
#include "ie_resample_shape_infer.hpp"
#include "ie_reshape_shape_infer.hpp"
#include "ie_reverse_sequence_shape_infer.hpp"
#include "ie_rnn_cell_shape_infer.hpp"
#include "ie_rnn_shape_infer.hpp"
#include "ie_roi_pooling_shape_infer.hpp"
#include "ie_roifeatureextractor_onnx_shape_infer.hpp"
#include "ie_scatter_shape_infer.hpp"
#include "ie_select_shape_infer.hpp"
#include "ie_shape_shape_infer.hpp"
#include "ie_shuffle_channels_shape_infer.hpp"
#include "ie_simpler_nms_shape_infer.hpp"
#include "ie_space_to_depth_shape_infer.hpp"
#include "ie_sparse_fill_empty_rows_shape_infer.hpp"
#include "ie_sparse_segment_reduce_shape_infer.hpp"
#include "ie_split_shape_infer.hpp"
#include "ie_sparse_to_dense_shape_infer.hpp"
#include "ie_bucketize_shape_infer.hpp"
#include "ie_squeeze_shape_infer.hpp"
#include "ie_sparse_weighted_reduce_shape_infer.hpp"
#include "ie_strided_slice_shape_infer.hpp"
#include "ie_tensor_iterator_shape_infer.hpp"
#include "ie_tile_shape_infer.hpp"
#include "ie_topk_shape_infer.hpp"
#include "ie_topkrois_onnx_shape_infer.hpp"
#include "ie_unique_shape_infer.hpp"
#include "ie_unsqueeze_shape_infer.hpp"
#include "ie_upsampling_shape_infer.hpp"
#include "impl_register.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
BuiltInShapeInferHolder::ImplsHolder::Ptr BuiltInShapeInferHolder::GetImplsHolder() {
static ImplsHolder::Ptr localHolder;
if (localHolder == nullptr) {
localHolder = std::make_shared<ImplsHolder>();
}
return localHolder;
}
IE_SUPPRESS_DEPRECATED_START
void BuiltInShapeInferHolder::AddImpl(const std::string& name, const IShapeInferImpl::Ptr& impl) {
GetImplsHolder()->list[name] = impl;
}
StatusCode BuiltInShapeInferHolder::getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
auto& factories = GetImplsHolder()->list;
types = new char*[factories.size()];
size = 0;
for (auto it = factories.begin(); it != factories.end(); it++, size++) {
types[size] = new char[it->first.size() + 1];
std::copy(it->first.begin(), it->first.end(), types[size]);
types[size][it->first.size()] = '\0';
}
return OK;
}
StatusCode BuiltInShapeInferHolder::getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type,
ResponseDesc* resp) noexcept {
auto& impls = BuiltInShapeInferHolder::GetImplsHolder()->list;
if (impls.find(type) != impls.end()) {
impl = impls[type];
return OK;
}
impl.reset();
return NOT_FOUND;
}
IE_SUPPRESS_DEPRECATED_END
// Register without implementation just to protect from adding custom implementation for them
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Input);
REG_SHAPE_INFER_FOR_TYPE(DoNothingShapeProp, Output);
REG_SHAPE_INFER_FOR_TYPE(MemoryShapeProp, Memory);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Const);
// Outputs = Inputs
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Activation);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ReLU);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ReLU6);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ELU);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, TanH);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Logistic);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Sigmoid);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, PReLU);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, SoftMax);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, LogSoftMax);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, LRN);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Norm);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Normalize);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Convert);
// FIXME: Really Copy??? New MO doesn't generate this layer
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Copy);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Power);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, PowerFile);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Clamp);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ScaleShift);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, BatchNormalization);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, GRN);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, MVN);
REG_SHAPE_INFER_FOR_TYPE(ConvShapeProp, Convolution);
REG_SHAPE_INFER_FOR_TYPE(DeconvShapeProp, Deconvolution);
REG_SHAPE_INFER_FOR_TYPE(DeformableConvShapeProp, DeformableConvolution);
REG_SHAPE_INFER_FOR_TYPE(PoolingShapeProp, Pooling);
REG_SHAPE_INFER_FOR_TYPE(InnerProductShapeProp, InnerProduct);
REG_SHAPE_INFER_FOR_TYPE(InnerProductShapeProp, FullyConnected);
REG_SHAPE_INFER_FOR_TYPE(SplitShapeProp, Split);
REG_SHAPE_INFER_FOR_TYPE(SplitShapeProp, Slice);
REG_SHAPE_INFER_FOR_TYPE(PermuteShapeProp, Permute);
REG_SHAPE_INFER_FOR_TYPE(FlattenShapeProp, Flatten);
REG_SHAPE_INFER_FOR_TYPE(ReshapeShapeProp, Reshape);
REG_SHAPE_INFER_FOR_TYPE(DetectionOutputShapeProp, DetectionOutput);
REG_SHAPE_INFER_FOR_TYPE(PriorBoxClusteredShapeProp, PriorBoxClustered);
REG_SHAPE_INFER_FOR_TYPE(PriorBoxShapeProp, PriorBox);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronDetectionOutputShapeProp, ExperimentalDetectronDetectionOutput);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronPriorGridGeneratorShapeProp, ExperimentalDetectronPriorGridGenerator);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronGenerateProposalsSingleImageShapeProp, ExperimentalDetectronGenerateProposalsSingleImage);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronROIFeatureExtractorShapeProp, ExperimentalDetectronROIFeatureExtractor);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronTopKROIsShapeProp, ExperimentalDetectronTopKROIs);
REG_SHAPE_INFER_FOR_TYPE(RoiPoolingShapeProp, ROIPooling);
REG_SHAPE_INFER_FOR_TYPE(PSRoiPoolingShapeProp, PSROIPooling);
REG_SHAPE_INFER_FOR_TYPE(UpsamplingShapeProp, Upsampling);
REG_SHAPE_INFER_FOR_TYPE(ResampleShapeProp, Resample);
REG_SHAPE_INFER_FOR_TYPE(InterpShapeProp, Interp);
REG_SHAPE_INFER_FOR_TYPE(SimplerNMSShapeProp, SimplerNMS);
REG_SHAPE_INFER_FOR_TYPE(TileShapeProp, Tile);
REG_SHAPE_INFER_FOR_TYPE(CropShapeProp, Crop);
REG_SHAPE_INFER_FOR_TYPE(ConcatShapeProp, Concat);
REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Eltwise);
REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Mul);
REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Add);
REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Div);
REG_SHAPE_INFER_FOR_TYPE(CTCGreedyDecoderShapeProp, CTCGreedyDecoder);
REG_SHAPE_INFER_FOR_TYPE(ProposalShapeProp, Proposal);
REG_SHAPE_INFER_FOR_TYPE(ReorgYoloShapeProp, ReorgYolo);
REG_SHAPE_INFER_FOR_TYPE(RegionYoloShapeProp, RegionYolo);
REG_SHAPE_INFER_FOR_TYPE(RNNShapeProp, RNNSequence);
REG_SHAPE_INFER_FOR_TYPE(RNNShapeProp, GRUSequence);
REG_SHAPE_INFER_FOR_TYPE(RNNShapeProp, LSTMSequence);
REG_SHAPE_INFER_FOR_TYPE(RNNCellShapeProp, RNNCell);
REG_SHAPE_INFER_FOR_TYPE(GRUCellShapeProp, GRUCell);
REG_SHAPE_INFER_FOR_TYPE(LSTMCellShapeProp, LSTMCell);
REG_SHAPE_INFER_FOR_TYPE(TensorIteratorShapeProp, TensorIterator);
REG_SHAPE_INFER_FOR_TYPE(ArgMaxShapeProp, ArgMax);
REG_SHAPE_INFER_FOR_TYPE(GemmShapeProp, Gemm);
REG_SHAPE_INFER_FOR_TYPE(PadShapeProp, Pad);
REG_SHAPE_INFER_FOR_TYPE(GatherShapeProp, Gather);
REG_SHAPE_INFER_FOR_TYPE(StridedSliceShapeProp, StridedSlice);
REG_SHAPE_INFER_FOR_TYPE(ShuffleChannelsShapeProp, ShuffleChannels);
REG_SHAPE_INFER_FOR_TYPE(DepthToSpaceShapeProp, DepthToSpace);
REG_SHAPE_INFER_FOR_TYPE(SpaceToDepthShapeProp, SpaceToDepth);
REG_SHAPE_INFER_FOR_TYPE(SparseFillEmptyRowsShapeProp, SparseFillEmptyRows);
REG_SHAPE_INFER_FOR_TYPE(SparseSegmentReduceShapeProp, SparseSegmentMean);
REG_SHAPE_INFER_FOR_TYPE(SparseSegmentReduceShapeProp, SparseSegmentSqrtN);
REG_SHAPE_INFER_FOR_TYPE(SparseSegmentReduceShapeProp, SparseSegmentSum);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalSparseWeightedReduceShapeProp, ExperimentalSparseWeightedSum);
REG_SHAPE_INFER_FOR_TYPE(SparseToDenseShapeProp, SparseToDense);
REG_SHAPE_INFER_FOR_TYPE(BucketizeShapeProp, Bucketize);
REG_SHAPE_INFER_FOR_TYPE(ReverseSequenceShapeProp, ReverseSequence);
REG_SHAPE_INFER_FOR_TYPE(SelectShapeProp, Select);
REG_SHAPE_INFER_FOR_TYPE(SqueezeShapeProp, Squeeze);
REG_SHAPE_INFER_FOR_TYPE(UnsqueezeShapeProp, Unsqueeze);
REG_SHAPE_INFER_FOR_TYPE(RangeShapeProp, Range);
REG_SHAPE_INFER_FOR_TYPE(FillShapeProp, Fill);
REG_SHAPE_INFER_FOR_TYPE(BroadcastShapeProp, Broadcast);
REG_SHAPE_INFER_FOR_TYPE(ShapeShapeProp, Shape);
REG_SHAPE_INFER_FOR_TYPE(OneHotShapeProp, OneHot);
REG_SHAPE_INFER_FOR_TYPE(QuantizeShapeProp, FakeQuantize);
REG_SHAPE_INFER_FOR_TYPE(BinConvShapeProp, BinaryConvolution);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Abs);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Acos);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Acosh);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Asin);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Asinh);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Atan);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Atanh);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Ceil);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Cos);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Cosh);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Erf);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Floor);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, HardSigmoid);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Log);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Exp);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Neg);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Reciprocal);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Selu);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Sign);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Sin);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Sinh);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Softplus);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Softsign);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Tan);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceAnd);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceL1);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceL2);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceLogSum);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceLogSumExp);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceMax);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceMean);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceMin);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceOr);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceProd);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceSum);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceSumSquare);
REG_SHAPE_INFER_FOR_TYPE(GatherTreeShapeProp, GatherTree);
REG_SHAPE_INFER_FOR_TYPE(TopKShapeProp, TopK);
REG_SHAPE_INFER_FOR_TYPE(UniqueShapeProp, Unique);
REG_SHAPE_INFER_FOR_TYPE(NMSShapeProp, NonMaxSuppression);
REG_SHAPE_INFER_FOR_TYPE(ScatterUpdateShapeProp, ScatterUpdate);
REG_SHAPE_INFER_FOR_TYPE(ScatterElementsUpdateShapeProp, ScatterElementsUpdate);
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,71 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <ie_layer_validators.hpp>
#include <list>
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace ShapeInfer {
IE_SUPPRESS_DEPRECATED_START
/**
*@brief Base class for all built-in shape infer implementations. Contains common logic with validators and errors
*handling
*/
class BuiltInShapeInferImpl : public IShapeInferImpl {
public:
explicit BuiltInShapeInferImpl(const std::string& type): _type(type) {
_validator = details::LayerValidators::getInstance()->getValidator(_type);
if (!_validator)
THROW_IE_EXCEPTION << "Internal error: failed to find validator for layer with type: " << _type;
}
void validate(CNNLayer* layer, const std::vector<Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params, const std::map<std::string, Blob::Ptr>& blobs) {
_validator->parseParams(layer);
}
virtual void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) = 0;
StatusCode inferShapes(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes,
ResponseDesc* resp) noexcept override {
inShapes.clear();
for (const auto& blob : inBlobs) {
inShapes.push_back(blob->getTensorDesc().getDims());
}
outShapes.clear();
try {
inferShapesImpl(inBlobs, params, blobs, outShapes);
return OK;
} catch (const std::exception& ex) {
return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
} catch (...) {
return InferenceEngine::DescriptionBuffer(UNEXPECTED) << "Unknown error";
}
}
protected:
std::string _type;
details::LayerValidator::Ptr _validator;
std::vector<SizeVector> inShapes;
};
IE_SUPPRESS_DEPRECATED_END
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,44 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Concat layer
*/
class ConcatShapeProp : public BuiltInShapeInferImpl {
public:
explicit ConcatShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
ConcatLayer concatLayer(lp);
concatLayer.params = params;
concatLayer.type = _type;
validate(&concatLayer, inBlobs, params, blobs);
size_t sum(0);
size_t axis = concatLayer._axis;
outShapes.push_back(inShapes[0]);
for (const auto& inShape : inShapes) {
if (axis >= inShape.size()) THROW_IE_EXCEPTION << "Axis can't be more then number of input shapes";
sum += inShape[axis];
}
outShapes[0][axis] = sum;
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,82 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Convolution layer
*/
class ConvShapeProp : public BuiltInShapeInferImpl {
public:
explicit ConvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
ConvolutionLayer convLayer(lp);
convLayer.params = params;
convLayer.type = _type;
validate(&convLayer, inBlobs, params, blobs);
auto dims = inShapes[0];
auto dims_size = dims.size();
auto spacial_d_size = dims.size() - 2;
float* OD_temp = new float[spacial_d_size];
size_t* KDims = new size_t[spacial_d_size];
size_t inputN = dims[0];
for (int i = 0; i < spacial_d_size; i++) {
if (convLayer._dilation[i])
KDims[i] = (convLayer._kernel[i] - 1) * convLayer._dilation[i] + 1;
else
KDims[i] = convLayer._kernel[i];
}
size_t OC = convLayer._out_depth;
std::string padType = convLayer._auto_pad;
if (padType == "valid") {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = std::ceil((dims[dims_size - 1 - i] - KDims[i] + 1.f) / convLayer._stride[i]);
} else if (padType == "same_upper") {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = std::ceil(1.f * dims[dims_size - 1 - i] / convLayer._stride[i]);
} else if (padType == "same_lower") {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = std::floor(1.f * dims[dims_size - 1 - i] / convLayer._stride[i]);
} else {
for (int i = 0; i < spacial_d_size; i++) {
OD_temp[i] =
std::floor(1.f *
(dims[dims_size - 1 - i] + convLayer._padding[i] + convLayer._pads_end[i] - KDims[i]) /
convLayer._stride[i]) +
1.f;
}
}
for (int i = 0; i < spacial_d_size; i++)
if (OD_temp[i] < 0)
THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
SizeVector outShape = {inputN, OC};
for (int i = spacial_d_size - 1; i >= 0; i--) outShape.push_back(static_cast<size_t>(OD_temp[i]));
outShapes.push_back(outShape);
delete[] OD_temp;
delete[] KDims;
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,51 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Crop layer
*/
class CropShapeProp : public BuiltInShapeInferImpl {
public:
explicit CropShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CropLayer cropLayer(lp);
cropLayer.params = params;
cropLayer.type = _type;
validate(&cropLayer, inBlobs, params, blobs);
outShapes.push_back(inShapes[0]);
if (inShapes.size() == 2) {
SizeVector cropShapes = inShapes[1];
for (int axis : cropLayer.axis) {
outShapes[0][axis] = cropShapes[axis];
}
} else {
std::vector<int> crop_end;
bool isDim = cropLayer.params.find("dim") != cropLayer.params.end();
if (!isDim) crop_end = cropLayer.GetParamAsInts("crop_end");
for (size_t i = 0; i < cropLayer.axis.size(); i++) {
outShapes[0][cropLayer.axis[i]] =
isDim ? cropLayer.dim[i] : inShapes[0][cropLayer.axis[i]] - cropLayer.offset[i] - crop_end[i];
}
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,38 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for CTCGreedyDecoder layer
*/
class CTCGreedyDecoderShapeProp : public BuiltInShapeInferImpl {
public:
explicit CTCGreedyDecoderShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
outShapes.clear();
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
outShapes.push_back({inShapes[0][1], inShapes[0][0], 1, 1});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,72 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Deconvolution layer
*/
class DeconvShapeProp : public BuiltInShapeInferImpl {
public:
explicit DeconvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
DeconvolutionLayer deconvLayer(lp);
deconvLayer.params = params;
deconvLayer.type = _type;
validate(&deconvLayer, inBlobs, params, blobs);
auto dims = inShapes[0];
auto dims_size = dims.size();
auto spacial_d_size = dims.size() - 2;
float* OD_temp = new float[spacial_d_size];
size_t* KDims = new size_t[spacial_d_size];
size_t inputN = dims[0];
for (int i = 0; i < spacial_d_size; i++) {
if (deconvLayer._dilation[i])
KDims[i] = (deconvLayer._kernel[i] - 1) * deconvLayer._dilation[i] + 1;
else
KDims[i] = deconvLayer._kernel[i];
}
size_t OC = deconvLayer._out_depth;
std::string padType = deconvLayer._auto_pad;
if (padType == "valid") {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = (dims[dims_size - 1 - i] - 1) * deconvLayer._stride[i] + KDims[i];
} else if ((padType == "same_upper") || (padType == "same_lower")) {
for (int i = 0; i < spacial_d_size; i++) OD_temp[i] = dims[dims_size - 1 - i] * deconvLayer._stride[i];
} else {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = deconvLayer._stride[i] * (dims[dims_size - 1 - i] - 1) + KDims[i] -
deconvLayer._padding[i] - deconvLayer._pads_end[i];
}
for (int i = 0; i < spacial_d_size; i++)
if (OD_temp[i] < 0)
THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
SizeVector outShape = {inputN, OC};
for (int i = spacial_d_size - 1; i >= 0; i--) outShape.push_back(static_cast<size_t>(OD_temp[i]));
outShapes.emplace_back(outShape);
delete[] OD_temp;
delete[] KDims;
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,77 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Deformable Convolution layer
*/
class DeformableConvShapeProp : public BuiltInShapeInferImpl {
public:
explicit DeformableConvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
DeformableConvolutionLayer deformableConvLayer(lp);
deformableConvLayer.params = params;
deformableConvLayer.type = _type;
validate(&deformableConvLayer, inBlobs, params, blobs);
auto dims = inShapes[0];
auto dims_size = dims.size();
auto spacial_d_size = dims.size() - 2;
std::vector<float> OD_temp(spacial_d_size);
std::vector<size_t> KDims(spacial_d_size);
size_t inputN = dims[0];
for (int i = 0; i < spacial_d_size; i++) {
if (deformableConvLayer._dilation[i])
KDims[i] = (deformableConvLayer._kernel[i] - 1) * deformableConvLayer._dilation[i] + 1;
else
KDims[i] = deformableConvLayer._kernel[i];
}
size_t OC = deformableConvLayer._out_depth;
std::string padType = deformableConvLayer._auto_pad;
if (padType == "valid") {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = std::ceil((dims[dims_size - 1 - i] - KDims[i] + 1.f) / deformableConvLayer._stride[i]);
} else if (padType == "same_upper") {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = std::ceil(1.f * dims[dims_size - 1 - i] / deformableConvLayer._stride[i]);
} else if (padType == "same_lower") {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = std::floor(1.f * dims[dims_size - 1 - i] / deformableConvLayer._stride[i]);
} else {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = std::floor(1.f *
(dims[dims_size - 1 - i] + deformableConvLayer._padding[i] +
deformableConvLayer._pads_end[i] - KDims[i]) /
deformableConvLayer._stride[i]) +
1.f;
}
for (int i = 0; i < spacial_d_size; i++)
if (OD_temp[i] < 0)
THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
SizeVector outShape = {inputN, OC};
for (int i = spacial_d_size - 1; i >= 0; i--) outShape.push_back(static_cast<size_t>(OD_temp[i]));
outShapes.emplace_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,42 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for DepthToSpace layer
*/
class DepthToSpaceShapeProp : public BuiltInShapeInferImpl {
public:
explicit DepthToSpaceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
DepthToSpaceLayer depthToSpaceLayer(lp);
depthToSpaceLayer.params = params;
depthToSpaceLayer.type = _type;
validate(&depthToSpaceLayer, inBlobs, params, blobs);
unsigned int block_size = depthToSpaceLayer.block_size;
outShapes = {inShapes[0]};
outShapes[0][outShapes[0].size() - 1] = inShapes[0][inShapes[0].size() - 1] * block_size;
outShapes[0][outShapes[0].size() - 2] = inShapes[0][inShapes[0].size() - 2] * block_size;
outShapes[0][outShapes[0].size() - 3] = inShapes[0][inShapes[0].size() - 3] / block_size / block_size;
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
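
The effect on an NCHW-like shape is easiest to see with concrete numbers; a small sketch under the same assumption as the code above (the last three dims are C, H, W; the helper name is hypothetical):

#include <cstddef>
#include <vector>

// DepthToSpace with block size b: [..., C, H, W] -> [..., C / (b * b), H * b, W * b].
std::vector<size_t> depthToSpaceShape(std::vector<size_t> dims, size_t b) {
    const size_t n = dims.size();
    dims[n - 3] /= b * b;  // channels shrink by b^2
    dims[n - 2] *= b;      // height grows by b
    dims[n - 1] *= b;      // width grows by b
    return dims;
}

// Example: {1, 16, 4, 4} with b = 2 -> {1, 4, 8, 8}.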

View File

@ -1,41 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for DetectionOutput layer
*/
class DetectionOutputShapeProp : public BuiltInShapeInferImpl {
public:
explicit DetectionOutputShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
int top_k = cnnLayer.GetParamAsInt("keep_top_k");
outShapes.push_back({1, 1, static_cast<size_t>(top_k) * inShapes[0][0], 7});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,52 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for EltWise layer
*/
class EltWiseShapeProp : public BuiltInShapeInferImpl {
public:
explicit EltWiseShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
EltwiseLayer eltwiseLayer(lp);
eltwiseLayer.params = params;
eltwiseLayer.type = _type;
validate(&eltwiseLayer, inBlobs, params, blobs);
if (inShapes.size() == 1) {
outShapes.push_back(inShapes[0]);
} else {
SizeVector outShape(inShapes[0].size() >= inShapes[1].size() ? inShapes[0] : inShapes[1]);  // start from the higher-rank input
for (size_t ind = 0; ind < outShape.size(); ++ind) {
if (ind < inShapes[0].size() && ind < inShapes[1].size()) {
outShape[ind] = (std::max)(inShapes[0][ind], inShapes[1][ind]);
} else if (ind >= inShapes[0].size()) {
outShape[ind] = inShapes[1][ind];
} else {
outShape[ind] = inShapes[0][ind];
}
}
outShapes.push_back(outShape);
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
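
The two-input branch starts from the higher-rank shape and takes a per-dimension maximum over the leading dims both inputs share. A compact standalone sketch of that rule (plain vectors; helper name made up):

#include <algorithm>
#include <cstddef>
#include <vector>

// Per-dimension maximum of two shapes; dims beyond the shorter rank come from the longer one.
std::vector<size_t> eltwiseShape(const std::vector<size_t>& a, const std::vector<size_t>& b) {
    std::vector<size_t> out(a.size() >= b.size() ? a : b);
    for (size_t i = 0; i < std::min(a.size(), b.size()); ++i)
        out[i] = std::max(a[i], b[i]);
    return out;
}

// Example: {1, 3, 224, 224} and {1, 3, 1, 1} -> {1, 3, 224, 224}.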

View File

@ -1,57 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference that just assigns input shapes to output shapes
*/
class EqualShapeProp : public BuiltInShapeInferImpl {
public:
explicit EqualShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
outShapes = inShapes;
}
};
class DoNothingShapeProp : public BuiltInShapeInferImpl {
public:
explicit DoNothingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {}
};
class MemoryShapeProp : public BuiltInShapeInferImpl {
public:
explicit MemoryShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
std::stringstream ss;
ss.str(params.at("index"));
int idx;
ss >> idx;
// Only Memory instances with index == 1 propagate input shapes to outputs
if (idx == 1) {
outShapes = inShapes;
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Math layers
*/
class MathShapeProp : public BuiltInShapeInferImpl {
public:
explicit MathShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
MathLayer mathLayer(lp);
mathLayer.params = params;
mathLayer.type = _type;
validate(&mathLayer, inBlobs, params, blobs);
outShapes = {inShapes[0]};
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,47 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Fill layer
*/
class FillShapeProp : public BuiltInShapeInferImpl {
public:
explicit FillShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
FillLayer fillLayer(lp);
fillLayer.params = params;
fillLayer.type = _type;
validate(&fillLayer, inBlobs, params, blobs);
auto dimsBlob = *inBlobs.begin();
SizeVector shape;
SizeVector dims = dimsBlob->getTensorDesc().getDims();
auto* buffer = dimsBlob->cbuffer().as<int32_t*>();
if (!buffer || dimsBlob->getTensorDesc().getPrecision() != Precision::I32)
THROW_IE_EXCEPTION << " Fill dimensions vector should be I32!";
for (int i = 0; i < dimsBlob->size(); i++) {
shape.push_back(buffer[i]);
}
outShapes = {shape};
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,67 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <functional>
#include <numeric>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Flatten layer
*/
class FlattenShapeProp : public BuiltInShapeInferImpl {
public:
explicit FlattenShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
ReshapeLayer reshapeLayer(lp);
reshapeLayer.params = params;
reshapeLayer.type = _type;
validate(&reshapeLayer, inBlobs, params, blobs);
auto inputShape = inShapes[0];
size_t inputShapeTotal = std::accumulate(inputShape.begin(), inputShape.end(), 1lu, std::multiplies<size_t>());
SizeVector outShape;
int numAxes = reshapeLayer.num_axes;
int axis = reshapeLayer.axis;
size_t notFlatten = 1;
if (numAxes == -1 && axis == 0) {
outShape = {inputShapeTotal};
} else {
if (axis > 0) {
for (int i = 0; i < axis; i++) {
notFlatten *= inputShape[i];
outShape.push_back(inputShape[i]);
}
}
outShape.push_back(1);
if (numAxes > 0) {
for (int i = numAxes + 1; i < inputShape.size(); i++) {
notFlatten *= inputShape[i];
outShape.push_back(inputShape[i]);
}
}
outShape[axis] = inputShapeTotal / notFlatten;
}
outShapes.emplace_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
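
Concretely, dims [axis, num_axes] collapse into a single dimension and everything else is kept. A sketch of the num_axes > 0 branch only (plain vectors; helper name hypothetical):

#include <cstddef>
#include <vector>

// Caffe-style Flatten: collapse dims [axis, numAxes] inclusive, keep the rest.
std::vector<size_t> flattenShape(const std::vector<size_t>& in, size_t axis, size_t numAxes) {
    std::vector<size_t> out(in.begin(), in.begin() + axis);
    size_t flat = 1;
    for (size_t i = axis; i <= numAxes; ++i) flat *= in[i];
    out.push_back(flat);
    out.insert(out.end(), in.begin() + numAxes + 1, in.end());
    return out;
}

// Example: {2, 3, 4, 5} with axis = 1, numAxes = 2 -> {2, 12, 5}.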

View File

@ -1,47 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Gather layer
*/
class GatherShapeProp : public BuiltInShapeInferImpl {
public:
explicit GatherShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
GatherLayer gatherLayer(lp);
gatherLayer.params = params;
gatherLayer.type = _type;
validate(&gatherLayer, inBlobs, params, blobs);
int axis = gatherLayer.axis;
if (axis < 0) axis += inShapes[0].size();
outShapes.resize(1);
outShapes[0].resize(inShapes[0].size() + inShapes[1].size() - 1);
for (int i = 0; i < axis; i++) outShapes[0][i] = inShapes[0][i];
for (size_t i = 0; i < inShapes[1].size(); i++) outShapes[0][i + axis] = inShapes[1][i];
for (size_t i = axis + 1; i < inShapes[0].size(); i++)
outShapes[0][i + inShapes[1].size() - 1] = inShapes[0][i];
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
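
The output rank is rank(data) + rank(indices) - 1: data dims before the axis, then the whole indices shape, then the data dims after the axis. A compact sketch (helper name made up):

#include <cstddef>
#include <vector>

// Gather: splice the indices shape into the data shape in place of dim `axis`.
std::vector<size_t> gatherShape(const std::vector<size_t>& data,
                                const std::vector<size_t>& indices, size_t axis) {
    std::vector<size_t> out(data.begin(), data.begin() + axis);
    out.insert(out.end(), indices.begin(), indices.end());
    out.insert(out.end(), data.begin() + axis + 1, data.end());
    return out;
}

// Example: data {5, 6, 7}, indices {2, 3}, axis = 1 -> {5, 2, 3, 7}.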

View File

@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for GatherTree layer
*/
class GatherTreeShapeProp : public BuiltInShapeInferImpl {
public:
explicit GatherTreeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
GatherLayer gatherLayer(lp);
gatherLayer.params = params;
gatherLayer.type = _type;
validate(&gatherLayer, inBlobs, params, blobs);
outShapes.resize(1, inShapes[0]);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,61 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <cmath>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Gemm layer
*/
class GemmShapeProp : public BuiltInShapeInferImpl {
public:
explicit GemmShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
// TODO: primitive does not support 5D tensor yet
LayerParams lp {};
GemmLayer gemmLayer(lp);
gemmLayer.params = params;
gemmLayer.type = _type;
validate(&gemmLayer, inBlobs, params, blobs);
auto dims0 = inShapes[0];
auto dims1 = inShapes[1];
SizeVector shapes;
for (int idx = 0; idx < dims0.size() - 2; idx++) {
unsigned long max_dim = dims0[idx] > dims1[idx] ? dims0[idx] : dims1[idx];
if (inShapes.size() == 3) {
auto dims2 = inShapes[2];
max_dim = max_dim > dims2[idx] ? max_dim : dims2[idx];
}
shapes.push_back(max_dim);
}
unsigned long rowAxis = gemmLayer.transpose_a ? dims0.size() - 1 : dims0.size() - 2;  // M: rows of op(A)
unsigned long colAxis = gemmLayer.transpose_b ? dims1.size() - 2 : dims1.size() - 1;  // N: cols of op(B)
shapes.push_back(dims0[rowAxis]);
shapes.push_back(dims1[colAxis]);
outShapes.push_back(shapes);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
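
The output is the broadcast batch dims followed by the rows of op(A) and the columns of op(B). A shape-only sketch of the two-input case (plain vectors; helper name made up):

#include <algorithm>
#include <cstddef>
#include <vector>

// Gemm output shape: per-dimension max over batch dims, then M x N.
std::vector<size_t> gemmShape(const std::vector<size_t>& a, const std::vector<size_t>& b,
                              bool transposeA, bool transposeB) {
    std::vector<size_t> out;
    for (size_t i = 0; i + 2 < a.size(); ++i)  // broadcast the batch dims
        out.push_back(std::max(a[i], b[i]));
    out.push_back(a[transposeA ? a.size() - 1 : a.size() - 2]);  // M: rows of op(A)
    out.push_back(b[transposeB ? b.size() - 2 : b.size() - 1]);  // N: cols of op(B)
    return out;
}

// Example: a {2, 3, 4}, b {2, 4, 5}, no transposes -> {2, 3, 5}.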

View File

@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
class InnerProductShapeProp : public BuiltInShapeInferImpl {
public:
explicit InnerProductShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
FullyConnectedLayer fcLayer(lp);
fcLayer.params = params;
fcLayer.type = _type;
validate(&fcLayer, inBlobs, params, blobs);
size_t ON = inShapes[0][0];
size_t OC = fcLayer._out_num;
outShapes.emplace_back(std::initializer_list<size_t> {ON, OC});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,99 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Interp layer
*/
class InterpShapeProp : public BuiltInShapeInferImpl {
public:
explicit InterpShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
SizeVector outShape;
if (inBlobs.size() == 2) {
auto* buffer = inBlobs[1]->cbuffer().as<float*>();
if (buffer != nullptr) {
for (int i = 0; i < inBlobs[1]->size(); i++) {
outShape.push_back(static_cast<unsigned long>(buffer[i]));
}
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else {
auto factor = cnnLayer.GetParamAsFloat("factor", 0);
auto shrink_factor = cnnLayer.GetParamAsFloat("shrink_factor", 0);
auto zoom_factor = cnnLayer.GetParamAsFloat("zoom_factor", 0);
auto height = static_cast<size_t>(cnnLayer.GetParamAsInt("height", 0));
auto width = static_cast<size_t>(cnnLayer.GetParamAsInt("width", 0));
auto IS_ZERO = [](float value) {
return std::fabs(value) < std::numeric_limits<float>::epsilon();
};
bool noFactor = IS_ZERO(zoom_factor) && IS_ZERO(shrink_factor) && IS_ZERO(factor);
size_t N, C, H, W;
N = inShapes[0][0];
C = inShapes[0][1];
H = inShapes[0][2];
W = inShapes[0][3];
auto SETW = [&width, &W](size_t value) {
if (width) {
W = width;
} else {
W = value;
}
};
auto SETH = [&height, &H](size_t value) {
if (height) {
H = height;
} else {
H = value;
}
};
if (noFactor) {
SETW(width);
SETH(height);
} else {
float actualFactor = factor;
if (!IS_ZERO(shrink_factor) || !IS_ZERO(zoom_factor)) {
if (!IS_ZERO(zoom_factor)) actualFactor = zoom_factor;
if (!IS_ZERO(shrink_factor)) actualFactor /= shrink_factor;
}
SETW(W * actualFactor);
SETH(H * actualFactor);
}
outShape = {N, C, H, W};
}
outShapes.push_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
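
When no explicit height/width is given, the effective scale is resolved from three parameters: zoom_factor overrides factor, and shrink_factor divides the result. A tiny standalone sketch of just that resolution (helper name made up):

#include <cmath>
#include <limits>

// Resolve the effective Interp scale from factor / zoom_factor / shrink_factor.
float effectiveFactor(float factor, float zoom, float shrink) {
    auto isZero = [](float v) { return std::fabs(v) < std::numeric_limits<float>::epsilon(); };
    float f = factor;
    if (!isZero(zoom) || !isZero(shrink)) {
        if (!isZero(zoom)) f = zoom;
        if (!isZero(shrink)) f /= shrink;
    }
    return f;
}

// Example: factor = 0, zoom_factor = 4, shrink_factor = 2 -> 2, i.e. H and W double.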

View File

@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for NonMaxSuppression layer
*/
class NMSShapeProp : public BuiltInShapeInferImpl {
public:
explicit NMSShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
NonMaxSuppressionLayer nmsLayer(lp);
nmsLayer.params = params;
nmsLayer.type = _type;
validate(&nmsLayer, inBlobs, params, blobs);
outShapes.push_back({inShapes[1][0] * inShapes[1][1] * inShapes[1][2], 3});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,46 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for the OneHot layer
*/
class OneHotShapeProp : public BuiltInShapeInferImpl {
public:
explicit OneHotShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlob, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
OneHotLayer oneHotLayer(lp);
oneHotLayer.params = params;
oneHotLayer.type = _type;
validate(&oneHotLayer, inBlob, params, blobs);
auto& inShape = inShapes[0];
SizeVector outShape;
auto actual_axis = (oneHotLayer.axis == -1) ? inShape.size() : oneHotLayer.axis;
for (std::size_t idx = 0; idx < inShape.size() + 1; ++idx) {
if (idx < actual_axis)
outShape.push_back(inShape[idx]);
else if (idx == actual_axis)
outShape.push_back(oneHotLayer.depth);
else
outShape.push_back(inShape[idx - 1]);
}
outShapes.push_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
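
The depth value is spliced in at the (normalized) axis and every later dim shifts right; axis == -1 appends it after the last dim. A compact sketch:

#include <cstddef>
#include <vector>

// OneHot: insert `depth` at `axis` (axis == -1 means append after the last dim).
std::vector<size_t> oneHotShape(std::vector<size_t> dims, int axis, size_t depth) {
    const size_t pos = (axis == -1) ? dims.size() : static_cast<size_t>(axis);
    dims.insert(dims.begin() + pos, depth);
    return dims;
}

// Example: {4, 5} with axis = 1, depth = 10 -> {4, 10, 5}.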

View File

@ -1,40 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Pad layer
*/
class PadShapeProp : public BuiltInShapeInferImpl {
public:
explicit PadShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
PadLayer padLayer(lp);
padLayer.params = params;
padLayer.type = _type;
validate(&padLayer, inBlobs, params, blobs);
outShapes.push_back(inShapes[0]);
for (size_t i = 0; i < outShapes[0].size(); i++) {
outShapes[0][i] += padLayer.pads_begin[i] + padLayer.pads_end[i];
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,48 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Permute layer
*/
class PermuteShapeProp : public BuiltInShapeInferImpl {
public:
explicit PermuteShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer permuteLayer(lp);
permuteLayer.params = params;
permuteLayer.type = _type;
validate(&permuteLayer, inBlobs, params, blobs);
std::vector<size_t> order;
std::vector<int> layerOrder = permuteLayer.GetParamAsInts("order");
for (auto ord : layerOrder) order.push_back(static_cast<size_t>(ord));
SizeVector outShape;
for (size_t i = 0; i < inShapes[0].size(); i++) {
outShape.push_back(inShapes[0][order[i]]);
}
outShapes.emplace_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,88 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Pooling layer
*/
class PoolingShapeProp : public BuiltInShapeInferImpl {
public:
explicit PoolingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
PoolingLayer poolLayer(lp);
poolLayer.params = params;
poolLayer.type = _type;
validate(&poolLayer, inBlobs, params, blobs);
auto dims = inShapes[0];
auto dims_size = dims.size();
auto spacial_d_size = dims.size() - 2;
std::vector<float> OD_temp(spacial_d_size, 1.f);
size_t inputN = dims[0];
size_t IC = dims[1];
std::string padType = poolLayer._auto_pad;
if (padType == "valid") {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = std::ceil((dims[dims_size - 1 - i] - poolLayer._kernel[i] + 1.f) / poolLayer._stride[i]);
} else if (padType == "same_upper") {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = std::ceil(1.f * dims[dims_size - 1 - i] / poolLayer._stride[i]);
} else if (padType == "same_lower") {
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] = std::floor(1.f * dims[dims_size - 1 - i] / poolLayer._stride[i]);
} else {
auto it = std::find_if(poolLayer.params.begin(), poolLayer.params.end(),
[](decltype(*poolLayer.params.begin())& lhs) {
return lhs.first == "rounding-type" || lhs.first == "rounding_type";
});
bool isCeil = true;
if (it != poolLayer.params.end()) {
if (it->second == "floor") isCeil = false;
}
for (int i = 0; i < spacial_d_size; i++)
OD_temp[i] +=
1.f *
(dims[dims_size - 1 - i] + poolLayer._padding[i] + poolLayer._pads_end[i] - poolLayer._kernel[i]) /
poolLayer._stride[i];
if (isCeil) {
for (int i = 0; i < spacial_d_size; i++) OD_temp[i] = std::ceil(OD_temp[i]);
} else {
for (int i = 0; i < spacial_d_size; i++) OD_temp[i] = std::floor(OD_temp[i]);
}
for (int i = 0; i < spacial_d_size; i++)
if ((OD_temp[i] - 1) * poolLayer._stride[i] >= dims[dims_size - 1 - i] + poolLayer._padding[i])
--OD_temp[i];
}
for (int i = 0; i < spacial_d_size; i++)
if (OD_temp[i] < 0)
THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
SizeVector outShape = {inputN, IC};
for (int i = spacial_d_size - 1; i >= 0; i--) outShape.push_back(static_cast<size_t>(OD_temp[i]));
outShapes.emplace_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
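
For explicit padding the per-axis computation is (in + pad_begin + pad_end - kernel) / stride + 1, rounded per rounding_type, minus one if the last window would start entirely in the trailing padding. A standalone sketch (assumes kernel <= padded input; helper name made up):

#include <cmath>
#include <cstddef>

// Explicit-padding pooling output size with ceil/floor rounding and the
// "last window must start inside input + pad_begin" correction from above.
size_t poolOutDim(size_t in, size_t kernel, size_t stride,
                  size_t padBegin, size_t padEnd, bool ceilMode) {
    float od = 1.f + static_cast<float>(in + padBegin + padEnd - kernel) / stride;
    od = ceilMode ? std::ceil(od) : std::floor(od);
    if ((od - 1) * stride >= in + padBegin) --od;  // window would cover padding only
    return static_cast<size_t>(od);
}

// Example: in = 7, kernel = 3, stride = 2, pads 1/1, ceil -> (7 + 2 - 3) / 2 + 1 = 4.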

View File

@ -1,42 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for PriorBoxClustered layer
*/
class PriorBoxClusteredShapeProp : public BuiltInShapeInferImpl {
public:
explicit PriorBoxClusteredShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
std::vector<float> widths = cnnLayer.GetParamAsFloats("width", {});
size_t res_prod = widths.size() * 4;
for (int i = 2; i < inShapes[0].size(); i++) res_prod *= inShapes[0][i];
outShapes.push_back({1, 2, res_prod});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,55 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for PriorBox layer
*/
class PriorBoxShapeProp : public BuiltInShapeInferImpl {
public:
explicit PriorBoxShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
std::vector<float> min_sizes = cnnLayer.GetParamAsFloats("min_size", {});
std::vector<float> max_sizes = cnnLayer.GetParamAsFloats("max_size", {});
bool flip = static_cast<bool>(cnnLayer.GetParamAsInt("flip"));
const std::vector<float> aspect_ratios = cnnLayer.GetParamAsFloats("aspect_ratio", {});
size_t num_priors = 0;
bool scale_all_sizes = static_cast<bool>(cnnLayer.GetParamAsInt("scale_all_sizes", 1));
if (scale_all_sizes) {
num_priors = ((flip ? 2 : 1) * aspect_ratios.size() + 1) * min_sizes.size() + max_sizes.size();
} else {
num_priors = (flip ? 2 : 1) * aspect_ratios.size() + min_sizes.size() - 1;
}
size_t res_prod = num_priors * 4;
for (int i = 2; i < inShapes[0].size(); i++) res_prod *= inShapes[0][i];
outShapes.push_back({1, 2, res_prod});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
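
The num_priors arithmetic is the part worth double-checking; a standalone sketch of both branches plus worked numbers (helper name made up):

#include <cstddef>

// Priors per spatial location, mirroring the scale_all_sizes branches above.
size_t numPriors(size_t nMin, size_t nMax, size_t nAspect, bool flip, bool scaleAllSizes) {
    const size_t ar = (flip ? 2 : 1) * nAspect;
    return scaleAllSizes ? (ar + 1) * nMin + nMax : ar + nMin - 1;
}

// Example: 1 min_size, 1 max_size, 2 aspect_ratios, flip, scale_all_sizes
// -> (2 * 2 + 1) * 1 + 1 = 6 priors, so the output is {1, 2, 6 * 4 * H * W}
// for an H x W feature map.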

View File

@ -1,41 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for PSRoiPooling layer
*/
class PSRoiPoolingShapeProp : public BuiltInShapeInferImpl {
public:
explicit PSRoiPoolingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t output_dim = static_cast<size_t>(cnnLayer.GetParamAsInt("output_dim"));
size_t group_size = static_cast<size_t>(cnnLayer.GetParamAsInt("group_size"));
outShapes.push_back({inShapes[1][0], output_dim, group_size, group_size});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,40 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <cmath>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for quantize layer
*/
class QuantizeShapeProp : public BuiltInShapeInferImpl {
public:
explicit QuantizeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
QuantizeLayer quantizeLayer(lp);
quantizeLayer.params = params;
quantizeLayer.type = _type;
validate(&quantizeLayer, inBlobs, params, blobs);
outShapes.push_back(inShapes[0]);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,49 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Range layer
*/
class RangeShapeProp : public BuiltInShapeInferImpl {
public:
explicit RangeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
RangeLayer rangeLayer(lp);
rangeLayer.params = params;
rangeLayer.type = _type;
validate(&rangeLayer, inBlobs, params, blobs);
const size_t RANGE_START = 0;
const size_t RANGE_LIMIT = 1;
const size_t RANGE_DELTA = 2;
float start = (inBlobs[RANGE_START]->cbuffer().as<float*>() +
inBlobs[RANGE_START]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0];
float limit = (inBlobs[RANGE_LIMIT]->cbuffer().as<float*>() +
inBlobs[RANGE_LIMIT]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0];
float delta = (inBlobs[RANGE_DELTA]->cbuffer().as<float*>() +
inBlobs[RANGE_DELTA]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0];
size_t work_amount_dst = std::floor(std::abs((limit - start) / delta));
outShapes = {{work_amount_dst}};
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
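
The element count is floor(|limit - start| / delta); the end point is exclusive. A one-line sketch:

#include <cmath>
#include <cstddef>

// Number of elements produced by Range(start, limit, delta), as computed above.
size_t rangeSize(float start, float limit, float delta) {
    return static_cast<size_t>(std::floor(std::abs((limit - start) / delta)));
}

// Example: Range(3, 18, 3) -> floor(15 / 3) = 5 elements: 3, 6, 9, 12, 15.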

View File

@ -1,75 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Reduce layer
*/
class ReduceShapeProp : public BuiltInShapeInferImpl {
public:
explicit ReduceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
ReduceLayer reduceLayer(lp);
reduceLayer.params = params;
reduceLayer.type = _type;
validate(&reduceLayer, inBlobs, params, blobs);
const size_t REDUCE_DATA = 0;
const size_t REDUCE_INDEXES = 1;
if (inBlobs.size() < 2) THROW_IE_EXCEPTION << " Incorrect number of inputs";
SizeVector idx_dims = inBlobs[REDUCE_INDEXES]->getTensorDesc().getDims();
if (idx_dims.size() > 1) THROW_IE_EXCEPTION << " Index vector should be 1-dimensional";
if (inBlobs[REDUCE_INDEXES]->getTensorDesc().getPrecision() != Precision::I32)
THROW_IE_EXCEPTION << " Incorrect 'axes_to_reduction' input precision. Only I32 is supported!";
SizeVector data_dims = inBlobs[REDUCE_DATA]->getTensorDesc().getDims();
int32_t* idx_data = inBlobs[REDUCE_INDEXES]->cbuffer().as<int32_t*>() +
inBlobs[REDUCE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
SizeVector axes;
for (size_t i = 0; i < idx_dims[0]; i++) {
int32_t axis = idx_data[i];
if (axis < 0) axis += data_dims.size();
if (static_cast<size_t>(axis) >= data_dims.size())
THROW_IE_EXCEPTION << " Index to reduce exceeds data tensor dimension";
axes.push_back(static_cast<size_t>(axis));
}
bool keep_dims = reduceLayer.keep_dims;
SizeVector outShape;
SizeVector src_dims = inBlobs[REDUCE_DATA]->getTensorDesc().getDims();
for (size_t i = 0; i < src_dims.size(); i++) {
bool found = false;
for (size_t axis : axes)
if (i == axis) found = true;
if (found) {
if (keep_dims) outShape.push_back(1);
} else {
outShape.push_back(src_dims[i]);
}
}
outShapes.push_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,67 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for RegionYolo layer
*/
class RegionYoloShapeProp : public BuiltInShapeInferImpl {
public:
explicit RegionYoloShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer layer(lp);
layer.params = params;
int classes = layer.GetParamAsInt("classes", 1);
int coords = layer.GetParamAsInt("coords", 1);
int num = layer.GetParamAsInt("num", 1);
bool do_softmax = static_cast<bool>(layer.GetParamAsInt("do_softmax", 1));
std::vector<int> mask = layer.GetParamAsInts("mask", {});
unsigned int axis = layer.GetParamAsUInt("axis", 1);
int end_axis = layer.GetParamAsInt("end_axis", 1);
if (end_axis < 0) end_axis += inShapes[0].size();
SizeVector outShape;
if (do_softmax) {
size_t flat_dim = 1;
for (size_t i = 0; i < axis; i++) {
outShape.push_back(inShapes[0][i]);
}
for (size_t i = axis; i < end_axis + 1; i++) {
flat_dim *= inShapes[0][i];
}
outShape.push_back(flat_dim);
for (size_t i = end_axis + 1; i < inShapes[0].size(); i++) {
outShape.push_back(inShapes[0][i]);
}
} else {
outShape = {inShapes[0][0], (classes + coords + 1) * mask.size(), inShapes[0][2], inShapes[0][3]};
}
outShapes.push_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,49 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ReorgYolo layer
*/
class ReorgYoloShapeProp : public BuiltInShapeInferImpl {
public:
explicit ReorgYoloShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t stride = static_cast<size_t>(cnnLayer.GetParamAsInt("stride"));
SizeVector outShape;
for (size_t i = 0; i < inShapes[0].size(); i++) {
outShape.push_back(inShapes[0][i]);
if (i == 1) {
outShape[outShape.size() - 1] *= stride * stride;
} else if (i > 1) {
outShape[outShape.size() - 1] /= stride;
}
}
outShapes.push_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,75 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Resample layer
*/
class ResampleShapeProp : public BuiltInShapeInferImpl {
public:
explicit ResampleShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
SizeVector outShape;
if (inBlobs.size() == 2) {
switch (inBlobs[1]->getTensorDesc().getPrecision()) {
case Precision::FP32: {
auto* buffer = inBlobs[1]->cbuffer().as<float*>();
if (buffer != nullptr) {
for (int i = 0; i < inBlobs[1]->size(); i++) {
outShape.push_back(static_cast<unsigned long>(buffer[i]));
}
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
break;
}
case Precision::I32: {
auto* buffer = inBlobs[1]->cbuffer().as<int32_t*>();
if (buffer != nullptr) {
for (int i = 0; i < inBlobs[1]->size(); i++) {
outShape.push_back(static_cast<unsigned long>(buffer[i]));
}
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
break;
}
default:
THROW_IE_EXCEPTION << "Unsupported second input precision";
}
} else {
auto scale = cnnLayer.GetParamAsFloat("factor");
outShape = {inShapes[0][0], inShapes[0][1]};
for (int i = 2; i < inShapes[0].size(); i++)
outShape.push_back(static_cast<size_t>(std::ceil(inShapes[0][i] * scale)));
}
outShapes.push_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,120 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <functional>
#include <numeric>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
#include "precision_utils.h"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Reshape layer
*/
class ReshapeShapeProp : public BuiltInShapeInferImpl {
public:
explicit ReshapeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
ReshapeLayer reshapeLayer(lp);
reshapeLayer.params = params;
reshapeLayer.type = _type;
validate(&reshapeLayer, inBlobs, params, blobs);
SizeVector outShape;
std::vector<int> reshapeMask;
if (inBlobs.size() == 2) {
if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP32) {
auto* buffer = inBlobs[1]->cbuffer().as<float*>();
if (buffer != nullptr) {
for (int i = 0; i < inBlobs[1]->size(); i++) {
reshapeMask.push_back(static_cast<int>(buffer[i]));
}
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
auto* buffer = inBlobs[1]->cbuffer().as<int*>();
if (buffer != nullptr) {
reshapeMask.assign(buffer, buffer + inBlobs[1]->size());
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I64) {
auto* buffer = inBlobs[1]->cbuffer().as<int64_t*>();
if (buffer != nullptr) {
reshapeMask.assign(buffer, buffer + inBlobs[1]->size());
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::U64) {
auto* buffer = inBlobs[1]->cbuffer().as<uint64_t*>();
if (buffer != nullptr) {
reshapeMask.assign(buffer, buffer + inBlobs[1]->size());
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP16) {
auto* buffer = inBlobs[1]->cbuffer().as<uint16_t*>();
if (buffer != nullptr) {
for (int i = 0; i < inBlobs[1]->size(); i++) {
reshapeMask.push_back(static_cast<int>(PrecisionUtils::f16tof32(buffer[i])));
}
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else {
THROW_IE_EXCEPTION << "Second input has unsupported precision";
}
} else {
reshapeMask = reshapeLayer.shape;
}
auto inputShape = inShapes[0];
size_t inputShapeTotal = std::accumulate(inputShape.begin(), inputShape.end(), 1lu, std::multiplies<size_t>());
if (reshapeMask.empty()) {
outShape = {inputShapeTotal};
} else {
size_t res = 1;
for (int i = 0; i < reshapeMask.size(); i++) {
if (reshapeMask[i] == 0) {
res *= inputShape[i];
} else if (reshapeMask[i] != -1) {
res *= reshapeMask[i];
}
}
size_t newDim = inputShapeTotal / res;
for (int i = 0; i < reshapeMask.size(); i++) {
if (reshapeMask[i] == 0) {
outShape.push_back(inputShape[i]);
} else if (reshapeMask[i] == -1) {
outShape.push_back(newDim);
} else {
outShape.push_back(reshapeMask[i]);
}
}
size_t outputShapeTotal = std::accumulate(outShape.begin(), outShape.end(), 1lu, std::multiplies<size_t>());
if (inputShapeTotal != outputShapeTotal) {
THROW_IE_EXCEPTION << "Invalid reshape mask (dim attribute): number of elements in input: "
<< details::dumpVec(inputShape) << " and output: " << details::dumpVec(outShape)
<< " mismatch";
}
}
outShapes.emplace_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
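
The mask semantics: a positive value is taken literally, 0 copies the input dim at the same position, and a single -1 absorbs whatever element count remains. A compact sketch of that resolution (plain vectors; helper name made up):

#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

// Resolve a Reshape mask: 0 -> copy input dim, -1 -> inferred, else literal value.
std::vector<size_t> resolveReshapeMask(const std::vector<size_t>& in,
                                       const std::vector<int>& mask) {
    const size_t total = std::accumulate(in.begin(), in.end(), size_t{1},
                                         std::multiplies<size_t>());
    size_t known = 1;
    for (size_t i = 0; i < mask.size(); ++i)
        known *= mask[i] == 0 ? in[i] : (mask[i] == -1 ? 1 : mask[i]);
    std::vector<size_t> out;
    for (size_t i = 0; i < mask.size(); ++i)
        out.push_back(mask[i] == 0 ? in[i] : (mask[i] == -1 ? total / known : mask[i]));
    return out;
}

// Example: in {2, 3, 4}, mask {0, -1} -> {2, 12}.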

View File

@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ReverseSequence layer
*/
class ReverseSequenceShapeProp : public BuiltInShapeInferImpl {
public:
explicit ReverseSequenceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
ReverseSequenceLayer reverseSequenceLayer(lp);
reverseSequenceLayer.params = params;
reverseSequenceLayer.type = _type;
validate(&reverseSequenceLayer, inBlobs, params, blobs);
outShapes = {inShapes[0]};
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,50 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for RNN Sequence layer
*/
class RNNShapeProp : public BuiltInShapeInferImpl {
public:
explicit RNNShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
RNNSequenceLayer rnn(lp);
rnn.params = params;
rnn.type = _type;
IE_SUPPRESS_DEPRECATED_START
rnn.precision = Precision::FP32; // FIXME: No ability to discover current precision. Assume fp32
IE_SUPPRESS_DEPRECATED_END
validate(&rnn, inBlobs, params, blobs);
int state_size = rnn.hidden_size;
int ns = rnn.cellType == RNNCellBase::LSTM ? 2 : 1;
auto data_dims = inShapes[0];
data_dims[2] = static_cast<size_t>(state_size);
outShapes.push_back(data_dims);
for (int i = 1; i < 1 + ns; i++) {
outShapes.push_back(inShapes[i]);
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,47 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for RoiPooling layer
*/
class RoiPoolingShapeProp : public BuiltInShapeInferImpl {
public:
explicit RoiPoolingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
SizeVector out_shapes = {inShapes[1][0], inShapes[0][1]};
for (auto attr : {"pooled_d", "pooled_h", "pooled_w"}) { // desired IR format: pooled="...,d,h,w"
int pooled = cnnLayer.GetParamAsInt(attr, -1);
if (pooled >= 0) {
out_shapes.push_back(static_cast<size_t>(pooled));
}
}
outShapes.push_back(out_shapes);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,56 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ScatterUpdate layer
*/
class ScatterUpdateShapeProp : public BuiltInShapeInferImpl {
public:
explicit ScatterUpdateShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
ScatterUpdateLayer scatterUpdateLayer(lp);
scatterUpdateLayer.params = params;
scatterUpdateLayer.type = _type;
validate(&scatterUpdateLayer, inBlobs, params, blobs);
outShapes = {inShapes[0]};
}
};
/**
*@brief Implementation of Shape inference for ScatterElementsUpdate layer
*/
class ScatterElementsUpdateShapeProp : public BuiltInShapeInferImpl {
public:
explicit ScatterElementsUpdateShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
ScatterElementsUpdateLayer scatterElementsUpdateLayer(lp);
scatterElementsUpdateLayer.params = params;
scatterElementsUpdateLayer.type = _type;
validate(&scatterElementsUpdateLayer, inBlobs, params, blobs);
outShapes = {inShapes[0]};
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,36 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Select layer
*/
class SelectShapeProp : public BuiltInShapeInferImpl {
public:
explicit SelectShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
SelectLayer selectLayer(lp);
selectLayer.params = params;
selectLayer.type = _type;
validate(&selectLayer, inBlobs, params, blobs);
outShapes.push_back(inShapes[1]);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,33 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Shape layer
*/
class ShapeShapeProp : public BuiltInShapeInferImpl {
public:
explicit ShapeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
outShapes.push_back({inShapes[0].size()});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ShuffleChannels layer
*/
class ShuffleChannelsShapeProp : public BuiltInShapeInferImpl {
public:
explicit ShuffleChannelsShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
ShuffleChannelsLayer shuffleChannelsLayer(lp);
shuffleChannelsLayer.params = params;
shuffleChannelsLayer.type = _type;
validate(&shuffleChannelsLayer, inBlobs, params, blobs);
outShapes = {inShapes[0]};
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,42 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for SpaceToDepth layer
*/
class SpaceToDepthShapeProp : public BuiltInShapeInferImpl {
public:
explicit SpaceToDepthShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
SpaceToDepthLayer spaceToDepthLayer(lp);
spaceToDepthLayer.params = params;
spaceToDepthLayer.type = _type;
validate(&spaceToDepthLayer, inBlobs, params, blobs);
unsigned int block_size = spaceToDepthLayer.block_size;
outShapes = {inShapes[0]};
outShapes[0][outShapes[0].size() - 1] = inShapes[0][inShapes[0].size() - 1] / block_size;
outShapes[0][outShapes[0].size() - 2] = inShapes[0][inShapes[0].size() - 2] / block_size;
outShapes[0][outShapes[0].size() - 3] = inShapes[0][inShapes[0].size() - 3] * block_size * block_size;
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,31 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for SparseFillEmptyRows layer
*/
class SparseFillEmptyRowsShapeProp : public BuiltInShapeInferImpl {
public:
explicit SparseFillEmptyRowsShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
THROW_IE_EXCEPTION << "SparseFillEmptyRows is not re-shapeable layer.";
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,40 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for SparseSegmentReduce layer
*/
class SparseSegmentReduceShapeProp : public BuiltInShapeInferImpl {
public:
explicit SparseSegmentReduceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
SparseSegmentReduceLayer sparse_segment_reduce_layer(lp);
sparse_segment_reduce_layer.params = params;
sparse_segment_reduce_layer.type = _type;
validate(&sparse_segment_reduce_layer, inBlobs, params, blobs);
// reshape output
auto output_shape = inShapes[0];
output_shape[0] = inShapes[1][0];
outShapes = {output_shape};
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,54 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ie_built_in_impl.hpp"
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalSparseWeightedReduce layer
*/
class ExperimentalSparseWeightedReduceShapeProp : public BuiltInShapeInferImpl {
public:
explicit ExperimentalSparseWeightedReduceShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs,
std::vector<SizeVector>& outShapes) override {
LayerParams lp{};
ExperimentalSparseWeightedReduceLayer sparse_weighted_reduce_layer(lp);
sparse_weighted_reduce_layer.params = params;
sparse_weighted_reduce_layer.type = _type;
validate(&sparse_weighted_reduce_layer, inBlobs, params, blobs);
// compute the number of outputs
size_t num_outputs = 1;
// reshape available outputs
outShapes.resize(num_outputs);
outShapes[0] = inShapes[3];
if (inBlobs[2]->getTensorDesc().getPrecision() == Precision::I32) {
auto* buffer = inBlobs[2]->cbuffer().as<int*>();
if (buffer != nullptr) {
outShapes[0][0] = static_cast<size_t>(buffer[0]);
} else {
THROW_IE_EXCEPTION << "The third input must have allocated data";
}
} else {
THROW_IE_EXCEPTION << "The third must have I32 precision";
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,50 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Split layer
*/
class SplitShapeProp : public BuiltInShapeInferImpl {
public:
explicit SplitShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
SplitLayer splitLayer(lp);
splitLayer.params = params;
splitLayer.type = _type;
validate(&splitLayer, inBlobs, params, blobs);
std::vector<int> out_sizes = splitLayer.GetParamAsInts("out_sizes", {});
if (out_sizes.empty()) THROW_IE_EXCEPTION << "Value of out_sizes attribute is empty";
size_t sum(0);
for (const auto& size : out_sizes) sum += size;
if (sum != inShapes[0][splitLayer._axis])
THROW_IE_EXCEPTION << "The sum of the dimensions on the axis(" << splitLayer._axis
<< ") is not equal out_sizes: " << details::dumpVec(out_sizes);
for (const auto& size : out_sizes) {
outShapes.push_back(inShapes[0]);
outShapes[outShapes.size() - 1][splitLayer._axis] = static_cast<size_t>(size);
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
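
A standalone sketch of the out_sizes consistency check performed above, with a plain vector standing in for the IE types (an assumption for illustration): the per-output sizes must sum to the input extent along the split axis, and each output shape is the input shape with that axis replaced.

#include <cassert>
#include <numeric>
#include <stdexcept>
#include <vector>

using SizeVector = std::vector<std::size_t>;

// Mirrors SplitShapeProp: one output shape per entry of out_sizes.
std::vector<SizeVector> splitShapes(const SizeVector& in, std::size_t axis,
                                    const std::vector<int>& out_sizes) {
    const std::size_t sum =
        std::accumulate(out_sizes.begin(), out_sizes.end(), std::size_t{0});
    if (sum != in[axis])
        throw std::runtime_error("out_sizes do not sum to the axis dimension");
    std::vector<SizeVector> out;
    for (int size : out_sizes) {
        out.push_back(in);
        out.back()[axis] = static_cast<std::size_t>(size);
    }
    return out;
}

int main() {
    auto shapes = splitShapes({1, 6, 4}, 1, {2, 4});
    assert((shapes[0] == SizeVector{1, 2, 4}));
    assert((shapes[1] == SizeVector{1, 4, 4}));
    return 0;
}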

View File

@ -1,122 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Squeeze layer
*/
class SqueezeShapeProp : public BuiltInShapeInferImpl {
public:
explicit SqueezeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer layer(lp);
layer.params = params;
layer.type = _type;
validate(&layer, inBlobs, params, blobs);
const size_t SQUEEZE_DATA = 0;
const size_t SQUEEZE_INDEXES = 1;
SizeVector data_dims;
SizeVector idx_dims;
idx_dims = inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getDims();
if (idx_dims.size() > 1) THROW_IE_EXCEPTION << " Index vector should be 1 dimension";
data_dims = inBlobs[SQUEEZE_DATA]->getTensorDesc().getDims();
if (data_dims.size() <= idx_dims[0] && !(data_dims.size() == 1 && idx_dims[0] == 1))
THROW_IE_EXCEPTION << " Incompatible number of data dimensions and indexes vector length!";
SizeVector outShape;
switch (inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getPrecision()) {
case Precision::FP32: {
procIndices<float>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
} break;
case Precision::FP16: {
procIndices<ie_fp16>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
} break;
case Precision::I32: {
procIndices<int32_t>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
} break;
case Precision::I64: {
procIndices<int64_t>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
} break;
case Precision::U64: {
procIndices<uint64_t>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
} break;
default:
THROW_IE_EXCEPTION
<< "Incorrect 'indices_to_squeeze' input precision. Only FP32, FP16, I32, I64 and U64 are supported!";
}
outShapes.push_back(outShape);
}
private:
template <typename T>
void procIndices(const std::vector<Blob::CPtr>& inBlobs, const size_t SQUEEZE_INDEXES, SizeVector& data_dims,
SizeVector& outShape, const SizeVector& idx_dims) {
T* idx_data = inBlobs[SQUEEZE_INDEXES]->cbuffer().as<T*>() +
inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
for (size_t i = 0; i < idx_dims[0]; i++) {
auto axis = castToInt32(idx_data[i]);
if (axis < 0) axis += data_dims.size();
if (axis >= data_dims.size()) {
THROW_IE_EXCEPTION << "Index to squeeze exceeds data tensor dimension";
} else if (data_dims[axis] != 1) {
THROW_IE_EXCEPTION << "Index to squeeze of data tensor dimension is not 1";
}
}
for (size_t j = 0; j < data_dims.size(); j++) {
bool found = false;
for (size_t i = 0; i < inBlobs[SQUEEZE_INDEXES]->size(); i++) {
auto axis = castToInt32(idx_data[i]);
if (axis < 0) axis += data_dims.size();
if (j == static_cast<size_t>(axis)) found = true;
}
if (!found) outShape.push_back(data_dims[j]);
}
}
int32_t castToInt32(ie_fp16 x) {
return static_cast<int32_t>(InferenceEngine::PrecisionUtils::f16tof32(x));
}
int32_t castToInt32(uint64_t x) {
return static_cast<int32_t>(x);
}
int32_t castToInt32(int64_t x) {
return static_cast<int32_t>(x);
}
int32_t castToInt32(int32_t x) {
return x;
}
int32_t castToInt32(float x) {
return static_cast<int32_t>(x);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
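
A standalone sketch of the squeeze rule implemented by procIndices above, assuming already-normalized non-negative axes and plain vectors instead of the IE blob machinery: every listed axis must have extent 1, and the output keeps all remaining dimensions in order.

#include <algorithm>
#include <cassert>
#include <stdexcept>
#include <vector>

using SizeVector = std::vector<std::size_t>;

SizeVector squeezeShape(const SizeVector& dims, const std::vector<std::size_t>& axes) {
    for (std::size_t a : axes) {
        if (a >= dims.size()) throw std::runtime_error("axis out of range");
        if (dims[a] != 1) throw std::runtime_error("squeezed dimension is not 1");
    }
    SizeVector out;
    for (std::size_t j = 0; j < dims.size(); j++) {
        // Keep only the dimensions that were not listed in axes.
        if (std::find(axes.begin(), axes.end(), j) == axes.end())
            out.push_back(dims[j]);
    }
    return out;
}

int main() {
    assert((squeezeShape({1, 3, 1, 5}, {0, 2}) == SizeVector{3, 5}));
    return 0;
}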

View File

@ -1,34 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <shape_infer/const_infer/ie_strided_slice_const_infer.hpp>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for StridedSlice layer
*/
class StridedSliceShapeProp : public BuiltInShapeInferImpl {
public:
explicit StridedSliceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
StridedSliceHelper helper(inBlobs, params);
outShapes.push_back(helper.getOutputShape());
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,104 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <shape_infer/ie_reshaper.hpp>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for TensorIterator layer
*/
class TensorIteratorShapeProp : public BuiltInShapeInferImpl {
public:
explicit TensorIteratorShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void setOriginalLayer(const CNNLayer* layer) {
auto ti = dynamic_cast<const TensorIterator*>(layer);
if (!ti) THROW_IE_EXCEPTION << "Error during shape infer. Original layer is not TensorIterator.";
_original_ti = ti;
}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
TensorIterator ti(lp);
ti.params = params;
ti.type = _type;
ti.body = _original_ti->body;
ti.back_edges = _original_ti->back_edges;
ti.input_port_map = _original_ti->input_port_map;
ti.output_port_map = _original_ti->output_port_map;
validate(&ti, inBlobs, params, blobs);
// TODO: make util function to calculate num of iteration
int num_iteration = 1;
// Prepare input shapes for internal body
std::map<std::string, std::vector<size_t>> newInShapes;
for (auto& port_map : ti.input_port_map) {
int ext_port = port_map.from;
int int_port = port_map.to;
auto int_name = ti.body.inputs[int_port]->getName();
auto shape = inShapes[ext_port];
if (port_map.axis != -1) {
int size = shape[port_map.axis];
int start = port_map.start < 0 ? port_map.start + size + 1 : port_map.start;
int end = port_map.end < 0 ? port_map.end + size + 1 : port_map.end;
num_iteration = std::abs(end - start) / std::abs(port_map.stride);
// port iterated over: replace the full extent with the per-iteration chunk size
shape[port_map.axis] = port_map.part_size;
}
newInShapes[int_name] = shape;
}
// Body shape infer
_body_reshaper = std::make_shared<Reshaper>(_original_ti->body.inputs);
_body_reshaper->runNoApply(newInShapes);
outShapes.resize(ti.output_port_map.size());
for (auto& port_map : ti.output_port_map) {
int ext_port = port_map.from;
int int_port = port_map.to;
auto& int_out_data = ti.body.outputs[int_port];
auto shape = _body_reshaper->getResultShapeFor(int_out_data);
if (port_map.axis != -1) {
// port iterated over: scale the chunk size back up by the iteration count
shape[port_map.axis] *= num_iteration;
}
outShapes[ext_port] = shape;
}
}
void apply() {
if (!_body_reshaper)
THROW_IE_EXCEPTION << "Request of apply reshape results while shape infer was not finished";
_body_reshaper->apply();
_body_reshaper.reset(); // WA: reset _body_reshaper to release ownership for input data
}
private:
const TensorIterator* _original_ti = nullptr;
std::shared_ptr<Reshaper> _body_reshaper;
};
} // namespace ShapeInfer
} // namespace InferenceEngine
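
The iteration count above is derived from the port map: negative start/end are normalized against the axis size with the +1 convention used in the loop, then num_iteration = |end - start| / |stride|. A standalone sketch of that arithmetic with plain ints rather than the IE port-map types:

#include <cassert>
#include <cstdlib>

// Mirrors the num_iteration computation in TensorIteratorShapeProp.
int numIterations(int start, int end, int stride, int axis_size) {
    if (start < 0) start += axis_size + 1;
    if (end < 0) end += axis_size + 1;
    return std::abs(end - start) / std::abs(stride);
}

int main() {
    // Iterating a dimension of size 10 one element at a time: 10 iterations.
    assert(numIterations(0, -1, 1, 10) == 10);
    // Reverse iteration with stride -2 over the same range: 5 iterations.
    assert(numIterations(-1, 0, -2, 10) == 5);
    return 0;
}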

View File

@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Tile layer
*/
class TileShapeProp : public BuiltInShapeInferImpl {
public:
explicit TileShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
TileLayer tileLayer(lp);
tileLayer.params = params;
tileLayer.type = _type;
validate(&tileLayer, inBlobs, params, blobs);
outShapes.push_back(inShapes[0]);
outShapes[0][tileLayer.axis] *= tileLayer.tiles;
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
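
The Tile rule above is a one-line shape transform; a minimal sketch with a hypothetical helper and a plain vector:

#include <cassert>
#include <vector>

using SizeVector = std::vector<std::size_t>;

SizeVector tileShape(SizeVector in, std::size_t axis, std::size_t tiles) {
    in[axis] *= tiles;  // only the tiled axis grows
    return in;
}

int main() {
    assert((tileShape({2, 3, 4}, 1, 5) == SizeVector{2, 15, 4}));
    return 0;
}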

View File

@ -1,67 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for TopK layer
*/
class TopKShapeProp : public BuiltInShapeInferImpl {
public:
explicit TopKShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
TopKLayer topKLayer(lp);
topKLayer.params = params;
topKLayer.type = _type;
validate(&topKLayer, inBlobs, params, blobs);
const size_t TOPK_DATA = 0;
const size_t TOPK_K = 1;
if (inBlobs[TOPK_DATA]->getTensorDesc().getPrecision() != Precision::FP32)
THROW_IE_EXCEPTION << " Incorrect input data tensor precision. Only FP32 is supported!";
if (inBlobs[TOPK_K]->getTensorDesc().getPrecision() != Precision::I32)
THROW_IE_EXCEPTION << " Incorrect input index value precision. Only I32 is supported!";
if (inBlobs[TOPK_K]->getTensorDesc().getDims().size() > 1)
THROW_IE_EXCEPTION << " Index vector should be 1 dimension";
SizeVector src_dims = inBlobs[TOPK_DATA]->getTensorDesc().getDims();
int axis_ = topKLayer.axis;
if (axis_ < 0) axis_ += src_dims.size();
size_t axis = static_cast<size_t>(axis_);
if (src_dims.size() < (1 + axis))
THROW_IE_EXCEPTION << " Incorrect input parameters dimensions and axis number!";
int* src_k = inBlobs[TOPK_K]->cbuffer().as<int*>();
if (src_k == nullptr) THROW_IE_EXCEPTION << " Only const input for 'k' is supported!";
src_k += inBlobs[TOPK_K]->getTensorDesc().getBlockingDesc().getOffsetPadding();
outShapes.push_back(inShapes[0]);
outShapes.push_back(inShapes[0]);
outShapes[0][axis] = static_cast<size_t>(src_k[0]);
outShapes[1][axis] = static_cast<size_t>(src_k[0]);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
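
TopK above produces two outputs (values and indices) with identical shapes: the input shape with the chosen axis replaced by k, which is read from the constant second input. A standalone sketch, assuming a normalized axis:

#include <cassert>
#include <vector>

using SizeVector = std::vector<std::size_t>;

std::vector<SizeVector> topKShapes(const SizeVector& in, std::size_t axis, std::size_t k) {
    SizeVector s = in;
    s[axis] = k;
    return {s, s};  // values and indices share the shape
}

int main() {
    auto out = topKShapes({8, 100}, 1, 5);
    assert((out[0] == SizeVector{8, 5} && out[1] == SizeVector{8, 5}));
    return 0;
}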

View File

@ -1,109 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Unsqueeze layer
*/
class UnsqueezeShapeProp : public BuiltInShapeInferImpl {
public:
explicit UnsqueezeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer unsqueezeLayer(lp);
unsqueezeLayer.params = params;
unsqueezeLayer.type = _type;
validate(&unsqueezeLayer, inBlobs, params, blobs);
const size_t UNSQUEEZE_DATA = 0;
const size_t UNSQUEEZE_INDEXES = 1;
SizeVector idx_dims = inBlobs[UNSQUEEZE_INDEXES]->getTensorDesc().getDims();
SizeVector data_dims = inBlobs[UNSQUEEZE_DATA]->getTensorDesc().getDims();
SizeVector outShape;
if (idx_dims.size() > 1) THROW_IE_EXCEPTION << " Index vector should be 1 dimension";
switch (inBlobs[UNSQUEEZE_INDEXES]->getTensorDesc().getPrecision()) {
case Precision::FP32: {
procIndices<float>(inBlobs, UNSQUEEZE_INDEXES, data_dims, outShape, idx_dims);
} break;
case Precision::FP16: {
procIndices<ie_fp16>(inBlobs, UNSQUEEZE_INDEXES, data_dims, outShape, idx_dims);
} break;
case Precision::I32: {
procIndices<int32_t>(inBlobs, UNSQUEEZE_INDEXES, data_dims, outShape, idx_dims);
} break;
default:
THROW_IE_EXCEPTION << "Incorrect 'indices_to_set' input precision. Only FP32, FP16 and I32 are supported!";
}
outShapes.push_back(outShape);
}
private:
template <typename T>
void procIndices(const std::vector<Blob::CPtr>& inBlobs, const size_t UNSQUEEZE_INDEXES, SizeVector& data_dims,
SizeVector& outShape, const SizeVector& idx_dims) {
T* idx_data = inBlobs[UNSQUEEZE_INDEXES]->cbuffer().as<T*>() +
inBlobs[UNSQUEEZE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
if (!idx_data) {
outShape = data_dims;
return;
}
size_t max = data_dims.size();
for (size_t i = 0; i < idx_dims[0]; i++) {
auto axis = static_cast<size_t>(castToInt32(idx_data[i]));
max = std::max(max, axis);
}
max++;
if ((idx_dims[0] + data_dims.size()) < max) {
THROW_IE_EXCEPTION << "Indices_to_set for unsqueeze layer is out of tensor dimension";
}
max = inBlobs[UNSQUEEZE_INDEXES]->size() + data_dims.size();
for (size_t i = 0, j = 0, k = 0; i < max; i++) {
size_t index_to_push = 1;
if (k < inBlobs[UNSQUEEZE_INDEXES]->size() && i == castToInt32(idx_data[k])) {
k++;
} else {
index_to_push = data_dims[j++];
}
outShape.push_back(index_to_push);
}
}
int32_t castToInt32(ie_fp16 x) {
return static_cast<int32_t>(InferenceEngine::PrecisionUtils::f16tof32(x));
}
int32_t castToInt32(int32_t x) {
return x;
}
int32_t castToInt32(float x) {
return static_cast<int32_t>(x);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
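
A standalone sketch of the merge performed by procIndices above, assuming sorted non-negative axes and plain vectors instead of blobs: the output rank is rank(data) + len(axes), with a 1 inserted at every listed position and the original extents filling the remaining slots in order.

#include <cassert>
#include <vector>

using SizeVector = std::vector<std::size_t>;

SizeVector unsqueezeShape(const SizeVector& dims, const std::vector<std::size_t>& axes) {
    SizeVector out;
    std::size_t j = 0, k = 0;
    const std::size_t total = dims.size() + axes.size();
    for (std::size_t i = 0; i < total; i++) {
        if (k < axes.size() && i == axes[k]) {
            out.push_back(1);  // inserted singleton dimension
            k++;
        } else {
            out.push_back(dims[j++]);  // copy the next original extent
        }
    }
    return out;
}

int main() {
    assert((unsqueezeShape({3, 4}, {0, 3}) == SizeVector{1, 3, 4, 1}));
    return 0;
}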

View File

@ -1,44 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Upsampling layer
*/
class UpsamplingShapeProp : public BuiltInShapeInferImpl {
public:
explicit UpsamplingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t scale = static_cast<size_t>(cnnLayer.GetParamAsInt("scale"));
SizeVector out_shapes = {inShapes[0][0], inShapes[0][1]};
for (size_t i = 2; i < inShapes[0].size(); i++) {
out_shapes.push_back(inShapes[0][i] * scale);
}
outShapes.push_back(out_shapes);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,26 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <string>
#include "legacy/shape_infer/built-in/ie_built_in_holder.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
template <typename Impl>
class ImplRegisterBase {
public:
explicit ImplRegisterBase(const std::string& type) {
BuiltInShapeInferHolder::AddImpl(type, std::make_shared<Impl>(type));
}
};
#define REG_SHAPE_INFER_FOR_TYPE(__prim, __type) static ImplRegisterBase<__prim> __bi_reg__##__type(#__type)
} // namespace ShapeInfer
} // namespace InferenceEngine
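
The macro above relies on static initialization: each REG_SHAPE_INFER_FOR_TYPE line defines a file-scope ImplRegisterBase object whose constructor registers the implementation before main() runs. A minimal self-contained sketch of the same pattern, with hypothetical Holder/Impl names rather than the IE classes:

#include <cassert>
#include <map>
#include <memory>
#include <string>

struct Impl { virtual ~Impl() = default; };

struct Holder {
    static std::map<std::string, std::shared_ptr<Impl>>& registry() {
        static std::map<std::string, std::shared_ptr<Impl>> r;  // lazily constructed singleton
        return r;
    }
    static void add(const std::string& type, std::shared_ptr<Impl> impl) {
        registry()[type] = std::move(impl);
    }
};

template <typename T>
struct Register {
    explicit Register(const std::string& type) { Holder::add(type, std::make_shared<T>()); }
};

#define REG_FOR_TYPE(cls, type) static Register<cls> reg_##type(#type)

struct TileImpl : Impl {};
REG_FOR_TYPE(TileImpl, Tile);  // constructor runs before main()

int main() {
    assert(Holder::registry().count("Tile") == 1);
    return 0;
}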

View File

@ -1,72 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <precision_utils.h>
#include <ie_precision.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace ShapeInfer {
class BroadcastOffset {
SizeVector dims;
SizeVector offset_v;
SizeVector getDims(const SizeVector& originDims, const SizeVector& outputDims) {
SizeVector d(outputDims.size(), 1);
for (int i = 0; i < originDims.size(); i++) {
d[d.size() - 1 - i] = originDims[originDims.size() - 1 - i];
}
return d;
}
SizeVector getOffset(const SizeVector& originDims, const SizeVector& outDims) {
SizeVector o(originDims.size());
if (originDims.size() != outDims.size())
THROW_IE_EXCEPTION << "Cannot calculate offsets! Incorrect patameters for eltwise broadcast!";
int k = 1;
for (int i = originDims.size() - 1; i >= 0; i--) {
o[i] = (originDims[i] == outDims[i]) ? k : 0;
k *= originDims[i];
}
return o;
}
public:
BroadcastOffset(const SizeVector& originDims, const SizeVector& outputDims) {
dims = getDims(originDims, outputDims);
offset_v = getOffset(dims, outputDims);
}
size_t offset(const SizeVector& v) const {
size_t off = 0;
if (v.size() != offset_v.size())
THROW_IE_EXCEPTION << "Cannot calculate offsets! Incorrect patameters for eltwise broadcast!";
for (size_t i = 0; i < v.size(); i++) {
off += v[i] * offset_v[i];
}
return off;
}
SizeVector offset_dims(size_t l) const {
size_t n_dims = dims.size();
SizeVector pos(n_dims);
for (int rd = 1; rd <= n_dims; ++rd) {
const size_t d = n_dims - rd;
const size_t cur_dim = dims[d];
pos[d] = l % cur_dim;
l /= cur_dim;
}
return pos;
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
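
BroadcastOffset above maps a flat output index to a flat input index under numpy-style broadcasting: getDims right-aligns the input shape against the output shape, and getOffset zeroes the stride of every broadcast (extent-1) dimension so repeated reads land on the same element. A standalone sketch of the same stride trick:

#include <cassert>
#include <vector>

using SizeVector = std::vector<std::size_t>;

// Per-dimension input strides for broadcasting `in` (already right-aligned
// and padded with 1s) against `out`; broadcast dims get stride 0.
SizeVector broadcastStrides(const SizeVector& in, const SizeVector& out) {
    SizeVector strides(in.size());
    std::size_t k = 1;
    for (int i = static_cast<int>(in.size()) - 1; i >= 0; i--) {
        strides[i] = (in[i] == out[i]) ? k : 0;
        k *= in[i];
    }
    return strides;
}

int main() {
    // Input [1, 3] broadcast over output [2, 3]: dim 0 repeats, dim 1 strides.
    assert((broadcastStrides({1, 3}, {2, 3}) == SizeVector{0, 1}));
    return 0;
}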

View File

@ -1,259 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <precision_utils.h>
#include <ie_precision.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "broadcast_offset.hpp"
#include "ie_const_infer_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Const inference for Add layer
*
* Table of the output precision for each pair of input precisions:
*
*           U8     I32    I64    FP16   FP32
* =============================================
* U8   ==   U8     I32    I64    FP16   FP32
* I32  ==   I32    I32    I64    FP32   FP32
* I64  ==   I64    I64    I64    FP32   FP32
* FP16 ==   FP16   FP32   FP32   FP16   FP32
* FP32 ==   FP32   FP32   FP32   FP32   FP32
*
* There is a special case for FP16 precision: input data is converted to FP32 and added, and the
* result is converted back to FP16 when both inputs are FP16, or when one is FP16 and the other U8.
class AddConstInfer : public ConstInferImpl {
public:
explicit AddConstInfer(const std::string& type): ConstInferImpl(type) {}
struct fp16tofp32 {
inline float operator()(ie_fp16 value) {
return static_cast<float>(PrecisionUtils::f16tof32(value));
}
};
struct fp32tofp16 {
inline ie_fp16 operator()(float value) {
return PrecisionUtils::f32tof16(value);
}
};
template <typename dataType>
struct noConversion {
inline dataType operator()(dataType value) {
return value;
}
};
template <typename inDatatype1, typename inDatatype2, typename outDatatype, class ConversionInData1,
class ConversionInData2, class ConversionOutData>
void add(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) {
auto* firstBlobBuffer = inData[0]->cbuffer().as<inDatatype1*>();
auto* secondBlobBuffer = inData[1]->cbuffer().as<inDatatype2*>();
if (!firstBlobBuffer || !secondBlobBuffer) {
THROW_IE_EXCEPTION << "empty input data";
}
auto outBlob = *outData.begin();
auto* outBuffer = outBlob->buffer().as<outDatatype*>();
if (!outBuffer) THROW_IE_EXCEPTION << "empty output data";
BroadcastOffset outOff(outBlob->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
BroadcastOffset inOff1(inData[0]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
BroadcastOffset inOff2(inData[1]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
for (size_t i = 0; i < outBlob->size(); i++) {
SizeVector offsetDims = outOff.offset_dims(i);
outBuffer[outOff.offset(offsetDims)] =
ConversionOutData()(ConversionInData1()(firstBlobBuffer[inOff1.offset(offsetDims)]) +
ConversionInData2()(secondBlobBuffer[inOff2.offset(offsetDims)]));
}
}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
size_t numInputs = inData.size();
if (inData.size() != 2)
THROW_IE_EXCEPTION << "Unsupported number of inputs: " << numInputs << ". 2 inputs is supported";
auto compare =
getPrecisionMask(inData[0]->getTensorDesc().getPrecision(), inData[1]->getTensorDesc().getPrecision(),
outData[0]->getTensorDesc().getPrecision());
switch (compare) {
case getPrecisionMask(Precision::U8, Precision::U8, Precision::U8):
add<uint8_t, uint8_t, uint8_t, noConversion<uint8_t>, noConversion<uint8_t>, noConversion<uint8_t>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U8, Precision::I32, Precision::I32):
add<uint8_t, int, int, noConversion<uint8_t>, noConversion<int>, noConversion<int>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::U8, Precision::I64, Precision::I64):
add<uint8_t, long long int, long long int, noConversion<uint8_t>, noConversion<long long int>,
noConversion<long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U8, Precision::U64, Precision::U64):
add<uint8_t, unsigned long long int, unsigned long long int, noConversion<uint8_t>,
noConversion<unsigned long long int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U8, Precision::FP16, Precision::FP16):
add<uint8_t, ie_fp16, ie_fp16, noConversion<uint8_t>, fp16tofp32, fp32tofp16>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::U8, Precision::FP32, Precision::FP32):
add<uint8_t, float, float, noConversion<uint8_t>, noConversion<float>, noConversion<float>>(inData, params,
blobs, outData);
break;
case getPrecisionMask(Precision::I32, Precision::U8, Precision::I32):
add<int, uint8_t, int, noConversion<int>, noConversion<uint8_t>, noConversion<int>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::I32, Precision::I32, Precision::I32):
add<int, int, int, noConversion<int>, noConversion<int>, noConversion<int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I32, Precision::I64, Precision::I64):
add<int, long long int, long long int, noConversion<int>, noConversion<long long int>,
noConversion<long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I32, Precision::U64, Precision::U64):
add<int, unsigned long long int, unsigned long long int, noConversion<int>,
noConversion<unsigned long long int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I32, Precision::FP16, Precision::FP32):
add<int, ie_fp16, float, noConversion<int>, fp16tofp32, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::I32, Precision::FP32, Precision::FP32):
add<int, float, float, noConversion<int>, noConversion<float>, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::I64, Precision::U8, Precision::I64):
add<long long int, uint8_t, long long int, noConversion<long long int>, noConversion<uint8_t>,
noConversion<long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I64, Precision::I32, Precision::I64):
add<long long int, int, long long int, noConversion<long long int>, noConversion<int>,
noConversion<long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I64, Precision::I64, Precision::I64):
add<long long int, long long int, long long int, noConversion<long long int>, noConversion<long long int>,
noConversion<long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I64, Precision::FP16, Precision::FP32):
add<long long int, ie_fp16, float, noConversion<long long int>, fp16tofp32, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I64, Precision::FP32, Precision::FP32):
add<long long int, float, float, noConversion<long long int>, noConversion<float>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U64, Precision::U8, Precision::U64):
add<unsigned long long int, uint8_t, unsigned long long int, noConversion<unsigned long long int>, noConversion<uint8_t>,
noConversion<unsigned long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U64, Precision::I32, Precision::U64):
add<unsigned long long int, int, unsigned long long int, noConversion<unsigned long long int>, noConversion<int>,
noConversion<unsigned long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U64, Precision::U64, Precision::U64):
add<unsigned long long int, unsigned long long int, unsigned long long int,
noConversion<unsigned long long int>, noConversion<unsigned long long int>,
noConversion<unsigned long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U64, Precision::FP16, Precision::FP32):
add<unsigned long long int, ie_fp16, float, noConversion<unsigned long long int>, fp16tofp32, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U64, Precision::FP32, Precision::FP32):
add<unsigned long long int, float, float, noConversion<unsigned long long int>, noConversion<float>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP16, Precision::U8, Precision::FP16):
add<ie_fp16, uint8_t, ie_fp16, fp16tofp32, noConversion<uint8_t>, fp32tofp16>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::FP16, Precision::I32, Precision::FP32):
add<ie_fp16, int, float, fp16tofp32, noConversion<int>, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::FP16, Precision::I64, Precision::FP32):
add<ie_fp16, long long int, float, fp16tofp32, noConversion<long long int>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP16, Precision::U64, Precision::FP32):
add<ie_fp16, unsigned long long int, float, fp16tofp32, noConversion<unsigned long long int>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP16, Precision::FP16, Precision::FP16):
add<ie_fp16, ie_fp16, ie_fp16, fp16tofp32, fp16tofp32, fp32tofp16>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP16, Precision::FP32, Precision::FP16):
add<ie_fp16, float, ie_fp16, fp16tofp32, noConversion<float>, fp32tofp16>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP16, Precision::FP32, Precision::FP32):
add<ie_fp16, float, float, fp16tofp32, noConversion<float>, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::FP32, Precision::U8, Precision::FP32):
add<float, uint8_t, float, noConversion<float>, noConversion<uint8_t>, noConversion<float>>(inData, params,
blobs, outData);
break;
case getPrecisionMask(Precision::FP32, Precision::I32, Precision::FP32):
add<float, int, float, noConversion<float>, noConversion<int>, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::FP32, Precision::I64, Precision::FP32):
add<float, long long int, float, noConversion<float>, noConversion<long long int>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP32, Precision::U64, Precision::FP32):
add<float, unsigned long long int, float, noConversion<float>, noConversion<unsigned long long int>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP32, Precision::FP16, Precision::FP32):
add<float, ie_fp16, float, noConversion<float>, fp16tofp32, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::FP32, Precision::FP16, Precision::FP16):
add<float, ie_fp16, ie_fp16, noConversion<float>, fp16tofp32, fp32tofp16>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP32, Precision::FP32, Precision::FP32):
add<float, float, float, noConversion<float>, noConversion<float>, noConversion<float>>(inData, params,
blobs, outData);
break;
default:
THROW_IE_EXCEPTION << "Unsupported precision!";
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
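
The switch above dispatches on a mask packing the two input precisions and the output precision into one integer; getPrecisionMask comes from the IE headers, but the idea is easy to sketch standalone (hypothetical enum values below, not the real Precision codes):

#include <cassert>

// Hypothetical precision codes, only for illustration.
enum Precision { U8 = 1, I32 = 2, FP16 = 3, FP32 = 4 };

// Pack three precisions into one switchable key, 8 bits each.
constexpr unsigned mask(Precision a, Precision b, Precision c) {
    return (static_cast<unsigned>(a) << 16) |
           (static_cast<unsigned>(b) << 8) |
            static_cast<unsigned>(c);
}

const char* describe(Precision in1, Precision in2, Precision out) {
    switch (mask(in1, in2, out)) {
        case mask(FP32, FP32, FP32): return "plain float add";
        case mask(FP16, FP16, FP16): return "convert to FP32, add, convert back";
        default: return "unsupported";
    }
}

int main() {
    assert(describe(FP32, FP32, FP32)[0] == 'p');
    return 0;
}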

View File

@ -1,112 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <cmath>
#include <ie_algorithm.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_const_infer_impl.hpp"
#include "ie_memcpy.h"
#include "ie_parallel.hpp"
#include "precision_utils.h"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Const inference for Broadcast layer
*/
class BroadcastConstInfer : public ConstInferImpl {
private:
const size_t BROADCAST_INPUT = 0;
const size_t BROADCAST_SHAPE = 1;
public:
explicit BroadcastConstInfer(const std::string& type): ConstInferImpl(type) {}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
LayerParams lp {};
CNNLayer layer(lp);
layer.params = params;
if (outData.empty()) THROW_IE_EXCEPTION << "Incorrect number of input/output edges!";
if (inData.size() != 2) THROW_IE_EXCEPTION << "Incorrect number of input edges!";
if (inData[BROADCAST_SHAPE]->getTensorDesc().getDims().size() > 1)
THROW_IE_EXCEPTION << "Shape vector should be 1 dimension";
size_t data_size = inData[BROADCAST_INPUT]->getTensorDesc().getPrecision().size();
size_t shape_size = (inData[BROADCAST_SHAPE]->getTensorDesc().getDims())[0];
SizeVector dst_dims = outData[0]->getTensorDesc().getDims();
SizeVector src_dims = inData[BROADCAST_INPUT]->getTensorDesc().getDims();
if (!src_dims.size()) src_dims = SizeVector(1, 1);
if (dst_dims.size() != shape_size) {
THROW_IE_EXCEPTION << "Output tensor dimension mismatch";
}
if (src_dims.size() > dst_dims.size()) {
THROW_IE_EXCEPTION << "Output tensor dimension is smaller then input tensor dimension";
}
InferenceEngine::SizeVector dstStrides = outData[0]->getTensorDesc().getBlockingDesc().getStrides();
InferenceEngine::SizeVector srcStrides =
inData[BROADCAST_INPUT]->getTensorDesc().getBlockingDesc().getStrides();
InferenceEngine::SizeVector src_aligned(dst_dims.size());
InferenceEngine::SizeVector srcStrides_aligned(dst_dims.size());
if (!srcStrides.size()) srcStrides = SizeVector(1, 1);
size_t prefix_size = dst_dims.size() - src_dims.size();
for (size_t i = 0; i < dst_dims.size(); i++) {
if (i < prefix_size) {
src_aligned[i] = 1;
srcStrides_aligned[i] = srcStrides[0];
} else {
src_aligned[i] = src_dims[i - prefix_size];
srcStrides_aligned[i] = srcStrides[i - prefix_size];
}
}
size_t work_amount_dst = dstStrides[0] * dst_dims[0];
const uint8_t* src_data = inData[BROADCAST_INPUT]->cbuffer().as<const uint8_t*>() +
inData[BROADCAST_INPUT]->getTensorDesc().getBlockingDesc().getOffsetPadding();
uint8_t* dst_data =
outData[0]->cbuffer().as<uint8_t*>() + outData[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
parallel_nt(0, [&](const int ithr, const int nthr) {
size_t i, src_idx, start = 0, end = 0;
SizeVector counters(dst_dims.size(), 0);
splitter(work_amount_dst, nthr, ithr, start, end);
for (int j = dst_dims.size() - 1, i = start; j >= 0; j--) {
counters[j] = i % dst_dims[j];
i /= dst_dims[j];
}
for (size_t iwork = start * data_size; iwork < end * data_size; iwork += data_size) {
for (i = 0, src_idx = 0; i < dst_dims.size(); ++i)
src_idx += counters[i] ? ((counters[i] % src_aligned[i]) * srcStrides_aligned[i]) : 0;
ie_memcpy(&dst_data[iwork], data_size, &src_data[src_idx * data_size], data_size);
for (int j = dst_dims.size() - 1; j >= 0; j--) {
counters[j] = (counters[j] + 1) % dst_dims[j];
if (counters[j] != 0) break;
}
}
});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
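
The copy loop above walks the output with an odometer-style counter: after each element, the innermost counter increments and carries into the next dimension when it wraps. A standalone sketch of that increment using plain vectors:

#include <cassert>
#include <vector>

using SizeVector = std::vector<std::size_t>;

// Increment a multi-dimensional counter over `dims`, innermost digit first.
// Returns false once the counter wraps past the last coordinate.
bool advance(SizeVector& counters, const SizeVector& dims) {
    for (int j = static_cast<int>(dims.size()) - 1; j >= 0; j--) {
        counters[j] = (counters[j] + 1) % dims[j];
        if (counters[j] != 0) return true;  // no carry: done
    }
    return false;  // wrapped around completely
}

int main() {
    SizeVector c{0, 0}, dims{2, 3};
    std::size_t visited = 1;  // count the starting coordinate (0, 0)
    while (advance(c, dims)) visited++;
    assert(visited == 6);  // 2 * 3 coordinates in total
    return 0;
}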

View File

@ -1,62 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <ie_memcpy.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_const_infer_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Const inference for Concat layer
*/
class ConcatConstInfer : public ConstInferImpl {
public:
explicit ConcatConstInfer(const std::string& type): ConstInferImpl(type) {}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
LayerParams lp {};
ConcatLayer layer(lp);
layer.params = params;
layer.type = _type;
_validator->parseParams(&layer);
auto outBlob = *outData.begin();
SizeVector outShape = outBlob->getTensorDesc().getDims();
auto* outBuffer = outBlob->buffer().as<int8_t*>();
size_t outerSize = 1;
for (int i = 0; i < layer._axis; i++) outerSize *= outShape[i];
size_t outIdx = 0;
for (size_t osIdx = 0; osIdx < outerSize; osIdx++) {
for (auto& inBlob : inData) {
if (inBlob->getTensorDesc().getPrecision() != outBlob->getTensorDesc().getPrecision())
THROW_IE_EXCEPTION << "Unsupported concat layer with different precisions! Out precision: " +
std::string(outBlob->getTensorDesc().getPrecision().name());
const auto* inBuffer = inBlob->cbuffer().as<int8_t*>();
size_t innerSize = inBlob->size() / outerSize;
for (size_t j = 0; j < innerSize; j++, outIdx++) {
memcpy(outBuffer + outIdx * outBlob->element_size(),
inBuffer + (osIdx * innerSize + j) * inBlob->element_size(), inBlob->element_size());
}
}
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,35 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Const inference for Const layer
*/
class ConstConstInfer : public ConstInferImpl {
public:
explicit ConstConstInfer(const std::string& type): ConstInferImpl(type) {}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
auto it = blobs.find("custom");
if (it == blobs.end()) THROW_IE_EXCEPTION << "Missed `custom` blob";
// TODO: copy instead of putting pointer?
outData[0] = (*it).second;
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,104 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#ifdef __INTEL_COMPILER
#pragma warning disable : 2586
#endif
#include "ie_const_infer_holder.hpp"
#include <list>
#include <memory>
#include <string>
#include "ie_add_const_infer.hpp"
#include "ie_broadcast_const_infer.hpp"
#include "ie_concat_const_infer.hpp"
#include "ie_const_const_infer.hpp"
#include "ie_convert_const_infer.hpp"
#include "ie_div_const_infer.hpp"
#include "ie_eltw_const_infer.hpp"
#include "ie_fill_const_infer.hpp"
#include "ie_gather_const_infer.hpp"
#include "ie_in_place_const_infer.hpp"
#include "ie_mul_const_infer.hpp"
#include "ie_onehot_const_infer.hpp"
#include "ie_permute_const_infer.hpp"
#include "ie_power_const_infer.hpp"
#include "ie_range_const_infer.hpp"
#include "ie_reduce_const_infer.hpp"
#include "ie_reshape_const_infer.hpp"
#include "ie_shape_const_infer.hpp"
#include "ie_split_const_infer.hpp"
#include "ie_strided_slice_const_infer.hpp"
#include "ie_tile_const_infer.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
ConstInferHolder::ImplsHolder::Ptr ConstInferHolder::GetImplsHolder() {
static ImplsHolder::Ptr localHolder;
if (localHolder == nullptr) {
localHolder = std::make_shared<ImplsHolder>();
}
return localHolder;
}
void ConstInferHolder::AddImpl(const std::string& name, const IConstInferImpl::Ptr& impl) {
GetImplsHolder()->list[name] = impl;
}
std::list<std::string> ConstInferHolder::getConstInferTypes() {
std::list<std::string> types;
auto& factories = GetImplsHolder()->list;
for (const auto& factory : factories) {
types.push_back(factory.first);
}
return types;
}
IConstInferImpl::Ptr ConstInferHolder::getConstInferImpl(const std::string& type) {
auto& impls = ConstInferHolder::GetImplsHolder()->list;
if (impls.find(type) != impls.end()) {
return impls[type];
}
return nullptr;
}
REG_CONST_INFER_FOR_TYPE(MulConstInfer, Mul);
REG_CONST_INFER_FOR_TYPE(AddConstInfer, Add);
REG_CONST_INFER_FOR_TYPE(DivConstInfer, Div);
REG_CONST_INFER_FOR_TYPE(EltwiseConstInfer, Eltwise);
REG_CONST_INFER_FOR_TYPE(ShapeConstInfer, Shape);
REG_CONST_INFER_FOR_TYPE(ConstConstInfer, Const);
REG_CONST_INFER_FOR_TYPE(PowerConstInfer, Power);
REG_CONST_INFER_FOR_TYPE(TileConstInfer, Tile);
REG_CONST_INFER_FOR_TYPE(ReshapeConstInfer, Reshape);
REG_CONST_INFER_FOR_TYPE(GatherConstInfer, Gather);
REG_CONST_INFER_FOR_TYPE(SplitConstInfer, Split);
REG_CONST_INFER_FOR_TYPE(ConcatConstInfer, Concat);
REG_CONST_INFER_FOR_TYPE(InPlaceConstInfer, Unsqueeze);
REG_CONST_INFER_FOR_TYPE(InPlaceConstInfer, Squeeze);
REG_CONST_INFER_FOR_TYPE(StridedSliceConstInfer, StridedSlice);
REG_CONST_INFER_FOR_TYPE(FillConstInfer, Fill);
REG_CONST_INFER_FOR_TYPE(RangeConstInfer, Range);
REG_CONST_INFER_FOR_TYPE(BroadcastConstInfer, Broadcast);
REG_CONST_INFER_FOR_TYPE(OneHotConstInfer, OneHot);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceAnd);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceL1);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceL2);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceLogSum);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceLogSumExp);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceMax);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceMean);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceMin);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceOr);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceProd);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceSum);
REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceSumSquare);
REG_CONST_INFER_FOR_TYPE(PermuteConstInfer, Permute);
REG_CONST_INFER_FOR_TYPE(ConvertConstInfer, Convert);
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,53 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <description_buffer.hpp>
#include <list>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "caseless.hpp"
#include "ie_const_infer_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Holder of const infer implementations for built-in IE layers that plugins support out-of-the-box
*/
class ConstInferHolder {
struct ImplsHolder {
using Ptr = std::shared_ptr<ImplsHolder>;
InferenceEngine::details::caseless_map<std::string, IConstInferImpl::Ptr> list;
};
public:
std::list<std::string> getConstInferTypes();
IConstInferImpl::Ptr getConstInferImpl(const std::string& type);
static void AddImpl(const std::string& name, const IConstInferImpl::Ptr& impl);
private:
static ImplsHolder::Ptr GetImplsHolder();
};
template <typename Impl>
class ImplRegisterBase {
public:
explicit ImplRegisterBase(const std::string& type) {
ConstInferHolder::AddImpl(type, std::make_shared<Impl>(type));
}
};
#define REG_CONST_INFER_FOR_TYPE(__prim, __type) static ImplRegisterBase<__prim> __ci_reg__##__type(#__type)
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,23 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ie_const_infer_impl.hpp"
#include <map>
#include <string>
#include <vector>
using namespace InferenceEngine;
using namespace ShapeInfer;
void ConstInferImpl::infer(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) {
std::string errorPrefix = "Ref infer error for Layer with `" + _type + "` type: ";
if (outData.empty()) THROW_IE_EXCEPTION << errorPrefix + "output data is empty";
for (auto const& data : outData) {
if (data->buffer() == nullptr) THROW_IE_EXCEPTION << errorPrefix + "output data is not allocated";
}
// TODO: check for direct (NCHW, NCH, NC) and FP32
inferImpl(inData, params, blobs, outData);
}

View File

@ -1,57 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_layer_validators.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @experimental
* @class IConstInferImpl
* @brief This class provides interface for the layer's implementation to propagate const
*/
class IConstInferImpl {
public:
using Ptr = std::shared_ptr<IConstInferImpl>;
virtual ~IConstInferImpl() = default;
/**
* @brief Propagates const values; assumes all shapes are valid and output blobs are allocated by the caller
*/
virtual void infer(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) = 0;
};
class ConstInferImpl : public IConstInferImpl {
public:
explicit ConstInferImpl(const std::string& type): _type(type) {
_validator = details::LayerValidators::getInstance()->getValidator(_type);
if (!_validator)
THROW_IE_EXCEPTION << "Internal error: failed to find validator for layer with type: " << _type;
}
virtual void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) = 0;
void infer(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override;
protected:
std::string _type;
// to get parsed descendant CNNLayer from map<string,string>
details::LayerValidator::Ptr _validator;
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,179 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <ie_memcpy.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_const_infer_impl.hpp"
#include "ie_parallel.hpp"
#include "ie_precision.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Const inference for Convert layer
*/
class ConvertConstInfer : public ConstInferImpl {
template <typename src_d, typename dst_d>
void exec_cast(const Blob::CPtr& inData, Blob::Ptr& outData) {
const src_d* src_data =
inData->cbuffer().as<src_d*>() + inData->getTensorDesc().getBlockingDesc().getOffsetPadding();
dst_d* dst_data =
outData->buffer().as<dst_d*>() + outData->getTensorDesc().getBlockingDesc().getOffsetPadding();
if (inData->size() != outData->size())
THROW_IE_EXCEPTION << " Convert constant inference error: Input and output buffers have different sizes! "
"Input buffer size = `"
<< inData->size() << "` output buffer size = `" << outData->size() << "`";
parallel_for(inData->size(), [&](size_t i) {
dst_data[i] = static_cast<dst_d>(src_data[i]);
});
}
template<typename dst_d>
void exec_from_fp16_cast(const Blob::CPtr &inData, Blob::Ptr &outData) {
const ie_fp16 *src_data =
inData->cbuffer().as<ie_fp16 *>() + inData->getTensorDesc().getBlockingDesc().getOffsetPadding();
dst_d *dst_data =
outData->buffer().as<dst_d *>() + outData->getTensorDesc().getBlockingDesc().getOffsetPadding();
if (inData->size() != outData->size())
THROW_IE_EXCEPTION << " Convert constant inference error: Input and output buffers have different sizes! "
"Input buffer size = `"
<< inData->size() << "` output buffer size = `" << outData->size() << "`";
parallel_for(inData->size(), [&](size_t i) {
dst_data[i] = static_cast<dst_d>(PrecisionUtils::f16tof32(src_data[i]));
});
}
template<typename src_d>
void exec_to_fp16_cast(const Blob::CPtr &inData, Blob::Ptr &outData) {
const src_d* src_data =
inData->cbuffer().as<src_d*>() + inData->getTensorDesc().getBlockingDesc().getOffsetPadding();
ie_fp16* dst_data =
outData->buffer().as<ie_fp16*>() + outData->getTensorDesc().getBlockingDesc().getOffsetPadding();
if (inData->size() != outData->size())
THROW_IE_EXCEPTION << " Convert constant inference error: Input and output buffers have different sizes! "
"Input buffer size = `"
<< inData->size() << "` output buffer size = `" << outData->size() << "`";
parallel_for(inData->size(), [&](size_t i) {
dst_data[i] = PrecisionUtils::f32tof16(static_cast<float>(src_data[i]));
});
}
public:
explicit ConvertConstInfer(const std::string& type): ConstInferImpl(type) {}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
LayerParams lp {};
ConcatLayer layer(lp);
layer.params = params;
_validator->parseParams(&layer);
if (inData.size() != 1)
THROW_IE_EXCEPTION << " Convert constant inference error: incorrect number of inputs! Expected 1, got "
<< inData.size();
if (outData.size() != 1)
THROW_IE_EXCEPTION << " Convert constant inference error: incorrect number of outputs! Expected 1, got "
<< outData.size();
if (layer.params["precision"] != outData[0]->getTensorDesc().getPrecision().name())
THROW_IE_EXCEPTION << " Convert constant inference error: layer `precision` parameter and actual output "
"data precision mismatch! "
"`precision`=\""
<< layer.params["precision"] << "\", "
<< "`output_data_precision`=\"" << outData[0]->getTensorDesc().getPrecision() << "\"";
auto compare =
getPrecisionMask(inData[0]->getTensorDesc().getPrecision(), outData[0]->getTensorDesc().getPrecision());
switch (compare) {
case getPrecisionMask(Precision::I32, Precision::I32):
exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::I32>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::I64, Precision::I64):
exec_cast<PrecisionTrait<Precision::I64>::value_type, PrecisionTrait<Precision::I64>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::U64, Precision::U64):
exec_cast<PrecisionTrait<Precision::U64>::value_type, PrecisionTrait<Precision::U64>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::FP32):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::FP32>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::I32, Precision::I64):
exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::I64>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::I32, Precision::U64):
exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::U64>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::I32, Precision::FP32):
exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::FP32>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::I32):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::I32>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::I64):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::I64>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::U64):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::U64>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::U8):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::U8>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::BOOL):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::BOOL>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::BOOL, Precision::BOOL):
exec_cast<PrecisionTrait<Precision::BOOL>::value_type, PrecisionTrait<Precision::BOOL>::value_type>(
inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP16, Precision::FP32):
exec_from_fp16_cast<PrecisionTrait<Precision::FP32>::value_type>(inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP16, Precision::I32):
exec_from_fp16_cast<PrecisionTrait<Precision::I32>::value_type>(inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP16, Precision::I64):
exec_from_fp16_cast<PrecisionTrait<Precision::I64>::value_type>(inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP16, Precision::U64):
exec_from_fp16_cast<PrecisionTrait<Precision::U64>::value_type>(inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP16, Precision::U8):
exec_from_fp16_cast<PrecisionTrait<Precision::U8>::value_type>(inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP16, Precision::BOOL):
exec_from_fp16_cast<PrecisionTrait<Precision::BOOL>::value_type>(inData[0], outData[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::FP16):
exec_to_fp16_cast<PrecisionTrait<Precision::FP32>::value_type>(inData[0], outData[0]);
break;
default:
THROW_IE_EXCEPTION << " Convert constant inference error: Unsupported precision configuration! "
<< " Input precision: " << inData[0]->getTensorDesc().getPrecision()
<< ", output precision: " << outData[0]->getTensorDesc().getPrecision();
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,50 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Const inference for Div layer
*/
class DivConstInfer : public ConstInferImpl {
public:
explicit DivConstInfer(const std::string& type): ConstInferImpl(type) {}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
size_t numInputs = inData.size();
if (inData.size() != 2)
THROW_IE_EXCEPTION << "Unsupported number of inputs: " << numInputs << ". 2 inputs is supported";
auto* firstBlobBuffer = inData[0]->cbuffer().as<float*>();
auto* secondBlobBuffer = inData[1]->cbuffer().as<float*>();
if (!firstBlobBuffer || !secondBlobBuffer) {
THROW_IE_EXCEPTION << "empty input data";
}
auto outBlob = *outData.begin();
auto* outBuffer = outBlob->buffer().as<float*>();
if (!outBuffer) THROW_IE_EXCEPTION << "empty output data";
if (inData[0]->size() != inData[1]->size()) {
THROW_IE_EXCEPTION << "inputs with different shapes are not supported";
}
for (size_t i = 0; i < outBlob->size(); i++) {
if (secondBlobBuffer[i] == 0) THROW_IE_EXCEPTION << "division by zero";
outBuffer[i] = firstBlobBuffer[i] / secondBlobBuffer[i];
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,67 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_add_const_infer.hpp"
#include "ie_div_const_infer.hpp"
#include "ie_mul_const_infer.hpp"
#include "ie_pow_const_infer.hpp"
#include "ie_sub_const_infer.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Eltwise wrapper on top of Mul/Add/Div operation
*/
class EltwiseConstInfer : public ConstInferImpl {
public:
explicit EltwiseConstInfer(const std::string& type): ConstInferImpl(type) {
_sum = std::shared_ptr<ConstInferImpl>(new AddConstInfer(_type));
_sub = std::shared_ptr<ConstInferImpl>(new SubConstInfer(_type));
_mul = std::shared_ptr<ConstInferImpl>(new MulConstInfer(_type));
_div = std::shared_ptr<ConstInferImpl>(new DivConstInfer(_type));
_pow = std::shared_ptr<ConstInferImpl>(new PowConstInfer(_type));
}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
auto found = params.find("operation");
IE_ASSERT(found != params.end()) << "Eltwise layer has no attribute operation.";
std::string operation = found->second;
std::shared_ptr<ConstInferImpl> actual;
if (operation == "sum")
actual = _sum;
else if (operation == "sub")
actual = _sub;
else if (operation == "mul")
actual = _mul;
else if (operation == "div")
actual = _div;
else if (operation == "pow")
actual = _pow;
else
THROW_IE_EXCEPTION << "Unsupported eltwise operation type " << operation
<< ". "
"IE cannot propagate constants through this layer.";
actual->inferImpl(inData, params, blobs, outData);
}
private:
std::shared_ptr<ConstInferImpl> _mul, _div, _sum, _sub, _pow;
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@ -1,102 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <ie_memcpy.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_const_infer_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Const inference for Fill layer
*/
class FillConstInfer : public ConstInferImpl {
public:
explicit FillConstInfer(const std::string& type): ConstInferImpl(type) {}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
const size_t FILL_DIMS = 0;
const size_t FILL_VALUE = 1;
if (inData.empty() || outData.empty()) THROW_IE_EXCEPTION << "Incorrect number of input/output edges!";
if (inData.size() != 2) THROW_IE_EXCEPTION << "Incorrect number of input edges!";
SizeVector dims = inData[FILL_DIMS]->getTensorDesc().getDims();
if (dims.size() > 1) THROW_IE_EXCEPTION << "Fill dimensions vector should have 1 dimension";
if (inData[FILL_DIMS]->getTensorDesc().getPrecision() != Precision::I32)
THROW_IE_EXCEPTION << "Fill dimensions vector should be I32!";
SizeVector value_dims = inData[FILL_VALUE]->getTensorDesc().getDims();
if (value_dims.size() > 1) THROW_IE_EXCEPTION << "Value scalar should have 1 dimension";
if (!(inData[FILL_VALUE]->getTensorDesc().getPrecision() == Precision::I32 &&
outData[0]->getTensorDesc().getPrecision() == Precision::I32) &&
!(inData[FILL_VALUE]->getTensorDesc().getPrecision() == Precision::FP32 &&
outData[0]->getTensorDesc().getPrecision() == Precision::FP32)) {
THROW_IE_EXCEPTION << "The 'Value' input scalar and the output tensor should have the same precision; "
"only FP32 and I32 are supported!";
}
int32_t* fill_dims = inData[FILL_DIMS]->cbuffer().as<int32_t*>() +
inData[FILL_DIMS]->getTensorDesc().getBlockingDesc().getOffsetPadding();
size_t fill_size = inData[FILL_DIMS]->getTensorDesc().getDims()[0];
SizeVector dst_dims = outData[0]->getTensorDesc().getDims();
if (dst_dims.size() != fill_size) {
THROW_IE_EXCEPTION << "Output tensor dimension mismatch";
}
size_t work_amount_dst = 1;
for (size_t i = 0; i < dst_dims.size(); i++) {
work_amount_dst *= fill_dims[i];
if (static_cast<int>(dst_dims[i]) != fill_dims[i]) {
THROW_IE_EXCEPTION << "Output tensor dimension size mismatch";
}
}
switch (outData[0]->getTensorDesc().getPrecision()) {
case Precision::FP32: {
float* dst_data =
outData[0]->buffer().as<float*>() + outData[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
float value = (inData[FILL_VALUE]->cbuffer().as<float*>() +
inData[FILL_VALUE]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0];
parallel_nt(0, [&](const int ithr, const int nthr) {
size_t start = 0, end = 0;
splitter(work_amount_dst, nthr, ithr, start, end);
std::fill_n(dst_data + start, end - start, value);
});
} break;
case Precision::I32: {
int32_t* dst_data =
outData[0]->buffer().as<int32_t*>() + outData[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
int32_t value = (inData[FILL_VALUE]->cbuffer().as<int32_t*>() +
inData[FILL_VALUE]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0];
parallel_nt(0, [&](const int ithr, const int nthr) {
size_t start = 0, end = 0;
splitter(work_amount_dst, nthr, ithr, start, end);
std::fill_n(dst_data + start, end - start, value);
});
} break;
default:
THROW_IE_EXCEPTION << "Incorrect output precision. Only FP32 and I32 are supported!";
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
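Stripped of blob plumbing and precision checks, the Fill computation above reduces to "read a 1-D dims tensor, take the product of its entries, broadcast a scalar". A hedged sketch under that assumption, with illustrative names:

// Illustrative sketch only: Fill as "product of dims, then a constant vector".
#include <cstdint>
#include <stdexcept>
#include <vector>

template <typename T>
std::vector<T> fill(const std::vector<int32_t>& dims, T value) {
    size_t total = 1;
    for (int32_t d : dims) {
        if (d < 0) throw std::runtime_error("negative dimension");
        total *= static_cast<size_t>(d);
    }
    // The parallel_nt/splitter split in the removed code is a performance
    // optimization over exactly this fill.
    return std::vector<T>(total, value);
}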

View File

@@ -1,143 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <ie_memcpy.h>
#include <cmath>
#include <ie_algorithm.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_const_infer_impl.hpp"
#include "ie_parallel.hpp"
#include "precision_utils.h"
namespace InferenceEngine {
namespace ShapeInfer {
struct GatherParams {
size_t dataLength = 1;
int axis = 0;
size_t indexRange = 0;
size_t numDictionaries = 1;
};
/**
* @brief Implementation of Const inference for Gather layer
*/
class GatherConstInfer : public ConstInferImpl {
public:
explicit GatherConstInfer(const std::string& type): ConstInferImpl(type) {}
struct f32toUi32 {
inline unsigned int operator()(const float value) {
return static_cast<unsigned int>(value);
}
};
struct f16toUi32 {
inline unsigned int operator()(const ie_fp16 value) {
return static_cast<unsigned int>(PrecisionUtils::f16tof32(value));
}
};
struct i32toUi32 {
inline unsigned int operator()(const int32_t value) {
return static_cast<unsigned int>(value);
}
};
template <typename index_t, class Conversion>
void gather(const Blob::CPtr& indexes, const Blob::CPtr& dictionary, Blob::Ptr output, const GatherParams& p) {
size_t src_indexSize = indexes->size();
const index_t* src_index =
indexes->cbuffer().as<const index_t*>() + indexes->getTensorDesc().getBlockingDesc().getOffsetPadding();
const uint8_t* src_dataDict = dictionary->cbuffer().as<const uint8_t*>() +
dictionary->getTensorDesc().getBlockingDesc().getOffsetPadding();
uint8_t* dst_data =
output->cbuffer().as<uint8_t*>() + output->getTensorDesc().getBlockingDesc().getOffsetPadding();
parallel_for(src_indexSize, [&](size_t i) {
unsigned int idx = Conversion()(src_index[i]);
// Index clipping
if (idx < p.indexRange) {
// Copying data to destination from Dictionary
for (size_t j = 0; j < p.numDictionaries; j++) {
ie_memcpy(&dst_data[p.dataLength * (i + j * src_indexSize)],
output->byteSize() - (p.dataLength * (i + j * src_indexSize)),
&src_dataDict[p.dataLength * (idx + j * p.indexRange)], p.dataLength);
}
} else {
for (size_t j = 0; j < p.numDictionaries; j++) {
memset(&dst_data[p.dataLength * (i + j * src_indexSize)], 0, p.dataLength);
}
}
});
}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
LayerParams lp {};
CNNLayer layer(lp);
layer.params = params;
const size_t GATHER_DICTIONARY = 0;
const size_t GATHER_INDEXES = 1;
if (inData.size() != 2 || outData.empty()) THROW_IE_EXCEPTION << "Incorrect number of input/output edges!";
Precision inIdxPrecision = inData[GATHER_INDEXES]->getTensorDesc().getPrecision();
if (inIdxPrecision != Precision::FP32 && inIdxPrecision != Precision::FP16 && inIdxPrecision != Precision::I32)
THROW_IE_EXCEPTION << "Incorrect index precision. Only FP32|FP16|I32 are supported!";
Precision inDataPrecision = inData[GATHER_DICTIONARY]->getTensorDesc().getPrecision();
if (inDataPrecision != Precision::FP32 && inDataPrecision != Precision::FP16 &&
inDataPrecision != Precision::I32)
THROW_IE_EXCEPTION << "Incorrect dictionary precision. Only FP32|FP16|I32 are supported!";
// Remove redundant dimensions
const SizeVector& dictionary_dims = inData[GATHER_DICTIONARY]->getTensorDesc().getDims();
if (dictionary_dims.size() == 0) THROW_IE_EXCEPTION << " Incorrect input parameters dimension!";
GatherParams p;
p.axis = static_cast<int>(layer.GetParamAsInt("axis"));
// The axis must be within [-rank, rank) of the dictionary
if (!(-static_cast<int>(dictionary_dims.size()) <= p.axis && p.axis < static_cast<int>(dictionary_dims.size())))
THROW_IE_EXCEPTION << "Incorrect input parameters dimensions and axis number!";
if (p.axis < 0) p.axis += dictionary_dims.size();
// Find number of dictionaries, index range and data length
for (int i = 0; i < p.axis; i++) p.numDictionaries *= dictionary_dims[i];
p.indexRange = dictionary_dims[p.axis];
for (size_t i = p.axis + 1; i < dictionary_dims.size(); i++) p.dataLength *= dictionary_dims[i];
if (p.dataLength == 0) THROW_IE_EXCEPTION << " Incorrect input parameters dimension!";
p.dataLength *= inData[GATHER_DICTIONARY]->getTensorDesc().getPrecision().size();
switch (inData[GATHER_INDEXES]->getTensorDesc().getPrecision()) {
case Precision::FP32:
gather<float, f32toUi32>(inData[GATHER_INDEXES], inData[GATHER_DICTIONARY], outData[0], p);
break;
case Precision::FP16:
gather<ie_fp16, f16toUi32>(inData[GATHER_INDEXES], inData[GATHER_DICTIONARY], outData[0], p);
break;
case Precision::I32:
gather<int32_t, i32toUi32>(inData[GATHER_INDEXES], inData[GATHER_DICTIONARY], outData[0], p);
break;
default:
THROW_IE_EXCEPTION << " Unsupported precision!";
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
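The axis handling above is the interesting part: the dictionary is viewed as a 3-D tensor [numDictionaries, indexRange, dataLength], and out-of-range indices leave zeroed rows. A standalone sketch of that decomposition (illustrative, float-only, no IE types):

// Illustrative sketch only: Gather via the [outer, indexRange, inner] view.
#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<float> gather(const std::vector<float>& dict,
                          const std::vector<size_t>& dictDims, size_t axis,
                          const std::vector<size_t>& indices) {
    size_t numDictionaries = 1, dataLength = 1;
    for (size_t i = 0; i < axis; ++i) numDictionaries *= dictDims[i];
    for (size_t i = axis + 1; i < dictDims.size(); ++i) dataLength *= dictDims[i];
    const size_t indexRange = dictDims[axis];
    std::vector<float> out(numDictionaries * indices.size() * dataLength, 0.0f);
    for (size_t j = 0; j < numDictionaries; ++j) {
        for (size_t i = 0; i < indices.size(); ++i) {
            if (indices[i] >= indexRange) continue;  // clipped index: row stays zero
            const float* src = dict.data() + dataLength * (indices[i] + j * indexRange);
            float* dst = out.data() + dataLength * (i + j * indices.size());
            std::copy(src, src + dataLength, dst);
        }
    }
    return out;
}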

View File

@@ -1,36 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <ie_memcpy.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_const_infer_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Implementation of Const inference for in-place (pass-through) layers such as Unsqueeze
*/
class InPlaceConstInfer : public ConstInferImpl {
public:
explicit InPlaceConstInfer(const std::string& type): ConstInferImpl(type) {}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
auto inBlob = inData[0];
auto outBlob = outData[0];
auto* inBuffer = inBlob->cbuffer().as<uint8_t*>();
auto* outBuffer = outBlob->buffer().as<uint8_t*>();
ie_memcpy(outBuffer, outData[0]->byteSize(), inBuffer, inBlob->byteSize());
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
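The raw byte copy above works because layers of this kind change only shape metadata, never element values. A sketch of why such ops are metadata-only, using an illustrative Tensor type (not the IE one):

// Illustrative sketch only: an unsqueeze that rewrites dims and shares storage.
#include <cstddef>
#include <memory>
#include <vector>

struct Tensor {
    std::shared_ptr<std::vector<float>> data;  // shared buffer
    std::vector<size_t> dims;                  // layout metadata
};

Tensor unsqueeze(const Tensor& t, size_t axis) {  // assumes axis <= t.dims.size()
    Tensor out = t;                                // shares the same data
    out.dims.insert(out.dims.begin() + axis, 1);
    return out;
}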

View File

@@ -1,257 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <precision_utils.h>
#include <ie_precision.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "broadcast_offset.hpp"
#include "ie_const_infer_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Implementation of Const inference for Mul layer
*
* Output precision for a given pair of input precisions:
*
*          U8    I32   I64   FP16  FP32
*        ================================
* U8   |  U8    I32   I64   FP16  FP32
* I32  |  I32   I32   I64   FP32  FP32
* I64  |  I64   I64   I64   FP32  FP32
* FP16 |  FP16  FP32  FP32  FP16  FP32
* FP32 |  FP32  FP32  FP32  FP32  FP32
*
* U64 combinations are handled analogously to I64 (see the switch below).
*
* There is a special case for FP16: input data is converted to FP32 and multiplied, and the
* result is converted back to FP16 if both inputs are FP16, or if one is FP16 and the other is U8.
*/
class MulConstInfer : public ConstInferImpl {
public:
explicit MulConstInfer(const std::string& type): ConstInferImpl(type) {}
struct fp16tofp32 {
inline float operator()(ie_fp16 value) {
return static_cast<float>(PrecisionUtils::f16tof32(value));
}
};
struct fp32tofp16 {
inline ie_fp16 operator()(float value) {
return PrecisionUtils::f32tof16(value);
}
};
template <typename dataType>
struct noConversion {
inline dataType operator()(dataType value) {
return value;
}
};
template <typename inDatatype1, typename inDatatype2, typename outDatatype, class ConversionInData1,
class ConversionInData2, class ConversionOutData>
void mul(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) {
auto* firstBlobBuffer = inData[0]->cbuffer().as<inDatatype1*>();
auto* secondBlobBuffer = inData[1]->cbuffer().as<inDatatype2*>();
if (!firstBlobBuffer || !secondBlobBuffer) {
THROW_IE_EXCEPTION << "empty input data";
}
auto outBlob = *outData.begin();
auto* outBuffer = outBlob->buffer().as<outDatatype*>();
if (!outBuffer) THROW_IE_EXCEPTION << "empty output data";
BroadcastOffset outOff(outBlob->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
BroadcastOffset inOff1(inData[0]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
BroadcastOffset inOff2(inData[1]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
for (size_t i = 0; i < outBlob->size(); i++) {
SizeVector offsetDims = outOff.offset_dims(i);
outBuffer[outOff.offset(offsetDims)] =
ConversionOutData()(ConversionInData1()(firstBlobBuffer[inOff1.offset(offsetDims)]) *
ConversionInData2()(secondBlobBuffer[inOff2.offset(offsetDims)]));
}
}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
size_t numInputs = inData.size();
if (numInputs != 2)
THROW_IE_EXCEPTION << "Unsupported number of inputs: " << numInputs << ". Only 2 inputs are supported";
auto compare =
getPrecisionMask(inData[0]->getTensorDesc().getPrecision(), inData[1]->getTensorDesc().getPrecision(),
outData[0]->getTensorDesc().getPrecision());
switch (compare) {
case getPrecisionMask(Precision::U8, Precision::U8, Precision::U8):
mul<uint8_t, uint8_t, uint8_t, noConversion<uint8_t>, noConversion<uint8_t>, noConversion<uint8_t>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U8, Precision::I32, Precision::I32):
mul<uint8_t, int, int, noConversion<uint8_t>, noConversion<int>, noConversion<int>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::U8, Precision::I64, Precision::I64):
mul<uint8_t, long long int, long long int, noConversion<uint8_t>, noConversion<long long int>,
noConversion<long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U8, Precision::U64, Precision::U64):
mul<uint8_t, unsigned long long int, unsigned long long int, noConversion<uint8_t>,
noConversion<unsigned long long int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U8, Precision::FP16, Precision::FP16):
mul<uint8_t, ie_fp16, ie_fp16, noConversion<uint8_t>, fp16tofp32, fp32tofp16>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::U8, Precision::FP32, Precision::FP32):
mul<uint8_t, float, float, noConversion<uint8_t>, noConversion<float>, noConversion<float>>(inData, params,
blobs, outData);
break;
case getPrecisionMask(Precision::I32, Precision::U8, Precision::I32):
mul<int, uint8_t, int, noConversion<int>, noConversion<uint8_t>, noConversion<int>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::I32, Precision::I32, Precision::I32):
mul<int, int, int, noConversion<int>, noConversion<int>, noConversion<int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I32, Precision::I64, Precision::I64):
mul<int, long long int, long long int, noConversion<int>, noConversion<long long int>,
noConversion<long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I32, Precision::U64, Precision::U64):
mul<int, unsigned long long int, unsigned long long int, noConversion<int>,
noConversion<unsigned long long int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I32, Precision::FP16, Precision::FP32):
mul<int, ie_fp16, float, noConversion<int>, fp16tofp32, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::I32, Precision::FP32, Precision::FP32):
mul<int, float, float, noConversion<int>, noConversion<float>, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::I64, Precision::U8, Precision::I64):
mul<long long int, uint8_t, long long int, noConversion<long long int>, noConversion<uint8_t>,
noConversion<long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I64, Precision::I32, Precision::I64):
mul<long long int, int, long long int, noConversion<long long int>, noConversion<int>,
noConversion<long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I64, Precision::I64, Precision::I64):
mul<long long int, long long int, long long int, noConversion<long long int>, noConversion<long long int>,
noConversion<long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I64, Precision::FP16, Precision::FP32):
mul<long long int, ie_fp16, float, noConversion<long long int>, fp16tofp32, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I64, Precision::FP32, Precision::FP32):
mul<long long int, float, float, noConversion<long long int>, noConversion<float>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U64, Precision::U8, Precision::U64):
mul<unsigned long long int, uint8_t, unsigned long long int, noConversion<unsigned long long int>,
noConversion<uint8_t>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U64, Precision::I32, Precision::U64):
mul<unsigned long long int, int, unsigned long long int, noConversion<unsigned long long int>,
noConversion<int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U64, Precision::U64, Precision::U64):
mul<unsigned long long int, unsigned long long int, unsigned long long int,
noConversion<unsigned long long int>, noConversion<unsigned long long int>,
noConversion<unsigned long long int>>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U64, Precision::FP16, Precision::FP32):
mul<unsigned long long int, ie_fp16, float, noConversion<unsigned long long int>, fp16tofp32, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::U64, Precision::FP32, Precision::FP32):
mul<unsigned long long int, float, float, noConversion<unsigned long long int>, noConversion<float>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP16, Precision::U8, Precision::FP16):
mul<ie_fp16, uint8_t, ie_fp16, fp16tofp32, noConversion<uint8_t>, fp32tofp16>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::FP16, Precision::I32, Precision::FP32):
mul<ie_fp16, int, float, fp16tofp32, noConversion<int>, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::FP16, Precision::I64, Precision::FP32):
mul<ie_fp16, long long int, float, fp16tofp32, noConversion<long long int>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP16, Precision::U64, Precision::FP32):
mul<ie_fp16, unsigned long long int, float, fp16tofp32, noConversion<unsigned long long int>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP16, Precision::FP16, Precision::FP16):
mul<ie_fp16, ie_fp16, ie_fp16, fp16tofp32, fp16tofp32, fp32tofp16>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP16, Precision::FP32, Precision::FP32):
mul<ie_fp16, float, float, fp16tofp32, noConversion<float>, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::FP16, Precision::FP32, Precision::FP16):
mul<ie_fp16, float, ie_fp16, fp16tofp32, noConversion<float>, fp32tofp16>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP32, Precision::U8, Precision::FP32):
mul<float, uint8_t, float, noConversion<float>, noConversion<uint8_t>, noConversion<float>>(inData, params,
blobs, outData);
break;
case getPrecisionMask(Precision::FP32, Precision::I32, Precision::FP32):
mul<float, int, float, noConversion<float>, noConversion<int>, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::FP32, Precision::I64, Precision::FP32):
mul<float, long long int, float, noConversion<float>, noConversion<long long int>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP32, Precision::U64, Precision::FP32):
mul<float, unsigned long long int, float, noConversion<float>, noConversion<unsigned long long int>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP32, Precision::FP16, Precision::FP32):
mul<float, ie_fp16, float, noConversion<float>, fp16tofp32, noConversion<float>>(inData, params, blobs,
outData);
break;
case getPrecisionMask(Precision::FP32, Precision::FP16, Precision::FP16):
mul<float, ie_fp16, ie_fp16, noConversion<float>, fp16tofp32, fp32tofp16>(inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP32, Precision::FP32, Precision::FP32):
mul<float, float, float, noConversion<float>, noConversion<float>, noConversion<float>>(inData, params,
blobs, outData);
break;
default:
THROW_IE_EXCEPTION << "Unsupported precision!";
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
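The large switch above relies on packing three precisions into one integer so that each (input0, input1, output) combination becomes a single case label. A hedged sketch of that packing trick — the precisionMask below is an illustrative stand-in, not the IE getPrecisionMask helper:

// Illustrative sketch only: fold three enum values into one switchable constant.
#include <cstdint>
#include <stdexcept>

enum class Prec : uint8_t { U8 = 1, I32 = 2, I64 = 3, FP16 = 4, FP32 = 5 };

constexpr uint32_t precisionMask(Prec a, Prec b, Prec c) {
    return (static_cast<uint32_t>(a) << 16) | (static_cast<uint32_t>(b) << 8) |
           static_cast<uint32_t>(c);
}

const char* mulStrategy(Prec in0, Prec in1, Prec out) {
    switch (precisionMask(in0, in1, out)) {
    case precisionMask(Prec::FP16, Prec::FP16, Prec::FP16):
        return "convert to FP32, multiply, round back to FP16";
    case precisionMask(Prec::FP32, Prec::FP32, Prec::FP32):
        return "multiply directly in FP32";
    default:
        throw std::runtime_error("unsupported precision combination");
    }
}

Because precisionMask is constexpr, each case label is a compile-time constant and the whole dispatch stays a flat switch.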

View File

@@ -1,140 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_const_infer_impl.hpp"
#include "precision_utils.h"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Implementation of Const inference for OneHot layer
*/
class OneHotConstInfer : public ConstInferImpl {
public:
explicit OneHotConstInfer(const std::string& type): ConstInferImpl(type) {}
template <typename T>
void inferImplBody(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
std::vector<Blob::Ptr>& outData) {
OneHotLayer layer(LayerParams {});
layer.params = params;
layer.type = _type;
_validator->parseParams(&layer);
auto src_dims = inData[0]->getTensorDesc().getDims();
const auto* src_data = inData[0]->cbuffer().as<const T*>();
auto* dst_data = outData[0]->buffer().as<T*>();
std::size_t prefix_size = 1;
auto input_dims = inData[0]->getTensorDesc().getDims();
std::size_t actual_axis = (layer.axis == -1) ? src_dims.size() : layer.axis;
for (size_t i = 0; i < actual_axis; ++i) prefix_size *= input_dims[i];
std::size_t suffix_size = inData[0]->size() / prefix_size;
std::size_t dst_offset = 0;
for (std::size_t prefix_idx = 0; prefix_idx < prefix_size; ++prefix_idx) {
for (std::size_t depth_idx = 0; depth_idx < layer.depth; ++depth_idx) {
for (std::size_t suffix_idx = 0; suffix_idx < suffix_size; suffix_idx++) {
auto src_index = prefix_idx * suffix_size + suffix_idx;
auto v = static_cast<std::size_t>(src_data[src_index]);
dst_data[dst_offset++] = (v == depth_idx) ? layer.on_value : layer.off_value;
}
}
}
}
void inferImplBody_fp16(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
std::vector<Blob::Ptr>& outData) {
OneHotLayer layer(LayerParams {});
layer.params = params;
layer.type = _type;
_validator->parseParams(&layer);
auto src_dims = inData[0]->getTensorDesc().getDims();
const auto* src_data = inData[0]->cbuffer().as<const int16_t*>();
auto* dst_data = outData[0]->buffer().as<int16_t*>();
std::size_t prefix_size = 1;
auto input_dims = inData[0]->getTensorDesc().getDims();
std::size_t actual_axis = (layer.axis == -1) ? src_dims.size() : layer.axis;
for (size_t i = 0; i < actual_axis; ++i) prefix_size *= input_dims[i];
std::size_t suffix_size = inData[0]->size() / prefix_size;
int16_t val_on = PrecisionUtils::f32tof16(layer.on_value);
int16_t val_off = PrecisionUtils::f32tof16(layer.off_value);
std::size_t dst_offset = 0;
for (std::size_t prefix_idx = 0; prefix_idx < prefix_size; ++prefix_idx) {
for (std::size_t depth_idx = 0; depth_idx < layer.depth; ++depth_idx) {
for (std::size_t suffix_idx = 0; suffix_idx < suffix_size; suffix_idx++) {
auto src_index = prefix_idx * suffix_size + suffix_idx;
auto v = static_cast<std::size_t>(src_data[src_index]);
dst_data[dst_offset++] = (v == depth_idx) ? val_on : val_off;
}
}
}
}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
// For IR v7, OneHot precision is specified by the precision of the output tensors.
// The v10 OneHot spec will take the on/off values as input tensors, so
// the overall layer precision will match the precision of the "on_value" input.
auto precision = outData[0]->getTensorDesc().getPrecision();
switch (precision) {
case Precision::FP32:
inferImplBody<PrecisionTrait<Precision::FP32>::value_type>(inData, params, outData);
break;
case Precision::FP16:
inferImplBody_fp16(inData, params, outData);
break;
case Precision::Q78:
inferImplBody<PrecisionTrait<Precision::Q78>::value_type>(inData, params, outData);
break;
case Precision::I16:
inferImplBody<PrecisionTrait<Precision::I16>::value_type>(inData, params, outData);
break;
case Precision::U8:
inferImplBody<PrecisionTrait<Precision::U8>::value_type>(inData, params, outData);
break;
case Precision::I8:
inferImplBody<PrecisionTrait<Precision::I8>::value_type>(inData, params, outData);
break;
case Precision::U16:
inferImplBody<PrecisionTrait<Precision::U16>::value_type>(inData, params, outData);
break;
case Precision::I32:
inferImplBody<PrecisionTrait<Precision::I32>::value_type>(inData, params, outData);
break;
case Precision::I64:
inferImplBody<PrecisionTrait<Precision::I64>::value_type>(inData, params, outData);
break;
case Precision::U64:
inferImplBody<PrecisionTrait<Precision::U64>::value_type>(inData, params, outData);
break;
case Precision::BOOL:
inferImplBody<PrecisionTrait<Precision::BOOL>::value_type>(inData, params, outData);
break;
default:
THROW_IE_EXCEPTION << "OneHot const inference: Unsupported precision " << precision.name();
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
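Both bodies above use the same index arithmetic: the input is flattened to [prefix, suffix] around the insertion axis, and the output to [prefix, depth, suffix]. A compact sketch of just that loop nest, with illustrative types:

// Illustrative sketch only: one-hot via the prefix/depth/suffix decomposition.
#include <cstddef>
#include <vector>

std::vector<float> oneHot(const std::vector<int>& src, size_t prefix, size_t suffix,
                          size_t depth, float onValue, float offValue) {
    std::vector<float> dst(prefix * depth * suffix);
    size_t pos = 0;
    for (size_t p = 0; p < prefix; ++p)
        for (size_t d = 0; d < depth; ++d)
            for (size_t s = 0; s < suffix; ++s) {
                auto v = static_cast<size_t>(src[p * suffix + s]);
                dst[pos++] = (v == d) ? onValue : offValue;
            }
    return dst;
}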

View File

@ -1,70 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <cmath>
#include <cstring>
#include <ie_algorithm.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "precision_utils.h"
#include "ie_const_infer_impl.hpp"
#include "ie_parallel.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Implementation of Const inference for Permute layer
*/
class PermuteConstInfer : public ConstInferImpl {
public:
explicit PermuteConstInfer(const std::string& type): ConstInferImpl(type) {}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
LayerParams lp {};
CNNLayer layer(lp);
layer.params = params;
if (outData.empty()) THROW_IE_EXCEPTION << "Incorrect number of input/output edges!";
if (inData.size() != 1) THROW_IE_EXCEPTION << "Incorrect number of input edges!";
if (inData[0]->getTensorDesc().getPrecision() != outData[0]->getTensorDesc().getPrecision()) {
THROW_IE_EXCEPTION << "Input and output tensors should have same precision!";
}
std::vector<size_t> order;
std::vector<int> layerOrder = layer.GetParamAsInts("order");
for (auto ord : layerOrder) order.push_back(static_cast<size_t>(ord));
TensorDesc srcDesc = inData[0]->getTensorDesc();
SizeVector& dims = srcDesc.getDims();
InferenceEngine::SizeVector orderedDims;
for (auto ord : order) {
orderedDims.push_back(dims[ord]);
}
TensorDesc dstDesc(inData[0]->getTensorDesc().getPrecision(), dims, {orderedDims, order});
size_t dataSize = inData[0]->size();
const auto* src_data = inData[0]->cbuffer().as<const uint8_t*>();
auto* dst_data = outData[0]->buffer().as<uint8_t*>();
parallel_for(dataSize, [&](size_t i) {
memcpy(dst_data + dstDesc.offset(i) * outData[0]->element_size(),
src_data + srcDesc.offset(i) * inData[0]->element_size(), inData[0]->element_size());
});
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
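The TensorDesc construction above lets the blocking descriptor compute permuted offsets. Without IE types, the same transpose can be written with explicit row-major strides; a sketch under that assumption (illustrative names):

// Illustrative sketch only: transpose via row-major strides; output dim k is
// input dim order[k].
#include <cstddef>
#include <vector>

std::vector<float> permute(const std::vector<float>& src,
                           const std::vector<size_t>& dims,
                           const std::vector<size_t>& order) {
    const size_t rank = dims.size();
    if (rank == 0) return src;
    std::vector<size_t> srcStrides(rank, 1), outDims(rank);
    for (size_t i = rank - 1; i > 0; --i) srcStrides[i - 1] = srcStrides[i] * dims[i];
    for (size_t k = 0; k < rank; ++k) outDims[k] = dims[order[k]];
    std::vector<float> dst(src.size());
    std::vector<size_t> idx(rank, 0);  // multi-index over the output
    for (size_t flat = 0; flat < dst.size(); ++flat) {
        size_t srcOffset = 0;
        for (size_t k = 0; k < rank; ++k) srcOffset += idx[k] * srcStrides[order[k]];
        dst[flat] = src[srcOffset];
        for (size_t k = rank; k-- > 0;) {  // increment the output multi-index
            if (++idx[k] < outDims[k]) break;
            idx[k] = 0;
        }
    }
    return dst;
}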

View File

@@ -1,102 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <precision_utils.h>
#include <cmath>
#include <ie_precision.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "broadcast_offset.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
class PowConstInfer : public ConstInferImpl {
public:
explicit PowConstInfer(const std::string& type): ConstInferImpl(type) {}
struct fp16tofp32 {
inline float operator()(ie_fp16 value) {
return static_cast<float>(PrecisionUtils::f16tof32(value));
}
};
struct fp32tofp16 {
inline ie_fp16 operator()(float value) {
return PrecisionUtils::f32tof16(value);
}
};
template <typename dataType>
struct noConversion {
inline dataType operator()(dataType value) {
return value;
}
};
template <typename inDatatype1, typename inDatatype2, typename outDatatype, class ConversionInData1,
class ConversionInData2, class ConversionOutData>
void pow(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) {
auto* firstBlobBuffer = inData[0]->cbuffer().as<inDatatype1*>();
auto* secondBlobBuffer = inData[1]->cbuffer().as<inDatatype2*>();
if (!firstBlobBuffer || !secondBlobBuffer) {
THROW_IE_EXCEPTION << "empty input data";
}
auto outBlob = *outData.begin();
auto* outBuffer = outBlob->buffer().as<outDatatype*>();
if (!outBuffer) THROW_IE_EXCEPTION << "empty output data";
BroadcastOffset outOff(outBlob->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
BroadcastOffset inOff1(inData[0]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
BroadcastOffset inOff2(inData[1]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
for (size_t i = 0; i < outBlob->size(); i++) {
SizeVector offsetDims = outOff.offset_dims(i);
outBuffer[outOff.offset(offsetDims)] =
ConversionOutData()(std::pow(ConversionInData1()(firstBlobBuffer[inOff1.offset(offsetDims)]),
ConversionInData2()(secondBlobBuffer[inOff2.offset(offsetDims)])));
}
}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
size_t numInputs = inData.size();
if (numInputs != 2)
THROW_IE_EXCEPTION << "Unsupported number of inputs: " << numInputs << ". Only 2 inputs are supported";
auto compare =
getPrecisionMask(inData[0]->getTensorDesc().getPrecision(), inData[1]->getTensorDesc().getPrecision(),
outData[0]->getTensorDesc().getPrecision());
switch (compare) {
case getPrecisionMask(Precision::FP32, Precision::FP32, Precision::FP32):
pow<float, float, float, noConversion<float>, noConversion<float>, noConversion<float>>(inData, params,
blobs, outData);
break;
case getPrecisionMask(Precision::I32, Precision::I32, Precision::FP32):
pow<int32_t, int32_t, float, noConversion<int32_t>, noConversion<int32_t>, noConversion<float>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::FP16, Precision::FP16, Precision::FP16):
pow<ie_fp16, ie_fp16, ie_fp16, noConversion<ie_fp16>, noConversion<ie_fp16>, noConversion<ie_fp16>>(
inData, params, blobs, outData);
break;
case getPrecisionMask(Precision::I32, Precision::I32, Precision::FP16):
pow<int32_t, int32_t, ie_fp16, noConversion<int32_t>, noConversion<int32_t>, fp32tofp16>(inData, params,
blobs, outData);
break;
default:
THROW_IE_EXCEPTION << "Unsupported combination of input/output precisions!";
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine
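Both Mul and Pow lean on the BroadcastOffset helper (from broadcast_offset.hpp, not shown in this diff) to map an output multi-index back into possibly smaller inputs. A hedged sketch of the underlying rule — broadcast (size-1) dimensions contribute stride 0 — with illustrative names:

// Illustrative sketch only: numpy-style broadcasting for a row-major layout.
#include <cstddef>
#include <vector>

// Map a multi-index over the (larger) output shape to an offset in an input
// whose shape is right-aligned against it; assumes inDims.size() <= outIndex.size().
size_t broadcastOffset(const std::vector<size_t>& outIndex,
                       const std::vector<size_t>& inDims) {
    const size_t pad = outIndex.size() - inDims.size();
    size_t offset = 0, stride = 1;
    for (size_t k = inDims.size(); k-- > 0;) {
        const size_t dim = inDims[k];
        if (dim != 1) offset += outIndex[k + pad] * stride;  // size-1 dims add nothing
        stride *= dim;
    }
    return offset;
}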

View File

@@ -1,58 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <cmath>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_const_infer_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
* @brief Implementation of Const inference for Power layer
*/
class PowerConstInfer : public ConstInferImpl {
public:
explicit PowerConstInfer(const std::string& type): ConstInferImpl(type) {}
void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
LayerParams lp {};
PowerLayer layer(lp);
layer.params = params;
layer.type = _type;
_validator->parseParams(&layer);
float scale = layer.scale;
float power = layer.power;
float shift = layer.offset;
// TODO: check for access and sizes
const auto* input = inData[0]->cbuffer().as<const float*>();
auto* output = outData[0]->buffer().as<float*>();
size_t dataSize = inData[0]->size();
if (power == 1.0f) {
for (size_t i = 0; i < dataSize; i++) {
output[i] = input[i] * scale + shift;
}
} else {
for (size_t i = 0; i < dataSize; i++) {
output[i] = std::pow(input[i] * scale + shift, power);
}
}
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

Some files were not shown because too many files have changed in this diff.