Deserialization implementation for Constant op. (#2573)

* Deserialization implementation for Constant op.

* Add Cont op implementation for NodeConverter.

* Refactor functional tests, remove Const op from layer and node creators.

* Remove Constant op from NodeConverter.

* Refactor smoke test.

* Correct parameter in addBlob function.

* Update Constant op representation for myriad functional tests.

* Correct Const op representation for TopK model test.

* Add changes according to review comments.

* Refactor constant test.

* Add review changes.

* Add custom op for testing on_adapter(void*).

* Correct library path.

* Correct test fixture class for custom op test.

* Apply review remarks, remove creators from DeconvolutionIE.

* Refactored test ReadCustomAddConstNetwork, corrected on_adapter().

* Remove on_adapter() for CoordinateDiff which is specific to Convolution op.

* Apply review remarks.

* Apply review remarks.

* Correct Const op in non_max_suppression tests.

* Resolve conflicts after rebase.
This commit is contained in:
Szymon Durawa 2020-11-23 13:41:59 +01:00 committed by GitHub
parent a555efe287
commit 1c7cfb7c7d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
54 changed files with 535 additions and 426 deletions

View File

@ -11,6 +11,9 @@
#include <vector>
#include <legacy/cnn_network_impl.hpp>
#include <ie_ngraph_utils.hpp>
#include "blob_factory.hpp"
#include <ngraph/op/constant.hpp>
namespace InferenceEngine {
namespace details {
@ -25,6 +28,74 @@ convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function>& gr
CNNNetworkImpl* cnnNetworkImpl,
bool keep_constant_inputs = false);
// TODO: move ConstAllocatorWrapper class, shareWeights add addBlob into CNNLayerCreator when NodeConverter class is removed
// Allocator adapter that exposes a Constant op's payload through the
// IAllocator interface. Nothing is ever copied or owned here: alloc() hands
// out a pointer into the Constant's internal storage, and free() is a no-op,
// so the blob built on top of this wrapper shares the op's data.
class ConstAllocatorWrapper : public IAllocator {
public:
    explicit ConstAllocatorWrapper(std::shared_ptr<ngraph::op::Constant> constOp): _constOp(std::move(constOp)) {}

    void Release() noexcept override { delete this; }

    // The handle already is the usable data pointer; locking is trivial.
    void* lock(void* handle, LockOp) noexcept override { return handle; }
    void unlock(void*) noexcept override {}  // NOLINT

    // The requested size is ignored: "allocation" returns the constant's
    // existing buffer.
    void* alloc(size_t) noexcept override {
        return const_cast<void*>(_constOp->get_data_ptr());
    }
    // Ownership stays with the Constant, so there is nothing to release.
    bool free(void*) noexcept override { return true; }  // NOLINT

private:
    std::shared_ptr<ngraph::op::Constant> _constOp;
};
// Selects which blob slot addBlob() should populate on a legacy layer.
enum BlobType {
    weights,
    biases
};
// Wraps a Constant op's payload in a Blob without copying it. The blob is
// backed by ConstAllocatorWrapper, so the data is shared with (and kept alive
// by) the Constant itself.
inline Blob::Ptr shareWeights(const std::shared_ptr<ngraph::op::Constant>& constLayer) {
    if (!constLayer) THROW_IE_EXCEPTION << "Cannot share weights! Constant operation is empty!";

    const auto precision = convertPrecision(constLayer->get_element_type());
    size_t elementCount = ngraph::shape_size(constLayer->get_shape());
    // Binary precision packs 8 values per byte: round the element count up to
    // a whole number of bytes.
    constexpr size_t bitsPerByte{8};
    if (precision == Precision::BIN) {
        elementCount = (elementCount + (bitsPerByte - 1)) / bitsPerByte;
    }

    TensorDesc desc(precision, {elementCount}, Layout::C);
    Blob::Ptr blob = make_blob_with_precision(desc, std::make_shared<ConstAllocatorWrapper>(constLayer));
    // No real allocation happens — the wrapper just returns the shared pointer.
    blob->allocate();
    return blob;
}
// Attaches the data of the Constant behind `weightsNode` to `res`, filling
// both the named blobs[] entry and the typed member ("weights"/_weights or
// "biases"/_biases depending on `type`). Returns false when the node is not a
// Constant or when an unknown BlobType is supplied; true on success.
template <class T>
bool addBlob(const std::shared_ptr<ngraph::Node>& weightsNode, std::shared_ptr<T>& res, BlobType type) {
    const auto constNode = ngraph::as_type_ptr<ngraph::op::Constant>(weightsNode);
    if (!constNode) {
        return false;
    }

    const Blob::Ptr dataBlob = shareWeights(constNode);
    switch (type) {
    case weights:
        res->blobs["weights"] = dataBlob;
        res->_weights = dataBlob;
        return true;
    case biases:
        res->blobs["biases"] = dataBlob;
        res->_biases = dataBlob;
        return true;
    default:
        return false;
    }
}
} // namespace details
} // namespace InferenceEngine

View File

@ -132,6 +132,13 @@ public:
void on_adapter(const std::string& name, ::ngraph::ValueAccessor<void>& adapter) override;
// Raw (void*) attributes are stringified into the params map. Constant's
// payload is handled through a dedicated creator, so it is skipped here.
void on_adapter(const std::string& name, ::ngraph::ValueAccessor<void*>& adapter) override {
    if (std::string(node->get_type_name()) == "Constant") return;
    const auto bytes = static_cast<char*>(adapter.get_ptr());
    params[name] = std::string(bytes, adapter.size());
}
private:
std::shared_ptr<::ngraph::Node> node;
std::map<std::string, std::string> params;
@ -353,14 +360,9 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
res->params["input"] = Builder::asString(weights_shape[1]);
res->params["pad_value"] = Builder::asString(castedLayer->get_pad_value());
Builder::NodeConverter<::ngraph::op::Constant> converter;
const auto weightsNode = castedLayer->input(1).get_source_output().get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
}
InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights);
return res;
});
@ -439,20 +441,11 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
}
res->params["kernel"] = kernel_value;
Builder::NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = node->input_value(1).get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
const auto weightsNode = node->input_value(1).get_node_shared_ptr();
if (InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights)) {
if (node->inputs().size() == 3) {
const auto biasNode = node->input_value(2).get_node_shared_ptr();
if (converter.canCreate(biasNode)) {
const auto& bias = converter.createLayer(biasNode);
res->blobs["biases"] = bias->blobs["custom"];
res->_biases = bias->blobs["custom"];
}
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);
}
}
return res;
@ -504,20 +497,12 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
details::convertPrecision(node->get_output_element_type(0))};
auto res = std::make_shared<LSTMCell>(attrs);
res->params = params;
Builder::NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = node->input_value(3).get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
}
InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights);
const auto biasNode = node->input_value(4).get_node_shared_ptr();
if (converter.canCreate(biasNode)) {
const auto& bias = converter.createLayer(biasNode);
res->blobs["biases"] = bias->blobs["custom"];
res->_biases = bias->blobs["custom"];
}
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);
return res;
});
@ -528,20 +513,12 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
auto res = std::make_shared<RNNCell>(attrs);
res->params = params;
Builder::NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = node->input_value(2).get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
}
InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights);
const auto biasNode = node->input_value(3).get_node_shared_ptr();
if (converter.canCreate(biasNode)) {
const auto& bias = converter.createLayer(biasNode);
res->blobs["biases"] = bias->blobs["custom"];
res->_biases = bias->blobs["custom"];
}
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);
return res;
});
@ -552,20 +529,12 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
auto res = std::make_shared<GRUCell>(attrs);
res->params = params;
Builder::NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = node->input_value(2).get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
}
InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights);
const auto biasNode = node->input_value(3).get_node_shared_ptr();
if (converter.canCreate(biasNode)) {
const auto& bias = converter.createLayer(biasNode);
res->blobs["biases"] = bias->blobs["custom"];
res->_biases = bias->blobs["custom"];
}
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);
return res;
});
@ -758,20 +727,12 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
res->cellType = RNNSequenceLayer::CellType::GRU_LBR;
}
Builder::NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = node->input_value(3).get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
}
InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights);
const auto biasNode = node->input_value(4).get_node_shared_ptr();
if (converter.canCreate(biasNode)) {
const auto& bias = converter.createLayer(biasNode);
res->blobs["biases"] = bias->blobs["custom"];
res->_biases = bias->blobs["custom"];
}
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);
return res;
});
@ -792,20 +753,12 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
else
res->params["direction"] = "Bidirectional";
Builder::NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = node->input_value(3).get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
}
InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights);
const auto biasNode = node->input_value(4).get_node_shared_ptr();
if (converter.canCreate(biasNode)) {
const auto& bias = converter.createLayer(biasNode);
res->blobs["biases"] = bias->blobs["custom"];
res->_biases = bias->blobs["custom"];
}
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);
return res;
});
@ -826,20 +779,12 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
else
res->params["direction"] = "Bidirectional";
Builder::NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = node->input_value(4).get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto &weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
}
InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights);
const auto biasNode = node->input_value(5).get_node_shared_ptr();
if (converter.canCreate(biasNode)) {
const auto &bias = converter.createLayer(biasNode);
res->blobs["biases"] = bias->blobs["custom"];
res->_biases = bias->blobs["custom"];
}
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);
return res;
});
@ -893,6 +838,17 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
res->params["keep_dims"] = reduce_node->get_keep_dims() ? "True" : "False";
return res;
});
addSpecificCreator({"Constant"}, [](const std::shared_ptr<::ngraph::Node>& node, const std::map<std::string, std::string>& params) -> CNNLayerPtr {
LayerParams attrs = {node->get_friendly_name(), "Const", details::convertPrecision(node->get_output_element_type(0))};
auto res = std::make_shared<InferenceEngine::CNNLayer>(attrs);
auto castedLayer = ngraph::as_type_ptr<ngraph::op::Constant>(node);
if (!res) THROW_IE_EXCEPTION << "Cannot get " << attrs.type << " layer " << attrs.name;
res->blobs["custom"] = InferenceEngine::details::shareWeights(castedLayer);
return res;
});
}
CNNLayerPtr InferenceEngine::details::CNNLayerCreator::create() {
@ -922,7 +878,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
const static std::vector<std::shared_ptr<Builder::INodeConverter>> convertors = {
std::make_shared<Builder::NodeConverter<::ngraph::op::v1::AvgPool>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::Clamp>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::Constant>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::ConvolutionIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::CropIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::Convert>>(),

View File

@ -342,18 +342,6 @@ CNNLayer::Ptr NodeConverter<ngraph::opset5::Loop>::createLayer(const std::shared
return res;
}
// Converts an ngraph Constant node into a legacy "Const" CNNLayer whose data
// is exposed via the "custom" blob (shared with the op, not copied).
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::Constant>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
    LayerParams attrs = {layer->get_friendly_name(), "Const",
                         details::convertPrecision(layer->get_output_element_type(0))};
    const auto constOp = ngraph::as_type_ptr<ngraph::op::Constant>(layer);
    if (!constOp) THROW_IE_EXCEPTION << "Cannot get " << attrs.type << " layer " << attrs.name;

    auto result = std::make_shared<InferenceEngine::CNNLayer>(attrs);
    result->blobs["custom"] = shareWeights(constOp);
    return result;
}
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::Convert>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
LayerParams params = {layer->get_friendly_name(), "Convert",
@ -732,22 +720,14 @@ CNNLayer::Ptr NodeConverter<ngraph::op::ConvolutionIE>::createLayer(
keep_constants = attr->get();
}
NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = castedLayer->input_value(1).get_node_shared_ptr();
if (!keep_constants && converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
const auto weightsNode = castedLayer->input_value(1).get_node_shared_ptr();
if (!keep_constants && InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights)) {
if (castedLayer->inputs().size() == 3) {
const auto biasNode = castedLayer->input_value(2).get_node_shared_ptr();
if (converter.canCreate(biasNode)) {
const auto& bias = converter.createLayer(biasNode);
res->blobs["biases"] = bias->blobs["custom"];
res->_biases = bias->blobs["custom"];
}
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);
}
}
return res;
}
@ -816,13 +796,9 @@ CNNLayer::Ptr NodeConverter<ngraph::op::v1::DeformableConvolution>::createLayer(
res->params["group"] = asString(castedLayer->get_group());
res->params["deformable_group"] = asString(castedLayer->get_deformable_group());
NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = castedLayer->input_value(2).get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
}
InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights);
return res;
}
@ -1031,7 +1007,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::PRelu>::createLayer(const std::shared_pt
dataShape = {dataShape[1]};
}
Blob::Ptr dataBlb = shareWeights(const_weights);
Blob::Ptr dataBlb = InferenceEngine::details::shareWeights(const_weights);
res->blobs["weights"] = dataBlb;
res->_weights = dataBlb;
@ -1217,20 +1193,10 @@ CNNLayer::Ptr NodeConverter<ngraph::op::ScaleShiftIE>::createLayer(const std::sh
details::convertPrecision(layer->get_output_element_type(0))};
auto res = std::make_shared<InferenceEngine::ScaleShiftLayer>(params);
NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = layer->input_value(1).get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto& weightsLayer = converter.createLayer(weightsNode);
res->blobs["weights"] = weightsLayer->blobs["custom"];
res->_weights = weightsLayer->blobs["custom"];
}
InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights);
const auto biasNode = layer->input_value(2).get_node_shared_ptr();
if (converter.canCreate(biasNode)) {
const auto& bias = converter.createLayer(biasNode);
res->blobs["biases"] = bias->blobs["custom"];
res->_biases = bias->blobs["custom"];
}
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);
return res;
}
@ -1683,21 +1649,12 @@ CNNLayer::Ptr NodeConverter<ngraph::op::FullyConnected>::createLayer(const std::
keep_constants = attr->get();
}
NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = layer->input_value(1).get_node_shared_ptr();
if (!keep_constants && converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
res->_weights = weights->blobs["custom"];
if (!keep_constants && InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights)) {
const auto biasNode = layer->input_value(2).get_node_shared_ptr();
if (converter.canCreate(biasNode)) {
const auto& bias = converter.createLayer(biasNode);
res->blobs["biases"] = bias->blobs["custom"];
res->_biases = bias->blobs["custom"];
}
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);
}
return res;
}
@ -1821,13 +1778,10 @@ CNNLayer::Ptr NodeConverter<ngraph::op::NormalizeIE>::createLayer(const std::sha
res->params["channel_shared"] = castedLayer->get_channel_shared() ? "1" : "0";
res->params["across_spatial"] = castedLayer->get_across_spatial() ? "1" : "0";
NodeConverter<ngraph::op::Constant> converter;
const auto weightsNode = castedLayer->input_value(1).get_node_shared_ptr();
if (converter.canCreate(weightsNode)) {
const auto& weights = converter.createLayer(weightsNode);
res->blobs["weights"] = weights->blobs["custom"];
} else {
THROW_IE_EXCEPTION << "Cannot convert weight node for NormalizeIE op";
const auto weightsNode = layer->input_value(1).get_node_shared_ptr();
if (auto constWeights = ngraph::as_type_ptr<ngraph::op::Constant>(weightsNode)) {
Blob::Ptr dataBlob = InferenceEngine::details::shareWeights(constWeights);
res->blobs["weights"] = dataBlob;
}
return res;

View File

@ -64,50 +64,6 @@ public:
auto castedPtr = ngraph::as_type_ptr<NGT>(node);
return castedPtr != nullptr;
}
private:
// Non-owning IAllocator view over a Constant op's payload: alloc() returns
// the op's data pointer directly, and free() never releases anything — the
// Constant keeps ownership of the buffer for the blob's whole lifetime.
class ConstAllocatorWrapper : public IAllocator {
public:
    explicit ConstAllocatorWrapper(std::shared_ptr<ngraph::op::Constant> constOp): _constOp(std::move(constOp)) {}

    void Release() noexcept override { delete this; }

    // The handle itself is already the data pointer.
    void* lock(void* handle, LockOp) noexcept override { return handle; }
    void unlock(void*) noexcept override {}  // NOLINT

    // Size is ignored: hand out the constant's storage as-is.
    void* alloc(size_t) noexcept override {
        return const_cast<void*>(_constOp->get_data_ptr());
    }
    // Nothing to free — ownership stays with the Constant.
    bool free(void*) noexcept override { return true; }  // NOLINT

private:
    std::shared_ptr<ngraph::op::Constant> _constOp;
};
// Builds a Blob that aliases the Constant's data (no copy). For BIN precision
// the element count is converted into a byte count, rounding up.
Blob::Ptr shareWeights(const std::shared_ptr<ngraph::op::Constant>& constLayer) const {
    if (!constLayer) THROW_IE_EXCEPTION << "Cannot share weights! Constant operation is empty!";

    const auto precision = details::convertPrecision(constLayer->get_element_type());
    size_t count = ngraph::shape_size(constLayer->get_shape());
    if (precision == Precision::BIN) {
        count = (count + 7) / 8;  // 8 packed bits per byte, rounded up
    }

    TensorDesc desc(precision, {count}, Layout::C);
    auto blob = make_blob_with_precision(desc, std::make_shared<ConstAllocatorWrapper>(constLayer));
    blob->allocate();  // no real allocation — the wrapper shares the op's data
    return blob;
}
};
} // namespace Builder

View File

@ -398,7 +398,6 @@ std::shared_ptr<ngraph::Node> V10Parser::createNode(const std::vector<ngraph::Ou
static std::vector<std::shared_ptr<LayerBaseCreator>> creators = {
std::make_shared<LayerCreator<ngraph::op::v1::AvgPool>>("AvgPool"),
std::make_shared<LayerCreator<ngraph::op::Clamp>>("Clamp"),
std::make_shared<LayerCreator<ngraph::op::Constant>>("Const"),
std::make_shared<LayerCreator<ngraph::op::Convert>>("Convert"),
std::make_shared<LayerCreator<ngraph::op::CTCGreedyDecoder>>("CTCGreedyDecoder"),
std::make_shared<LayerCreator<ngraph::op::v1::DeformableConvolution>>("DeformableConvolution"),
@ -504,15 +503,18 @@ std::shared_ptr<ngraph::Node> V10Parser::createNode(const std::vector<ngraph::Ou
// Try to create operation from loaded opsets
if (!ngraphNode && opsets.count(params.version)) {
auto opset = opsets.at(params.version);
if (!opset.contains_type(params.type)) {
THROW_IE_EXCEPTION << "Opset " << params.version << " doesn't contain the operation with type: " << params.type;
std::string type = params.type;
if (type == "Const") {
type = "Constant";
}
if (!opset.contains_type(type)) {
THROW_IE_EXCEPTION << "Opset " << params.version << " doesn't contain the operation with type: " << type;
}
ngraphNode = std::shared_ptr<ngraph::Node>(opset.create(params.type));
ngraphNode = std::shared_ptr<ngraph::Node>(opset.create(type));
ngraphNode->set_friendly_name(params.name);
ngraphNode->set_arguments(inputs);
XmlDeserializer visitor(node);
XmlDeserializer visitor(node, weights);
if (ngraphNode->visit_attributes(visitor))
ngraphNode->constructor_validate_and_infer_types();
}
@ -1237,44 +1239,6 @@ std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::v1::Broadcast>
THROW_IE_EXCEPTION << "Invalid number of inputs: " << layerParsePrms.inputPorts.size();
}
// Constant layer
// Builds an ngraph Constant from IR: reads the <data offset=... size=.../>
// attributes and wraps the corresponding slice of the external weights blob
// without copying it.
template <>
std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::Constant>::createLayer(
const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights,
const GenericLayerParams& layerParsePrms) {
// A Constant has no inputs.
checkParameters(inputs, layerParsePrms, 0);
pugi::xml_node dn = node.child("data");
if (dn.empty())
THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name;
// Location of this constant's payload inside the .bin file.
size_t offset = GetUInt64Attr(dn, "offset");
size_t size = GetUInt64Attr(dn, "size");
size_t length = weights->byteSize();
if (!length)
THROW_IE_EXCEPTION << "Cannot read network! The model requires weights data! "
<< "Bin file cannot be found! Please specify the path to bin file.";
// The referenced range must lie entirely within the weights blob.
if (static_cast<size_t>(length) < offset + size)
THROW_IE_EXCEPTION << "Cannot create " << getType() << " layer with name: " << layerParsePrms.name
<< ". Layer has incorrect weights!";
auto port = layerParsePrms.outputPorts[0];
ngraph::Shape shape(port.dims);
ngraph::element::Type el_type(port.precision);
// `size` must cover shape_size * bitwidth bits rounded up to whole bytes;
// the division is fractional for sub-byte element types, hence the ceil.
if (size < std::ceil(ngraph::shape_size(shape) * el_type.bitwidth() / 8.f))
THROW_IE_EXCEPTION << "Cannot create Constant op " << layerParsePrms.name << " size attribute and shape size are inconsistent!";
char* data = weights->cbuffer().as<char*>() + offset;
// SharedBuffer keeps the weights blob alive as long as the Constant uses it.
using SharedBuffer = ngraph::runtime::SharedBuffer<const Blob::CPtr>;
auto buffer = std::make_shared<SharedBuffer>(data, size, weights);
auto constant = std::make_shared<ngraph::op::Constant>(port.precision, shape, buffer);
return constant;
}
// Power layer
template <>
std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::v1::Power>::createLayer(

View File

@ -182,7 +182,7 @@ private:
class XmlDeserializer : public ngraph::AttributeVisitor {
public:
explicit XmlDeserializer(const pugi::xml_node& node): node(node) {}
explicit XmlDeserializer(const pugi::xml_node& node, const Blob::CPtr& weights): node(node), weights(weights) {}
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::string>& value) override {
std::string val;
if (!getStrAttribute(node.child("data"), name, val)) return;
@ -243,7 +243,7 @@ private:
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::TopKMode>>(&adapter)) {
if (!getStrAttribute(node.child("data"), name, val)) return;
static_cast<ngraph::op::TopKMode&>(*a) = ngraph::as_enum<ngraph::op::TopKMode>(val);
} else {
} else {
THROW_IE_EXCEPTION << "Error IR reading. Attribute adapter can not be found for " << name
<< " parameter";
}
@ -256,6 +256,43 @@ private:
stringToType<double>(val, value);
adapter.set(value);
}
// Deserializes raw (void*) attributes. Two sources are supported:
//  1) the attribute is present as a string in the <data> node — copy it into
//     the adapter's buffer, truncated to the adapter's size;
//  2) for "Const" layers, the "value" attribute lives in the external weights
//     (.bin) blob at <data offset=... size=.../> — validate the range against
//     the blob and the declared shape/element_type, then memcpy it in.
// Fix vs. original: corrected the misspelled error message ("attrtibutes").
void on_adapter(const std::string& name, ngraph::ValueAccessor<void*>& adapter) override {
    std::string value;
    pugi::xml_node dn = node.child("data");
    auto type = XMLParseUtils::GetStrAttr(node, "type");

    if (dn.empty())
        THROW_IE_EXCEPTION << "No attributes defined for " << type << " op!";

    if (getStrAttribute(dn, name, value)) {
        // Plain string attribute: copy at most adapter.size() bytes.
        auto data = static_cast<char*>(adapter.get_ptr());
        size_t length = std::min(value.size(), adapter.size());
        value.copy(data, length);
    } else if (name == "value" && type == "Const") {
        std::vector<int64_t> shape;
        std::string el_type_str;

        // Payload location within the .bin file.
        size_t offset = XMLParseUtils::GetUInt64Attr(dn, "offset");
        size_t size = XMLParseUtils::GetUInt64Attr(dn, "size");
        if (!getStrAttribute(dn, "element_type", el_type_str)) return;
        if (!getParameters<int64_t>(dn, "shape", shape)) return;

        ngraph::element::Type el_type = details::convertPrecision(el_type_str);

        size_t length = weights->byteSize();
        if (!length)
            THROW_IE_EXCEPTION << "Empty weights data in bin file or bin file cannot be found!";
        if (length < offset + size)
            THROW_IE_EXCEPTION << "Incorrect weights in bin file!";
        // `size` must cover all elements; bitwidth / 8 may be fractional for
        // sub-byte element types, hence the ceil.
        if (size < std::ceil(ngraph::shape_size(shape) * el_type.bitwidth() / 8.f))
            THROW_IE_EXCEPTION << "Attribute and shape size are inconsistent for " << type << " op!";

        auto data = static_cast<char*>(adapter.get_ptr());
        char* weights_data = weights->cbuffer().as<char*>() + offset;
        std::memcpy(data, weights_data, size);
    }
}
void on_adapter(const std::string& name, ngraph::ValueAccessor<int64_t>& adapter) override {
std::string val;
if (!getStrAttribute(node.child("data"), name, val))
@ -285,6 +322,7 @@ private:
private:
const pugi::xml_node node;
const Blob::CPtr& weights;
bool getStrAttribute(const pugi::xml_node& node, const std::string& name, std::string& value) {
if (!node) return false;

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadBatchNormInferenceNetwork) {
</output>
</layer>
<layer id="11" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="36" />
<data element_type="f32" offset="0" shape="3,3,1,1" size="36"/>
<output>
<port id="0" precision="FP32">
<dim>3</dim>
@ -56,7 +56,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadBatchNormInferenceNetwork) {
</output>
</layer>
<layer id="1" name="a" type="Const" version="opset1">
<data offset="0" size="12"/>
<data element_type="f32" offset="0" shape="3" size="12"/>
<output>
<port id="0" precision="FP32">
<dim>3</dim>
@ -64,7 +64,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadBatchNormInferenceNetwork) {
</output>
</layer>
<layer id="2" name="a1" type="Const" version="opset1">
<data offset="12" size="12"/>
<data element_type="f32" offset="12" shape="3" size="12"/>
<output>
<port id="0" precision="FP32">
<dim>3</dim>
@ -72,7 +72,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadBatchNormInferenceNetwork) {
</output>
</layer>
<layer id="3" name="a2" type="Const" version="opset1">
<data offset="24" size="12"/>
<data element_type="f32" offset="24" shape="3" size="12"/>
<output>
<port id="0" precision="FP32">
<dim>3</dim>
@ -80,7 +80,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadBatchNormInferenceNetwork) {
</output>
</layer>
<layer id="4" name="a3" type="Const" version="opset1">
<data offset="36" size="12"/>
<data element_type="f32" offset="36" shape="3" size="12"/>
<output>
<port id="0" precision="FP32">
<dim>3</dim>

View File

@ -18,7 +18,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles1) {
</output>
</layer>
<layer id="15" name="broadcast1_shape" type="Const" version="opset1">
<data offset="256" size="32"/>
<data element_type="i64" offset="256" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>
@ -173,7 +173,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles2) {
</output>
</layer>
<layer id="15" name="broadcast1_shape" type="Const" version="opset1">
<data offset="256" size="32"/>
<data element_type="i64" offset="256" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>
@ -351,7 +351,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles3) {
</output>
</layer>
<layer id="15" name="broadcast1_shape" type="Const" version="opset1">
<data offset="256" size="32"/>
<data element_type="i64" offset="256" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>
@ -462,7 +462,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles4) {
</output>
</layer>
<layer id="15" name="broadcast1_shape" type="Const" version="opset1">
<data offset="256" size="32"/>
<data element_type="i64" offset="256" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>
@ -470,7 +470,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles4) {
</output>
</layer>
<layer id="16" name="broadcast1_axes" type="Const" version="opset1">
<data offset="288" size="16"/>
<data element_type="i64" offset="288" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
@ -612,7 +612,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertBroadcastToTiles5) {
</output>
</layer>
<layer id="15" name="broadcast1_shape" type="Const" version="opset1">
<data offset="256" size="32"/>
<data element_type="i64" offset="256" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>
@ -620,7 +620,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertBroadcastToTiles5) {
</output>
</layer>
<layer id="16" name="broadcast1_axes" type="Const" version="opset1">
<data offset="288" size="16"/>
<data element_type="i64" offset="288" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>

View File

@ -13,7 +13,7 @@ TEST_F(NGraphReaderTests, ReadConstantNetwork) {
<net name="Network" version="10">
<layers>
<layer id="0" name="constant" type="Const" version="opset1">
<data offset="0" size="5808"/>
<data element_type="f32" offset="0" shape="1,3,22,22" size="5808"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>

View File

@ -21,7 +21,7 @@ TEST_F(NGraphReaderTests, ReadConvolutionNetwork) {
</output>
</layer>
<layer id="1" name="embedded_input__const" type="Const" version="opset1">
<data offset="0" size="139392"/>
<data element_type="f32" offset="0" shape="96,3,11,11" size="139392"/>
<output>
<port id="1" precision="FP32">
<dim>96</dim>

View File

@ -0,0 +1,133 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <ngraph/ngraph.hpp>
#include <legacy/ie_util_internal.hpp>
#include <common_test_utils/xml_net_builder/xml_filler.hpp>
#include "ngraph_reader_tests.hpp"
// Test-only custom op carrying a raw constant payload in an AlignedBuffer.
// Its purpose is to exercise the IR deserializer's
// on_adapter(ValueAccessor<void*>) path via the "value" attribute.
class CustomAddConst : public ngraph::op::Op {
public:
static constexpr ngraph::NodeTypeInfo type_info{"CustomAddConst", 100600};
const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
CustomAddConst() = default;
// arg: input tensor; element_type/shape describe the payload held in `data`.
CustomAddConst(const ngraph::Output<ngraph::Node>& arg, const ngraph::element::Type element_type,
const ngraph::Shape shape, const std::shared_ptr<ngraph::runtime::AlignedBuffer> data):
ngraph::op::Op({arg}),
m_element_type(element_type),
m_shape(shape),
m_data(data) {
constructor_validate_and_infer_types();
}
// Output type/shape mirror the constant attribute, not the input.
void validate_and_infer_types() override {
set_output_type(0, m_element_type, m_shape);
}
std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override {
return std::make_shared<CustomAddConst>(new_args.at(0), m_element_type, m_shape, m_data);
}
bool visit_attributes(ngraph::AttributeVisitor& visitor) override {
visitor.on_attribute("element_type", m_element_type);
visitor.on_attribute("shape", m_shape);
// During deserialization element_type/shape are known at this point but the
// buffer does not exist yet — allocate it so the visitor can fill "value"
// in place.
if (!m_data) {
m_data = std::make_shared<ngraph::runtime::AlignedBuffer>(shape_size(m_shape) * m_element_type.size(), 64);
}
visitor.on_attribute("value", m_data);
return true;
}
ngraph::Shape getShapeAttr() const { return m_shape; }
void* getDataPtr() { return (m_data ? m_data->get_ptr() : nullptr); }
private:
ngraph::element::Type m_element_type;
ngraph::Shape m_shape{};
std::shared_ptr<ngraph::runtime::AlignedBuffer> m_data;
};
// Out-of-line definition for the static constexpr member (required for
// pre-C++17 linkage rules).
constexpr ngraph::NodeTypeInfo CustomAddConst::type_info;
class CustomAddConstExtension : public InferenceEngine::IExtension {
public:
CustomAddConstExtension() {
}
void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override {}
void Release() noexcept override { delete this; }
void Unload() noexcept override {}
std::map<std::string, ngraph::OpSet> getOpSets() override {
std::map<std::string, ngraph::OpSet> opsets;
ngraph::OpSet opset;
opset.insert<CustomAddConst>();
opsets["custom_opset"] = opset;
return opsets;
}
};
// Smoke test: deserialize an IR v10 network containing the custom "CustomAddConst" op
// (registered via CustomAddConstExtension) whose raw tensor payload is stored inline in
// the "value" attribute, then verify the payload survives conversion to the legacy
// CNNNetwork representation.
TEST_F(NGraphReaderTests, ReadCustomAddConstNetwork) {
std::string model = R"V0G0N(
<net name="Network" version="10">
    <layers>
        <layer name="in1" type="Parameter" id="0" version="opset1">
            <data element_type="i32" shape="4"/>
            <output>
                <port id="0" precision="I32">
                    <dim>4</dim>
                </port>
            </output>
        </layer>
        <layer name="activation" id="1" type="CustomAddConst" version="custom_opset">
            <data element_type="i32" shape="4" value="_VALUE_"/>
            <input>
                <port id="1" precision="I32">
                    <dim>4</dim>
                </port>
            </input>
            <output>
                <port id="2" precision="I32">
                    <dim>4</dim>
                </port>
            </output>
        </layer>
        <layer name="output" type="Result" id="2" version="opset1">
            <input>
                <port id="0" precision="I32">
                    <dim>4</dim>
                </port>
            </input>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
        <edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
    </edges>
</net>
)V0G0N";
// 16 printable bytes standing in for the 4 x i32 constant payload (4 * 4 bytes = 16).
std::string expectedValue = std::string("0?|%.g6/,-{5~P1>");
// Splice the payload into the XML in place of the _VALUE_ placeholder.
REPLACE_WITH_STR(model, "_VALUE_", expectedValue);
InferenceEngine::Blob::CPtr weights;  // empty: the payload is inline in the XML, not in a bin file
InferenceEngine::Core ie;
ie.AddExtension(std::make_shared<CustomAddConstExtension>());
auto network = ie.ReadNetwork(model, weights);
IE_SUPPRESS_DEPRECATED_START
// Convert to the legacy representation; every resulting layer must keep a back-pointer
// to the originating ngraph node.
auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
for (auto it = details::CNNNetworkIterator(convertedNetwork.get()); it != details::CNNNetworkIterator(); it++) {
InferenceEngine::CNNLayerPtr layer = *it;
ASSERT_NE(nullptr, layer->getNode());
}
// The custom layer's "value" param must round-trip byte-for-byte through deserialization
// and legacy conversion.
InferenceEngine::CNNLayerPtr customAdd;
convertedNetwork->getLayerByName("activation", customAdd, nullptr);
ASSERT_EQ(expectedValue, customAdd->GetParamAsString("value"));
IE_SUPPRESS_DEPRECATED_END
}

View File

@ -21,7 +21,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadDeconvolution3DNetwork) {
</output>
</layer>
<layer id="1" name="embedded_input__const" type="Const" version="opset1">
<data offset="0" size="33554432"/>
<data element_type="f32" offset="0" shape="512,256,4,4,4" size="33554432"/>
<output>
<port id="1" precision="FP32">
<dim>512</dim>
@ -140,7 +140,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadDeconvolution2DNetwork) {
</output>
</layer>
<layer id="1" name="embedded_input__const" type="Const" version="opset1">
<data offset="0" size="8388608"/>
<data element_type="f32" offset="0" shape="512,256,4,4" size="8388608"/>
<output>
<port id="1" precision="FP32">
<dim>512</dim>

View File

@ -22,7 +22,7 @@ TEST_F(NGraphReaderTests, ReadDivideNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="3211264"/>
<data element_type="f32" offset="0" shape="1,64,112,112" size="3211264"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>

View File

@ -22,7 +22,7 @@ TEST_F(NGraphReaderTests, ReadFQNetwork) {
</output>
</layer>
<layer id="1" name="const_1" precision="FP32" type="Const" version="opset1">
<data offset="14272" size="224"/>
<data element_type="f32" offset="14272" shape="1,56,1,1" size="224"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -33,7 +33,7 @@ TEST_F(NGraphReaderTests, ReadFQNetwork) {
</output>
</layer>
<layer id="2" name="const_2" precision="FP32" type="Const" version="opset1">
<data offset="14272" size="224"/>
<data element_type="f32" offset="14272" shape="1,56,1,1" size="224"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -44,7 +44,7 @@ TEST_F(NGraphReaderTests, ReadFQNetwork) {
</output>
</layer>
<layer id="3" name="const_3" precision="FP32" type="Const" version="opset1">
<data offset="14496" size="4"/>
<data element_type="f32" offset="14496" shape="1,1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadFQNetwork) {
</output>
</layer>
<layer id="4" name="const_4" precision="FP32" type="Const" version="opset1">
<data offset="14500" size="4"/>
<data element_type="f32" offset="14500" shape="1,1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ConvBiasFusion) {
</output>
</layer>
<layer id="1" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="139392"/>
<data element_type="f32" offset="0" shape="96,3,11,11" size="139392"/>
<output>
<port id="0" precision="FP32">
<dim>96</dim>
@ -56,7 +56,7 @@ TEST_F(NGraphReaderTests, ConvBiasFusion) {
</output>
</layer>
<layer id="3" name="data_add_5451_const" type="Const" version="opset1">
<data offset="139392" size="384"/>
<data element_type="f32" offset="139392" shape="96,1,1" size="384"/>
<output>
<port id="0" precision="FP32">
<dim>96</dim>
@ -174,7 +174,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvBiasFusionFP16) {
</output>
</layer>
<layer id="1" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="69696"/>
<data element_type="f16" offset="0" shape="96,3,11,11" size="69696"/>
<output>
<port id="0" precision="FP16">
<dim>96</dim>
@ -210,7 +210,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvBiasFusionFP16) {
</output>
</layer>
<layer id="3" name="data_add_5451_const" type="Const" version="opset1">
<data offset="69696" size="192"/>
<data element_type="f16" offset="69696" shape="96,1,1" size="192"/>
<output>
<port id="0" precision="FP16">
<dim>96</dim>
@ -326,7 +326,7 @@ TEST_F(NGraphReaderTests, MatMulBiasFusionNoBroadcast) {
</output>
</layer>
<layer id="1" name="weights" type="Const" version="opset1">
<data offset="0" size="8192000"/>
<data element_type="f32" offset="0" shape="2048,1000" size="8192000"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
@ -353,7 +353,7 @@ TEST_F(NGraphReaderTests, MatMulBiasFusionNoBroadcast) {
</output>
</layer>
<layer id="3" name="b_input" type="Const" version="opset1">
<data offset="8192000" size="4000"/>
<data element_type="f32" offset="8192000" shape="1,1000" size="4000"/>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
@ -448,7 +448,7 @@ TEST_F(NGraphReaderTests, DISABLED_MatMulBiasFusion) {
</output>
</layer>
<layer id="1" name="weights" type="Const" version="opset1">
<data offset="0" size="8192000"/>
<data element_type="f32" offset="0" shape="2048,1000" size="8192000"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
@ -475,7 +475,7 @@ TEST_F(NGraphReaderTests, DISABLED_MatMulBiasFusion) {
</output>
</layer>
<layer id="3" name="b_input" type="Const" version="opset1">
<data offset="8192000" size="4000"/>
<data element_type="f32" offset="8192000" shape="1000" size="4000"/>
<output>
<port id="1" precision="FP32">
<dim>1000</dim>
@ -483,7 +483,7 @@ TEST_F(NGraphReaderTests, DISABLED_MatMulBiasFusion) {
</output>
</layer>
<layer id="4" name="b_shape" type="Const" version="opset1">
<data offset="8196000" size="8"/>
<data element_type="i64" offset="8196000" shape="2" size="8"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
@ -491,7 +491,7 @@ TEST_F(NGraphReaderTests, DISABLED_MatMulBiasFusion) {
</output>
</layer>
<layer id="5" name="b_axis" type="Const" version="opset1">
<data offset="8196008" size="16"/>
<data element_type="i64" offset="8196008" shape="4" size="16"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>

View File

@ -37,7 +37,7 @@ TEST_F(NGraphReaderTests, ReadGatherTreeNetwork) {
</output>
</layer>
<layer id="3" name="const1" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="" size="4"/>
<output>
<port id="0" precision="FP32">
</port>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadGreaterNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="3211264"/>
<data element_type="f32" offset="0" shape="1,64,112,112" size="3211264"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -149,7 +149,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadGreaterEqualNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="3211264"/>
<data element_type="f32" offset="0" shape="1,64,112,112" size="3211264"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>

View File

@ -20,13 +20,13 @@ TEST_F(NGraphReaderTests, ReadHardSigmoidNetwork) {
</output>
</layer>
<layer id="3" name="1345_const" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="" size="4"/>
<output>
<port id="1" precision="FP32" />
</output>
</layer>
<layer id="4" name="3459_const" type="Const" version="opset1">
<data offset="4" size="4"/>
<data element_type="f32" offset="4" shape="" size="4"/>
<output>
<port id="1" precision="FP32" />
</output>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadInterpolateNetwork) {
</output>
</layer>
<layer id="1" name="const1" type="Const" precision="I64" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
@ -127,7 +127,7 @@ TEST_F(NGraphReaderTests, ReadInterpolate2Network) {
</output>
</layer>
<layer id="1" name="const1" type="Const" precision="I64" version="opset1">
<data offset="0" size="32"/>
<data element_type="i64" offset="0" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>
@ -237,7 +237,7 @@ TEST_F(NGraphReaderTests, ReadInterpolate4Network) {
</output>
</layer>
<layer id="1" name="output_shape" type="Const" precision="I32" version="opset1">
<data offset="0" size="8"/>
<data element_type="i32" offset="0" shape="2" size="8"/>
<output>
<port id="1" precision="I32">
<dim>2</dim>
@ -245,7 +245,7 @@ TEST_F(NGraphReaderTests, ReadInterpolate4Network) {
</output>
</layer>
<layer id="2" name="scales" type="Const" precision="FP32" version="opset1">
<data offset="8" size="8"/>
<data element_type="f32" offset="8" shape="2" size="8"/>
<output>
<port id="1" precision="FP32">
<dim>2</dim>
@ -253,7 +253,7 @@ TEST_F(NGraphReaderTests, ReadInterpolate4Network) {
</output>
</layer>
<layer id="3" name="axes" type="Const" precision="I32" version="opset1">
<data offset="16" size="8"/>
<data element_type="i32" offset="16" shape="2" size="8"/>
<output>
<port id="1" precision="I32">
<dim>2</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadLessNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="3211264"/>
<data element_type="f32" offset="0" shape="1,64,112,112" size="3211264"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -149,7 +149,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadLessEqualNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="3211264"/>
<data element_type="f32" offset="0" shape="1,64,112,112" size="3211264"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ConvertMulAddToScaleShift) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="256"/>
<data element_type="f32" offset="0" shape="64,1,1" size="256"/>
<output>
<port id="0" precision="FP32">
<dim>64</dim>
@ -53,7 +53,7 @@ TEST_F(NGraphReaderTests, ConvertMulAddToScaleShift) {
</output>
</layer>
<layer id="3" name="broadcast2_data" type="Const" version="opset1">
<data offset="320" size="256"/>
<data element_type="f32" offset="320" shape="64,1,1" size="256"/>
<output>
<port id="0" precision="FP32">
<dim>64</dim>
@ -164,7 +164,7 @@ TEST_F(NGraphReaderTests, ConvertMulAddToPower) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="1,1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -199,7 +199,7 @@ TEST_F(NGraphReaderTests, ConvertMulAddToPower) {
</output>
</layer>
<layer id="3" name="broadcast2_data" type="Const" version="opset1">
<data offset="68" size="4"/>
<data element_type="f32" offset="68" shape="1,1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -318,7 +318,7 @@ TEST_F(NGraphReaderTests, ConvertMulToPower) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -432,7 +432,7 @@ TEST_F(NGraphReaderTests, ConvertMulToPower2) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -543,7 +543,7 @@ TEST_F(NGraphReaderTests, ConvertAddToPower) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -657,7 +657,7 @@ TEST_F(NGraphReaderTests, ConvertMulToScaleShift) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="256"/>
<data element_type="f32" offset="0" shape="64,1,1" size="256"/>
<output>
<port id="0" precision="FP32">
<dim>64</dim>
@ -765,7 +765,7 @@ TEST_F(NGraphReaderTests, ConvertAddToScaleShift) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="256"/>
<data element_type="f32" offset="0" shape="64,1,1" size="256"/>
<output>
<port id="0" precision="FP32">
<dim>64</dim>
@ -873,7 +873,7 @@ TEST_F(NGraphReaderTests, ConvertMulToEltwise) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="448"/>
<data element_type="f32" offset="0" shape="112,1" size="448"/>
<output>
<port id="0" precision="FP32">
<dim>112</dim>
@ -994,7 +994,7 @@ TEST_F(NGraphReaderTests, ConvertAddToEltwise) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="448"/>
<data element_type="f32" offset="0" shape="112,1" size="448"/>
<output>
<port id="0" precision="FP32">
<dim>112</dim>
@ -1115,7 +1115,7 @@ TEST_F(NGraphReaderTests, ReadAddNoBroadcastNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="3211264"/>
<data element_type="f32" offset="0" shape="1,64,112,112" size="3211264"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -1244,7 +1244,7 @@ TEST_F(NGraphReaderTests, ReadMultiplyNoBroadcastNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="3211264"/>
<data element_type="f32" offset="0" shape="1,64,112,112" size="3211264"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -1373,7 +1373,7 @@ TEST_F(NGraphReaderTests, RemoveAdd) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -1502,7 +1502,7 @@ TEST_F(NGraphReaderTests, RemoveMulAdd) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="1,1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -1537,7 +1537,7 @@ TEST_F(NGraphReaderTests, RemoveMulAdd) {
</output>
</layer>
<layer id="3" name="broadcast2_data" type="Const" version="opset1">
<data offset="68" size="4"/>
<data element_type="f32" offset="68" shape="1,1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -1692,7 +1692,7 @@ TEST_F(NGraphReaderTests, RemoveAdd2) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -1822,7 +1822,7 @@ TEST_F(NGraphReaderTests, RemoveAdd3) {
</output>
</layer>
<layer id="1" name="broadcast1_data" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -1855,7 +1855,7 @@ TEST_F(NGraphReaderTests, RemoveAdd3) {
</output>
</layer>
<layer id="6" name="broadcast2_data" type="Const" version="opset1">
<data offset="4" size="4"/>
<data element_type="f32" offset="4" shape="1,1,1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -2020,7 +2020,7 @@ TEST_F(NGraphReaderTests, ConvertAddToEltwise2) {
</output>
</layer>
<layer id="1" name="constant_1" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="i32" offset="0" shape="1,1,1" size="4"/>
<output>
<port id="0" precision="I32">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadLogicalAndNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="1452"/>
<data element_type="boolean" offset="0" shape="1,3,22,22" size="1452"/>
<output>
<port id="0" precision="BOOL">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadLogicalOrNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="1452"/>
<data element_type="boolean" offset="0" shape="1,3,22,22" size="1452"/>
<output>
<port id="0" precision="BOOL">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadLogicalXorNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="1452"/>
<data element_type="boolean" offset="0" shape="1,3,22,22" size="1452"/>
<output>
<port id="0" precision="BOOL">
<dim>1</dim>

View File

@ -15,7 +15,7 @@ TEST_F(NGraphReaderTests, ReadLrnNetwork) {
.getLayer();
auto data_layer = ir_builder_v10
.AddLayer("data1", "Const", {{"size", "16"}})
.AddLayer("data1", "Const", {{"element_type", "i64"}, {"offset", "0"}, {"size", "16"}, {"shape", "2"}})
.AddOutPort(Precision::ePrecision::I64, {2})
.getLayer();
@ -75,7 +75,7 @@ TEST_F(NGraphReaderTests, ReadLrnNetwork2) {
.getLayer();
auto data_layer = ir_builder_v10
.AddLayer("data1", "Const", {{"size", "8"}})
.AddLayer("data1", "Const", {{"element_type", "i64"}, {"offset", "0"}, {"size", "8"}, {"shape", "1"}})
.AddOutPort(Precision::ePrecision::I64, {1})
.getLayer();

View File

@ -18,7 +18,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork1) {
</output>
</layer>
<layer id="1" name="embedded_input__const" type="Const" version="opset1">
<data offset="0" size="8192000"/>
<data element_type="f32" offset="0" shape="2048,1000" size="8192000"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
@ -112,7 +112,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork2) {
</output>
</layer>
<layer id="1" name="embedded_input__const" type="Const" version="opset1">
<data offset="0" size="8192000"/>
<data element_type="f32" offset="0" shape="1000,2048" size="8192000"/>
<output>
<port id="1" precision="FP32">
<dim>1000</dim>
@ -207,7 +207,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork3) {
</output>
</layer>
<layer id="1" name="embedded_input__const" type="Const" version="opset1">
<data offset="0" size="8192000"/>
<data element_type="f32" offset="0" shape="1000,2048" size="8192000"/>
<output>
<port id="1" precision="FP32">
<dim>1000</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadMaximumNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="3211264"/>
<data element_type="f32" offset="0" shape="1,64,112,112" size="3211264"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>

View File

@ -48,25 +48,25 @@ TEST_F(NGraphReaderTests, ReadNonMaxSuppression5) {
</output>
</layer>
<layer id="2" name="max_output_boxes_per_class" precision="I64" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="" size="8"/>
<output>
<port id="0" precision="I64"/>
</output>
</layer>
<layer id="3" name="iou_threshold" precision="FP32" type="Const" version="opset1">
<data offset="8" size="4"/>
<data element_type="f32" offset="8" shape="" size="4"/>
<output>
<port id="0" precision="FP32"/>
</output>
</layer>
<layer id="4" name="score_threshold" precision="FP32" type="Const" version="opset1">
<data offset="12" size="4"/>
<data element_type="f32" offset="12" shape="" size="4"/>
<output>
<port id="0" precision="FP32"/>
</output>
</layer>
<layer id="5" name="soft_nms_sigma" precision="FP32" type="Const" version="opset1">
<data offset="16" size="4"/>
<data element_type="f32" offset="16" shape="" size="4"/>
<output>
<port id="0" precision="FP32"/>
</output>

View File

@ -19,19 +19,19 @@ TEST_F(NGraphReaderTests, ReadOneHotFP32) {
</output>
</layer>
<layer id="1" name="data1" precision="I64" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="" size="8"/>
<output>
<port id="0" precision="I64"/>
</output>
</layer>
<layer id="2" name="data2" precision="FP32" type="Const" version="opset1">
<data offset="8" size="4"/>
<data element_type="f32" offset="8" shape="" size="4"/>
<output>
<port id="0" precision="FP32"/>
</output>
</layer>
<layer id="3" name="data3" precision="FP32" type="Const" version="opset1">
<data offset="12" size="4"/>
<data element_type="f32" offset="12" shape="" size="4"/>
<output>
<port id="0" precision="FP32"/>
</output>
@ -139,19 +139,19 @@ TEST_F(NGraphReaderTests, DISABLED_ReadOneHotINT16) {
</output>
</layer>
<layer id="1" name="data1" precision="I64" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="" size="8"/>
<output>
<port id="0" precision="I64"/>
</output>
</layer>
<layer id="2" name="data2" precision="I16" type="Const" version="opset1">
<data offset="8" size="2"/>
<data element_type="i16" offset="8" shape="" size="2"/>
<output>
<port id="0" precision="I16"/>
</output>
</layer>
<layer id="3" name="data3" precision="I16" type="Const" version="opset1">
<data offset="10" size="2"/>
<data element_type="i16" offset="10" shape="" size="2"/>
<output>
<port id="0" precision="I16"/>
</output>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadPadNoPadValue) {
</output>
</layer>
<layer id="1" name="data1" precision="I64" type="Const" version="opset1">
<data offset="0" size="32"/>
<data element_type="i64" offset="0" shape="4" size="32"/>
<output>
<port id="0" precision="I64">
<dim>4</dim>
@ -28,7 +28,7 @@ TEST_F(NGraphReaderTests, ReadPadNoPadValue) {
</output>
</layer>
<layer id="2" name="data2" precision="I64" type="Const" version="opset1">
<data offset="32" size="32"/>
<data element_type="i64" offset="32" shape="4" size="32"/>
<output>
<port id="0" precision="I64">
<dim>4</dim>
@ -148,7 +148,7 @@ TEST_F(NGraphReaderTests, ReadPadWithPadValue) {
</output>
</layer>
<layer id="1" name="data1" precision="I64" type="Const" version="opset1">
<data offset="0" size="32"/>
<data element_type="i64" offset="0" shape="4" size="32"/>
<output>
<port id="0" precision="I64">
<dim>4</dim>
@ -156,7 +156,7 @@ TEST_F(NGraphReaderTests, ReadPadWithPadValue) {
</output>
</layer>
<layer id="2" name="data2" precision="I64" type="Const" version="opset1">
<data offset="32" size="32"/>
<data element_type="i64" offset="32" shape="4" size="32"/>
<output>
<port id="0" precision="I64">
<dim>4</dim>
@ -164,7 +164,7 @@ TEST_F(NGraphReaderTests, ReadPadWithPadValue) {
</output>
</layer>
<layer id="6" name="data3" precision="FP32" type="Const" version="opset1">
<data offset="64" size="8"/>
<data element_type="f32" offset="64" shape="" size="8"/>
<output>
<port id="0" precision="FP32">
</port>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadPowNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="3211264"/>
<data element_type="f32" offset="0" shape="1,64,112,112" size="3211264"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadPReLUNetwork) {
</output>
</layer>
<layer id="3" name="data" type="Const" version="opset1">
<data offset="0" size="256"/>
<data element_type="f32" offset="0" shape="1,64,1,1" size="256"/>
<output>
<port id="1" precision="FP32">
<dim>64</dim>

View File

@ -50,7 +50,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
</output>
</layer>
<layer id="3" name="1344813449_const" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -58,7 +58,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
</output>
</layer>
<layer id="4" name="1345813459_const" type="Const" version="opset1">
<data offset="8" size="8"/>
<data element_type="i64" offset="8" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -66,7 +66,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
</output>
</layer>
<layer id="9" name="13458134591_const" type="Const" version="opset1">
<data offset="16" size="8"/>
<data element_type="i64" offset="16" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -151,7 +151,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
</output>
</layer>
<layer id="12" name="ExpandAxis" type="Const" version="opset1">
<data offset="24" size="8"/>
<data element_type="i64" offset="24" shape="1" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>
@ -311,7 +311,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
</output>
</layer>
<layer id="3" name="1344813449_const" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -319,7 +319,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
</output>
</layer>
<layer id="4" name="13458134591_const" type="Const" version="opset1">
<data offset="8" size="8"/>
<data element_type="i64" offset="8" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -327,7 +327,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
</output>
</layer>
<layer id="9" name="1345813459_const" type="Const" version="opset1">
<data offset="16" size="8"/>
<data element_type="i64" offset="16" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -411,7 +411,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
</output>
</layer>
<layer id="12" name="ExpandAxis" type="Const" version="opset1">
<data offset="24" size="8"/>
<data element_type="i64" offset="24" shape="1" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>
@ -581,7 +581,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadPriorBoxClusteredNetwork) {
</output>
</layer>
<layer id="3" name="1344813449_const" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -589,7 +589,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadPriorBoxClusteredNetwork) {
</output>
</layer>
<layer id="4" name="1345813459_const" type="Const" version="opset1">
<data offset="8" size="8"/>
<data element_type="i64" offset="8" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -597,7 +597,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadPriorBoxClusteredNetwork) {
</output>
</layer>
<layer id="9" name="13458134591_const" type="Const" version="opset1">
<data offset="16" size="8"/>
<data element_type="i64" offset="16" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -682,7 +682,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadPriorBoxClusteredNetwork) {
</output>
</layer>
<layer id="12" name="ExpandAxis" type="Const" version="opset1">
<data offset="24" size="8"/>
<data element_type="i64" offset="24" shape="1" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>
@ -920,7 +920,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadPriorBoxNetwork) {
</output>
</layer>
<layer id="3" name="1344813449_const" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -928,7 +928,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadPriorBoxNetwork) {
</output>
</layer>
<layer id="4" name="13458134591_const" type="Const" version="opset1">
<data offset="8" size="8"/>
<data element_type="i64" offset="8" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -936,7 +936,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadPriorBoxNetwork) {
</output>
</layer>
<layer id="9" name="1345813459_const" type="Const" version="opset1">
<data offset="16" size="8"/>
<data element_type="i64" offset="16" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
@ -1020,7 +1020,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadPriorBoxNetwork) {
</output>
</layer>
<layer id="12" name="ExpandAxis" type="Const" version="opset1">
<data offset="24" size="8"/>
<data element_type="i64" offset="24" shape="1" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>

View File

@ -32,7 +32,7 @@ TEST_F(NGraphReaderTests, ReadProposalNetwork) {
</output>
</layer>
<layer id="2" name="in3" type="Const" version="opset1">
<data offset="0" size="24"/>
<data element_type="i64" offset="0" shape="3" size="24"/>
<output>
<port id="0" precision="I64">
<dim>3</dim>
@ -183,7 +183,7 @@ TEST_F(NGraphReaderTests, ReadProposalNetwork_2) {
</output>
</layer>
<layer id="2" name="in3" type="Const" version="opset1">
<data offset="0" size="32"/>
<data element_type="i64" offset="0" shape="4" size="32"/>
<output>
<port id="0" precision="I64">
<dim>4</dim>
@ -334,7 +334,7 @@ TEST_F(NGraphReaderTests, ReadExtensionProposalNetwork) {
</output>
</layer>
<layer id="2" name="in3" type="Const" version="opset1">
<data offset="0" size="24"/>
<data element_type="i64" offset="0" shape="3" size="24"/>
<output>
<port id="0" precision="I64">
<dim>3</dim>

View File

@ -19,21 +19,21 @@ TEST_F(NGraphReaderTests, ReadRangeNetwork) {
</output>
</layer>
<layer id="4" name="start" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="" size="8"/>
<output>
<port id="0" precision="I64">
</port>
</output>
</layer>
<layer id="5" name="stop" type="Const" version="opset1">
<data offset="8" size="8"/>
<data element_type="i64" offset="8" shape="" size="8"/>
<output>
<port id="0" precision="I64">
</port>
</output>
</layer>
<layer id="6" name="step" type="Const" version="opset1">
<data offset="16" size="8"/>
<data element_type="i64" offset="16" shape="" size="8"/>
<output>
<port id="0" precision="I64">
</port>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadReduceLogicalAndNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadReduceLogicalOrNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPool) {
</output>
</layer>
<layer id="1" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="0" precision="I64">
<dim>2</dim>
@ -129,7 +129,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolKeepDimsFalse) {
</output>
</layer>
<layer id="1" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="1" size="16"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>
@ -268,7 +268,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatial) {
</output>
</layer>
<layer id="1" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>
@ -444,7 +444,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatialHard) {
</output>
</layer>
<layer id="1" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="0" precision="I64">
<dim>2</dim>
@ -588,7 +588,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPool) {
</output>
</layer>
<layer id="1" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="0" precision="I64">
<dim>2</dim>
@ -698,7 +698,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolKeepDimsFalse) {
</output>
</layer>
<layer id="1" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="1" size="16"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>
@ -837,7 +837,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolNonSpatial) {
</output>
</layer>
<layer id="1" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>
@ -1013,7 +1013,7 @@ TEST_F(NGraphReaderTests, ReduceSumToAvgPool) {
</output>
</layer>
<layer id="1" name="conv_weights" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="0" precision="I64">
<dim>2</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadReshapeNetwork) {
</output>
</layer>
<layer id="1" name="const1" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadReverseSequenceNetwork) {
</output>
</layer>
<layer id="1" name="const1" type="Const" version="opset1">
<data offset="0" size="24"/>
<data element_type="i64" offset="0" shape="3" size="24"/>
<output>
<port id="1" precision="I64">
<dim>3</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadSeluNetwork) {
</output>
</layer>
<layer id="1" name="const1" type="Const" version="opset1">
<data offset="0" size="4"/>
<data element_type="f32" offset="0" shape="1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
@ -28,7 +28,7 @@ TEST_F(NGraphReaderTests, ReadSeluNetwork) {
</output>
</layer>
<layer id="2" name="const2" type="Const" version="opset1">
<data offset="4" size="4"/>
<data element_type="f32" offset="4" shape="1" size="4"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadSplitNetwork) {
</output>
</layer>
<layer id="1" name="const1" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="" size="8"/>
<output>
<port id="0" precision="I64"/>
</output>
@ -147,7 +147,7 @@ TEST_F(NGraphReaderTests, ReadSplitNetwork2) {
</output>
</layer>
<layer id="1" name="const1" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="" size="8"/>
<output>
<port id="0" precision="I64"/>
</output>
@ -381,13 +381,13 @@ TEST_F(NGraphReaderTests, ReadVariadicSplitNetwork) {
</output>
</layer>
<layer id="1" name="const1" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="" size="8"/>
<output>
<port id="0" precision="I64"/>
</output>
</layer>
<layer id="2" name="const2" type="Const" version="opset1">
<data offset="8" size="16"/>
<data element_type="i64" offset="8" shape="2" size="16"/>
<output>
<port id="0" precision="I64">
<dim>2</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadSquaredDifferenceNetwork) {
</output>
</layer>
<layer id="1" name="data1" type="Const" version="opset1">
<data offset="0" size="3211264"/>
<data element_type="f32" offset="0" shape="1,64,112,112" size="3211264"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadSqueeze) {
</output>
</layer>
<layer id="1" name="const1" precision="I64" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>

View File

@ -24,7 +24,7 @@ TEST_F(NGraphReaderTests, ConvertStridedSliceToCrop) {
</output>
</layer>
<layer id="1" name="Begin" precision="I64" type="Const" version="opset1">
<data offset="0" size="32"/>
<data element_type="i64" offset="0" shape="4" size="32"/>
<output>
<port id="0" precision="I64">
<dim>4</dim>
@ -32,7 +32,7 @@ TEST_F(NGraphReaderTests, ConvertStridedSliceToCrop) {
</output>
</layer>
<layer id="2" name="End" precision="I64" type="Const" version="opset1">
<data offset="32" size="32"/>
<data element_type="i64" offset="32" shape="4" size="32"/>
<output>
<port id="0" precision="I64">
<dim>4</dim>
@ -40,7 +40,7 @@ TEST_F(NGraphReaderTests, ConvertStridedSliceToCrop) {
</output>
</layer>
<layer id="3" name="Strides" precision="I64" type="Const" version="opset1">
<data offset="64" size="32"/>
<data element_type="i64" offset="64" shape="4" size="32"/>
<output>
<port id="0" precision="I64" >
<dim>4</dim>
@ -191,7 +191,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks) {
</output>
</layer>
<layer id="1" name="Begin" precision="I64" type="Const" version="opset1">
<data offset="0" size="48"/>
<data element_type="i64" offset="0" shape="6" size="48"/>
<output>
<port id="0">
<dim>6</dim>
@ -199,7 +199,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks) {
</output>
</layer>
<layer id="2" name="End" precision="I64" type="Const" version="opset1">
<data offset="48" size="48"/>
<data element_type="i64" offset="48" shape="6" size="48"/>
<output>
<port id="0">
<dim>6</dim>
@ -207,7 +207,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks) {
</output>
</layer>
<layer id="3" name="Strides" precision="I64" type="Const" version="opset1">
<data offset="96" size="48"/>
<data element_type="i64" offset="96" shape="6" size="48"/>
<output>
<port id="0">
<dim>6</dim>
@ -447,7 +447,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks_2) {
</output>
</layer>
<layer id="1" name="Begin" precision="I64" type="Const" version="opset1">
<data offset="0" size="64"/>
<data element_type="i64" offset="0" shape="8" size="64"/>
<output>
<port id="0">
<dim>8</dim>
@ -455,7 +455,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks_2) {
</output>
</layer>
<layer id="2" name="End" precision="I64" type="Const" version="opset1">
<data offset="64" size="64"/>
<data element_type="i64" offset="64" shape="8" size="64"/>
<output>
<port id="0">
<dim>8</dim>
@ -463,7 +463,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks_2) {
</output>
</layer>
<layer id="3" name="Strides" precision="I64" type="Const" version="opset1">
<data offset="128" size="64"/>
<data element_type="i64" offset="128" shape="8" size="64"/>
<output>
<port id="0">
<dim>8</dim>

View File

@ -82,7 +82,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_opset1) {
</output>
</layer>
<layer id="1" name="25_const" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
@ -127,7 +127,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_opset1) {
</output>
</layer>
<layer id="5" name="concat/LSTMCell/Split256_const" type="Const" version="opset1">
<data offset="16" size="2097152"/>
<data element_type="f32" offset="16" shape="1024,512" size="2097152"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -136,7 +136,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_opset1) {
</output>
</layer>
<layer id="6" name="LSTMCell/Split257_const" type="Const" version="opset1">
<data offset="2097168" size="1048576"/>
<data element_type="f32" offset="2097168" shape="1024,256" size="1048576"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -145,7 +145,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_opset1) {
</output>
</layer>
<layer id="7" name="Output_0/Data__const" type="Const" version="opset1">
<data offset="3145744" size="4096"/>
<data element_type="f32" offset="3145744" shape="1024" size="4096"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -207,7 +207,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_opset1) {
</input>
</layer>
<layer id="11" name="28_const" type="Const" version="opset1">
<data offset="3149840" size="24"/>
<data element_type="i64" offset="3149840" shape="3" size="24"/>
<output>
<port id="1" precision="I64">
<dim>3</dim>
@ -545,7 +545,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_resnet_opset1) {
</output>
</layer>
<layer id="1" name="7_const" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
@ -590,7 +590,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_resnet_opset1) {
</output>
</layer>
<layer id="5" name="471/LSTMCell/Split149_const" type="Const" version="opset1">
<data offset="16" size="4194304"/>
<data element_type="f32" offset="16" shape="2048,512" size="4194304"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
@ -599,7 +599,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_resnet_opset1) {
</output>
</layer>
<layer id="6" name="471/LSTMCell/Split150_const" type="Const" version="opset1">
<data offset="4194320" size="4194304"/>
<data element_type="f32" offset="4194320" shape="2048,512" size="4194304"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
@ -608,7 +608,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_resnet_opset1) {
</output>
</layer>
<layer id="7" name="471/inport/2_const" type="Const" version="opset1">
<data offset="8388624" size="8192"/>
<data element_type="f32" offset="8388624" shape="2048" size="8192"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
@ -670,7 +670,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_resnet_opset1) {
</input>
</layer>
<layer id="11" name="15_const" type="Const" version="opset1">
<data offset="8396816" size="24"/>
<data element_type="i64" offset="8396816" shape="3" size="24"/>
<output>
<port id="1" precision="I64">
<dim>3</dim>
@ -1026,7 +1026,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_negative_stride_opset1) {
</output>
</layer>
<layer id="1" name="25_const" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
@ -1071,7 +1071,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_negative_stride_opset1) {
</output>
</layer>
<layer id="5" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/concat/LSTMCell/Split269_const" type="Const" version="opset1">
<data offset="16" size="2097152"/>
<data element_type="f32" offset="16" shape="1024,512" size="2097152"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -1080,7 +1080,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_negative_stride_opset1) {
</output>
</layer>
<layer id="6" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/concat/LSTMCell/Split270_const" type="Const" version="opset1">
<data offset="2097168" size="1048576"/>
<data element_type="f32" offset="2097168" shape="1024,256" size="1048576"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -1089,7 +1089,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_negative_stride_opset1) {
</output>
</layer>
<layer id="7" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/BiasAdd/Enter/Output_0/Data__const" type="Const" version="opset1">
<data offset="3145744" size="4096"/>
<data element_type="f32" offset="3145744" shape="1024" size="4096"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -1151,7 +1151,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_negative_stride_opset1) {
</input>
</layer>
<layer id="11" name="28_const" type="Const" version="opset1">
<data offset="3149840" size="24"/>
<data element_type="i64" offset="3149840" shape="3" size="24"/>
<output>
<port id="1" precision="I64">
<dim>3</dim>
@ -1479,7 +1479,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_opset4) {
</output>
</layer>
<layer id="1" name="25_const" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
@ -1524,7 +1524,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_opset4) {
</output>
</layer>
<layer id="5" name="concat/LSTMCell/Split256_const" type="Const" version="opset1">
<data offset="16" size="2097152"/>
<data element_type="f32" offset="16" shape="1024,512" size="2097152"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -1533,7 +1533,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_opset4) {
</output>
</layer>
<layer id="6" name="LSTMCell/Split257_const" type="Const" version="opset1">
<data offset="2097168" size="1048576"/>
<data element_type="f32" offset="2097168" shape="1024,256" size="1048576"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -1542,7 +1542,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_opset4) {
</output>
</layer>
<layer id="7" name="Output_0/Data__const" type="Const" version="opset1">
<data offset="3145744" size="4096"/>
<data element_type="f32" offset="3145744" shape="1024" size="4096"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -1604,7 +1604,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_opset4) {
</input>
</layer>
<layer id="11" name="28_const" type="Const" version="opset1">
<data offset="3149840" size="24"/>
<data element_type="i64" offset="3149840" shape="3" size="24"/>
<output>
<port id="1" precision="I64">
<dim>3</dim>
@ -1942,7 +1942,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_resnet_opset4) {
</output>
</layer>
<layer id="1" name="7_const" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
@ -1987,7 +1987,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_resnet_opset4) {
</output>
</layer>
<layer id="5" name="471/LSTMCell/Split149_const" type="Const" version="opset1">
<data offset="16" size="4194304"/>
<data element_type="f32" offset="16" shape="2048,512" size="4194304"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
@ -1996,7 +1996,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_resnet_opset4) {
</output>
</layer>
<layer id="6" name="471/LSTMCell/Split150_const" type="Const" version="opset1">
<data offset="4194320" size="4194304"/>
<data element_type="f32" offset="4194320" shape="2048,512" size="4194304"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
@ -2005,7 +2005,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_resnet_opset4) {
</output>
</layer>
<layer id="7" name="471/inport/2_const" type="Const" version="opset1">
<data offset="8388624" size="8192"/>
<data element_type="f32" offset="8388624" shape="2048" size="8192"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
@ -2067,7 +2067,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_resnet_opset4) {
</input>
</layer>
<layer id="11" name="15_const" type="Const" version="opset1">
<data offset="8396816" size="24"/>
<data element_type="i64" offset="8396816" shape="3" size="24"/>
<output>
<port id="1" precision="I64">
<dim>3</dim>
@ -2423,7 +2423,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_negative_stride_opset4) {
</output>
</layer>
<layer id="1" name="25_const" type="Const" version="opset1">
<data offset="0" size="16"/>
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
@ -2468,7 +2468,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_negative_stride_opset4) {
</output>
</layer>
<layer id="5" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/concat/LSTMCell/Split269_const" type="Const" version="opset1">
<data offset="16" size="2097152"/>
<data element_type="f32" offset="16" shape="1024,512" size="2097152"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -2477,7 +2477,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_negative_stride_opset4) {
</output>
</layer>
<layer id="6" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/concat/LSTMCell/Split270_const" type="Const" version="opset1">
<data offset="2097168" size="1048576"/>
<data element_type="f32" offset="2097168" shape="1024,256" size="1048576"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -2486,7 +2486,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_negative_stride_opset4) {
</output>
</layer>
<layer id="7" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/BiasAdd/Enter/Output_0/Data__const" type="Const" version="opset1">
<data offset="3145744" size="4096"/>
<data element_type="f32" offset="3145744" shape="1024" size="4096"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
@ -2548,7 +2548,7 @@ TEST_F(NGraphReaderTests, ReadTensorIteratorNetwork_negative_stride_opset4) {
</input>
</layer>
<layer id="11" name="28_const" type="Const" version="opset1">
<data offset="3149840" size="24"/>
<data element_type="i64" offset="3149840" shape="3" size="24"/>
<output>
<port id="1" precision="I64">
<dim>3</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadTileNetwork) {
</output>
</layer>
<layer id="1" name="const1" type="Const" version="opset1">
<data offset="0" size="32"/>
<data element_type="i64" offset="0" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>
@ -130,7 +130,7 @@ TEST_F(NGraphReaderTests, ReadTileNetwork2) {
</output>
</layer>
<layer id="1" name="const1" type="Const" version="opset1">
<data offset="0" size="32"/>
<data element_type="i64" offset="0" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadTopKNetwork) {
</output>
</layer>
<layer id="4" name="1345813459_const" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="1" precision="I64" />
</output>
@ -146,7 +146,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadTopKNetwork) {
</output>
</layer>
<layer id="4" name="1345813459_const" type="Const" precision="I64">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="0">
<dim>1</dim>

View File

@ -20,7 +20,7 @@ TEST_F(NGraphReaderTests, ReadTransposeNetwork) {
</output>
</layer>
<layer id="1" name="const1" type="Const" version="opset1">
<data offset="0" size="32"/>
<data element_type="i64" offset="0" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>

View File

@ -19,7 +19,7 @@ TEST_F(NGraphReaderTests, ReadUnsqueeze) {
</output>
</layer>
<layer id="1" name="const1" precision="I64" type="Const" version="opset1">
<data offset="0" size="8"/>
<data element_type="i64" offset="0" shape="1" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>

View File

@ -96,12 +96,20 @@ void generateTestModel(const std::string &modelPath,
getCreatorLayer(conv1OutData) = conv1LayerPtr;
conv1LayerPtr->outData[0] = conv1OutData;
shapeStr.str("");
InferenceEngine::SizeVector constShape {conv1Params.out_c, inputDims[1], conv1Params.kernel[0], conv1Params.kernel[1]};
std::copy(constShape.begin(), constShape.end() - 1, std::ostream_iterator<size_t>(shapeStr, ","));
shapeStr << constShape.back();
auto conv1ParamConstLayerXML = ir_builder_v10
.AddLayer("Conv1_Param_Const", "Const",
{{"size", std::to_string(CommonTestUtils::getConvWeightsSize(
inputDims,
conv1Params,
netPrc.name()))}})
netPrc.name()))},
{"offset", "0"},
{"element_type", FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc).get_type_name()},
{"shape", shapeStr.str()}})
.AddOutPort(netPrc, {conv1Params.out_c, inputDims[1], conv1Params.kernel[0], conv1Params.kernel[1]})
.getLayer();
@ -166,9 +174,13 @@ void generateTestModel(const std::string &modelPath,
InferenceEngine::Layout::NCHW});
getCreatorLayer(lrn1OutData) = lrn1LayerPtr;
lrn1LayerPtr->outData[0] = lrn1OutData;
size_t offset = CommonTestUtils::getConvWeightsSize(inputDims, conv1Params, netPrc.name());
auto lrn1ParamConstLayerXML = ir_builder_v10
.AddLayer("Lrn1_Param_Const", "Const", {{"size", "8"}})
.AddLayer("Lrn1_Param_Const", "Const", {{"size", "8"},
{"element_type", "i64"},
{"shape", "1"},
{"offset", std::to_string(offset)}})
.AddOutPort(InferenceEngine::Precision::I64, {1})
.getLayer();
@ -266,8 +278,13 @@ void generateTestModel(const std::string &modelPath,
split1LayerPtr->outData[0] = split1OutData0;
split1LayerPtr->outData[1] = split1OutData1;
offset = offset + 8;
auto split1ParamConstLayerXML = ir_builder_v10
.AddLayer("Split1_Param_Const", "Const", {{"size", "8"}})
.AddLayer("Split1_Param_Const", "Const", {{"size", "8"},
{"element_type", "i64"},
{"shape", ""},
{"offset", std::to_string(offset)}})
.AddOutPort(InferenceEngine::Precision::I64, {})
.getLayer();
@ -315,12 +332,21 @@ void generateTestModel(const std::string &modelPath,
getCreatorLayer(conv2OutData) = conv2LayerPtr;
conv2LayerPtr->outData[0] = conv2OutData;
shapeStr.str("");
InferenceEngine::SizeVector conv2ConstShape {conv2Params.out_c, split1OutShape[1], conv2Params.kernel[0], conv2Params.kernel[1]};
std::copy(conv2ConstShape.begin(), conv2ConstShape.end() - 1, std::ostream_iterator<size_t>(shapeStr, ","));
shapeStr << conv2ConstShape.back();
offset = offset + 8;
auto conv2ParamConstLayerXML = ir_builder_v10
.AddLayer("Conv2_Param_Const", "Const",
{{"size", std::to_string(CommonTestUtils::getConvWeightsSize(
split1OutShape,
conv2Params,
netPrc.name()))}})
netPrc.name()))},
{"offset", std::to_string(offset)},
{"element_type", FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc).get_type_name()},
{"shape", shapeStr.str()}})
.AddOutPort(netPrc,
{conv2Params.out_c, split1OutShape[1], conv2Params.kernel[0], conv2Params.kernel[1]})
.getLayer();
@ -372,12 +398,21 @@ void generateTestModel(const std::string &modelPath,
getCreatorLayer(conv3OutData) = conv3LayerPtr;
conv3LayerPtr->outData[0] = conv3OutData;
shapeStr.str("");
InferenceEngine::SizeVector conv3ConstShape {conv3Params.out_c, split1OutShape[1], conv3Params.kernel[0], conv3Params.kernel[1]};
std::copy(conv3ConstShape.begin(), conv3ConstShape.end() - 1, std::ostream_iterator<size_t>(shapeStr, ","));
shapeStr << conv3ConstShape.back();
offset = offset + CommonTestUtils::getConvWeightsSize(split1OutShape, conv2Params, netPrc.name());
auto conv3ParamConstLayerXML = ir_builder_v10
.AddLayer("Conv3_Param_Const", "Const",
{{"size", std::to_string(CommonTestUtils::getConvWeightsSize(
split1OutShape,
conv3Params,
netPrc.name()))}})
netPrc.name()))},
{"offset", std::to_string(offset)},
{"element_type", FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc).get_type_name()},
{"shape", shapeStr.str()}})
.AddOutPort(netPrc,
{conv3Params.out_c, split1OutShape[1], conv3Params.kernel[0], conv3Params.kernel[1]})
.getLayer();

View File

@ -139,7 +139,7 @@ TEST_F(smoke_NGraphNetworkTest, reshapeLoadTest) {
</output>
</layer>
<layer id="1" name="13/Output_0/Data__const" type="Const" version="opset1">
<data offset="0" size="2000"/>
<data element_type="f32" offset="0" shape="20,1,5,5" size="2000"/>
<output>
<port id="1" precision="FP32">
<dim>20</dim>
@ -175,7 +175,7 @@ TEST_F(smoke_NGraphNetworkTest, reshapeLoadTest) {
</output>
</layer>
<layer id="3" name="conv1/Dims215/copy_const" type="Const" version="opset1">
<data offset="2000" size="80"/>
<data element_type="f32" offset="2000" shape="1,20,1,1" size="80"/>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
@ -229,7 +229,7 @@ TEST_F(smoke_NGraphNetworkTest, reshapeLoadTest) {
</output>
</layer>
<layer id="6" name="11/Output_0/Data__const" type="Const" version="opset1">
<data offset="2080" size="100000"/>
<data element_type="f32" offset="2080" shape="50,20,5,5" size="100000"/>
<output>
<port id="1" precision="FP32">
<dim>50</dim>
@ -265,7 +265,7 @@ TEST_F(smoke_NGraphNetworkTest, reshapeLoadTest) {
</output>
</layer>
<layer id="8" name="conv2/Dims209/copy_const" type="Const" version="opset1">
<data offset="102080" size="200"/>
<data element_type="f32" offset="102080" shape="1,50,1,1" size="200"/>
<output>
<port id="1" precision="FP32">
<dim>1</dim>

View File

@ -189,7 +189,7 @@ protected:
</output>
</layer>
<layer id="1" name="topk_k" type="Const" version="opset1">
<data element_type="f16" offset="0" shape="__K_DIMS_SHAPE__" size="__K_SIZE__"/>
<data element_type="i32" offset="0" shape="__K_DIMS_SHAPE__" size="__K_SIZE__"/>
<output>
<port id="1" precision="__INDEX_PRECISION__" />
</output>
@ -251,7 +251,7 @@ protected:
REPLACE_WITH_STR(model, "__INDEX_PRECISION__", indexPrecision.name());
REPLACE_WITH_STR(model, "__INPUT_DIMS__", inputDimsStr);
REPLACE_WITH_NUM_VECTOR(model, "__INPUT_DIMS_SHAPE__", inputDims);
REPLACE_WITH_STR(model, "__K_DIMS_SHAPE__", "1");
REPLACE_WITH_STR(model, "__K_DIMS_SHAPE__", "");
REPLACE_WITH_NUM(model, "__K_SIZE__", kSize);
REPLACE_WITH_STR(model, "__OUTPUT_DIMS__", outputDimsStr);
REPLACE_WITH_NUM(model, "__AXIS__", axis);

View File

@ -11,6 +11,7 @@ namespace {
switch (precision) {
case InferenceEngine::Precision::FP16: return "f16";
case InferenceEngine::Precision::FP32: return "f32";
case InferenceEngine::Precision::I64: return "i64";
default:
break;
}
@ -227,7 +228,9 @@ IRXmlNode IRDumperLayer::dump() const {
if (!_weights.empty()) {
IRXmlNode dataNode {"data", {
{"offset", std::to_string(_weights._dataOffset)},
{"size", std::to_string(_weights.size())}}, {}, {}};
{"size", std::to_string(_weights.size())},
{"element_type", paramterPresitionToString(_weights._precision)},
{"shape", InferenceEngine::details::joinVec(_outDesc[0])}}, {}, {}};
layer.children.push_back(std::move(dataNode));
}
else if (_parameterPrecision != InferenceEngine::Precision::UNSPECIFIED) {