nGraph helpers/builders cleanup (#20819)
* Delete `getNodeSharedPtr()`
* Remove `makeRoll` ng::builder
* Delete `makeSelect` ng::builder
* Delete `makeDepthToSpace` ng::builder
* Remove `CompareFunctions` and `getConstData` from ng::helpers
* Reinstate `makeSelect` for compatibility with NPU
* Port `QuantizationGranularity`, `MemoryTransformation` to ov::test::utils
* Restore ng::helpers::QuantGranularity for backward compatibility
parent 8f406067d1
commit bcb38796ce
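The recurring pattern in this cleanup: trivial `ngraph::builder::makeX` wrappers are deleted and their one-line bodies inlined at the call sites. A minimal before/after sketch of that pattern (`build_d2s` is an illustrative name; the construction matches the deleted builder body further down):

```cpp
#include <memory>

#include "openvino/op/depth_to_space.hpp"
#include "openvino/op/parameter.hpp"

// Before: call sites went through a trivial wrapper:
//   auto d2s = ngraph::builder::makeDepthToSpace(params[0], mode, blockSize);
// After: the op is constructed directly, keeping its concrete static type.
std::shared_ptr<ov::op::v0::DepthToSpace> build_d2s(const ov::Output<ov::Node>& in) {
    return std::make_shared<ov::op::v0::DepthToSpace>(
        in, ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, /*block_size=*/2);
}
```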
@@ -195,7 +195,21 @@ public:
 private:
     std::shared_ptr<Node> get_reduction(NodeTypeInfo reduction_type_info, const OutputVector& inputs, bool keep_dims) {
-        auto reduction = ngraph::helpers::getNodeSharedPtr(reduction_type_info, inputs);
+        std::shared_ptr<Node> reduction;
+        for (const auto& it : get_available_opsets()) {
+            const auto& opset = it.second();
+            if (opset.contains_type(reduction_type_info)) {
+                reduction = std::shared_ptr<Node>(opset.create(reduction_type_info.name));
+                reduction->set_arguments(inputs);
+                reduction->validate_and_infer_types();
+            }
+        }
+        OPENVINO_ASSERT(reduction,
+                        "supported opsets does not contain op with name: ",
+                        reduction_type_info.name,
+                        " version: ",
+                        reduction_type_info.version_id);

         if (auto arithmetic_reduce = std::dynamic_pointer_cast<op::util::ArithmeticReductionKeepDims>(reduction))
             arithmetic_reduce->set_keep_dims(keep_dims);
         else if (auto logical_reduce = std::dynamic_pointer_cast<op::util::LogicalReductionKeepDims>(reduction))
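For context, the inlined logic above is exactly what the deleted `ngraph::helpers::getNodeSharedPtr` did: look the type up in the registered opsets and create the node by name. A standalone sketch of the same mechanism (`create_by_type` is an illustrative name, not an API introduced by this commit):

```cpp
#include <memory>

#include "openvino/core/except.hpp"
#include "openvino/opsets/opset.hpp"

std::shared_ptr<ov::Node> create_by_type(const ov::DiscreteTypeInfo& type_info,
                                         const ov::OutputVector& args) {
    // get_available_opsets() maps opset names to factory functors.
    for (const auto& it : ov::get_available_opsets()) {
        const ov::OpSet& opset = it.second();
        if (opset.contains_type(type_info)) {
            auto node = std::shared_ptr<ov::Node>(opset.create(type_info.name));
            node->set_arguments(args);
            node->validate_and_infer_types();  // shapes/types are only inferred on demand
            return node;
        }
    }
    OPENVINO_THROW("supported opsets does not contain op with name: ", type_info.name);
}
```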
@@ -19,7 +19,7 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 const std::vector<size_t> numOutChannels = {16, 32};

 const std::vector<size_t > levels = {256};
-const std::vector<QuantizationGranularity > granularity = {Pertensor, Perchannel};
+const std::vector<QuantizationGranularity > granularity = {QuantizationGranularity::Pertensor, QuantizationGranularity::Perchannel};

 /* ============= 2D GroupConvolutionBackpropData ============= */
 const std::vector<std::vector<size_t >> inputShapes2D = {{1, 16, 10, 10}, {1, 32, 10, 10}};
@@ -21,7 +21,7 @@ const std::vector<size_t> numOutChannels = {3, 24, 48};
 const std::vector<size_t> numGroups = {3};

 const std::vector<size_t > levels = {256};
-const std::vector<QuantizationGranularity> granularity = {Pertensor, Perchannel};
+const std::vector<QuantizationGranularity> granularity = {QuantizationGranularity::Pertensor, QuantizationGranularity::Perchannel};
 const std::vector<bool> quantizeWeights = {false, true};

 /* ============= 2D GroupConvolution ============= */
@@ -20,7 +20,7 @@ const std::vector<size_t> numOutChannels = {16, 32};
 const std::vector<size_t> numGroups = {2, 8, 16};

 const std::vector<size_t > levels = {256};
-const std::vector<QuantizationGranularity > granularity = {Pertensor, Perchannel};
+const std::vector<QuantizationGranularity > granularity = {QuantizationGranularity::Pertensor, QuantizationGranularity::Perchannel};

 /* ============= 2D GroupConvolutionBackpropData ============= */
 const std::vector<std::vector<size_t >> inputShapes2D = {{1, 16, 10, 10}, {1, 32, 10, 10}};
@@ -43,7 +43,7 @@ const std::vector<QuantRange> ranges_i32 = {
 const std::vector<uint64_t> levels_8 = {256};
 const std::vector<uint64_t> levels_16 = {65536};
 const std::vector<uint64_t> levels_32 = {4294967296};
-const std::vector<QuantizationGranularity> granularity = {Pertensor};
+const std::vector<QuantizationGranularity> granularity = {QuantizationGranularity::Pertensor};

 const auto quantParams_i8 = ::testing::Combine(
     ::testing::ValuesIn(levels_8),
@@ -75,7 +75,7 @@ protected:
         for (auto&& shape : inputDynamicShapes) {
             params.push_back(std::make_shared<ov::op::v0::Parameter>(inType, shape));
         }
-        auto d2s = ngraph::builder::makeDepthToSpace(params[0], mode, blockSize);
+        auto d2s = std::make_shared<ov::op::v0::DepthToSpace>(params[0], mode, blockSize);
         function = makeNgraphFunction(inType, params, d2s, "DepthToSpace");
     }
 };
@@ -61,8 +61,7 @@ protected:
         auto shiftNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{shift.size()}, shift)->output(0);
         auto axesNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes)->output(0);

-        const auto paramsOut = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramsIn));
-        const auto roll = std::dynamic_pointer_cast<ngraph::op::v7::Roll>(ngraph::builder::makeRoll(paramsOut[0], shiftNode, axesNode));
+        const auto roll = std::make_shared<ngraph::op::v7::Roll>(paramsIn[0], shiftNode, axesNode);
        const ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(roll)};
        function = std::make_shared<ngraph::Function>(results, paramsIn, "roll");
    }
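Worth noting in this hunk and the similar ones below: `std::make_shared` already returns the concrete op type, so the old `dynamic_pointer_cast` from the builder's `shared_ptr<ov::Node>` becomes unnecessary. A minimal standalone sketch (`make_roll_example` is illustrative):

```cpp
#include <memory>
#include <vector>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/roll.hpp"

std::shared_ptr<ov::op::v7::Roll> make_roll_example() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 4});
    auto shift = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, std::vector<int64_t>{2});
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, std::vector<int64_t>{0});
    // The builder returned shared_ptr<ov::Node> and forced a downcast;
    // direct construction keeps the static type.
    return std::make_shared<ov::op::v7::Roll>(data, shift, axes);
}
```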
@@ -19,7 +19,7 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 const std::vector<size_t> numOutChannels = {16, 32};

 const std::vector<size_t > levels = {256};
-const std::vector<QuantizationGranularity > granularity = {Pertensor, Perchannel};
+const std::vector<QuantizationGranularity > granularity = {QuantizationGranularity::Pertensor, QuantizationGranularity::Perchannel};

 /* ============= 2D GroupConvolutionBackpropData ============= */
 const std::vector<std::vector<size_t >> inputShapes2D = {{1, 16, 10, 10}, {1, 32, 10, 10}};
@@ -20,7 +20,7 @@ const std::vector<size_t> numOutChannels = {16, 32};
 const std::vector<size_t> numGroups = {2, 8, 16};

 const std::vector<size_t > levels = {256};
-const std::vector<QuantizationGranularity > granularity = {Pertensor, Perchannel};
+const std::vector<QuantizationGranularity > granularity = {QuantizationGranularity::Pertensor, QuantizationGranularity::Perchannel};

 /* ============= 2D GroupConvolutionBackpropData ============= */
 const std::vector<std::vector<size_t >> inputShapes2D = {{1, 16, 10, 10}, {1, 32, 10, 10}};
@@ -65,10 +65,10 @@ protected:
         init_input_shapes({shapes});

         ov::ParameterVector params;
-        for (auto&& shape : inputDynamicShapes) {
+        for (auto&& shape : inputDynamicShapes)
             params.push_back(std::make_shared<ov::op::v0::Parameter>(inType, shape));
-        }
-        auto d2s = ngraph::builder::makeDepthToSpace(params[0], mode, blockSize);
+
+        auto d2s = std::make_shared<ov::op::v0::DepthToSpace>(params[0], mode, blockSize);

         ngraph::ResultVector results;
         for (size_t i = 0; i < d2s->get_output_size(); i++)
@@ -65,8 +65,7 @@ protected:
             std::make_shared<opset1::Parameter>(netType, inputDynamicShapes[2]),
         };

-        auto paramOuts = helpers::convert2OutputVector(helpers::castOps2Nodes<op::Parameter>(params));
-        auto select = builder::makeSelect(paramOuts, broadcast);
+        auto select = std::make_shared<ov::op::v1::Select>(params[0], params[1], params[2], broadcast);

        auto makeFunction = [](ParameterVector &params, const std::shared_ptr<Node> &lastNode) {
            ResultVector results;
@@ -19,6 +19,31 @@

 namespace LayerTestsUtils {

+namespace {
+std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> getConstData(
+    const std::shared_ptr<ov::Model>& function) {
+    size_t numOutputs = function->get_output_size();
+    std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> outputs(numOutputs);
+    auto funcResults = function->get_results();
+    for (size_t i = 0; i < numOutputs; i++) {
+        outputs[i].first = funcResults[i]->get_element_type();
+        const auto& output = function->output(i).get_node_shared_ptr();
+        OPENVINO_ASSERT(output->inputs().size() == 1);
+        auto parrentNode = output->input_value(0).get_node_shared_ptr();
+        OPENVINO_ASSERT(ov::op::util::is_constant(parrentNode),
+                        "Function was not fully folded to constant state!\n",
+                        "Parent node of one of results is not constant and has type ",
+                        parrentNode->get_type_name());
+
+        const auto data = std::dynamic_pointer_cast<ov::op::v0::Constant>(parrentNode)->get_data_ptr<std::uint8_t>();
+        const auto dataSize = ov::shape_size(parrentNode->get_shape()) * parrentNode->get_element_type().size();
+        outputs[i].second.resize(dataSize);
+        std::copy(data, data + dataSize, outputs[i].second.data());
+    }
+    return outputs;
+}
+}  // namespace
+
 LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f), abs_threshold(-1.f) {
     core = PluginCache::get().ie(targetDevice);
 }
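The helper moved above is now file-local; it assumes the model has already been constant-folded, so every `Result` is fed directly by a `Constant`. A simplified standalone sketch of the same read-back (`read_folded_outputs` is an illustrative name):

```cpp
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

#include "openvino/core/except.hpp"
#include "openvino/core/model.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/op/constant.hpp"

std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> read_folded_outputs(
    const std::shared_ptr<ov::Model>& model) {
    std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> outputs;
    for (const auto& result : model->get_results()) {
        auto parent = result->input_value(0).get_node_shared_ptr();
        auto constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(parent);
        OPENVINO_ASSERT(constant, "model was not fully folded to constant state");
        // Copy the raw bytes of each folded output, tagged with its element type.
        const auto* data = constant->get_data_ptr<std::uint8_t>();
        const auto byte_size = ov::shape_size(constant->get_shape()) * constant->get_element_type().size();
        outputs.emplace_back(constant->get_element_type(),
                             std::vector<std::uint8_t>(data, data + byte_size));
    }
    return outputs;
}
```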
@@ -460,7 +485,7 @@ std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> LayerTe
     }
     case CONSTANT_FOLDING: {
         const auto &foldedFunc = ngraph::helpers::foldFunction(functionRefs, referenceInputs, refInputsTypes);
-        expectedOutputs = ngraph::helpers::getConstData(foldedFunc);
+        expectedOutputs = getConstData(foldedFunc);
         break;
     }
     case IE: {
@@ -46,8 +46,7 @@ void DepthToSpaceLayerTest::SetUp() {
     std::tie(inShape, inputPrecision, mode, blockSize, targetDevice) = this->GetParam();
     auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
     ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(inPrc, ov::Shape(inShape))};
-    auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
-    auto d2s = ngraph::builder::makeDepthToSpace(paramOuts[0], mode, blockSize);
+    auto d2s = std::make_shared<ov::op::v0::DepthToSpace>(params[0], mode, blockSize);
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(d2s)};
     function = std::make_shared<ngraph::Function>(results, params, "DepthToSpace");
 }
@@ -37,8 +37,7 @@ void RollLayerTest::SetUp() {
     auto shiftNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{shift.size()}, shift)->output(0);
     auto axesNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes)->output(0);

-    auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramVector));
-    auto roll = std::dynamic_pointer_cast<ngraph::op::v7::Roll>(ngraph::builder::makeRoll(paramOuts[0], shiftNode, axesNode));
+    auto roll = std::make_shared<ngraph::op::v7::Roll>(paramVector[0], shiftNode, axesNode);

     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(roll)};
     function = std::make_shared<ngraph::Function>(results, paramVector, "roll");
@@ -36,9 +36,7 @@ namespace LayerTestsDefinitions {
         paramNode = std::make_shared<ngraph::opset1::Parameter>(inType, ngraph::Shape(inputShapes[i]));
         paramNodesVector.push_back(paramNode);
     }
-    auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramNodesVector));
-
-    auto select = std::dynamic_pointer_cast<ngraph::opset1::Select>(ngraph::builder::makeSelect(paramOuts, broadcast));
+    auto select = std::make_shared<ov::op::v1::Select>(paramNodesVector[0], paramNodesVector[1], paramNodesVector[2], broadcast);
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(select)};
     function = std::make_shared<ngraph::Function>(results, paramNodesVector, "select");
 }
@@ -55,7 +55,7 @@ void QuantConvBackpropDataLayerTest::SetUp() {
     ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};

     std::vector<size_t> dataFqConstShapes(inputShape.size(), 1);
-    if (quantGranularity == ngraph::helpers::Perchannel)
+    if (quantGranularity == QuantizationGranularity::Perchannel)
         dataFqConstShapes[1] = inputShape[1];
     auto dataFq = ngraph::builder::makeFakeQuantize(params[0], ngPrc, quantLevels, dataFqConstShapes);
@@ -66,7 +66,7 @@ void QuantConvBackpropDataLayerTest::SetUp() {
     auto weightsNode = ngraph::builder::makeConstant(ngPrc, weightsShapes, weightsData, weightsData.empty());

     std::vector<size_t> weightsFqConstShapes(weightsShapes.size(), 1);
-    if (quantGranularity == ngraph::helpers::Perchannel)
+    if (quantGranularity == QuantizationGranularity::Perchannel)
         weightsFqConstShapes[0] = weightsShapes[0];

     auto weightsFq = ngraph::builder::makeFakeQuantize(weightsNode, ngPrc, quantLevels, weightsFqConstShapes);
@@ -60,7 +60,7 @@ void QuantGroupConvLayerTest::SetUp() {
     ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};

     std::vector<size_t> dataFqConstShapes(inputShape.size(), 1);
-    if (quantGranularity == ngraph::helpers::Perchannel)
+    if (quantGranularity == QuantizationGranularity::Perchannel)
         dataFqConstShapes[1] = inputShape[1];
     auto dataFq = ngraph::builder::makeFakeQuantize(params[0], ngPrc, quantLevels, dataFqConstShapes);
@@ -76,7 +76,7 @@ void QuantGroupConvLayerTest::SetUp() {
     auto weightsNode = ngraph::builder::makeConstant(ngPrc, weightsShapes, weightsData, weightsData.empty());

     std::vector<size_t> weightsFqConstShapes(weightsShapes.size(), 1);
-    if (quantGranularity == ngraph::helpers::Perchannel)
+    if (quantGranularity == QuantizationGranularity::Perchannel)
         weightsFqConstShapes[0] = weightsShapes[0];

     std::shared_ptr<ngraph::Node> weights;
@@ -56,7 +56,7 @@ void QuantGroupConvBackpropDataLayerTest::SetUp() {
     ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};

     std::vector<size_t> dataFqConstShapes(inputShape.size(), 1);
-    if (quantGranularity == ngraph::helpers::Perchannel)
+    if (quantGranularity == QuantizationGranularity::Perchannel)
         dataFqConstShapes[1] = inputShape[1];
     auto dataFq = ngraph::builder::makeFakeQuantize(params[0], ngPrc, quantLevels, dataFqConstShapes);
@@ -72,7 +72,7 @@ void QuantGroupConvBackpropDataLayerTest::SetUp() {
     auto weightsNode = ngraph::builder::makeConstant(ngPrc, weightsShapes, weightsData, weightsData.empty());

     std::vector<size_t> weightsFqConstShapes(weightsShapes.size(), 1);
-    if (quantGranularity == ngraph::helpers::Perchannel)
+    if (quantGranularity == QuantizationGranularity::Perchannel)
         weightsFqConstShapes[0] = weightsShapes[0];

     auto weightsFq = ngraph::builder::makeFakeQuantize(weightsNode, ngPrc, quantLevels, weightsFqConstShapes);
@@ -78,7 +78,7 @@ void QuantMatMulTest::SetUp() {
                           QuantizationGranularity quantGranularity, const ngraph::Output<ngraph::Node> &in, std::vector<size_t> inputShape,
                           InferenceEngine::Precision prec) -> std::shared_ptr<ngraph::Node> {
         std::vector<size_t> dataFqConstShapes(inputShape.size(), 1);
-        if (quantGranularity == ngraph::helpers::Perchannel)
+        if (quantGranularity == QuantizationGranularity::Perchannel)
            dataFqConstShapes[1] = inputShape[1];
        size_t constDataSize = ngraph::shape_size(dataFqConstShapes);
        std::vector<float> inputLowData(constDataSize), inputHighData(constDataSize), outputLowData(constDataSize), outputHighData(constDataSize);
@@ -427,10 +427,6 @@ std::shared_ptr<ov::Node> makeEmbeddingSegmentsSum(const element::Type& dataType
                                                    bool with_weights,
                                                    bool with_default_index);

-std::shared_ptr<ov::Node> makeDepthToSpace(const ov::Output<Node>& in,
-                                           ov::op::v0::DepthToSpace::DepthToSpaceMode mode,
-                                           size_t blockSize);
-
 std::shared_ptr<ov::Node> makeSpaceToDepth(const ov::Output<Node>& in,
                                            ov::op::v0::SpaceToDepth::SpaceToDepthMode mode,
                                            size_t blockSize);
@@ -628,10 +624,6 @@ std::shared_ptr<ov::Node> makeOneHot(const ov::Output<Node>& indices,
                                      const float& off_val,
                                      const int64_t& axis);

-std::shared_ptr<ov::Node> makeRoll(const ov::Output<Node>& dataNode,
-                                   const ov::Output<Node>& shiftNode,
-                                   const ov::Output<Node>& axesNode);
-
 std::shared_ptr<ov::Node> makeDFT(const ov::Output<Node>& dataNode,
                                   const std::vector<int64_t>& axes,
                                   const std::vector<int64_t>& signalSize,
@@ -91,13 +91,7 @@ using ov::test::utils::DFTOpType;
 using ov::test::utils::InputLayerType;
 using ov::test::utils::PadMode;
 using ov::test::utils::SequenceTestsMode;
-
-enum class MemoryTransformation {
-    NONE,
-    LOW_LATENCY_V2,
-    LOW_LATENCY_V2_REGULAR_API,
-    LOW_LATENCY_V2_ORIGINAL_INIT
-};
+using ov::test::utils::MemoryTransformation;
 // clang-format on

 bool is_tensor_iterator_exist(const std::shared_ptr<ngraph::Function>& func);
@@ -138,31 +132,15 @@ std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> interpr
 std::vector<ov::Tensor> interpretFunction(const std::shared_ptr<Function>& function,
                                           const std::map<std::shared_ptr<ov::Node>, ov::Tensor>& inputs);

-//
-// This function compares two nGraph functions and requires them to have exactly one output
-// Check nodes types
-// Check number of inputs
-// Check shapes of each Node
-//
-void CompareFunctions(const Function& actual, const Function& expected);
-
 std::shared_ptr<Function> foldFunction(const std::shared_ptr<Function>& function,
                                        const std::vector<std::vector<std::uint8_t>>& inputs,
                                        const std::vector<ngraph::element::Type>& inputTypes = {});

-std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> getConstData(
-    const std::shared_ptr<Function>& function);
-
-std::shared_ptr<ngraph::Node> getNodeSharedPtr(const ngraph::NodeTypeInfo& type_info,
-                                               const ngraph::OutputVector& outputVector);
-
 std::vector<std::uint8_t> convertOutputPrecision(const std::vector<std::uint8_t>& output,
                                                  const element::Type_t& fromPrecision,
                                                  const element::Type_t& toPrecision,
                                                  const size_t elementsCount);

 std::ostream& operator<<(std::ostream& os, MemoryTransformation type);

 // todo: remove the following function from the source code after cleaning up VPU repo
 void resize_function(std::shared_ptr<ov::Model> function, const std::vector<ov::Shape>& targetInputStaticShapes);
@@ -1,18 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-
-namespace ngraph {
-namespace builder {
-
-std::shared_ptr<ov::Node> makeDepthToSpace(const ov::Output<Node>& in,
-                                           ov::op::v0::DepthToSpace::DepthToSpaceMode mode,
-                                           size_t blockSize) {
-    auto dtsNode = std::make_shared<ov::op::v0::DepthToSpace>(in, mode, blockSize);
-    return dtsNode;
-}
-
-}  // namespace builder
-}  // namespace ngraph
@@ -1,21 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "openvino/op/roll.hpp"
-
-#include <memory>
-
-#include "openvino/core/node.hpp"
-
-namespace ngraph {
-namespace builder {
-
-std::shared_ptr<ov::Node> makeRoll(const ov::Output<Node>& in,
-                                   const ov::Output<Node>& shift,
-                                   const ov::Output<Node>& axes) {
-    return std::make_shared<ov::op::v7::Roll>(in, shift, axes);
-}
-
-}  // namespace builder
-}  // namespace ngraph
@@ -240,118 +240,6 @@ std::shared_ptr<Function> foldFunction(const std::shared_ptr<Function>& function
     return foldedFunc;
 }

-std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> getConstData(
-    const std::shared_ptr<Function>& function) {
-    size_t numOutputs = function->get_output_size();
-    std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> outputs(numOutputs);
-    auto funcResults = function->get_results();
-    for (size_t i = 0; i < numOutputs; i++) {
-        outputs[i].first = funcResults[i]->get_element_type();
-        const auto& output = function->output(i).get_node_shared_ptr();
-        OPENVINO_ASSERT(output->inputs().size() == 1);
-        auto parrentNode = output->input_value(0).get_node_shared_ptr();
-        OPENVINO_ASSERT(op::is_constant(parrentNode),
-                        "Function was not fully folded to constant state!\n",
-                        "Parent node of one of results is not constant and has type ",
-                        parrentNode->get_type_name());
-
-        const auto data = std::dynamic_pointer_cast<opset1::Constant>(parrentNode)->get_data_ptr<std::uint8_t>();
-        const auto dataSize = shape_size(parrentNode->get_shape()) * parrentNode->get_element_type().size();
-        outputs[i].second.resize(dataSize);
-        std::copy(data, data + dataSize, outputs[i].second.data());
-    }
-    return outputs;
-}
-
-namespace {
-
-std::string toString(const NodeTypeInfo& typeInfo) {
-    return std::string(typeInfo.name) + " ver. " + std::string(typeInfo.version_id);
-}
-
-void CompareShapes(const PartialShape& actual, const PartialShape& expected) {
-    OPENVINO_ASSERT(actual.relaxes(expected) && actual.refines(expected),
-                    "Functions compare: Different shape detected ",
-                    actual,
-                    " and ",
-                    expected);
-}
-
-void CompareNodes(const Node& actual, const Node& expected) {
-    const auto& actualType = actual.get_type_info();
-    const auto& expectedType = expected.get_type_info();
-    OPENVINO_ASSERT(actualType == expectedType,
-                    "Functions compare: data types must be equal ",
-                    toString(actualType),
-                    " != ",
-                    toString(expectedType));
-
-    const auto& numActualInputs = actual.inputs().size();
-    const auto& numExpectedInputs = expected.inputs().size();
-    OPENVINO_ASSERT(numActualInputs == numExpectedInputs,
-                    "Functions compare: numbers of inputs are different: ",
-                    numActualInputs,
-                    " and ",
-                    numExpectedInputs);
-
-    const auto& numActualOutputs = actual.outputs().size();
-    const auto& numExpectedOutputs = expected.outputs().size();
-    OPENVINO_ASSERT(numActualOutputs == numExpectedOutputs,
-                    "Functions compare: numbers of outputs are different: ",
-                    numActualOutputs,
-                    " and ",
-                    numExpectedOutputs);
-}
-
-}  // namespace
-
-void CompareFunctions(const Function& actual, const Function& expected) {
-    const auto& actualOrderedOps = actual.get_ordered_ops();
-    const auto& expectedOrderedOps = expected.get_ordered_ops();
-
-    OPENVINO_ASSERT(expectedOrderedOps.size() == actualOrderedOps.size(),
-                    "Functions compare: expected and actual ops number should be equal "
-                    "but got ",
-                    expectedOrderedOps.size(),
-                    " and ",
-                    actualOrderedOps.size(),
-                    " respectively");
-
-    for (std::size_t i = 0; i < expectedOrderedOps.size(); i++) {
-        const auto& expectedOp = expectedOrderedOps[i];
-        const auto& actualOp = actualOrderedOps[i];
-
-        CompareNodes(*actualOp, *expectedOp);
-        for (std::size_t i = 0; i < actualOp->inputs().size(); ++i) {
-            const auto& actualShape = actualOp->input(i).get_partial_shape();
-            const auto& expectedShape = expectedOp->input(i).get_partial_shape();
-            CompareShapes(actualShape, expectedShape);
-        }
-
-        for (std::size_t i = 0; i < actualOp->outputs().size(); ++i) {
-            const auto& actualShape = actualOp->output(i).get_partial_shape();
-            const auto& expectedShape = expectedOp->output(i).get_partial_shape();
-            CompareShapes(actualShape, expectedShape);
-        }
-    }
-}
-
-std::shared_ptr<ov::Node> getNodeSharedPtr(const ov::NodeTypeInfo& type_info, const ov::OutputVector& outputVector) {
-    for (const auto& it : get_available_opsets()) {
-        const auto& opset = it.second();
-        if (opset.contains_type(type_info)) {
-            const auto node = std::shared_ptr<ov::Node>(opset.create(type_info.name));
-            node->set_arguments(outputVector);
-            node->validate_and_infer_types();
-            return node;
-        }
-    }
-    OPENVINO_THROW("supported opsets does not contain op with name: ",
-                   type_info.name,
-                   " version: ",
-                   type_info.version_id);
-}
-
 bool is_tensor_iterator_exist(const std::shared_ptr<ov::Model>& func) {
     const auto& ops = func->get_ops();
     for (const auto& node : ops) {
@@ -650,26 +538,6 @@ std::vector<std::uint8_t> convertOutputPrecision(const std::vector<std::uint8_t>
     }
 }

-std::ostream& operator<<(std::ostream& os, MemoryTransformation type) {
-    switch (type) {
-    case MemoryTransformation::NONE:
-        os << "NONE";
-        break;
-    case MemoryTransformation::LOW_LATENCY_V2:
-        os << "LOW_LATENCY_V2";
-        break;
-    case MemoryTransformation::LOW_LATENCY_V2_REGULAR_API:
-        os << "LOW_LATENCY_V2_REGULAR_API";
-        break;
-    case MemoryTransformation::LOW_LATENCY_V2_ORIGINAL_INIT:
-        os << "LOW_LATENCY_V2_ORIGINAL_INIT";
-        break;
-    default:
-        throw std::runtime_error("NOT_SUPPORTED_TYPE");
-    }
-    return os;
-}
-
 void resize_function(std::shared_ptr<ov::Model> function, const std::vector<ov::Shape>& targetInputStaticShapes) {
     auto inputs = function->inputs();
     std::map<ov::Output<ov::Node>, ov::PartialShape> shapes;
@@ -156,6 +156,11 @@ enum class DFTOpType {
     INVERSE
 };

+enum class QuantizationGranularity {
+    Pertensor,
+    Perchannel
+};
+
 enum class TensorIteratorBody {
     RNN,
     GRU,
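The ported type is a scoped `enum class`, which is what drives the mechanical call-site edits throughout this commit: the bare `Pertensor`/`Perchannel` names of the old unscoped `ngraph::helpers` enum no longer resolve. Minimal sketch:

```cpp
#include <vector>

enum class QuantizationGranularity { Pertensor, Perchannel };

// Compiles only with qualified names; bare `Pertensor` was valid for the old
// unscoped enum but is an error for an enum class.
const std::vector<QuantizationGranularity> granularity = {QuantizationGranularity::Pertensor,
                                                          QuantizationGranularity::Perchannel};
```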
@@ -169,6 +174,7 @@ enum class MemoryTransformation {
     LOW_LATENCY_V2_REGULAR_API,
     LOW_LATENCY_V2_ORIGINAL_INIT
 };
 // clang-format on

 std::ostream& operator<<(std::ostream& os, const ReductionType& m);
+std::ostream& operator<<(std::ostream& os, QuantizationGranularity type);
@@ -351,6 +351,20 @@ std::ostream& operator<<(std::ostream& os, TensorIteratorBody type) {
     return os;
 }

+std::ostream& operator<<(std::ostream& os, QuantizationGranularity type) {
+    switch (type) {
+    case QuantizationGranularity::Pertensor:
+        os << "Pertensor";
+        break;
+    case QuantizationGranularity::Perchannel:
+        os << "Perchannel";
+        break;
+    default:
+        throw std::runtime_error("NOT_SUPPORTED_OP_TYPE");
+    }
+    return os;
+}
+
 std::ostream& operator<<(std::ostream& os, MemoryTransformation type) {
     switch (type) {
     case MemoryTransformation::NONE:
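A usage sketch for the streaming operator added above, e.g. when composing parameterized-test names. The include path is assumed to be this commit's `common_test_utils/test_enums.hpp`, the namespace follows the `using ov::test::utils::...` lines earlier in the diff, and `granularity_tag` is an illustrative name:

```cpp
#include <sstream>
#include <string>

#include "common_test_utils/test_enums.hpp"  // assumed location of the ported enum

std::string granularity_tag(ov::test::utils::QuantizationGranularity g) {
    std::ostringstream result;
    result << "Granularity=" << g;  // prints "Pertensor" or "Perchannel"
    return result.str();
}
```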