Refactor ngraph builders 6 (#21133)
* Refactor make_convolution builders * refactor make_activation builders * Refactor make_eltwise builder * Refactor make_embedding_bag_offsets_sum * Refactor make_embedding_bag_packed_sum * Refactor make_embedding_segments_sum * Fix * Apply comments
This commit is contained in:
parent
7f5ba4e074
commit
08fa27762e
@ -8,7 +8,7 @@
|
||||
#include "test_utils/fusing_test_utils.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ov_models/utils/ov_helpers.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/node_builders/convolution.hpp"
|
||||
#include "openvino/core/visibility.hpp"
|
||||
#include <shared_test_classes/single_layer/convolution.hpp>
|
||||
#include "utils/general_utils.h"
|
||||
@ -203,7 +203,8 @@ protected:
|
||||
ov::ParameterVector inputParams;
|
||||
for (auto&& shape : inputDynamicShapes)
|
||||
inputParams.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape));
|
||||
auto convolutionNode = ngraph::builder::makeConvolution(inputParams[0], netType, kernel, stride, padBegin,
|
||||
|
||||
auto convolutionNode = ov::test::utils::make_convolution(inputParams[0], netType, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels);
|
||||
|
||||
function = makeNgraphFunction(netType, inputParams, convolutionNode, "Convolution");
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include "test_utils/cpu_test_utils.hpp"
|
||||
#include "test_utils/filter_cpu_info.hpp"
|
||||
#include "test_utils/fusing_test_utils.hpp"
|
||||
#include "common_test_utils/node_builders/convolution_backprop_data.hpp"
|
||||
|
||||
using namespace CPUTestUtils;
|
||||
using namespace ov::test;
|
||||
@ -164,10 +165,10 @@ public:
|
||||
std::shared_ptr<ov::Node> deconv;
|
||||
if (!outShapeData.empty()) {
|
||||
OPENVINO_ASSERT(outShapeNode != nullptr);
|
||||
deconv = ngraph::builder::makeConvolutionBackpropData(params[0], outShapeNode, prec, kernel, stride, padBegin,
|
||||
deconv = ov::test::utils::make_convolution_backprop_data(params[0], outShapeNode, prec, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels);
|
||||
} else {
|
||||
deconv = ngraph::builder::makeConvolutionBackpropData(params[0], prec, kernel, stride, padBegin,
|
||||
deconv = ov::test::utils::make_convolution_backprop_data(params[0], prec, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, false, outPadding);
|
||||
}
|
||||
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include "test_utils/convolution_params.hpp"
|
||||
#include "test_utils/fusing_test_utils.hpp"
|
||||
#include "test_utils/filter_cpu_info.hpp"
|
||||
#include "common_test_utils/node_builders/group_convolution.hpp"
|
||||
|
||||
using namespace InferenceEngine;
|
||||
using namespace CPUTestUtils;
|
||||
@ -197,9 +198,8 @@ protected:
|
||||
for (auto&& shape : inputDynamicShapes)
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(netType, shape));
|
||||
|
||||
auto groupConv = std::dynamic_pointer_cast<ngraph::opset1::GroupConvolution>(
|
||||
ngraph::builder::makeGroupConvolution(params[0], netType, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, numGroups));
|
||||
auto groupConv = ov::test::utils::make_group_convolution(params[0], netType, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, numGroups);
|
||||
function = makeNgraphFunction(netType, params, groupConv, "groupConvolution");
|
||||
}
|
||||
};
|
||||
|
@ -8,9 +8,9 @@
|
||||
#include "test_utils/fusing_test_utils.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include <common_test_utils/ov_tensor_utils.hpp>
|
||||
#include "ov_models/builders.hpp"
|
||||
#include <shared_test_classes/single_layer/group_convolution_backprop_data.hpp>
|
||||
#include "openvino/core/preprocess/pre_post_process.hpp"
|
||||
#include "common_test_utils/node_builders/group_convolution_backprop_data.hpp"
|
||||
|
||||
using namespace CPUTestUtils;
|
||||
using namespace ov::test;
|
||||
@ -183,10 +183,10 @@ public:
|
||||
std::shared_ptr<ov::Node> deconv;
|
||||
if (!outShapeData.empty()) {
|
||||
OPENVINO_ASSERT(outShapeNode != nullptr);
|
||||
deconv = ngraph::builder::makeGroupConvolutionBackpropData(params[0], outShapeNode, prec, kernel, stride, padBegin,
|
||||
deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], outShapeNode, prec, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, groupNum);
|
||||
} else {
|
||||
deconv = ngraph::builder::makeGroupConvolutionBackpropData(params[0], prec, kernel, stride, padBegin,
|
||||
deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], prec, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, groupNum, false, outPadding);
|
||||
}
|
||||
|
||||
|
@ -3,7 +3,7 @@
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/embedding_bag_packed_sum.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/node_builders/embedding_bag_packed_sum.hpp"
|
||||
|
||||
namespace LayerTestsDefinitions {
|
||||
|
||||
@ -42,9 +42,7 @@ void EmbeddingBagPackedSumLayerTest::SetUp() {
|
||||
auto emb_table_node = std::make_shared<ngraph::opset1::Parameter>(ngPrc, ngraph::Shape(embTableShape));
|
||||
ngraph::ParameterVector params = {emb_table_node};
|
||||
|
||||
auto embBag = std::dynamic_pointer_cast<ngraph::opset3::EmbeddingBagPackedSum>(
|
||||
ngraph::builder::makeEmbeddingBagPackedSum(
|
||||
ngPrc, ngIdxPrc, emb_table_node, indices, withWeights));
|
||||
auto embBag = ov::test::utils::make_embedding_bag_packed_sum(ngPrc, ngIdxPrc, emb_table_node, indices, withWeights);
|
||||
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(embBag)};
|
||||
function = std::make_shared<ngraph::Function>(results, params, "embeddingBagPackedSum");
|
||||
}
|
||||
|
@ -4,7 +4,7 @@
|
||||
|
||||
#include "shared_test_classes/single_op/activation.hpp"
|
||||
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/node_builders/activation.hpp"
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
@ -65,7 +65,7 @@ void ActivationLayerTest::SetUp() {
|
||||
std::iota(constants_value.begin(), constants_value.end(), -10);
|
||||
}
|
||||
|
||||
auto activation = ngraph::builder::makeActivation(param, model_type, activationType, const_shape, constants_value);
|
||||
auto activation = ov::test::utils::make_activation(param, model_type, activationType, const_shape, constants_value);
|
||||
|
||||
auto result = std::make_shared<ov::op::v0::Result>(activation);
|
||||
|
||||
@ -131,7 +131,7 @@ void ActivationParamLayerTest::SetUp() {
|
||||
|
||||
params[0]->set_friendly_name("Input");
|
||||
|
||||
auto activation = ngraph::builder::makeActivation(params, model_type, activationType);
|
||||
auto activation = ov::test::utils::make_activation(params, model_type, activationType);
|
||||
auto result = std::make_shared<ov::op::v0::Result>(activation);
|
||||
function = std::make_shared<ov::Model>(result, params);
|
||||
}
|
||||
|
@ -6,6 +6,7 @@
|
||||
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "common_test_utils/node_builders/binary_convolution.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
@ -70,7 +71,7 @@ void BinaryConvolutionLayerTest::SetUp() {
|
||||
|
||||
// TODO: refactor build BinaryConvolution op to accept filters input as Parameter
|
||||
auto bin_conv =
|
||||
ngraph::builder::makeBinaryConvolution(params[0], kernel_size, strides, pads_begin, pads_end, dilations, pad_type, num_out_channels, pad_value);
|
||||
ov::test::utils::make_binary_convolution(params[0], kernel_size, strides, pads_begin, pads_end, dilations, pad_type, num_out_channels, pad_value);
|
||||
auto result = std::make_shared<ov::op::v0::Result>(bin_conv);
|
||||
function = std::make_shared<ov::Model>(ov::OutputVector{result}, params, "BinaryConvolution");
|
||||
}
|
||||
|
@ -10,7 +10,7 @@
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/convolution.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/node_builders/convolution_backprop_data.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
@ -70,16 +70,14 @@ void ConvolutionBackpropDataLayerTest::SetUp() {
|
||||
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front())};
|
||||
|
||||
std::shared_ptr<ov::op::v1::ConvolutionBackpropData> convBackpropData;
|
||||
std::shared_ptr<ov::Node> convBackpropData;
|
||||
if (!output_shape.empty()) {
|
||||
auto outShape = ov::op::v0::Constant::create(ov::element::i64, {output_shape.size()}, output_shape);
|
||||
convBackpropData = std::dynamic_pointer_cast<ov::op::v1::ConvolutionBackpropData>(
|
||||
ngraph::builder::makeConvolutionBackpropData(params[0]->output(0), outShape, model_type, kernel, stride, pad_begin,
|
||||
pad_end, dilation, pad_type, convOutChannels));
|
||||
convBackpropData = ov::test::utils::make_convolution_backprop_data(
|
||||
params[0]->output(0), outShape, model_type, kernel, stride, pad_begin, pad_end, dilation, pad_type, convOutChannels);
|
||||
} else {
|
||||
convBackpropData = std::dynamic_pointer_cast<ov::op::v1::ConvolutionBackpropData>(
|
||||
ngraph::builder::makeConvolutionBackpropData(params[0]->output(0), model_type, kernel, stride, pad_begin,
|
||||
pad_end, dilation, pad_type, convOutChannels, false, out_padding));
|
||||
convBackpropData = ov::test::utils::make_convolution_backprop_data(
|
||||
params[0]->output(0), model_type, kernel, stride, pad_begin, pad_end, dilation, pad_type, convOutChannels, false, out_padding);
|
||||
}
|
||||
function = std::make_shared<ov::Model>(std::make_shared<ov::op::v0::Result>(convBackpropData), params, "convolutionBackpropData");
|
||||
}
|
||||
|
@ -6,7 +6,7 @@
|
||||
|
||||
#include "shared_test_classes/single_op/eltwise.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/node_builders/eltwise.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
@ -128,7 +128,7 @@ void EltwiseLayerTest::SetUp() {
|
||||
parameters[0]->set_friendly_name("param0");
|
||||
secondary_input->set_friendly_name("param1");
|
||||
|
||||
auto eltwise = ngraph::builder::makeEltwise(parameters[0], secondary_input, eltwise_type);
|
||||
auto eltwise = ov::test::utils::makeEltwise(parameters[0], secondary_input, eltwise_type);
|
||||
function = std::make_shared<ov::Model>(eltwise, parameters, "Eltwise");
|
||||
}
|
||||
} // namespace test
|
||||
|
@ -3,7 +3,7 @@
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_op/embedding_bag_offsets_sum.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/node_builders/embedding_bag_offsets_sum.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
@ -56,7 +56,7 @@ void EmbeddingBagOffsetsSumLayerTest::SetUp() {
|
||||
|
||||
auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front());
|
||||
|
||||
auto embBag = ngraph::builder::makeEmbeddingBagOffsetsSum(model_type, ind_type, param, indices, offsets, default_index, with_weights, with_def_index);
|
||||
auto embBag = ov::test::utils::make_embedding_bag_offsets_sum(model_type, ind_type, param, indices, offsets, default_index, with_weights, with_def_index);
|
||||
|
||||
auto result = std::make_shared<ov::op::v0::Result>(embBag);
|
||||
function = std::make_shared<ov::Model>(result, ov::ParameterVector{param}, "embeddingBagOffsetsSum");
|
||||
|
@ -3,7 +3,7 @@
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_op/embedding_segments_sum.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/node_builders/embedding_segments_sum.hpp"
|
||||
|
||||
|
||||
namespace ov {
|
||||
@ -59,7 +59,7 @@ void EmbeddingSegmentsSumLayerTest::SetUp() {
|
||||
|
||||
auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front());
|
||||
|
||||
auto embBag = ngraph::builder::makeEmbeddingSegmentsSum(model_type,
|
||||
auto embBag = ov::test::utils::make_embedding_segments_sum(model_type,
|
||||
ind_type,
|
||||
param,
|
||||
indices,
|
||||
|
@ -4,7 +4,7 @@
|
||||
|
||||
#include "shared_test_classes/single_op/group_convolution.hpp"
|
||||
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/node_builders/group_convolution.hpp"
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
@ -64,8 +64,8 @@ void GroupConvolutionLayerTest::SetUp() {
|
||||
|
||||
auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front());
|
||||
|
||||
auto group_conv = ngraph::builder::makeGroupConvolution(param, model_type, kernel, stride, pad_begin,
|
||||
pad_end, dilation, pad_type, conv_out_channels, num_groups);
|
||||
auto group_conv = ov::test::utils::make_group_convolution(
|
||||
param, model_type, kernel, stride, pad_begin, pad_end, dilation, pad_type, conv_out_channels, num_groups);
|
||||
|
||||
auto result = std::make_shared<ov::op::v0::Result>(group_conv);
|
||||
function = std::make_shared<ov::Model>(result, ov::ParameterVector{param}, "groupConvolution");
|
||||
|
@ -4,7 +4,7 @@
|
||||
|
||||
#include "shared_test_classes/single_op/group_convolution_backprop_data.hpp"
|
||||
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/node_builders/group_convolution_backprop_data.hpp"
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
@ -71,11 +71,11 @@ void GroupConvBackpropLayerTest::SetUp() {
|
||||
std::shared_ptr<ov::Node> group_conv;
|
||||
if (!output_shape.empty()) {
|
||||
auto outShape = ov::op::v0::Constant::create(ov::element::i64, {output_shape.size()}, output_shape);
|
||||
group_conv = ngraph::builder::makeGroupConvolutionBackpropData(param, outShape, model_type, kernel, stride, pad_begin,
|
||||
pad_end, dilation, pad_type, conv_out_channels, num_groups, false, out_padding);
|
||||
group_conv = ov::test::utils::make_group_convolution_backprop_data(
|
||||
param, outShape, model_type, kernel, stride, pad_begin, pad_end, dilation, pad_type, conv_out_channels, num_groups, false, out_padding);
|
||||
} else {
|
||||
group_conv = ngraph::builder::makeGroupConvolutionBackpropData(param, model_type, kernel, stride, pad_begin,
|
||||
pad_end, dilation, pad_type, conv_out_channels, num_groups, false, out_padding);
|
||||
group_conv = ov::test::utils::make_group_convolution_backprop_data(
|
||||
param, model_type, kernel, stride, pad_begin, pad_end, dilation, pad_type, conv_out_channels, num_groups, false, out_padding);
|
||||
}
|
||||
|
||||
auto result = std::make_shared<ov::op::v0::Result>(group_conv);
|
||||
|
@ -0,0 +1,22 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "openvino/core/node.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_activation(const ov::Output<Node>& in,
|
||||
const element::Type& type,
|
||||
ov::test::utils::ActivationTypes activation_type,
|
||||
ov::Shape in_shape = {},
|
||||
std::vector<float> constants_value = {});
|
||||
|
||||
std::shared_ptr<ov::Node> make_activation(const ov::ParameterVector& parameters,
|
||||
const element::Type& type,
|
||||
ov::test::utils::ActivationTypes activation_type);
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,22 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "openvino/core/node.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_binary_convolution(const ov::Output<Node>& in,
|
||||
const std::vector<size_t>& filterSize,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
size_t numOutChannels,
|
||||
float padValue,
|
||||
const std::vector<int8_t>& filterWeihgts = {});
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,37 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "openvino/core/node.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_convolution(const ov::Output<Node>& in,
|
||||
const ov::element::Type& type,
|
||||
const std::vector<size_t>& filterSize,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
size_t numOutChannels,
|
||||
bool addBiases = false,
|
||||
const std::vector<float>& filterWeights = {},
|
||||
const std::vector<float>& biasesWeights = {});
|
||||
|
||||
std::shared_ptr<ov::Node> make_convolution(const ov::Output<Node>& in_data,
|
||||
const ov::Output<Node>& in_weights,
|
||||
const ov::element::Type& type,
|
||||
const std::vector<size_t>& filterSize,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
size_t numOutChannels,
|
||||
bool addBiases = false,
|
||||
const std::vector<float>& biasesWeights = {});
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,54 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "openvino/core/node.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
|
||||
std::shared_ptr<ov::Node> make_convolution_backprop_data(const ov::Output<Node>& in,
|
||||
const ov::element::Type& type,
|
||||
const std::vector<size_t>& filterSize,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
size_t numOutChannels,
|
||||
bool addBiases = false,
|
||||
const std::vector<ptrdiff_t>& outputPadding = {},
|
||||
const std::vector<float>& filterWeights = {},
|
||||
const std::vector<float>& biasesWeights = {});
|
||||
|
||||
std::shared_ptr<ov::Node> make_convolution_backprop_data(const ov::Output<Node>& in,
|
||||
const ov::Output<Node>& weights,
|
||||
const ov::element::Type& type,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
bool addBiases = false,
|
||||
const std::vector<ptrdiff_t>& outputPadding = {},
|
||||
const std::vector<float>& biasesWeights = {});
|
||||
|
||||
std::shared_ptr<ov::Node> make_convolution_backprop_data(const ov::Output<Node>& in,
|
||||
const ov::Output<Node>& outputShape,
|
||||
const ov::element::Type& type,
|
||||
const std::vector<size_t>& filterSize,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
size_t numOutChannels,
|
||||
bool addBiases = false,
|
||||
const std::vector<ptrdiff_t>& outputPadding = {},
|
||||
const std::vector<float>& filterWeights = {},
|
||||
const std::vector<float>& biasesWeights = {});
|
||||
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,16 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "openvino/core/node.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> makeEltwise(const ov::Output<Node>& in0,
|
||||
const ov::Output<Node>& in1,
|
||||
ov::test::utils::EltwiseTypes eltwise_type);
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,20 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "openvino/core/node.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_embedding_bag_offsets_sum(const element::Type& data_type,
|
||||
const ov::element::Type& indices_type,
|
||||
const ov::Output<Node>& emb_table_node,
|
||||
const std::vector<size_t>& indices,
|
||||
const std::vector<size_t>& offsets,
|
||||
size_t default_index,
|
||||
bool with_weights,
|
||||
bool with_default_index);
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,17 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "openvino/core/node.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_embedding_bag_packed_sum(const ov::element::Type& data_type,
|
||||
const ov::element::Type& indices_type,
|
||||
const ov::Output<Node>& emb_table_node,
|
||||
const std::vector<std::vector<size_t>>& indices,
|
||||
bool with_weights);
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,21 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "openvino/core/node.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_embedding_segments_sum(const ov::element::Type& data_type,
|
||||
const ov::element::Type& indices_type,
|
||||
const ov::Output<Node>& emb_table_node,
|
||||
const std::vector<size_t>& indices,
|
||||
const std::vector<size_t>& segment_ids,
|
||||
size_t num_segments,
|
||||
size_t default_index,
|
||||
bool with_weights,
|
||||
bool with_default_index);
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,36 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "openvino/core/node.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_group_convolution(const ov::Output<Node>& in,
|
||||
const ov::element::Type& type,
|
||||
const std::vector<size_t>& filterSize,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
size_t numOutChannels,
|
||||
size_t numGroups,
|
||||
bool addBiases = false,
|
||||
const std::vector<float>& filterWeights = {},
|
||||
const std::vector<float>& biasesWeights = {});
|
||||
|
||||
std::shared_ptr<ov::Node> make_group_convolution(const ov::Output<Node>& in,
|
||||
const ov::Output<Node>& weights,
|
||||
const ov::element::Type& type,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
bool addBiases = false,
|
||||
const std::vector<float>& biasesWeights = {});
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,54 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "openvino/core/node.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_group_convolution_backprop_data(const ov::Output<Node>& in,
|
||||
const ov::element::Type& type,
|
||||
const std::vector<size_t>& filterSize,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
size_t numOutChannels,
|
||||
size_t numGroups,
|
||||
bool addBiases = false,
|
||||
const std::vector<ptrdiff_t>& outputPadding = {},
|
||||
const std::vector<float>& filterWeights = {},
|
||||
const std::vector<float>& biasesWeights = {});
|
||||
|
||||
std::shared_ptr<ov::Node> make_group_convolution_backprop_data(const ov::Output<Node>& in,
|
||||
const ov::Output<Node>& weights,
|
||||
const ov::element::Type& type,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
bool addBiases = false,
|
||||
const std::vector<ptrdiff_t>& outputPadding = {},
|
||||
const std::vector<float>& biasesWeights = {});
|
||||
|
||||
std::shared_ptr<ov::Node> make_group_convolution_backprop_data(const ov::Output<Node>& in,
|
||||
const ov::Output<Node>& outputShape,
|
||||
const ov::element::Type& type,
|
||||
const std::vector<size_t>& filterSize,
|
||||
const std::vector<size_t>& strides,
|
||||
const std::vector<ptrdiff_t>& padsBegin,
|
||||
const std::vector<ptrdiff_t>& padsEnd,
|
||||
const std::vector<size_t>& dilations,
|
||||
const ov::op::PadType& autoPad,
|
||||
size_t numOutChannels,
|
||||
size_t numGroups,
|
||||
bool addBiases = false,
|
||||
const std::vector<ptrdiff_t>& outputPadding = {},
|
||||
const std::vector<float>& filterWeights = {},
|
||||
const std::vector<float>& biasesWeights = {});
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,170 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/node_builders/activation.hpp"
|
||||
|
||||
#include "openvino/op/abs.hpp"
|
||||
#include "openvino/op/acos.hpp"
|
||||
#include "openvino/op/acosh.hpp"
|
||||
#include "openvino/op/asin.hpp"
|
||||
#include "openvino/op/asinh.hpp"
|
||||
#include "openvino/op/atan.hpp"
|
||||
#include "openvino/op/atanh.hpp"
|
||||
#include "openvino/op/ceiling.hpp"
|
||||
#include "openvino/op/clamp.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/cos.hpp"
|
||||
#include "openvino/op/cosh.hpp"
|
||||
#include "openvino/op/elu.hpp"
|
||||
#include "openvino/op/erf.hpp"
|
||||
#include "openvino/op/exp.hpp"
|
||||
#include "openvino/op/floor.hpp"
|
||||
#include "openvino/op/gelu.hpp"
|
||||
#include "openvino/op/hard_sigmoid.hpp"
|
||||
#include "openvino/op/hsigmoid.hpp"
|
||||
#include "openvino/op/hswish.hpp"
|
||||
#include "openvino/op/log.hpp"
|
||||
#include "openvino/op/mish.hpp"
|
||||
#include "openvino/op/negative.hpp"
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/prelu.hpp"
|
||||
#include "openvino/op/relu.hpp"
|
||||
#include "openvino/op/round.hpp"
|
||||
#include "openvino/op/selu.hpp"
|
||||
#include "openvino/op/sigmoid.hpp"
|
||||
#include "openvino/op/sign.hpp"
|
||||
#include "openvino/op/sin.hpp"
|
||||
#include "openvino/op/sinh.hpp"
|
||||
#include "openvino/op/softplus.hpp"
|
||||
#include "openvino/op/softsign.hpp"
|
||||
#include "openvino/op/sqrt.hpp"
|
||||
#include "openvino/op/swish.hpp"
|
||||
#include "openvino/op/tan.hpp"
|
||||
#include "openvino/op/tanh.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_activation(const ov::Output<Node>& in,
|
||||
const element::Type& type,
|
||||
ov::test::utils::ActivationTypes activation_type,
|
||||
ov::Shape in_shape,
|
||||
std::vector<float> constants_value) {
|
||||
switch (activation_type) {
|
||||
case ov::test::utils::ActivationTypes::Sigmoid:
|
||||
return std::make_shared<ov::op::v0::Sigmoid>(in);
|
||||
case ov::test::utils::ActivationTypes::Tanh:
|
||||
return std::make_shared<ov::op::v0::Tanh>(in);
|
||||
case ov::test::utils::ActivationTypes::Relu:
|
||||
return std::make_shared<ov::op::v0::Relu>(in);
|
||||
case ov::test::utils::ActivationTypes::LeakyRelu: {
|
||||
auto leaky_slope = std::make_shared<ov::op::v0::Constant>(ov::element::f32, in_shape, constants_value);
|
||||
return std::make_shared<ov::op::v0::PRelu>(in, leaky_slope);
|
||||
}
|
||||
case ov::test::utils::ActivationTypes::Exp:
|
||||
return std::make_shared<ov::op::v0::Exp>(in);
|
||||
case ov::test::utils::ActivationTypes::Log:
|
||||
return std::make_shared<ov::op::v0::Log>(in);
|
||||
case ov::test::utils::ActivationTypes::Sign:
|
||||
return std::make_shared<ov::op::v0::Sign>(in);
|
||||
case ov::test::utils::ActivationTypes::Abs:
|
||||
return std::make_shared<ov::op::v0::Abs>(in);
|
||||
case ov::test::utils::ActivationTypes::Gelu:
|
||||
return std::make_shared<ov::op::v0::Gelu>(in);
|
||||
case ov::test::utils::ActivationTypes::Clamp:
|
||||
return std::make_shared<ov::op::v0::Clamp>(in, constants_value[0], constants_value[1]);
|
||||
case ov::test::utils::ActivationTypes::Negative:
|
||||
return std::make_shared<ov::op::v0::Negative>(in);
|
||||
case ov::test::utils::ActivationTypes::Acos:
|
||||
return std::make_shared<ov::op::v0::Acos>(in);
|
||||
case ov::test::utils::ActivationTypes::Acosh:
|
||||
return std::make_shared<ov::op::v3::Acosh>(in);
|
||||
case ov::test::utils::ActivationTypes::Asin:
|
||||
return std::make_shared<ov::op::v0::Asin>(in);
|
||||
case ov::test::utils::ActivationTypes::Asinh:
|
||||
return std::make_shared<ov::op::v3::Asinh>(in);
|
||||
case ov::test::utils::ActivationTypes::Atan:
|
||||
return std::make_shared<ov::op::v0::Atan>(in);
|
||||
case ov::test::utils::ActivationTypes::Atanh:
|
||||
return std::make_shared<ov::op::v3::Atanh>(in);
|
||||
case ov::test::utils::ActivationTypes::Cos:
|
||||
return std::make_shared<ov::op::v0::Cos>(in);
|
||||
case ov::test::utils::ActivationTypes::Cosh:
|
||||
return std::make_shared<ov::op::v0::Cosh>(in);
|
||||
case ov::test::utils::ActivationTypes::Floor:
|
||||
return std::make_shared<ov::op::v0::Floor>(in);
|
||||
case ov::test::utils::ActivationTypes::Sin:
|
||||
return std::make_shared<ov::op::v0::Sin>(in);
|
||||
case ov::test::utils::ActivationTypes::Sinh:
|
||||
return std::make_shared<ov::op::v0::Sinh>(in);
|
||||
case ov::test::utils::ActivationTypes::Sqrt:
|
||||
return std::make_shared<ov::op::v0::Sqrt>(in);
|
||||
case ov::test::utils::ActivationTypes::Tan:
|
||||
return std::make_shared<ov::op::v0::Tan>(in);
|
||||
case ov::test::utils::ActivationTypes::Elu:
|
||||
return std::make_shared<ov::op::v0::Elu>(in, constants_value[0]);
|
||||
case ov::test::utils::ActivationTypes::Erf:
|
||||
return std::make_shared<ov::op::v0::Erf>(in);
|
||||
case ov::test::utils::ActivationTypes::HardSigmoid: {
|
||||
auto hard_sigmoid_alpha = std::make_shared<ov::op::v0::Constant>(type, in_shape, constants_value[0]);
|
||||
auto hard_sigmoid_beta = std::make_shared<ov::op::v0::Constant>(type, in_shape, constants_value[1]);
|
||||
return std::make_shared<ov::op::v0::HardSigmoid>(in, hard_sigmoid_alpha, hard_sigmoid_beta);
|
||||
}
|
||||
case ov::test::utils::ActivationTypes::Selu: {
|
||||
auto selu_alpha = std::make_shared<ov::op::v0::Constant>(type, in_shape, constants_value[0]);
|
||||
auto selu_lambda = std::make_shared<ov::op::v0::Constant>(type, in_shape, constants_value[1]);
|
||||
return std::make_shared<ov::op::v0::Selu>(in, selu_alpha, selu_lambda);
|
||||
}
|
||||
case ov::test::utils::ActivationTypes::Ceiling:
|
||||
return std::make_shared<ov::op::v0::Ceiling>(in);
|
||||
case ov::test::utils::ActivationTypes::PReLu: {
|
||||
auto negative_slope = std::make_shared<ov::op::v0::Constant>(ov::element::f32, in_shape, constants_value);
|
||||
return std::make_shared<ov::op::v0::PRelu>(in, negative_slope);
|
||||
}
|
||||
case ov::test::utils::ActivationTypes::Mish:
|
||||
return std::make_shared<ov::op::v4::Mish>(in);
|
||||
case ov::test::utils::ActivationTypes::HSwish:
|
||||
return std::make_shared<ov::op::v4::HSwish>(in);
|
||||
case ov::test::utils::ActivationTypes::SoftPlus:
|
||||
return std::make_shared<ov::op::v4::SoftPlus>(in);
|
||||
case ov::test::utils::ActivationTypes::Swish: {
|
||||
auto beta = std::make_shared<ov::op::v0::Constant>(type, in_shape, constants_value[0]);
|
||||
return std::make_shared<ov::op::v4::Swish>(in, beta);
|
||||
}
|
||||
case ov::test::utils::ActivationTypes::HSigmoid:
|
||||
return std::make_shared<ov::op::v5::HSigmoid>(in);
|
||||
case ov::test::utils::ActivationTypes::RoundHalfToEven:
|
||||
return std::make_shared<ov::op::v5::Round>(in, ov::op::v5::Round::RoundMode::HALF_TO_EVEN);
|
||||
case ov::test::utils::ActivationTypes::RoundHalfAwayFromZero:
|
||||
return std::make_shared<ov::op::v5::Round>(in, ov::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO);
|
||||
case ov::test::utils::ActivationTypes::GeluErf:
|
||||
return std::make_shared<ov::op::v7::Gelu>(in, ov::op::GeluApproximationMode::ERF);
|
||||
case ov::test::utils::ActivationTypes::GeluTanh:
|
||||
return std::make_shared<ov::op::v7::Gelu>(in, ov::op::GeluApproximationMode::TANH);
|
||||
case ov::test::utils::ActivationTypes::SoftSign:
|
||||
return std::make_shared<ov::op::v9::SoftSign>(in);
|
||||
default:
|
||||
OPENVINO_THROW("Can't create layer for this activation type");
|
||||
}
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::Node> make_activation(const ov::ParameterVector& parameters,
                                          const element::Type& type,
                                          ov::test::utils::ActivationTypes activation_type) {
    // Builds an activation whose auxiliary inputs (slope, alpha/beta/lambda)
    // come from dedicated Parameters rather than Constants. Only the four
    // activation kinds with such inputs are supported here.
    using ov::test::utils::ActivationTypes;
    if (activation_type == ActivationTypes::LeakyRelu)
        return std::make_shared<ov::op::v0::PRelu>(parameters[0], parameters[1]);
    if (activation_type == ActivationTypes::HardSigmoid)
        return std::make_shared<ov::op::v0::HardSigmoid>(parameters[0], parameters[1], parameters[2]);
    if (activation_type == ActivationTypes::Selu)
        return std::make_shared<ov::op::v0::Selu>(parameters[0], parameters[1], parameters[2]);
    if (activation_type == ActivationTypes::PReLu)
        return std::make_shared<ov::op::v0::PRelu>(parameters[0], parameters[1]);
    OPENVINO_THROW("It is impossible to create layer for this activation type with input as parameter");
}
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,55 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/node_builders/binary_convolution.hpp"
|
||||
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/op/add.hpp"
|
||||
#include "openvino/op/binary_convolution.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_binary_convolution(const ov::Output<Node>& in,
                                                  const std::vector<size_t>& filter_size,
                                                  const std::vector<size_t>& strides,
                                                  const std::vector<ptrdiff_t>& pads_begin,
                                                  const std::vector<ptrdiff_t>& pads_end,
                                                  const std::vector<size_t>& dilations,
                                                  const ov::op::PadType& auto_pad,
                                                  size_t num_out_channels,
                                                  float pad_value,
                                                  const std::vector<int8_t>& filter_weihgts) {
    // Creates a BinaryConvolution node (XNOR_POPCOUNT mode) with u1-packed
    // filter weights of shape [num_out_channels, C_in, filter_size...].
    // When filter_weihgts is empty the weights are filled with random bytes;
    // otherwise the caller-provided, already bit-packed bytes are copied in.
    auto shape = in.get_shape();
    ov::Shape filter_weights_shape = {num_out_channels, shape[1]};
    filter_weights_shape.insert(filter_weights_shape.end(), filter_size.begin(), filter_size.end());

    // The u1 Constant is created uninitialized and its packed bit storage is
    // filled in place; const_cast is needed because Constant only exposes a
    // const data pointer. 8 weights are packed per byte.
    auto filter_weights_node = std::make_shared<ov::op::v0::Constant>(element::u1, filter_weights_shape);
    const size_t byteNum = (ov::shape_size(filter_weights_shape) + 7) / 8;
    int8_t* buffer = const_cast<int8_t*>(filter_weights_node->get_data_ptr<int8_t>());
    if (filter_weihgts.size() == 0) {
        // No weights supplied: take the first byteNum random bytes of an i8 tensor.
        auto tensor = create_and_fill_tensor(ov::element::i8, filter_weights_shape);
        auto weights = static_cast<int8_t*>(tensor.data());
        for (size_t i = 0; i < byteNum; i++)
            buffer[i] = weights[i];
    } else {
        // Guard against reading past the caller-provided packed buffer
        // (previously this was silent undefined behavior).
        OPENVINO_ASSERT(filter_weihgts.size() >= byteNum,
                        "make_binary_convolution: filter weights buffer is too small");
        for (size_t i = 0; i < byteNum; i++)
            buffer[i] = filter_weihgts[i];
    }
    auto conv = std::make_shared<ov::op::v1::BinaryConvolution>(
        in,
        filter_weights_node,
        strides,
        pads_begin,
        pads_end,
        dilations,
        ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT,
        pad_value,
        auto_pad);
    return conv;
}
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,101 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/node_builders/convolution.hpp"
|
||||
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/op/add.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/convolution.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_convolution(const ov::Output<Node>& in,
                                           const ov::element::Type& type,
                                           const std::vector<size_t>& filter_size,
                                           const std::vector<size_t>& strides,
                                           const std::vector<ptrdiff_t>& pads_begin,
                                           const std::vector<ptrdiff_t>& pads_end,
                                           const std::vector<size_t>& dilations,
                                           const ov::op::PadType& auto_pad,
                                           size_t num_out_channels,
                                           bool add_biases,
                                           const std::vector<float>& filter_weights,
                                           const std::vector<float>& biases_weights) {
    // Builds a Convolution with constant filter weights of shape
    // [num_out_channels, C_in, filter_size...]; weights are random when
    // filter_weights is empty. Optionally appends a per-channel bias Add.
    const auto& data_shape = in.get_partial_shape();
    ov::Shape weights_shape{num_out_channels, static_cast<size_t>(data_shape[1].get_length())};
    weights_shape.insert(weights_shape.end(), filter_size.begin(), filter_size.end());

    auto weights_node =
        filter_weights.empty()
            ? std::make_shared<ov::op::v0::Constant>(create_and_fill_tensor(type, weights_shape))
            : std::make_shared<ov::op::v0::Constant>(type, weights_shape, filter_weights);

    auto conv = std::make_shared<ov::op::v1::Convolution>(in,
                                                          weights_node,
                                                          strides,
                                                          pads_begin,
                                                          pads_end,
                                                          dilations,
                                                          auto_pad);
    if (!add_biases)
        return conv;

    // Bias constant is broadcastable over spatial dims: [1, C_out, 1, 1].
    const ov::Shape bias_shape{1, num_out_channels, 1, 1};
    auto bias_node = biases_weights.empty()
                         ? std::make_shared<ov::op::v0::Constant>(create_and_fill_tensor(type, bias_shape))
                         : std::make_shared<ov::op::v0::Constant>(type, bias_shape, biases_weights);
    return std::make_shared<ov::op::v1::Add>(conv, bias_node);
}
|
||||
|
||||
std::shared_ptr<ov::Node> make_convolution(const ov::Output<Node>& in_data,
                                           const ov::Output<Node>& in_weights,
                                           const ov::element::Type& type,
                                           const std::vector<size_t>& filter_size,
                                           const std::vector<size_t>& strides,
                                           const std::vector<ptrdiff_t>& pads_begin,
                                           const std::vector<ptrdiff_t>& pads_end,
                                           const std::vector<size_t>& dilations,
                                           const ov::op::PadType& auto_pad,
                                           size_t num_out_channels,
                                           bool add_biases,
                                           const std::vector<float>& biases_weights) {
    // Builds a Convolution from explicitly provided weights. filter_size is
    // kept in the signature for parity with the constant-weights overload but
    // is not used here (the weight shape comes from in_weights itself).
    // Optionally appends a per-channel bias Add ([1, C_out, 1, 1], random when
    // biases_weights is empty).
    // Fix: removed the unused `shape = in_data.get_partial_shape()` local.
    auto conv = std::make_shared<ov::op::v1::Convolution>(in_data,
                                                          in_weights,
                                                          strides,
                                                          pads_begin,
                                                          pads_end,
                                                          dilations,
                                                          auto_pad);
    if (add_biases) {
        std::shared_ptr<ov::op::v0::Constant> biases_weights_node;
        if (!biases_weights.empty()) {
            biases_weights_node =
                std::make_shared<ov::op::v0::Constant>(type, ov::Shape{1, num_out_channels, 1, 1}, biases_weights);
        } else {
            auto tensor = create_and_fill_tensor(type, ov::Shape{1, num_out_channels, 1, 1});
            biases_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
        }

        auto add = std::make_shared<ov::op::v1::Add>(conv, biases_weights_node);
        return add;
    } else {
        return conv;
    }
}
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,163 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/node_builders/convolution_backprop_data.hpp"
|
||||
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/op/add.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/convolution.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_convolution_backprop_data(const ov::Output<Node>& in,
                                                         const ov::element::Type& type,
                                                         const std::vector<size_t>& filter_size,
                                                         const std::vector<size_t>& strides,
                                                         const std::vector<ptrdiff_t>& pads_begin,
                                                         const std::vector<ptrdiff_t>& pads_end,
                                                         const std::vector<size_t>& dilations,
                                                         const ov::op::PadType& auto_pad,
                                                         size_t num_out_channels,
                                                         bool add_biases,
                                                         const std::vector<ptrdiff_t>& output_padding,
                                                         const std::vector<float>& filter_weights,
                                                         const std::vector<float>& biases_weights) {
    // Convenience overload: materializes a constant weight tensor of shape
    // [C_in, num_out_channels, filter_size...] (random when filter_weights is
    // empty) and delegates to the explicit-weights overload.
    const auto& data_shape = in.get_partial_shape();
    ov::Shape weights_shape{static_cast<size_t>(data_shape[1].get_length()), num_out_channels};
    weights_shape.insert(weights_shape.end(), filter_size.begin(), filter_size.end());

    auto weights_node =
        filter_weights.empty()
            ? std::make_shared<ov::op::v0::Constant>(create_and_fill_tensor(type, weights_shape))
            : std::make_shared<ov::op::v0::Constant>(type, weights_shape, filter_weights);

    return make_convolution_backprop_data(in,
                                          weights_node,
                                          type,
                                          strides,
                                          pads_begin,
                                          pads_end,
                                          dilations,
                                          auto_pad,
                                          add_biases,
                                          output_padding,
                                          biases_weights);
}
|
||||
|
||||
std::shared_ptr<ov::Node> make_convolution_backprop_data(const ov::Output<Node>& in,
                                                         const ov::Output<Node>& weights,
                                                         const ov::element::Type& type,
                                                         const std::vector<size_t>& strides,
                                                         const std::vector<ptrdiff_t>& pads_begin,
                                                         const std::vector<ptrdiff_t>& pads_end,
                                                         const std::vector<size_t>& dilations,
                                                         const ov::op::PadType& auto_pad,
                                                         bool add_biases,
                                                         const std::vector<ptrdiff_t>& output_padding,
                                                         const std::vector<float>& biases_weights) {
    // Builds ConvolutionBackpropData from explicit weights, optionally adding
    // a bias. Fix: construct the node exactly once, picking the constructor
    // with output_padding only when it is requested (previously a node was
    // always built and then discarded/rebuilt when output_padding was set).
    std::shared_ptr<ov::Node> deconv;
    if (output_padding.empty()) {
        deconv = std::make_shared<ov::op::v1::ConvolutionBackpropData>(in,
                                                                       weights,
                                                                       strides,
                                                                       pads_begin,
                                                                       pads_end,
                                                                       dilations,
                                                                       auto_pad);
    } else {
        deconv = std::make_shared<ov::op::v1::ConvolutionBackpropData>(in,
                                                                       weights,
                                                                       strides,
                                                                       pads_begin,
                                                                       pads_end,
                                                                       dilations,
                                                                       auto_pad,
                                                                       output_padding);
    }

    if (!add_biases)
        return deconv;

    // NOTE(review): the bias constant uses an empty (scalar) shape, so
    // biases_weights is expected to hold a single value — confirm with callers.
    std::shared_ptr<ov::op::v0::Constant> biases_weights_node;
    if (!biases_weights.empty()) {
        biases_weights_node = std::make_shared<ov::op::v0::Constant>(type, ov::Shape{}, biases_weights);
    } else {
        auto tensor = create_and_fill_tensor(type, ov::Shape{});
        biases_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
    }
    return std::make_shared<ov::op::v1::Add>(deconv, biases_weights_node);
}
|
||||
|
||||
std::shared_ptr<ov::Node> make_convolution_backprop_data(const ov::Output<Node>& in,
                                                         const ov::Output<Node>& outputShape,
                                                         const ov::element::Type& type,
                                                         const std::vector<size_t>& filter_size,
                                                         const std::vector<size_t>& strides,
                                                         const std::vector<ptrdiff_t>& pads_begin,
                                                         const std::vector<ptrdiff_t>& pads_end,
                                                         const std::vector<size_t>& dilations,
                                                         const ov::op::PadType& auto_pad,
                                                         size_t num_out_channels,
                                                         bool add_biases,
                                                         const std::vector<ptrdiff_t>& output_padding,
                                                         const std::vector<float>& filter_weights,
                                                         const std::vector<float>& biases_weights) {
    // Builds ConvolutionBackpropData with an explicit output-shape input and
    // constant weights [C_in, num_out_channels, filter_size...] (random when
    // filter_weights is empty), optionally adding a bias.
    auto shape = in.get_partial_shape();
    ov::Shape filter_weights_shape = {static_cast<size_t>(shape[1].get_length()), num_out_channels};
    filter_weights_shape.insert(filter_weights_shape.end(), filter_size.begin(), filter_size.end());

    std::shared_ptr<ov::op::v0::Constant> filter_weights_node;
    if (!filter_weights.empty()) {
        filter_weights_node = std::make_shared<ov::op::v0::Constant>(type, filter_weights_shape, filter_weights);
    } else {
        auto tensor = create_and_fill_tensor(type, filter_weights_shape);
        filter_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
    }

    // Fix: construct the node exactly once, picking the constructor with
    // output_padding only when it is requested (previously a node was always
    // built and then discarded/rebuilt when output_padding was set).
    std::shared_ptr<ov::Node> deconv;
    if (output_padding.empty()) {
        deconv = std::make_shared<ov::op::v1::ConvolutionBackpropData>(in,
                                                                       filter_weights_node,
                                                                       outputShape,
                                                                       strides,
                                                                       pads_begin,
                                                                       pads_end,
                                                                       dilations,
                                                                       auto_pad);
    } else {
        deconv = std::make_shared<ov::op::v1::ConvolutionBackpropData>(in,
                                                                       filter_weights_node,
                                                                       outputShape,
                                                                       strides,
                                                                       pads_begin,
                                                                       pads_end,
                                                                       dilations,
                                                                       auto_pad,
                                                                       output_padding);
    }

    if (!add_biases)
        return deconv;

    // NOTE(review): the bias constant uses an empty (scalar) shape, so
    // biases_weights is expected to hold a single value — confirm with callers.
    std::shared_ptr<ov::op::v0::Constant> biases_weights_node;
    if (!biases_weights.empty()) {
        biases_weights_node = std::make_shared<ov::op::v0::Constant>(type, ov::Shape{}, biases_weights);
    } else {
        auto tensor = create_and_fill_tensor(type, ov::Shape{});
        biases_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
    }
    return std::make_shared<ov::op::v1::Add>(deconv, biases_weights_node);
}
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,61 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/node_builders/eltwise.hpp"
|
||||
|
||||
#include "openvino/op/add.hpp"
|
||||
#include "openvino/op/bitwise_and.hpp"
|
||||
#include "openvino/op/bitwise_not.hpp"
|
||||
#include "openvino/op/bitwise_or.hpp"
|
||||
#include "openvino/op/bitwise_xor.hpp"
|
||||
#include "openvino/op/divide.hpp"
|
||||
#include "openvino/op/erf.hpp"
|
||||
#include "openvino/op/floor_mod.hpp"
|
||||
#include "openvino/op/mod.hpp"
|
||||
#include "openvino/op/multiply.hpp"
|
||||
#include "openvino/op/power.hpp"
|
||||
#include "openvino/op/squared_difference.hpp"
|
||||
#include "openvino/op/subtract.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> makeEltwise(const ov::Output<Node>& in0,
                                      const ov::Output<Node>& in1,
                                      ov::test::utils::EltwiseTypes eltwiseType) {
    // Maps an eltwise enum value onto the corresponding OpenVINO op.
    // Unary kinds (ERF, BITWISE_NOT) use only the first input; in1 is ignored.
    using ov::test::utils::EltwiseTypes;
    if (eltwiseType == EltwiseTypes::ADD)
        return std::make_shared<ov::op::v1::Add>(in0, in1);
    if (eltwiseType == EltwiseTypes::SUBTRACT)
        return std::make_shared<ov::op::v1::Subtract>(in0, in1);
    if (eltwiseType == EltwiseTypes::MULTIPLY)
        return std::make_shared<ov::op::v1::Multiply>(in0, in1);
    if (eltwiseType == EltwiseTypes::DIVIDE)
        return std::make_shared<ov::op::v1::Divide>(in0, in1);
    if (eltwiseType == EltwiseTypes::SQUARED_DIFF)
        return std::make_shared<ov::op::v0::SquaredDifference>(in0, in1);
    if (eltwiseType == EltwiseTypes::POWER)
        return std::make_shared<ov::op::v1::Power>(in0, in1);
    if (eltwiseType == EltwiseTypes::FLOOR_MOD)
        return std::make_shared<ov::op::v1::FloorMod>(in0, in1);
    if (eltwiseType == EltwiseTypes::MOD)
        return std::make_shared<ov::op::v1::Mod>(in0, in1);
    if (eltwiseType == EltwiseTypes::ERF)
        return std::make_shared<ov::op::v0::Erf>(in0);
    if (eltwiseType == EltwiseTypes::BITWISE_AND)
        return std::make_shared<ov::op::v13::BitwiseAnd>(in0, in1);
    if (eltwiseType == EltwiseTypes::BITWISE_NOT)
        return std::make_shared<ov::op::v13::BitwiseNot>(in0);
    if (eltwiseType == EltwiseTypes::BITWISE_OR)
        return std::make_shared<ov::op::v13::BitwiseOr>(in0, in1);
    if (eltwiseType == EltwiseTypes::BITWISE_XOR)
        return std::make_shared<ov::op::v13::BitwiseXor>(in0, in1);
    OPENVINO_THROW("Incorrect type of Eltwise operation");
}
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,52 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/node_builders/embedding_bag_offsets_sum.hpp"
|
||||
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/embeddingbag_offsets_sum.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_embedding_bag_offsets_sum(const element::Type& data_type,
                                                         const ov::element::Type& indices_type,
                                                         const ov::Output<Node>& emb_table_node,
                                                         const std::vector<size_t>& indices,
                                                         const std::vector<size_t>& offsets,
                                                         size_t default_index,
                                                         bool with_weights,
                                                         bool with_default_index) {
    // Builds EmbeddingBagOffsetsSum with constant indices/offsets.
    // The op's input order requires default_index before per-sample weights,
    // so with_weights only takes effect when with_default_index is set
    // (matching the op's optional-input layout).
    auto indices_node =
        std::make_shared<ov::op::v0::Constant>(indices_type, ov::Shape{indices.size()}, indices);
    auto offsets_node =
        std::make_shared<ov::op::v0::Constant>(indices_type, ov::Shape{offsets.size()}, offsets);

    if (!with_default_index)
        return std::make_shared<ov::op::v3::EmbeddingBagOffsetsSum>(emb_table_node, indices_node, offsets_node);

    auto default_index_node = std::make_shared<ov::op::v0::Constant>(indices_type, ov::Shape{}, default_index);
    if (!with_weights)
        return std::make_shared<ov::op::v3::EmbeddingBagOffsetsSum>(emb_table_node,
                                                                    indices_node,
                                                                    offsets_node,
                                                                    default_index_node);

    // Random per-index weights, one per indices entry.
    auto weights_tensor = create_and_fill_tensor(data_type, ov::Shape{indices.size()});
    auto weights_node = std::make_shared<ov::op::v0::Constant>(weights_tensor);
    return std::make_shared<ov::op::v3::EmbeddingBagOffsetsSum>(emb_table_node,
                                                                indices_node,
                                                                offsets_node,
                                                                default_index_node,
                                                                weights_node);
}
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,39 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/node_builders/embedding_bag_packed_sum.hpp"
|
||||
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/embeddingbag_packedsum.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_embedding_bag_packed_sum(const ov::element::Type& data_type,
                                                        const ov::element::Type& indices_type,
                                                        const ov::Output<Node>& emb_table_node,
                                                        const std::vector<std::vector<size_t>>& indices,
                                                        bool with_weights) {
    // Flattens the 2D index batch into a [batch, seq] Constant and builds
    // EmbeddingBagPackedSum, optionally with random per-index weights.
    // Fixes: `indices[0]` was dereferenced without an emptiness check; the
    // flattening loop used a signed `int` counter against size(); the memcpy
    // read indices[0].size() elements from every row, which was undefined
    // behavior for ragged input (and relied on <cstring> transitively).
    OPENVINO_ASSERT(!indices.empty(), "make_embedding_bag_packed_sum: indices must not be empty");
    const size_t seq_len = indices[0].size();
    ov::Shape i_shape({indices.size(), seq_len});

    std::vector<size_t> i_values;
    i_values.reserve(ov::shape_size(i_shape));
    for (const auto& row : indices) {
        OPENVINO_ASSERT(row.size() == seq_len,
                        "make_embedding_bag_packed_sum: all index rows must have equal size");
        i_values.insert(i_values.end(), row.begin(), row.end());
    }
    auto indicesNode = std::make_shared<ov::op::v0::Constant>(indices_type, i_shape, i_values);

    if (!with_weights)
        return std::make_shared<ov::op::v3::EmbeddingBagPackedSum>(emb_table_node, indicesNode);

    // Random weights, one per (batch, seq) index.
    auto tensor = create_and_fill_tensor(data_type, i_shape);
    auto weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
    return std::make_shared<ov::op::v3::EmbeddingBagPackedSum>(emb_table_node, indicesNode, weights_node);
}
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,60 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/node_builders/embedding_segments_sum.hpp"
|
||||
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/embedding_segments_sum.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_embedding_segments_sum(const ov::element::Type& data_type,
                                                      const ov::element::Type& indices_type,
                                                      const ov::Output<Node>& emb_table_node,
                                                      const std::vector<size_t>& indices,
                                                      const std::vector<size_t>& segment_ids,
                                                      size_t num_segments,
                                                      size_t default_index,
                                                      bool with_weights,
                                                      bool with_default_index) {
    // Builds EmbeddingSegmentsSum with constant indices/segment-ids/segment
    // count. As with the offsets variant, the op's input order puts
    // default_index before per-sample weights, so with_weights only takes
    // effect when with_default_index is set.
    const ov::Shape scalar_shape{};
    auto indices_node =
        std::make_shared<ov::op::v0::Constant>(indices_type, ov::Shape{indices.size()}, indices);
    auto segment_ids_node =
        std::make_shared<ov::op::v0::Constant>(indices_type, ov::Shape{segment_ids.size()}, segment_ids);
    auto num_segments_node = std::make_shared<ov::op::v0::Constant>(indices_type, scalar_shape, num_segments);

    if (!with_default_index)
        return std::make_shared<ov::op::v3::EmbeddingSegmentsSum>(emb_table_node,
                                                                  indices_node,
                                                                  segment_ids_node,
                                                                  num_segments_node);

    auto default_index_node = std::make_shared<ov::op::v0::Constant>(indices_type, scalar_shape, default_index);
    if (!with_weights)
        return std::make_shared<ov::op::v3::EmbeddingSegmentsSum>(emb_table_node,
                                                                  indices_node,
                                                                  segment_ids_node,
                                                                  num_segments_node,
                                                                  default_index_node);

    // Random per-index weights, one per indices entry.
    auto weights_tensor = create_and_fill_tensor(data_type, ov::Shape{indices.size()});
    auto weights_node = std::make_shared<ov::op::v0::Constant>(weights_tensor);
    return std::make_shared<ov::op::v3::EmbeddingSegmentsSum>(emb_table_node,
                                                              indices_node,
                                                              segment_ids_node,
                                                              num_segments_node,
                                                              default_index_node,
                                                              weights_node);
}
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,86 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/node_builders/group_convolution.hpp"
|
||||
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/op/add.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/group_conv.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_group_convolution(const ov::Output<Node>& in,
                                                 const ov::element::Type& type,
                                                 const std::vector<size_t>& filter_size,
                                                 const std::vector<size_t>& strides,
                                                 const std::vector<ptrdiff_t>& pads_begin,
                                                 const std::vector<ptrdiff_t>& pads_end,
                                                 const std::vector<size_t>& dilations,
                                                 const ov::op::PadType& auto_pad,
                                                 size_t num_out_channels,
                                                 size_t num_groups,
                                                 bool add_biases,
                                                 const std::vector<float>& filter_weights,
                                                 const std::vector<float>& biases_weights) {
    // Builds constant weights of shape [G, C_out/G, C_in/G, filter_size...]
    // (random when filter_weights is empty) and delegates to the
    // explicit-weights overload. Both channel counts must divide by num_groups.
    // Fix: corrected the "incorrected shape" typo in the assert message.
    auto shape = in.get_partial_shape();
    ov::Shape filter_weights_shape = {num_out_channels, static_cast<size_t>(shape[1].get_length())};
    OPENVINO_ASSERT(!(filter_weights_shape[0] % num_groups || filter_weights_shape[1] % num_groups),
                    "incorrect shape for GroupConvolution");
    filter_weights_shape[0] /= num_groups;
    filter_weights_shape[1] /= num_groups;
    filter_weights_shape.insert(filter_weights_shape.begin(), num_groups);
    filter_weights_shape.insert(filter_weights_shape.end(), filter_size.begin(), filter_size.end());

    std::shared_ptr<ov::op::v0::Constant> filter_weights_node;
    if (!filter_weights.empty()) {
        filter_weights_node = std::make_shared<ov::op::v0::Constant>(type, filter_weights_shape, filter_weights);
    } else {
        auto tensor = create_and_fill_tensor(type, filter_weights_shape);
        filter_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
    }

    return make_group_convolution(in,
                                  filter_weights_node,
                                  type,
                                  strides,
                                  pads_begin,
                                  pads_end,
                                  dilations,
                                  auto_pad,
                                  add_biases,
                                  biases_weights);
}
|
||||
|
||||
std::shared_ptr<ov::Node> make_group_convolution(const ov::Output<Node>& in,
                                                 const ov::Output<Node>& weights,
                                                 const ov::element::Type& type,
                                                 const std::vector<size_t>& strides,
                                                 const std::vector<ptrdiff_t>& pads_begin,
                                                 const std::vector<ptrdiff_t>& pads_end,
                                                 const std::vector<size_t>& dilations,
                                                 const ov::op::PadType& auto_pad,
                                                 bool add_biases,
                                                 const std::vector<float>& biases_weights) {
    // Builds GroupConvolution from explicit weights; optionally adds a bias
    // (scalar-shaped constant, random when biases_weights is empty).
    auto conv =
        std::make_shared<ov::op::v1::GroupConvolution>(in, weights, strides, pads_begin, pads_end, dilations, auto_pad);
    if (!add_biases)
        return conv;

    auto bias_node = biases_weights.empty()
                         ? std::make_shared<ov::op::v0::Constant>(create_and_fill_tensor(type, ov::Shape{}))
                         : std::make_shared<ov::op::v0::Constant>(type, ov::Shape{}, biases_weights);
    return std::make_shared<ov::op::v1::Add>(conv, bias_node);
}
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,174 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "common_test_utils/node_builders/group_convolution_backprop_data.hpp"
|
||||
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/op/add.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/group_conv.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace utils {
|
||||
std::shared_ptr<ov::Node> make_group_convolution_backprop_data(const ov::Output<Node>& in,
                                                               const ov::element::Type& type,
                                                               const std::vector<size_t>& filter_size,
                                                               const std::vector<size_t>& strides,
                                                               const std::vector<ptrdiff_t>& pads_begin,
                                                               const std::vector<ptrdiff_t>& pads_end,
                                                               const std::vector<size_t>& dilations,
                                                               const ov::op::PadType& auto_pad,
                                                               size_t num_out_channels,
                                                               size_t num_groups,
                                                               bool add_biases,
                                                               const std::vector<ptrdiff_t>& output_padding,
                                                               const std::vector<float>& filter_weights,
                                                               const std::vector<float>& biases_weights) {
    // Builds constant weights of shape [G, C_in/G, C_out/G, filter_size...]
    // (random when filter_weights is empty) and delegates to the
    // explicit-weights overload. Both channel counts must divide by num_groups.
    // Fix: corrected the "incorrected shape" typo in the assert message.
    auto shape = in.get_partial_shape();
    ov::Shape filter_weights_shape = {static_cast<size_t>(shape[1].get_length()), num_out_channels};
    OPENVINO_ASSERT(!(filter_weights_shape[0] % num_groups || filter_weights_shape[1] % num_groups),
                    "incorrect shape for GroupConvolution");
    filter_weights_shape[0] /= num_groups;
    filter_weights_shape[1] /= num_groups;
    filter_weights_shape.insert(filter_weights_shape.begin(), num_groups);
    filter_weights_shape.insert(filter_weights_shape.end(), filter_size.begin(), filter_size.end());

    std::shared_ptr<ov::op::v0::Constant> filter_weights_node;
    if (!filter_weights.empty()) {
        filter_weights_node = std::make_shared<ov::op::v0::Constant>(type, filter_weights_shape, filter_weights);
    } else {
        auto tensor = create_and_fill_tensor(type, filter_weights_shape);
        filter_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
    }

    return make_group_convolution_backprop_data(in,
                                                filter_weights_node,
                                                type,
                                                strides,
                                                pads_begin,
                                                pads_end,
                                                dilations,
                                                auto_pad,
                                                add_biases,
                                                output_padding,
                                                biases_weights);
}
|
||||
|
||||
std::shared_ptr<ov::Node> make_group_convolution_backprop_data(const ov::Output<Node>& in,
                                                               const ov::Output<Node>& weights,
                                                               const ov::element::Type& type,
                                                               const std::vector<size_t>& strides,
                                                               const std::vector<ptrdiff_t>& pads_begin,
                                                               const std::vector<ptrdiff_t>& pads_end,
                                                               const std::vector<size_t>& dilations,
                                                               const ov::op::PadType& auto_pad,
                                                               bool add_biases,
                                                               const std::vector<ptrdiff_t>& output_padding,
                                                               const std::vector<float>& biases_weights) {
    // Builds GroupConvolutionBackpropData from explicit weights, optionally
    // adding a bias. Fix: construct the node exactly once, picking the
    // constructor with output_padding only when it is requested (previously a
    // node was always built and then discarded/rebuilt when output_padding
    // was set).
    std::shared_ptr<ov::Node> deconv;
    if (output_padding.empty()) {
        deconv = std::make_shared<ov::op::v1::GroupConvolutionBackpropData>(in,
                                                                            weights,
                                                                            strides,
                                                                            pads_begin,
                                                                            pads_end,
                                                                            dilations,
                                                                            auto_pad);
    } else {
        deconv = std::make_shared<ov::op::v1::GroupConvolutionBackpropData>(in,
                                                                            weights,
                                                                            strides,
                                                                            pads_begin,
                                                                            pads_end,
                                                                            dilations,
                                                                            auto_pad,
                                                                            output_padding);
    }

    if (!add_biases)
        return deconv;

    // NOTE(review): the bias constant uses an empty (scalar) shape, so
    // biases_weights is expected to hold a single value — confirm with callers.
    std::shared_ptr<ov::op::v0::Constant> biases_weights_node;
    if (!biases_weights.empty()) {
        biases_weights_node = std::make_shared<ov::op::v0::Constant>(type, ov::Shape{}, biases_weights);
    } else {
        auto tensor = create_and_fill_tensor(type, ov::Shape{});
        biases_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
    }
    return std::make_shared<ov::op::v1::Add>(deconv, biases_weights_node);
}
|
||||
|
||||
/// Builds a GroupConvolutionBackpropData node with an explicit output-shape
/// input, creating the grouped filter constant internally, optionally followed
/// by a scalar bias Add.
///
/// \param in                Data input of the deconvolution.
/// \param outputShape       Node supplying the spatial output shape.
/// \param type              Element type for filter and bias constants.
/// \param filter_size       Spatial kernel dimensions.
/// \param strides           Deconvolution strides.
/// \param pads_begin        Padding added to the beginning of each spatial axis.
/// \param pads_end          Padding added to the end of each spatial axis.
/// \param dilations         Filter dilations.
/// \param auto_pad          Auto-padding mode.
/// \param num_out_channels  Total number of output channels.
/// \param num_groups        Number of groups; must divide both the input and
///                          output channel counts.
/// \param add_biases        When true, appends an Add with a scalar bias constant.
/// \param output_padding    When non-empty, passed to the ctor overload that takes output padding.
/// \param filter_weights    Explicit filter values; when empty the tensor comes
///                          from create_and_fill_tensor.
/// \param biases_weights    Explicit bias values; when empty the tensor comes
///                          from create_and_fill_tensor.
/// \throws std::runtime_error if num_groups does not divide the channel counts.
/// \return The deconvolution node, or the bias Add node when add_biases is set.
std::shared_ptr<ov::Node> make_group_convolution_backprop_data(const ov::Output<Node>& in,
                                                               const ov::Output<Node>& outputShape,
                                                               const ov::element::Type& type,
                                                               const std::vector<size_t>& filter_size,
                                                               const std::vector<size_t>& strides,
                                                               const std::vector<ptrdiff_t>& pads_begin,
                                                               const std::vector<ptrdiff_t>& pads_end,
                                                               const std::vector<size_t>& dilations,
                                                               const ov::op::PadType& auto_pad,
                                                               size_t num_out_channels,
                                                               size_t num_groups,
                                                               bool add_biases,
                                                               const std::vector<ptrdiff_t>& output_padding,
                                                               const std::vector<float>& filter_weights,
                                                               const std::vector<float>& biases_weights) {
    // Grouped filter layout: {G, C_in/G, C_out/G, filter_size...}. The input
    // channel count is read from dim 1 of the data input's partial shape.
    const auto shape = in.get_partial_shape();
    ov::Shape filter_weights_shape = {static_cast<size_t>(shape[1].get_length()), num_out_channels};
    if (filter_weights_shape[0] % num_groups || filter_weights_shape[1] % num_groups)
        throw std::runtime_error("incorrect shape for GroupConvolutionBackpropData");
    filter_weights_shape[0] /= num_groups;
    filter_weights_shape[1] /= num_groups;
    filter_weights_shape.insert(filter_weights_shape.begin(), num_groups);
    filter_weights_shape.insert(filter_weights_shape.end(), filter_size.begin(), filter_size.end());

    // Filter constant: explicit values when provided, otherwise a tensor
    // produced by create_and_fill_tensor.
    std::shared_ptr<ov::op::v0::Constant> filter_weights_node;
    if (!filter_weights.empty()) {
        filter_weights_node = std::make_shared<ov::op::v0::Constant>(type, filter_weights_shape, filter_weights);
    } else {
        auto tensor = create_and_fill_tensor(type, filter_weights_shape);
        filter_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
    }

    // Construct the node exactly once, choosing the constructor overload by
    // whether explicit output padding was requested. (Previously the node was
    // always built without output padding first and then rebuilt — and the
    // first instance discarded — whenever output_padding was non-empty.)
    std::shared_ptr<ov::Node> deconv;
    if (output_padding.empty()) {
        deconv = std::make_shared<ov::op::v1::GroupConvolutionBackpropData>(in,
                                                                            filter_weights_node,
                                                                            outputShape,
                                                                            strides,
                                                                            pads_begin,
                                                                            pads_end,
                                                                            dilations,
                                                                            auto_pad);
    } else {
        deconv = std::make_shared<ov::op::v1::GroupConvolutionBackpropData>(in,
                                                                            filter_weights_node,
                                                                            outputShape,
                                                                            strides,
                                                                            pads_begin,
                                                                            pads_end,
                                                                            dilations,
                                                                            auto_pad,
                                                                            output_padding);
    }

    if (!add_biases)
        return deconv;

    // Scalar (rank-0) bias constant, mirroring the weights-Output overload.
    std::shared_ptr<ov::op::v0::Constant> biases_weights_node;
    if (!biases_weights.empty()) {
        biases_weights_node = std::make_shared<ov::op::v0::Constant>(type, ov::Shape{}, biases_weights);
    } else {
        auto tensor = create_and_fill_tensor(type, ov::Shape{});
        biases_weights_node = std::make_shared<ov::op::v0::Constant>(tensor);
    }

    return std::make_shared<ov::op::v1::Add>(deconv, biases_weights_node);
}
|
||||
} // namespace utils
|
||||
} // namespace test
|
||||
} // namespace ov
|
Loading…
Reference in New Issue
Block a user