Refactor GPU single layer tests (#21527)
* Refactor GPU single layer tests
This commit is contained in:
parent
5a4f632680
commit
358cd4b709
@ -86,10 +86,10 @@ std::vector<std::string> disabledTestPatterns() {
|
||||
// unsupported metrics
|
||||
R"(.*nightly_HeteroAutoBatchOVGetMetricPropsTest.*OVGetMetricPropsTest.*(FULL_DEVICE_NAME_with_DEVICE_ID|AVAILABLE_DEVICES|DEVICE_UUID|OPTIMIZATION_CAPABILITIES|MAX_BATCH_SIZE|DEVICE_GOPS|DEVICE_TYPE|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)",
|
||||
// Issue: 111437
|
||||
R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.CompareWithRefs.*)",
|
||||
R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.CompareWithRefs.*)",
|
||||
R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.Inference.*)",
|
||||
R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.Inference.*)",
|
||||
// Issue: 111440
|
||||
R"(.*smoke_set1/GatherElementsGPUTest.CompareWithRefs.*)",
|
||||
R"(.*smoke_set1/GatherElementsGPUTest.Inference.*)",
|
||||
// New plugin work with tensors, so it means that blob in old API can have different pointers
|
||||
R"(.*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)",
|
||||
R"(.*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)",
|
||||
|
@ -2,16 +2,17 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/batch_to_space.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/test_constants.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/batch_to_space.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
struct BatchToSpaceParams {
|
||||
std::vector<int64_t> block;
|
||||
@ -22,22 +23,21 @@ struct BatchToSpaceParams {
|
||||
typedef std::tuple<
|
||||
InputShape, // Input shapes
|
||||
BatchToSpaceParams,
|
||||
ElementType, // Element type
|
||||
ngraph::helpers::InputLayerType, // block/begin/end input type
|
||||
ov::element::Type, // Element type
|
||||
ov::test::utils::InputLayerType, // block/begin/end input type
|
||||
std::map<std::string, std::string> // Additional network configuration
|
||||
> BatchToSpaceParamsLayerParamSet;
|
||||
|
||||
class BatchToSpaceLayerGPUTest : public testing::WithParamInterface<BatchToSpaceParamsLayerParamSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(const testing::TestParamInfo<BatchToSpaceParamsLayerParamSet>& obj) {
|
||||
InputShape shapes;
|
||||
BatchToSpaceParams params;
|
||||
ElementType elementType;
|
||||
ngraph::helpers::InputLayerType restInputType;
|
||||
TargetDevice targetDevice;
|
||||
ov::element::Type model_type;
|
||||
ov::test::utils::InputLayerType restInputType;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param;
|
||||
std::tie(shapes, params, model_type, restInputType, additionalConfig) = obj.param;
|
||||
|
||||
std::ostringstream results;
|
||||
results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
|
||||
@ -45,7 +45,7 @@ public:
|
||||
for (const auto& item : shapes.second) {
|
||||
results << ov::test::utils::vec2str(item) << "_";
|
||||
}
|
||||
results << "netPRC=" << elementType << "_";
|
||||
results << "netPRC=" << model_type << "_";
|
||||
results << "block=" << ov::test::utils::vec2str(params.block) << "_";
|
||||
results << "begin=" << ov::test::utils::vec2str(params.begin) << "_";
|
||||
results << "end=" << ov::test::utils::vec2str(params.end) << "_";
|
||||
@ -59,7 +59,7 @@ public:
|
||||
return results.str();
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
inputs.clear();
|
||||
const auto& funcInputs = function->inputs();
|
||||
for (size_t i = 0; i < funcInputs.size(); ++i) {
|
||||
@ -100,7 +100,7 @@ protected:
|
||||
void SetUp() override {
|
||||
InputShape shapes;
|
||||
BatchToSpaceParams ssParams;
|
||||
ngraph::helpers::InputLayerType restInputType;
|
||||
ov::test::utils::InputLayerType restInputType;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
std::tie(shapes, ssParams, inType, restInputType, additionalConfig) = this->GetParam();
|
||||
|
||||
@ -112,7 +112,7 @@ protected:
|
||||
|
||||
std::vector<InputShape> inputShapes;
|
||||
inputShapes.push_back(shapes);
|
||||
if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
if (restInputType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
inputShapes.push_back(InputShape({static_cast<int64_t>(block.size())}, std::vector<ov::Shape>(shapes.second.size(), {block.size()})));
|
||||
inputShapes.push_back(InputShape({static_cast<int64_t>(begin.size())}, std::vector<ov::Shape>(shapes.second.size(), {begin.size()})));
|
||||
inputShapes.push_back(InputShape({static_cast<int64_t>(end.size())}, std::vector<ov::Shape>(shapes.second.size(), {end.size()})));
|
||||
@ -122,10 +122,10 @@ protected:
|
||||
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(inType, inputDynamicShapes.front())};
|
||||
std::shared_ptr<ov::Node> blockInput, beginInput, endInput;
|
||||
if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
auto blockNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{block.size()});
|
||||
auto beginNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{begin.size()});
|
||||
auto endNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{end.size()});
|
||||
if (restInputType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
auto blockNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{block.size()});
|
||||
auto beginNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{begin.size()});
|
||||
auto endNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{end.size()});
|
||||
|
||||
params.push_back(blockNode);
|
||||
params.push_back(beginNode);
|
||||
@ -135,38 +135,34 @@ protected:
|
||||
beginInput = beginNode;
|
||||
endInput = endNode;
|
||||
} else {
|
||||
blockInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{block.size()}, block);
|
||||
beginInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{begin.size()}, begin);
|
||||
endInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{end.size()}, end);
|
||||
blockInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{block.size()}, block);
|
||||
beginInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{begin.size()}, begin);
|
||||
endInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{end.size()}, end);
|
||||
}
|
||||
auto ss = std::make_shared<ngraph::op::v1::BatchToSpace>(params[0], blockInput, beginInput, endInput);
|
||||
auto ss = std::make_shared<ov::op::v1::BatchToSpace>(params[0], blockInput, beginInput, endInput);
|
||||
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < ss->get_output_size(); i++) {
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(ss->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(ss->output(i)));
|
||||
}
|
||||
|
||||
function = std::make_shared<ngraph::Function>(results, params, "BatchToSpaceFuncTest");
|
||||
function = std::make_shared<ov::Model>(results, params, "BatchToSpaceFuncTest");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(BatchToSpaceLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(BatchToSpaceLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
std::map<std::string, std::string> emptyAdditionalConfig;
|
||||
|
||||
const std::vector<ElementType> inputPrecisions = {
|
||||
ElementType::f32
|
||||
const std::vector<ov::element::Type> inputPrecisions = {
|
||||
ov::element::f32
|
||||
};
|
||||
|
||||
const std::vector<ngraph::helpers::InputLayerType> restInputTypes = {
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::PARAMETER
|
||||
const std::vector<ov::test::utils::InputLayerType> restInputTypes = {
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::PARAMETER
|
||||
};
|
||||
|
||||
const std::vector<InputShape> inputShapesDynamic3D = {
|
||||
@ -224,4 +220,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_5D, BatchToSpaceLay
|
||||
BatchToSpaceLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,48 +2,46 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/broadcast.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include <common_test_utils/ov_tensor_utils.hpp>
|
||||
#include <string>
|
||||
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/broadcast.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
typedef std::tuple<
|
||||
std::vector<InputShape>, // Shapes
|
||||
std::vector<int64_t>, // Target shapes
|
||||
std::vector<int64_t>, // Axes mapping
|
||||
ov::op::BroadcastType, // Broadcast mode
|
||||
ov::element::Type_t, // Network precision
|
||||
ov::element::Type, // Network precision
|
||||
std::vector<bool>, // Const inputs
|
||||
std::string // Device name
|
||||
> BroadcastLayerTestParamsSet;
|
||||
|
||||
class BroadcastLayerGPUTest : public testing::WithParamInterface<BroadcastLayerTestParamsSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<BroadcastLayerTestParamsSet> obj) {
|
||||
std::vector<ov::test::InputShape> inputShapes;
|
||||
std::vector<ov::test::InputShape> shapes;
|
||||
std::vector<int64_t> targetShapes, axesMapping;
|
||||
ov::op::BroadcastType mode;
|
||||
ov::element::Type_t netPrecision;
|
||||
ov::element::Type model_type;
|
||||
std::vector<bool> isConstInputs;
|
||||
std::string deviceName;
|
||||
std::tie(inputShapes, targetShapes, axesMapping, mode, netPrecision, isConstInputs, deviceName) = obj.param;
|
||||
std::tie(shapes, targetShapes, axesMapping, mode, model_type, isConstInputs, deviceName) = obj.param;
|
||||
|
||||
std::ostringstream result;
|
||||
result << "IS=(";
|
||||
for (const auto& shape : inputShapes) {
|
||||
for (const auto& shape : shapes) {
|
||||
result << ov::test::utils::partialShape2str({shape.first}) << "_";
|
||||
}
|
||||
result << ")_TS=(";
|
||||
for (const auto& shape : inputShapes) {
|
||||
for (const auto& shape : shapes) {
|
||||
for (const auto& item : shape.second) {
|
||||
result << ov::test::utils::vec2str(item) << "_";
|
||||
}
|
||||
@ -51,7 +49,7 @@ public:
|
||||
result << "targetShape=" << ov::test::utils::vec2str(targetShapes) << "_";
|
||||
result << "axesMapping=" << ov::test::utils::vec2str(axesMapping) << "_";
|
||||
result << "mode=" << mode << "_";
|
||||
result << "netPrec=" << netPrecision << "_";
|
||||
result << "netPrec=" << model_type << "_";
|
||||
result << "constIn=(" << (isConstInputs[0] ? "True" : "False") << "." << (isConstInputs[1] ? "True" : "False") << ")_";
|
||||
result << "trgDevice=" << deviceName;
|
||||
|
||||
@ -62,11 +60,11 @@ protected:
|
||||
std::vector<int64_t> targetShape, axesMapping;
|
||||
|
||||
void SetUp() override {
|
||||
std::vector<InputShape> inputShapes;
|
||||
std::vector<InputShape> shapes;
|
||||
ov::op::BroadcastType mode;
|
||||
ov::element::Type_t netPrecision;
|
||||
ov::element::Type model_type;
|
||||
std::vector<bool> isConstInput;
|
||||
std::tie(inputShapes, targetShape, axesMapping, mode, netPrecision, isConstInput, targetDevice) = this->GetParam();
|
||||
std::tie(shapes, targetShape, axesMapping, mode, model_type, isConstInput, targetDevice) = this->GetParam();
|
||||
|
||||
bool isTargetShapeConst = isConstInput[0];
|
||||
bool isAxesMapConst = isConstInput[1];
|
||||
@ -74,8 +72,8 @@ protected:
|
||||
const auto targetShapeRank = targetShape.size();
|
||||
const auto axesMappingRank = axesMapping.size();
|
||||
|
||||
if (inputShapes.front().first.rank() != 0) {
|
||||
inputDynamicShapes.push_back(inputShapes.front().first);
|
||||
if (shapes.front().first.rank() != 0) {
|
||||
inputDynamicShapes.push_back(shapes.front().first);
|
||||
if (!isTargetShapeConst) {
|
||||
inputDynamicShapes.push_back({ static_cast<int64_t>(targetShape.size()) });
|
||||
}
|
||||
@ -83,10 +81,10 @@ protected:
|
||||
inputDynamicShapes.push_back({ static_cast<int64_t>(axesMapping.size()) });
|
||||
}
|
||||
}
|
||||
const size_t targetStaticShapeSize = inputShapes.front().second.size();
|
||||
const size_t targetStaticShapeSize = shapes.front().second.size();
|
||||
targetStaticShapes.resize(targetStaticShapeSize);
|
||||
for (size_t i = 0lu; i < targetStaticShapeSize; ++i) {
|
||||
targetStaticShapes[i].push_back(inputShapes.front().second[i]);
|
||||
targetStaticShapes[i].push_back(shapes.front().second[i]);
|
||||
if (!isTargetShapeConst)
|
||||
targetStaticShapes[i].push_back({ targetShape.size() });
|
||||
if (!isAxesMapConst)
|
||||
@ -95,9 +93,9 @@ protected:
|
||||
|
||||
ov::ParameterVector functionParams;
|
||||
if (inputDynamicShapes.empty()) {
|
||||
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, targetStaticShapes.front().front()));
|
||||
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, targetStaticShapes.front().front()));
|
||||
} else {
|
||||
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes.front()));
|
||||
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front()));
|
||||
if (!isTargetShapeConst) {
|
||||
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[1]));
|
||||
functionParams.back()->set_friendly_name("targetShape");
|
||||
@ -140,19 +138,19 @@ protected:
|
||||
}
|
||||
}
|
||||
|
||||
auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr<Node> &lastNode) {
|
||||
ResultVector results;
|
||||
auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr<ov::Node> &lastNode) {
|
||||
ov::ResultVector results;
|
||||
|
||||
for (size_t i = 0; i < lastNode->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<opset1::Result>(lastNode->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));
|
||||
|
||||
return std::make_shared<Function>(results, params, "BroadcastLayerGPUTest");
|
||||
return std::make_shared<ov::Model>(results, params, "BroadcastLayerGPUTest");
|
||||
};
|
||||
|
||||
function = makeFunction(functionParams, broadcastOp);
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
inputs.clear();
|
||||
const auto& funcInputs = function->inputs();
|
||||
for (size_t i = 0lu; i < funcInputs.size(); i++) {
|
||||
@ -183,19 +181,15 @@ protected:
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(BroadcastLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(BroadcastLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<ov::element::Type_t> inputPrecisionsFloat = {
|
||||
const std::vector<ov::element::Type> inputPrecisionsFloat = {
|
||||
ov::element::f32,
|
||||
};
|
||||
|
||||
const std::vector<ov::element::Type_t> inputPrecisionsInt = {
|
||||
const std::vector<ov::element::Type> inputPrecisionsInt = {
|
||||
ov::element::i32,
|
||||
};
|
||||
|
||||
@ -407,5 +401,3 @@ INSTANTIATE_TEST_CASE_P(smoke_broadcast_6d_numpy_compareWithRefs_dynamic,
|
||||
BroadcastLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -1,49 +1,43 @@
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
#include <tuple>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <memory>
|
||||
#include "ov_models/utils/ov_helpers.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "common_test_utils/node_builders/activation.hpp"
|
||||
#include "common_test_utils/node_builders/convolution.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "shared_test_classes/single_layer/convolution.hpp"
|
||||
#include "common_test_utils/test_constants.hpp"
|
||||
#include "shared_test_classes/single_op/convolution.hpp"
|
||||
|
||||
// using namespace LayerTestsDefinitions;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/convolution.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
|
||||
using LayerTestsDefinitions::convSpecificParams;
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
using ov::test::convSpecificParams;
|
||||
|
||||
typedef std::tuple<
|
||||
convSpecificParams,
|
||||
ElementType, // Net precision
|
||||
ElementType, // Input precision
|
||||
ElementType, // Output precision
|
||||
InputShape, // Input shape
|
||||
LayerTestsUtils::TargetDevice, // Device name
|
||||
bool // activation fusing
|
||||
ov::element::Type, // Model type
|
||||
InputShape, // Input shape
|
||||
std::string, // Device name
|
||||
bool // activation fusing
|
||||
> convLayerTestParamsSet;
|
||||
|
||||
|
||||
class ConvolutionLayerGPUTestDynamic : public testing::WithParamInterface<convLayerTestParamsSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(const testing::TestParamInfo<convLayerTestParamsSet>& obj) {
|
||||
convSpecificParams convParams;
|
||||
ElementType netType;
|
||||
ElementType inType, outType;
|
||||
ov::element::Type model_type;
|
||||
InputShape inputShape;
|
||||
std::string targetDevice;
|
||||
bool activationFusing;
|
||||
std::tie(convParams, netType, inType, outType, inputShape, targetDevice, activationFusing) = obj.param;
|
||||
std::tie(convParams, model_type, inputShape, targetDevice, activationFusing) = obj.param;
|
||||
|
||||
ngraph::op::PadType padType;
|
||||
InferenceEngine::SizeVector kernel, stride, dilation;
|
||||
ov::op::PadType padType;
|
||||
std::vector<size_t> kernel, stride, dilation;
|
||||
std::vector<ptrdiff_t> padBegin, padEnd;
|
||||
size_t convOutChannels;
|
||||
std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams;
|
||||
@ -63,9 +57,7 @@ public:
|
||||
result << "D=" << ov::test::utils::vec2str(dilation) << "_";
|
||||
result << "O=" << convOutChannels << "_";
|
||||
result << "AP=" << padType << "_";
|
||||
result << "netPRC=" << netType << "_";
|
||||
result << "inPRC=" << inType << "_";
|
||||
result << "outPRC=" << outType << "_";
|
||||
result << "netPRC=" << model_type << "_";
|
||||
result << "trgDev=" << targetDevice << "_";
|
||||
result << "activationFusing=" << activationFusing;
|
||||
|
||||
@ -76,49 +68,46 @@ protected:
|
||||
void SetUp() override {
|
||||
convSpecificParams convParams;
|
||||
InputShape inputShape;
|
||||
auto netType = ElementType::undefined;
|
||||
auto model_type = ov::element::undefined;
|
||||
bool activationFusing;
|
||||
std::tie(convParams, netType, inType, outType, inputShape, targetDevice, activationFusing) = this->GetParam();
|
||||
std::tie(convParams, model_type, inputShape, targetDevice, activationFusing) = this->GetParam();
|
||||
|
||||
init_input_shapes({inputShape});
|
||||
|
||||
ngraph::op::PadType padType;
|
||||
InferenceEngine::SizeVector kernel, stride, dilation;
|
||||
ov::op::PadType padType;
|
||||
std::vector<size_t> kernel, stride, dilation;
|
||||
std::vector<ptrdiff_t> padBegin, padEnd;
|
||||
size_t convOutChannels;
|
||||
std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams;
|
||||
|
||||
ov::ParameterVector inputParams;
|
||||
for (auto&& shape : inputDynamicShapes)
|
||||
inputParams.push_back(std::make_shared<ov::op::v0::Parameter>(inType, shape));
|
||||
inputParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
|
||||
|
||||
auto convolutionNode = ngraph::builder::makeConvolution(inputParams.front(), netType, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels);
|
||||
auto convolutionNode = ov::test::utils::make_convolution(inputParams.front(), model_type, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels);
|
||||
if (activationFusing) {
|
||||
auto activationNode = ngraph::builder::makeActivation(convolutionNode, netType, ngraph::helpers::ActivationTypes::Relu);
|
||||
auto activationNode = ov::test::utils::make_activation(convolutionNode, model_type, ov::test::utils::ActivationTypes::Relu);
|
||||
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < activationNode->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(activationNode->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(activationNode->output(i)));
|
||||
|
||||
function = std::make_shared<ngraph::Function>(results, inputParams, "Convolution");
|
||||
function = std::make_shared<ov::Model>(results, inputParams, "Convolution");
|
||||
} else {
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < convolutionNode->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(convolutionNode->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(convolutionNode->output(i)));
|
||||
|
||||
function = std::make_shared<ngraph::Function>(results, inputParams, "Convolution");
|
||||
function = std::make_shared<ov::Model>(results, inputParams, "Convolution");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(ConvolutionLayerGPUTestDynamic, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
TEST_P(ConvolutionLayerGPUTestDynamic, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
// ======== 1D convolutions
|
||||
const std::vector<ov::test::InputShape> dynInputShapes1D = {
|
||||
{
|
||||
@ -130,27 +119,25 @@ const std::vector<ov::test::InputShape> dynInputShapes1D = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic1DSymPad, ConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3}),
|
||||
::testing::Values(SizeVector{1}),
|
||||
::testing::Values(std::vector<size_t>{3}),
|
||||
::testing::Values(std::vector<size_t>{1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1}),
|
||||
::testing::Values(SizeVector{1}),
|
||||
::testing::Values(std::vector<size_t>{1}),
|
||||
::testing::Values(10),
|
||||
::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes1D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(false)),
|
||||
ConvolutionLayerGPUTestDynamic::getTestCaseName);
|
||||
|
||||
const std::vector<SizeVector> kernels1D = { {3}, {1} };
|
||||
const std::vector<SizeVector> strides1D = { {1} };
|
||||
const std::vector<std::vector<size_t>> kernels1D = { {3}, {1} };
|
||||
const std::vector<std::vector<size_t>> strides1D = { {1} };
|
||||
const std::vector<std::vector<ptrdiff_t>> padBegins1D = { {0}, {1} };
|
||||
const std::vector<std::vector<ptrdiff_t>> padEnds1D = { {0}, {1} };
|
||||
const std::vector<SizeVector> dilations1D = { {1} };
|
||||
const SizeVector numOutChannels = { 64, 63 };
|
||||
const std::vector<std::vector<size_t>> dilations1D = { {1} };
|
||||
const std::vector<size_t> numOutChannels = { 64, 63 };
|
||||
const std::vector<InputShape> inputShapes1D = {
|
||||
{{}, {{ 2, 64, 7 }}},
|
||||
{{}, {{ 1, 67, 7 }}},
|
||||
@ -181,10 +168,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_ExplicitPad1D, Convolutio
|
||||
::testing::ValuesIn(padEnds1D),
|
||||
::testing::ValuesIn(dilations1D),
|
||||
::testing::ValuesIn(numOutChannels),
|
||||
::testing::Values(ngraph::op::PadType::EXPLICIT)),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::Values(ov::op::PadType::EXPLICIT)),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(inputShapes1D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(false)),
|
||||
@ -209,16 +194,14 @@ const std::vector<ov::test::InputShape> dynInputShapes2D_static_output = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymPad, ConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{3, 3}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(10),
|
||||
::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes2D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(false)),
|
||||
@ -228,16 +211,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymPad, Convolut
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymAutoPad, ConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{3, 3}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{0, 0}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{0, 0}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(10),
|
||||
::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes2D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(false)),
|
||||
@ -247,16 +228,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymAutoPad, Conv
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_AsymPad, ConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{3, 3}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{2, 1}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(10),
|
||||
::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes2D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(false)),
|
||||
@ -266,16 +245,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_AsymPad, Convol
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_static_output, ConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3}),
|
||||
::testing::Values(SizeVector{2, 2}),
|
||||
::testing::Values(std::vector<size_t>{3, 3}),
|
||||
::testing::Values(std::vector<size_t>{2, 2}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 1}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(256),
|
||||
::testing::Values(ngraph::op::PadType::EXPLICIT)),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::Values(ov::op::PadType::EXPLICIT)),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::ValuesIn(dynInputShapes2D_static_output),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(true)),
|
||||
@ -293,16 +270,14 @@ const std::vector<ov::test::InputShape> dynInputShapes3D = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymPad, ConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3, 3}),
|
||||
::testing::Values(SizeVector{1, 1, 1}),
|
||||
::testing::Values(std::vector<size_t>{3, 3, 3}),
|
||||
::testing::Values(std::vector<size_t>{1, 1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2, 1}),
|
||||
::testing::Values(SizeVector{1, 1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1, 1}),
|
||||
::testing::Values(3),
|
||||
::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes3D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(false)),
|
||||
@ -312,16 +287,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymPad, Convolut
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymAutoPad, ConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3, 3}),
|
||||
::testing::Values(SizeVector{1, 1, 1}),
|
||||
::testing::Values(std::vector<size_t>{3, 3, 3}),
|
||||
::testing::Values(std::vector<size_t>{1, 1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{0, 0, 0}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{0, 0, 0}),
|
||||
::testing::Values(SizeVector{1, 1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1, 1}),
|
||||
::testing::Values(3),
|
||||
::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes3D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(false)),
|
||||
@ -331,20 +304,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymAutoPad, Conv
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DAsymPad, ConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3, 3}),
|
||||
::testing::Values(SizeVector{1, 1, 1}),
|
||||
::testing::Values(std::vector<size_t>{3, 3, 3}),
|
||||
::testing::Values(std::vector<size_t>{1, 1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{2, 1, 1}),
|
||||
::testing::Values(SizeVector{1, 1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1, 1}),
|
||||
::testing::Values(3),
|
||||
::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes3D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(false)),
|
||||
ConvolutionLayerGPUTestDynamic::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,54 +2,50 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <tuple>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <memory>
|
||||
#include "ov_models/utils/ov_helpers.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "shared_test_classes/single_layer/convolution_backprop_data.hpp"
|
||||
#include "common_test_utils/test_constants.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/core/preprocess/pre_post_process.hpp"
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "common_test_utils/node_builders/convolution_backprop_data.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "shared_test_classes/single_op/convolution_backprop_data.hpp"
|
||||
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/convolution.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
|
||||
using DeconvSpecParams = LayerTestsDefinitions::convBackpropDataSpecificParams;
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
using ov::test::convBackpropDataSpecificParams;
|
||||
|
||||
using DeconvInputData = std::tuple<InputShape, // data shape
|
||||
ngraph::helpers::InputLayerType, // 'output_shape' input type
|
||||
ov::test::utils::InputLayerType, // 'output_shape' input type
|
||||
std::vector<std::vector<int32_t>>>; // values for 'output_shape'
|
||||
|
||||
using DeconvLayerTestParamsSet = std::tuple<DeconvSpecParams,
|
||||
using DeconvLayerTestParamsSet = std::tuple<convBackpropDataSpecificParams,
|
||||
DeconvInputData,
|
||||
ElementType,
|
||||
LayerTestsUtils::TargetDevice,
|
||||
ov::element::Type,
|
||||
std::string,
|
||||
std::map<std::string, std::string>>;
|
||||
|
||||
class DeconvolutionLayerGPUTest : public testing::WithParamInterface<DeconvLayerTestParamsSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<DeconvLayerTestParamsSet> obj) {
|
||||
DeconvSpecParams basicParamsSet;
|
||||
convBackpropDataSpecificParams basicParamsSet;
|
||||
DeconvInputData inputData;
|
||||
ElementType prec;
|
||||
ov::element::Type model_type;
|
||||
std::string targetDevice;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = obj.param;
|
||||
std::tie(basicParamsSet, inputData, model_type, targetDevice, additionalConfig) = obj.param;
|
||||
|
||||
ngraph::op::PadType padType;
|
||||
InferenceEngine::SizeVector kernel, stride, dilation;
|
||||
ov::op::PadType padType;
|
||||
std::vector<size_t> kernel, stride, dilation;
|
||||
std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
|
||||
size_t convOutChannels;
|
||||
std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet;
|
||||
|
||||
InputShape inputShape;
|
||||
ngraph::helpers::InputLayerType outShapeType;
|
||||
ov::test::utils::InputLayerType outShapeType;
|
||||
std::vector<std::vector<int32_t>> outShapeData;
|
||||
std::tie(inputShape, outShapeType, outShapeData) = inputData;
|
||||
|
||||
@ -62,7 +58,7 @@ public:
|
||||
result << ov::test::utils::vec2str(shape);
|
||||
result << ")_";
|
||||
}
|
||||
result << "PRC=" << prec << "_";
|
||||
result << "PRC=" << model_type << "_";
|
||||
result << "K=" << ov::test::utils::vec2str(kernel) << "_";
|
||||
result << "S=" << ov::test::utils::vec2str(stride) << "_";
|
||||
result << "PB=" << ov::test::utils::vec2str(padBegin) << "_";
|
||||
@ -88,13 +84,13 @@ public:
|
||||
return result.str();
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
if (function->get_parameters().size() != 1) {
|
||||
// WA: output_shape depends on 3rd deconvolution input data
|
||||
// but the reference implementation doesn't implement shape inference
|
||||
// so we need to build a new ngraph function and replace the 3rd input parameter with a constant
|
||||
// so we need to build a new ov function and replace the 3rd input parameter with a constant
|
||||
// to get valid output shapes
|
||||
functionRefs = createGraph({targetInputStaticShapes[0]}, ngraph::helpers::InputLayerType::CONSTANT);
|
||||
functionRefs = createGraph({targetInputStaticShapes[0]}, ov::test::utils::InputLayerType::CONSTANT);
|
||||
}
|
||||
inputs.clear();
|
||||
const auto& funcInputs = function->inputs();
|
||||
@ -113,62 +109,17 @@ public:
|
||||
inferRequestNum++;
|
||||
}
|
||||
|
||||
void validate() override {
|
||||
auto actualOutputs = get_plugin_outputs();
|
||||
if (function->get_parameters().size() == 2) {
|
||||
auto pos = std::find_if(inputs.begin(), inputs.end(),
|
||||
[](const std::pair<std::shared_ptr<ov::Node>, ov::Tensor> ¶ms) {
|
||||
return params.first->get_friendly_name() == "param_1";
|
||||
});
|
||||
IE_ASSERT(pos != inputs.end());
|
||||
inputs.erase(pos);
|
||||
}
|
||||
auto expectedOutputs = calculate_refs();
|
||||
if (expectedOutputs.empty()) {
|
||||
return;
|
||||
}
|
||||
ASSERT_EQ(actualOutputs.size(), expectedOutputs.size())
|
||||
<< "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();
|
||||
|
||||
abs_threshold = 1e-2f;
|
||||
compare(expectedOutputs, actualOutputs);
|
||||
}
|
||||
|
||||
void configure_model() override {
|
||||
ov::preprocess::PrePostProcessor p(function);
|
||||
{
|
||||
auto& params = function->get_parameters();
|
||||
for (size_t i = 0; i < params.size(); i++) {
|
||||
if (i > 0) {
|
||||
continue;
|
||||
}
|
||||
if (inType != ov::element::Type_t::undefined) {
|
||||
p.input(i).tensor().set_element_type(inType);
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
auto results = function->get_results();
|
||||
for (size_t i = 0; i < results.size(); i++) {
|
||||
if (outType != ov::element::Type_t::undefined) {
|
||||
p.output(i).tensor().set_element_type(outType);
|
||||
}
|
||||
}
|
||||
}
|
||||
function = p.build();
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::Model> createGraph(const std::vector<ov::PartialShape>& inShapes, ngraph::helpers::InputLayerType outShapeType) {
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(prec, inShapes.front())};
|
||||
std::shared_ptr<ov::Model> createGraph(const std::vector<ov::PartialShape>& inShapes, ov::test::utils::InputLayerType outShapeType) {
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inShapes.front())};
|
||||
std::shared_ptr<ov::Node> outShapeNode;
|
||||
if (!outShapeData.empty()) {
|
||||
if (outShapeType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
IE_ASSERT(inputDynamicShapes.size() == 2);
|
||||
auto outShapeParam = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i32, inputDynamicShapes.back());
|
||||
auto outShapeParam = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, inputDynamicShapes.back());
|
||||
params.push_back(outShapeParam);
|
||||
outShapeNode = outShapeParam;
|
||||
} else {
|
||||
outShapeNode = ngraph::opset8::Constant::create(ngraph::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]);
|
||||
outShapeNode = ov::op::v0::Constant::create(ov::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -179,36 +130,36 @@ public:
|
||||
std::shared_ptr<ov::Node> deconv;
|
||||
if (!outShapeData.empty()) {
|
||||
IE_ASSERT(outShapeNode != nullptr);
|
||||
deconv = ngraph::builder::makeConvolutionBackpropData(params[0], outShapeNode, prec, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels);
|
||||
deconv = ov::test::utils::make_convolution_backprop_data(params[0], outShapeNode, model_type, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels);
|
||||
} else {
|
||||
deconv = ngraph::builder::makeConvolutionBackpropData(params[0], prec, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, false, outPadding);
|
||||
deconv = ov::test::utils::make_convolution_backprop_data(params[0], model_type, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, false, outPadding);
|
||||
}
|
||||
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < deconv->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(deconv->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(deconv->output(i)));
|
||||
|
||||
return std::make_shared<ngraph::Function>(results, params, "Deconv");
|
||||
return std::make_shared<ov::Model>(results, params, "Deconv");
|
||||
}
|
||||
|
||||
protected:
|
||||
void SetUp() override {
|
||||
DeconvSpecParams basicParamsSet;
|
||||
convBackpropDataSpecificParams basicParamsSet;
|
||||
DeconvInputData inputData;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = this->GetParam();
|
||||
std::tie(basicParamsSet, inputData, model_type, targetDevice, additionalConfig) = this->GetParam();
|
||||
|
||||
InputShape inputShape;
|
||||
ngraph::helpers::InputLayerType outShapeType;
|
||||
ov::test::utils::InputLayerType outShapeType;
|
||||
std::tie(inputShape, outShapeType, outShapeData) = inputData;
|
||||
|
||||
std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet;
|
||||
|
||||
std::vector<InputShape> paramsShapes;
|
||||
paramsShapes.push_back(inputShape);
|
||||
if (!outShapeData.empty() && outShapeType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
if (!outShapeData.empty() && outShapeType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
const auto outShapeDims = ov::Shape{outShapeData.front().size()};
|
||||
paramsShapes.push_back(InputShape{outShapeDims, std::vector<ov::Shape>(inputShape.second.size(), outShapeDims)});
|
||||
}
|
||||
@ -219,36 +170,32 @@ protected:
|
||||
}
|
||||
|
||||
private:
|
||||
ElementType prec;
|
||||
ngraph::op::PadType padType;
|
||||
InferenceEngine::SizeVector kernel, stride, dilation;
|
||||
ov::element::Type model_type;
|
||||
ov::op::PadType padType;
|
||||
std::vector<size_t> kernel, stride, dilation;
|
||||
std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
|
||||
size_t convOutChannels;
|
||||
std::vector<std::vector<int32_t>> outShapeData;
|
||||
size_t inferRequestNum = 0;
|
||||
};
|
||||
|
||||
TEST_P(DeconvolutionLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(DeconvolutionLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
std::map<std::string, std::string> emptyAdditionalConfig;
|
||||
|
||||
const std::vector<std::vector<ptrdiff_t>> emptyOutputPadding = { {} };
|
||||
|
||||
/* ============= Deconvolution params ============= */
|
||||
const InferenceEngine::SizeVector numOutChannels = { 6 };
|
||||
const std::vector<size_t> numOutChannels = { 6 };
|
||||
|
||||
/* ============= Deconvolution params (2D) ============= */
|
||||
const std::vector<InferenceEngine::SizeVector> kernels2d = { {3, 3}, {1, 1} };
|
||||
const std::vector<InferenceEngine::SizeVector> strides2d = { {1, 1}, {2, 2} };
|
||||
const std::vector<std::vector<size_t>> kernels2d = { {3, 3}, {1, 1} };
|
||||
const std::vector<std::vector<size_t>> strides2d = { {1, 1}, {2, 2} };
|
||||
const std::vector<std::vector<ptrdiff_t>> padBegins2d = { {0, 0} };
|
||||
const std::vector<std::vector<ptrdiff_t>> padEnds2d = { {0, 0} };
|
||||
const std::vector<InferenceEngine::SizeVector> dilations2d = { {1, 1} };
|
||||
const std::vector<std::vector<size_t>> dilations2d = { {1, 1} };
|
||||
|
||||
/* ============= Deconvolution (2D) ============= */
|
||||
const auto convParams_ExplicitPadding_2D = ::testing::Combine(
|
||||
@ -258,29 +205,29 @@ const auto convParams_ExplicitPadding_2D = ::testing::Combine(
|
||||
::testing::ValuesIn(padEnds2d),
|
||||
::testing::ValuesIn(dilations2d),
|
||||
::testing::ValuesIn(numOutChannels),
|
||||
::testing::Values(ngraph::op::PadType::EXPLICIT),
|
||||
::testing::Values(ov::op::PadType::EXPLICIT),
|
||||
::testing::ValuesIn(emptyOutputPadding)
|
||||
);
|
||||
|
||||
const std::vector<DeconvInputData> dyn_2D_inputs_smoke = {
|
||||
DeconvInputData{
|
||||
InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{}
|
||||
},
|
||||
DeconvInputData{
|
||||
InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{}
|
||||
},
|
||||
DeconvInputData{
|
||||
InputShape{{-1, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {1, 12, 7, 7}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{}
|
||||
},
|
||||
DeconvInputData{
|
||||
InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {3, 12, 7, 7}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{}
|
||||
},
|
||||
};
|
||||
@ -289,7 +236,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_FP32, DeconvolutionLayerGPUTest
|
||||
::testing::Combine(
|
||||
convParams_ExplicitPadding_2D,
|
||||
::testing::ValuesIn(dyn_2D_inputs_smoke),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
DeconvolutionLayerGPUTest::getTestCaseName);
|
||||
@ -297,17 +244,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_FP32, DeconvolutionLayerGPUTest
|
||||
const std::vector<DeconvInputData> dyn_2D_inputs_with_output_shape = {
|
||||
DeconvInputData{
|
||||
InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{15, 15}, {9, 10}, {15, 15}}
|
||||
},
|
||||
DeconvInputData{
|
||||
InputShape{{-1, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {1, 12, 7, 7}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{15, 15}}
|
||||
},
|
||||
DeconvInputData{
|
||||
InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {3, 12, 7, 7}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{15, 15}}
|
||||
},
|
||||
};
|
||||
@ -315,20 +262,18 @@ const std::vector<DeconvInputData> dyn_2D_inputs_with_output_shape = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_OutputShape_FP32, DeconvolutionLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3}),
|
||||
::testing::Values(std::vector<size_t>{3, 3}),
|
||||
::testing::ValuesIn(strides2d),
|
||||
::testing::ValuesIn(padBegins2d),
|
||||
::testing::ValuesIn(padEnds2d),
|
||||
::testing::ValuesIn(dilations2d),
|
||||
::testing::ValuesIn(numOutChannels),
|
||||
::testing::Values(ngraph::op::PadType::EXPLICIT),
|
||||
::testing::Values(ov::op::PadType::EXPLICIT),
|
||||
::testing::ValuesIn(emptyOutputPadding)),
|
||||
::testing::ValuesIn(dyn_2D_inputs_with_output_shape),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
DeconvolutionLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,22 +2,19 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/cum_sum.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include <string>
|
||||
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/cum_sum.hpp"
|
||||
|
||||
using ElementType = ov::element::Type_t;
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
typedef std::tuple<
|
||||
ElementType, // data precision
|
||||
ov::element::Type, // data type
|
||||
InputShape, // input shape
|
||||
std::int64_t, // axis
|
||||
bool, // exclusive
|
||||
@ -25,15 +22,15 @@ typedef std::tuple<
|
||||
> CumSumLayerGPUParamSet;
|
||||
|
||||
class CumSumLayerGPUTest : public testing::WithParamInterface<CumSumLayerGPUParamSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<CumSumLayerGPUParamSet> obj) {
|
||||
ElementType inputPrecision;
|
||||
ov::element::Type model_type;
|
||||
InputShape shapes;
|
||||
std::int64_t axis;
|
||||
bool exclusive;
|
||||
bool reverse;
|
||||
std::tie(inputPrecision, shapes, axis, exclusive, reverse) = obj.param;
|
||||
std::tie(model_type, shapes, axis, exclusive, reverse) = obj.param;
|
||||
|
||||
std::ostringstream results;
|
||||
results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
|
||||
@ -41,7 +38,7 @@ public:
|
||||
for (const auto& item : shapes.second) {
|
||||
results << ov::test::utils::vec2str(item) << "_";
|
||||
}
|
||||
results << "Prc=" << inputPrecision << "_";
|
||||
results << "Prc=" << model_type << "_";
|
||||
results << "Axis=" << axis << "_" << (exclusive ? "exclusive" : "") << "_" << (reverse ? "reverse" : "");
|
||||
return results.str();
|
||||
}
|
||||
@ -50,44 +47,40 @@ protected:
|
||||
void SetUp() override {
|
||||
targetDevice = ov::test::utils::DEVICE_GPU;
|
||||
|
||||
ElementType inputPrecision;
|
||||
ov::element::Type model_type;
|
||||
InputShape shapes;
|
||||
std::int64_t axis;
|
||||
bool exclusive;
|
||||
bool reverse;
|
||||
std::tie(inputPrecision, shapes, axis, exclusive, reverse) = this->GetParam();
|
||||
std::tie(model_type, shapes, axis, exclusive, reverse) = this->GetParam();
|
||||
|
||||
init_input_shapes({shapes});
|
||||
|
||||
ov::ParameterVector params;
|
||||
for (auto&& shape : inputDynamicShapes) {
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(inputPrecision, shape));
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
|
||||
}
|
||||
auto axisNode = ngraph::opset1::Constant::create(ngraph::element::i32, ngraph::Shape{}, std::vector<int64_t>{axis})->output(0);
|
||||
auto cumSum = std::make_shared<opset3::CumSum>(params[0], axisNode, exclusive, reverse);
|
||||
auto axisNode = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, std::vector<int64_t>{axis});
|
||||
auto cumSum = std::make_shared<ov::op::v0::CumSum>(params[0], axisNode, exclusive, reverse);
|
||||
|
||||
auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr<Node> &lastNode) {
|
||||
ResultVector results;
|
||||
auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr<ov::Node> &lastNode) {
|
||||
ov::ResultVector results;
|
||||
|
||||
for (size_t i = 0; i < lastNode->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<opset1::Result>(lastNode->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));
|
||||
|
||||
return std::make_shared<Function>(results, params, "CumSumLayerGPUTest");
|
||||
return std::make_shared<ov::Model>(results, params, "CumSumLayerGPUTest");
|
||||
};
|
||||
function = makeFunction(params, cumSum);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(CumSumLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(CumSumLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<ElementType> inputPrecision = {
|
||||
ngraph::element::f32
|
||||
const std::vector<ov::element::Type> model_type = {
|
||||
ov::element::f32
|
||||
};
|
||||
|
||||
const std::vector<int64_t> axes = { 0, 1, 2, 3, 4, 5 };
|
||||
@ -117,7 +110,7 @@ const std::vector<InputShape> inShapes = {
|
||||
};
|
||||
|
||||
const auto testCasesAxis_0 = ::testing::Combine(
|
||||
::testing::ValuesIn(inputPrecision),
|
||||
::testing::ValuesIn(model_type),
|
||||
::testing::ValuesIn(inShapes),
|
||||
::testing::Values(axes[0]),
|
||||
::testing::ValuesIn(exclusive),
|
||||
@ -125,7 +118,7 @@ const auto testCasesAxis_0 = ::testing::Combine(
|
||||
);
|
||||
|
||||
const auto testCasesAxis_1 = ::testing::Combine(
|
||||
::testing::ValuesIn(inputPrecision),
|
||||
::testing::ValuesIn(model_type),
|
||||
::testing::ValuesIn(std::vector<InputShape>(inShapes.begin() + 1, inShapes.end())),
|
||||
::testing::Values(axes[1]),
|
||||
::testing::ValuesIn(exclusive),
|
||||
@ -133,7 +126,7 @@ const auto testCasesAxis_1 = ::testing::Combine(
|
||||
);
|
||||
|
||||
const auto testCasesAxis_2 = ::testing::Combine(
|
||||
::testing::ValuesIn(inputPrecision),
|
||||
::testing::ValuesIn(model_type),
|
||||
::testing::ValuesIn(std::vector<InputShape>(inShapes.begin() + 2, inShapes.end())),
|
||||
::testing::Values(axes[2]),
|
||||
::testing::ValuesIn(exclusive),
|
||||
@ -141,7 +134,7 @@ const auto testCasesAxis_2 = ::testing::Combine(
|
||||
);
|
||||
|
||||
const auto testCasesAxis_3 = ::testing::Combine(
|
||||
::testing::ValuesIn(inputPrecision),
|
||||
::testing::ValuesIn(model_type),
|
||||
::testing::ValuesIn(std::vector<InputShape>(inShapes.begin() + 3, inShapes.end())),
|
||||
::testing::Values(axes[3]),
|
||||
::testing::ValuesIn(exclusive),
|
||||
@ -149,7 +142,7 @@ const auto testCasesAxis_3 = ::testing::Combine(
|
||||
);
|
||||
|
||||
const auto testCasesAxis_4 = ::testing::Combine(
|
||||
::testing::ValuesIn(inputPrecision),
|
||||
::testing::ValuesIn(model_type),
|
||||
::testing::ValuesIn(std::vector<InputShape>(inShapes.begin() + 4, inShapes.end())),
|
||||
::testing::Values(axes[4]),
|
||||
::testing::ValuesIn(exclusive),
|
||||
@ -157,7 +150,7 @@ const auto testCasesAxis_4 = ::testing::Combine(
|
||||
);
|
||||
|
||||
const auto testCasesAxis_5 = ::testing::Combine(
|
||||
::testing::ValuesIn(inputPrecision),
|
||||
::testing::ValuesIn(model_type),
|
||||
::testing::ValuesIn(std::vector<InputShape>(inShapes.begin() + 5, inShapes.end())),
|
||||
::testing::Values(axes[5]),
|
||||
::testing::ValuesIn(exclusive),
|
||||
@ -165,7 +158,7 @@ const auto testCasesAxis_5 = ::testing::Combine(
|
||||
);
|
||||
|
||||
const auto testCasesAxis_negative = ::testing::Combine(
|
||||
::testing::ValuesIn(inputPrecision),
|
||||
::testing::ValuesIn(model_type),
|
||||
::testing::ValuesIn(std::vector<InputShape>(inShapes.begin() + 5, inShapes.end())),
|
||||
::testing::ValuesIn(negativeAxes),
|
||||
::testing::ValuesIn(exclusive),
|
||||
@ -181,5 +174,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_cum_sum_axis_5_CompareWithRefs_dynamic, CumSumLay
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_cum_sum_neg_axes_CompareWithRefs_dynamic, CumSumLayerGPUTest, testCasesAxis_negative, CumSumLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,22 +2,21 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/depth_to_space.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include <string>
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
|
||||
using namespace ngraph::opset3;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/depth_to_space.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
using ov::op::v0::DepthToSpace;
|
||||
|
||||
typedef std::tuple<
|
||||
InputShape, // Input shape
|
||||
ElementType, // Input element type
|
||||
ov::element::Type, // Input element type
|
||||
DepthToSpace::DepthToSpaceMode, // Mode
|
||||
std::size_t // Block size
|
||||
> DepthToSpaceLayerGPUTestParams;
|
||||
@ -27,7 +26,7 @@ class DepthToSpaceLayerGPUTest : public testing::WithParamInterface<DepthToSpace
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<DepthToSpaceLayerGPUTestParams> obj) {
|
||||
InputShape shapes;
|
||||
ElementType inType;
|
||||
ov::element::Type inType;
|
||||
DepthToSpace::DepthToSpaceMode mode;
|
||||
std::size_t blockSize;
|
||||
std::tie(shapes, inType, mode, blockSize) = obj.param;
|
||||
@ -70,25 +69,21 @@ protected:
|
||||
|
||||
auto d2s = std::make_shared<ov::op::v0::DepthToSpace>(params[0], mode, blockSize);
|
||||
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < d2s->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(d2s->output(i)));
|
||||
function = std::make_shared<ngraph::Function>(results, params, "DepthToSpace");
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(d2s->output(i)));
|
||||
function = std::make_shared<ov::Model>(results, params, "DepthToSpace");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(DepthToSpaceLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(DepthToSpaceLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<ElementType> inputElementType = {
|
||||
ElementType::f32,
|
||||
ElementType::f16,
|
||||
ElementType::i8
|
||||
const std::vector<ov::element::Type> input_types = {
|
||||
ov::element::f32,
|
||||
ov::element::f16,
|
||||
ov::element::i8
|
||||
};
|
||||
|
||||
const std::vector<DepthToSpace::DepthToSpaceMode> depthToSpaceModes = {
|
||||
@ -120,16 +115,16 @@ const std::vector<ov::Shape> inputShapesBS3_4D = {
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS2_4D, DepthToSpaceLayerGPUTest,
|
||||
testing::Combine(
|
||||
testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_4D)),
|
||||
testing::ValuesIn(inputElementType),
|
||||
testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS2_4D)),
|
||||
testing::ValuesIn(input_types),
|
||||
testing::ValuesIn(depthToSpaceModes),
|
||||
testing::Values(1, 2)),
|
||||
DepthToSpaceLayerGPUTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS3_4D, DepthToSpaceLayerGPUTest,
|
||||
testing::Combine(
|
||||
testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_4D)),
|
||||
testing::ValuesIn(inputElementType),
|
||||
testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS3_4D)),
|
||||
testing::ValuesIn(input_types),
|
||||
testing::ValuesIn(depthToSpaceModes),
|
||||
testing::Values(1, 3)),
|
||||
DepthToSpaceLayerGPUTest::getTestCaseName);
|
||||
@ -153,16 +148,16 @@ const std::vector<ov::Shape> inputShapesBS3_5D = {
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS2_5D, DepthToSpaceLayerGPUTest,
|
||||
testing::Combine(
|
||||
testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_5D)),
|
||||
testing::ValuesIn(inputElementType),
|
||||
testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS2_5D)),
|
||||
testing::ValuesIn(input_types),
|
||||
testing::ValuesIn(depthToSpaceModes),
|
||||
testing::Values(1, 2)),
|
||||
DepthToSpaceLayerGPUTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS3_5D, DepthToSpaceLayerGPUTest,
|
||||
testing::Combine(
|
||||
testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_5D)),
|
||||
testing::ValuesIn(inputElementType),
|
||||
testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS3_5D)),
|
||||
testing::ValuesIn(input_types),
|
||||
testing::ValuesIn(depthToSpaceModes),
|
||||
testing::Values(1, 3)),
|
||||
DepthToSpaceLayerGPUTest::getTestCaseName);
|
||||
@ -171,8 +166,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS3_5D, DepthToSpaceLayerGPU
|
||||
|
||||
//======================== Dynamic Shapes Tests ========================
|
||||
|
||||
namespace dynamic_shapes {
|
||||
|
||||
const std::vector<InputShape> inputShapes4D = {
|
||||
{{-1, -1, -1, -1}, // dynamic
|
||||
{{2, 36, 1, 1}, {1, 36, 3, 1}, {2, 36, 1, 1}, {1, 36, 3, 1}}}, // target
|
||||
@ -198,7 +191,7 @@ const std::vector<InputShape> inputShapes5D = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceDynamic4D, DepthToSpaceLayerGPUTest,
|
||||
testing::Combine(
|
||||
testing::ValuesIn(inputShapes4D),
|
||||
testing::ValuesIn(inputElementType),
|
||||
testing::ValuesIn(input_types),
|
||||
testing::ValuesIn(depthToSpaceModes),
|
||||
testing::Values(1, 2, 3)),
|
||||
DepthToSpaceLayerGPUTest::getTestCaseName);
|
||||
@ -206,12 +199,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceDynamic4D, DepthToSpaceLayerGPUTes
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceDynamic5D, DepthToSpaceLayerGPUTest,
|
||||
testing::Combine(
|
||||
testing::ValuesIn(inputShapes5D),
|
||||
testing::ValuesIn(inputElementType),
|
||||
testing::ValuesIn(input_types),
|
||||
testing::ValuesIn(depthToSpaceModes),
|
||||
testing::Values(1, 2, 3)),
|
||||
DepthToSpaceLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace dynamic_shapes
|
||||
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,18 +2,16 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/detection_output.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include <string>
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/detection_output.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
enum {
|
||||
idxLocation,
|
||||
@ -65,7 +63,7 @@ public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<DetectionOutputGPUTestParams> obj) {
|
||||
DetectionOutputAttributes commonAttrs;
|
||||
ParamsWhichSizeDependsDynamic specificAttrs;
|
||||
ngraph::op::DetectionOutputAttrs attrs;
|
||||
ov::op::v0::DetectionOutput::Attributes attrs;
|
||||
size_t batch;
|
||||
bool replaceDynamicShapesToIntervals;
|
||||
std::string targetDevice;
|
||||
@ -101,14 +99,30 @@ public:
|
||||
result << " }_";
|
||||
}
|
||||
|
||||
using LayerTestsDefinitions::operator<<;
|
||||
result << attrs;
|
||||
result << "attributes={";
|
||||
result << "Classes=" << attrs.num_classes << "_";
|
||||
result << "backgrId=" << attrs.background_label_id << "_";
|
||||
result << "topK=" << attrs.top_k << "_";
|
||||
result << "varEnc=" << attrs.variance_encoded_in_target << "_";
|
||||
result << "keepTopK=" << ov::test::utils::vec2str(attrs.keep_top_k) << "_";
|
||||
result << "codeType=" << attrs.code_type << "_";
|
||||
result << "shareLoc=" << attrs.share_location << "_";
|
||||
result << "nmsThr=" << attrs.nms_threshold << "_";
|
||||
result << "confThr=" << attrs.confidence_threshold << "_";
|
||||
result << "clipAfterNms=" << attrs.clip_after_nms << "_";
|
||||
result << "clipBeforeNms=" << attrs.clip_before_nms << "_";
|
||||
result << "decrId=" << attrs.decrease_label_id << "_";
|
||||
result << "norm=" << attrs.normalized << "_";
|
||||
result << "inH=" << attrs.input_height << "_";
|
||||
result << "inW=" << attrs.input_width << "_";
|
||||
result << "OS=" << attrs.objectness_score;
|
||||
result << "}_";
|
||||
result << "RDS=" << (replaceDynamicShapesToIntervals ? "true" : "false") << "_";
|
||||
result << "TargetDevice=" << targetDevice;
|
||||
return result.str();
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
inputs.clear();
|
||||
const auto& funcInputs = function->inputs();
|
||||
for (auto i = 0ul; i < funcInputs.size(); ++i) {
|
||||
@ -197,7 +211,7 @@ protected:
|
||||
|
||||
ov::ParameterVector params;
|
||||
for (auto&& shape : inputDynamicShapes)
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, shape));
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape));
|
||||
|
||||
if (attrs.num_classes == -1) {
|
||||
std::shared_ptr<ov::op::v8::DetectionOutput> detOut;
|
||||
@ -209,8 +223,8 @@ protected:
|
||||
else
|
||||
throw std::runtime_error("DetectionOutput layer supports only 3 or 5 inputs");
|
||||
|
||||
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(detOut)};
|
||||
function = std::make_shared<ngraph::Function>(results, params, "DetectionOutputDynamic");
|
||||
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(detOut)};
|
||||
function = std::make_shared<ov::Model>(results, params, "DetectionOutputDynamic");
|
||||
} else {
|
||||
std::shared_ptr<ov::op::v0::DetectionOutput> detOut;
|
||||
if (params.size() == 3)
|
||||
@ -220,8 +234,8 @@ protected:
|
||||
else
|
||||
OPENVINO_THROW("DetectionOutput layer supports only 3 or 5 inputs");
|
||||
|
||||
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(detOut)};
|
||||
function = std::make_shared<ngraph::Function>(results, params, "DetectionOutputDynamic");
|
||||
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(detOut)};
|
||||
function = std::make_shared<ov::Model>(results, params, "DetectionOutputDynamic");
|
||||
}
|
||||
}
|
||||
|
||||
@ -253,18 +267,14 @@ private:
|
||||
}
|
||||
}
|
||||
}
|
||||
ngraph::op::DetectionOutputAttrs attrs;
|
||||
ov::op::v0::DetectionOutput::Attributes attrs;
|
||||
std::vector<ov::test::InputShape> inShapes;
|
||||
};
|
||||
|
||||
TEST_P(DetectionOutputLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(DetectionOutputLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<int> numClasses = {11, -1};
|
||||
const int backgroundLabelId = 0;
|
||||
const std::vector<int> topK = {75};
|
||||
@ -438,4 +448,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUDetectionOutputV8Dynamic3In, DetectionOutputLa
|
||||
params3InputsDynamic_v8,
|
||||
DetectionOutputLayerGPUTest::getTestCaseName);
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,18 +2,18 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/gather.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include <string>
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/gather.hpp"
|
||||
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
struct GatherShapeParams {
|
||||
InputShape inputShapes;
|
||||
InputShape targetShapes;
|
||||
@ -23,22 +23,21 @@ struct GatherShapeParams {
|
||||
|
||||
typedef std::tuple<
|
||||
GatherShapeParams,
|
||||
ElementType, // Network precision
|
||||
ov::element::Type, // Network precision
|
||||
bool, // Is const Indices
|
||||
bool // Is const Axis
|
||||
> GatherGPUTestParams;
|
||||
|
||||
|
||||
class GatherGPUTest : public testing::WithParamInterface<GatherGPUTestParams>,
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<GatherGPUTestParams> obj) {
|
||||
GatherShapeParams Shapes;
|
||||
ElementType netPrecision;
|
||||
ov::element::Type model_type;
|
||||
bool isIndicesConstant;
|
||||
bool isAxisConstant;
|
||||
|
||||
std::tie(Shapes, netPrecision, isIndicesConstant, isAxisConstant) = obj.param;
|
||||
std::tie(Shapes, model_type, isIndicesConstant, isAxisConstant) = obj.param;
|
||||
|
||||
std::ostringstream result;
|
||||
result << "IS=(";
|
||||
@ -57,7 +56,7 @@ public:
|
||||
}
|
||||
result << "axis=" << Shapes.axis << "_";
|
||||
result << "batchDims=" << Shapes.batch_dims << "_";
|
||||
result << "netPrc=" << netPrecision << "_";
|
||||
result << "netPrc=" << model_type << "_";
|
||||
result << "constIdx=" << (isIndicesConstant ? "True" : "False") << "_";
|
||||
result << "constAx=" << (isAxisConstant ? "True" : "False") << "_";
|
||||
|
||||
@ -67,12 +66,12 @@ public:
|
||||
protected:
|
||||
void SetUp() override {
|
||||
GatherShapeParams Shapes;
|
||||
ElementType netPrecision;
|
||||
ov::element::Type model_type;
|
||||
bool isAxisConstant;
|
||||
bool isIndicesConstant;
|
||||
const ElementType intInputsPrecision = ElementType::i32;
|
||||
const auto int_model_type = ov::element::i32;
|
||||
|
||||
std::tie(Shapes, netPrecision, isIndicesConstant, isAxisConstant) = this->GetParam();
|
||||
std::tie(Shapes, model_type, isIndicesConstant, isAxisConstant) = this->GetParam();
|
||||
const int axis = Shapes.axis;
|
||||
const int batchDims = Shapes.batch_dims;
|
||||
targetDevice = ov::test::utils::DEVICE_GPU;
|
||||
@ -86,7 +85,7 @@ protected:
|
||||
init_input_shapes({Shapes.inputShapes, Shapes.targetShapes});
|
||||
}
|
||||
|
||||
ngraph::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};
|
||||
params.back()->set_friendly_name("data");
|
||||
|
||||
if (isIndicesConstant) {
|
||||
@ -96,26 +95,22 @@ protected:
|
||||
for (size_t i = 0; i < Shapes.inputShapes.second.size(); ++i) {
|
||||
idx_range = std::min(static_cast<int64_t>(Shapes.inputShapes.second[i][axis_norm]), idx_range);
|
||||
}
|
||||
indicesNode = ngraph::builder::makeConstant<int64_t>(
|
||||
ngraph::element::i64,
|
||||
Shapes.targetShapes.second[0],
|
||||
{},
|
||||
true,
|
||||
idx_range - 1,
|
||||
0);
|
||||
|
||||
auto indices_tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, Shapes.targetShapes.second[0], idx_range - 1, 0);
|
||||
indicesNode = std::make_shared<ov::op::v0::Constant>(indices_tensor);
|
||||
} else {
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(intInputsPrecision, inputDynamicShapes[1]));
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(int_model_type, inputDynamicShapes[1]));
|
||||
params.back()->set_friendly_name("indices");
|
||||
}
|
||||
|
||||
if (isAxisConstant) {
|
||||
axisNode = ngraph::builder::makeConstant<int64_t>(intInputsPrecision, ov::Shape({1}), {axis});
|
||||
axisNode = std::make_shared<ov::op::v0::Constant>(int_model_type, ov::Shape({1}), std::vector<int64_t>{axis});
|
||||
} else {
|
||||
inputDynamicShapes.push_back({1});
|
||||
for (size_t i = 0lu; i < targetStaticShapes.size(); i++) {
|
||||
targetStaticShapes[i].push_back({1});
|
||||
}
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(intInputsPrecision, inputDynamicShapes[2]));
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(int_model_type, inputDynamicShapes[2]));
|
||||
params.back()->set_friendly_name("axis");
|
||||
}
|
||||
|
||||
@ -125,20 +120,16 @@ protected:
|
||||
: isIndicesConstant ? params[1]
|
||||
: params[2],
|
||||
batchDims);
|
||||
ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(gatherNode)};
|
||||
function = std::make_shared<ngraph::Function>(results, params, "Gather");
|
||||
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(gatherNode)};
|
||||
function = std::make_shared<ov::Model>(results, params, "Gather");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(GatherGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(GatherGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<ov::element::Type_t> netPrecisions = {
|
||||
const std::vector<ov::element::Type> model_types = {
|
||||
ov::element::f32,
|
||||
ov::element::i32,
|
||||
ov::element::i64,
|
||||
@ -201,9 +192,8 @@ const std::vector<GatherShapeParams> dynamicInputShapeConstTargetShape = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_dynamic_input_shapes_const_target_shapes, GatherGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(dynamicInputShapeConstTargetShape), // input shapes
|
||||
::testing::ValuesIn(netPrecisions), // network precision
|
||||
::testing::Values(true), // is const indices
|
||||
::testing::Values(true)), // is const axis
|
||||
::testing::ValuesIn(model_types), // network precision
|
||||
::testing::Values(true), // is const indices
|
||||
::testing::Values(true)), // is const axis
|
||||
GatherGPUTest::getTestCaseName);
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,36 +2,33 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include <string>
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
|
||||
using namespace ov::test;
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ngraph::helpers;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/gather_elements.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
using GatherElementsParams = std::tuple<
|
||||
std::vector<InputShape>, // Dynamic shape + Target static shapes
|
||||
int, // Axis
|
||||
ElementType, // Data precision
|
||||
ElementType, // Indices precision
|
||||
TargetDevice // Device name
|
||||
>;
|
||||
ov::element::Type, // Data type
|
||||
ov::element::Type, // Indices type
|
||||
std::string>; // Device name
|
||||
|
||||
class GatherElementsGPUTest : public testing::WithParamInterface<GatherElementsParams>,
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(const testing::TestParamInfo<GatherElementsParams>& obj) {
|
||||
std::vector<InputShape> shapes;
|
||||
ElementType dPrecision, iPrecision;
|
||||
ov::element::Type data_type, indices_type;
|
||||
int axis;
|
||||
std::string device;
|
||||
std::tie(shapes, axis, dPrecision, iPrecision, device) = obj.param;
|
||||
std::tie(shapes, axis, data_type, indices_type, device) = obj.param;
|
||||
|
||||
std::ostringstream result;
|
||||
result << "IS=(";
|
||||
@ -45,14 +42,14 @@ public:
|
||||
}
|
||||
}
|
||||
result << "Ax=" << axis << "_";
|
||||
result << "DP=" << dPrecision << "_";
|
||||
result << "IP=" << iPrecision << "_";
|
||||
result << "DP=" << data_type << "_";
|
||||
result << "IP=" << indices_type << "_";
|
||||
result << "device=" << device;
|
||||
|
||||
return result.str();
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
inputs.clear();
|
||||
const auto& funcInputs = function->inputs();
|
||||
for (size_t i = 0; i < funcInputs.size(); ++i) {
|
||||
@ -68,29 +65,27 @@ public:
|
||||
protected:
|
||||
void SetUp() override {
|
||||
std::vector<InputShape> shapes;
|
||||
ElementType dPrecision, iPrecision;
|
||||
ov::element::Type data_type, indices_type;
|
||||
int axis;
|
||||
std::tie(shapes, axis, dPrecision, iPrecision, targetDevice) = this->GetParam();
|
||||
std::tie(shapes, axis, data_type, indices_type, targetDevice) = this->GetParam();
|
||||
init_input_shapes(shapes);
|
||||
|
||||
ngraph::ParameterVector params = {
|
||||
std::make_shared<ngraph::opset1::Parameter>(dPrecision, inputDynamicShapes[0]),
|
||||
std::make_shared<ngraph::opset1::Parameter>(iPrecision, inputDynamicShapes[1]),
|
||||
ov::ParameterVector params = {
|
||||
std::make_shared<ov::op::v0::Parameter>(data_type, inputDynamicShapes[0]),
|
||||
std::make_shared<ov::op::v0::Parameter>(indices_type, inputDynamicShapes[1]),
|
||||
};
|
||||
|
||||
auto gather = std::make_shared<ngraph::op::v6::GatherElements>(params[0], params[1], axis);
|
||||
auto gather = std::make_shared<ov::op::v6::GatherElements>(params[0], params[1], axis);
|
||||
|
||||
ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(gather)};
|
||||
function = std::make_shared<ngraph::Function>(results, params, "GatherElements");
|
||||
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(gather)};
|
||||
function = std::make_shared<ov::Model>(results, params, "GatherElements");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(GatherElementsGPUTest, CompareWithRefs) {
|
||||
TEST_P(GatherElementsGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<std::vector<InputShape>> inDynamicShapeParams = {
|
||||
{{{-1, -1, -1, -1}, {{2, 3, 5, 7}, {3, 4, 6, 8}}},
|
||||
{{-1, -1, -1, -1}, {{2, 3, 9, 7}, {3, 4, 4, 8}}}},
|
||||
@ -102,10 +97,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_set1, GatherElementsGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(inDynamicShapeParams), // shape
|
||||
::testing::ValuesIn(std::vector<int>({2, -2})), // Axis
|
||||
::testing::ValuesIn(std::vector<ElementType>({ElementType::f16, ElementType::f32})),
|
||||
::testing::Values(ElementType::i32),
|
||||
::testing::ValuesIn(std::vector<ov::element::Type>({ov::element::f16, ov::element::f32})),
|
||||
::testing::Values(ov::element::i32),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU)),
|
||||
GatherElementsGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -3,17 +3,18 @@
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/gather_nd.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include <string>
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/gather_nd.hpp"
|
||||
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
struct GatherNDShapeParams {
|
||||
InputShape inputShapes;
|
||||
InputShape targetShapes;
|
||||
@ -22,20 +23,20 @@ struct GatherNDShapeParams {
|
||||
|
||||
typedef std::tuple<
|
||||
GatherNDShapeParams,
|
||||
ElementType, // Network precision
|
||||
bool // Is const Indices
|
||||
ov::element::Type, // Model type
|
||||
bool // Is const Indices
|
||||
> GatherNDGPUTestParams;
|
||||
|
||||
|
||||
class GatherNDGPUTest : public testing::WithParamInterface<GatherNDGPUTestParams>,
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<GatherNDGPUTestParams> obj) {
|
||||
GatherNDShapeParams Shapes;
|
||||
ElementType netPrecision;
|
||||
ov::element::Type model_type;
|
||||
bool isIndicesConstant;
|
||||
|
||||
std::tie(Shapes, netPrecision, isIndicesConstant) = obj.param;
|
||||
std::tie(Shapes, model_type, isIndicesConstant) = obj.param;
|
||||
|
||||
std::ostringstream result;
|
||||
result << "IS=(";
|
||||
@ -53,7 +54,7 @@ public:
|
||||
result << "}_";
|
||||
}
|
||||
result << "batchDims=" << Shapes.batch_dims << "_";
|
||||
result << "netPrc=" << netPrecision << "_";
|
||||
result << "netPrc=" << model_type << "_";
|
||||
result << "constIdx=" << (isIndicesConstant ? "True" : "False") << "_";
|
||||
|
||||
return result.str();
|
||||
@ -62,11 +63,11 @@ public:
|
||||
protected:
|
||||
void SetUp() override {
|
||||
GatherNDShapeParams Shapes;
|
||||
ElementType netPrecision;
|
||||
ov::element::Type model_type;
|
||||
bool isIndicesConstant;
|
||||
const ElementType intInputsPrecision = ElementType::i32;
|
||||
const auto intInputsPrecision = ov::element::i32;
|
||||
|
||||
std::tie(Shapes, netPrecision, isIndicesConstant) = this->GetParam();
|
||||
std::tie(Shapes, model_type, isIndicesConstant) = this->GetParam();
|
||||
const int batchDims = Shapes.batch_dims;
|
||||
targetDevice = ov::test::utils::DEVICE_GPU;
|
||||
std::shared_ptr<ov::Node> indicesNode;
|
||||
@ -78,7 +79,7 @@ protected:
|
||||
init_input_shapes({Shapes.inputShapes, Shapes.targetShapes});
|
||||
}
|
||||
|
||||
ngraph::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};
|
||||
params.back()->set_friendly_name("data");
|
||||
|
||||
if (isIndicesConstant) {
|
||||
@ -88,13 +89,8 @@ protected:
|
||||
idx_range = std::min(static_cast<int64_t>(Shapes.inputShapes.second[i][j]), idx_range);
|
||||
}
|
||||
}
|
||||
indicesNode = ngraph::builder::makeConstant<int64_t>(
|
||||
ngraph::element::i64,
|
||||
Shapes.targetShapes.second[0],
|
||||
{},
|
||||
true,
|
||||
idx_range - 1,
|
||||
0);
|
||||
auto indices_tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, Shapes.targetShapes.second[0], idx_range - 1, 0);
|
||||
indicesNode = std::make_shared<ov::op::v0::Constant>(indices_tensor);
|
||||
} else {
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(intInputsPrecision, inputDynamicShapes[1]));
|
||||
params.back()->set_friendly_name("indices");
|
||||
@ -103,20 +99,16 @@ protected:
|
||||
gather_ndNode = std::make_shared<ov::op::v8::GatherND>(params[0],
|
||||
isIndicesConstant ? indicesNode : params[1],
|
||||
batchDims);
|
||||
ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(gather_ndNode)};
|
||||
function = std::make_shared<ngraph::Function>(results, params, "GatherND");
|
||||
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(gather_ndNode)};
|
||||
function = std::make_shared<ov::Model>(results, params, "GatherND");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(GatherNDGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(GatherNDGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<ov::element::Type_t> netPrecisions = {
|
||||
const std::vector<ov::element::Type> model_types = {
|
||||
ov::element::f32,
|
||||
ov::element::f16,
|
||||
ov::element::i32
|
||||
@ -158,8 +150,7 @@ const std::vector<GatherNDShapeParams> dynamicInputShapeConstTargetShape = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_dynamic_input_shapes_const_target_shapes, GatherNDGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(dynamicInputShapeConstTargetShape), // input shapes
|
||||
::testing::ValuesIn(netPrecisions), // network precision
|
||||
::testing::ValuesIn(model_types), // network precision
|
||||
::testing::Values(true)), // is const indices
|
||||
GatherNDGPUTest::getTestCaseName);
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,36 +2,35 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/gather_tree.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include <string>
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/gather_tree.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
typedef std::tuple<
|
||||
InputShape, // Input tensors shape
|
||||
ngraph::helpers::InputLayerType, // Secondary input type
|
||||
ov::element::Type_t, // Network precision
|
||||
ov::test::utils::InputLayerType, // Secondary input type
|
||||
ov::element::Type, // Model type
|
||||
std::string // Device name
|
||||
> GatherTreeGPUTestParams;
|
||||
|
||||
class GatherTreeLayerGPUTest : public testing::WithParamInterface<GatherTreeGPUTestParams>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(const testing::TestParamInfo<GatherTreeGPUTestParams> &obj) {
|
||||
InputShape inputShape;
|
||||
ov::element::Type_t netPrecision;
|
||||
ngraph::helpers::InputLayerType secondaryInputType;
|
||||
ov::element::Type_t model_type;
|
||||
ov::test::utils::InputLayerType secondaryInputType;
|
||||
std::string targetName;
|
||||
|
||||
std::tie(inputShape, secondaryInputType, netPrecision, targetName) = obj.param;
|
||||
std::tie(inputShape, secondaryInputType, model_type, targetName) = obj.param;
|
||||
|
||||
std::ostringstream result;
|
||||
result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_";
|
||||
@ -40,7 +39,7 @@ public:
|
||||
result << ov::test::utils::vec2str(item) << "_";
|
||||
}
|
||||
result << "secondaryInputType=" << secondaryInputType << "_";
|
||||
result << "netPRC=" << netPrecision << "_";
|
||||
result << "netPRC=" << model_type << "_";
|
||||
result << "trgDev=" << targetName;
|
||||
|
||||
return result.str();
|
||||
@ -49,10 +48,10 @@ public:
|
||||
protected:
|
||||
void SetUp() override {
|
||||
InputShape inputShape;
|
||||
ov::element::Type netPrecision;
|
||||
ngraph::helpers::InputLayerType secondaryInputType;
|
||||
ov::element::Type model_type;
|
||||
ov::test::utils::InputLayerType secondaryInputType;
|
||||
|
||||
std::tie(inputShape, secondaryInputType, netPrecision, targetDevice) = this->GetParam();
|
||||
std::tie(inputShape, secondaryInputType, model_type, targetDevice) = this->GetParam();
|
||||
InputShape parentShape{inputShape};
|
||||
InputShape::first_type maxSeqLenFirst;
|
||||
if (inputShape.first.is_dynamic()) {
|
||||
@ -73,15 +72,15 @@ protected:
|
||||
shape.push_back({});
|
||||
}
|
||||
|
||||
std::shared_ptr<ngraph::Node> inp2;
|
||||
std::shared_ptr<ngraph::Node> inp3;
|
||||
std::shared_ptr<ngraph::Node> inp4;
|
||||
std::shared_ptr<ov::Node> inp2;
|
||||
std::shared_ptr<ov::Node> inp3;
|
||||
std::shared_ptr<ov::Node> inp4;
|
||||
|
||||
ov::ParameterVector paramsIn{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
|
||||
if (ngraph::helpers::InputLayerType::PARAMETER == secondaryInputType) {
|
||||
auto param2 = std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[1]);
|
||||
auto param3 = std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[2]);
|
||||
auto param4 = std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[3]);
|
||||
ov::ParameterVector paramsIn{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};
|
||||
if (ov::test::utils::InputLayerType::PARAMETER == secondaryInputType) {
|
||||
auto param2 = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[1]);
|
||||
auto param3 = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[2]);
|
||||
auto param4 = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[3]);
|
||||
inp2 = param2;
|
||||
inp3 = param3;
|
||||
inp4 = param4;
|
||||
@ -89,23 +88,26 @@ protected:
|
||||
paramsIn.push_back(param2);
|
||||
paramsIn.push_back(param3);
|
||||
paramsIn.push_back(param4);
|
||||
} else if (ngraph::helpers::InputLayerType::CONSTANT == secondaryInputType) {
|
||||
} else if (ov::test::utils::InputLayerType::CONSTANT == secondaryInputType) {
|
||||
auto maxBeamIndex = inputShape.second.front().at(2) - 1;
|
||||
|
||||
inp2 = ngraph::builder::makeConstant<float>(netPrecision, inputShape.second.front(), {}, true, maxBeamIndex);
|
||||
inp3 = ngraph::builder::makeConstant<float>(netPrecision, {inputShape.second.front().at(1)}, {}, true, maxBeamIndex);
|
||||
inp4 = ngraph::builder::makeConstant<float>(netPrecision, {}, {}, true, maxBeamIndex);
|
||||
auto inp2_tensor = ov::test::utils::create_and_fill_tensor(model_type, inputShape.second.front(), maxBeamIndex);
|
||||
inp2 = std::make_shared<ov::op::v0::Constant>(inp2_tensor);
|
||||
auto inp3_tensor = ov::test::utils::create_and_fill_tensor(model_type, ov::Shape{inputShape.second.front().at(1)}, maxBeamIndex);
|
||||
inp3 = std::make_shared<ov::op::v0::Constant>(inp3_tensor);
|
||||
auto inp4_tensor = ov::test::utils::create_and_fill_tensor(model_type, ov::Shape{}, maxBeamIndex);
|
||||
inp4 = std::make_shared<ov::op::v0::Constant>(inp4_tensor);
|
||||
} else {
|
||||
throw std::runtime_error("Unsupported inputType");
|
||||
}
|
||||
|
||||
auto operationResult = std::make_shared<ngraph::opset4::GatherTree>(paramsIn.front(), inp2, inp3, inp4);
|
||||
auto operationResult = std::make_shared<ov::op::v1::GatherTree>(paramsIn.front(), inp2, inp3, inp4);
|
||||
|
||||
ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(operationResult)};
|
||||
function = std::make_shared<ngraph::Function>(results, paramsIn, "GatherTree");
|
||||
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(operationResult)};
|
||||
function = std::make_shared<ov::Model>(results, paramsIn, "GatherTree");
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
inputs.clear();
|
||||
const auto maxBeamIndex = targetInputStaticShapes.front().at(2) - 1;
|
||||
const auto& funcInputs = function->inputs();
|
||||
@ -121,15 +123,11 @@ protected:
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(GatherTreeLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(GatherTreeLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<ov::element::Type_t> netPrecisions = {
|
||||
const std::vector<ov::element::Type> model_types = {
|
||||
ov::element::f32,
|
||||
ov::element::i32
|
||||
};
|
||||
@ -167,19 +165,17 @@ const std::vector<InputShape> inputDynamicShapesConstant = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_gathertree_parameter_compareWithRefs_dynamic, GatherTreeLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(inputDynamicShapesParameter),
|
||||
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
|
||||
::testing::ValuesIn(netPrecisions),
|
||||
::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU)),
|
||||
GatherTreeLayerGPUTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_gathertree_constant_compareWithRefs_dynamic, GatherTreeLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(inputDynamicShapesConstant),
|
||||
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
|
||||
::testing::ValuesIn(netPrecisions),
|
||||
::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU)),
|
||||
GatherTreeLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
||||
|
@ -2,38 +2,37 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/select.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include <string>
|
||||
#include <common_test_utils/ov_tensor_utils.hpp>
|
||||
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/grid_sample.hpp"
|
||||
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
using ov::op::v9::GridSample;
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
|
||||
typedef std::tuple<
|
||||
std::vector<InputShape>, // Input shapes
|
||||
GridSample::InterpolationMode, // Interpolation mode
|
||||
GridSample::PaddingMode, // Padding mode
|
||||
bool, // Align corners
|
||||
ElementType, // Data precision
|
||||
ElementType // Grid precision
|
||||
ov::element::Type, // Data precision
|
||||
ov::element::Type // Grid precision
|
||||
> GridSampleLayerTestGPUParams;
|
||||
|
||||
class GridSampleLayerTestGPU : public testing::WithParamInterface<GridSampleLayerTestGPUParams>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<GridSampleLayerTestGPUParams> obj) {
|
||||
std::vector<InputShape> inputShapes;
|
||||
GridSample::InterpolationMode interpolateMode;
|
||||
GridSample::PaddingMode paddingMode;
|
||||
bool alignCorners;
|
||||
ElementType dataPrecision, gridPrecision;
|
||||
ov::element::Type dataPrecision, gridPrecision;
|
||||
|
||||
std::tie(inputShapes, interpolateMode, paddingMode, alignCorners, dataPrecision, gridPrecision) = obj.param;
|
||||
|
||||
@ -69,7 +68,7 @@ protected:
|
||||
GridSample::InterpolationMode interpolateMode;
|
||||
GridSample::PaddingMode paddingMode;
|
||||
bool alignCorners;
|
||||
ElementType dataPrecision, gridPrecision;
|
||||
ov::element::Type dataPrecision, gridPrecision;
|
||||
|
||||
std::tie(inputShapes, interpolateMode, paddingMode, alignCorners, dataPrecision, gridPrecision) = this->GetParam();
|
||||
targetDevice = ov::test::utils::DEVICE_GPU;
|
||||
@ -86,12 +85,12 @@ protected:
|
||||
GridSample::Attributes attributes = {alignCorners, interpolateMode, paddingMode};
|
||||
auto gridSampleNode = std::make_shared<GridSample>(params[0], params[1], attributes);
|
||||
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < gridSampleNode->get_output_size(); i++) {
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(gridSampleNode->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(gridSampleNode->output(i)));
|
||||
}
|
||||
|
||||
function = std::make_shared<ngraph::Function>(results, params, "GridSampleGPU");
|
||||
function = std::make_shared<ov::Model>(results, params, "GridSampleGPU");
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
@ -104,12 +103,12 @@ protected:
|
||||
|
||||
if (funcInput.get_node()->get_friendly_name() == "data") {
|
||||
int32_t range = std::accumulate(targetInputStaticShapes[0].begin(), targetInputStaticShapes[0].end(), 1u, std::multiplies<uint32_t>());
|
||||
tensor = utils::create_and_fill_tensor(
|
||||
tensor = ov::test::utils::create_and_fill_tensor(
|
||||
funcInput.get_element_type(), targetInputStaticShapes[0], range, -range / 2, 1);
|
||||
} else if (funcInput.get_node()->get_friendly_name() == "grid") {
|
||||
int32_t range = std::max(targetInputStaticShapes[0][2], targetInputStaticShapes[0][3]) + 2;
|
||||
int32_t resolution = range / 2;
|
||||
tensor = utils::create_and_fill_tensor(
|
||||
tensor = ov::test::utils::create_and_fill_tensor(
|
||||
funcInput.get_element_type(), targetInputStaticShapes[1], range, -1, resolution == 0 ? 1 : resolution);
|
||||
}
|
||||
inputs.insert({funcInput.get_node_shared_ptr(), tensor});
|
||||
@ -117,8 +116,7 @@ protected:
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(GridSampleLayerTestGPU, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
TEST_P(GridSampleLayerTestGPU, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
@ -152,8 +150,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic, GridSampleLayerTestGPU,
|
||||
::testing::ValuesIn(interpolateMode),
|
||||
::testing::ValuesIn(paddingMode),
|
||||
::testing::ValuesIn(alignCorners),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ElementType::f32)),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(ov::element::f32)),
|
||||
GridSampleLayerTestGPU::getTestCaseName);
|
||||
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
} // namespace
|
||||
|
@ -2,54 +2,50 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <tuple>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <memory>
|
||||
#include "ov_models/utils/ov_helpers.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "shared_test_classes/single_layer/group_convolution_backprop_data.hpp"
|
||||
#include "common_test_utils/test_constants.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/core/preprocess/pre_post_process.hpp"
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "common_test_utils/node_builders/group_convolution_backprop_data.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "shared_test_classes/single_op/group_convolution_backprop_data.hpp"
|
||||
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/group_conv.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
|
||||
using GroupDeconvSpecParams = LayerTestsDefinitions::groupConvBackpropSpecificParams;
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
using ov::test::groupConvBackpropSpecificParams;
|
||||
|
||||
using DeconvInputData = std::tuple<InputShape, // data shape
|
||||
ngraph::helpers::InputLayerType, // 'output_shape' input type
|
||||
ov::test::utils::InputLayerType, // 'output_shape' input type
|
||||
std::vector<std::vector<int32_t>>>; // values for 'output_shape'
|
||||
|
||||
using GroupDeconvLayerTestParamsSet = std::tuple<GroupDeconvSpecParams,
|
||||
using GroupDeconvLayerTestParamsSet = std::tuple<groupConvBackpropSpecificParams,
|
||||
DeconvInputData,
|
||||
ElementType,
|
||||
LayerTestsUtils::TargetDevice,
|
||||
ov::element::Type,
|
||||
std::string,
|
||||
std::map<std::string, std::string>>;
|
||||
|
||||
class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface<GroupDeconvLayerTestParamsSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<GroupDeconvLayerTestParamsSet> obj) {
|
||||
GroupDeconvSpecParams basicParamsSet;
|
||||
groupConvBackpropSpecificParams basicParamsSet;
|
||||
DeconvInputData inputData;
|
||||
ElementType prec;
|
||||
ov::element::Type prec;
|
||||
std::string targetDevice;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = obj.param;
|
||||
|
||||
ngraph::op::PadType padType;
|
||||
InferenceEngine::SizeVector kernel, stride, dilation;
|
||||
ov::op::PadType padType;
|
||||
std::vector<size_t> kernel, stride, dilation;
|
||||
std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
|
||||
size_t convOutChannels, groupNum;
|
||||
std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = basicParamsSet;
|
||||
|
||||
InputShape inputShape;
|
||||
ngraph::helpers::InputLayerType outShapeType;
|
||||
ov::test::utils::InputLayerType outShapeType;
|
||||
std::vector<std::vector<int32_t>> outShapeData;
|
||||
std::tie(inputShape, outShapeType, outShapeData) = inputData;
|
||||
|
||||
@ -89,13 +85,13 @@ public:
|
||||
return result.str();
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
if (function->get_parameters().size() != 1) {
|
||||
// WA: output_shape depends on 3rd deconvolution input data
|
||||
// but the reference implementation doesn't implement shape inference
|
||||
// so we need to build a new ngraph function and replace the 3rd input parameter with a constant
|
||||
// so we need to build a new ov function and replace the 3rd input parameter with a constant
|
||||
// to get valid output shapes
|
||||
functionRefs = createGraph({targetInputStaticShapes[0]}, ngraph::helpers::InputLayerType::CONSTANT);
|
||||
functionRefs = createGraph({targetInputStaticShapes[0]}, ov::test::utils::InputLayerType::CONSTANT);
|
||||
}
|
||||
inputs.clear();
|
||||
const auto& funcInputs = function->inputs();
|
||||
@ -159,17 +155,17 @@ public:
|
||||
function = p.build();
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::Model> createGraph(const std::vector<ov::PartialShape>& inShapes, ngraph::helpers::InputLayerType outShapeType) {
|
||||
std::shared_ptr<ov::Model> createGraph(const std::vector<ov::PartialShape>& inShapes, ov::test::utils::InputLayerType outShapeType) {
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(prec, inShapes.front())};
|
||||
std::shared_ptr<ov::Node> outShapeNode;
|
||||
if (!outShapeData.empty()) {
|
||||
if (outShapeType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
IE_ASSERT(inputDynamicShapes.size() == 2);
|
||||
auto outShapeParam = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i32, inputDynamicShapes.back());
|
||||
auto outShapeParam = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, inputDynamicShapes.back());
|
||||
params.push_back(outShapeParam);
|
||||
outShapeNode = outShapeParam;
|
||||
} else {
|
||||
outShapeNode = ngraph::opset8::Constant::create(ngraph::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]);
|
||||
outShapeNode = ov::op::v0::Constant::create(ov::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -180,36 +176,36 @@ public:
|
||||
std::shared_ptr<ov::Node> deconv;
|
||||
if (!outShapeData.empty()) {
|
||||
IE_ASSERT(outShapeNode != nullptr);
|
||||
deconv = ngraph::builder::makeGroupConvolutionBackpropData(params[0], outShapeNode, prec, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, groupNum);
|
||||
deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], outShapeNode, prec, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, groupNum);
|
||||
} else {
|
||||
deconv = ngraph::builder::makeGroupConvolutionBackpropData(params[0], prec, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, groupNum, false, outPadding);
|
||||
deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], prec, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, groupNum, false, outPadding);
|
||||
}
|
||||
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < deconv->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(deconv->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(deconv->output(i)));
|
||||
|
||||
return std::make_shared<ngraph::Function>(results, params, "GroupDeconv");
|
||||
return std::make_shared<ov::Model>(results, params, "GroupDeconv");
|
||||
}
|
||||
|
||||
protected:
|
||||
void SetUp() override {
|
||||
GroupDeconvSpecParams basicParamsSet;
|
||||
groupConvBackpropSpecificParams basicParamsSet;
|
||||
DeconvInputData inputData;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = this->GetParam();
|
||||
|
||||
InputShape inputShape;
|
||||
ngraph::helpers::InputLayerType outShapeType;
|
||||
ov::test::utils::InputLayerType outShapeType;
|
||||
std::tie(inputShape, outShapeType, outShapeData) = inputData;
|
||||
|
||||
std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = basicParamsSet;
|
||||
|
||||
std::vector<InputShape> paramsShapes;
|
||||
paramsShapes.push_back(inputShape);
|
||||
if (!outShapeData.empty() && outShapeType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
if (!outShapeData.empty() && outShapeType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
const auto outShapeDims = ov::Shape{outShapeData.front().size()};
|
||||
paramsShapes.push_back(InputShape{outShapeDims, std::vector<ov::Shape>(inputShape.second.size(), outShapeDims)});
|
||||
}
|
||||
@ -220,38 +216,34 @@ protected:
|
||||
}
|
||||
|
||||
private:
|
||||
ElementType prec;
|
||||
ngraph::op::PadType padType;
|
||||
InferenceEngine::SizeVector kernel, stride, dilation;
|
||||
ov::element::Type prec;
|
||||
ov::op::PadType padType;
|
||||
std::vector<size_t> kernel, stride, dilation;
|
||||
std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
|
||||
size_t convOutChannels, groupNum;
|
||||
std::vector<std::vector<int32_t>> outShapeData;
|
||||
size_t inferRequestNum = 0;
|
||||
};
|
||||
|
||||
TEST_P(GroupDeconvolutionLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(GroupDeconvolutionLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
std::map<std::string, std::string> emptyAdditionalConfig;
|
||||
|
||||
const std::vector<std::vector<size_t >> emptyOutputShape = {{}};
|
||||
const std::vector<std::vector<ptrdiff_t>> emptyOutputPadding = {{}};
|
||||
|
||||
/* ============= GroupConvolution params ============= */
|
||||
const InferenceEngine::SizeVector numOutChannels = {6};
|
||||
const InferenceEngine::SizeVector numGroups = {2, 3};
|
||||
const std::vector<size_t> numOutChannels = {6};
|
||||
const std::vector<size_t> numGroups = {2, 3};
|
||||
|
||||
/* ============= GroupConvolution params (2D) ============= */
|
||||
const std::vector<InferenceEngine::SizeVector> kernels2d = {{3, 3}, {1, 1}};
|
||||
const std::vector<InferenceEngine::SizeVector> strides2d = {{1, 1}, {2, 2}};
|
||||
const std::vector<std::vector<size_t>> kernels2d = {{3, 3}, {1, 1}};
|
||||
const std::vector<std::vector<size_t>> strides2d = {{1, 1}, {2, 2}};
|
||||
const std::vector<std::vector<ptrdiff_t>> padBegins2d = {{0, 0}};
|
||||
const std::vector<std::vector<ptrdiff_t>> padEnds2d = {{0, 0}};
|
||||
const std::vector<InferenceEngine::SizeVector> dilations2d = {{1, 1}};
|
||||
const std::vector<std::vector<size_t>> dilations2d = {{1, 1}};
|
||||
|
||||
/* ============= GroupConvolution (2D) ============= */
|
||||
const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine(
|
||||
@ -262,29 +254,29 @@ const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine(
|
||||
::testing::ValuesIn(dilations2d),
|
||||
::testing::ValuesIn(numOutChannels),
|
||||
::testing::ValuesIn(numGroups),
|
||||
::testing::Values(ngraph::op::PadType::EXPLICIT),
|
||||
::testing::Values(ov::op::PadType::EXPLICIT),
|
||||
::testing::ValuesIn(emptyOutputPadding)
|
||||
);
|
||||
|
||||
const std::vector<DeconvInputData> dyn_2D_inputs_smoke = {
|
||||
DeconvInputData{
|
||||
InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{}
|
||||
},
|
||||
DeconvInputData{
|
||||
InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{}
|
||||
},
|
||||
DeconvInputData{
|
||||
InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}, {2, 12, 5, 7}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{}
|
||||
},
|
||||
DeconvInputData{
|
||||
InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {3, 12, 7, 7}, {2, 12, 7, 7}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{}
|
||||
}
|
||||
};
|
||||
@ -293,7 +285,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_FP32, GroupDeconvolutionLa
|
||||
::testing::Combine(
|
||||
groupConvParams_ExplicitPadding_2D,
|
||||
::testing::ValuesIn(dyn_2D_inputs_smoke),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
GroupDeconvolutionLayerGPUTest::getTestCaseName);
|
||||
@ -301,17 +293,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_FP32, GroupDeconvolutionLa
|
||||
const std::vector<DeconvInputData> dyn_2D_inputs_with_output_shape = {
|
||||
DeconvInputData{
|
||||
InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{15, 15}, {9, 10}, {15, 15}}
|
||||
},
|
||||
DeconvInputData{
|
||||
InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}, {2, 12, 5, 7}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{15, 15}}
|
||||
},
|
||||
DeconvInputData{
|
||||
InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {3, 12, 7, 7}, {2, 12, 7, 7}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{15, 15}}
|
||||
}
|
||||
};
|
||||
@ -319,21 +311,19 @@ const std::vector<DeconvInputData> dyn_2D_inputs_with_output_shape = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_OutputShape_FP32, GroupDeconvolutionLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3}),
|
||||
::testing::Values(std::vector<size_t>{3, 3}),
|
||||
::testing::ValuesIn(strides2d),
|
||||
::testing::ValuesIn(padBegins2d),
|
||||
::testing::ValuesIn(padEnds2d),
|
||||
::testing::ValuesIn(dilations2d),
|
||||
::testing::ValuesIn(numOutChannels),
|
||||
::testing::ValuesIn(numGroups),
|
||||
::testing::Values(ngraph::op::PadType::EXPLICIT),
|
||||
::testing::Values(ov::op::PadType::EXPLICIT),
|
||||
::testing::ValuesIn(emptyOutputPadding)),
|
||||
::testing::ValuesIn(dyn_2D_inputs_with_output_shape),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
GroupDeconvolutionLayerGPUTest::getTestCaseName);

} // namespace

} // namespace GPULayerTestsDefinitions
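
The single-layer test files in this commit converge on one fixture shape: parameters arrive through a std::tuple, SetUp() builds an ov::Model from ov::op nodes, and the test body is a bare run(). A condensed sketch of that pattern follows for orientation; ExampleLayerGPUTest, ExampleParams and the ReLU payload are illustrative placeholders, not code from the patch.

#include <memory>
#include <string>
#include <tuple>

#include "shared_test_classes/base/ov_subgraph.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/result.hpp"

namespace {
using ov::test::InputShape;

// Hypothetical parameter set: model type, input shape, device name.
using ExampleParams = std::tuple<ov::element::Type, InputShape, std::string>;

class ExampleLayerGPUTest : public testing::WithParamInterface<ExampleParams>,
                            virtual public ov::test::SubgraphBaseTest {
protected:
    void SetUp() override {
        ov::element::Type model_type;
        InputShape shape;
        std::tie(model_type, shape, targetDevice) = this->GetParam();

        init_input_shapes({shape});

        ov::ParameterVector params;
        for (auto&& s : inputDynamicShapes)
            params.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, s));

        // Any single-layer payload would sit here; ReLU keeps the sketch short.
        auto payload = std::make_shared<ov::op::v0::Relu>(params.front());

        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(payload)};
        function = std::make_shared<ov::Model>(results, params, "ExampleLayer");
    }
};

TEST_P(ExampleLayerGPUTest, Inference) {
    run();
}
}  // namespace
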
@ -1,46 +1,39 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include "ov_models/utils/ov_helpers.hpp"
#include "ov_models/builders.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/node_builders/group_convolution.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "shared_test_classes/single_layer/group_convolution.hpp"
#include "common_test_utils/test_constants.hpp"
#include "shared_test_classes/single_op/group_convolution.hpp"

// using namespace LayerTestsDefinitions;
using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/group_conv.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;
using ov::test::groupConvSpecificParams;

using LayerTestsDefinitions::groupConvSpecificParams;
typedef std::tuple<
    groupConvSpecificParams,
    ElementType,                    // Net precision
    ElementType,                    // Input precision
    ElementType,                    // Output precision
    InputShape,                     // Input shape
    LayerTestsUtils::TargetDevice   // Device name
    ov::element::Type,              // Model type
    InputShape,                     // Input shape
    std::string                     // Device name
> groupConvLayerTestParamsSet;


class GroupConvolutionLayerGPUTestDynamic : public testing::WithParamInterface<groupConvLayerTestParamsSet>,
                                            virtual public SubgraphBaseTest {
                                            virtual public ov::test::SubgraphBaseTest {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<groupConvLayerTestParamsSet>& obj) {
|
||||
groupConvSpecificParams groupConvParams;
|
||||
ElementType netType;
|
||||
ElementType inType, outType;
|
||||
ov::element::Type model_type;
|
||||
InputShape inputShape;
|
||||
std::string targetDevice;
|
||||
std::tie(groupConvParams, netType, inType, outType, inputShape, targetDevice) = obj.param;
|
||||
std::tie(groupConvParams, model_type, inputShape, targetDevice) = obj.param;
|
||||
|
||||
ngraph::op::PadType padType;
|
||||
InferenceEngine::SizeVector kernel, stride, dilation;
|
||||
ov::op::PadType padType;
|
||||
std::vector<size_t> kernel, stride, dilation;
|
||||
std::vector<ptrdiff_t> padBegin, padEnd;
|
||||
size_t convOutChannels;
|
||||
size_t numGroups;
|
||||
@ -62,9 +55,7 @@ public:
|
||||
result << "O=" << convOutChannels << "_";
|
||||
result << "G=" << numGroups << "_";
|
||||
result << "AP=" << padType << "_";
|
||||
result << "netPRC=" << netType << "_";
|
||||
result << "inPRC=" << inType << "_";
|
||||
result << "outPRC=" << outType << "_";
|
||||
result << "netPRC=" << model_type << "_";
|
||||
result << "trgDev=" << targetDevice;
|
||||
|
||||
return result.str();
|
||||
@ -74,13 +65,13 @@ protected:
|
||||
void SetUp() override {
|
||||
groupConvSpecificParams groupConvParams;
|
||||
InputShape inputShape;
|
||||
auto netType = ElementType::undefined;
|
||||
std::tie(groupConvParams, netType, inType, outType, inputShape, targetDevice) = this->GetParam();
|
||||
auto model_type = ov::element::undefined;
|
||||
std::tie(groupConvParams, model_type, inputShape, targetDevice) = this->GetParam();
|
||||
|
||||
init_input_shapes({inputShape});
|
||||
|
||||
ngraph::op::PadType padType;
|
||||
InferenceEngine::SizeVector kernel, stride, dilation;
|
||||
ov::op::PadType padType;
|
||||
std::vector<size_t> kernel, stride, dilation;
|
||||
std::vector<ptrdiff_t> padBegin, padEnd;
|
||||
size_t convOutChannels;
|
||||
size_t numGroups;
|
||||
@ -88,25 +79,23 @@ protected:
|
||||
|
||||
ov::ParameterVector inputParams;
|
||||
for (auto&& shape : inputDynamicShapes)
|
||||
inputParams.push_back(std::make_shared<ov::op::v0::Parameter>(inType, shape));
|
||||
inputParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
|
||||
|
||||
auto groupConvolutionNode = ngraph::builder::makeGroupConvolution(inputParams.front(), netType, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, numGroups);
|
||||
auto groupConvolutionNode = ov::test::utils::make_group_convolution(inputParams.front(), model_type, kernel, stride, padBegin,
|
||||
padEnd, dilation, padType, convOutChannels, numGroups);
|
||||
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < groupConvolutionNode->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(groupConvolutionNode->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(groupConvolutionNode->output(i)));
|
||||
|
||||
function = std::make_shared<ngraph::Function>(results, inputParams, "GroupConvolution");
|
||||
function = std::make_shared<ov::Model>(results, inputParams, "GroupConvolution");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(GroupConvolutionLayerGPUTestDynamic, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
TEST_P(GroupConvolutionLayerGPUTestDynamic, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
const std::vector<ov::test::InputShape> dynInputShapes1D = {
|
||||
{
|
||||
{1, 12, ov::Dimension::dynamic()},
|
||||
@ -116,17 +105,15 @@ const std::vector<ov::test::InputShape> dynInputShapes1D = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_DwGroupConvolutionLayerGPUTest_dynamic1DSymPad, GroupConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3}),
|
||||
::testing::Values(SizeVector{1}),
|
||||
::testing::Values(std::vector<size_t>{3}),
|
||||
::testing::Values(std::vector<size_t>{1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{0}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{0}),
|
||||
::testing::Values(SizeVector{1}),
|
||||
::testing::Values(std::vector<size_t>{1}),
|
||||
::testing::Values(12),
|
||||
::testing::Values(12),
|
||||
::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes1D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
|
||||
GroupConvolutionLayerGPUTestDynamic::getTestCaseName);
|
||||
@ -135,17 +122,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_DwGroupConvolutionLayerGPUTest_dynamic1DSymPad, G
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic1DSymPad_Disabled, GroupConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3}),
|
||||
::testing::Values(SizeVector{1}),
|
||||
::testing::Values(std::vector<size_t>{3}),
|
||||
::testing::Values(std::vector<size_t>{1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{0}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{0}),
|
||||
::testing::Values(SizeVector{1}),
|
||||
::testing::Values(std::vector<size_t>{1}),
|
||||
::testing::Values(4),
|
||||
::testing::Values(4),
|
||||
::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes1D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
|
||||
GroupConvolutionLayerGPUTestDynamic::getTestCaseName);
|
||||
@ -161,17 +146,15 @@ const std::vector<ov::test::InputShape> dynInputShapes2D = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2DSymPad, GroupConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{3, 3}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(4),
|
||||
::testing::Values(4),
|
||||
::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes2D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
|
||||
GroupConvolutionLayerGPUTestDynamic::getTestCaseName);
|
||||
@ -179,17 +162,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2DSymPad, Gro
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_AsymPad, GroupConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{3, 3}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{2, 1}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(4),
|
||||
::testing::Values(4),
|
||||
::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes2D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
|
||||
GroupConvolutionLayerGPUTestDynamic::getTestCaseName);
|
||||
@ -197,17 +178,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_AsymPad, G
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_SymAutoPad, GroupConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{3, 3}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(4),
|
||||
::testing::Values(4),
|
||||
::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes2D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
|
||||
GroupConvolutionLayerGPUTestDynamic::getTestCaseName);
|
||||
@ -215,20 +194,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_SymAutoPad
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_AsymAutoPad, GroupConvolutionLayerGPUTestDynamic,
|
||||
::testing::Combine(
|
||||
::testing::Combine(
|
||||
::testing::Values(SizeVector{3, 3}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{3, 3}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{1, 2}),
|
||||
::testing::Values(std::vector<ptrdiff_t>{2, 1}),
|
||||
::testing::Values(SizeVector{1, 1}),
|
||||
::testing::Values(std::vector<size_t>{1, 1}),
|
||||
::testing::Values(4),
|
||||
::testing::Values(4),
|
||||
::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::f16),
|
||||
::testing::Values(ElementType::undefined),
|
||||
::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})),
|
||||
::testing::Values(ov::element::f16),
|
||||
::testing::ValuesIn(dynInputShapes2D),
|
||||
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
|
||||
GroupConvolutionLayerGPUTestDynamic::getTestCaseName);
} // namespace

} // namespace GPULayerTestsDefinitions
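
As a usage note for the hunk above: ngraph::builder::makeGroupConvolution is replaced by ov::test::utils::make_group_convolution with the same argument order. The sketch below restates that call with stand-in values; the shapes and channel counts are placeholders, not taken from the suites in this file.

#include <memory>
#include <vector>

#include "common_test_utils/node_builders/group_convolution.hpp"
#include "openvino/op/parameter.hpp"

// Builds a 2D group convolution node using the argument order shown in the
// refactored SetUp() above; all concrete values here are illustrative.
std::shared_ptr<ov::Node> make_example_group_conv(const std::shared_ptr<ov::op::v0::Parameter>& input) {
    const std::vector<size_t> kernel{3, 3};
    const std::vector<size_t> stride{1, 1};
    const std::vector<ptrdiff_t> pad_begin{1, 2};
    const std::vector<ptrdiff_t> pad_end{1, 2};
    const std::vector<size_t> dilation{1, 1};
    const size_t out_channels = 4;
    const size_t groups = 4;

    return ov::test::utils::make_group_convolution(input,
                                                   ov::element::f16,
                                                   kernel,
                                                   stride,
                                                   pad_begin,
                                                   pad_end,
                                                   dilation,
                                                   ov::op::PadType::EXPLICIT,
                                                   out_channels,
                                                   groups);
}
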
@ -2,67 +2,69 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/interpolate.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/test_enums.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ov_models/builders.hpp"
#include <common_test_utils/ov_tensor_utils.hpp>
#include "openvino/core/preprocess/pre_post_process.hpp"

using namespace ov::test;
using ngraph::helpers::operator<<;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/interpolate.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;
|
||||
|
||||
using InterpolateSpecificParams = std::tuple<ngraph::op::v4::Interpolate::InterpolateMode, // InterpolateMode
|
||||
ngraph::op::v4::Interpolate::CoordinateTransformMode, // CoordinateTransformMode
|
||||
ngraph::op::v4::Interpolate::NearestMode, // NearestMode
|
||||
using InterpolateSpecificParams = std::tuple<ov::op::v4::Interpolate::InterpolateMode, // InterpolateMode
|
||||
ov::op::v4::Interpolate::CoordinateTransformMode, // CoordinateTransformMode
|
||||
ov::op::v4::Interpolate::NearestMode, // NearestMode
|
||||
bool, // AntiAlias
|
||||
std::vector<size_t>, // PadBegin
|
||||
std::vector<size_t>, // PadEnd
|
||||
double>; // Cube coef
|
||||
|
||||
using ShapeParams = std::tuple<ngraph::op::v4::Interpolate::ShapeCalcMode, // ShapeCalculationMode
|
||||
using ShapeParams = std::tuple<ov::op::v4::Interpolate::ShapeCalcMode, // ShapeCalculationMode
|
||||
InputShape, // Input shapes
|
||||
// params describing input, choice of which depends on ShapeCalcMode
|
||||
ngraph::helpers::InputLayerType, // sizes input type
|
||||
ngraph::helpers::InputLayerType, // scales input type
|
||||
ov::test::utils::InputLayerType, // sizes input type
|
||||
ov::test::utils::InputLayerType, // scales input type
|
||||
std::vector<std::vector<float>>, // scales or sizes values
|
||||
std::vector<int64_t>>; // axes
|
||||
|
||||
using InterpolateLayerGPUTestParamsSet = std::tuple<InterpolateSpecificParams,
|
||||
ShapeParams,
|
||||
ElementType,
|
||||
ov::element::Type,
|
||||
bool>; // use Interpolate_v11
|
||||
|
||||
class InterpolateLayerGPUTest : public testing::WithParamInterface<InterpolateLayerGPUTestParamsSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<InterpolateLayerGPUTestParamsSet> obj) {
|
||||
InterpolateSpecificParams specificParams;
|
||||
ShapeParams shapeParams;
|
||||
ElementType prec;
|
||||
ov::element::Type prec;
|
||||
bool useInterpolateV11;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
std::tie(specificParams, shapeParams, prec, useInterpolateV11) = obj.param;
|
||||
|
||||
ngraph::op::v4::Interpolate::InterpolateMode mode;
|
||||
ngraph::op::v4::Interpolate::CoordinateTransformMode transfMode;
|
||||
ngraph::op::v4::Interpolate::NearestMode nearMode;
|
||||
ov::op::v4::Interpolate::InterpolateMode mode;
|
||||
ov::op::v4::Interpolate::CoordinateTransformMode transfMode;
|
||||
ov::op::v4::Interpolate::NearestMode nearMode;
|
||||
bool antiAlias;
|
||||
std::vector<size_t> padBegin;
|
||||
std::vector<size_t> padEnd;
|
||||
double cubeCoef;
|
||||
std::tie(mode, transfMode, nearMode, antiAlias, padBegin, padEnd, cubeCoef) = specificParams;
|
||||
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode shapeCalcMode;
|
||||
ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode;
|
||||
InputShape inputShapes;
|
||||
ngraph::helpers::InputLayerType sizesInputType;
|
||||
ngraph::helpers::InputLayerType scalesInputType;
|
||||
ov::test::utils::InputLayerType sizesInputType;
|
||||
ov::test::utils::InputLayerType scalesInputType;
|
||||
std::vector<std::vector<float>> shapeDataForInput;
|
||||
std::vector<int64_t> axes;
|
||||
std::tie(shapeCalcMode, inputShapes, sizesInputType, scalesInputType, shapeDataForInput, axes) = shapeParams;
|
||||
|
||||
std::ostringstream result;
|
||||
using ov::operator<<;
|
||||
result << "ShapeCalcMode=" << shapeCalcMode << "_";
|
||||
result << "IS=";
|
||||
result << ov::test::utils::partialShape2str({inputShapes.first}) << "_";
|
||||
@ -70,7 +72,7 @@ public:
|
||||
for (const auto& shape : inputShapes.second) {
|
||||
result << ov::test::utils::vec2str(shape) << "_";
|
||||
}
|
||||
if (shapeCalcMode == ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES) {
|
||||
if (shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SCALES) {
|
||||
result << "Scales=";
|
||||
} else {
|
||||
result << "Sizes=";
|
||||
@ -101,7 +103,7 @@ public:
|
||||
return result.str();
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
inputs.clear();
|
||||
const auto& funcInputs = function->inputs();
|
||||
for (size_t i = 0; i < funcInputs.size(); ++i) {
|
||||
@ -111,7 +113,7 @@ public:
|
||||
if (i == 0) {
|
||||
tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 2560, 0, 256);
|
||||
} else if (i == 1) {
|
||||
if (shapeCalcMode == ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES || funcInputs.size() == 3) {
|
||||
if (shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SIZES || funcInputs.size() == 3) {
|
||||
tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i], sizes[inferRequestNum].data());
|
||||
} else {
|
||||
tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i], scales[inferRequestNum].data());
|
||||
@ -152,7 +154,7 @@ public:
|
||||
protected:
|
||||
std::vector<std::vector<float>> scales;
|
||||
std::vector<std::vector<int32_t>> sizes;
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode shapeCalcMode;
|
||||
ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode;
|
||||
size_t inferRequestNum = 0;
|
||||
|
||||
void SetUp() override {
|
||||
@ -160,13 +162,13 @@ protected:
|
||||
|
||||
InterpolateSpecificParams specificParams;
|
||||
ShapeParams shapeParams;
|
||||
ElementType ngPrc;
|
||||
ov::element::Type ngPrc;
|
||||
bool useInterpolateV11;
|
||||
std::tie(specificParams, shapeParams, ngPrc, useInterpolateV11) = this->GetParam();
|
||||
|
||||
ngraph::op::v4::Interpolate::InterpolateMode mode;
|
||||
ngraph::op::v4::Interpolate::CoordinateTransformMode transfMode;
|
||||
ngraph::op::v4::Interpolate::NearestMode nearMode;
|
||||
ov::op::v4::Interpolate::InterpolateMode mode;
|
||||
ov::op::v4::Interpolate::CoordinateTransformMode transfMode;
|
||||
ov::op::v4::Interpolate::NearestMode nearMode;
|
||||
bool antiAlias;
|
||||
std::vector<size_t> padBegin;
|
||||
std::vector<size_t> padEnd;
|
||||
@ -174,13 +176,13 @@ protected:
|
||||
std::tie(mode, transfMode, nearMode, antiAlias, padBegin, padEnd, cubeCoef) = specificParams;
|
||||
|
||||
InputShape dataShape;
|
||||
ngraph::helpers::InputLayerType sizesInputType;
|
||||
ngraph::helpers::InputLayerType scalesInputType;
|
||||
ov::test::utils::InputLayerType sizesInputType;
|
||||
ov::test::utils::InputLayerType scalesInputType;
|
||||
std::vector<std::vector<float>> shapeDataForInput;
|
||||
std::vector<int64_t> axes;
|
||||
std::tie(shapeCalcMode, dataShape, sizesInputType, scalesInputType, shapeDataForInput, axes) = shapeParams;
|
||||
|
||||
if (shapeCalcMode == ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES) {
|
||||
if (shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SCALES) {
|
||||
scales = shapeDataForInput;
|
||||
sizes.resize(scales.size(), std::vector<int32_t>(scales.front().size(), 0));
|
||||
} else {
|
||||
@ -195,10 +197,10 @@ protected:
|
||||
|
||||
std::vector<InputShape> inputShapes;
|
||||
inputShapes.push_back(dataShape);
|
||||
if (sizesInputType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
if (sizesInputType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
inputShapes.push_back(InputShape({static_cast<int64_t>(axes.size())}, std::vector<ov::Shape>(dataShape.second.size(), {axes.size()})));
|
||||
}
|
||||
if (scalesInputType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
if (scalesInputType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
inputShapes.push_back(InputShape({static_cast<int64_t>(axes.size())}, std::vector<ov::Shape>(dataShape.second.size(), {axes.size()})));
|
||||
}
|
||||
|
||||
@ -207,111 +209,108 @@ protected:
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, inputDynamicShapes.front())};
|
||||
|
||||
std::shared_ptr<ov::Node> sizesInput, scalesInput;
|
||||
if (shapeCalcMode == ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES) {
|
||||
if (scalesInputType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
auto paramNode = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::Type_t::f32, ov::Shape{scales.front().size()});
|
||||
if (shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SCALES) {
|
||||
if (scalesInputType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
auto paramNode = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{scales.front().size()});
|
||||
params.push_back(paramNode);
|
||||
scalesInput = paramNode;
|
||||
} else {
|
||||
scalesInput = std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::f32, ov::Shape{scales.front().size()}, scales.front());
|
||||
scalesInput = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{scales.front().size()}, scales.front());
|
||||
}
|
||||
if (sizesInputType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
auto paramNode = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::Type_t::i32, ov::Shape{sizes.front().size()});
|
||||
if (sizesInputType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
auto paramNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{sizes.front().size()});
|
||||
params.push_back(paramNode);
|
||||
sizesInput = paramNode;
|
||||
} else {
|
||||
sizesInput = std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i32, ov::Shape{sizes.front().size()}, sizes.front());
|
||||
sizesInput = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{sizes.front().size()}, sizes.front());
|
||||
}
|
||||
} else {
|
||||
if (sizesInputType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
auto paramNode = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::Type_t::i32, ov::Shape{sizes.front().size()});
|
||||
if (sizesInputType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
auto paramNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{sizes.front().size()});
|
||||
params.push_back(paramNode);
|
||||
sizesInput = paramNode;
|
||||
} else {
|
||||
sizesInput = std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i32, ov::Shape{sizes.front().size()}, sizes.front());
|
||||
sizesInput = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{sizes.front().size()}, sizes.front());
|
||||
}
|
||||
if (scalesInputType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
auto paramNode = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::Type_t::f32, ov::Shape{scales.front().size()});
|
||||
if (scalesInputType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
auto paramNode = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{scales.front().size()});
|
||||
params.push_back(paramNode);
|
||||
scalesInput = paramNode;
|
||||
} else {
|
||||
scalesInput = std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::f32, ov::Shape{scales.front().size()}, scales.front());
|
||||
scalesInput = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{scales.front().size()}, scales.front());
|
||||
}
|
||||
}
|
||||
|
||||
auto axesInput = std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i64, ov::Shape{axes.size()}, axes);
|
||||
auto axesInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{axes.size()}, axes);
|
||||
|
||||
for (size_t i = 0; i < params.size(); i++) {
|
||||
params[i]->set_friendly_name(std::string("param_") + std::to_string(i));
|
||||
}
|
||||
|
||||
ngraph::op::v4::Interpolate::InterpolateAttrs interpAttr{mode, shapeCalcMode, padBegin, padEnd, transfMode, nearMode,
|
||||
ov::op::v4::Interpolate::InterpolateAttrs interpAttr{mode, shapeCalcMode, padBegin, padEnd, transfMode, nearMode,
|
||||
antiAlias, cubeCoef};
|
||||
std::shared_ptr<ngraph::op::Op> interpolate;
|
||||
bool scalesMode = shapeCalcMode == ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES;
|
||||
std::shared_ptr<ov::op::Op> interpolate;
|
||||
bool scalesMode = shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SCALES;
|
||||
if (useInterpolateV11) {
|
||||
if (axes.size() != dataShape.first.size()) {
|
||||
interpolate = std::make_shared<ngraph::op::v11::Interpolate>(params[0],
|
||||
interpolate = std::make_shared<ov::op::v11::Interpolate>(params[0],
|
||||
scalesMode ? scalesInput : sizesInput,
|
||||
axesInput,
|
||||
interpAttr);
|
||||
} else {
|
||||
interpolate = std::make_shared<ngraph::op::v11::Interpolate>(params[0],
|
||||
interpolate = std::make_shared<ov::op::v11::Interpolate>(params[0],
|
||||
scalesMode ? scalesInput : sizesInput,
|
||||
interpAttr);
|
||||
}
|
||||
} else {
|
||||
interpolate = std::make_shared<ngraph::op::v4::Interpolate>(params[0],
|
||||
interpolate = std::make_shared<ov::op::v4::Interpolate>(params[0],
|
||||
sizesInput,
|
||||
scalesInput,
|
||||
axesInput,
|
||||
interpAttr);
|
||||
}
|
||||
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < interpolate->get_output_size(); ++i) {
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(interpolate->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(interpolate->output(i)));
|
||||
}
|
||||
function = std::make_shared<ngraph::Function>(results, params, "InterpolateGPU");
|
||||
function = std::make_shared<ov::Model>(results, params, "InterpolateGPU");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(InterpolateLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
TEST_P(InterpolateLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<ngraph::op::v4::Interpolate::CoordinateTransformMode> coordinateTransformModes_Smoke = {
|
||||
ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL,
|
||||
ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC,
|
||||
const std::vector<ov::op::v4::Interpolate::CoordinateTransformMode> coordinateTransformModes_Smoke = {
|
||||
ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL,
|
||||
ov::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC,
|
||||
};
|
||||
|
||||
const std::vector<ngraph::op::v4::Interpolate::CoordinateTransformMode> coordinateTransformModes_Full = {
|
||||
ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN,
|
||||
ngraph::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL,
|
||||
ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL,
|
||||
ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC,
|
||||
ngraph::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS,
|
||||
const std::vector<ov::op::v4::Interpolate::CoordinateTransformMode> coordinateTransformModes_Full = {
|
||||
ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN,
|
||||
ov::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL,
|
||||
ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL,
|
||||
ov::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC,
|
||||
ov::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS,
|
||||
};
|
||||
|
||||
const std::vector<ngraph::op::v4::Interpolate::NearestMode> nearestModes_Smoke = {
|
||||
ngraph::op::v4::Interpolate::NearestMode::SIMPLE,
|
||||
ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
|
||||
ngraph::op::v4::Interpolate::NearestMode::FLOOR,
|
||||
const std::vector<ov::op::v4::Interpolate::NearestMode> nearestModes_Smoke = {
|
||||
ov::op::v4::Interpolate::NearestMode::SIMPLE,
|
||||
ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
|
||||
ov::op::v4::Interpolate::NearestMode::FLOOR,
|
||||
};
|
||||
|
||||
const std::vector<ngraph::op::v4::Interpolate::NearestMode> nearestModes_Full = {
|
||||
ngraph::op::v4::Interpolate::NearestMode::SIMPLE,
|
||||
ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
|
||||
ngraph::op::v4::Interpolate::NearestMode::FLOOR,
|
||||
ngraph::op::v4::Interpolate::NearestMode::CEIL,
|
||||
ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL,
|
||||
const std::vector<ov::op::v4::Interpolate::NearestMode> nearestModes_Full = {
|
||||
ov::op::v4::Interpolate::NearestMode::SIMPLE,
|
||||
ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
|
||||
ov::op::v4::Interpolate::NearestMode::FLOOR,
|
||||
ov::op::v4::Interpolate::NearestMode::CEIL,
|
||||
ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL,
|
||||
};
|
||||
|
||||
const std::vector<ngraph::op::v4::Interpolate::NearestMode> defNearestModes = {
|
||||
ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
|
||||
const std::vector<ov::op::v4::Interpolate::NearestMode> defNearestModes = {
|
||||
ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
|
||||
};
|
||||
|
||||
const std::vector<bool> antialias = {
|
||||
@ -337,50 +336,50 @@ const std::vector<std::vector<int64_t>> reducedAxes4D = {
|
||||
|
||||
const std::vector<ShapeParams> shapeParams4D_Smoke = {
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{1.f, 1.f, 1.25f, 1.5f}, {1.f, 1.f, 1.25f, 1.25f}, {1.f, 1.f, 1.25f, 1.5f}},
|
||||
defaultAxes4D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{1.f, 1.f, 0.5f, 2.0f}},
|
||||
defaultAxes4D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{0.5f, 2.0f}},
|
||||
reducedAxes4D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{1, 11, 5, 6}, {2, 7, 8, 7}, {1, 11, 5, 6}},
|
||||
defaultAxes4D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{1, 2, 24, 10}},
|
||||
defaultAxes4D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{24, 10}},
|
||||
reducedAxes4D.front()
|
||||
}
|
||||
@ -388,18 +387,18 @@ const std::vector<ShapeParams> shapeParams4D_Smoke = {
|
||||
|
||||
const std::vector<ShapeParams> shapeParams4D_Full = {
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{1.f, 1.f, 1.25f, 1.5f}},
|
||||
defaultAxes4D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{1, 11, 5, 6}},
|
||||
defaultAxes4D.front()
|
||||
}
|
||||
@ -407,41 +406,41 @@ const std::vector<ShapeParams> shapeParams4D_Full = {
|
||||
|
||||
const std::vector<ShapeParams> shapeParams4DReducedAxis_Full = {
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{1.f, 1.f, 1.25f, 1.5f}},
|
||||
defaultAxes4D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{1, 11, 5, 6}},
|
||||
defaultAxes4D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{1.5f}},
|
||||
reducedAxes4D.back()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{6}},
|
||||
reducedAxes4D.back()
|
||||
}
|
||||
};
|
||||
|
||||
const auto interpolateCasesNN_Smoke = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST),
|
||||
::testing::ValuesIn(coordinateTransformModes_Smoke),
|
||||
::testing::ValuesIn(nearestModes_Smoke),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -450,7 +449,7 @@ const auto interpolateCasesNN_Smoke = ::testing::Combine(
|
||||
::testing::ValuesIn(cubeCoefs));
|
||||
|
||||
const auto interpolateCasesNN_Full = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST),
|
||||
::testing::ValuesIn(coordinateTransformModes_Full),
|
||||
::testing::ValuesIn(nearestModes_Full),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -462,7 +461,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN_Layout_Test, InterpolateLayerGPUTes
|
||||
::testing::Combine(
|
||||
interpolateCasesNN_Smoke,
|
||||
::testing::ValuesIn(shapeParams4D_Smoke),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(true, false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
@ -470,12 +469,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateNN_Layout_Test, InterpolateLayerGPUTest,
|
||||
::testing::Combine(
|
||||
interpolateCasesNN_Full,
|
||||
::testing::ValuesIn(shapeParams4DReducedAxis_Full),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(true, false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
const auto interpolateCasesLinearOnnx_Smoke = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX),
|
||||
::testing::ValuesIn(coordinateTransformModes_Smoke),
|
||||
::testing::ValuesIn(defNearestModes),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -484,7 +483,7 @@ const auto interpolateCasesLinearOnnx_Smoke = ::testing::Combine(
|
||||
::testing::ValuesIn(cubeCoefs));
|
||||
|
||||
const auto interpolateCasesLinearOnnx_Full = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX),
|
||||
::testing::ValuesIn(coordinateTransformModes_Full),
|
||||
::testing::ValuesIn(defNearestModes),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -496,7 +495,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinearOnnx_Layout_Test, InterpolateLay
|
||||
::testing::Combine(
|
||||
interpolateCasesLinearOnnx_Smoke,
|
||||
::testing::ValuesIn(shapeParams4D_Smoke),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
@ -504,12 +503,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateLinearOnnx_Layout_Test, InterpolateLayerGPUT
|
||||
::testing::Combine(
|
||||
interpolateCasesLinearOnnx_Full,
|
||||
::testing::ValuesIn(shapeParams4D_Full),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(true, false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
const auto interpolateCasesLinear_Smoke = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR),
|
||||
::testing::ValuesIn(coordinateTransformModes_Smoke),
|
||||
::testing::ValuesIn(defNearestModes),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -518,7 +517,7 @@ const auto interpolateCasesLinear_Smoke = ::testing::Combine(
|
||||
::testing::ValuesIn(cubeCoefs));
|
||||
|
||||
const auto interpolateCasesLinear_Full = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR),
|
||||
::testing::ValuesIn(coordinateTransformModes_Full),
|
||||
::testing::ValuesIn(defNearestModes),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -530,7 +529,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinear_Layout_Test, InterpolateLayerGP
|
||||
::testing::Combine(
|
||||
interpolateCasesLinear_Smoke,
|
||||
::testing::ValuesIn(shapeParams4D_Smoke),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
@ -538,12 +537,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateLinear_Layout_Test, InterpolateLayerGPUTest,
|
||||
::testing::Combine(
|
||||
interpolateCasesLinear_Full,
|
||||
::testing::ValuesIn(shapeParams4DReducedAxis_Full),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(true, false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
const auto interpolateCasesCubic_Smoke = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::CUBIC),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::CUBIC),
|
||||
::testing::ValuesIn(coordinateTransformModes_Smoke),
|
||||
::testing::ValuesIn(defNearestModes),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -552,7 +551,7 @@ const auto interpolateCasesCubic_Smoke = ::testing::Combine(
|
||||
::testing::ValuesIn(cubeCoefs));
|
||||
|
||||
const auto interpolateCasesCubic_Full = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::CUBIC),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::CUBIC),
|
||||
::testing::ValuesIn(coordinateTransformModes_Full),
|
||||
::testing::ValuesIn(defNearestModes),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -564,7 +563,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateCubic_Layout_Test, InterpolateLayerGPU
|
||||
::testing::Combine(
|
||||
interpolateCasesCubic_Smoke,
|
||||
::testing::ValuesIn(shapeParams4D_Smoke),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
@ -572,7 +571,7 @@ INSTANTIATE_TEST_SUITE_P(InterpolateCubic_Layout_Test, InterpolateLayerGPUTest,
|
||||
::testing::Combine(
|
||||
interpolateCasesCubic_Full,
|
||||
::testing::ValuesIn(shapeParams4DReducedAxis_Full),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(true, false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
@ -592,42 +591,42 @@ const std::vector<std::vector<int64_t>> reducedAxes5D = {
|
||||
|
||||
const std::vector<ShapeParams> shapeParams5D_Smoke = {
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{1.f, 1.f, 1.25f, 1.5f, 0.5f}, {1.f, 1.f, 1.25f, 1.25f, 1.25f}, {1.f, 1.f, 1.25f, 1.5f, 0.5f}},
|
||||
defaultAxes5D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
InputShape{{-1, {2, 10}, -1, -1, -1}, {{1, 4, 2, 3, 4}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{1.f, 1.f, 1.5f, 2.f, 0.5f}},
|
||||
defaultAxes5D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{1, 11, 5, 6, 2}, {2, 7, 8, 7, 4}, {1, 11, 5, 6, 2}},
|
||||
defaultAxes5D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {2, 10}, -1, -1, -1}, {{1, 4, 2, 3, 4}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{1, 4, 4, 1, 6}},
|
||||
defaultAxes5D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {2, 10}, -1, -1, -1}, {{1, 4, 2, 3, 4}}},
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ngraph::helpers::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
ov::test::utils::InputLayerType::PARAMETER,
|
||||
{{4, 1, 6}},
|
||||
reducedAxes5D.front()
|
||||
},
|
||||
@ -635,33 +634,33 @@ const std::vector<ShapeParams> shapeParams5D_Smoke = {
|
||||
|
||||
const std::vector<ShapeParams> shapeParams5D_Full = {
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||
InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{1.f, 1.f, 1.25f, 1.5f, 0.5f}},
|
||||
defaultAxes5D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {1, 11, 5, 5, 8}, {1, 11, 4, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{1, 11, 5, 6, 4}},
|
||||
defaultAxes5D.front()
|
||||
},
|
||||
ShapeParams{
|
||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {1, 11, 5, 5, 8}, {1, 11, 4, 4, 4}}},
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
{{1, 6, 4}},
|
||||
reducedAxes5D.front()
|
||||
}
|
||||
};
|
||||
|
||||
const auto interpolateCasesLinearOnnx5D_Smoke = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX),
|
||||
::testing::ValuesIn(coordinateTransformModes_Smoke),
|
||||
::testing::ValuesIn(defNearestModes),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -670,7 +669,7 @@ const auto interpolateCasesLinearOnnx5D_Smoke = ::testing::Combine(
|
||||
::testing::ValuesIn(cubeCoefs));
|
||||
|
||||
const auto interpolateCasesLinearOnnx5D_Full = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX),
|
||||
::testing::ValuesIn(coordinateTransformModes_Full),
|
||||
::testing::ValuesIn(defNearestModes),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -682,7 +681,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinearOnnx5D_Layout_Test, InterpolateL
|
||||
::testing::Combine(
|
||||
interpolateCasesLinearOnnx5D_Smoke,
|
||||
::testing::ValuesIn(shapeParams5D_Smoke),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
@ -690,12 +689,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateLinearOnnx5D_Layout_Test, InterpolateLayerGP
|
||||
::testing::Combine(
|
||||
interpolateCasesLinearOnnx5D_Full,
|
||||
::testing::ValuesIn(shapeParams5D_Full),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(true, false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
const auto interpolateCasesNN5D_Smoke = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST),
|
||||
::testing::ValuesIn(coordinateTransformModes_Smoke),
|
||||
::testing::ValuesIn(nearestModes_Smoke),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -704,7 +703,7 @@ const auto interpolateCasesNN5D_Smoke = ::testing::Combine(
|
||||
::testing::ValuesIn(cubeCoefs));
|
||||
|
||||
const auto interpolateCasesNN5D_Full = ::testing::Combine(
|
||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST),
|
||||
::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST),
|
||||
::testing::ValuesIn(coordinateTransformModes_Full),
|
||||
::testing::ValuesIn(nearestModes_Full),
|
||||
::testing::ValuesIn(antialias),
|
||||
@ -716,7 +715,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN5D_Layout_Test, InterpolateLayerGPUT
|
||||
::testing::Combine(
|
||||
interpolateCasesNN5D_Smoke,
|
||||
::testing::ValuesIn(shapeParams5D_Smoke),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(true, false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);
|
||||
|
||||
@ -724,10 +723,7 @@ INSTANTIATE_TEST_SUITE_P(InterpolateNN5D_Layout_Test, InterpolateLayerGPUTest,
|
||||
::testing::Combine(
|
||||
interpolateCasesNN5D_Full,
|
||||
::testing::ValuesIn(shapeParams5D_Full),
|
||||
::testing::Values(ElementType::f32),
|
||||
::testing::Values(ov::element::f32),
|
||||
::testing::Values(true, false)),
|
||||
InterpolateLayerGPUTest::getTestCaseName);

} // namespace

} // namespace GPULayerTestsDefinitions
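
For orientation on the Interpolate hunks above: the refactored SetUp() builds either ov::op::v11::Interpolate (one sizes-or-scales input, axes optional) or ov::op::v4::Interpolate (sizes, scales and axes are all passed). A minimal sketch of that construction follows; the shapes, scales and attribute values are illustrative placeholders.

#include <memory>
#include <vector>

#include "openvino/op/constant.hpp"
#include "openvino/op/interpolate.hpp"
#include "openvino/op/parameter.hpp"

// Mirrors the v4/v11 branch of the refactored SetUp(); values are placeholders.
std::shared_ptr<ov::op::Op> make_example_interpolate(bool use_v11) {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{-1, -1, -1, -1});

    const std::vector<float> scales{1.0f, 1.0f, 1.25f, 1.5f};
    const std::vector<int32_t> sizes{1, 11, 5, 6};
    const std::vector<int64_t> axes{0, 1, 2, 3};

    auto scales_input = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{scales.size()}, scales);
    auto sizes_input = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{sizes.size()}, sizes);
    auto axes_input = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{axes.size()}, axes);

    // Same attribute order as the InterpolateAttrs initializer used in the tests.
    ov::op::v4::Interpolate::InterpolateAttrs attrs{ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX,
                                                    ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
                                                    {0, 0, 0, 0},
                                                    {0, 0, 0, 0},
                                                    ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL,
                                                    ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
                                                    false,
                                                    -0.75};

    if (use_v11) {
        // v11 takes the scales (or sizes) input directly; axes may be omitted.
        return std::make_shared<ov::op::v11::Interpolate>(data, scales_input, axes_input, attrs);
    }
    // v4 always takes sizes, scales and axes.
    return std::make_shared<ov::op::v4::Interpolate>(data, sizes_input, scales_input, axes_input, attrs);
}
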
@ -2,18 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/mat_mul.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ie_precision.hpp"
#include "ov_models/builders.hpp"
#include <string>
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/test_enums.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace ngraph;
using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/matmul.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;
|
||||
|
||||
struct ShapeRelatedParams {
|
||||
std::vector<InputShape> inputShapes;
|
||||
@ -22,36 +21,36 @@ struct ShapeRelatedParams {
|
||||
|
||||
typedef std::tuple<
|
||||
ShapeRelatedParams,
|
||||
ElementType, // Network precision
|
||||
ElementType, // Input precision
|
||||
ElementType, // Output precision
|
||||
ngraph::helpers::InputLayerType, // Secondary input type
|
||||
TargetDevice, // Device name
|
||||
ov::element::Type, // Network precision
|
||||
ov::element::Type, // Input precision
|
||||
ov::element::Type, // Output precision
|
||||
ov::test::utils::InputLayerType, // Secondary input type
|
||||
std::string, // Device name
|
||||
std::map<std::string, std::string> // Additional network configuration
|
||||
> MatMulLayerTestParamsSet;
|
||||
|
||||
class MatMulLayerGPUTest : public testing::WithParamInterface<MatMulLayerTestParamsSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(const testing::TestParamInfo<MatMulLayerTestParamsSet>& obj) {
|
||||
MatMulLayerTestParamsSet basicParamsSet = obj.param;
|
||||
|
||||
ElementType netType;
|
||||
ElementType inType, outType;
|
||||
ShapeRelatedParams shapeRelatedParams;
|
||||
ngraph::helpers::InputLayerType secondaryInputType;
|
||||
TargetDevice targetDevice;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
std::tie(shapeRelatedParams, netType, inType, outType, secondaryInputType, targetDevice, additionalConfig) =
|
||||
ov::element::Type model_type;
|
||||
ov::element::Type inType, outType;
|
||||
ShapeRelatedParams shape_related_params;
|
||||
ov::test::utils::InputLayerType secondary_input_type;
|
||||
std::string targetDevice;
|
||||
std::map<std::string, std::string> additional_config;
|
||||
std::tie(shape_related_params, model_type, inType, outType, secondary_input_type, targetDevice, additional_config) =
|
||||
basicParamsSet;
|
||||
|
||||
std::ostringstream result;
|
||||
result << "IS=";
|
||||
for (const auto& shape : shapeRelatedParams.inputShapes) {
|
||||
for (const auto& shape : shape_related_params.inputShapes) {
|
||||
result << ov::test::utils::partialShape2str({shape.first}) << "_";
|
||||
}
|
||||
result << "TS=";
|
||||
for (const auto& shape : shapeRelatedParams.inputShapes) {
|
||||
for (const auto& shape : shape_related_params.inputShapes) {
|
||||
result << "(";
|
||||
if (!shape.second.empty()) {
|
||||
auto itr = shape.second.begin();
|
||||
@ -61,15 +60,15 @@ public:
|
||||
}
|
||||
result << ")_";
|
||||
}
|
||||
result << "transpose_a=" << shapeRelatedParams.transpose.first << "_";
|
||||
result << "transpose_b=" << shapeRelatedParams.transpose.second << "_";
|
||||
result << "secondaryInputType=" << secondaryInputType << "_";
|
||||
result << "netPRC=" << netType << "_";
|
||||
result << "transpose_a=" << shape_related_params.transpose.first << "_";
|
||||
result << "transpose_b=" << shape_related_params.transpose.second << "_";
|
||||
result << "secondary_input_type=" << secondary_input_type << "_";
|
||||
result << "netPRC=" << model_type << "_";
|
||||
result << "inPRC=" << inType << "_";
|
||||
result << "outPRC=" << outType << "_";
|
||||
result << "trgDev=" << targetDevice;
|
||||
result << "config=(";
|
||||
for (const auto& configEntry : additionalConfig) {
|
||||
for (const auto& configEntry : additional_config) {
|
||||
result << configEntry.first << ", " << configEntry.second << ":";
|
||||
}
|
||||
result << ")";
|
||||
@ -87,17 +86,17 @@ protected:
|
||||
void SetUp() override {
|
||||
MatMulLayerTestParamsSet basicParamsSet = this->GetParam();
|
||||
|
||||
ShapeRelatedParams shapeRelatedParams;
|
||||
ElementType netType;
|
||||
helpers::InputLayerType secondaryInputType;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
ShapeRelatedParams shape_related_params;
|
||||
ov::element::Type model_type;
|
||||
ov::test::utils::InputLayerType secondary_input_type;
|
||||
std::map<std::string, std::string> additional_config;
|
||||
|
||||
std::tie(shapeRelatedParams, netType, inType, outType, secondaryInputType, targetDevice, additionalConfig) = basicParamsSet;
|
||||
std::tie(shape_related_params, model_type, inType, outType, secondary_input_type, targetDevice, additional_config) = basicParamsSet;
|
||||
|
||||
init_input_shapes(shapeRelatedParams.inputShapes);
|
||||
init_input_shapes(shape_related_params.inputShapes);
|
||||
|
||||
bool transpA = shapeRelatedParams.transpose.first;
|
||||
bool transpB = shapeRelatedParams.transpose.second;
|
||||
bool transpA = shape_related_params.transpose.first;
|
||||
bool transpB = shape_related_params.transpose.second;
|
||||
|
||||
if (transpA) {
|
||||
transpose(inputDynamicShapes[0]);
|
||||
@ -115,69 +114,64 @@ protected:
|
||||
const auto& inShapeA = inputDynamicShapes[0];
|
||||
const auto& inShapeB = inputDynamicShapes[1];
|
||||
|
||||
configuration.insert(additionalConfig.begin(), additionalConfig.end());
|
||||
configuration.insert(additional_config.begin(), additional_config.end());
|
||||
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netType, inShapeA)};
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inShapeA)};
|
||||
|
||||
std::shared_ptr<ov::Node> matrixB;
|
||||
if (secondaryInputType == helpers::InputLayerType::PARAMETER) {
|
||||
auto param = std::make_shared<ov::op::v0::Parameter>(netType, inShapeB);
|
||||
if (secondary_input_type == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inShapeB);
|
||||
matrixB = param;
|
||||
params.push_back(param);
|
||||
} else {
|
||||
ASSERT_TRUE(inShapeB.is_static());
|
||||
auto tensor = ov::test::utils::create_and_fill_tensor(netType, inShapeB.to_shape());
|
||||
auto tensor = ov::test::utils::create_and_fill_tensor(model_type, inShapeB.to_shape());
|
||||
matrixB = std::make_shared<ov::op::v0::Constant>(tensor);
|
||||
}
|
||||
|
||||
auto matMul = std::make_shared<ov::op::v0::MatMul>(params[0], matrixB, transpA, transpB);
|
||||
auto makeFunction = [](const ngraph::element::Type &ngPrc, ngraph::ParameterVector ¶ms, const std::shared_ptr<ngraph::Node> &lastNode) {
|
||||
ngraph::ResultVector results;
|
||||
auto makeFunction = [](const ov::element::Type &ngPrc, ov::ParameterVector ¶ms, const std::shared_ptr<ov::Node> &lastNode) {
|
||||
ov::ResultVector results;
|
||||
|
||||
for (size_t i = 0; i < lastNode->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(lastNode->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));
|
||||
|
||||
return std::make_shared<ngraph::Function>(results, params, "MatMul");
|
||||
return std::make_shared<ov::Model>(results, params, "MatMul");
|
||||
};
|
||||
function = makeFunction(netType, params, matMul);
|
||||
function = makeFunction(model_type, params, matMul);
|
||||
}
|
||||
};

TEST_P(MatMulLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

TEST_P(MatMulLayerGPUTest, Inference) {
run();
}

namespace {

/* ============= Common params ============= */
std::map<std::string, std::string> emptyAdditionalConfig;

std::vector<std::map<std::string, std::string>> additionalConfig {
std::vector<std::map<std::string, std::string>> additional_config {
std::map<std::string, std::string>{/* empty config */},
};

const std::vector<ElementType> netPRCs {
ElementType::f32,
const std::vector<ov::element::Type> netPRCs {
ov::element::f32,
};


/* ============= FullyConnected ============= */
namespace fullyConnected {

const std::vector<ShapeRelatedParams> IS2D_smoke = {
{static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, true}},
{static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, true}},

{static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, false}},
{static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, true}},

{static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, false}},
{static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, false}},

{static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, false}},
{static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, true}},

{
{
@ -196,17 +190,17 @@ const std::vector<ShapeRelatedParams> IS2D_smoke = {
};

const std::vector<ShapeRelatedParams> IS2D_nightly = {
{static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, false}},
{static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, false}},

{static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, false}},
{static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, true}},

{static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, true}},
{static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, true}},

{static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, true}},
{static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, false}},

{
{
@ -232,31 +226,31 @@ const std::vector<ShapeRelatedParams> IS2D_nightly = {
};

const auto testParams2D_smoke = ::testing::Combine(::testing::ValuesIn(IS2D_smoke),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::CONSTANT),
::testing::Values(ov::element::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));

INSTANTIATE_TEST_SUITE_P(smoke_FC_2D, MatMulLayerGPUTest, testParams2D_smoke, MatMulLayerGPUTest::getTestCaseName);

const auto testParams2D_nightly = ::testing::Combine(::testing::ValuesIn(IS2D_nightly),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::CONSTANT),
::testing::Values(ov::element::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));

INSTANTIATE_TEST_SUITE_P(nightly_FC_2D, MatMulLayerGPUTest, testParams2D_nightly, MatMulLayerGPUTest::getTestCaseName);

const std::vector<ShapeRelatedParams> IS3D_smoke = {
{static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, false}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, true}},

{static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, false}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, true}},

{
{
@ -266,7 +260,7 @@ const std::vector<ShapeRelatedParams> IS3D_smoke = {
{false, true}
},

{static_shapes_to_test_representation({{1, 429}, {1, 429, 1}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{1, 429}, {1, 429, 1}}), {true, true}},
{
{
{{-1, -1}, {{1, 129}, {2, 129}, {1, 129}, {2, 129}}},
@ -285,11 +279,11 @@ const std::vector<ShapeRelatedParams> IS3D_smoke = {
};

const std::vector<ShapeRelatedParams> IS3D_nightly = {
{static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, false}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, true}},

{static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, false}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, true}},

{
{
@ -315,20 +309,20 @@ const std::vector<ShapeRelatedParams> IS3D_nightly = {
};

const auto fullyConnectedParams3D_smoke = ::testing::Combine(::testing::ValuesIn(IS3D_smoke),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::CONSTANT),
::testing::Values(ov::element::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));

INSTANTIATE_TEST_SUITE_P(smoke_FC_3D, MatMulLayerGPUTest, fullyConnectedParams3D_smoke, MatMulLayerGPUTest::getTestCaseName);

const auto fullyConnectedParams3D_nightly = ::testing::Combine(::testing::ValuesIn(IS3D_nightly),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::CONSTANT),
::testing::Values(ov::element::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));

@ -366,62 +360,59 @@ const std::vector<ShapeRelatedParams> IS4D_smoke = {
};

const auto fullyConnectedParams4D_smoke = ::testing::Combine(::testing::ValuesIn(IS4D_smoke),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::CONSTANT),
::testing::Values(ov::element::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));

INSTANTIATE_TEST_SUITE_P(smoke_FC_4D, MatMulLayerGPUTest, fullyConnectedParams4D_smoke, MatMulLayerGPUTest::getTestCaseName);

} // namespace fullyConnected

/* ============= MatMul ============= */
namespace matmul {

const std::vector<ShapeRelatedParams> IS = {
{static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, false}},
{static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, false}},
{static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, true}},
{static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, true}},

{static_shapes_to_test_representation({{1, 2, 100010, 120}, {120, 5}}), {true, true}},
{static_shapes_to_test_representation({{1, 2, 200010, 120}, {120, 5}}), {false, true}},
{static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, true}},
{static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{1, 2, 100010, 120}, {120, 5}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{1, 2, 200010, 120}, {120, 5}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, false}},

{static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, false}},
{static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, false}},
{static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, true}},
{static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, true}},

{static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, false}},
{static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, false}},
{static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, true}},
{static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, true}},

{static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, false}},
{static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, false}},
{static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, true}},
{static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, true}}
{ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, true}}
};

const std::vector<ShapeRelatedParams> IS_OneDNN = {
{static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, false}},
{static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, false}},
{static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, true}},
{static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, true}},

{static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, false}},
{static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, false}},
{static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, true}},
{static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, true}},
{ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, true}},

{static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, false}},
{static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, false}},
{static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, true}},
{static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, true}}
{ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, false}},
{ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, false}},
{ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, true}},
{ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, true}}
};

const std::vector<ShapeRelatedParams> IS_Dynamic = {
@ -678,44 +669,41 @@ const std::vector<ShapeRelatedParams> IS_Dynamic_nightly = {

const auto testParams = ::testing::Combine(::testing::ValuesIn(IS),
::testing::ValuesIn(netPRCs),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::PARAMETER),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::ValuesIn(additionalConfig));
::testing::ValuesIn(additional_config));

INSTANTIATE_TEST_SUITE_P(smoke_MM_Static, MatMulLayerGPUTest, testParams, MatMulLayerGPUTest::getTestCaseName);

const auto testParamsOneDNN = ::testing::Combine(::testing::ValuesIn(IS_OneDNN),
::testing::Values(ElementType::f16),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::PARAMETER),
::testing::Values(ov::element::f16),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::ValuesIn(additionalConfig));
::testing::ValuesIn(additional_config));

INSTANTIATE_TEST_SUITE_P(smoke_MM_Static_OneDNN, MatMulLayerGPUTest, testParamsOneDNN, MatMulLayerGPUTest::getTestCaseName);

const auto testParamsDynamic = ::testing::Combine(::testing::ValuesIn(IS_Dynamic),
::testing::ValuesIn(netPRCs),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::PARAMETER),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::ValuesIn(additionalConfig));
::testing::ValuesIn(additional_config));

INSTANTIATE_TEST_SUITE_P(smoke_MM_Dynamic, MatMulLayerGPUTest, testParamsDynamic, MatMulLayerGPUTest::getTestCaseName);

const auto testParamsDynamic_nightly = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_nightly),
::testing::ValuesIn(netPRCs),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::PARAMETER),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::ValuesIn(additionalConfig));
::testing::ValuesIn(additional_config));

INSTANTIATE_TEST_SUITE_P(nightly_MM_Dynamic, MatMulLayerGPUTest, testParamsDynamic_nightly, MatMulLayerGPUTest::getTestCaseName);

} // namespace matmul
} // namespace
} // namespace GPULayerTestsDefinitions
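The refactored MatMul test above replaces the ngraph::Function-based makeFunction lambda with an ov::Model built from ov::op::v0 nodes. A condensed, self-contained sketch of that construction pattern, assuming both matrices are runtime parameters (the helper name build_matmul_model is illustrative only, not part of the test code):

#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/op/matmul.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

std::shared_ptr<ov::Model> build_matmul_model(const ov::element::Type& precision,
                                              const ov::PartialShape& shape_a,
                                              const ov::PartialShape& shape_b,
                                              bool transpose_a,
                                              bool transpose_b) {
    // Both inputs are runtime parameters, matching InputLayerType::PARAMETER above.
    auto a = std::make_shared<ov::op::v0::Parameter>(precision, shape_a);
    auto b = std::make_shared<ov::op::v0::Parameter>(precision, shape_b);
    auto matmul = std::make_shared<ov::op::v0::MatMul>(a, b, transpose_a, transpose_b);
    // ov::Model replaces ngraph::Function; Result wraps the single MatMul output.
    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(matmul)};
    return std::make_shared<ov::Model>(results, ov::ParameterVector{a, b}, "MatMul");
}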

@ -2,36 +2,38 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <shared_test_classes/single_layer/mvn.hpp>
#include "ov_models/builders.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/mvn.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

using basicGPUMvnParams = std::tuple<
InputShape, // Input shapes
ElementType, // Input precision
ov::element::Type, // Input precision
std::vector<int>, // Reduction axes
bool, // Normalize variance
double>; // Epsilon

using MvnLayerGPUTestParamSet = std::tuple<
basicGPUMvnParams,
ElementType>; // CNNNetwork input precision
ov::element::Type>; // CNNNetwork input precision

class MvnLayerGPUTest : public testing::WithParamInterface<MvnLayerGPUTestParamSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<MvnLayerGPUTestParamSet> obj) {
basicGPUMvnParams basicParamsSet;
ElementType inputPrecision;
ov::element::Type inputPrecision;
std::tie(basicParamsSet, inputPrecision) = obj.param;

InputShape inputShapes;
ElementType netPrecision;
ov::element::Type netPrecision;
std::vector<int> axes;
bool normalizeVariance;
double eps;
@ -56,11 +58,11 @@ protected:
targetDevice = ov::test::utils::DEVICE_GPU;

basicGPUMvnParams basicParamsSet;
ElementType inPrc;
ov::element::Type inPrc;
std::tie(basicParamsSet, inPrc) = this->GetParam();

InputShape inputShapes;
ElementType netPrecision;
ov::element::Type netPrecision;
std::vector<int> axes;
bool normalizeVariance;
double eps;
@ -75,7 +77,7 @@ protected:
for (auto&& shape : inputDynamicShapes)
params.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, shape));

auto axesNode = ngraph::builder::makeConstant(axesType, ngraph::Shape{axes.size()}, axes);
auto axesNode = std::make_shared<ov::op::v0::Constant>(axesType, ov::Shape{axes.size()}, axes);
ov::op::MVNEpsMode nEpsMode = ov::op::MVNEpsMode::INSIDE_SQRT;
if (eps_mode == "outside_sqrt")
nEpsMode = ov::op::MVNEpsMode::OUTSIDE_SQRT;
@ -83,21 +85,18 @@ protected:

rel_threshold = 0.015f;

ngraph::ResultVector results;
ov::ResultVector results;
for (size_t i = 0; i < mvn->get_output_size(); ++i) {
results.push_back(std::make_shared<ngraph::opset1::Result>(mvn->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(mvn->output(i)));
}
function = std::make_shared<ngraph::Function>(results, params, "MVN");
function = std::make_shared<ov::Model>(results, params, "MVN");
}
};

TEST_P(MvnLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TEST_P(MvnLayerGPUTest, Inference) {
run();
}

namespace {

const std::vector<InputShape> inputShapes_1D = {
{
// dynamic
@ -205,12 +204,12 @@ const std::vector<int> reduction_axes_12 = {1, 2};
const std::vector<int> reduction_axes_3 = {3};
const std::vector<int> reduction_axes_2 = {2};

std::vector<ElementType> inpPrc = {ElementType::i8, ElementType::f16, ElementType::f32};
std::vector<ov::element::Type> inpPrc = {ov::element::i8, ov::element::f16, ov::element::f32};

const auto Mvn3D = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(inputShapes_3D),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::f32),
::testing::ValuesIn({reduction_axes_12, reduction_axes_2}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilon)),
@ -221,7 +220,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn3D, MvnLayerGPUTest, Mvn3D, Mv
const auto Mvn4D = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(inputShapes_4D),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::f32),
::testing::ValuesIn({reduction_axes_2, reduction_axes_3, reduction_axes_12, reduction_axes_23, reduction_axes_123}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilon)),
@ -232,13 +231,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn4D, MvnLayerGPUTest, Mvn4D, Mv
const auto Mvn5D = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(inputShapes_5D),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::f32),
::testing::ValuesIn({reduction_axes_3, reduction_axes_23, reduction_axes_123, reduction_axes_1234}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilon)),
::testing::ValuesIn(inpPrc));

INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn5D, MvnLayerGPUTest, Mvn5D, MvnLayerGPUTest::getTestCaseName);

} // namespace
} // namespace GPULayerTestsDefinitions
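In the MVN test above, ngraph::builder::makeConstant for the reduction axes becomes a plain ov::op::v0::Constant. A small sketch of that pattern, assuming the v6 MVN operator that the eps-mode handling in this file implies (make_mvn is an illustrative helper name, not test code):

#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/mvn.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::op::v6::MVN> make_mvn(const std::shared_ptr<ov::op::v0::Parameter>& data,
                                          const std::vector<int>& axes,
                                          bool normalize_variance,
                                          float eps) {
    // ngraph::builder::makeConstant(...) is replaced by a direct Constant construction.
    auto axes_node = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{axes.size()}, axes);
    return std::make_shared<ov::op::v6::MVN>(data, axes_node, normalize_variance, eps,
                                             ov::op::MVNEpsMode::INSIDE_SQRT);
}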

@ -2,22 +2,16 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include "ov_models/utils/ov_helpers.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "shared_test_classes/single_layer/non_max_suppression.hpp"
#include "common_test_utils/test_constants.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/test_enums.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace InferenceEngine;
using namespace ov::test;
using namespace ngraph;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/non_max_suppression.hpp"

namespace GPULayerTestsDefinitions {
namespace {

enum {
BATCHES,
@ -32,9 +26,9 @@ using TargetShapeParams = std::tuple<size_t, // Number of batches
using InputShapeParams = std::tuple<std::vector<ov::Dimension>, // bounds for input dynamic shape
std::vector<TargetShapeParams>>; // target input dimensions

using InputPrecisions = std::tuple<ElementType, // boxes and scores precisions
ElementType, // max_output_boxes_per_class precision
ElementType>; // iou_threshold, score_threshold, soft_nms_sigma precisions
using InputPrecisions = std::tuple<ov::element::Type, // boxes and scores precisions
ov::element::Type, // max_output_boxes_per_class precision
ov::element::Type>; // iou_threshold, score_threshold, soft_nms_sigma precisions

using ThresholdValues = std::tuple<float, // IOU threshold
float, // Score threshold
@ -44,13 +38,14 @@ using NmsLayerTestParams = std::tuple<InputShapeParams,
InputPrecisions, // Input precisions
int32_t, // Max output boxes per class
ThresholdValues, // IOU, Score, Soft NMS sigma
ngraph::op::v9::NonMaxSuppression::BoxEncodingType, // Box encoding
ov::op::v9::NonMaxSuppression::BoxEncodingType, // Box encoding
bool, // Sort result descending
ngraph::element::Type, // Output type
TargetDevice, // Device name
ov::element::Type, // Output type
std::string, // Device name
std::map<std::string, std::string>>; // Additional network configuration

class NmsLayerGPUTest : public testing::WithParamInterface<NmsLayerTestParams>, virtual public SubgraphBaseTest {
class NmsLayerGPUTest : public testing::WithParamInterface<NmsLayerTestParams>,
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<NmsLayerTestParams>& obj) {
InputShapeParams inShapeParams;
@ -58,17 +53,17 @@ public:
int32_t maxOutBoxesPerClass;
ThresholdValues thrValues;
float iouThr, scoreThr, softNmsSigma;
op::v9::NonMaxSuppression::BoxEncodingType boxEncoding;
ov::op::v9::NonMaxSuppression::BoxEncodingType boxEncoding;
bool sortResDescend;
element::Type outType;
TargetDevice targetDevice;
ov::element::Type outType;
std::string targetDevice;
std::map<std::string, std::string> additionalConfig;
std::tie(inShapeParams, inPrecisions, maxOutBoxesPerClass, thrValues, boxEncoding, sortResDescend, outType,
targetDevice, additionalConfig) = obj.param;

std::tie(iouThr, scoreThr, softNmsSigma) = thrValues;

ElementType paramsPrec, maxBoxPrec, thrPrec;
ov::element::Type paramsPrec, maxBoxPrec, thrPrec;
std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions;

std::vector<ov::Dimension> bounds;
@ -88,6 +83,7 @@ public:
result << "paramsPrec=" << paramsPrec << "_maxBoxPrec=" << maxBoxPrec << "_thrPrec=" << thrPrec << "_";
result << "maxOutBoxesPerClass=" << maxOutBoxesPerClass << "_";
result << "iouThr=" << iouThr << "_scoreThr=" << scoreThr << "_softNmsSigma=" << softNmsSigma << "_";
using ov::operator<<;
result << "boxEncoding=" << boxEncoding << "_sortResDescend=" << sortResDescend << "_outType=" << outType << "_";
result << "config=(";
for (const auto& configEntry : additionalConfig) {
@ -99,7 +95,7 @@ public:
return result.str();
}

void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
SubgraphBaseTest::generate_inputs(targetInputStaticShapes);
// w/a to fill valid data for port 2
const auto& funcInputs = function->inputs();
@ -122,13 +118,13 @@ protected:
InputPrecisions inPrecisions;
ThresholdValues thrValues;
float iouThr, scoreThr, softNmsSigma;
op::v9::NonMaxSuppression::BoxEncodingType boxEncoding;
ov::op::v9::NonMaxSuppression::BoxEncodingType boxEncoding;
bool sortResDescend;
element::Type outType;
ov::element::Type outType;
std::map<std::string, std::string> additionalConfig;
std::tie(inShapeParams, inPrecisions, maxOutBoxesPerClass, thrValues, boxEncoding, sortResDescend, outType,
targetDevice, additionalConfig) = this->GetParam();
element::Type paramsPrec, maxBoxPrec, thrPrec;
ov::element::Type paramsPrec, maxBoxPrec, thrPrec;
std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions;

std::tie(iouThr, scoreThr, softNmsSigma) = thrValues;
@ -137,18 +133,18 @@ protected:
std::tie(bounds, targetInDims) = inShapeParams;

if (!bounds.empty()) {
inputDynamicShapes = std::vector<ngraph::PartialShape>{{bounds[BATCHES], bounds[BOXES], 4}, {bounds[BATCHES], bounds[CLASSES], bounds[BOXES]}};
inputDynamicShapes = std::vector<ov::PartialShape>{{bounds[BATCHES], bounds[BOXES], 4}, {bounds[BATCHES], bounds[CLASSES], bounds[BOXES]}};
} else {
size_t batches, boxes, classes;
std::tie(batches, boxes, classes) = targetInDims.front();
ov::Dimension numBatches(batches), numBoxes(boxes), numClasses(classes);
inputDynamicShapes = std::vector<ngraph::PartialShape>{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}};
inputDynamicShapes = std::vector<ov::PartialShape>{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}};
}

for (const auto &ts : targetInDims) {
size_t numBatches, numBoxes, numClasses;
std::tie(numBatches, numBoxes, numClasses) = ts;
targetStaticShapes.push_back(std::vector<ngraph::Shape>{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}});
targetStaticShapes.push_back(std::vector<ov::Shape>{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}});
}

ov::ParameterVector params;
@ -158,17 +154,18 @@ protected:
params[0]->set_friendly_name("param_1");
params[1]->set_friendly_name("param_2");

auto maxOutBoxesPerClassNode = builder::makeConstant(maxBoxPrec, ngraph::Shape{}, std::vector<int32_t>{maxOutBoxesPerClass})->output(0);
auto iouThrNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{iouThr})->output(0);
auto scoreThrNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{scoreThr})->output(0);
auto softNmsSigmaNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{softNmsSigma})->output(0);
auto nms = std::make_shared<ngraph::op::v9::NonMaxSuppression>(params[0], params[1], maxOutBoxesPerClassNode, iouThrNode, scoreThrNode,
softNmsSigmaNode, boxEncoding, sortResDescend, outType);
ngraph::ResultVector results;
auto maxOutBoxesPerClassNode = std::make_shared<ov::op::v0::Constant>(maxBoxPrec, ov::Shape{}, std::vector<int32_t>{maxOutBoxesPerClass});
auto iouThrNode = std::make_shared<ov::op::v0::Constant>(thrPrec, ov::Shape{}, std::vector<float>{iouThr});
auto scoreThrNode = std::make_shared<ov::op::v0::Constant>(thrPrec, ov::Shape{}, std::vector<float>{scoreThr});
auto softNmsSigmaNode = std::make_shared<ov::op::v0::Constant>(thrPrec, ov::Shape{}, std::vector<float>{softNmsSigma});

auto nms = std::make_shared<ov::op::v9::NonMaxSuppression>(params[0], params[1], maxOutBoxesPerClassNode, iouThrNode, scoreThrNode,
softNmsSigmaNode, boxEncoding, sortResDescend, outType);
ov::ResultVector results;
for (size_t i = 0; i < nms->get_output_size(); i++) {
results.push_back(std::make_shared<ngraph::opset4::Result>(nms->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(nms->output(i)));
}
function = std::make_shared<ngraph::Function>(results, params, "Nms");
function = std::make_shared<ov::Model>(results, params, "Nms");
}

private:
@ -397,14 +394,10 @@ private:
int32_t maxOutBoxesPerClass;
};

TEST_P(NmsLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

TEST_P(NmsLayerGPUTest, Inference) {
run();
}

namespace {

std::map<std::string, std::string> emptyAdditionalConfig;

const std::vector<InputShapeParams> inShapeParams = {
@ -419,18 +412,20 @@ const std::vector<InputShapeParams> inShapeParams = {
const std::vector<int32_t> maxOutBoxPerClass = {5, 20};
const std::vector<float> threshold = {0.3f, 0.7f};
const std::vector<float> sigmaThreshold = {0.0f, 0.5f};
const std::vector<op::v9::NonMaxSuppression::BoxEncodingType> encodType = {op::v9::NonMaxSuppression::BoxEncodingType::CENTER,
op::v9::NonMaxSuppression::BoxEncodingType::CORNER};
const std::vector<ov::op::v9::NonMaxSuppression::BoxEncodingType> encodType =
{ov::op::v9::NonMaxSuppression::BoxEncodingType::CENTER,
ov::op::v9::NonMaxSuppression::BoxEncodingType::CORNER};

const std::vector<bool> sortResDesc = {true, false};
const std::vector<element::Type> outType = {element::i32};
const std::vector<ov::element::Type> outType = {ov::element::i32};

INSTANTIATE_TEST_SUITE_P(smoke_Nms_dynamic, NmsLayerGPUTest,
::testing::Combine(
::testing::ValuesIn(inShapeParams),
::testing::Combine(
::testing::Values(ElementType::f32),
::testing::Values(ElementType::i32),
::testing::Values(ElementType::f32)),
::testing::Values(ov::element::f32),
::testing::Values(ov::element::i32),
::testing::Values(ov::element::f32)),
::testing::ValuesIn(maxOutBoxPerClass),
::testing::Combine(
::testing::ValuesIn(threshold),
@ -444,4 +439,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_Nms_dynamic, NmsLayerGPUTest,
NmsLayerGPUTest::getTestCaseName);

} // namespace
} // namespace GPULayerTestsDefinitions
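The NMS changes above turn the builder::makeConstant scalars for max_output_boxes_per_class and the thresholds into ov::op::v0::Constant nodes with an empty ov::Shape. A minimal sketch of one such scalar constant (the helper name make_scalar_f32 is illustrative only):

#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"

std::shared_ptr<ov::op::v0::Constant> make_scalar_f32(float value) {
    // ov::Shape{} denotes a 0-D (scalar) constant, so the makeConstant helper
    // and the trailing ->output(0) call in the old code are no longer needed.
    return std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{}, std::vector<float>{value});
}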

@ -2,30 +2,32 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <shared_test_classes/single_layer/normalize_l2.hpp>
#include "ov_models/builders.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/normalize_l2.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

using NormalizeL2LayerGPUTestParams = std::tuple<
InputShape, // Input shapes
ElementType, // Input precision
ov::element::Type, // Input precision
std::vector<int64_t>, // Reduction axes
ngraph::op::EpsMode, // EpsMode
ov::op::EpsMode, // EpsMode
float>; // Epsilon

class NormalizeL2LayerGPUTest : public testing::WithParamInterface<NormalizeL2LayerGPUTestParams>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<NormalizeL2LayerGPUTestParams> obj) {
InputShape inputShapes;
ElementType netPrecision;
ov::element::Type netPrecision;
std::vector<int64_t> axes;
ngraph::op::EpsMode epsMode;
ov::op::EpsMode epsMode;
float eps;
std::tie(inputShapes, netPrecision, axes, epsMode, eps) = obj.param;

@ -47,9 +49,9 @@ protected:
targetDevice = ov::test::utils::DEVICE_GPU;

InputShape inputShapes;
ElementType netPrecision;
ov::element::Type netPrecision;
std::vector<int64_t> axes;
ngraph::op::EpsMode epsMode;
ov::op::EpsMode epsMode;
float eps;
std::tie(inputShapes, netPrecision, axes, epsMode, eps) = this->GetParam();

@ -62,18 +64,15 @@ protected:
auto normAxes = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{axes.size()}, axes);
auto normalize = std::make_shared<ov::op::v0::NormalizeL2>(params[0], normAxes, eps, epsMode);

ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(normalize)};
function = std::make_shared<ngraph::Function>(results, params, "NormalizeL2");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(normalize)};
function = std::make_shared<ov::Model>(results, params, "NormalizeL2");
}
};

TEST_P(NormalizeL2LayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TEST_P(NormalizeL2LayerGPUTest, Inference) {
run();
}

namespace {

const std::vector<InputShape> inputShapes_1D = {
{
// dynamic
@ -163,8 +162,8 @@ const std::vector<InputShape> inputShapes_5D = {
}
};

const std::vector<ngraph::op::EpsMode> epsMode = {
ngraph::op::EpsMode::ADD, ngraph::op::EpsMode::MAX
const std::vector<ov::op::EpsMode> epsMode = {
ov::op::EpsMode::ADD, ov::op::EpsMode::MAX
};

const std::vector<float> epsilon = {
@ -179,7 +178,7 @@ const std::vector<int64_t> reduction_axes_12 = {1, 2};
const std::vector<int64_t> reduction_axes_3 = {3};
const std::vector<int64_t> reduction_axes_2 = {2};

std::vector<ElementType> nrtPrecision = {ElementType::f16, ElementType::f32};
std::vector<ov::element::Type> nrtPrecision = {ov::element::f16, ov::element::f32};

const auto NormalizeL2_3D = ::testing::Combine(
::testing::ValuesIn(inputShapes_3D),
@ -209,4 +208,3 @@ const auto NormalizeL2_5D = ::testing::Combine(
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_NormalizeL2_5D, NormalizeL2LayerGPUTest, NormalizeL2_5D, NormalizeL2LayerGPUTest::getTestCaseName);

} // namespace
} // namespace GPULayerTestsDefinitions
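The NormalizeL2 test above now feeds its axes through an ov::op::v0::Constant and wraps the result in an ov::Model. A compact sketch of the node construction it relies on, using only types that appear in this diff (make_normalize is an illustrative helper name):

#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/normalize_l2.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::op::v0::NormalizeL2> make_normalize(const std::shared_ptr<ov::op::v0::Parameter>& data,
                                                        const std::vector<int64_t>& axes,
                                                        float eps,
                                                        ov::op::EpsMode eps_mode) {
    // The axes become an i64 constant of rank 1, matching normAxes in the diff.
    auto axes_node = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{axes.size()}, axes);
    return std::make_shared<ov::op::v0::NormalizeL2>(data, axes_node, eps, eps_mode);
}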

@ -2,41 +2,38 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/pad.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/test_enums.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ie_precision.hpp"
#include "ov_models/builders.hpp"
#include <common_test_utils/ov_tensor_utils.hpp>
#include <string>

using namespace ngraph;
using namespace InferenceEngine;
using namespace ov;
using namespace test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/pad.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

using PadLayerGPUTestParamSet = std::tuple<
InputShape, // Input shape
ElementType, // Input element type
ov::element::Type, // Input element type
std::vector<int64_t>, // padsBegin
std::vector<int64_t>, // padsEnd
float, // argPadValue
std::vector<ngraph::helpers::InputLayerType>, // for {begin, end, padValue}
ov::op::PadMode // padMode
>;
std::vector<ov::test::utils::InputLayerType>, // for {begin, end, padValue}
ov::op::PadMode>; // padMode

class PadLayerGPUTest : public testing::WithParamInterface<PadLayerGPUTestParamSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<PadLayerGPUTestParamSet> obj) {
InputShape shapes;
ElementType elementType;
ov::element::Type model_type;
std::vector<int64_t> padsBegin, padsEnd;
ov::op::PadMode padMode;
float argPadValue;
std::vector<helpers::InputLayerType> inputLayerTypes;
std::tie(shapes, elementType, padsBegin, padsEnd, argPadValue, inputLayerTypes, padMode) = obj.param;
std::vector<ov::test::utils::InputLayerType> inputLayerTypes;
std::tie(shapes, model_type, padsBegin, padsEnd, argPadValue, inputLayerTypes, padMode) = obj.param;

std::ostringstream results;
results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
@ -44,7 +41,7 @@ public:
for (const auto& item : shapes.second) {
results << ov::test::utils::vec2str(item) << "_";
}
results << "Prc=" << elementType << "_";
results << "Prc=" << model_type << "_";
results << "padsBegin=" << ov::test::utils::vec2str(padsBegin) << "_";
results << "padsEnd=" << ov::test::utils::vec2str(padsEnd) << "_";
if (padMode == ov::op::PadMode::CONSTANT) {
@ -63,24 +60,24 @@ protected:
void SetUp() override {
InputShape shapes;
ov::op::PadMode padMode;
std::vector<helpers::InputLayerType> inputLayerTypes;
std::vector<ov::test::utils::InputLayerType> inputLayerTypes;
std::tie(shapes, inType, padsBegin, padsEnd, argPadValue, inputLayerTypes, padMode) = this->GetParam();

targetDevice = ov::test::utils::DEVICE_GPU;

std::vector<InputShape> inputShapes;
inputShapes.push_back(shapes);
if (inputLayerTypes[0] == helpers::InputLayerType::PARAMETER) {
if (inputLayerTypes[0] == ov::test::utils::InputLayerType::PARAMETER) {
inputShapes.push_back(InputShape({static_cast<int64_t>(padsBegin.size())}, std::vector<ov::Shape>(shapes.second.size(), {padsBegin.size()})));
}
if (inputLayerTypes[1] == helpers::InputLayerType::PARAMETER) {
if (inputLayerTypes[1] == ov::test::utils::InputLayerType::PARAMETER) {
inputShapes.push_back(InputShape({static_cast<int64_t>(padsEnd.size())}, std::vector<ov::Shape>(shapes.second.size(), {padsEnd.size()})));
}

init_input_shapes(inputShapes);

// Add empty shape for parameter input of scalar 'pad_value'
if (inputLayerTypes[2] == helpers::InputLayerType::PARAMETER) {
if (inputLayerTypes[2] == ov::test::utils::InputLayerType::PARAMETER) {
inputDynamicShapes.push_back(ov::PartialShape({}));
for (size_t i = 0; i < shapes.second.size(); ++i) {
for (size_t k = 0; k < targetStaticShapes.size(); ++k) {
@ -95,43 +92,43 @@ protected:

std::shared_ptr<ov::Node> pads_begin, pads_end, arg_pad_value;
// padsBegin
if (inputLayerTypes[0] == helpers::InputLayerType::PARAMETER) {
functionParams.push_back(std::make_shared<ngraph::opset1::Parameter>(ngraph::element::i64, ov::Shape{padsBegin.size()}));
if (inputLayerTypes[0] == ov::test::utils::InputLayerType::PARAMETER) {
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{padsBegin.size()}));
functionParams.back()->set_friendly_name("padsBegin");
pads_begin = functionParams.back();
} else {
pads_begin = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{padsBegin.size()}, padsBegin.data());
pads_begin = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{padsBegin.size()}, padsBegin.data());
}

// padsEnd
if (inputLayerTypes[1] == helpers::InputLayerType::PARAMETER) {
functionParams.push_back(std::make_shared<ngraph::opset1::Parameter>(ngraph::element::i64, ov::Shape{padsEnd.size()}));
if (inputLayerTypes[1] == ov::test::utils::InputLayerType::PARAMETER) {
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{padsEnd.size()}));
functionParams.back()->set_friendly_name("padsEnd");
pads_end = functionParams.back();
} else {
pads_end = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{padsEnd.size()}, padsEnd.data());
pads_end = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{padsEnd.size()}, padsEnd.data());
}

// argPadValue
if (inputLayerTypes[2] == helpers::InputLayerType::PARAMETER) {
functionParams.push_back(std::make_shared<ngraph::opset1::Parameter>(inType, ov::PartialShape({})));
if (inputLayerTypes[2] == ov::test::utils::InputLayerType::PARAMETER) {
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(inType, ov::PartialShape({})));
functionParams.back()->set_friendly_name("padValue");
arg_pad_value = functionParams.back();
} else {
arg_pad_value = std::make_shared<ngraph::opset3::Constant>(inType, ngraph::Shape{}, &argPadValue);
arg_pad_value = std::make_shared<ov::op::v0::Constant>(inType, ov::Shape{}, &argPadValue);
}

auto pad = std::make_shared<ngraph::opset3::Pad>(functionParams[0], pads_begin, pads_end, arg_pad_value, padMode);
auto pad = std::make_shared<ov::op::v1::Pad>(functionParams[0], pads_begin, pads_end, arg_pad_value, padMode);

ngraph::ResultVector results;
ov::ResultVector results;
for (size_t i = 0; i < pad->get_output_size(); ++i) {
results.push_back(std::make_shared<ngraph::opset1::Result>(pad->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(pad->output(i)));
}

function = std::make_shared<ngraph::Function>(results, functionParams, "PadLayerGPUTest");
function = std::make_shared<ov::Model>(results, functionParams, "PadLayerGPUTest");
}

void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
inputs.clear();
const auto& funcInputs = function->inputs();
for (size_t i = 0lu; i < funcInputs.size(); i++) {
@ -166,15 +163,12 @@ protected:
}
};

TEST_P(PadLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TEST_P(PadLayerGPUTest, Inference) {
run();
}

namespace {

const std::vector<ElementType> inputPrecisions = {
ElementType::f32
const std::vector<ov::element::Type> inputPrecisions = {
ov::element::f32
};

const std::vector<float> argPadValue = {0.f, -1.f};
@ -185,11 +179,11 @@ const std::vector<ov::op::PadMode> padMode = {
ov::op::PadMode::SYMMETRIC
};

const std::vector<std::vector<helpers::InputLayerType>> isConstantInput = {
{helpers::InputLayerType::CONSTANT, helpers::InputLayerType::CONSTANT, helpers::InputLayerType::CONSTANT},
{helpers::InputLayerType::CONSTANT, helpers::InputLayerType::PARAMETER, helpers::InputLayerType::CONSTANT},
{helpers::InputLayerType::CONSTANT, helpers::InputLayerType::PARAMETER, helpers::InputLayerType::PARAMETER},
{helpers::InputLayerType::PARAMETER, helpers::InputLayerType::PARAMETER, helpers::InputLayerType::PARAMETER}
const std::vector<std::vector<ov::test::utils::InputLayerType>> isConstantInput = {
{ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT},
{ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT},
{ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER},
{ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER}
};

//====================== Dynamic Shapes Tests 2D ======================
@ -210,7 +204,7 @@ INSTANTIATE_TEST_SUITE_P(
::testing::ValuesIn(padsEnd2D_Smoke),
::testing::ValuesIn(argPadValue),
::testing::ValuesIn(isConstantInput),
::testing::Values(ngraph::helpers::PadMode::CONSTANT)),
::testing::Values(ov::op::PadMode::CONSTANT)),
PadLayerGPUTest::getTestCaseName
);

@ -246,7 +240,7 @@ INSTANTIATE_TEST_SUITE_P(
::testing::ValuesIn(padsEnd4D_Smoke),
::testing::ValuesIn(argPadValue),
::testing::ValuesIn(isConstantInput),
::testing::Values(ngraph::helpers::PadMode::CONSTANT)),
::testing::Values(ov::op::PadMode::CONSTANT)),
PadLayerGPUTest::getTestCaseName
);

@ -282,7 +276,7 @@ INSTANTIATE_TEST_SUITE_P(
::testing::ValuesIn(padsEnd5D_Smoke),
::testing::ValuesIn(argPadValue),
::testing::ValuesIn(isConstantInput),
::testing::Values(ngraph::helpers::PadMode::CONSTANT)),
::testing::Values(ov::op::PadMode::CONSTANT)),
PadLayerGPUTest::getTestCaseName
);

@ -299,6 +293,4 @@ INSTANTIATE_TEST_SUITE_P(
::testing::ValuesIn(padMode)),
PadLayerGPUTest::getTestCaseName
);

} // namespace
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,33 +2,36 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "ov_models/builders.hpp"

#include "shared_test_classes/single_layer/pooling.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "shared_test_classes/single_op/pooling.hpp"

using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

using poolLayerGpuTestParamsSet = std::tuple<LayerTestsDefinitions::poolSpecificParams,
InputShape,
ElementType>;
using poolLayerGpuTestParamsSet =
std::tuple<ov::test::poolSpecificParams,
InputShape,
ov::element::Type>;

class PoolingLayerGPUTest : public testing::WithParamInterface<poolLayerGpuTestParamsSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<poolLayerGpuTestParamsSet>& obj) {
LayerTestsDefinitions::poolSpecificParams basicParamsSet;
ov::test::poolSpecificParams basicParamsSet;
InputShape inputShapes;
ElementType inPrc;
ov::element::Type inPrc;
std::tie(basicParamsSet, inputShapes, inPrc) = obj.param;

ngraph::helpers::PoolingTypes poolType;
ov::test::utils::PoolingTypes poolType;
std::vector<size_t> kernel, stride;
std::vector<size_t> padBegin, padEnd;
ngraph::op::PadType padType;
ngraph::op::RoundingType roundingType;
ov::op::PadType padType;
ov::op::RoundingType roundingType;
bool excludePad;
std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet;

@ -41,10 +44,10 @@ public:
}
results << "Prc=" << inPrc << "_";
switch (poolType) {
case ngraph::helpers::PoolingTypes::MAX:
case ov::test::utils::PoolingTypes::MAX:
results << "MaxPool_";
break;
case ngraph::helpers::PoolingTypes::AVG:
case ov::test::utils::PoolingTypes::AVG:
results << "AvgPool_";
results << "ExcludePad=" << excludePad << "_";
break;
@ -63,16 +66,16 @@ protected:
void SetUp() override {
targetDevice = ov::test::utils::DEVICE_GPU;

LayerTestsDefinitions::poolSpecificParams basicParamsSet;
ov::test::poolSpecificParams basicParamsSet;
InputShape inputShapes;
ElementType inPrc;
ov::element::Type inPrc;
std::tie(basicParamsSet, inputShapes, inPrc) = this->GetParam();

ngraph::helpers::PoolingTypes poolType;
ov::test::utils::PoolingTypes poolType;
std::vector<size_t> kernel, stride;
std::vector<size_t> padBegin, padEnd;
ngraph::op::PadType padType;
ngraph::op::RoundingType roundingType;
ov::op::PadType padType;
ov::op::RoundingType roundingType;
bool excludePad;
std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet;

@ -82,7 +85,7 @@ protected:
for (auto&& shape : inputDynamicShapes) {
params.push_back(std::make_shared<ov::op::v0::Parameter>(inPrc, shape));
}
std::shared_ptr<ngraph::Node> poolInput = params[0];
std::shared_ptr<ov::Node> poolInput = params[0];

std::shared_ptr<ov::Node> pooling;
if (ov::test::utils::PoolingTypes::MAX == poolType) {
@ -91,27 +94,23 @@ protected:
pooling = std::make_shared<ov::op::v1::AvgPool>(poolInput, stride, padBegin, padEnd, kernel, excludePad, roundingType, padType);
}

auto makeFunction = [](const ngraph::element::Type &ngPrc, ngraph::ParameterVector &params, const std::shared_ptr<ngraph::Node> &lastNode) {
ngraph::ResultVector results;
auto makeFunction = [](const ov::element::Type &ngPrc, ov::ParameterVector &params, const std::shared_ptr<ov::Node> &lastNode) {
ov::ResultVector results;

for (size_t i = 0; i < lastNode->get_output_size(); i++)
results.push_back(std::make_shared<ngraph::opset1::Result>(lastNode->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));

return std::make_shared<ngraph::Function>(results, params, "PoolingGPU");
return std::make_shared<ov::Model>(results, params, "PoolingGPU");
};
function = makeFunction(inPrc, params, pooling);
}
};

TEST_P(PoolingLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

TEST_P(PoolingLayerGPUTest, Inference) {
run();
}

namespace {

const std::vector<ElementType> inpOutPrecision = { ElementType::f32 };
const std::vector<ov::element::Type> inpOutPrecision = { ov::element::f32 };

const std::vector<InputShape> inputShapes3D = {
{ {}, {{3, 4, 64}} },
@ -220,22 +219,22 @@ const std::vector<InputShape> inputShapes5D = {
};

/* ============= Pooling (1D) ============= */
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax3D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {2}, {0}, {0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4}, {2}, {0}, {0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {1}, {0}, {0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
const std::vector<ov::test::poolSpecificParams> paramsMax3D = {
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2}, {2}, {0}, {0},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {4}, {2}, {0}, {0},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2}, {1}, {0}, {0},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false },
};

const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg3D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4}, {4}, {2}, {2},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
const std::vector<ov::test::poolSpecificParams> paramsAvg3D = {
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {3}, {1}, {1}, {0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {3}, {1}, {1}, {0},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {4}, {4}, {2}, {2},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true },
};

INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_3D, PoolingLayerGPUTest,
@ -253,30 +252,30 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_3D, PoolingLayerGPUTest,
PoolingLayerGPUTest::getTestCaseName);

/* ============= Pooling (2D) ============= */
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax4D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
const std::vector<ov::test::poolSpecificParams> paramsMax4D = {
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false },
};

const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }
const std::vector<ov::test::poolSpecificParams> paramsAvg4D = {
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, true },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, true },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }
};

INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_4D, PoolingLayerGPUTest,
@ -293,9 +292,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_4D, PoolingLayerGPUTest,
::testing::ValuesIn(inpOutPrecision)),
PoolingLayerGPUTest::getTestCaseName);

const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D_Large = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0},
ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID, true },
const std::vector<ov::test::poolSpecificParams> paramsAvg4D_Large = {
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0},
ov::op::RoundingType::FLOOR, ov::op::PadType::VALID, true },
};

INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_Large, PoolingLayerGPUTest,
@ -306,32 +305,32 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_Large, PoolingLayerGPUTest,
PoolingLayerGPUTest::getTestCaseName);

/* ============= Pooling (3D) ============= */
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax5D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
const std::vector<ov::test::poolSpecificParams> paramsMax5D = {
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false },
};

const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }
const std::vector<ov::test::poolSpecificParams> paramsAvg5D = {
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, true },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, true },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true },
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }
};

INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_5D, PoolingLayerGPUTest,
@ -348,5 +347,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_5D, PoolingLayerGPUTest,
::testing::ValuesIn(inpOutPrecision)),
PoolingLayerGPUTest::getTestCaseName);
} // namespace

} // namespace GPULayerTestsDefinitions

@ -2,23 +2,18 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/shape_of.hpp"
#include "shared_test_classes/single_layer/strided_slice.hpp"
#include "shared_test_classes/single_layer/prior_box.hpp"
#include "shared_test_classes/single_layer/prior_box_clustered.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ie_precision.hpp"
#include "ov_models/builders.hpp"
#include <string>
#include <openvino/pass/constant_folding.hpp>
#include "openvino/core/type/element_type_traits.hpp"

using namespace ngraph;
using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/random_uniform.hpp"

using ElementType = ov::element::Type_t;
namespace {
using ov::test::InputShape;

namespace GPULayerTestsDefinitions {
enum class priorbox_type {
V0,
V8,
@ -27,20 +22,21 @@ enum class priorbox_type {
typedef std::tuple<
InputShape,
InputShape,
ElementType, // Net precision
ov::element::Type,
std::vector<float>,
priorbox_type
> PriorBoxLayerGPUTestParamsSet;

class PriorBoxLayerGPUTest : public testing::WithParamInterface<PriorBoxLayerGPUTestParamsSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<PriorBoxLayerGPUTestParamsSet> obj) {
InputShape input1Shape;
InputShape input2Shape;
ElementType netPrecision;
ov::element::Type model_type;
std::vector<float> max_size;
priorbox_type priorboxType;
std::tie(input1Shape, input2Shape, netPrecision, max_size, priorboxType) = obj.param;
std::tie(input1Shape, input2Shape, model_type, max_size, priorboxType) = obj.param;

std::ostringstream result;
switch (priorboxType) {
@ -55,7 +51,7 @@ public:
result << "PriorBoxV8Test_";
}
result << std::to_string(obj.index) << "_";
result << "netPrec=" << netPrecision << "_";
result << "netPrec=" << model_type << "_";
result << "I1S=";
result << ov::test::utils::partialShape2str({input1Shape.first}) << "_";
result << "TS=(";
@ -77,29 +73,29 @@ protected:
void SetUp() override {
targetDevice = ov::test::utils::DEVICE_GPU;

auto netPrecision = ElementType::undefined;
auto model_type = ov::element::undefined;
InputShape input1Shape;
InputShape input2Shape;
std::vector<float> max_size;
priorbox_type priorboxType;
std::tie(input1Shape, input2Shape, netPrecision, max_size, priorboxType) = this->GetParam();
std::tie(input1Shape, input2Shape, model_type, max_size, priorboxType) = this->GetParam();

init_input_shapes({input1Shape, input2Shape});

inType = ov::element::Type(netPrecision);
outType = ElementType::f32;
inType = ov::element::Type(model_type);
outType = ov::element::f32;

auto beginInput = ov::op::v0::Constant::create(ngraph::element::i32, ngraph::Shape{1}, {2});
auto endInput = ov::op::v0::Constant::create(ngraph::element::i32, ngraph::Shape{1}, {4});
auto strideInput = ov::op::v0::Constant::create(ngraph::element::i32, ngraph::Shape{1}, {1});
auto beginInput = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {2});
auto endInput = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {4});
auto strideInput = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {1});

ov::ParameterVector functionParams;
for (auto&& shape : inputDynamicShapes)
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(inType, shape));

auto shapeOfOp1 = std::make_shared<ov::op::v3::ShapeOf>(functionParams[0], element::i32);
auto shapeOfOp2 = std::make_shared<ov::op::v3::ShapeOf>(functionParams[1], element::i32);
auto shapeOfOp1 = std::make_shared<ov::op::v3::ShapeOf>(functionParams[0], ov::element::i32);
auto shapeOfOp2 = std::make_shared<ov::op::v3::ShapeOf>(functionParams[1], ov::element::i32);

auto stridedSliceOp1 = std::make_shared<ov::op::v1::StridedSlice>(shapeOfOp1,
beginInput,
@ -123,7 +119,7 @@ protected:

switch (priorboxType) {
case priorbox_type::Clustered: {
ngraph::op::v0::PriorBoxClustered::Attributes attributes_clustered;
ov::op::v0::PriorBoxClustered::Attributes attributes_clustered;

attributes_clustered.widths = {86, 13, 57, 39, 68, 34, 142, 50, 23};
attributes_clustered.heights = {44, 10, 30, 19, 94, 32, 61, 53, 17};
@ -134,14 +130,14 @@ protected:
attributes_clustered.offset = 0.5;
attributes_clustered.clip = false;

auto priorBoxOp = std::make_shared<ngraph::op::v0::PriorBoxClustered>(stridedSliceOp1, stridedSliceOp2, attributes_clustered);
auto priorBoxOp = std::make_shared<ov::op::v0::PriorBoxClustered>(stridedSliceOp1, stridedSliceOp2, attributes_clustered);

ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(priorBoxOp)};
function = std::make_shared <ngraph::Function>(results, functionParams, "PriorBoxV0Function");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(priorBoxOp)};
function = std::make_shared <ov::Model>(results, functionParams, "PriorBoxV0Function");
break;
}
case priorbox_type::V0: {
ngraph::op::v0::PriorBox::Attributes attributes_v0;
ov::op::v0::PriorBox::Attributes attributes_v0;

attributes_v0.min_size = {64};
attributes_v0.max_size = max_size;
@ -153,15 +149,15 @@ protected:
attributes_v0.flip = true;
attributes_v0.scale_all_sizes = true;

auto priorBoxOp = std::make_shared<ngraph::op::v0::PriorBox>(stridedSliceOp1, stridedSliceOp2, attributes_v0);
auto priorBoxOp = std::make_shared<ov::op::v0::PriorBox>(stridedSliceOp1, stridedSliceOp2, attributes_v0);

ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(priorBoxOp)};
function = std::make_shared <ngraph::Function>(results, functionParams, "PriorBoxV0Function");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(priorBoxOp)};
function = std::make_shared <ov::Model>(results, functionParams, "PriorBoxV0Function");
break;
}
case priorbox_type::V8:
default: {
ngraph::op::v8::PriorBox::Attributes attributes_v8;
ov::op::v8::PriorBox::Attributes attributes_v8;

attributes_v8.min_size = {64};
attributes_v8.max_size = max_size;
@ -174,25 +170,21 @@ protected:
attributes_v8.scale_all_sizes = true;
attributes_v8.min_max_aspect_ratios_order = true;

auto priorBoxOp = std::make_shared<ngraph::op::v8::PriorBox>(stridedSliceOp1, stridedSliceOp2, attributes_v8);
auto priorBoxOp = std::make_shared<ov::op::v8::PriorBox>(stridedSliceOp1, stridedSliceOp2, attributes_v8);

ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(priorBoxOp)};
function = std::make_shared <ngraph::Function>(results, functionParams, "PriorBoxV8Function");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(priorBoxOp)};
function = std::make_shared <ov::Model>(results, functionParams, "PriorBoxV8Function");
}
}
}
};

TEST_P(PriorBoxLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

TEST_P(PriorBoxLayerGPUTest, Inference) {
run();
}

namespace {

const std::vector<ElementType> netPrecisions = {
ElementType::f32,
const std::vector<ov::element::Type> model_types = {
ov::element::f32,
};

const std::vector<priorbox_type> mode = {
@ -230,10 +222,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_prior_box_full_dynamic,
::testing::Combine(
::testing::ValuesIn(inShapesDynamic),
::testing::ValuesIn(imgShapesDynamic),
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(model_types),
::testing::ValuesIn(max_size),
::testing::ValuesIn(mode)),
PriorBoxLayerGPUTest::getTestCaseName);
} // namespace

} // namespace GPULayerTestsDefinitions
} // namespace

@ -2,26 +2,28 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "ov_models/builders.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "openvino/core/type/element_type_traits.hpp"

using namespace ngraph;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/random_uniform.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

typedef std::tuple<
std::vector<InputShape>, // Input shapes
std::pair<double, double>, // Min value, Max value
std::pair<uint64_t, uint64_t>, // Global seed, operation seed
ElementType, // Network precision
TargetDevice, // Device name
std::map<std::string, std::string> // Additional network configuration
ov::element::Type, // Network precision
std::string // Device name
> RandomUnifromDynamicGPUTestParamsSet;

class RandomUnifromDynamicGPUTest : public testing::WithParamInterface<RandomUnifromDynamicGPUTestParamsSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<RandomUnifromDynamicGPUTestParamsSet>& obj) {
RandomUnifromDynamicGPUTestParamsSet basicParamsSet = obj.param;
@ -29,10 +31,9 @@ public:
std::vector<InputShape> input_shapes;
std::pair<double, double> min_max_values;
std::pair<uint64_t, uint64_t> seeds;
ElementType precision;
TargetDevice target_device;
std::map<std::string, std::string> additionalConfig;
std::tie(input_shapes, min_max_values, seeds, precision, target_device, additionalConfig) = basicParamsSet;
ov::element::Type precision;
std::string target_device;
std::tie(input_shapes, min_max_values, seeds, precision, target_device) = basicParamsSet;

result << "shape=";
for (const auto& shape : input_shapes) {
@ -75,37 +76,37 @@ protected:
void set_tensor_value(T scalar, ov::Tensor& tensor) {
#define CASE(X) \
case X: { \
auto *dataPtr = tensor.data<element_type_traits<X>::value_type>(); \
dataPtr[0] = static_cast<element_type_traits<X>::value_type>(scalar); \
auto *dataPtr = tensor.data<ov::element_type_traits<X>::value_type>(); \
dataPtr[0] = static_cast<ov::element_type_traits<X>::value_type>(scalar); \
break; \
}

switch (tensor.get_element_type()) {
CASE(ElementType::boolean)
CASE(ElementType::i8)
CASE(ElementType::i16)
CASE(ElementType::i32)
CASE(ElementType::i64)
CASE(ElementType::u8)
CASE(ElementType::u16)
CASE(ElementType::u32)
CASE(ElementType::u64)
CASE(ElementType::bf16)
CASE(ElementType::f16)
CASE(ElementType::f32)
CASE(ElementType::f64)
CASE(ElementType::u1)
CASE(ElementType::i4)
CASE(ElementType::u4)
CASE(ov::element::boolean)
CASE(ov::element::i8)
CASE(ov::element::i16)
CASE(ov::element::i32)
CASE(ov::element::i64)
CASE(ov::element::u8)
CASE(ov::element::u16)
CASE(ov::element::u32)
CASE(ov::element::u64)
CASE(ov::element::bf16)
CASE(ov::element::f16)
CASE(ov::element::f32)
CASE(ov::element::f64)
CASE(ov::element::u1)
CASE(ov::element::i4)
CASE(ov::element::u4)
default: OPENVINO_THROW("Unsupported element type: ", tensor.get_element_type());
}
}

void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
inputs.clear();
const auto& funcInputs = function->inputs();

auto generate_input = [&](size_t index, ElementType element_type) {
auto generate_input = [&](size_t index, ov::element::Type element_type) {
ov::Tensor tensor(element_type, targetInputStaticShapes[index]);
if (index != 0) {
auto scalar_val = index == 1 ? min_max_values.first : min_max_values.second;
@ -121,11 +122,9 @@ protected:
void SetUp() override {
RandomUnifromDynamicGPUTestParamsSet basicParamsSet = this->GetParam();
std::vector<InputShape> shapes;
ElementType netType;
std::map<std::string, std::string> additionalConfig;
ov::element::Type netType;
std::pair<uint64_t, uint64_t> seeds;

std::tie(shapes, min_max_values, seeds, netType, targetDevice, additionalConfig) = basicParamsSet;
std::tie(shapes, min_max_values, seeds, netType, targetDevice) = basicParamsSet;

init_input_shapes(shapes);

@ -144,14 +143,10 @@ private:
std::pair<double, double> min_max_values;
};

TEST_P(RandomUnifromDynamicGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TEST_P(RandomUnifromDynamicGPUTest, Inference) {
run();
}

namespace {
std::map<std::string, std::string> emptyAdditionalConfig;
const std::vector<std::vector<ov::test::InputShape>> dynInputShapes = {
{
{{ov::PartialShape::dynamic(4)}, {{1, 2, 3, 4}, {1, 1, 5, 5}, {2, 3, 4, 5}}},
@ -183,21 +178,18 @@ const std::vector<std::pair<uint64_t, uint64_t>> seeds = {
{100, 10},
};

const std::vector<ElementType> netPrecisions = {
ElementType::i32,
ElementType::f32,
ElementType::f16,
const std::vector<ov::element::Type> netPrecisions = {
ov::element::i32,
ov::element::f32,
ov::element::f16,
};

const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes),
::testing::ValuesIn(min_max_values),
::testing::ValuesIn(seeds),
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));
::testing::Values(ov::test::utils::DEVICE_GPU));

INSTANTIATE_TEST_SUITE_P(smoke_dynamic_random_uniform, RandomUnifromDynamicGPUTest,
testParams_smoke, RandomUnifromDynamicGPUTest::getTestCaseName);

} // namespace
} // namespace GPULayerTestsDefinitions

@ -2,35 +2,36 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "ov_models/builders.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "openvino/core/type/element_type_traits.hpp"

using namespace ngraph;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/range.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

typedef std::tuple<
std::vector<InputShape>, // input shapes
std::vector<float>, // input values
ElementType, // Network precision
TargetDevice, // Device name
std::map<std::string, std::string> // Additional network configuration
ov::element::Type, // Model type
std::string // Device name
> RangeDynamicGPUTestParamsSet;

class RangeDynamicGPUTest : public testing::WithParamInterface<RangeDynamicGPUTestParamsSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<RangeDynamicGPUTestParamsSet>& obj) {
RangeDynamicGPUTestParamsSet basicParamsSet = obj.param;
std::ostringstream result;
std::vector<InputShape> inputShapes;
std::vector<float> inputValues;
ElementType netType;
TargetDevice targetDevice;
std::map<std::string, std::string> additionalConfig;
std::tie(inputShapes, inputValues, netType, targetDevice, additionalConfig) = basicParamsSet;
ov::element::Type model_type;
std::string targetDevice;
std::tie(inputShapes, inputValues, model_type, targetDevice) = basicParamsSet;

result << "IS=";
for (const auto& shape : inputShapes) {
@ -43,7 +44,7 @@ public:
for (const auto& v : inputValues) {
result << v << "_";
}
result << "NetType=" << netType << "_";
result << "model_type=" << model_type << "_";
result << "targetDevice=" << targetDevice;
return result.str();
}
@ -75,47 +76,47 @@ protected:
void add_scalar_to_tensor(T scalar, ov::Tensor& tensor) {
#define CASE(X) \
case X: { \
auto *dataPtr = tensor.data<element_type_traits<X>::value_type>(); \
dataPtr[0] = static_cast<element_type_traits<X>::value_type>(scalar); \
auto *dataPtr = tensor.data<ov::element_type_traits<X>::value_type>(); \
dataPtr[0] = static_cast<ov::element_type_traits<X>::value_type>(scalar); \
break; \
}

switch (tensor.get_element_type()) {
CASE(ElementType::boolean)
CASE(ElementType::i8)
CASE(ElementType::i16)
CASE(ElementType::i32)
CASE(ElementType::i64)
CASE(ElementType::u8)
CASE(ElementType::u16)
CASE(ElementType::u32)
CASE(ElementType::u64)
CASE(ElementType::bf16)
CASE(ElementType::f16)
CASE(ElementType::f32)
CASE(ElementType::f64)
CASE(ElementType::u1)
CASE(ElementType::i4)
CASE(ElementType::u4)
CASE(ov::element::boolean)
CASE(ov::element::i8)
CASE(ov::element::i16)
CASE(ov::element::i32)
CASE(ov::element::i64)
CASE(ov::element::u8)
CASE(ov::element::u16)
CASE(ov::element::u32)
CASE(ov::element::u64)
CASE(ov::element::bf16)
CASE(ov::element::f16)
CASE(ov::element::f32)
CASE(ov::element::f64)
CASE(ov::element::u1)
CASE(ov::element::i4)
CASE(ov::element::u4)
default: OPENVINO_THROW("Unsupported element type: ", tensor.get_element_type());
}
}

void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
inputs.clear();
const auto& funcInputs = function->inputs();

auto generate_input = [&](size_t index, ElementType element_type) {
auto generate_input = [&](size_t index, ov::element::Type element_type) {
ov::Tensor tensor(element_type, targetInputStaticShapes[index]);
add_scalar_to_tensor<float>(input_values[index], tensor);
inputs.insert({funcInputs[index].get_node_shared_ptr(), tensor});
};

// net_type=undifined means mixed type test
if (net_type == ElementType::undefined) {
generate_input(0, ElementType::f32);
generate_input(1, ElementType::i32);
generate_input(2, ElementType::f32);
if (net_type == ov::element::undefined) {
generate_input(0, ov::element::f32);
generate_input(1, ov::element::i32);
generate_input(2, ov::element::f32);
} else {
for (size_t i = 0; i < funcInputs.size(); ++i) {
generate_input(i, funcInputs[i].get_element_type());
@ -127,47 +128,43 @@ protected:
RangeDynamicGPUTestParamsSet basicParamsSet = this->GetParam();
std::vector<InputShape> inputShapes;
std::vector<float> inputValues;
ElementType netType;
std::map<std::string, std::string> additionalConfig;
ov::element::Type model_type;
ov::ParameterVector params;
std::tie(inputShapes, inputValues, netType, targetDevice, additionalConfig) = basicParamsSet;
std::tie(inputShapes, inputValues, model_type, targetDevice) = basicParamsSet;

input_values = inputValues;
net_type = netType;
net_type = model_type;

init_input_shapes(inputShapes);

if (netType == ElementType::undefined) {
std::vector<element::Type> types = { ElementType::f32, ElementType::i32, ElementType::f32 };
if (model_type == ov::element::undefined) {
std::vector<ov::element::Type> types = { ov::element::f32, ov::element::i32, ov::element::f32 };
for (size_t i = 0; i < types.size(); i++) {
auto paramNode = std::make_shared<ov::op::v0::Parameter>(types[i], inputDynamicShapes[i]);
params.push_back(paramNode);
}
netType = ElementType::f32;
model_type = ov::element::f32;
} else {
for (auto&& shape : inputDynamicShapes) {
params.push_back(std::make_shared<ov::op::v0::Parameter>(netType, shape));
params.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
}
}
const auto range = std::make_shared<ngraph::opset8::Range>(params[0], params[1], params[2], netType);
const auto range = std::make_shared<ov::op::v4::Range>(params[0], params[1], params[2], model_type);

ngraph::ResultVector results = {std::make_shared<ngraph::opset1::Result>(range)};
function = std::make_shared<ngraph::Function>(results, params, "shapeof_out");
ov::ResultVector results = {std::make_shared<ov::op::v0::Result>(range)};
function = std::make_shared<ov::Model>(results, params, "shapeof_out");
}

private:
std::vector<float> input_values;
ElementType net_type;
ov::element::Type net_type;
};

TEST_P(RangeDynamicGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TEST_P(RangeDynamicGPUTest, Inference) {
run();
}

namespace {
std::map<std::string, std::string> emptyAdditionalConfig;
const std::vector<std::vector<ov::test::InputShape>> dynInputShapes = {
{
// Inputs for Range
@ -187,17 +184,16 @@ const std::vector<std::vector<float>> inputValues = {
}
};

const std::vector<ElementType> netPrecisions = {
ElementType::i8,
ElementType::i32,
ElementType::i64,
const std::vector<ov::element::Type> netPrecisions = {
ov::element::i8,
ov::element::i32,
ov::element::i64,
};

const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes),
::testing::ValuesIn(inputValues),
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));
::testing::Values(ov::test::utils::DEVICE_GPU));

INSTANTIATE_TEST_SUITE_P(smoke_dynamic_range_01, RangeDynamicGPUTest,
testParams_smoke, RangeDynamicGPUTest::getTestCaseName);
@ -211,16 +207,15 @@ const std::vector<std::vector<float>> inputFloatValues = {
}
};

const std::vector<ElementType> netFloatPrecisions = {
ElementType::f16,
ElementType::f32,
const std::vector<ov::element::Type> netFloatPrecisions = {
ov::element::f16,
ov::element::f32,
};

const auto testFloatParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes),
::testing::ValuesIn(inputFloatValues),
::testing::ValuesIn(netFloatPrecisions),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));
::testing::Values(ov::test::utils::DEVICE_GPU));

INSTANTIATE_TEST_SUITE_P(smoke_dynamic_range_02, RangeDynamicGPUTest,
testFloatParams_smoke, RangeDynamicGPUTest::getTestCaseName);
@ -233,19 +228,17 @@ const std::vector<std::vector<float>> inputMixedValues = {
}
};

const std::vector<ElementType> netMixedPrecisions = {
const std::vector<ov::element::Type> netMixedPrecisions = {
// Mixed type test(start/step:fp32, end:i32)
ElementType::undefined
ov::element::undefined
};

const auto testMixedParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes),
::testing::ValuesIn(inputMixedValues),
::testing::ValuesIn(netMixedPrecisions),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));
::testing::Values(ov::test::utils::DEVICE_GPU));

INSTANTIATE_TEST_SUITE_P(smoke_dynamic_diff_types, RangeDynamicGPUTest,
testMixedParams_smoke, RangeDynamicGPUTest::getTestCaseName);
} // namespace
} // namespace GPULayerTestsDefinitions

@ -2,17 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/reduce_ops.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/node_builders/reduce.hpp"
#include "common_test_utils/test_enums.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ie_precision.hpp"
#include "ov_models/builders.hpp"
#include <string>

using namespace ngraph;
using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

typedef struct {
std::vector<InputShape> data_shape;
@ -20,22 +20,22 @@ typedef struct {
} ReduceInput;

typedef std::tuple<
ReduceInput, // input data (data shape, axes shape, axes values)
ElementType, // presion of inputs
helpers::ReductionType, // reduction type
bool, // keepDims
TargetDevice // device name
ReduceInput, // input data (data shape, axes shape, axes values)
ov::element::Type, // presion of inputs
ov::test::utils::ReductionType, // reduction type
bool, // keepDims
std::string // device name
> ReduceLayerTestParamSet;

class ReduceLayerGPUTest : public testing::WithParamInterface<ReduceLayerTestParamSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<ReduceLayerTestParamSet>& obj) {
ReduceInput input_data;
ElementType netType;
helpers::ReductionType reductionType;
ov::element::Type netType;
ov::test::utils::ReductionType reductionType;
bool keepDims;
TargetDevice targetDevice;
std::string targetDevice;
std::tie(input_data, netType, reductionType, keepDims, targetDevice) = obj.param;

std::vector<InputShape> inshapes = input_data.data_shape;
@ -67,8 +67,8 @@ public:
protected:
void SetUp() override {
ReduceInput input_data;
ElementType netPrecision;
helpers::ReductionType reductionType;
ov::element::Type netPrecision;
ov::test::utils::ReductionType reductionType;
bool keepDims;
std::tie(input_data, netPrecision, reductionType, keepDims, targetDevice) = this->GetParam();

@ -84,52 +84,43 @@ protected:
std::vector<size_t> shapeAxes;
shapeAxes.push_back(axes.size());

auto reductionAxesNode = std::dynamic_pointer_cast<ngraph::Node>(
std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes));
auto reductionAxesNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape(shapeAxes), axes);

const auto reduce = ngraph::builder::makeReduce(params[0], reductionAxesNode, keepDims, reductionType);
const auto reduce = ov::test::utils::make_reduce(params[0], reductionAxesNode, keepDims, reductionType);

auto makeFunction = [](ParameterVector &params, const std::shared_ptr<Node> &lastNode) {
ResultVector results;
auto makeFunction = [](ov::ParameterVector &params, const std::shared_ptr<ov::Node> &lastNode) {
ov::ResultVector results;

for (size_t i = 0; i < lastNode->get_output_size(); i++)
results.push_back(std::make_shared<opset1::Result>(lastNode->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));

return std::make_shared<Function>(results, params, "ReduceLayerGPUTest");
return std::make_shared<ov::Model>(results, params, "ReduceLayerGPUTest");
};

function = makeFunction(params, reduce);
}
};

TEST_P(ReduceLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

TEST_P(ReduceLayerGPUTest, Inference) {
run();
}

namespace {

const std::vector<bool> keepDims = {
true,
false,
};

const std::vector<ElementType> floatPrecisions = {
ElementType::f32,
ElementType::f16,
const std::vector<ov::element::Type> float_types = {
ov::element::f32,
ov::element::f16,
};

const std::vector<ElementType> floatIntPrecisions = {
ElementType::f32,
ElementType::f16,
ElementType::i32,
const std::vector<ov::element::Type> float_int_types = {
ov::element::f32,
ov::element::f16,
ov::element::i32,
};

namespace Reduce {

const ReduceInput dyn1d = {
{
{ {-1}, {{4}, {5}} }
@ -176,8 +167,8 @@ const ReduceInput dyn6d = {
// ================== Reduction int32/float types (Sum, Min, Max, L1) ==================
const auto reduceSum = ::testing::Combine(
::testing::ValuesIn({dyn1d, dyn5d}),
::testing::ValuesIn(floatIntPrecisions),
::testing::Values(helpers::ReductionType::Sum),
::testing::ValuesIn(float_int_types),
::testing::Values(ov::test::utils::ReductionType::Sum),
::testing::ValuesIn(keepDims),
::testing::Values(ov::test::utils::DEVICE_GPU)
);
@ -185,8 +176,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_sum_compareWithRefs_dynamic, ReduceLayerGP

const auto reduceMin = ::testing::Combine(
::testing::ValuesIn({dyn2d, dyn6d}),
::testing::ValuesIn(floatIntPrecisions),
::testing::Values(helpers::ReductionType::Min),
::testing::ValuesIn(float_int_types),
::testing::Values(ov::test::utils::ReductionType::Min),
::testing::ValuesIn(keepDims),
::testing::Values(ov::test::utils::DEVICE_GPU)
);
@ -194,8 +185,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_min_compareWithRefs_dynamic, ReduceLayerGP

const auto reduceMax = ::testing::Combine(
::testing::ValuesIn({dyn3d, dyn5d}),
::testing::ValuesIn(floatIntPrecisions),
::testing::Values(helpers::ReductionType::Max),
::testing::ValuesIn(float_int_types),
::testing::Values(ov::test::utils::ReductionType::Max),
::testing::ValuesIn(keepDims),
::testing::Values(ov::test::utils::DEVICE_GPU)
);
@ -203,8 +194,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_max_compareWithRefs_dynamic, ReduceLayerGP

const auto reduceL1 = ::testing::Combine(
::testing::ValuesIn({dyn4d, dyn6d}),
::testing::ValuesIn(floatIntPrecisions),
::testing::Values(helpers::ReductionType::L1),
::testing::ValuesIn(float_int_types),
::testing::Values(ov::test::utils::ReductionType::L1),
::testing::ValuesIn(keepDims),
::testing::Values(ov::test::utils::DEVICE_GPU)
);
@ -214,8 +205,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_l1_compareWithRefs_dynamic, ReduceLayerGPU
// ================== Reduction float types (Mean, Prod, L2) ==================
const auto reduceMean = ::testing::Combine(
::testing::ValuesIn({dyn1d, dyn6d}),
::testing::ValuesIn(floatPrecisions),
::testing::Values(helpers::ReductionType::Mean),
::testing::ValuesIn(float_types),
::testing::Values(ov::test::utils::ReductionType::Mean),
::testing::ValuesIn(keepDims),
::testing::Values(ov::test::utils::DEVICE_GPU)
);
@ -223,8 +214,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_mean_compareWithRefs_dynamic, ReduceLayerG

const auto reduceProd = ::testing::Combine(
::testing::ValuesIn({dyn2d, dyn4d}),
::testing::ValuesIn({ElementType::f32}),
::testing::Values(helpers::ReductionType::Prod),
::testing::ValuesIn({ov::element::f32}),
::testing::Values(ov::test::utils::ReductionType::Prod),
::testing::ValuesIn(keepDims),
::testing::Values(ov::test::utils::DEVICE_GPU)
);
@ -232,8 +223,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_prod_compareWithRefs_dynamic, ReduceLayerG

const auto reduceL2 = ::testing::Combine(
::testing::ValuesIn({dyn4d, dyn5d}),
::testing::ValuesIn(floatPrecisions),
::testing::Values(helpers::ReductionType::L2),
::testing::ValuesIn(float_types),
::testing::Values(ov::test::utils::ReductionType::L2),
::testing::ValuesIn(keepDims),
::testing::Values(ov::test::utils::DEVICE_GPU)
);
@ -243,8 +234,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_l2_compareWithRefs_dynamic, ReduceLayerGPU
// ================== Reduction logical types (LogicalOr, LogicalAnd) ==================
const auto reduceLogicalOr = ::testing::Combine(
::testing::ValuesIn({dyn1d, dyn6d}),
::testing::Values(ElementType::boolean),
::testing::Values(helpers::ReductionType::LogicalOr),
::testing::Values(ov::element::boolean),
::testing::Values(ov::test::utils::ReductionType::LogicalOr),
::testing::ValuesIn(keepDims),
::testing::Values(ov::test::utils::DEVICE_GPU)
);
@ -252,8 +243,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_logicalor_compareWithRefs_dynamic, ReduceL

const auto reduceLogicalAnd = ::testing::Combine(
::testing::ValuesIn({dyn3d, dyn5d}),
::testing::Values(ElementType::boolean),
::testing::Values(helpers::ReductionType::LogicalAnd),
::testing::Values(ov::element::boolean),
::testing::Values(ov::test::utils::ReductionType::LogicalAnd),
::testing::ValuesIn(keepDims),
::testing::Values(ov::test::utils::DEVICE_GPU)
);
@ -347,15 +338,11 @@ const std::vector<ReduceInput> dynVariousAxisInputs = {

const auto reduceMaxWithVariousAxis = ::testing::Combine(
::testing::ValuesIn(dynVariousAxisInputs),
::testing::Values(ElementType::f32),
::testing::Values(helpers::ReductionType::Max),
::testing::Values(ov::element::f32),
::testing::Values(ov::test::utils::ReductionType::Max),
::testing::ValuesIn(keepDims),
::testing::Values(ov::test::utils::DEVICE_GPU)
);
INSTANTIATE_TEST_SUITE_P(smoke_reduce_max_withVariousAxis_compareWithRefs_dynamic,
ReduceLayerGPUTest, reduceMaxWithVariousAxis, ReduceLayerGPUTest::getTestCaseName);

} // namespace Reduce
} // namespace
} // namespace GPULayerTestsDefinitions

@ -2,18 +2,16 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/region_yolo.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ie_precision.hpp"
#include "ov_models/builders.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include <string>
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace ngraph;
using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/region_yolo.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

struct regionYoloAttributes {
size_t classes;
@ -28,9 +26,7 @@ typedef std::tuple<
InputShape, // Input Shape
regionYoloAttributes, // Params
std::vector<int64_t>, // mask
ov::test::ElementType, // Network input precision
ov::test::ElementType, // Network output precision
std::map<std::string, std::string>, // Additional network configuration
ov::element::Type, // Model type
std::string // Device name
> RegionYoloGPUTestParam;

@ -38,70 +34,60 @@ class RegionYoloLayerGPUTest : public testing::WithParamInterface<RegionYoloGPUT
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<RegionYoloGPUTestParam> obj) {
InputShape inputShape;
InputShape shapes;
regionYoloAttributes attributes;
std::vector<int64_t> mask;
ov::test::ElementType inpPrecision;
ov::test::ElementType outPrecision;
ov::element::Type model_type;
std::string targetName;
std::map<std::string, std::string> additionalConfig;

std::tie(inputShape, attributes, mask, inpPrecision, outPrecision, additionalConfig, targetName) = obj.param;
std::tie(shapes, attributes, mask, model_type, targetName) = obj.param;

std::ostringstream result;
result << "IS=" << inputShape << "_";
result << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
for (const auto& item : shapes.second) {
result << ov::test::utils::vec2str(item) << "_";
}
result << "classes=" << attributes.classes << "_";
result << "coords=" << attributes.coordinates << "_";
result << "num=" << attributes.num_regions << "_";
result << "doSoftmax=" << attributes.do_softmax << "_";
result << "axis=" << attributes.start_axis << "_";
result << "endAxis=" << attributes.end_axis << "_";
result << "inpPRC=" << inpPrecision << "_";
result << "outPRC=" << outPrecision << "_";
result << "inpPRC=" << model_type << "_";
result << "targetDevice=" << targetName << "_";
return result.str();
}

protected:
void SetUp() override {
InputShape inputShape;
InputShape shapes;
regionYoloAttributes attributes;
std::vector<int64_t> mask;
ov::test::ElementType inPrc;
ov::test::ElementType outPrc;
std::map<std::string, std::string> additionalConfig;
ov::element::Type model_type;
std::tie(shapes, attributes, mask, model_type, targetDevice) = this->GetParam();

std::tie(inputShape, attributes, mask, inPrc, outPrc, additionalConfig, targetDevice) = this->GetParam();

init_input_shapes({ inputShape });
init_input_shapes({ shapes });

ov::ParameterVector paramRegionYolo;
for (auto&& shape : inputDynamicShapes) {
paramRegionYolo.push_back(std::make_shared<ov::op::v0::Parameter>(inPrc, shape));
paramRegionYolo.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
}

const auto region_yolo = std::make_shared<ngraph::op::v0::RegionYolo>(paramRegionYolo[0],
const auto region_yolo = std::make_shared<ov::op::v0::RegionYolo>(paramRegionYolo[0],
attributes.coordinates, attributes.classes, attributes.num_regions,
attributes.do_softmax, mask, attributes.start_axis, attributes.end_axis);

ngraph::ResultVector results;
ov::ResultVector results;
for (size_t i = 0; i < region_yolo->get_output_size(); i++)
results.push_back(std::make_shared<ngraph::opset1::Result>(region_yolo->output(i)));
function = std::make_shared<ngraph::Function>(results, paramRegionYolo, "RegionYolo");
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(region_yolo->output(i)));
|
||||
function = std::make_shared<ov::Model>(results, paramRegionYolo, "RegionYolo");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(RegionYoloLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(RegionYoloLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
std::map<std::string, std::string> emptyAdditionalConfig;
|
||||
|
||||
const std::vector<ov::test::ElementType> inpOutPrc = {ov::test::ElementType::f16, ov::test::ElementType::f32};
|
||||
const std::vector<ov::element::Type> model_types = {ov::element::f16, ov::element::f32};
|
||||
|
||||
const std::vector<InputShape> inShapes_caffe_dynamic = {
|
||||
{{-1, -1, -1, -1}, {{1, 125, 13, 13}, {1, 125, 26, 26}}},
|
||||
@ -134,9 +120,7 @@ const auto testCase_yolov3_dynamic = ::testing::Combine(
|
||||
::testing::ValuesIn(inShapes_v3_dynamic),
|
||||
::testing::Values(yoloV3attr),
|
||||
::testing::Values(masks[2]),
|
||||
::testing::ValuesIn(inpOutPrc),
|
||||
::testing::ValuesIn(inpOutPrc),
|
||||
::testing::Values(emptyAdditionalConfig),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU)
|
||||
);
|
||||
|
||||
@ -146,9 +130,7 @@ const auto testCase_yolov3_mxnet_dynamic = ::testing::Combine(
|
||||
::testing::ValuesIn(inShapes_mxnet_dynamic),
|
||||
::testing::Values(yoloV3mxnetAttr),
|
||||
::testing::Values(masks[1]),
|
||||
::testing::ValuesIn(inpOutPrc),
|
||||
::testing::ValuesIn(inpOutPrc),
|
||||
::testing::Values(emptyAdditionalConfig),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU)
|
||||
);
|
||||
|
||||
@ -158,9 +140,7 @@ const auto testCase_yolov2_caffe_dynamic = ::testing::Combine(
|
||||
::testing::ValuesIn(inShapes_caffe_dynamic),
|
||||
::testing::Values(yoloV2caffeAttr),
|
||||
::testing::Values(masks[0]),
|
||||
::testing::ValuesIn(inpOutPrc),
|
||||
::testing::ValuesIn(inpOutPrc),
|
||||
::testing::Values(emptyAdditionalConfig),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU)
|
||||
);
|
||||
|
||||
@ -177,4 +157,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPURegionYoloCaffeDynamic, RegionYoloLayerGPUTest
|
||||
RegionYoloLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
@ -2,69 +2,65 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/reorg_yolo.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ie_precision.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include <string>
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/reorg_yolo.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;
|
||||
|
||||
typedef std::tuple<
|
||||
InputShape, // Input Shape
|
||||
size_t, // Stride
|
||||
ElementType, // Network precision
|
||||
TargetDevice // Device
|
||||
InputShape, // Input Shape
|
||||
size_t, // Stride
|
||||
ov::element::Type, // Model type
|
||||
std::string // Device
|
||||
> ReorgYoloGPUTestParams;
|
||||
|
||||
class ReorgYoloLayerGPUTest : public testing::WithParamInterface<ReorgYoloGPUTestParams>,
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<ReorgYoloGPUTestParams> obj) {
|
||||
InputShape inputShape;
|
||||
InputShape shapes;
|
||||
size_t stride;
|
||||
ElementType netPrecision;
|
||||
TargetDevice targetDev;
|
||||
std::tie(inputShape, stride, netPrecision, targetDev) = obj.param;
|
||||
ov::element::Type model_type;
|
||||
std::string targetDev;
|
||||
std::tie(shapes, stride, model_type, targetDev) = obj.param;
|
||||
std::ostringstream result;
|
||||
result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_";
|
||||
for (const auto& item : inputShape.second) {
|
||||
result << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
|
||||
for (const auto& item : shapes.second) {
|
||||
result << ov::test::utils::vec2str(item) << "_";
|
||||
}
|
||||
result << "stride=" << stride << "_";
|
||||
result << "netPRC=" << netPrecision << "_";
|
||||
result << "modelPRC=" << model_type << "_";
|
||||
result << "targetDevice=" << targetDev << "_";
|
||||
return result.str();
|
||||
}
|
||||
|
||||
protected:
|
||||
void SetUp() override {
|
||||
InputShape inputShape;
|
||||
InputShape shapes;
|
||||
size_t stride;
|
||||
ElementType netPrecision;
|
||||
std::tie(inputShape, stride, netPrecision, targetDevice) = this->GetParam();
|
||||
ov::element::Type model_type;
|
||||
std::tie(shapes, stride, model_type, targetDevice) = this->GetParam();
|
||||
|
||||
init_input_shapes({inputShape});
|
||||
init_input_shapes({shapes});
|
||||
|
||||
auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, inputDynamicShapes[0]);
|
||||
auto reorg_yolo = std::make_shared<ngraph::op::v0::ReorgYolo>(param, stride);
|
||||
function = std::make_shared<ov::Model>(std::make_shared<ngraph::opset1::Result>(reorg_yolo),
|
||||
ngraph::ParameterVector{param},
|
||||
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, inputDynamicShapes[0]);
|
||||
auto reorg_yolo = std::make_shared<ov::op::v0::ReorgYolo>(param, stride);
|
||||
function = std::make_shared<ov::Model>(std::make_shared<ov::op::v0::Result>(reorg_yolo),
|
||||
ov::ParameterVector{param},
|
||||
"ReorgYolo");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(ReorgYoloLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(ReorgYoloLayerGPUTest, Inference) {
|
||||
run();
|
||||
};
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<ov::test::InputShape> inShapesDynamic1 = {
|
||||
{{{1, 2}, -1, -1, -1}, {{1, 4, 4, 4}, {1, 8, 4, 4}, {2, 8, 4, 4}}}
|
||||
};
|
||||
@ -94,4 +90,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_TestsReorgYolo_stride2_DynamicShape, ReorgYoloLay
|
||||
ReorgYoloLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,61 +2,57 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/roi_pooling.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include <string>
|
||||
#include "common_test_utils/data_utils.hpp"
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/tile.hpp"
|
||||
|
||||
namespace {
|
||||
enum ProposalGenerationMode { RANDOM, ULTIMATE_RIGHT_BORDER };
|
||||
|
||||
using ROIPoolingShapes = std::vector<InputShape>;
|
||||
using ROIPoolingShapes = std::vector<ov::test::InputShape>;
|
||||
|
||||
typedef std::tuple<
|
||||
ROIPoolingShapes, // Input shapes
|
||||
std::vector<size_t>, // Pooled shape {pooled_h, pooled_w}
|
||||
float, // Spatial scale
|
||||
ngraph::helpers::ROIPoolingTypes, // ROIPooling method
|
||||
InferenceEngine::Precision // Net precision
|
||||
ROIPoolingShapes, // Input shapes
|
||||
std::vector<size_t>, // Pooled shape {pooled_h, pooled_w}
|
||||
float, // Spatial scale
|
||||
ov::test::utils::ROIPoolingTypes, // ROIPooling method
|
||||
ov::element::Type // Model type
|
||||
> ROIPoolingParams;
|
||||
|
||||
typedef std::tuple<
|
||||
ROIPoolingParams,
|
||||
ProposalGenerationMode,
|
||||
std::map<std::string, std::string>
|
||||
ProposalGenerationMode
|
||||
> ROIPoolingGPUTestParams;
|
||||
|
||||
class ROIPoolingLayerGPUTest : public testing::WithParamInterface<ROIPoolingGPUTestParams>,
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<ROIPoolingGPUTestParams> obj) {
|
||||
ROIPoolingParams basicParamsSet;
|
||||
ProposalGenerationMode propMode;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
ROIPoolingParams basic_params_set;
|
||||
ProposalGenerationMode prop_mode;
|
||||
|
||||
std::tie(basicParamsSet, propMode, additionalConfig) = obj.param;
|
||||
std::tie(basic_params_set, prop_mode) = obj.param;
|
||||
|
||||
ROIPoolingShapes inputShapes;
|
||||
std::vector<size_t> poolShape;
|
||||
ROIPoolingShapes shapes;
|
||||
std::vector<size_t> pool_shape;
|
||||
float spatial_scale;
|
||||
ngraph::helpers::ROIPoolingTypes pool_method;
|
||||
InferenceEngine::Precision netPrecision;
|
||||
std::tie(inputShapes, poolShape, spatial_scale, pool_method, netPrecision) = basicParamsSet;
|
||||
ov::test::utils::ROIPoolingTypes pool_method;
|
||||
ov::element::Type model_type;
|
||||
std::tie(shapes, pool_shape, spatial_scale, pool_method, model_type) = basic_params_set;
|
||||
|
||||
std::ostringstream result;
|
||||
result << "netPRC=" << netPrecision.name() << "_";
|
||||
for (const auto& shape : inputShapes) {
|
||||
result << "netPRC=" << model_type << "_";
|
||||
for (const auto& shape : shapes) {
|
||||
result << ov::test::utils::partialShape2str({ shape.first }) << "_";
|
||||
}
|
||||
result << "TS=";
|
||||
for (const auto& shape : inputShapes) {
|
||||
for (const auto& shape : shapes) {
|
||||
result << "(";
|
||||
if (!shape.second.empty()) {
|
||||
auto itr = shape.second.begin();
|
||||
@ -67,22 +63,17 @@ public:
|
||||
result << ")_";
|
||||
}
|
||||
|
||||
result << "PS=" << ov::test::utils::vec2str(poolShape) << "_";
|
||||
result << "PS=" << ov::test::utils::vec2str(pool_shape) << "_";
|
||||
result << "Scale=" << spatial_scale << "_";
|
||||
switch (pool_method) {
|
||||
case ngraph::helpers::ROIPoolingTypes::ROI_MAX:
|
||||
case ov::test::utils::ROIPoolingTypes::ROI_MAX:
|
||||
result << "Max_";
|
||||
break;
|
||||
case ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR:
|
||||
case ov::test::utils::ROIPoolingTypes::ROI_BILINEAR:
|
||||
result << "Bilinear_";
|
||||
break;
|
||||
}
|
||||
result << "config=(";
|
||||
for (const auto& configEntry : additionalConfig) {
|
||||
result << configEntry.first << ", " << configEntry.second << ":";
|
||||
}
|
||||
result << ")";
|
||||
switch (propMode) {
|
||||
switch (prop_mode) {
|
||||
case ProposalGenerationMode::ULTIMATE_RIGHT_BORDER:
|
||||
result << "_UltimateRightBorderProposal";
|
||||
break;
|
||||
@ -96,16 +87,16 @@ public:
|
||||
}
|
||||
|
||||
protected:
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
const ProposalGenerationMode propMode = std::get<1>(this->GetParam());
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
const ProposalGenerationMode prop_mode = std::get<1>(this->GetParam());
|
||||
const float spatial_scale = std::get<2>(std::get<0>(this->GetParam()));
|
||||
const ngraph::helpers::ROIPoolingTypes pool_method = std::get<3>(std::get<0>(this->GetParam()));
|
||||
const ov::test::utils::ROIPoolingTypes pool_method = std::get<3>(std::get<0>(this->GetParam()));
|
||||
|
||||
inputs.clear();
|
||||
const auto& funcInputs = function->inputs();
|
||||
|
||||
auto feat_map_shape = targetInputStaticShapes[0];
|
||||
const auto is_roi_max_mode = (pool_method == ngraph::helpers::ROIPoolingTypes::ROI_MAX);
|
||||
const auto is_roi_max_mode = (pool_method == ov::test::utils::ROIPoolingTypes::ROI_MAX);
|
||||
const int height = is_roi_max_mode ? feat_map_shape[2] / spatial_scale : 1;
|
||||
const int width = is_roi_max_mode ? feat_map_shape[3] / spatial_scale : 1;
|
||||
|
||||
@ -115,13 +106,13 @@ protected:
|
||||
|
||||
if (i == 1) {
|
||||
tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
|
||||
if (propMode == ULTIMATE_RIGHT_BORDER) {
|
||||
if (prop_mode == ULTIMATE_RIGHT_BORDER) {
|
||||
// because floating-point arithmetic is not exact, the following values cause the inequality:
// ((end_h - start_h) * (input_h - 1) / (pooled_h - 1)) * (pooled_h - 1) > (end_h - start_h) * (input_h - 1)
// and, as a result, the proposal value exceeds the right limit when the border case (current_h == pooled_h - 1)
// is not handled explicitly
|
||||
switch (funcInput.get_element_type()) {
|
||||
case ngraph::element::f32: {
|
||||
case ov::element::f32: {
|
||||
auto* dataPtr = tensor.data<float>();
|
||||
for (size_t i = 0; i < tensor.get_size(); i += 5) {
|
||||
dataPtr[i] = 0;
|
||||
@ -132,14 +123,14 @@ protected:
|
||||
}
|
||||
break;
|
||||
}
|
||||
case ngraph::element::bf16: {
|
||||
case ov::element::bf16: {
|
||||
auto* dataPtr = tensor.data<std::int16_t>();
|
||||
for (size_t i = 0; i < tensor.get_size(); i += 5) {
|
||||
dataPtr[i] = static_cast<std::int16_t>(ngraph::float16(0.f).to_bits());
|
||||
dataPtr[i + 1] = static_cast<std::int16_t>(ngraph::float16(0.f).to_bits());
|
||||
dataPtr[i + 2] = static_cast<std::int16_t>(ngraph::float16(0.248046786f).to_bits());
|
||||
dataPtr[i + 3] = static_cast<std::int16_t>(ngraph::float16(0.471333951f).to_bits());
|
||||
dataPtr[i + 4] = static_cast<std::int16_t>(ngraph::float16(1.f).to_bits());
|
||||
dataPtr[i] = static_cast<std::int16_t>(ov::float16(0.f).to_bits());
|
||||
dataPtr[i + 1] = static_cast<std::int16_t>(ov::float16(0.f).to_bits());
|
||||
dataPtr[i + 2] = static_cast<std::int16_t>(ov::float16(0.248046786f).to_bits());
|
||||
dataPtr[i + 3] = static_cast<std::int16_t>(ov::float16(0.471333951f).to_bits());
|
||||
dataPtr[i + 4] = static_cast<std::int16_t>(ov::float16(1.f).to_bits());
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -166,50 +157,42 @@ protected:
|
||||
}
|
||||
|
||||
void SetUp() override {
|
||||
ROIPoolingParams basicParamsSet;
|
||||
ProposalGenerationMode propMode;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
ROIPoolingParams basic_params_set;
|
||||
ProposalGenerationMode prop_mode;
|
||||
|
||||
std::tie(basicParamsSet, propMode, additionalConfig) = this->GetParam();
|
||||
ROIPoolingShapes inputShapes;
|
||||
std::vector<size_t> poolShape;
|
||||
std::tie(basic_params_set, prop_mode) = this->GetParam();
|
||||
ROIPoolingShapes shapes;
|
||||
std::vector<size_t> pool_shape;
|
||||
float spatial_scale;
|
||||
ngraph::helpers::ROIPoolingTypes pool_method;
|
||||
InferenceEngine::Precision netPrecision;
|
||||
std::tie(inputShapes, poolShape, spatial_scale, pool_method, netPrecision) = basicParamsSet;
|
||||
ov::test::utils::ROIPoolingTypes pool_method;
|
||||
ov::element::Type model_type;
|
||||
std::tie(shapes, pool_shape, spatial_scale, pool_method, model_type) = basic_params_set;
|
||||
|
||||
targetDevice = ov::test::utils::DEVICE_GPU;
|
||||
init_input_shapes(inputShapes);
|
||||
init_input_shapes(shapes);
|
||||
|
||||
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
|
||||
ov::ParameterVector params;
|
||||
for (auto&& shape : inputDynamicShapes)
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(ngPrc, shape));
|
||||
params.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
|
||||
|
||||
std::shared_ptr<ov::Node> roi_pooling;
|
||||
if (ov::test::utils::ROIPoolingTypes::ROI_MAX == pool_method) {
|
||||
roi_pooling = std::make_shared<ov::op::v0::ROIPooling>(params[0], params[1], poolShape, spatial_scale, "max");
|
||||
roi_pooling = std::make_shared<ov::op::v0::ROIPooling>(params[0], params[1], pool_shape, spatial_scale, "max");
|
||||
} else {
|
||||
roi_pooling = std::make_shared<ov::op::v0::ROIPooling>(params[0], params[1], poolShape, spatial_scale, "bilinear");
|
||||
roi_pooling = std::make_shared<ov::op::v0::ROIPooling>(params[0], params[1], pool_shape, spatial_scale, "bilinear");
|
||||
}
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < roi_pooling->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(roi_pooling->output(i)));
|
||||
function = std::make_shared<ngraph::Function>(results, params, "ROIPooling");
|
||||
functionRefs = ngraph::clone_function(*function);
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(roi_pooling->output(i)));
|
||||
function = std::make_shared<ov::Model>(results, params, "ROIPooling");
|
||||
functionRefs = function->clone();
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(ROIPoolingLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(ROIPoolingLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
std::map<std::string, std::string> emptyAdditionalConfig;
|
||||
|
||||
const std::vector<ROIPoolingShapes> inShapes = {
|
||||
ROIPoolingShapes{{{}, {{1, 3, 8, 8}}}, {{}, {{1, 5}}}},
|
||||
ROIPoolingShapes{{{}, {{1, 3, 8, 8}}}, {{}, {{3, 5}}}},
|
||||
@ -291,43 +274,39 @@ const std::vector<std::vector<size_t>> pooledShapes_bilinear = {
|
||||
{6, 6}
|
||||
};
|
||||
|
||||
const std::vector<InferenceEngine::Precision> netPRCs = {InferenceEngine::Precision::FP32};
|
||||
const std::vector<ov::element::Type> model_types = {ov::element::f32};
|
||||
|
||||
const std::vector<float> spatial_scales = {0.625f, 1.f};
|
||||
|
||||
const auto test_ROIPooling_max = ::testing::Combine(::testing::ValuesIn(inShapes),
|
||||
::testing::ValuesIn(pooledShapes_max),
|
||||
::testing::ValuesIn(spatial_scales),
|
||||
::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_MAX),
|
||||
::testing::ValuesIn(netPRCs));
|
||||
::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_MAX),
|
||||
::testing::ValuesIn(model_types));
|
||||
|
||||
const auto test_ROIPooling_bilinear = ::testing::Combine(::testing::ValuesIn(inShapes),
|
||||
::testing::ValuesIn(pooledShapes_bilinear),
|
||||
::testing::Values(spatial_scales[1]),
|
||||
::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR),
|
||||
::testing::ValuesIn(netPRCs));
|
||||
::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_BILINEAR),
|
||||
::testing::ValuesIn(model_types));
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingGPU_max, ROIPoolingLayerGPUTest,
|
||||
::testing::Combine(test_ROIPooling_max,
|
||||
::testing::Values(ProposalGenerationMode::RANDOM),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
::testing::Values(ProposalGenerationMode::RANDOM)),
|
||||
ROIPoolingLayerGPUTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingGPU_bilinear, ROIPoolingLayerGPUTest,
|
||||
::testing::Combine(test_ROIPooling_bilinear,
|
||||
::testing::Values(ProposalGenerationMode::RANDOM),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
::testing::Values(ProposalGenerationMode::RANDOM)),
|
||||
ROIPoolingLayerGPUTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingGPU_bilinear_ultimateRightBorderProposal, ROIPoolingLayerGPUTest,
|
||||
::testing::Combine(::testing::Combine(::testing::Values(ROIPoolingShapes{{{}, {{1, 1, 50, 50}}}, {{}, {{1, 5}}}}),
|
||||
::testing::Values(std::vector<size_t> { 4, 4 }),
|
||||
::testing::Values(spatial_scales[1]),
|
||||
::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR),
|
||||
::testing::Values(InferenceEngine::Precision::FP32)),
|
||||
::testing::Values(ProposalGenerationMode::ULTIMATE_RIGHT_BORDER),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_BILINEAR),
|
||||
::testing::Values(ov::element::f32)),
|
||||
::testing::Values(ProposalGenerationMode::ULTIMATE_RIGHT_BORDER)),
|
||||
ROIPoolingLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
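The border case targeted by ULTIMATE_RIGHT_BORDER in generate_inputs above comes down to one rounded division. A minimal standalone illustration, with values only loosely based on the suite's {1, 1, 50, 50} input and {4, 4} pooled shape, is the following; it simply prints both sides of the inequality so the effect of rounding can be observed.

// Standalone illustration of the rounding issue described in the ULTIMATE_RIGHT_BORDER comment
// (coordinate values are illustrative, not an exact reproduction of the kernel's arithmetic).
#include <cstdio>

int main() {
    const float start_h = 0.248046786f;   // proposal-like coordinates
    const float end_h   = 1.0f;
    const int   input_h  = 50;            // feature-map height
    const int   pooled_h = 4;             // pooled height

    // Height step used for the last bin (current_h == pooled_h - 1) in bilinear ROI pooling.
    const float step = (end_h - start_h) * (input_h - 1) / (pooled_h - 1);
    const float lhs  = step * (pooled_h - 1);                 // reconstructed ROI height
    const float rhs  = (end_h - start_h) * (input_h - 1);     // exact ROI height

    // Because the division result is rounded, lhs may be strictly greater than rhs,
    // pushing the sampling coordinate past the right border unless it is clamped.
    std::printf("lhs=%.9f rhs=%.9f lhs>rhs: %s\n", lhs, rhs, lhs > rhs ? "yes" : "no");
    return 0;
}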
@ -2,19 +2,18 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/scatter_ND_update.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ie_precision.hpp"
#include "ov_models/builders.hpp"
#include <common_test_utils/ov_tensor_utils.hpp>
#include <string>

using namespace ngraph;
using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/scatter_nd_update.hpp"
#include "openvino/op/scatter_update.hpp"
#include "openvino/op/scatter_elements_update.hpp"

namespace GPULayerTestsDefinitions {
using ScatterUpdateShapes = std::vector<InputShape>;
namespace {
using ScatterUpdateShapes = std::vector<ov::test::InputShape>;
using IndicesValues = std::vector<std::int64_t>;
|
||||
|
||||
enum class Scatterupdate_type {
|
||||
@ -31,24 +30,24 @@ struct ScatterUpdateLayerParams {
|
||||
|
||||
typedef std::tuple<
|
||||
ScatterUpdateLayerParams,
|
||||
ElementType, // input precision
|
||||
ElementType // indices precision
|
||||
ov::element::Type, // input precision
|
||||
ov::element::Type // indices precision
|
||||
> ScatterUpdateParams;
|
||||
|
||||
class ScatterUpdateLayerGPUTest : public testing::WithParamInterface<ScatterUpdateParams>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<ScatterUpdateParams> obj) {
|
||||
ScatterUpdateLayerParams scatterParams;
|
||||
ElementType inputPrecision;
|
||||
ElementType idxPrecision;
|
||||
std::tie(scatterParams, inputPrecision, idxPrecision) = obj.param;
|
||||
ov::element::Type model_type;
|
||||
ov::element::Type idx_type;
|
||||
std::tie(scatterParams, model_type, idx_type) = obj.param;
|
||||
const auto inputShapes = scatterParams.inputShapes;
|
||||
const auto indicesValues = scatterParams.indicesValues;
|
||||
const auto scType = scatterParams.scType;
|
||||
|
||||
std::ostringstream result;
|
||||
result << inputPrecision << "_IS=";
|
||||
result << model_type << "_IS=";
|
||||
for (const auto& shape : inputShapes) {
|
||||
result << ov::test::utils::partialShape2str({ shape.first }) << "_";
|
||||
}
|
||||
@ -61,7 +60,7 @@ public:
|
||||
result << ")_";
|
||||
}
|
||||
result << "indices_values=" << ov::test::utils::vec2str(indicesValues);
|
||||
result << "_idx_precision=" << idxPrecision;
|
||||
result << "_idx_precision=" << idx_type;
|
||||
result << "_scatter_mode=";
|
||||
switch (scType) {
|
||||
case Scatterupdate_type::ND:
|
||||
@ -84,30 +83,30 @@ protected:
|
||||
const auto& funcInputs = function->inputs();
|
||||
for (size_t i = 0; i < funcInputs.size(); ++i) {
|
||||
const auto& funcInput = funcInputs[i];
|
||||
const auto& inputPrecision = funcInput.get_element_type();
|
||||
const auto& model_type = funcInput.get_element_type();
|
||||
const auto& targetShape = targetInputStaticShapes[i];
|
||||
ov::Tensor tensor;
|
||||
if (i == 1) {
|
||||
tensor = ov::Tensor{ inputPrecision, targetShape };
|
||||
tensor = ov::Tensor{ model_type, targetShape };
|
||||
const auto indicesVals = std::get<0>(this->GetParam()).indicesValues;
|
||||
if (inputPrecision == ElementType::i32) {
|
||||
if (model_type == ov::element::i32) {
|
||||
auto data = tensor.data<std::int32_t>();
|
||||
for (size_t i = 0; i < tensor.get_size(); ++i) {
|
||||
data[i] = static_cast<std::int32_t>(indicesVals[i]);
|
||||
}
|
||||
} else if (inputPrecision == ElementType::i64) {
|
||||
} else if (model_type == ov::element::i64) {
|
||||
auto data = tensor.data<std::int64_t>();
|
||||
for (size_t i = 0; i < tensor.get_size(); ++i) {
|
||||
data[i] = indicesVals[i];
|
||||
}
|
||||
} else {
|
||||
OPENVINO_THROW("GatherNDUpdate. Unsupported indices precision: ", inputPrecision);
|
||||
OPENVINO_THROW("GatherNDUpdate. Unsupported indices precision: ", model_type);
|
||||
}
|
||||
} else {
|
||||
if (inputPrecision.is_real()) {
|
||||
tensor = ov::test::utils::create_and_fill_tensor(inputPrecision, targetShape, 10, 0, 1000);
|
||||
if (model_type.is_real()) {
|
||||
tensor = ov::test::utils::create_and_fill_tensor(model_type, targetShape, 10, 0, 1000);
|
||||
} else {
|
||||
tensor = ov::test::utils::create_and_fill_tensor(inputPrecision, targetShape);
|
||||
tensor = ov::test::utils::create_and_fill_tensor(model_type, targetShape);
|
||||
}
|
||||
}
|
||||
inputs.insert({ funcInput.get_node_shared_ptr(), tensor });
|
||||
@ -117,19 +116,19 @@ protected:
|
||||
void SetUp() override {
|
||||
targetDevice = ov::test::utils::DEVICE_GPU;
|
||||
ScatterUpdateLayerParams scatterParams;
|
||||
ElementType inputPrecision;
|
||||
ElementType idxPrecision;
|
||||
std::tie(scatterParams, inputPrecision, idxPrecision) = this->GetParam();
|
||||
ov::element::Type model_type;
|
||||
ov::element::Type idx_type;
|
||||
std::tie(scatterParams, model_type, idx_type) = this->GetParam();
|
||||
const auto inputShapes = scatterParams.inputShapes;
|
||||
const auto scType = scatterParams.scType;
|
||||
|
||||
init_input_shapes({inputShapes[0], inputShapes[1], inputShapes[2]});
|
||||
|
||||
|
||||
ov::ParameterVector dataParams{std::make_shared<ov::op::v0::Parameter>(inputPrecision, inputDynamicShapes[0]),
|
||||
std::make_shared<ov::op::v0::Parameter>(inputPrecision, inputDynamicShapes[2])};
|
||||
ov::ParameterVector dataParams{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0]),
|
||||
std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[2])};
|
||||
|
||||
auto indicesParam = std::make_shared<ov::op::v0::Parameter>(idxPrecision, inputDynamicShapes[1]);
|
||||
auto indicesParam = std::make_shared<ov::op::v0::Parameter>(idx_type, inputDynamicShapes[1]);
|
||||
dataParams[0]->set_friendly_name("Param_1");
|
||||
indicesParam->set_friendly_name("Param_2");
|
||||
dataParams[1]->set_friendly_name("Param_3");
|
||||
@ -137,42 +136,39 @@ protected:
|
||||
std::shared_ptr<ov::Node> scatter;
|
||||
switch (scType) {
|
||||
case Scatterupdate_type::ND: {
|
||||
scatter = std::make_shared<ngraph::opset4::ScatterNDUpdate>(dataParams[0], indicesParam, dataParams[1]);
|
||||
scatter = std::make_shared<ov::op::v3::ScatterNDUpdate>(dataParams[0], indicesParam, dataParams[1]);
|
||||
break;
|
||||
}
|
||||
case Scatterupdate_type::Elements: {
|
||||
auto axis = ov::op::v0::Constant::create(ov::element::i32, inputShapes[3].first.get_shape(), inputShapes[3].second[0]);
|
||||
scatter = std::make_shared<ngraph::opset4::ScatterElementsUpdate>(dataParams[0], indicesParam, dataParams[1], axis);
|
||||
scatter = std::make_shared<ov::op::v3::ScatterElementsUpdate>(dataParams[0], indicesParam, dataParams[1], axis);
|
||||
break;
|
||||
}
|
||||
case Scatterupdate_type::Basic:
|
||||
default: {
|
||||
auto axis = ov::op::v0::Constant::create(ov::element::i32, inputShapes[3].first.get_shape(), inputShapes[3].second[0]);
|
||||
scatter = std::make_shared<ngraph::opset4::ScatterUpdate>(dataParams[0], indicesParam, dataParams[1], axis);
|
||||
scatter = std::make_shared<ov::op::v3::ScatterUpdate>(dataParams[0], indicesParam, dataParams[1], axis);
|
||||
}
|
||||
}
|
||||
|
||||
ngraph::ParameterVector allParams{ dataParams[0], indicesParam, dataParams[1] };
|
||||
ov::ParameterVector allParams{ dataParams[0], indicesParam, dataParams[1] };
|
||||
|
||||
auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr<Node> &lastNode) {
|
||||
ResultVector results;
|
||||
auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr<ov::Node> &lastNode) {
|
||||
ov::ResultVector results;
|
||||
|
||||
for (size_t i = 0; i < lastNode->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<opset1::Result>(lastNode->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));
|
||||
|
||||
return std::make_shared<Function>(results, params, "ScatterUpdateLayerGPUTest");
|
||||
return std::make_shared<ov::Model>(results, params, "ScatterUpdateLayerGPUTest");
|
||||
};
|
||||
function = makeFunction(allParams, scatter);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(ScatterUpdateLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
TEST_P(ScatterUpdateLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace ScatterNDUpdate {
|
||||
|
||||
const std::vector<ScatterUpdateLayerParams> scatterNDParams = {
|
||||
ScatterUpdateLayerParams{
|
||||
ScatterUpdateShapes{
|
||||
@ -245,12 +241,12 @@ const std::vector<ScatterUpdateLayerParams> scatterElementsParams = {
|
||||
},
|
||||
};
|
||||
|
||||
const std::vector<ElementType> inputPrecisions = {
|
||||
ElementType::f32,
|
||||
const std::vector<ov::element::Type> model_types = {
|
||||
ov::element::f32,
|
||||
};
|
||||
|
||||
const std::vector<ElementType> constantPrecisions = {
|
||||
ElementType::i32,
|
||||
const std::vector<ov::element::Type> constantPrecisions = {
|
||||
ov::element::i32,
|
||||
};
|
||||
|
||||
const std::vector<ScatterUpdateLayerParams> scatterUpdate_EmptyInput1_2Params = {
|
||||
@ -294,28 +290,28 @@ const std::vector<ScatterUpdateLayerParams> scatterElementsUpdate_EmptyInput1_2P
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ScatterNDUpdate_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(scatterNDParams),
|
||||
::testing::ValuesIn(inputPrecisions),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::ValuesIn(constantPrecisions)),
|
||||
ScatterUpdateLayerGPUTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ScatterElementsUpdate_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(scatterElementsParams),
|
||||
::testing::ValuesIn(inputPrecisions),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::ValuesIn(constantPrecisions)),
|
||||
ScatterUpdateLayerGPUTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ScatterUpdate_EmptyInput1_2_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(scatterUpdate_EmptyInput1_2Params),
|
||||
::testing::ValuesIn(inputPrecisions),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::ValuesIn(constantPrecisions)),
|
||||
ScatterUpdateLayerGPUTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ScatterNDUpdate_EmptyInput1_2_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(scatterNDUpdate_EmptyInput1_2Params),
|
||||
::testing::ValuesIn(inputPrecisions),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::ValuesIn(constantPrecisions)),
|
||||
ScatterUpdateLayerGPUTest::getTestCaseName);
|
||||
|
||||
@ -323,8 +319,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ScatterNDUpdate_EmptyInput1_2_CompareWithRefs_dyn
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ScatterElementsUpdate_EmptyInput1_2_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(scatterElementsUpdate_EmptyInput1_2Params),
|
||||
::testing::ValuesIn(inputPrecisions),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::ValuesIn(constantPrecisions)),
|
||||
ScatterUpdateLayerGPUTest::getTestCaseName);
|
||||
} // namespace ScatterNDUpdate
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
} // namespace
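The indices handling in generate_inputs above reduces to the helper-style sketch below; the free function and its name are illustrative, not part of the commit.

// Fill an indices tensor from a list of int64 values, downcasting for i32 indices
// (mirrors the i == 1 branch of ScatterUpdateLayerGPUTest::generate_inputs).
#include <cstdint>
#include <vector>
#include "openvino/core/except.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/tensor.hpp"

ov::Tensor make_indices_tensor(const ov::element::Type& idx_type,
                               const ov::Shape& shape,
                               const std::vector<std::int64_t>& indices_values) {
    ov::Tensor tensor{idx_type, shape};
    if (idx_type == ov::element::i32) {
        auto* data = tensor.data<std::int32_t>();
        for (size_t i = 0; i < tensor.get_size(); ++i)
            data[i] = static_cast<std::int32_t>(indices_values[i]);
    } else if (idx_type == ov::element::i64) {
        auto* data = tensor.data<std::int64_t>();
        for (size_t i = 0; i < tensor.get_size(); ++i)
            data[i] = indices_values[i];
    } else {
        OPENVINO_THROW("Unsupported indices precision: ", idx_type);
    }
    return tensor;
}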
|
||||
|
@ -2,34 +2,33 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/select.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include <string>
|
||||
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/select.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
typedef std::tuple<
std::vector<InputShape>, // input shapes
ElementType, // precision of 'then' and 'else' inputs
op::AutoBroadcastSpec, // broadcast spec
TargetDevice // device name
std::vector<InputShape>, // input shapes
ov::element::Type, // precision of 'then' and 'else' inputs
ov::op::AutoBroadcastSpec, // broadcast spec
std::string // device name
> SelectLayerTestParamSet;
|
||||
|
||||
class SelectLayerGPUTest : public testing::WithParamInterface<SelectLayerTestParamSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(const testing::TestParamInfo<SelectLayerTestParamSet>& obj) {
|
||||
std::vector<InputShape> inshapes;
|
||||
ElementType netType;
|
||||
op::AutoBroadcastSpec broadcast;
|
||||
TargetDevice targetDevice;
|
||||
std::tie(inshapes, netType, broadcast, targetDevice) = obj.param;
|
||||
ov::element::Type model_type;
|
||||
ov::op::AutoBroadcastSpec broadcast;
|
||||
std::string targetDevice;
|
||||
std::tie(inshapes, model_type, broadcast, targetDevice) = obj.param;
|
||||
|
||||
std::ostringstream result;
|
||||
|
||||
@ -43,7 +42,7 @@ public:
|
||||
result << ov::test::utils::vec2str(item) << "_";
|
||||
}
|
||||
}
|
||||
result << "Precision=" << netType << "_";
|
||||
result << "Precision=" << model_type << "_";
|
||||
result << "Broadcast=" << broadcast.m_type << "_";
|
||||
result << "trgDev=" << targetDevice;
|
||||
|
||||
@ -53,48 +52,42 @@ public:
|
||||
protected:
|
||||
void SetUp() override {
|
||||
std::vector<InputShape> inshapes;
|
||||
ElementType netType;
|
||||
op::AutoBroadcastSpec broadcast;
|
||||
std::tie(inshapes, netType, broadcast, targetDevice) = this->GetParam();
|
||||
ov::element::Type model_type;
|
||||
ov::op::AutoBroadcastSpec broadcast;
|
||||
std::tie(inshapes, model_type, broadcast, targetDevice) = this->GetParam();
|
||||
|
||||
init_input_shapes(inshapes);
|
||||
|
||||
ParameterVector params = {
|
||||
std::make_shared<opset1::Parameter>(ElementType::boolean, inputDynamicShapes[0]),
|
||||
std::make_shared<opset1::Parameter>(netType, inputDynamicShapes[1]),
|
||||
std::make_shared<opset1::Parameter>(netType, inputDynamicShapes[2]),
|
||||
ov::ParameterVector params = {
|
||||
std::make_shared<ov::op::v0::Parameter>(ov::element::boolean, inputDynamicShapes[0]),
|
||||
std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[1]),
|
||||
std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[2]),
|
||||
};
|
||||
|
||||
auto select = std::make_shared<ov::op::v1::Select>(params[0], params[1], params[2], broadcast);
|
||||
|
||||
auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr<Node> &lastNode) {
|
||||
ResultVector results;
|
||||
auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr<ov::Node> &lastNode) {
|
||||
ov::ResultVector results;
|
||||
|
||||
for (size_t i = 0; i < lastNode->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<opset1::Result>(lastNode->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));
|
||||
|
||||
return std::make_shared<Function>(results, params, "SelectLayerGPUTest");
|
||||
return std::make_shared<ov::Model>(results, params, "SelectLayerGPUTest");
|
||||
};
|
||||
function = makeFunction(params, select);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(SelectLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(SelectLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<ElementType> netPrecisions = {
|
||||
ElementType::f32,
|
||||
ElementType::f16,
|
||||
ElementType::i32,
|
||||
const std::vector<ov::element::Type> model_types = {
|
||||
ov::element::f32,
|
||||
ov::element::f16,
|
||||
ov::element::i32,
|
||||
};
|
||||
|
||||
namespace Select {
|
||||
|
||||
// AutoBroadcastType: NUMPY
|
||||
const std::vector<std::vector<InputShape>> inShapesDynamicNumpy = {
|
||||
{
|
||||
@ -131,8 +124,8 @@ const std::vector<std::vector<InputShape>> inShapesDynamicNumpy = {
|
||||
|
||||
const auto numpyCases = ::testing::Combine(
|
||||
::testing::ValuesIn(inShapesDynamicNumpy),
|
||||
::testing::ValuesIn(netPrecisions),
|
||||
::testing::Values(op::AutoBroadcastType::NUMPY),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::Values(ov::op::AutoBroadcastType::NUMPY),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU)
|
||||
);
|
||||
|
||||
@ -148,8 +141,8 @@ const std::vector<std::vector<InputShape>> inShapesDynamicRangeNumpy = {
|
||||
|
||||
const auto rangeNumpyCases = ::testing::Combine(
|
||||
::testing::ValuesIn(inShapesDynamicRangeNumpy),
|
||||
::testing::ValuesIn(netPrecisions),
|
||||
::testing::Values(op::AutoBroadcastType::NUMPY),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::Values(ov::op::AutoBroadcastType::NUMPY),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU)
|
||||
);
|
||||
|
||||
@ -171,13 +164,10 @@ const std::vector<std::vector<InputShape>> inShapesDynamicNone = {
|
||||
|
||||
const auto noneCases = ::testing::Combine(
|
||||
::testing::ValuesIn(inShapesDynamicNone),
|
||||
::testing::ValuesIn(netPrecisions),
|
||||
::testing::Values(op::AutoBroadcastType::NONE),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::Values(ov::op::AutoBroadcastType::NONE),
|
||||
::testing::Values(ov::test::utils::DEVICE_GPU)
|
||||
);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_select_CompareWithRefsNone_dynamic, SelectLayerGPUTest, noneCases, SelectLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace Select
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
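The Select suite above is parameterized over the NUMPY and NONE broadcast modes. A small sketch of what those parameters build, with illustrative dynamic shapes, could look like this.

#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/select.hpp"

std::shared_ptr<ov::Model> make_select_model(const ov::op::AutoBroadcastSpec& broadcast) {
    // With NUMPY broadcasting the condition/then/else shapes may differ and are broadcast
    // together; with NONE they must match exactly.
    auto cond    = std::make_shared<ov::op::v0::Parameter>(ov::element::boolean, ov::PartialShape{-1, -1, -1, -1});
    auto then_in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{-1, -1, -1, -1});
    auto else_in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{-1, -1, -1, -1});
    auto select  = std::make_shared<ov::op::v1::Select>(cond, then_in, else_in, broadcast);
    return std::make_shared<ov::Model>(ov::OutputVector{select}, ov::ParameterVector{cond, then_in, else_in});
}

// Usage:
//   auto numpy_model = make_select_model(ov::op::AutoBroadcastType::NUMPY);
//   auto none_model  = make_select_model(ov::op::AutoBroadcastType::NONE);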
@ -2,36 +2,34 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/shape_of.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ie_precision.hpp"
#include "ov_models/builders.hpp"
#include <string>

using namespace ngraph;
using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/shape_of.hpp"

using ElementType = ov::element::Type_t;
namespace {
using ov::test::InputShape;

namespace GPULayerTestsDefinitions {
|
||||
typedef std::tuple<
|
||||
InputShape,
|
||||
ElementType // Net precision
|
||||
ov::element::Type
|
||||
> ShapeOfLayerGPUTestParamsSet;
|
||||
|
||||
class ShapeOfLayerGPUTest : public testing::WithParamInterface<ShapeOfLayerGPUTestParamsSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<ShapeOfLayerGPUTestParamsSet> obj) {
|
||||
InputShape inputShape;
|
||||
ElementType netPrecision;
|
||||
std::tie(inputShape, netPrecision) = obj.param;
|
||||
ov::element::Type model_type;
|
||||
std::tie(inputShape, model_type) = obj.param;
|
||||
|
||||
std::ostringstream result;
|
||||
result << "ShapeOfTest_";
|
||||
result << std::to_string(obj.index) << "_";
|
||||
result << "netPrec=" << netPrecision << "_";
|
||||
result << "netPrec=" << model_type << "_";
|
||||
result << "IS=";
|
||||
result << ov::test::utils::partialShape2str({inputShape.first}) << "_";
|
||||
result << "TS=(";
|
||||
@ -45,43 +43,39 @@ protected:
|
||||
void SetUp() override {
|
||||
targetDevice = ov::test::utils::DEVICE_GPU;
|
||||
|
||||
auto netPrecision = ElementType::undefined;
|
||||
ov::element::Type model_type;
|
||||
InputShape inputShape;
|
||||
std::tie(inputShape, netPrecision) = this->GetParam();
|
||||
std::tie(inputShape, model_type) = this->GetParam();
|
||||
|
||||
init_input_shapes({inputShape});
|
||||
|
||||
outType = ElementType::i32;
|
||||
outType = ov::element::i32;
|
||||
|
||||
ov::ParameterVector functionParams;
|
||||
for (auto&& shape : inputDynamicShapes)
|
||||
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, shape));
|
||||
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
|
||||
|
||||
auto shapeOfOp = std::make_shared<opset3::ShapeOf>(functionParams[0], element::i32);
|
||||
auto shapeOfOp = std::make_shared<ov::op::v3::ShapeOf>(functionParams[0], ov::element::i32);
|
||||
|
||||
auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr<Node> &lastNode) {
|
||||
ResultVector results;
|
||||
auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr<ov::Node> &lastNode) {
|
||||
ov::ResultVector results;
|
||||
|
||||
for (size_t i = 0; i < lastNode->get_output_size(); i++)
|
||||
results.push_back(std::make_shared<opset1::Result>(lastNode->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));
|
||||
|
||||
return std::make_shared<Function>(results, params, "ShapeOfLayerGPUTest");
|
||||
return std::make_shared<ov::Model>(results, params, "ShapeOfLayerGPUTest");
|
||||
};
|
||||
|
||||
function = makeFunction(functionParams, shapeOfOp);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(ShapeOfLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(ShapeOfLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
const std::vector<ElementType> netPrecisions = {
|
||||
ElementType::i32,
|
||||
const std::vector<ov::element::Type> model_types = {
|
||||
ov::element::i32,
|
||||
};
|
||||
|
||||
// We don't check the static case because of constant folding
|
||||
@ -110,10 +104,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_3d_compareWithRefs_dynamic,
|
||||
ShapeOfLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(inShapesDynamic3d),
|
||||
::testing::ValuesIn(netPrecisions)),
|
||||
::testing::ValuesIn(model_types)),
|
||||
ShapeOfLayerGPUTest::getTestCaseName);
|
||||
|
||||
std::vector<Shape> inShapesStatic3d = {
|
||||
std::vector<ov::Shape> inShapesStatic3d = {
|
||||
{ 8, 5, 4 },
|
||||
{ 8, 5, 3 },
|
||||
{ 8, 5, 2 },
|
||||
@ -124,8 +118,8 @@ std::vector<Shape> inShapesStatic3d = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_3d_compareWithRefs_static,
|
||||
ShapeOfLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic3d)),
|
||||
::testing::ValuesIn(netPrecisions)),
|
||||
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesStatic3d)),
|
||||
::testing::Values(ov::element::i32)),
|
||||
ShapeOfLayerGPUTest::getTestCaseName);
|
||||
|
||||
// ==============================================================================
|
||||
@ -152,10 +146,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_4d_compareWithRefs_dynamic,
|
||||
ShapeOfLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(inShapesDynamic4d),
|
||||
::testing::ValuesIn(netPrecisions)),
|
||||
::testing::ValuesIn(model_types)),
|
||||
ShapeOfLayerGPUTest::getTestCaseName);
|
||||
|
||||
std::vector<Shape> inShapesStatic4d = {
|
||||
std::vector<ov::Shape> inShapesStatic4d = {
|
||||
{ 8, 5, 3, 4 },
|
||||
{ 8, 5, 3, 3 },
|
||||
{ 8, 5, 3, 2 },
|
||||
@ -166,8 +160,8 @@ std::vector<Shape> inShapesStatic4d = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_4d_compareWithRefs_static,
|
||||
ShapeOfLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic4d)),
|
||||
::testing::ValuesIn(netPrecisions)),
|
||||
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesStatic4d)),
|
||||
::testing::ValuesIn(model_types)),
|
||||
ShapeOfLayerGPUTest::getTestCaseName);
|
||||
|
||||
// ==============================================================================
|
||||
@ -194,10 +188,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_5d_compareWithRefs_dynamic,
|
||||
ShapeOfLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(inShapesDynamic5d),
|
||||
::testing::ValuesIn(netPrecisions)),
|
||||
::testing::ValuesIn(model_types)),
|
||||
ShapeOfLayerGPUTest::getTestCaseName);
|
||||
|
||||
std::vector<Shape> inShapesStatic5d = {
|
||||
std::vector<ov::Shape> inShapesStatic5d = {
|
||||
{ 8, 5, 3, 2, 4 },
|
||||
{ 8, 5, 3, 2, 3 },
|
||||
{ 8, 5, 3, 2, 2 },
|
||||
@ -208,37 +202,35 @@ std::vector<Shape> inShapesStatic5d = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_5d_compareWithRefs_static,
|
||||
ShapeOfLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic5d)),
|
||||
::testing::ValuesIn(netPrecisions)),
|
||||
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesStatic5d)),
|
||||
::testing::ValuesIn(model_types)),
|
||||
ShapeOfLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
|
||||
using ShapeOfParams = typename std::tuple<
|
||||
InputShape, // Shape
|
||||
InferenceEngine::Precision, // Precision
|
||||
LayerTestsUtils::TargetDevice // Device name
|
||||
InputShape, // Shape
|
||||
ov::element::Type, // Model type
|
||||
std::string // Device name
|
||||
>;
|
||||
|
||||
class ShapeOfDynamicInputGPUTest : public testing::WithParamInterface<ShapeOfParams>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(const testing::TestParamInfo<ShapeOfParams>& obj) {
|
||||
InputShape inputShapes;
|
||||
InferenceEngine::Precision dataPrc;
|
||||
InputShape shapes;
|
||||
ov::element::Type model_type;
|
||||
std::string targetDevice;
|
||||
|
||||
std::tie(inputShapes, dataPrc, targetDevice) = obj.param;
|
||||
std::tie(shapes, model_type, targetDevice) = obj.param;
|
||||
std::ostringstream result;
|
||||
result << "IS=(";
|
||||
result << ov::test::utils::partialShape2str({inputShapes.first}) << "_";
|
||||
for (size_t i = 0lu; i < inputShapes.second.size(); i++) {
|
||||
result << ov::test::utils::partialShape2str({shapes.first}) << "_";
|
||||
for (size_t i = 0lu; i < shapes.second.size(); i++) {
|
||||
result << "{";
|
||||
result << ov::test::utils::vec2str(inputShapes.second[i]) << "_";
|
||||
result << ov::test::utils::vec2str(shapes.second[i]) << "_";
|
||||
result << "}_";
|
||||
}
|
||||
result << ")_";
|
||||
result << "netPRC=" << dataPrc << "_";
|
||||
result << "netPRC=" << model_type << "_";
|
||||
result << "targetDevice=" << targetDevice << "_";
|
||||
auto res_str = result.str();
|
||||
std::replace(res_str.begin(), res_str.end(), '-', '_');
|
||||
@ -247,56 +239,44 @@ public:
|
||||
|
||||
protected:
|
||||
void SetUp() override {
|
||||
InputShape inputShapes;
|
||||
InferenceEngine::Precision dataPrc;
|
||||
InputShape shapes;
|
||||
ov::element::Type model_type;
|
||||
targetDevice = ov::test::utils::DEVICE_GPU;
|
||||
|
||||
std::tie(inputShapes, dataPrc, targetDevice) = GetParam();
|
||||
std::tie(shapes, model_type, targetDevice) = GetParam();
|
||||
|
||||
init_input_shapes({inputShapes});
|
||||
init_input_shapes({shapes});
|
||||
|
||||
InferenceEngine::PreProcessInfo pre_process_info;
|
||||
pre_process_info.setVariant(InferenceEngine::MeanVariant::MEAN_VALUE);
|
||||
|
||||
const auto prc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(dataPrc);
|
||||
|
||||
auto input = std::make_shared<ngraph::opset9::Parameter>(prc, inputShapes.first);
|
||||
input->get_output_tensor(0).get_rt_info()["ie_legacy_preproc"] = pre_process_info;
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(model_type, shapes.first);
|
||||
input->set_friendly_name("input_data");
|
||||
|
||||
auto shape_of_01 = std::make_shared<ngraph::opset9::ShapeOf>(input);
|
||||
auto shape_of_01 = std::make_shared<ov::op::v3::ShapeOf>(input);
|
||||
shape_of_01->set_friendly_name("shape_of_01");
|
||||
|
||||
auto shape_of_02 = std::make_shared<ngraph::opset9::ShapeOf>(shape_of_01);
|
||||
auto shape_of_02 = std::make_shared<ov::op::v3::ShapeOf>(shape_of_01);
|
||||
shape_of_02->set_friendly_name("shape_of_02");
|
||||
|
||||
auto result = std::make_shared<ngraph::opset1::Result>(shape_of_02);
|
||||
auto result = std::make_shared<ov::op::v0::Result>(shape_of_02);
|
||||
result->set_friendly_name("outer_result");
|
||||
|
||||
function = std::make_shared<ngraph::Function>(ngraph::OutputVector{result}, ngraph::ParameterVector{input});
|
||||
function = std::make_shared<ov::Model>(ov::OutputVector{result}, ov::ParameterVector{input});
|
||||
function->set_friendly_name("shape_of_test");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(ShapeOfDynamicInputGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(ShapeOfDynamicInputGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
const std::vector<ov::test::InputShape> dynamicInputShapes = {
|
||||
const std::vector<ov::test::InputShape> dynamicshapes = {
|
||||
ov::test::InputShape(ov::PartialShape({-1, -1, -1, -1, -1}), {{4, 1, 1, 64, 32}, {6, 1, 1, 8, 4}, {8, 1, 1, 24, 16}}),
|
||||
};
|
||||
|
||||
const std::vector<InferenceEngine::Precision> dynamicInputPrec = {
|
||||
InferenceEngine::Precision::FP16,
|
||||
};
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_Check, ShapeOfDynamicInputGPUTest,
|
||||
testing::Combine(
|
||||
testing::ValuesIn(dynamicInputShapes), // input shapes
|
||||
testing::ValuesIn(dynamicInputPrec), // network precision
|
||||
testing::ValuesIn(dynamicshapes), // input shapes
|
||||
testing::Values(ov::element::f16), // network precision
|
||||
testing::Values<std::string>(ov::test::utils::DEVICE_GPU)), // device type
|
||||
ShapeOfDynamicInputGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
} // namespace
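ShapeOfDynamicInputGPUTest above chains two ShapeOf ops; a minimal standalone sketch of the same topology (the function name and the f16/5-D input are illustrative) is shown below. The first ShapeOf yields the runtime shape as a 1-D i64 tensor of length equal to the rank, so the second ShapeOf yields the rank as a one-element tensor.

#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/shape_of.hpp"

std::shared_ptr<ov::Model> make_double_shape_of() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{-1, -1, -1, -1, -1});
    auto shape_of_01 = std::make_shared<ov::op::v3::ShapeOf>(input);        // [rank] elements
    auto shape_of_02 = std::make_shared<ov::op::v3::ShapeOf>(shape_of_01);  // single element: the rank
    return std::make_shared<ov::Model>(ov::OutputVector{shape_of_02}, ov::ParameterVector{input});
}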
|
||||
|
@ -2,33 +2,33 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <ov_models/builders.hpp>
#include "shared_test_classes/single_layer/shape_of.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace ngraph;
using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/softmax.hpp"

namespace GPULayerTestsDefinitions {
namespace {

typedef std::tuple<ElementType, // netPrecision
ov::test::InputShape, // inputShape
int64_t // axis
>
softmaxGPUTestParamsSet;
typedef std::tuple<
ov::element::Type, // model type
ov::test::InputShape, // inputShape
int64_t> // axis
softmaxGPUTestParamsSet;

class SoftMaxLayerGPUTest : public testing::WithParamInterface<softmaxGPUTestParamsSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<softmaxGPUTestParamsSet>& obj) {
ElementType inType;
ov::element::Type model_type;
ov::test::InputShape inShape;
int64_t axis;
std::tie(inType, inShape, axis) = obj.param;
std::tie(model_type, inShape, axis) = obj.param;

std::ostringstream result;
result << "netPRC=" << inType << "_";
result << "netPRC=" << model_type << "_";
result << "IS=" << ov::test::utils::partialShape2str({inShape.first}) << "_";
result << "TS=";
for (const auto& shape : inShape.second) {
@ -42,40 +42,39 @@ public:
protected:
void SetUp() override {
targetDevice = ov::test::utils::DEVICE_GPU;
ElementType inType;
ov::element::Type model_type;
ov::test::InputShape inShape;
int64_t axis;
std::tie(inType, inShape, axis) = this->GetParam();
std::tie(model_type, inShape, axis) = this->GetParam();

if (inType == element::Type_t::f16) {
if (model_type == ov::element::f16) {
abs_threshold = 0.005;
}

init_input_shapes({inShape});
ov::ParameterVector params;
for (auto&& shape : inputDynamicShapes)
params.push_back(std::make_shared<ov::op::v0::Parameter>(inType, shape));
params.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));

const auto softMax = std::make_shared<ngraph::opset1::Softmax>(params.at(0), axis);
auto makeFunction = [](ParameterVector &params, const std::shared_ptr<Node> &lastNode) {
ResultVector results;
const auto softMax = std::make_shared<ov::op::v1::Softmax>(params.at(0), axis);
auto makeFunction = [](ov::ParameterVector &params, const std::shared_ptr<ov::Node> &lastNode) {
ov::ResultVector results;

for (size_t i = 0; i < lastNode->get_output_size(); i++)
results.push_back(std::make_shared<opset1::Result>(lastNode->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));

return std::make_shared<Function>(results, params, "ShapeOfLayerGPUTest");
return std::make_shared<ov::Model>(results, params, "ShapeOfLayerGPUTest");
};
function = makeFunction(params, softMax);
}
};

TEST_P(SoftMaxLayerGPUTest, CompareWithRefs) {
TEST_P(SoftMaxLayerGPUTest, Inference) {
run();
}

namespace {
const std::vector<ElementType> netPrecisions = {
ElementType::f32, ElementType::f16
const std::vector<ov::element::Type> netPrecisions = {
ov::element::f32, ov::element::f16
};

const std::vector<int64_t> axis2D = {0, 1};
@ -137,6 +136,4 @@ INSTANTIATE_TEST_SUITE_P(softMaxGPUDynamicTest5D,
testing::ValuesIn(inputShapes5D),
testing::ValuesIn(axis5D)),
SoftMaxLayerGPUTest::getTestCaseName);

} // namespace
} // namespace GPULayerTestsDefinitions

@ -2,16 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/space_to_batch.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ov_models/builders.hpp"
#include "common_test_utils/test_constants.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/test_enums.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/space_to_batch.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

struct SpaceToBatchParams {
std::vector<int64_t> block;
@ -22,22 +23,19 @@ struct SpaceToBatchParams {
typedef std::tuple<
InputShape, // Input shapes
SpaceToBatchParams,
ElementType, // Element type
ngraph::helpers::InputLayerType, // block/begin/end input type
std::map<std::string, std::string> // Additional network configuration
> SpaceToBatchParamsLayerParamSet;
ov::element::Type, // Element type
ov::test::utils::InputLayerType> // block/begin/end input type
SpaceToBatchParamsLayerParamSet;

class SpaceToBatchLayerGPUTest : public testing::WithParamInterface<SpaceToBatchParamsLayerParamSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<SpaceToBatchParamsLayerParamSet>& obj) {
InputShape shapes;
SpaceToBatchParams params;
ElementType elementType;
ngraph::helpers::InputLayerType restInputType;
TargetDevice targetDevice;
std::map<std::string, std::string> additionalConfig;
std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param;
ov::element::Type elementType;
ov::test::utils::InputLayerType restInputType;
std::tie(shapes, params, elementType, restInputType) = obj.param;

std::ostringstream results;
results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
@ -49,17 +47,12 @@ public:
results << "block=" << ov::test::utils::vec2str(params.block) << "_";
results << "begin=" << ov::test::utils::vec2str(params.begin) << "_";
results << "end=" << ov::test::utils::vec2str(params.end) << "_";
results << "restInputType=" << restInputType << "_";
results << "config=(";
for (const auto& configEntry : additionalConfig) {
results << configEntry.first << ", " << configEntry.second << ":";
}
results << ")";
results << "restInputType=" << restInputType;

return results.str();
}

void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
inputs.clear();
const auto& funcInputs = function->inputs();
for (size_t i = 0; i < funcInputs.size(); ++i) {
@ -100,9 +93,8 @@ protected:
void SetUp() override {
InputShape shapes;
SpaceToBatchParams ssParams;
ngraph::helpers::InputLayerType restInputType;
std::map<std::string, std::string> additionalConfig;
std::tie(shapes, ssParams, inType, restInputType, additionalConfig) = this->GetParam();
ov::test::utils::InputLayerType restInputType;
std::tie(shapes, ssParams, inType, restInputType) = this->GetParam();

block = ssParams.block;
begin = ssParams.begin;
@ -112,7 +104,7 @@ protected:

std::vector<InputShape> inputShapes;
inputShapes.push_back(shapes);
if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) {
if (restInputType == ov::test::utils::InputLayerType::PARAMETER) {
inputShapes.push_back(InputShape({static_cast<int64_t>(block.size())}, std::vector<ov::Shape>(shapes.second.size(), {block.size()})));
inputShapes.push_back(InputShape({static_cast<int64_t>(begin.size())}, std::vector<ov::Shape>(shapes.second.size(), {begin.size()})));
inputShapes.push_back(InputShape({static_cast<int64_t>(end.size())}, std::vector<ov::Shape>(shapes.second.size(), {end.size()})));
@ -122,10 +114,10 @@ protected:

ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(inType, inputDynamicShapes.front())};
std::shared_ptr<ov::Node> blockInput, beginInput, endInput;
if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) {
auto blockNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{block.size()});
auto beginNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{begin.size()});
auto endNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{end.size()});
if (restInputType == ov::test::utils::InputLayerType::PARAMETER) {
auto blockNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{block.size()});
auto beginNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{begin.size()});
auto endNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{end.size()});

params.push_back(blockNode);
params.push_back(beginNode);
@ -135,38 +127,32 @@ protected:
beginInput = beginNode;
endInput = endNode;
} else {
blockInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{block.size()}, block);
beginInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{begin.size()}, begin);
endInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{end.size()}, end);
blockInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{block.size()}, block);
beginInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{begin.size()}, begin);
endInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{end.size()}, end);
}
auto ss = std::make_shared<ngraph::op::v1::SpaceToBatch>(params[0], blockInput, beginInput, endInput);
auto ss = std::make_shared<ov::op::v1::SpaceToBatch>(params[0], blockInput, beginInput, endInput);

ngraph::ResultVector results;
ov::ResultVector results;
for (size_t i = 0; i < ss->get_output_size(); i++) {
results.push_back(std::make_shared<ngraph::opset1::Result>(ss->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(ss->output(i)));
}

function = std::make_shared<ngraph::Function>(results, params, "SpaceToBatchFuncTest");
function = std::make_shared<ov::Model>(results, params, "SpaceToBatchFuncTest");
}
};

TEST_P(SpaceToBatchLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

TEST_P(SpaceToBatchLayerGPUTest, Inference) {
run();
}

namespace {

std::map<std::string, std::string> emptyAdditionalConfig;

const std::vector<ElementType> inputPrecisions = {
ElementType::f32
const std::vector<ov::element::Type> inputPrecisions = {
ov::element::f32
};

const std::vector<ngraph::helpers::InputLayerType> restInputTypes = {
ngraph::helpers::InputLayerType::CONSTANT,
ngraph::helpers::InputLayerType::PARAMETER
const std::vector<ov::test::utils::InputLayerType> restInputTypes = {
ov::test::utils::InputLayerType::CONSTANT,
ov::test::utils::InputLayerType::PARAMETER
};

const std::vector<InputShape> inputShapesDynamic3D = {
@ -183,8 +169,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Dynamic3D, SpaceToBatchLayerGPUTe
::testing::ValuesIn(inputShapesDynamic3D),
::testing::ValuesIn(paramsPlain3D),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(restInputTypes),
::testing::Values(emptyAdditionalConfig)),
::testing::ValuesIn(restInputTypes)),
SpaceToBatchLayerGPUTest::getTestCaseName);


@ -202,8 +187,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Dynamic4D, SpaceToBatchLayerGPUTe
::testing::ValuesIn(inputShapesDynamic4D),
::testing::ValuesIn(paramsPlain4D),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(restInputTypes),
::testing::Values(emptyAdditionalConfig)),
::testing::ValuesIn(restInputTypes)),
SpaceToBatchLayerGPUTest::getTestCaseName);

const std::vector<InputShape> inputShapesDynamic5D = {
@ -220,9 +204,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Dynamic5D, SpaceToBatchLayerGPUTe
::testing::ValuesIn(inputShapesDynamic5D),
::testing::ValuesIn(paramsPlain5D),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(restInputTypes),
::testing::Values(emptyAdditionalConfig)),
::testing::ValuesIn(restInputTypes)),
SpaceToBatchLayerGPUTest::getTestCaseName);

} // namespace
} // namespace GPULayerTestsDefinitions

@ -2,23 +2,23 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/space_to_depth.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ie_precision.hpp"
#include "ov_models/builders.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include <string>
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace ov::op::v0;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/space_to_depth.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;
using ov::op::v0::SpaceToDepth;

typedef std::tuple<
InputShape, // Input shape
ElementType, // Input element type
SpaceToDepth::SpaceToDepthMode, // Mode
std::size_t // Block size
InputShape, // Input shape
ov::element::Type, // Input element type
SpaceToDepth::SpaceToDepthMode, // Mode
std::size_t // Block size
> SpaceToDepthLayerGPUTestParams;

class SpaceToDepthLayerGPUTest : public testing::WithParamInterface<SpaceToDepthLayerGPUTestParams>,
@ -26,10 +26,10 @@ class SpaceToDepthLayerGPUTest : public testing::WithParamInterface<SpaceToDepth
public:
static std::string getTestCaseName(testing::TestParamInfo<SpaceToDepthLayerGPUTestParams> obj) {
InputShape shapes;
ElementType inType;
ov::element::Type model_type;
SpaceToDepth::SpaceToDepthMode mode;
std::size_t blockSize;
std::tie(shapes, inType, mode, blockSize) = obj.param;
std::size_t block_size;
std::tie(shapes, model_type, mode, block_size) = obj.param;

std::ostringstream results;
results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
@ -37,7 +37,7 @@ public:
for (const auto& item : shapes.second) {
results << ov::test::utils::vec2str(item) << "_";
}
results << "Prc=" << inType << "_";
results << "Prc=" << model_type << "_";
switch (mode) {
case SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST:
results << "BLOCKS_FIRST_";
@ -48,7 +48,7 @@ public:
default:
throw std::runtime_error("Unsupported SpaceToDepthMode");
}
results << "BS=" << blockSize;
results << "BS=" << block_size;

return results.str();
}
@ -57,17 +57,18 @@ protected:
void SetUp() override {
InputShape shapes;
SpaceToDepth::SpaceToDepthMode mode;
std::size_t blockSize;
std::tie(shapes, inType, mode, blockSize) = this->GetParam();
std::size_t block_size;
ov::element::Type model_type;
std::tie(shapes, model_type, mode, block_size) = this->GetParam();

targetDevice = ov::test::utils::DEVICE_GPU;
init_input_shapes({shapes});

ov::ParameterVector params;
for (auto&& shape : inputDynamicShapes)
params.push_back(std::make_shared<ov::op::v0::Parameter>(inType, shape));
params.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));

auto d2s = std::make_shared<ov::op::v0::SpaceToDepth>(params[0], mode, blockSize);
auto d2s = std::make_shared<ov::op::v0::SpaceToDepth>(params[0], mode, block_size);

ov::ResultVector results;
for (size_t i = 0; i < d2s->get_output_size(); i++)
@ -76,18 +77,14 @@ protected:
}
};

TEST_P(SpaceToDepthLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

TEST_P(SpaceToDepthLayerGPUTest, Inference) {
run();
}

namespace {

const std::vector<ElementType> inputElementType = {
ElementType::f32,
ElementType::f16,
ElementType::i8
const std::vector<ov::element::Type> model_types = {
ov::element::f32,
ov::element::f16,
ov::element::i8
};

const std::vector<SpaceToDepth::SpaceToDepthMode> SpaceToDepthModes = {
@ -96,7 +93,6 @@ const std::vector<SpaceToDepth::SpaceToDepthMode> SpaceToDepthModes = {
};

// ======================== Static Shapes Tests ========================
namespace static_shapes {

const std::vector<ov::Shape> inputShapesBS2_4D = {
{1, 16, 8, 8},
@ -115,16 +111,16 @@ const std::vector<ov::Shape> inputShapesBS3_4D = {

INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthStaticBS2_4D, SpaceToDepthLayerGPUTest,
testing::Combine(
testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_4D)),
testing::ValuesIn(inputElementType),
testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS2_4D)),
testing::ValuesIn(model_types),
testing::ValuesIn(SpaceToDepthModes),
testing::Values(1, 4)),
SpaceToDepthLayerGPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthStaticBS3_4D, SpaceToDepthLayerGPUTest,
testing::Combine(
testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_4D)),
testing::ValuesIn(inputElementType),
testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS3_4D)),
testing::ValuesIn(model_types),
testing::ValuesIn(SpaceToDepthModes),
testing::Values(1, 3)),
SpaceToDepthLayerGPUTest::getTestCaseName);
@ -146,24 +142,22 @@ const std::vector<ov::Shape> inputShapesBS3_5D = {

INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthStaticBS2_5D, SpaceToDepthLayerGPUTest,
testing::Combine(
testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_5D)),
testing::ValuesIn(inputElementType),
testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS2_5D)),
testing::ValuesIn(model_types),
testing::ValuesIn(SpaceToDepthModes),
testing::Values(1, 4)),
SpaceToDepthLayerGPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthStaticBS3_5D, SpaceToDepthLayerGPUTest,
testing::Combine(
testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_5D)),
testing::ValuesIn(inputElementType),
testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS3_5D)),
testing::ValuesIn(model_types),
testing::ValuesIn(SpaceToDepthModes),
testing::Values(1, 3)),
SpaceToDepthLayerGPUTest::getTestCaseName);

} // namespace static_shapes

//======================== Dynamic Shapes Tests ========================
namespace dynamic_shapes {

const std::vector<InputShape> inputShapes4D = {
{{-1, -1, -1, -1}, {{2, 3, 12, 24}}},
@ -176,7 +170,7 @@ const std::vector<InputShape> inputShapes5D = {
INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthDynamic4D, SpaceToDepthLayerGPUTest,
testing::Combine(
testing::ValuesIn(inputShapes4D),
testing::ValuesIn(inputElementType),
testing::ValuesIn(model_types),
testing::ValuesIn(SpaceToDepthModes),
testing::Values(1, 2, 3)),
SpaceToDepthLayerGPUTest::getTestCaseName);
@ -184,12 +178,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthDynamic4D, SpaceToDepthLayerGPUTes
INSTANTIATE_TEST_SUITE_P(smoke_GPUSpaceToDepthDynamic5D, SpaceToDepthLayerGPUTest,
testing::Combine(
testing::ValuesIn(inputShapes5D),
testing::ValuesIn(inputElementType),
testing::ValuesIn(model_types),
testing::ValuesIn(SpaceToDepthModes),
testing::Values(1, 2)),
SpaceToDepthLayerGPUTest::getTestCaseName);

} // namespace dynamic_shapes

} // namespace
} // namespace GPULayerTestsDefinitions

@ -2,51 +2,50 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/select.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include <string>
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
|
||||
using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/variadic_split.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
typedef std::tuple<
|
||||
size_t, // Num splits
|
||||
int64_t, // Axis
|
||||
ElementType, // Net precision
|
||||
ov::element::Type, // Model type
|
||||
InputShape, // Input shapes
|
||||
std::vector<size_t> // Used outputs indices
|
||||
> splitDynamicGPUTestParams;
|
||||
|
||||
class SplitLayerGPUDynamicTest : public testing::WithParamInterface<splitDynamicGPUTestParams>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<splitDynamicGPUTestParams> obj) {
|
||||
std::ostringstream result;
|
||||
size_t numSplits;
|
||||
size_t num_splits;
|
||||
int64_t axis;
|
||||
ElementType netPrecision;
|
||||
InputShape inputShape;
|
||||
std::vector<size_t> outIndices;
|
||||
std::tie(numSplits, axis, netPrecision, inputShape, outIndices) = obj.param;
|
||||
ov::element::Type model_type;
|
||||
InputShape input_shape;
|
||||
std::vector<size_t> out_indices;
|
||||
std::tie(num_splits, axis, model_type, input_shape, out_indices) = obj.param;
|
||||
|
||||
result << "IS=";
|
||||
result << ov::test::utils::partialShape2str({inputShape.first}) << "_";
|
||||
result << ov::test::utils::partialShape2str({input_shape.first}) << "_";
|
||||
result << "TS=";
|
||||
for (const auto& shape : inputShape.second) {
|
||||
for (const auto& shape : input_shape.second) {
|
||||
result << ov::test::utils::vec2str(shape) << "_";
|
||||
}
|
||||
result << "numSplits=" << numSplits << "_";
|
||||
result << "num_splits=" << num_splits << "_";
|
||||
result << "axis=" << axis << "_";
|
||||
if (!outIndices.empty()) {
|
||||
result << "outIndices" << ov::test::utils::vec2str(outIndices) << "_";
|
||||
if (!out_indices.empty()) {
|
||||
result << "out_indices" << ov::test::utils::vec2str(out_indices) << "_";
|
||||
}
|
||||
result << "netPRC=" << netPrecision << "_";
|
||||
result << "netPRC=" << model_type << "_";
|
||||
return result.str();
|
||||
}
|
||||
|
||||
@ -54,47 +53,46 @@ protected:
|
||||
void SetUp() override {
|
||||
targetDevice = ov::test::utils::DEVICE_GPU;
|
||||
int64_t axis;
|
||||
size_t numSplits;
|
||||
InputShape inputShape;
|
||||
std::vector<size_t> outIndices;
|
||||
ElementType netPrecision;
|
||||
std::tie(numSplits, axis, netPrecision, inputShape, outIndices) = this->GetParam();
|
||||
if (outIndices.empty()) {
|
||||
for (size_t i = 0; i < numSplits; ++i) {
|
||||
outIndices.push_back(i);
|
||||
size_t num_splits;
|
||||
InputShape input_shape;
|
||||
std::vector<size_t> out_indices;
|
||||
ov::element::Type model_type;
|
||||
std::tie(num_splits, axis, model_type, input_shape, out_indices) = this->GetParam();
|
||||
if (out_indices.empty()) {
|
||||
for (size_t i = 0; i < num_splits; ++i) {
|
||||
out_indices.push_back(i);
|
||||
}
|
||||
}
|
||||
init_input_shapes({inputShape});
|
||||
ov::ParameterVector dyn_params{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
|
||||
init_input_shapes({input_shape});
|
||||
ov::ParameterVector dyn_params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};
|
||||
auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{axis});
|
||||
auto split = std::make_shared<ov::op::v1::Split>(dyn_params[0], split_axis_op, numSplits);
|
||||
auto split = std::make_shared<ov::op::v1::Split>(dyn_params[0], split_axis_op, num_splits);
|
||||
|
||||
ngraph::ResultVector results;
|
||||
for (size_t i = 0; i < outIndices.size(); i++) {
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(split->output(outIndices[i])));
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < out_indices.size(); i++) {
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(split->output(out_indices[i])));
|
||||
}
|
||||
function = std::make_shared<ngraph::Function>(results, dyn_params, "split");
|
||||
function = std::make_shared<ov::Model>(results, dyn_params, "split");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(SplitLayerGPUDynamicTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
TEST_P(SplitLayerGPUDynamicTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
const std::vector<InputShape> inputShapes4d = {
|
||||
const std::vector<InputShape> input_shapes4d = {
|
||||
{
|
||||
{-1, -1, -1, -1}, {{1, 4, 5, 7}, {3, 8, 5, 9}, {5, 16, 1, 8}}
|
||||
}
|
||||
};
|
||||
|
||||
const std::vector<InputShape> inputShapes5d = {
|
||||
const std::vector<InputShape> input_shapes5d = {
|
||||
{
|
||||
{-1, -1, -1, -1, -1}, {{10, 20, 30, 40, 10}, {5, 18, 3, 10, 10}, {3, 10, 6, 2, 4}}
|
||||
}
|
||||
};
|
||||
|
||||
const std::vector<InputShape> inputShapes6d = {
|
||||
const std::vector<InputShape> input_shapes6d = {
|
||||
{
|
||||
{-1, -1, -1, -1, -1, -1}, {{10, 32, 3, 4, 12, 24}, {5, 2, 3, 1, 32, 12}, {3, 1, 6, 2, 4, 18}}
|
||||
}
|
||||
@ -104,63 +102,63 @@ INSTANTIATE_TEST_SUITE_P(smoke_SplitsCheck4Dr, SplitLayerGPUDynamicTest,
|
||||
::testing::Combine(
|
||||
::testing::Values(2), // nSplits
|
||||
::testing::Values(1), // axes
|
||||
::testing::Values(ElementType::f16), // netPrec
|
||||
::testing::ValuesIn(inputShapes4d), // inShapes
|
||||
::testing::Values(std::vector<size_t>({}))), // outIndices
|
||||
::testing::Values(ov::element::f16), // netPrec
|
||||
::testing::ValuesIn(input_shapes4d), // inShapes
|
||||
::testing::Values(std::vector<size_t>({}))), // out_indices
|
||||
SplitLayerGPUDynamicTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_SplitsCheck5D, SplitLayerGPUDynamicTest,
|
||||
::testing::Combine(
|
||||
::testing::Values(3), // nSplits
|
||||
::testing::Values(2), // axes
|
||||
::testing::Values(ElementType::f32), // netPrec
|
||||
::testing::ValuesIn(inputShapes5d), // inShapes
|
||||
::testing::Values(std::vector<size_t>({}))), // outIndices
|
||||
::testing::Values(ov::element::f32), // netPrec
|
||||
::testing::ValuesIn(input_shapes5d), // inShapes
|
||||
::testing::Values(std::vector<size_t>({}))), // out_indices
|
||||
SplitLayerGPUDynamicTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_SplitsCheck6D, SplitLayerGPUDynamicTest,
|
||||
::testing::Combine(
|
||||
::testing::Values(4), // nSplits
|
||||
::testing::Values(4), // axes
|
||||
::testing::Values(ElementType::i8), // netPrec
|
||||
::testing::ValuesIn(inputShapes6d), // inShapes
|
||||
::testing::Values(std::vector<size_t>({}))), // outIndices
|
||||
::testing::Values(ov::element::i8), // netPrec
|
||||
::testing::ValuesIn(input_shapes6d), // inShapes
|
||||
::testing::Values(std::vector<size_t>({}))), // out_indices
|
||||
SplitLayerGPUDynamicTest::getTestCaseName);
|
||||
|
||||
typedef std::tuple<
|
||||
int64_t, // Axis
|
||||
std::vector<int32_t>, // SplitLength
|
||||
ElementType, // Net precision
|
||||
ov::element::Type, // Model type
|
||||
InputShape, // Input shapes
|
||||
ngraph::helpers::InputLayerType // input type of splitLength
|
||||
ov::test::utils::InputLayerType // input type of split_length
|
||||
> varSplitDynamicGPUTestParams;
|
||||
|
||||
class VariadicSplitLayerGPUDynamicTest : public testing::WithParamInterface<varSplitDynamicGPUTestParams>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<varSplitDynamicGPUTestParams> obj) {
|
||||
std::ostringstream result;
|
||||
int64_t axis;
|
||||
std::vector<int32_t> splitLength;
|
||||
ElementType netPrecision;
|
||||
InputShape inputShape;
|
||||
ngraph::helpers::InputLayerType inputType;
|
||||
std::tie(axis, splitLength, netPrecision, inputShape, inputType) = obj.param;
|
||||
std::vector<int32_t> split_length;
|
||||
ov::element::Type model_type;
|
||||
InputShape input_shape;
|
||||
ov::test::utils::InputLayerType inputType;
|
||||
std::tie(axis, split_length, model_type, input_shape, inputType) = obj.param;
|
||||
|
||||
result << "IS=";
|
||||
result << ov::test::utils::partialShape2str({inputShape.first}) << "_";
|
||||
result << ov::test::utils::partialShape2str({input_shape.first}) << "_";
|
||||
result << "TS=";
|
||||
for (const auto& shape : inputShape.second) {
|
||||
for (const auto& shape : input_shape.second) {
|
||||
result << ov::test::utils::vec2str(shape) << "_";
|
||||
}
|
||||
result << "SplitLen=" << ov::test::utils::vec2str(splitLength) << "_";
|
||||
result << "SplitLen=" << ov::test::utils::vec2str(split_length) << "_";
|
||||
result << "axis=" << axis << "_";
|
||||
result << "netPRC=" << netPrecision << "_";
|
||||
result << "netPRC=" << model_type << "_";
|
||||
result << "restInputType=" << inputType << "_";
|
||||
return result.str();
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
inputs.clear();
|
||||
const auto& funcInputs = function->inputs();
|
||||
for (size_t i = 0; i < funcInputs.size(); ++i) {
|
||||
@ -169,8 +167,8 @@ public:
|
||||
if (i == 1) {
|
||||
tensor = ov::Tensor(ov::element::i64, targetInputStaticShapes[i]);
|
||||
auto *dataPtr = tensor.data<ov::element_type_traits<ov::element::i64>::value_type>();
|
||||
for (size_t i = 0; i < splitLength_vec.size(); i++) {
|
||||
dataPtr[i] = splitLength_vec[i];
|
||||
for (size_t i = 0; i < split_length_vec.size(); i++) {
|
||||
dataPtr[i] = split_length_vec[i];
|
||||
}
|
||||
} else {
|
||||
tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
|
||||
@ -181,89 +179,88 @@ public:
|
||||
}
|
||||
|
||||
protected:
|
||||
std::vector<int32_t> splitLength_vec;
|
||||
std::vector<int32_t> split_length_vec;
|
||||
size_t inferRequestNum = 0;
|
||||
ElementType netPrecision;
|
||||
ov::element::Type model_type;
|
||||
|
||||
void SetUp() override {
|
||||
targetDevice = ov::test::utils::DEVICE_GPU;
|
||||
int64_t axis;
|
||||
InputShape inputShape;
|
||||
std::vector<int32_t> splitLength;
|
||||
ngraph::helpers::InputLayerType inputType;
|
||||
std::tie(axis, splitLength, netPrecision, inputShape, inputType) = this->GetParam();
|
||||
InputShape input_shape;
|
||||
std::vector<int32_t> split_length;
|
||||
ov::test::utils::InputLayerType inputType;
|
||||
std::tie(axis, split_length, model_type, input_shape, inputType) = this->GetParam();
|
||||
|
||||
splitLength_vec = splitLength;
|
||||
split_length_vec = split_length;
|
||||
|
||||
std::vector<InputShape> inputShapes;
|
||||
inputShapes.push_back(inputShape);
|
||||
if (inputType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
inputShapes.push_back(InputShape({static_cast<int64_t>(splitLength.size())},
|
||||
std::vector<ov::Shape>(inputShape.second.size(), {splitLength.size()})));
|
||||
std::vector<InputShape> input_shapes;
|
||||
input_shapes.push_back(input_shape);
|
||||
if (inputType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
input_shapes.push_back(InputShape({static_cast<int64_t>(split_length.size())},
|
||||
std::vector<ov::Shape>(input_shape.second.size(), {split_length.size()})));
|
||||
}
|
||||
init_input_shapes(inputShapes);
|
||||
init_input_shapes(input_shapes);
|
||||
|
||||
ov::ParameterVector dyn_params{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
|
||||
ov::ParameterVector dyn_params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};
|
||||
|
||||
auto splitAxisOp = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{}, std::vector<int64_t>{static_cast<int64_t>(axis)});
|
||||
auto splitAxisOp = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, std::vector<int64_t>{static_cast<int64_t>(axis)});
|
||||
|
||||
std::shared_ptr<ov::Node> splitLengthOp;
|
||||
if (inputType == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
auto splitLengthNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{splitLength.size()});
|
||||
dyn_params.push_back(splitLengthNode);
|
||||
splitLengthOp = splitLengthNode;
|
||||
std::shared_ptr<ov::Node> split_lengthOp;
|
||||
if (inputType == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
auto split_lengthNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{split_length.size()});
|
||||
dyn_params.push_back(split_lengthNode);
|
||||
split_lengthOp = split_lengthNode;
|
||||
} else {
|
||||
splitLengthOp = std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{splitLength.size()}, splitLength);
|
||||
split_lengthOp = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{split_length.size()}, split_length);
|
||||
}
|
||||
|
||||
auto varSplit = std::make_shared<ngraph::opset3::VariadicSplit>(dyn_params[0], splitAxisOp, splitLengthOp);
|
||||
ngraph::ResultVector results;
|
||||
for (size_t i = 0; i < splitLength.size(); i++) {
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(varSplit->output(i)));
|
||||
auto varSplit = std::make_shared<ov::op::v1::VariadicSplit>(dyn_params[0], splitAxisOp, split_lengthOp);
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < split_length.size(); i++) {
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(varSplit->output(i)));
|
||||
}
|
||||
function = std::make_shared<ngraph::Function>(results, dyn_params, "varSplit");
|
||||
function = std::make_shared<ov::Model>(results, dyn_params, "varSplit");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(VariadicSplitLayerGPUDynamicTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
TEST_P(VariadicSplitLayerGPUDynamicTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
const std::vector<ngraph::helpers::InputLayerType> restInputTypes = {
|
||||
ngraph::helpers::InputLayerType::CONSTANT,
|
||||
ngraph::helpers::InputLayerType::PARAMETER
|
||||
const std::vector<ov::test::utils::InputLayerType> restInputTypes = {
|
||||
ov::test::utils::InputLayerType::CONSTANT,
|
||||
ov::test::utils::InputLayerType::PARAMETER
|
||||
};
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck4D, VariadicSplitLayerGPUDynamicTest,
|
||||
::testing::Combine(
|
||||
::testing::Values(1), // axes
|
||||
::testing::Values(std::vector<int32_t>{2, 1, -1}), // splitLength
|
||||
::testing::Values(ElementType::f16), // netPrec
|
||||
::testing::ValuesIn(inputShapes4d), // inShapes
|
||||
::testing::ValuesIn(restInputTypes)), // input type of splitLength
|
||||
::testing::Values(std::vector<int32_t>{2, 1, -1}), // split_length
|
||||
::testing::Values(ov::element::f16), // netPrec
|
||||
::testing::ValuesIn(input_shapes4d), // inShapes
|
||||
::testing::ValuesIn(restInputTypes)), // input type of split_length
|
||||
VariadicSplitLayerGPUDynamicTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck5D, VariadicSplitLayerGPUDynamicTest,
|
||||
::testing::Combine(
|
||||
::testing::Values(2), // axes
|
||||
::testing::Values(std::vector<int32_t>{2, -1}), // splitLength
|
||||
::testing::Values(ElementType::f32), // netPrec
|
||||
::testing::ValuesIn(inputShapes5d), // inShapes
|
||||
::testing::ValuesIn(restInputTypes)), // input type of splitLength
|
||||
::testing::Values(std::vector<int32_t>{2, -1}), // split_length
|
||||
::testing::Values(ov::element::f32), // netPrec
|
||||
::testing::ValuesIn(input_shapes5d), // inShapes
|
||||
::testing::ValuesIn(restInputTypes)), // input type of split_length
|
||||
VariadicSplitLayerGPUDynamicTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck6D, VariadicSplitLayerGPUDynamicTest,
|
||||
::testing::Combine(
|
||||
::testing::Values(5), // nSplits
|
||||
::testing::Values(std::vector<int32_t>{2, 3, 2, -1}), // splitLength
|
||||
::testing::Values(ElementType::i8), // netPrec
|
||||
::testing::ValuesIn(inputShapes6d), // inShapes
|
||||
::testing::ValuesIn(restInputTypes)), // input type of splitLength
|
||||
::testing::Values(std::vector<int32_t>{2, 3, 2, -1}), // split_length
|
||||
::testing::Values(ov::element::i8), // netPrec
|
||||
::testing::ValuesIn(input_shapes6d), // inShapes
|
||||
::testing::ValuesIn(restInputTypes)), // input type of split_length
|
||||
VariadicSplitLayerGPUDynamicTest::getTestCaseName);
|
||||
|
||||
|
||||
const std::vector<InputShape> inputShapes4d_static = {
|
||||
const std::vector<InputShape> input_shapes4d_static = {
|
||||
{
|
||||
{5, 16, 10, 8}, {{5, 16, 10, 8}, }
|
||||
}
|
||||
@ -272,10 +269,10 @@ const std::vector<InputShape> inputShapes4d_static = {
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck4D_static_input_dyn_output, VariadicSplitLayerGPUDynamicTest,
|
||||
::testing::Combine(
|
||||
::testing::Values(1), // axes
|
||||
::testing::Values(std::vector<int32_t>{2, 1, -1}), // splitLength
|
||||
::testing::Values(ElementType::f16), // netPrec
|
||||
::testing::ValuesIn(inputShapes4d_static), // inShapes
|
||||
::testing::ValuesIn(restInputTypes)), // input type of splitLength
|
||||
::testing::Values(std::vector<int32_t>{2, 1, -1}), // split_length
|
||||
::testing::Values(ov::element::f16), // netPrec
|
||||
::testing::ValuesIn(input_shapes4d_static), // inShapes
|
||||
::testing::ValuesIn(restInputTypes)), // input type of split_length
|
||||
VariadicSplitLayerGPUDynamicTest::getTestCaseName);
|
||||
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
} // namespace
|
||||
|
@ -2,16 +2,17 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/single_layer/strided_slice.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "ov_models/builders.hpp"
|
||||
#include "common_test_utils/test_constants.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "common_test_utils/test_enums.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
#include "openvino/op/parameter.hpp"
|
||||
#include "openvino/op/constant.hpp"
|
||||
#include "openvino/op/result.hpp"
|
||||
#include "openvino/op/strided_slice.hpp"
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
namespace {
|
||||
using ov::test::InputShape;
|
||||
|
||||
struct StridedSliceParams {
|
||||
std::vector<int64_t> begin;
|
||||
@ -27,22 +28,20 @@ struct StridedSliceParams {
|
||||
typedef std::tuple<
|
||||
InputShape, // Input shapes
|
||||
StridedSliceParams,
|
||||
ElementType, // Element type
|
||||
std::vector<ngraph::helpers::InputLayerType>, // begin/end/stride input type
|
||||
std::map<std::string, std::string> // Additional network configuration
|
||||
ov::element::Type, // Element type
|
||||
std::vector<ov::test::utils::InputLayerType> // begin/end/stride input type
|
||||
> StridedSliceLayerParamSet;
|
||||
|
||||
class StridedSliceLayerGPUTest : public testing::WithParamInterface<StridedSliceLayerParamSet>,
|
||||
virtual public SubgraphBaseTest {
|
||||
virtual public ov::test::SubgraphBaseTest {
|
||||
public:
|
||||
static std::string getTestCaseName(const testing::TestParamInfo<StridedSliceLayerParamSet>& obj) {
|
||||
InputShape shapes;
|
||||
StridedSliceParams params;
|
||||
ElementType elementType;
|
||||
std::vector<ngraph::helpers::InputLayerType> restInputType;
|
||||
TargetDevice targetDevice;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param;
|
||||
ov::element::Type model_type;
|
||||
std::vector<ov::test::utils::InputLayerType> rest_input_type;
|
||||
std::string targetDevice;
|
||||
std::tie(shapes, params, model_type, rest_input_type) = obj.param;
|
||||
|
||||
std::ostringstream results;
|
||||
results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
|
||||
@ -50,7 +49,7 @@ public:
|
||||
for (const auto& item : shapes.second) {
|
||||
results << ov::test::utils::vec2str(item) << "_";
|
||||
}
|
||||
results << "netPRC=" << elementType << "_";
|
||||
results << "netPRC=" << model_type << "_";
|
||||
results << "begin=" << ov::test::utils::vec2str(params.begin) << "_";
|
||||
results << "end=" << ov::test::utils::vec2str(params.end) << "_";
|
||||
results << "stride=" << ov::test::utils::vec2str(params.stride) << "_";
|
||||
@ -59,19 +58,14 @@ public:
|
||||
results << "new_axis_m=" << (params.newAxisMask.empty() ? "def" : ov::test::utils::vec2str(params.newAxisMask)) << "_";
|
||||
results << "shrink_m=" << (params.shrinkAxisMask.empty() ? "def" : ov::test::utils::vec2str(params.shrinkAxisMask)) << "_";
|
||||
results << "ellipsis_m=" << (params.ellipsisAxisMask.empty() ? "def" : ov::test::utils::vec2str(params.ellipsisAxisMask)) << "_";
|
||||
results << "beginType=" << restInputType[0] << "_";
|
||||
results << "endType=" << restInputType[1] << "_";
|
||||
results << "strideType=" << restInputType[2] << "_";
|
||||
results << "config=(";
|
||||
for (const auto& configEntry : additionalConfig) {
|
||||
results << configEntry.first << ", " << configEntry.second << ":";
|
||||
}
|
||||
results << ")";
|
||||
results << "beginType=" << rest_input_type[0] << "_";
|
||||
results << "endType=" << rest_input_type[1] << "_";
|
||||
results << "strideType=" << rest_input_type[2];
|
||||
|
||||
return results.str();
|
||||
}
|
||||
|
||||
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
|
||||
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
|
||||
inputs.clear();
|
||||
const auto& funcInputs = function->inputs();
|
||||
ov::Tensor tensor;
|
||||
@ -82,7 +76,7 @@ public:
|
||||
inputs.insert({funcInputs[idx].get_node_shared_ptr(), tensor});
|
||||
|
||||
// input1: begin
|
||||
if (restInputType[0] == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
if (rest_input_type[0] == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
idx += 1;
|
||||
tensor = ov::Tensor(funcInputs[idx].get_element_type(), targetInputStaticShapes[idx]);
|
||||
auto *dataPtr = tensor.data<float>();
|
||||
@ -93,7 +87,7 @@ public:
|
||||
}
|
||||
|
||||
// input2: end
|
||||
if (restInputType[1] == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
if (rest_input_type[1] == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
idx += 1;
|
||||
tensor = ov::Tensor(funcInputs[idx].get_element_type(), targetInputStaticShapes[idx]);
|
||||
auto *dataPtr = tensor.data<float>();
|
||||
@ -104,7 +98,7 @@ public:
|
||||
}
|
||||
|
||||
// input3: stride
|
||||
if (restInputType[2] == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
if (rest_input_type[2] == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
idx += 1;
|
||||
tensor = ov::Tensor(funcInputs[idx].get_element_type(), targetInputStaticShapes[idx]);
|
||||
auto *dataPtr = tensor.data<float>();
|
||||
@ -121,14 +115,13 @@ protected:
|
||||
std::vector<int64_t> begin;
|
||||
std::vector<int64_t> end;
|
||||
std::vector<int64_t> stride;
|
||||
std::vector<ngraph::helpers::InputLayerType> restInputType;
|
||||
std::vector<ov::test::utils::InputLayerType> rest_input_type;
|
||||
size_t inferRequestNum = 0;
|
||||
|
||||
void SetUp() override {
|
||||
InputShape shapes;
|
||||
StridedSliceParams ssParams;
|
||||
std::map<std::string, std::string> additionalConfig;
|
||||
std::tie(shapes, ssParams, inType, restInputType, additionalConfig) = this->GetParam();
|
||||
std::tie(shapes, ssParams, inType, rest_input_type) = this->GetParam();
|
||||
|
||||
begin = ssParams.begin;
|
||||
end = ssParams.end;
|
||||
@ -138,11 +131,11 @@ protected:
|
||||
|
||||
std::vector<InputShape> inputShapes;
|
||||
inputShapes.push_back(shapes);
|
||||
if (restInputType[0] == ngraph::helpers::InputLayerType::PARAMETER)
|
||||
if (rest_input_type[0] == ov::test::utils::InputLayerType::PARAMETER)
|
||||
inputShapes.push_back(InputShape({static_cast<int64_t>(begin.size())}, std::vector<ov::Shape>(shapes.second.size(), {begin.size()})));
|
||||
if (restInputType[1] == ngraph::helpers::InputLayerType::PARAMETER)
|
||||
if (rest_input_type[1] == ov::test::utils::InputLayerType::PARAMETER)
|
||||
inputShapes.push_back(InputShape({static_cast<int64_t>(end.size())}, std::vector<ov::Shape>(shapes.second.size(), {end.size()})));
|
||||
if (restInputType[2] == ngraph::helpers::InputLayerType::PARAMETER)
|
||||
if (rest_input_type[2] == ov::test::utils::InputLayerType::PARAMETER)
|
||||
inputShapes.push_back(InputShape({static_cast<int64_t>(stride.size())}, std::vector<ov::Shape>(shapes.second.size(), {stride.size()})));
|
||||
|
||||
init_input_shapes(inputShapes);
|
||||
@ -150,65 +143,59 @@ protected:
|
||||
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(inType, inputDynamicShapes.front())};
|
||||
|
||||
std::shared_ptr<ov::Node> beginInput, endInput, strideInput;
|
||||
if (restInputType[0] == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
auto beginNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{begin.size()});
|
||||
if (rest_input_type[0] == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
auto beginNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{begin.size()});
|
||||
params.push_back(beginNode);
|
||||
beginInput = beginNode;
|
||||
} else {
|
||||
beginInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{begin.size()}, begin);
|
||||
beginInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{begin.size()}, begin);
|
||||
}
|
||||
|
||||
if (restInputType[1] == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
auto endNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{end.size()});
|
||||
if (rest_input_type[1] == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
auto endNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{end.size()});
|
||||
params.push_back(endNode);
|
||||
endInput = endNode;
|
||||
} else {
|
||||
endInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{end.size()}, end);
|
||||
endInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{end.size()}, end);
|
||||
}
|
||||
|
||||
if (restInputType[2] == ngraph::helpers::InputLayerType::PARAMETER) {
|
||||
auto strideNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{stride.size()});
|
||||
if (rest_input_type[2] == ov::test::utils::InputLayerType::PARAMETER) {
|
||||
auto strideNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{stride.size()});
|
||||
params.push_back(strideNode);
|
||||
strideInput = strideNode;
|
||||
} else {
|
||||
strideInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{stride.size()}, stride);
|
||||
strideInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{stride.size()}, stride);
|
||||
}
|
||||
|
||||
auto ss = std::make_shared<ngraph::op::v1::StridedSlice>(params[0], beginInput, endInput, strideInput, ssParams.beginMask, ssParams.endMask,
|
||||
auto ss = std::make_shared<ov::op::v1::StridedSlice>(params[0], beginInput, endInput, strideInput, ssParams.beginMask, ssParams.endMask,
|
||||
ssParams.newAxisMask, ssParams.shrinkAxisMask, ssParams.ellipsisAxisMask);
|
||||
|
||||
ngraph::ResultVector results;
|
||||
ov::ResultVector results;
|
||||
for (size_t i = 0; i < ss->get_output_size(); i++) {
|
||||
results.push_back(std::make_shared<ngraph::opset1::Result>(ss->output(i)));
|
||||
results.push_back(std::make_shared<ov::op::v0::Result>(ss->output(i)));
|
||||
}
|
||||
|
||||
function = std::make_shared<ngraph::Function>(results, params, "StridedSlice");
|
||||
function = std::make_shared<ov::Model>(results, params, "StridedSlice");
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(StridedSliceLayerGPUTest, CompareWithRefs) {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
|
||||
TEST_P(StridedSliceLayerGPUTest, Inference) {
|
||||
run();
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
std::map<std::string, std::string> emptyAdditionalConfig;
|
||||
|
||||
const std::vector<ElementType> inputPrecisions = {
|
||||
ElementType::f32
|
||||
const std::vector<ov::element::Type> model_types = {
|
||||
ov::element::f32
|
||||
};
|
||||
|
||||
const std::vector<std::vector<ngraph::helpers::InputLayerType>> restInputTypes = {
|
||||
{ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT},
|
||||
{ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER},
|
||||
{ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT},
|
||||
{ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT},
|
||||
{ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER},
|
||||
{ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER},
|
||||
{ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER},
|
||||
{ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT},
|
||||
const std::vector<std::vector<ov::test::utils::InputLayerType>> rest_input_types = {
|
||||
{ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT},
|
||||
{ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER},
|
||||
{ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT},
|
||||
{ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT},
|
||||
{ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER},
|
||||
{ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER},
|
||||
{ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER},
|
||||
{ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT},
|
||||
};
|
||||
|
||||
const std::vector<InputShape> inputShapesDynamic2D = {
|
||||
@ -227,20 +214,18 @@ const std::vector<StridedSliceParams> paramsPlain2D = {
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Static_2D, StridedSliceLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(static_shapes_to_test_representation({{32, 20}})),
|
||||
::testing::ValuesIn(ov::test::static_shapes_to_test_representation({{32, 20}})),
|
||||
::testing::ValuesIn(paramsPlain2D),
|
||||
::testing::ValuesIn(inputPrecisions),
|
||||
::testing::Values(restInputTypes[0]),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::Values(rest_input_types[0])),
|
||||
StridedSliceLayerGPUTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_2D, StridedSliceLayerGPUTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(inputShapesDynamic2D),
|
||||
::testing::ValuesIn(paramsPlain2D),
|
||||
::testing::ValuesIn(inputPrecisions),
|
||||
::testing::ValuesIn(restInputTypes),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::ValuesIn(rest_input_types)),
|
||||
StridedSliceLayerGPUTest::getTestCaseName);
|
||||
|
||||
const std::vector<StridedSliceParams> testCasesCommon4D = {
|
||||
@ -266,9 +251,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D, StridedSliceLa
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(inputShapesDynamic4D),
|
||||
::testing::ValuesIn(testCasesCommon4D),
|
||||
::testing::ValuesIn(inputPrecisions),
|
||||
::testing::ValuesIn(restInputTypes),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::ValuesIn(rest_input_types)),
|
||||
StridedSliceLayerGPUTest::getTestCaseName);
|
||||
|
||||
|
||||
@ -295,9 +279,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D, StridedSliceLa
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(inputShapesDynamic5D),
|
||||
::testing::ValuesIn(testCasesCommon5D),
|
||||
::testing::ValuesIn(inputPrecisions),
|
||||
::testing::ValuesIn(restInputTypes),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::ValuesIn(rest_input_types)),
|
||||
StridedSliceLayerGPUTest::getTestCaseName);
|
||||
|
||||
|
||||
@ -318,10 +301,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_6D, StridedSliceLa
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(inputShapesDynamic6D),
|
||||
::testing::ValuesIn(testCasesCommon6D),
|
||||
::testing::ValuesIn(inputPrecisions),
|
||||
::testing::ValuesIn(restInputTypes),
|
||||
::testing::Values(emptyAdditionalConfig)),
|
||||
::testing::ValuesIn(model_types),
|
||||
::testing::ValuesIn(rest_input_types)),
|
||||
StridedSliceLayerGPUTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
} // namespace GPULayerTestsDefinitions
|
||||
|
@ -2,26 +2,19 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include "ov_models/utils/ov_helpers.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "shared_test_classes/single_layer/tile.hpp"
#include "common_test_utils/test_constants.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace InferenceEngine;
using namespace ov::test;

namespace GPULayerTestsDefinitions {
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/tile.hpp"

namespace {
using TileLayerTestParamsSet = typename std::tuple<
std::vector<ov::test::InputShape>, // Input shapes
std::vector<int64_t>, // Repeats
ov::element::Type_t, // Network precision
ov::element::Type, // Model type
bool, // Is Repeats input constant
std::string>; // Device name

@ -32,27 +25,27 @@ public:
static std::string getTestCaseName(testing::TestParamInfo<TileLayerTestParamsSet> obj) {
TileLayerTestParamsSet basicParamsSet = obj.param;

std::vector<ov::test::InputShape> inputShapes;
std::vector<ov::test::InputShape> input_shapes;
std::vector<int64_t> repeats;
ov::element::Type_t netPrecision;
bool isRepeatsConst;
ov::element::Type_t model_type;
bool is_repeats_const;
std::string deviceName;
std::tie(inputShapes, repeats, netPrecision, isRepeatsConst, deviceName) = basicParamsSet;
std::tie(input_shapes, repeats, model_type, is_repeats_const, deviceName) = basicParamsSet;

std::ostringstream result;
result << "IS=(";
for (const auto& shape : inputShapes) {
for (const auto& shape : input_shapes) {
result << ov::test::utils::partialShape2str({shape.first}) << "_";
}
result << ")_TS=(";
for (const auto& shape : inputShapes) {
for (const auto& shape : input_shapes) {
for (const auto& item : shape.second) {
result << ov::test::utils::vec2str(item) << "_";
}
}
result << "Repeats=" << ov::test::utils::vec2str(repeats) << "_";
result << "netPrec=" << netPrecision << "_";
result << "constRepeats=" << (isRepeatsConst ? "True" : "False") << "_";
result << "netPrec=" << model_type << "_";
result << "constRepeats=" << (is_repeats_const ? "True" : "False") << "_";
result << "trgDev=" << deviceName;

return result.str();
@ -62,31 +55,31 @@ protected:
void SetUp() override {
TileLayerTestParamsSet basicParamsSet = this->GetParam();

std::vector<ov::test::InputShape> inputShapes;
ov::element::Type_t netPrecision;
bool isRepeatsConst;
std::tie(inputShapes, repeatsData, netPrecision, isRepeatsConst, targetDevice) = basicParamsSet;
std::vector<ov::test::InputShape> input_shapes;
ov::element::Type_t model_type;
bool is_repeats_const;
std::tie(input_shapes, repeatsData, model_type, is_repeats_const, targetDevice) = basicParamsSet;

if (inputShapes.front().first.rank() != 0) {
inputDynamicShapes.push_back(inputShapes.front().first);
if (!isRepeatsConst) {
if (input_shapes.front().first.rank() != 0) {
inputDynamicShapes.push_back(input_shapes.front().first);
if (!is_repeats_const) {
inputDynamicShapes.push_back({ static_cast<int64_t>(repeatsData.size()) });
}
}
const size_t targetStaticShapeSize = inputShapes.front().second.size();
const size_t targetStaticShapeSize = input_shapes.front().second.size();
targetStaticShapes.resize(targetStaticShapeSize);
for (size_t i = 0lu; i < targetStaticShapeSize; ++i) {
targetStaticShapes[i].push_back(inputShapes.front().second[i]);
if (!isRepeatsConst)
targetStaticShapes[i].push_back(input_shapes.front().second[i]);
if (!is_repeats_const)
targetStaticShapes[i].push_back({ repeatsData.size() });
}

ov::ParameterVector functionParams;
if (inputDynamicShapes.empty()) {
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, targetStaticShapes.front().front()));
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, targetStaticShapes.front().front()));
} else {
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes.front()));
if (!isRepeatsConst) {
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front()));
if (!is_repeats_const) {
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[1]));
functionParams.back()->set_friendly_name("repeats");
}
@ -94,22 +87,22 @@ protected:
functionParams.front()->set_friendly_name("data");

std::shared_ptr<ov::Node> tileNode;
if (isRepeatsConst) {
if (is_repeats_const) {
tileNode = std::make_shared<ov::op::v0::Tile>(functionParams[0],
ov::op::v0::Constant::create(ov::element::i64, { repeatsData.size() }, repeatsData));
} else {
tileNode = std::make_shared<ov::op::v0::Tile>(functionParams[0], functionParams[1]);
}

ngraph::ResultVector results;
ov::ResultVector results;
for (size_t i = 0; i < tileNode->get_output_size(); i++) {
results.push_back(std::make_shared<ngraph::opset4::Result>(tileNode->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(tileNode->output(i)));
}

function = std::make_shared<ngraph::Function>(results, functionParams, "Tile");
function = std::make_shared<ov::Model>(results, functionParams, "Tile");
}

void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
inputs.clear();
const auto& funcInputs = function->inputs();
for (size_t i = 0lu; i < funcInputs.size(); i++) {
@ -136,18 +129,16 @@ protected:
std::vector<int64_t> repeatsData;
};

TEST_P(TileLayerGPUTest, CompareWithRefs) {
TEST_P(TileLayerGPUTest, Inference) {
run();
}

namespace {

const std::vector<ov::element::Type_t> netPrecisions = {
const std::vector<ov::element::Type> model_types = {
ov::element::f32,
ov::element::f16,
};

const std::vector<std::vector<ov::test::InputShape>> dynamicInputShapes4D = {
const std::vector<std::vector<ov::test::InputShape>> dynamic_input_shapes4D = {
{
{ // Origin dynamic shapes
{ov::Dimension(1, 20), ov::Dimension(10, 20), ov::Dimension(1, 20), ov::Dimension(1, 20)},
@ -169,7 +160,7 @@ const std::vector<std::vector<ov::test::InputShape>> dynamicInputShapes4D = {
}
};

const std::vector<std::vector<ov::test::InputShape>> dynamicInputShapes5D = {
const std::vector<std::vector<ov::test::InputShape>> dynamic_input_shapes5D = {
{
{ // Origin dynamic shapes
{ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 70)},
@ -212,22 +203,20 @@ const std::vector<std::vector<int64_t>> repeats5D = {

INSTANTIATE_TEST_CASE_P(DynamicShape4D, TileLayerGPUTest,
::testing::Combine(
::testing::ValuesIn(dynamicInputShapes4D),
::testing::ValuesIn(dynamic_input_shapes4D),
::testing::ValuesIn(repeats4D),
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(model_types),
::testing::Values(true, false),
::testing::Values(ov::test::utils::DEVICE_GPU)),
TileLayerGPUTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(DynamicShape5D, TileLayerGPUTest,
::testing::Combine(
::testing::ValuesIn(dynamicInputShapes5D),
::testing::ValuesIn(dynamic_input_shapes5D),
::testing::ValuesIn(repeats5D),
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(model_types),
::testing::Values(true, false),
::testing::Values(ov::test::utils::DEVICE_GPU)),
TileLayerGPUTest::getTestCaseName);

} // namespace

} // namespace GPULayerTestsDefinitions
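The Tile diff above follows the commit's recurring pattern: ngraph helpers and opsets are swapped for the OpenVINO 2.0 API (ov::op::v0::Tile, ov::Model), camelCase locals become snake_case, and the test point is renamed from CompareWithRefs to Inference. The following minimal sketch is not part of the commit; it only illustrates the constant-repeats subgraph that pattern produces, and the helper name make_tile_model and its parameters are hypothetical.

// Minimal sketch (illustration only, not from the commit) of the
// Parameter -> Tile -> Result graph the refactored test builds.
#include <memory>
#include <vector>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/tile.hpp"

// make_tile_model is a hypothetical helper used only for illustration.
std::shared_ptr<ov::Model> make_tile_model(const ov::PartialShape& data_shape,
                                           ov::element::Type model_type,
                                           const std::vector<int64_t>& repeats) {
    auto data = std::make_shared<ov::op::v0::Parameter>(model_type, data_shape);
    data->set_friendly_name("data");
    // Repeats given as a Constant; the test also exercises a Parameter-fed variant.
    auto repeats_const = ov::op::v0::Constant::create(ov::element::i64, {repeats.size()}, repeats);
    auto tile = std::make_shared<ov::op::v0::Tile>(data, repeats_const);
    auto result = std::make_shared<ov::op::v0::Result>(tile);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{data}, "Tile");
}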
@ -2,64 +2,62 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include "ov_models/utils/ov_helpers.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "shared_test_classes/single_layer/topk.hpp"
#include "common_test_utils/test_constants.hpp"
#include <random>

#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/test_enums.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/topk.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

typedef std::tuple<
int64_t, // keepK
int64_t, // axis
ngraph::opset4::TopK::Mode, // mode
ngraph::opset4::TopK::SortType, // sort
ElementType, // Net precision
ElementType, // Input precision
ElementType, // Output precision
InputShape, // inputShape
TargetDevice, // Device name
ngraph::helpers::InputLayerType // Input type
int64_t, // keepK
int64_t, // axis
ov::op::v1::TopK::Mode, // mode
ov::op::v1::TopK::SortType, // sort
ov::element::Type, // Model type
ov::element::Type, // Input precision
ov::element::Type, // Output precision
InputShape, // input_shape
std::string, // Device name
ov::test::utils::InputLayerType // Input type
> TopKLayerTestParamsSet;

class TopKLayerGPUTest : public testing::WithParamInterface<TopKLayerTestParamsSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<TopKLayerTestParamsSet>& obj) {
TopKLayerTestParamsSet basicParamsSet = obj.param;

int64_t keepK, axis;
ngraph::opset4::TopK::Mode mode;
ngraph::opset4::TopK::SortType sort;
ElementType netPrecision, inPrc, outPrc;
InputShape inputShape;
TargetDevice targetDevice;
ngraph::helpers::InputLayerType inputType;
std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inputShape, targetDevice, inputType) = basicParamsSet;
ov::op::v1::TopK::Mode mode;
ov::op::v1::TopK::SortType sort;
ov::element::Type model_type, inPrc, outPrc;
InputShape input_shape;
std::string targetDevice;
ov::test::utils::InputLayerType input_type;
std::tie(keepK, axis, mode, sort, model_type, inPrc, outPrc, input_shape, targetDevice, input_type) = basicParamsSet;

std::ostringstream result;
result << "k=" << keepK << "_";
result << "axis=" << axis << "_";
result << "mode=" << mode << "_";
result << "sort=" << sort << "_";
result << "netPRC=" << netPrecision << "_";
result << "netPRC=" << model_type << "_";
result << "inPRC=" << inPrc << "_";
result << "outPRC=" << outPrc << "_";
result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_" << "TS=(";
for (const auto& shape : inputShape.second) {
result << "IS=" << ov::test::utils::partialShape2str({input_shape.first}) << "_" << "TS=(";
for (const auto& shape : input_shape.second) {
result << ov::test::utils::vec2str(shape) << "_";
}
result << ")_";
result << "inputType=" << inputType;
result << "input_type=" << input_type;
result << "TargetDevice=" << targetDevice;

return result.str();
@ -70,43 +68,43 @@ protected:
TopKLayerTestParamsSet basicParamsSet = this->GetParam();

int64_t keepK;
ngraph::opset4::TopK::Mode mode;
ngraph::opset4::TopK::SortType sort;
ElementType inPrc, outPrc;
InputShape inputShape;
std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inputShape, targetDevice, inputType) = basicParamsSet;
ov::op::v1::TopK::Mode mode;
ov::op::v1::TopK::SortType sort;
ov::element::Type inPrc, outPrc;
InputShape input_shape;
std::tie(keepK, axis, mode, sort, model_type, inPrc, outPrc, input_shape, targetDevice, input_type) = basicParamsSet;

if (inputType == ngraph::helpers::InputLayerType::CONSTANT) {
init_input_shapes({inputShape});
if (input_type == ov::test::utils::InputLayerType::CONSTANT) {
init_input_shapes({input_shape});
} else {
inputDynamicShapes = {inputShape.first, {}};
for (size_t i = 0; i < inputShape.second.size(); ++i) {
targetStaticShapes.push_back({inputShape.second[i], {}});
inputDynamicShapes = {input_shape.first, {}};
for (size_t i = 0; i < input_shape.second.size(); ++i) {
targetStaticShapes.push_back({input_shape.second[i], {}});
}
}

ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};

std::shared_ptr<ngraph::opset4::TopK> topk;
if (inputType == ngraph::helpers::InputLayerType::CONSTANT) {
auto k = std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{}, &keepK);
topk = std::dynamic_pointer_cast<ngraph::opset4::TopK>(std::make_shared<ngraph::opset4::TopK>(params[0], k, axis, mode, sort));
std::shared_ptr<ov::op::v1::TopK> topk;
if (input_type == ov::test::utils::InputLayerType::CONSTANT) {
auto k = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, &keepK);
topk = std::dynamic_pointer_cast<ov::op::v1::TopK>(std::make_shared<ov::op::v1::TopK>(params[0], k, axis, mode, sort));
} else {
auto k = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::Type_t::i64, inputDynamicShapes[1]);
auto k = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[1]);
params.push_back(k);
topk = std::dynamic_pointer_cast<ngraph::opset4::TopK>(
std::make_shared<ngraph::opset4::TopK>(params[0], k, axis, mode, sort));
topk = std::dynamic_pointer_cast<ov::op::v1::TopK>(
std::make_shared<ov::op::v1::TopK>(params[0], k, axis, mode, sort));
}

ngraph::ResultVector results;
ov::ResultVector results;
for (size_t i = 0; i < topk->get_output_size(); i++) {
results.push_back(std::make_shared<ngraph::opset4::Result>(topk->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(topk->output(i)));
}

function = std::make_shared<ngraph::Function>(results, params, "TopK");
function = std::make_shared<ov::Model>(results, params, "TopK");
}

void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
inputs.clear();
const auto& funcInputs = function->inputs();
auto shape = targetInputStaticShapes.front();
@ -114,7 +112,7 @@ protected:
tensor = ov::test::utils::create_and_fill_tensor(funcInputs[0].get_element_type(), shape);
size_t size = tensor.get_size();

if (netPrecision == ElementType::f32) {
if (model_type == ov::element::f32) {
std::vector<int> data(size);

int start = - static_cast<int>(size / 2);
@ -127,11 +125,11 @@ protected:
rawBlobDataPtr[i] = static_cast<float>(data[i]);
}
} else {
FAIL() << "generate_inputs for " << netPrecision << " precision isn't supported";
FAIL() << "generate_inputs for " << model_type << " precision isn't supported";
}
inputs.insert({funcInputs[0].get_node_shared_ptr(), tensor});

if (inputType == ngraph::helpers::InputLayerType::PARAMETER) {
if (input_type == ov::test::utils::InputLayerType::PARAMETER) {
const auto& kPrecision = funcInputs[1].get_element_type();
const auto& kShape = targetInputStaticShapes[1];

@ -147,36 +145,32 @@ protected:
private:
int64_t axis;
size_t inferRequestNum = 0;
ElementType netPrecision;
ngraph::helpers::InputLayerType inputType;
ov::element::Type model_type;
ov::test::utils::InputLayerType input_type;
};

TEST_P(TopKLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

TEST_P(TopKLayerGPUTest, Inference) {
run();
}

namespace {

const std::vector<ElementType> netPrecisions = {
ElementType::f32,
const std::vector<ov::element::Type> model_types = {
ov::element::f32,
};

const std::vector<int64_t> axes = {0, 3};
const std::vector<int64_t> k = {3, 5, 7};

const std::vector<ngraph::opset4::TopK::Mode> modes = {
ngraph::opset4::TopK::Mode::MIN,
ngraph::opset4::TopK::Mode::MAX
const std::vector<ov::op::v1::TopK::Mode> modes = {
ov::op::v1::TopK::Mode::MIN,
ov::op::v1::TopK::Mode::MAX
};

const std::vector<ngraph::opset4::TopK::SortType> sortTypes = {
ngraph::opset4::TopK::SortType::SORT_VALUES,
ngraph::opset4::TopK::SortType::SORT_INDICES,
const std::vector<ov::op::v1::TopK::SortType> sortTypes = {
ov::op::v1::TopK::SortType::SORT_VALUES,
ov::op::v1::TopK::SortType::SORT_INDICES,
};

std::vector<ov::test::InputShape> inputShapesDynamic = {
std::vector<ov::test::InputShape> input_shapesDynamic = {
{
{ov::PartialShape::dynamic(4), {{7, 7, 7, 7}, {7, 8, 7, 9}}},
{{-1, -1, -1, -1}, {{8, 9, 10, 11}, {11, 7, 8, 9}}}
@ -189,12 +183,12 @@ INSTANTIATE_TEST_CASE_P(smoke_TopK_constant_dynamic, TopKLayerGPUTest,
::testing::ValuesIn(axes),
::testing::ValuesIn(modes),
::testing::ValuesIn(sortTypes),
::testing::ValuesIn(netPrecisions),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(inputShapesDynamic),
::testing::ValuesIn(model_types),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::ValuesIn(input_shapesDynamic),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT)),
::testing::Values(ov::test::utils::InputLayerType::CONSTANT)),
TopKLayerGPUTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_TopK_parameter_dynamic, TopKLayerGPUTest,
@ -203,13 +197,13 @@ INSTANTIATE_TEST_CASE_P(smoke_TopK_parameter_dynamic, TopKLayerGPUTest,
::testing::ValuesIn(axes),
::testing::ValuesIn(modes),
::testing::ValuesIn(sortTypes),
::testing::ValuesIn(netPrecisions),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(inputShapesDynamic),
::testing::ValuesIn(model_types),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::ValuesIn(input_shapesDynamic),
::testing::Values(ov::test::utils::DEVICE_GPU),
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER)),
::testing::Values(ov::test::utils::InputLayerType::PARAMETER)),
TopKLayerGPUTest::getTestCaseName);

} // namespace
} // namespace GPULayerTestsDefinitions
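The TopK refactor replaces ngraph::opset4::TopK with ov::op::v1::TopK and keeps a Result node for each of its outputs. A minimal sketch of the constant-k case follows; it is not part of the commit, and make_topk_model is a hypothetical helper name used only for illustration.

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/topk.hpp"

// make_topk_model is a hypothetical helper used only for illustration.
std::shared_ptr<ov::Model> make_topk_model(const ov::PartialShape& data_shape,
                                           ov::element::Type model_type,
                                           int64_t keep_k,
                                           int64_t axis) {
    auto data = std::make_shared<ov::op::v0::Parameter>(model_type, data_shape);
    // Scalar k as a Constant; the test also covers k supplied through a Parameter.
    auto k = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {keep_k});
    auto topk = std::make_shared<ov::op::v1::TopK>(data, k, axis,
                                                   ov::op::v1::TopK::Mode::MAX,
                                                   ov::op::v1::TopK::SortType::SORT_VALUES);
    // TopK has two outputs (values and indices); each one gets a Result.
    ov::ResultVector results;
    for (size_t i = 0; i < topk->get_output_size(); ++i)
        results.push_back(std::make_shared<ov::op::v0::Result>(topk->output(i)));
    return std::make_shared<ov::Model>(results, ov::ParameterVector{data}, "TopK");
}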
@ -3,82 +3,83 @@
//

#include "common_test_utils/ov_tensor_utils.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/unique.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

typedef std::tuple<std::vector<InputShape>, // Input shapes
std::tuple<bool, int>, // Is flattened and axis
bool, // Sorted
ElementType // Data precision
>
ov::element::Type> // Model type
UniqueDynamicGPUTestParams;

class UniqueLayerDynamicGPUTest : public testing::WithParamInterface<UniqueDynamicGPUTestParams>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<UniqueDynamicGPUTestParams>& obj) {
std::vector<InputShape> inputShapes;
std::tuple<bool, int> flatOrAxis;
std::vector<InputShape> input_shapes;
std::tuple<bool, int> flat_or_axis;
bool sorted;
ElementType dataPrecision;
std::tie(inputShapes, flatOrAxis, sorted, dataPrecision) = obj.param;
ov::element::Type model_type;
std::tie(input_shapes, flat_or_axis, sorted, model_type) = obj.param;

std::ostringstream result;
result << "IS=(";
for (size_t i = 0lu; i < inputShapes.size(); i++) {
result << ov::test::utils::partialShape2str({inputShapes[i].first})
<< (i < inputShapes.size() - 1lu ? "_" : "");
for (size_t i = 0lu; i < input_shapes.size(); i++) {
result << ov::test::utils::partialShape2str({input_shapes[i].first})
<< (i < input_shapes.size() - 1lu ? "_" : "");
}
result << ")_TS=";
for (size_t i = 0lu; i < inputShapes.front().second.size(); i++) {
for (size_t i = 0lu; i < input_shapes.front().second.size(); i++) {
result << "{";
for (size_t j = 0lu; j < inputShapes.size(); j++) {
result << ov::test::utils::vec2str(inputShapes[j].second[i])
<< (j < inputShapes.size() - 1lu ? "_" : "");
for (size_t j = 0lu; j < input_shapes.size(); j++) {
result << ov::test::utils::vec2str(input_shapes[j].second[i])
<< (j < input_shapes.size() - 1lu ? "_" : "");
}
result << "}_";
}

if (!std::get<0>(flatOrAxis)) {
result << "axis=" << std::get<1>(flatOrAxis) << "_";
if (!std::get<0>(flat_or_axis)) {
result << "axis=" << std::get<1>(flat_or_axis) << "_";
} else {
result << "flattened"
<< "_";
}
result << "sorted=" << (sorted ? "True" : "False") << "_";
result << "dataPrc=" << dataPrecision;
result << "dataPrc=" << model_type;

return result.str();
}

protected:
void SetUp() override {
std::vector<InputShape> inputShapes;
std::tuple<bool, int> flatOrAxis;
std::vector<InputShape> input_shapes;
std::tuple<bool, int> flat_or_axis;
bool sorted, flattened;
int axis;
ElementType dataPrecision;
ov::element::Type model_type;

std::tie(inputShapes, flatOrAxis, sorted, dataPrecision) = this->GetParam();
std::tie(input_shapes, flat_or_axis, sorted, model_type) = this->GetParam();
targetDevice = ov::test::utils::DEVICE_GPU;
init_input_shapes(inputShapes);
flattened = std::get<0>(flatOrAxis);
init_input_shapes(input_shapes);
flattened = std::get<0>(flat_or_axis);

ov::ParameterVector params;
for (auto&& shape : inputDynamicShapes) {
params.push_back(std::make_shared<ov::op::v0::Parameter>(dataPrecision, shape));
params.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
}
params[0]->set_friendly_name("data");
std::shared_ptr<ov::Node> uniqueNode;
if (flattened) {
uniqueNode = std::make_shared<ov::op::v10::Unique>(params[0], sorted);
} else {
axis = std::get<1>(flatOrAxis);
axis = std::get<1>(flat_or_axis);
uniqueNode = std::make_shared<ov::op::v10::Unique>(
params[0],
ov::op::v0::Constant::create(ov::element::i64, ov::Shape({1}), {axis}),
@ -86,12 +87,12 @@ protected:
}

// Need to create results for all outputs
ngraph::ResultVector results;
ov::ResultVector results;
for (auto i = 0U; i < uniqueNode->get_output_size(); ++i) {
results.push_back(std::make_shared<ngraph::opset1::Result>(uniqueNode->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(uniqueNode->output(i)));
}

function = std::make_shared<ngraph::Function>(results, params, "Unique");
function = std::make_shared<ov::Model>(results, params, "Unique");
}

void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
@ -107,7 +108,7 @@ protected:
targetInputStaticShapes[0].end(),
1,
std::multiplies<size_t>());
tensor = utils::create_and_fill_tensor(funcInput.get_element_type(),
tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(),
targetInputStaticShapes[0],
range,
-range / 2,
@ -118,19 +119,16 @@ protected:
}
};

TEST_P(UniqueLayerDynamicGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
TEST_P(UniqueLayerDynamicGPUTest, Inference) {
run();
}

namespace {

const std::vector<ElementType> dataPrecision = {
ElementType::f16,
ElementType::i32,
const std::vector<ov::element::Type> model_types = {
ov::element::f16,
ov::element::i32,
};

std::vector<std::tuple<bool, int>> flatOrAxis{{true, 0}, {false, 0}, {false, 1}, {false, -1}};
std::vector<std::tuple<bool, int>> flat_or_axis{{true, 0}, {false, 0}, {false, 1}, {false, -1}};

std::vector<bool> sorted{true, false};

@ -145,9 +143,9 @@ std::vector<std::vector<InputShape>> getStaticShapes() {
INSTANTIATE_TEST_SUITE_P(smoke_static,
UniqueLayerDynamicGPUTest,
::testing::Combine(::testing::ValuesIn(getStaticShapes()),
::testing::ValuesIn(flatOrAxis),
::testing::ValuesIn(flat_or_axis),
::testing::ValuesIn(sorted),
::testing::ValuesIn(dataPrecision)),
::testing::ValuesIn(model_types)),
UniqueLayerDynamicGPUTest::getTestCaseName);

std::vector<std::vector<InputShape>> getDynamicShapes() {
@ -162,10 +160,9 @@ std::vector<std::vector<InputShape>> getDynamicShapes() {
INSTANTIATE_TEST_SUITE_P(smoke_dynamic,
UniqueLayerDynamicGPUTest,
::testing::Combine(::testing::ValuesIn(getDynamicShapes()),
::testing::ValuesIn(flatOrAxis),
::testing::ValuesIn(flat_or_axis),
::testing::ValuesIn(sorted),
::testing::ValuesIn(dataPrecision)),
::testing::ValuesIn(model_types)),
UniqueLayerDynamicGPUTest::getTestCaseName);

} // namespace
} // namespace GPULayerTestsDefinitions
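The Unique test applies the same migration to ov::op::v10::Unique, which exposes unique values, indices, reverse indices, and counts, so every output gets its own Result. A minimal sketch of the flattened (axis-less) case follows; it is not part of the commit, and make_unique_model is a hypothetical helper name used only for illustration.

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/unique.hpp"

// make_unique_model is a hypothetical helper used only for illustration.
std::shared_ptr<ov::Model> make_unique_model(const ov::PartialShape& data_shape,
                                             ov::element::Type model_type,
                                             bool sorted) {
    auto data = std::make_shared<ov::op::v0::Parameter>(model_type, data_shape);
    data->set_friendly_name("data");
    // Flattened form; the axis variant passes an extra i64 Constant holding the axis.
    auto unique = std::make_shared<ov::op::v10::Unique>(data, sorted);
    // Create a Result for each of Unique's outputs.
    ov::ResultVector results;
    for (size_t i = 0; i < unique->get_output_size(); ++i)
        results.push_back(std::make_shared<ov::op::v0::Result>(unique->output(i)));
    return std::make_shared<ov::Model>(results, ov::ParameterVector{data}, "Unique");
}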