Merge branch 'master' into create_ov_runtime

Ilya Churaev 2021-12-16 07:29:35 +03:00
commit 981e04fe09
68 changed files with 1989 additions and 883 deletions

View File

@ -38,17 +38,10 @@ target_include_directories(interpreter_backend PUBLIC $<BUILD_INTERFACE:${CMAKE_
file(GLOB_RECURSE all_backends_src "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp")
add_clang_format_target(interpreter_backend_clang FOR_SOURCES ${all_backends_src})
# developer package
openvino_developer_export_targets(COMPONENT core TARGETS interpreter_backend)
install(TARGETS interpreter_backend
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests OPTIONAL EXCLUDE_FROM_ALL
ARCHIVE DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests OPTIONAL EXCLUDE_FROM_ALL
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests OPTIONAL EXCLUDE_FROM_ALL)
if(NOT BUILD_SHARED_LIBS)
install(TARGETS interpreter_backend
RUNTIME DESTINATION tests COMPONENT tests OPTIONAL EXCLUDE_FROM_ALL
ARCHIVE DESTINATION tests COMPONENT tests OPTIONAL EXCLUDE_FROM_ALL
LIBRARY DESTINATION tests COMPONENT tests OPTIONAL EXCLUDE_FROM_ALL)
endif()
# install
ov_install_static_lib(interpreter_backend template)

View File

@ -37,4 +37,3 @@ set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_REL
# ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
# POSSIBLE_PLUGINS ${TARGET_NAME})
# [cmake:plugin]
ov_install_static_lib(interpreter_backend tests)

View File

@ -0,0 +1,182 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "openvino/opsets/opset7.hpp"
#include "openvino/opsets/opset1.hpp"
#include "base_reference_test.hpp"
using namespace reference_tests;
using namespace ov;
namespace {
struct EinsumParams {
std::vector<Tensor> inputs;
std::string equation;
Tensor expectedResult;
std::string testcaseName;
};
struct Builder : ParamsBuilder<EinsumParams> {
REFERENCE_TESTS_ADD_SET_PARAM(Builder, inputs);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, equation);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, expectedResult);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, testcaseName);
};
class ReferenceEinsumTest : public testing::TestWithParam<EinsumParams>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateModel(params);
for (const auto& input_tensor : params.inputs) {
inputData.push_back(input_tensor.data);
}
refOutData = {params.expectedResult.data};
}
static std::string getTestCaseName(const testing::TestParamInfo<EinsumParams>& obj) {
auto param = obj.param;
std::ostringstream result;
result << "iType=" << param.inputs[0].type;
result << "_iShape=" << param.inputs[0].shape;
result << "_equation=" << param.equation;
result << "_eType=" << param.expectedResult.type;
result << "_eShape=" << param.expectedResult.shape;
if (param.testcaseName != "") {
result << "_=" << param.testcaseName;
}
return result.str();
}
private:
static std::shared_ptr<Model> CreateModel(const EinsumParams& params) {
OutputVector output_vector;
ParameterVector param_vector;
for (const auto& input_tensor : params.inputs) {
auto param = std::make_shared<opset1::Parameter>(input_tensor.type, input_tensor.shape);
output_vector.push_back(param);
param_vector.push_back(param);
}
const auto einsum = std::make_shared<opset7::Einsum>(output_vector, params.equation);
const auto f = std::make_shared<Model>(OutputVector{einsum}, param_vector);
return f;
}
};
TEST_P(ReferenceEinsumTest, CompareWithRefs) {
Exec();
}
template <element::Type_t ET>
std::vector<EinsumParams> generateParams() {
using T = typename element_type_traits<ET>::value_type;
std::vector<EinsumParams> params {
Builder {}
.inputs({{ET, {1, 2}, std::vector<T>{1, 2}},
{ET, {3, 4}, std::vector<T>{3, 4, 5, 6,
7, 8, 9, 10,
11, 12, 13, 14}}})
.equation("ab,cd->abcd")
.expectedResult({ET, {1, 2, 3, 4}, std::vector<T>{3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 6, 8, 10, 12,
14, 16, 18, 20, 22, 24, 26, 28}})
.testcaseName("einsum_no_reduction"),
Builder {}
.inputs({{ET, {1, 2, 3}, std::vector<T>{1, 2, 3, 4, 5, 6}}})
.equation("ijk->kij")
.expectedResult({ET, {3, 1, 2}, std::vector<T>{1, 4, 2, 5, 3, 6}})
.testcaseName("einsum_transpose"),
Builder {}
.inputs({{ET, {2, 3}, std::vector<T>{1, 2, 3, 4, 5, 6}}})
.equation("ab->a")
.expectedResult({ET, {2}, std::vector<T>{6, 15}})
.testcaseName("einsum_reduce"),
Builder {}
.inputs({{ET, {2, 3}, std::vector<T>{1, 2, 3, 4, 5, 6}},
{ET, {3, 2}, std::vector<T>{1, 2, 3, 4, 5, 6}}})
.equation("ab,bc->ac")
.expectedResult({ET, {2, 2}, std::vector<T>{22, 28, 49, 64}})
.testcaseName("einsum_matrix_multiplication"),
Builder {}
.inputs({{ET, {2, 4}, std::vector<T>{1, 3, 2, 7, 5, 6, 0, 1}},
{ET, {4, 3, 1}, std::vector<T>{1, 2, 3, 4, 5, 6, 5, 7, 3, 7, 9, 1}},
{ET, {4, 3}, std::vector<T>{4, 3, 1, 6, 4, 2, 2, 5, 3, 1, 9, 4}}})
.equation("ab,bcd,bc->ca")
.expectedResult({ET, {3, 2}, std::vector<T>{145, 171, 703, 231, 85, 91}})
.testcaseName("einsum_multiple_multiplication"),
Builder {}
.inputs({{ET, {2, 2, 3}, std::vector<T>{1, 3, 2, 7, 5, 6, 3, 5, 2, 1, 0, 7}}})
.equation("a...->...")
.expectedResult({ET, {2, 3}, std::vector<T>{4, 8, 4, 8, 5, 13}})
.testcaseName("einsum_ellipsis_one_input_reduction"),
Builder {}
.inputs({{ET, {2, 2, 3}, std::vector<T>{1, 3, 2, 7, 5, 6, 3, 5, 2, 1, 0, 7}}})
.equation("a...->...a")
.expectedResult({ET, {2, 3, 2}, std::vector<T>{1, 3, 3, 5, 2, 2, 7, 1, 5, 0, 6, 7}})
.testcaseName("einsum_ellipsis_one_input_transpose"),
Builder {}
.inputs({{ET, {2, 2, 3}, std::vector<T>{1, 3, 2, 7, 5, 6, 3, 5, 2, 1, 0, 7}},
{ET, {1}, std::vector<T>{2}}})
.equation("ab...,...->ab...")
.expectedResult({ET, {2, 2, 3}, std::vector<T>{2, 6, 4, 14, 10, 12, 6, 10, 4, 2, 0, 14}})
.testcaseName("einsum_ellipsis_mul_by_1dscalar"),
Builder {}
.inputs({{ET, {1, 1, 4, 3}, std::vector<T>{1, 3, 2, 7, 5, 6, 3, 5, 2, 1, 0, 7}},
{ET, {3, 4, 2, 1}, std::vector<T>{3, 1, 6, 2, 3, 10, 9, 8, 2, 9, 3, 2,
4, 2, 3, 1, 9, 1, 11, 4, 7, 2, 3, 1}}})
.equation("a...j,j...->a...")
.expectedResult({ET, {1, 4, 2, 4}, std::vector<T>{27, 85, 37, 66, 30, 58, 50, 8,
37, 123, 55, 83, 16, 48, 24, 30,
29, 83, 43, 52, 20, 92, 44, 24,
24, 96, 48, 30, 13, 67, 31, 15}})
.testcaseName("einsum_ellipsis_complex_mul"),
Builder {}
.inputs({{ET, {1, 3, 3}, std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8, 9}}})
.equation("kii->ki")
.expectedResult({ET, {1, 3}, std::vector<T>{1, 5, 9}})
.testcaseName("einsum_diagonal"),
Builder {}
.inputs({{ET, {2, 3, 3, 2, 4}, std::vector<T>{4, 2, 5, 4, 5, 5, 1, 1, 3, 3, 1, 1, 2, 2, 4, 1, 3, 4,
4, 5, 1, 3, 1, 3, 1, 4, 3, 5, 4, 4, 5, 4, 4, 5, 4, 2,
2, 2, 3, 3, 1, 1, 4, 3, 4, 2, 2, 1, 1, 2, 3, 1, 1, 4,
2, 3, 1, 3, 4, 2, 5, 5, 3, 4, 3, 4, 5, 4, 4, 5, 1, 3,
4, 4, 5, 3, 1, 3, 2, 5, 3, 2, 5, 4, 4, 2, 4, 4, 1, 4,
4, 5, 4, 4, 4, 2, 3, 3, 4, 2, 4, 2, 5, 1, 3, 2, 4, 3,
5, 1, 2, 3, 1, 1, 2, 5, 1, 1, 2, 1, 4, 5, 3, 4, 1, 3,
3, 1, 3, 2, 4, 5, 1, 1, 5, 4, 5, 2, 2, 3, 3, 1, 2, 4}},
{ET, {3, 2, 1}, std::vector<T>{1, 4, 4, 5, 3, 3}}})
.equation("abbac,bad->ad")
.expectedResult({ET, {2, 1}, std::vector<T>{123, 129}})
.testcaseName("einsum_diagonal_with_matmul"),
};
return params;
}
std::vector<EinsumParams> generateCombinedParams() {
const std::vector<std::vector<EinsumParams>> generatedParams {
generateParams<element::Type_t::i32>(),
generateParams<element::Type_t::f32>(),
};
std::vector<EinsumParams> combinedParams;
for (const auto& params : generatedParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
INSTANTIATE_TEST_SUITE_P(smoke_Einsum_With_Hardcoded_Refs, ReferenceEinsumTest,
testing::ValuesIn(generateCombinedParams()), ReferenceEinsumTest::getTestCaseName);
} // namespace
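
As a quick cross-check of the einsum_matrix_multiplication case above, the equation "ab,bc->ac" is a plain matrix product over the shared index b. A minimal standalone sketch (not part of this diff; plain C++ with made-up names) reproduces the expected values {22, 28, 49, 64} from the 2x3 and 3x2 inputs:

// Standalone sketch: evaluate "ab,bc->ac" by hand for the inputs used above.
#include <cassert>
#include <vector>

int main() {
    const std::vector<int> a{1, 2, 3, 4, 5, 6};  // shape {2, 3}, row-major
    const std::vector<int> b{1, 2, 3, 4, 5, 6};  // shape {3, 2}, row-major
    std::vector<int> out(2 * 2, 0);              // shape {2, 2}
    for (int i = 0; i < 2; ++i)                  // output index a
        for (int j = 0; j < 2; ++j)              // output index c
            for (int k = 0; k < 3; ++k)          // contracted index b
                out[i * 2 + j] += a[i * 3 + k] * b[k * 2 + j];
    assert((out == std::vector<int>{22, 28, 49, 64}));  // matches expectedResult
    return 0;
}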

View File

@ -0,0 +1,246 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "openvino/opsets/opset3.hpp"
#include "openvino/opsets/opset1.hpp"
#include "base_reference_test.hpp"
using namespace reference_tests;
using namespace ov;
namespace {
struct ExtractImagePatchesParams {
Tensor data;
Shape sizes;
Strides strides;
Shape rates;
op::PadType autoPad;
Tensor expectedResult;
std::string testcaseName;
};
struct Builder : ParamsBuilder<ExtractImagePatchesParams> {
REFERENCE_TESTS_ADD_SET_PARAM(Builder, data);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, sizes);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, strides);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, rates);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, autoPad);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, expectedResult);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, testcaseName);
};
class ReferenceExtractImagePatchesTest : public testing::TestWithParam<ExtractImagePatchesParams>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateModel(params);
inputData = {params.data.data};
refOutData = {params.expectedResult.data};
}
static std::string getTestCaseName(const testing::TestParamInfo<ExtractImagePatchesParams>& obj) {
auto param = obj.param;
std::ostringstream result;
result << "dType=" << param.data.type;
result << "_dShape=" << param.data.shape;
result << "_sizes=" << param.sizes;
result << "_strides=" << param.strides;
result << "_rates=" << param.rates;
result << "_autoPad=" << param.autoPad;
result << "_eType=" << param.expectedResult.type;
result << "_eShape=" << param.expectedResult.shape;
if (param.testcaseName != "") {
result << "_=" << param.testcaseName;
}
return result.str();
}
private:
static std::shared_ptr<Model> CreateModel(const ExtractImagePatchesParams& params) {
const auto data = std::make_shared<opset1::Parameter>(params.data.type, params.data.shape);
const auto extrace_image_patches = std::make_shared<opset3::ExtractImagePatches>(data,
params.sizes,
params.strides,
params.rates,
params.autoPad);
const auto f = std::make_shared<Model>(extrace_image_patches, ParameterVector{data});
return f;
}
};
TEST_P(ReferenceExtractImagePatchesTest, CompareWithRefs) {
Exec();
}
template <element::Type_t ET>
std::vector<ExtractImagePatchesParams> generateParams() {
using T = typename element_type_traits<ET>::value_type;
std::vector<ExtractImagePatchesParams> params {
Builder {}
.data({ET, {1, 1, 10, 10}, std::vector<T>{
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100}})
.sizes({3, 3})
.strides({5, 5})
.rates({1, 1})
.autoPad(op::PadType::VALID)
.expectedResult({ET, {1, 9, 2, 2}, std::vector<T>{
1, 6, 51, 56,
2, 7, 52, 57,
3, 8, 53, 58,
11, 16, 61, 66,
12, 17, 62, 67,
13, 18, 63, 68,
21, 26, 71, 76,
22, 27, 72, 77,
23, 28, 73, 78}}),
Builder {}
.data({ET, {1, 1, 10, 10}, std::vector<T>{
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100}})
.sizes({4, 4})
.strides({8, 8})
.rates({1, 1})
.autoPad(op::PadType::VALID)
.expectedResult({ET, {1, 16, 1, 1}, std::vector<T>{
1, 2, 3, 4,
11, 12, 13, 14,
21, 22, 23, 24,
31, 32, 33, 34}}),
Builder {}
.data({ET, {1, 1, 10, 10}, std::vector<T>{
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100}})
.sizes({4, 4})
.strides({9, 9})
.rates({1, 1})
.autoPad(op::PadType::SAME_UPPER)
.expectedResult({ET, {1, 16, 2, 2}, std::vector<T>{
0, 0, 0, 89,
0, 0, 81, 90,
0, 0, 82, 0,
0, 0, 83, 0,
0, 9, 0, 99,
1, 10, 91, 100,
2, 0, 92, 0,
3, 0, 93, 0,
0, 19, 0, 0,
11, 20, 0, 0,
12, 0, 0, 0,
13, 0, 0, 0,
0, 29, 0, 0,
21, 30, 0, 0,
22, 0, 0, 0,
23, 0, 0, 0}}),
Builder {}
.data({ET, {1, 1, 10, 10}, std::vector<T>{
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100}})
.sizes({3, 3})
.strides({5, 5})
.rates({2, 2})
.autoPad(op::PadType::VALID)
.expectedResult({ET, {1, 9, 2, 2}, std::vector<T>{
1, 6, 51, 56,
3, 8, 53, 58,
5, 10, 55, 60,
21, 26, 71, 76,
23, 28, 73, 78,
25, 30, 75, 80,
41, 46, 91, 96,
43, 48, 93, 98,
45, 50, 95, 100}}),
Builder {}
.data({ET, {1, 2, 5, 5}, std::vector<T>{
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12, 13, 14, 15,
16, 17, 18, 19, 20,
21, 22, 23, 24, 25,
26, 27, 28, 29, 30,
31, 32, 33, 34, 35,
36, 37, 38, 39, 40,
41, 42, 43, 44, 45,
46, 47, 48, 49, 50}})
.sizes({2, 2})
.strides({3, 3})
.rates({1, 1})
.autoPad(op::PadType::VALID)
.expectedResult({ET, {1, 8, 2, 2}, std::vector<T>{
1, 4, 16, 19,
26, 29, 41, 44,
2, 5, 17, 20,
27, 30, 42, 45,
6, 9, 21, 24,
31, 34, 46, 49,
7, 10, 22, 25,
32, 35, 47, 50}}),
};
return params;
}
std::vector<ExtractImagePatchesParams> generateCombinedParams() {
const std::vector<std::vector<ExtractImagePatchesParams>> generatedParams {
generateParams<element::Type_t::i8>(),
generateParams<element::Type_t::i16>(),
generateParams<element::Type_t::i32>(),
generateParams<element::Type_t::i64>(),
generateParams<element::Type_t::u8>(),
generateParams<element::Type_t::u16>(),
generateParams<element::Type_t::u32>(),
generateParams<element::Type_t::u64>(),
generateParams<element::Type_t::bf16>(),
generateParams<element::Type_t::f16>(),
generateParams<element::Type_t::f32>(),
generateParams<element::Type_t::f64>(),
};
std::vector<ExtractImagePatchesParams> combinedParams;
for (const auto& params : generatedParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
INSTANTIATE_TEST_SUITE_P(smoke_ExtractImagePatches_With_Hardcoded_Refs, ReferenceExtractImagePatchesTest,
testing::ValuesIn(generateCombinedParams()), ReferenceExtractImagePatchesTest::getTestCaseName);
} // namespace
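
To make the first VALID case above easier to read, here is a standalone sketch (not part of this diff; the loop structure is a reading of the hard-coded reference, not a restatement of the operator spec). With sizes 3x3, strides 5x5 and rates 1x1 on the 10x10 input, the patch origins are (0,0), (0,5), (5,0), (5,5), and output channel d = dy * 3 + dx holds the pixel at offset (dy, dx) inside each patch:

// Standalone sketch: rebuild the {1, 9, 2, 2} reference for the first test case.
#include <cassert>
#include <vector>

int main() {
    // The input pixel at row r, column c of the 10x10 image is r * 10 + c + 1.
    std::vector<int> out;                        // flattened [1, 9, 2, 2]
    for (int dy = 0; dy < 3; ++dy)               // offset inside the patch -> output channel
        for (int dx = 0; dx < 3; ++dx)
            for (int oy = 0; oy < 2; ++oy)       // patch origins at rows 0 and 5
                for (int ox = 0; ox < 2; ++ox)   // patch origins at columns 0 and 5
                    out.push_back((oy * 5 + dy) * 10 + (ox * 5 + dx) + 1);
    assert(out.size() == 9 * 2 * 2);
    assert(out[0] == 1 && out[3] == 56 && out.back() == 78);  // spot-check against the reference
    return 0;
}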

View File

@ -4,8 +4,8 @@
#include <gtest/gtest.h>
#include "openvino/op/topk.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/opsets/opset3.hpp"
#include "openvino/opsets/opset1.hpp"
#include "base_reference_test.hpp"
using namespace reference_tests;
@ -15,7 +15,7 @@ namespace {
struct TopKParams {
TopKParams(
const Tensor& A, const Tensor& k, const int64_t axis,
const op::v1::TopK::Mode mode, const op::v1::TopK::SortType sort,
const opset1::TopK::Mode mode, const opset1::TopK::SortType sort,
const Tensor& result0, const Tensor& result1, const size_t outIdx,
const std::string& testcaseName = "") :
A(A), k(k), axis(axis), mode(mode), sort(sort),
@ -25,8 +25,8 @@ struct TopKParams {
Tensor A;
Tensor k;
int64_t axis;
op::v1::TopK::Mode mode;
op::v1::TopK::SortType sort;
opset1::TopK::Mode mode;
opset1::TopK::SortType sort;
Tensor result0;
Tensor result1;
size_t outIdx;
@ -71,7 +71,6 @@ struct TopKParamsResnet50 {
std::string testcaseName;
};
class ReferenceTopKTestResnet50 : public testing::TestWithParam<TopKParamsResnet50>, public CommonReferenceTest {
public:
void SetUp() override {
@ -101,18 +100,18 @@ public:
private:
static std::shared_ptr<Model> CreateFunction(const TopKParamsResnet50& params) {
const auto A = std::make_shared<op::v0::Parameter>(params.A.type,
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto B = std::make_shared<op::v1::TopK>(A,
op::v0::Constant::create(element::i64, {}, {5}),
const auto B = std::make_shared<opset1::TopK>(A,
opset1::Constant::create(element::i64, {}, {5}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES);
const auto C = std::make_shared<op::v1::TopK>(A,
op::v0::Constant::create(element::i64, {}, {1}),
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES);
const auto C = std::make_shared<opset1::TopK>(A,
opset1::Constant::create(element::i64, {}, {1}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES);
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES);
const auto out5_value = B->output(0);
const auto out5_index = B->output(1);
@ -220,12 +219,12 @@ public:
private:
static std::shared_ptr<Model> CreateFunction(const TopKParams& params) {
const auto A = std::make_shared<op::v0::Parameter>(params.A.type,
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto k = op::v0::Constant::create(params.k.type,
const auto k = opset1::Constant::create(params.k.type,
params.k.shape,
params.k.data.data());
const auto B = std::make_shared<op::v1::TopK>(A, k, params.axis, params.mode, params.sort);
const auto B = std::make_shared<opset1::TopK>(A, k, params.axis, params.mode, params.sort);
const auto f = std::make_shared<Model>(B->outputs(), ParameterVector{A});
return f;
}
@ -253,8 +252,8 @@ std::vector<TopKParams> generateParamsMaxMinSort() {
}({128, 1000})),
Tensor(ET2, {}, std::vector<T2>{5}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::NONE,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::NONE,
Tensor(ET, {128, 5}, [](std::vector<size_t> rshape, std::vector<size_t> shape) -> std::vector<T>{
std::vector<T> expected_value;
for (size_t i = 0; i < rshape[0]; i++) {
@ -292,8 +291,8 @@ std::vector<TopKParams> generateParamsMaxMinSort() {
}({128, 1000})),
Tensor(ET2, {}, std::vector<T2>{5}),
1,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::NONE,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::NONE,
Tensor(ET, {128, 5}, [](std::vector<size_t> rshape) -> std::vector<T>{
std::vector<T> expected_value;
for (size_t i = 0; i < rshape[0]; i++) {
@ -331,8 +330,8 @@ std::vector<TopKParams> generateParamsMaxMinSort() {
}({128, 1000})),
Tensor(ET2, {}, std::vector<T2>{5}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {128, 5}, [](std::vector<size_t> rshape, std::vector<size_t> shape) -> std::vector<T>{
std::vector<T> expected_value;
for (size_t i = 0; i < rshape[0]; i++) {
@ -366,8 +365,8 @@ std::vector<TopKParams> generateParamsMaxMinSort() {
}({128, 1000})),
Tensor(ET2, {}, std::vector<T2>{5}),
1,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {128, 5}, [](std::vector<size_t> rshape) -> std::vector<T>{
std::vector<T> expected_value;
for (size_t i = 0; i < rshape[0]; i++) {
@ -401,8 +400,8 @@ std::vector<TopKParams> generateParamsMaxMinSort() {
}({128, 1000})),
Tensor(ET2, {}, std::vector<T2>{5}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_INDICES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_INDICES,
Tensor(ET, {128, 5}, [](std::vector<size_t> rshape, std::vector<size_t> shape) -> std::vector<T>{
std::vector<T> expected_value;
for (size_t i = 0; i < rshape[0]; i++) {
@ -440,8 +439,8 @@ std::vector<TopKParams> generateParamsMaxMinSort() {
}({128, 1000})),
Tensor(ET2, {}, std::vector<T2>{5}),
1,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_INDICES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_INDICES,
Tensor(ET, {128, 5}, [](std::vector<size_t> rshape) -> std::vector<T>{
std::vector<T> expected_value;
for (size_t i = 0; i < rshape[0]; i++) {
@ -467,8 +466,8 @@ std::vector<TopKParams> generateParamsMaxMinSort() {
Tensor(ET, {5}, std::vector<T>{3, 1, 2, 5, 4}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {3}, std::vector<T>{5, 4, 3}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{3, 4, 0}),
0,
@ -478,8 +477,8 @@ std::vector<TopKParams> generateParamsMaxMinSort() {
Tensor(ET, {5}, std::vector<T>{3, 1, 2, 5, 4}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_INDICES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_INDICES,
Tensor(ET, {3}, std::vector<T>{3, 5, 4}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{0, 3, 4}),
0,
@ -489,8 +488,8 @@ std::vector<TopKParams> generateParamsMaxMinSort() {
Tensor(ET, {5}, std::vector<T>{3, 1, 2, 5, 4}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {3}, std::vector<T>{1, 2, 3}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{1, 2, 0}),
0,
@ -500,8 +499,8 @@ std::vector<TopKParams> generateParamsMaxMinSort() {
Tensor(ET, {5}, std::vector<T>{3, 1, 2, 5, 4}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_INDICES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_INDICES,
Tensor(ET, {3}, std::vector<T>{3, 1, 2}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{0, 1, 2}),
0,
@ -536,7 +535,7 @@ std::vector<TopKParams> generateCombinedParamsMaxMinSort() {
INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTestMaxMinSort,
testing::ValuesIn(generateCombinedParamsMaxMinSort()), ReferenceTopKTest::getTestCaseName);
class ReferenceTopKTestV3 : public ReferenceTopKTest {
class ReferenceTopKTestBackend : public ReferenceTopKTest {
public:
void SetUp() override {
auto params = GetParam();
@ -547,18 +546,18 @@ public:
private:
static std::shared_ptr<Model> CreateFunction(const TopKParams& params) {
const auto A = std::make_shared<op::v0::Parameter>(params.A.type,
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto k = op::v0::Constant::create(params.k.type,
const auto k = opset1::Constant::create(params.k.type,
params.k.shape,
params.k.data.data());
const auto B = std::make_shared<op::v3::TopK>(A, k, params.axis, params.mode, params.sort);
const auto B = std::make_shared<opset1::TopK>(A, k, params.axis, params.mode, params.sort);
const auto f = std::make_shared<Model>(B->outputs(), ParameterVector{A});
return f;
}
};
TEST_P(ReferenceTopKTestV3, CompareWithRefs) {
TEST_P(ReferenceTopKTestBackend, CompareWithRefs) {
Exec();
}
@ -572,8 +571,8 @@ std::vector<TopKParams> generateParamsV3() {
Tensor(ET, {5}, std::vector<T>{3, 1, 2, 5, 4}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {3}, std::vector<T>{5, 4, 3}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{3, 4, 0}),
0,
@ -583,8 +582,8 @@ std::vector<TopKParams> generateParamsV3() {
Tensor(ET, {5}, std::vector<T>{3, 1, 2, 5, 4}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_INDICES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_INDICES,
Tensor(ET, {3}, std::vector<T>{3, 5, 4}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{0, 3, 4}),
0,
@ -594,8 +593,8 @@ std::vector<TopKParams> generateParamsV3() {
Tensor(ET, {5}, std::vector<T>{3, 1, 2, 5, 4}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {3}, std::vector<T>{1, 2, 3}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{1, 2, 0}),
0,
@ -605,8 +604,8 @@ std::vector<TopKParams> generateParamsV3() {
Tensor(ET, {5}, std::vector<T>{3, 1, 2, 5, 4}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_INDICES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_INDICES,
Tensor(ET, {3}, std::vector<T>{3, 1, 2}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{0, 1, 2}),
0,
@ -615,7 +614,7 @@ std::vector<TopKParams> generateParamsV3() {
return params;
}
std::vector<TopKParams> generateCombinedParamsV3() {
std::vector<TopKParams> generateCombinedParamsBackend() {
const std::vector<std::vector<TopKParams>> generatedParams {
generateParamsMaxMinSort<element::Type_t::i8, element::Type_t::i64, element::Type_t::i32>(),
generateParamsMaxMinSort<element::Type_t::i16, element::Type_t::i64, element::Type_t::i32>(),
@ -638,8 +637,8 @@ std::vector<TopKParams> generateCombinedParamsV3() {
return combinedParams;
}
INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTestV3,
testing::ValuesIn(generateCombinedParamsV3()), ReferenceTopKTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTestBackend,
testing::ValuesIn(generateCombinedParamsBackend()), ReferenceTopKTest::getTestCaseName);
class ReferenceTopKTest1dMaxMin : public ReferenceTopKTest {
public:
@ -673,12 +672,12 @@ public:
private:
static std::shared_ptr<Model> CreateFunction(const TopKParams& params, size_t out_idx) {
const auto A = std::make_shared<op::v0::Parameter>(params.A.type,
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto k = op::v0::Constant::create(params.k.type,
const auto k = opset1::Constant::create(params.k.type,
params.k.shape,
params.k.data.data());
const auto B = std::make_shared<op::v1::TopK>(A, k, params.axis, params.mode, params.sort);
const auto B = std::make_shared<opset1::TopK>(A, k, params.axis, params.mode, params.sort);
const auto f = std::make_shared<Model>(OutputVector{B->output(out_idx)}, ParameterVector{A});
return f;
}
@ -698,8 +697,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{1, 2, 3, 4, 5, 6}),
Tensor(ET2, {}, std::vector<T2>{6}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {6}, std::vector<T>{6, 5, 4, 3, 2, 1}),
Tensor(ET_OUT, {6}, std::vector<T_OUT>{5, 4, 3, 2, 1, 0}),
0,
@ -709,8 +708,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{1, 2, 3, 4, 5, 6}),
Tensor(ET2, {}, std::vector<T2>{6}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {6}, std::vector<T>{6, 5, 4, 3, 2, 1}),
Tensor(ET_OUT, {6}, std::vector<T_OUT>{5, 4, 3, 2, 1, 0}),
1,
@ -720,8 +719,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{1, 2, 3, 4, 5, 6}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {3}, std::vector<T>{6, 5, 4}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{5, 4, 3}),
0,
@ -731,8 +730,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{1, 2, 3, 4, 5, 6}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {3}, std::vector<T>{6, 5, 4}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{5, 4, 3}),
1,
@ -742,8 +741,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{1, 2, 3, 4, 5, 6}),
Tensor(ET2, {}, std::vector<T2>{1}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {1}, std::vector<T>{6}),
Tensor(ET_OUT, {1}, std::vector<T_OUT>{5}),
0,
@ -753,8 +752,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{1, 2, 3, 4, 5, 6}),
Tensor(ET2, {}, std::vector<T2>{1}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {1}, std::vector<T>{6}),
Tensor(ET_OUT, {1}, std::vector<T_OUT>{5}),
1,
@ -764,8 +763,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{6, 5, 4, 3, 2, 1}),
Tensor(ET2, {}, std::vector<T2>{6}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {6}, std::vector<T>{1, 2, 3, 4, 5, 6}),
Tensor(ET_OUT, {6}, std::vector<T_OUT>{5, 4, 3, 2, 1, 0}),
0,
@ -775,8 +774,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{6, 5, 4, 3, 2, 1}),
Tensor(ET2, {}, std::vector<T2>{6}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {6}, std::vector<T>{1, 2, 3, 4, 5, 6}),
Tensor(ET_OUT, {6}, std::vector<T_OUT>{5, 4, 3, 2, 1, 0}),
1,
@ -786,8 +785,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{6, 5, 4, 3, 2, 1}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {3}, std::vector<T>{1, 2, 3}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{5, 4, 3}),
0,
@ -797,8 +796,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{6, 5, 4, 3, 2, 1}),
Tensor(ET2, {}, std::vector<T2>{3}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {3}, std::vector<T>{1, 2, 3}),
Tensor(ET_OUT, {3}, std::vector<T_OUT>{5, 4, 3}),
1,
@ -808,8 +807,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{6, 5, 4, 3, 2, 1}),
Tensor(ET2, {}, std::vector<T2>{1}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {1}, std::vector<T>{1}),
Tensor(ET_OUT, {1}, std::vector<T_OUT>{5}),
0,
@ -819,8 +818,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
Tensor(ET, {6}, std::vector<T>{6, 5, 4, 3, 2, 1}),
Tensor(ET2, {}, std::vector<T2>{1}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {1}, std::vector<T>{1}),
Tensor(ET_OUT, {1}, std::vector<T_OUT>{5}),
1,
@ -832,8 +831,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{3}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 3, 2}, std::vector<T>{
10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1
}),
@ -849,8 +848,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{3}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 3, 2}, std::vector<T>{
10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1
}),
@ -882,8 +881,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{2}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 2, 3, 2, 4}, std::vector<T>{
169, 241, 177, 249, 185, 233, 170, 242, 178, 250, 186, 258, 171, 243, 179, 251,
187, 259, 172, 224, 180, 252, 188, 260, 149, 221, 157, 229, 165, 113, 150, 222,
@ -923,8 +922,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{2}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 2, 3, 2, 4}, std::vector<T>{
169, 241, 177, 249, 185, 233, 170, 242, 178, 250, 186, 258, 171, 243, 179, 251,
187, 259, 172, 224, 180, 252, 188, 260, 149, 221, 157, 229, 165, 113, 150, 222,
@ -948,8 +947,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{2}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 2, 2}, std::vector<T>{
10, 12, 9, 4, 11, 7, 6, 3
}),
@ -965,8 +964,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{2}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 2, 2}, std::vector<T>{
10, 12, 9, 4, 11, 7, 6, 3
}),
@ -982,8 +981,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{1}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 1, 2}, std::vector<T>{
10, 12, 11, 7
}),
@ -999,8 +998,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{1}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 1, 2}, std::vector<T>{
10, 12, 11, 7
}),
@ -1016,8 +1015,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{3}),
1,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 3, 2}, std::vector<T>{
8, 2, 10, 4, 12, 9, 5, 1, 6, 3, 11, 7
}),
@ -1033,8 +1032,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{3}),
1,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 3, 2}, std::vector<T>{
8, 2, 10, 4, 12, 9, 5, 1, 6, 3, 11, 7
}),
@ -1050,8 +1049,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{2}),
1,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 2, 2}, std::vector<T>{
8, 2, 10, 4, 5, 1, 6, 3
}),
@ -1067,8 +1066,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{2}),
1,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 2, 2}, std::vector<T>{
8, 2, 10, 4, 5, 1, 6, 3
}),
@ -1084,8 +1083,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{1}),
1,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 1, 2}, std::vector<T>{
8, 2, 5, 1
}),
@ -1101,8 +1100,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{1}),
1,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 1, 2}, std::vector<T>{
8, 2, 5, 1
}),
@ -1118,8 +1117,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{4}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {4, 3}, std::vector<T>{
12, 11, 10, 9, 8, 7, 6, 2, 5, 3, 1, 4
}),
@ -1135,8 +1134,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{4}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {4, 3}, std::vector<T>{
12, 11, 10, 9, 8, 7, 6, 2, 5, 3, 1, 4
}),
@ -1152,8 +1151,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{2}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 3}, std::vector<T>{
12, 11, 10, 9, 8, 7
}),
@ -1169,8 +1168,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{2}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 3}, std::vector<T>{
12, 11, 10, 9, 8, 7
}),
@ -1186,8 +1185,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{1}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {1, 3}, std::vector<T>{
12, 11, 10
}),
@ -1203,8 +1202,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{1}),
0,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {1, 3}, std::vector<T>{
12, 11, 10
}),
@ -1220,8 +1219,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{1}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 1}, std::vector<T>{
4, 3
}),
@ -1237,8 +1236,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{1}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 1}, std::vector<T>{
4, 3
}),
@ -1254,8 +1253,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{4}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {4, 3}, std::vector<T>{
3, 1, 4, 6, 2, 5, 9, 8, 7, 12, 11, 10
}),
@ -1271,8 +1270,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{4}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {4, 3}, std::vector<T>{
3, 1, 4, 6, 2, 5, 9, 8, 7, 12, 11, 10
}),
@ -1288,8 +1287,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{2}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 3}, std::vector<T>{
3, 1, 4, 6, 2, 5
}),
@ -1305,8 +1304,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{2}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 3}, std::vector<T>{
3, 1, 4, 6, 2, 5
}),
@ -1322,8 +1321,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{1}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::NONE,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::NONE,
Tensor(ET, {1, 3}, std::vector<T>{
3, 1, 4
}),
@ -1339,8 +1338,8 @@ std::vector<TopKParams> generateParams1dMaxMin() {
}),
Tensor(ET2, {}, std::vector<T2>{1}),
0,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::NONE,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::NONE,
Tensor(ET, {1, 3}, std::vector<T>{
3, 1, 4
}),
@ -1380,12 +1379,12 @@ INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTest1dMaxM
class ReferenceTopKTestInt64 : public ReferenceTopKTest1dMaxMin {
private:
static std::shared_ptr<Model> CreateFunction(const TopKParams& params, size_t out_idx) {
const auto A = std::make_shared<op::v0::Parameter>(params.A.type,
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto k = op::v0::Constant::create(params.k.type,
const auto k = opset1::Constant::create(params.k.type,
params.k.shape,
params.k.data.data());
const auto B = std::make_shared<op::v1::TopK>(A,
const auto B = std::make_shared<opset1::TopK>(A,
k,
params.axis,
params.mode,
@ -1412,8 +1411,8 @@ std::vector<TopKParams> generateParamsInt64() {
}),
Tensor(ET2, {}, std::vector<T2>{3}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 3, 2}, std::vector<T>{
10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1
}),
@ -1428,8 +1427,8 @@ std::vector<TopKParams> generateParamsInt64() {
}),
Tensor(ET2, {}, std::vector<T2>{3}),
1,
op::v1::TopK::Mode::MAX,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 3, 2}, std::vector<T>{
10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1
}),
@ -1468,12 +1467,12 @@ public:
private:
static std::shared_ptr<Model> CreateFunction(const TopKParams& params) {
const auto A = std::make_shared<op::v0::Parameter>(params.A.type,
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto k = op::v0::Constant::create(params.k.type,
const auto k = opset1::Constant::create(params.k.type,
params.k.shape,
params.k.data.data());
const auto B = std::make_shared<op::v1::TopK>(A, k, params.axis, params.mode, params.sort);
const auto B = std::make_shared<opset1::TopK>(A, k, params.axis, params.mode, params.sort);
const auto f = std::make_shared<Model>(OutputVector{B->output(1)}, ParameterVector{A});
return f;
}
@ -1493,8 +1492,8 @@ std::vector<TopKParams> generateParamsSingleOutput() {
Tensor(ET, {2, 3, 2}, std::vector<T>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}),
Tensor(ET2, {}, std::vector<T2>{2}),
1,
op::v1::TopK::Mode::MIN,
op::v1::TopK::SortType::SORT_VALUES,
opset1::TopK::Mode::MIN,
opset1::TopK::SortType::SORT_VALUES,
Tensor(ET, {2, 2, 2}, std::vector<T>{}),
Tensor(ET_OUT, {2, 2, 2}, std::vector<T_OUT>{2, 0, 1, 2, 1, 0, 0, 1}),
0,
@ -1530,19 +1529,181 @@ INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTestSingle
testing::ValuesIn(generateCombinedParamsSingleOutput()), ReferenceTopKTest::getTestCaseName);
TEST(ReferenceTopKTestInvalid, topk_v1_invalid_strings) {
const auto data = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2, 3});
const auto k = op::v0::Constant::create(element::i64, Shape{}, {1});
EXPECT_THROW(op::v1::TopK(data, k, 0, "max", "invalid_mode"), ngraph::CheckFailure);
EXPECT_THROW(op::v1::TopK(data, k, 0, "invalid_sort", "index"), ngraph::CheckFailure);
const auto data = std::make_shared<opset1::Parameter>(element::f32, Shape{1, 2, 3});
const auto k = opset1::Constant::create(element::i64, Shape{}, {1});
EXPECT_THROW(opset1::TopK(data, k, 0, "max", "invalid_mode"), ngraph::CheckFailure);
EXPECT_THROW(opset1::TopK(data, k, 0, "invalid_sort", "index"), ngraph::CheckFailure);
}
TEST(ReferenceTopKTestInvalid, topk_v1_invalid_k) {
const auto data = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2, 3});
const auto k_non_scalar = op::v0::Constant::create(element::i64, Shape{2}, {1, 2});
EXPECT_THROW(op::v1::TopK(data, k_non_scalar, 0, "max", "index"), ngraph::NodeValidationFailure);
const auto k_float = op::v0::Constant::create(element::f32, Shape{}, {1.0f});
EXPECT_THROW(op::v1::TopK(data, k_float, 0, "max", "index"), ngraph::NodeValidationFailure);
const auto k_negative = op::v0::Constant::create(element::i8, Shape{}, {-1});
EXPECT_THROW(op::v1::TopK(data, k_negative, 0, "max", "index"), ngraph::NodeValidationFailure);
const auto data = std::make_shared<opset1::Parameter>(element::f32, Shape{1, 2, 3});
const auto k_non_scalar = opset1::Constant::create(element::i64, Shape{2}, {1, 2});
EXPECT_THROW(opset1::TopK(data, k_non_scalar, 0, "max", "index"), ngraph::NodeValidationFailure);
const auto k_float = opset1::Constant::create(element::f32, Shape{}, {1.0f});
EXPECT_THROW(opset1::TopK(data, k_float, 0, "max", "index"), ngraph::NodeValidationFailure);
const auto k_negative = opset1::Constant::create(element::i8, Shape{}, {-1});
EXPECT_THROW(opset1::TopK(data, k_negative, 0, "max", "index"), ngraph::NodeValidationFailure);
}
class ReferenceTopKTestResnet50V3 : public ReferenceTopKTestResnet50 {
private:
static std::shared_ptr<Model> CreateFunction(const TopKParamsResnet50& params) {
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto B = std::make_shared<opset3::TopK>(A,
opset1::Constant::create(element::i64, {}, {5}),
1,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES);
const auto C = std::make_shared<opset3::TopK>(A,
opset1::Constant::create(element::i64, {}, {1}),
1,
opset1::TopK::Mode::MAX,
opset1::TopK::SortType::SORT_VALUES);
const auto out5_value = B->output(0);
const auto out5_index = B->output(1);
const auto out1_value = C->output(0);
const auto out1_index = C->output(1);
const auto f = std::make_shared<Model>(OutputVector{out5_value, out5_index, out1_value, out1_index}, ParameterVector{A});
return f;
}
};
TEST_P(ReferenceTopKTestResnet50V3, CompareWithRefs) {
Exec();
}
INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTestResnet50V3,
testing::ValuesIn(generateCombinedParamsResnet50()), ReferenceTopKTestResnet50V3::getTestCaseName);
class ReferenceTopKTestMaxMinSortV3 : public ReferenceTopKTestMaxMinSort {
private:
static std::shared_ptr<Model> CreateFunction(const TopKParams& params) {
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto k = opset1::Constant::create(params.k.type,
params.k.shape,
params.k.data.data());
const auto B = std::make_shared<opset3::TopK>(A, k, params.axis, params.mode, params.sort);
const auto f = std::make_shared<Model>(B->outputs(), ParameterVector{A});
return f;
}
};
TEST_P(ReferenceTopKTestMaxMinSortV3, CompareWithRefs) {
Exec();
}
INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTestMaxMinSortV3,
testing::ValuesIn(generateCombinedParamsMaxMinSort()), ReferenceTopKTestMaxMinSortV3::getTestCaseName);
class ReferenceTopKTestBackendV3 : public ReferenceTopKTestBackend {
private:
static std::shared_ptr<Model> CreateFunction(const TopKParams& params) {
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto k = opset1::Constant::create(params.k.type,
params.k.shape,
params.k.data.data());
const auto B = std::make_shared<opset3::TopK>(A, k, params.axis, params.mode, params.sort);
const auto f = std::make_shared<Model>(B->outputs(), ParameterVector{A});
return f;
}
};
TEST_P(ReferenceTopKTestBackendV3, CompareWithRefs) {
Exec();
}
INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTestBackendV3,
testing::ValuesIn(generateCombinedParamsBackend()), ReferenceTopKTestBackendV3::getTestCaseName);
class ReferenceTopKTest1dMaxMinV3 : public ReferenceTopKTest1dMaxMin {
private:
static std::shared_ptr<Model> CreateFunction(const TopKParams& params, size_t out_idx) {
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto k = opset1::Constant::create(params.k.type,
params.k.shape,
params.k.data.data());
const auto B = std::make_shared<opset3::TopK>(A, k, params.axis, params.mode, params.sort);
const auto f = std::make_shared<Model>(OutputVector{B->output(out_idx)}, ParameterVector{A});
return f;
}
};
TEST_P(ReferenceTopKTest1dMaxMinV3, CompareWithRefs) {
Exec();
}
INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTest1dMaxMinV3,
testing::ValuesIn(generateCombinedParams1dMaxMin()), ReferenceTopKTest1dMaxMinV3::getTestCaseName);
class ReferenceTopKTestInt64V3 : public ReferenceTopKTestInt64 {
private:
static std::shared_ptr<Model> CreateFunction(const TopKParams& params, size_t out_idx) {
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto k = opset1::Constant::create(params.k.type,
params.k.shape,
params.k.data.data());
const auto B = std::make_shared<opset3::TopK>(A,
k,
params.axis,
params.mode,
params.sort,
element::i64);
const auto f = std::make_shared<Model>(OutputVector{B->output(out_idx)}, ParameterVector{A});
return f;
}
};
TEST_P(ReferenceTopKTestInt64V3, CompareWithRefs) {
Exec();
}
INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTestInt64V3,
testing::ValuesIn(generateCombinedParamsInt64()), ReferenceTopKTestInt64V3::getTestCaseName);
class ReferenceTopKTestSingleOutputV3 : public ReferenceTopKTestSingleOutput {
private:
static std::shared_ptr<Model> CreateFunction(const TopKParams& params) {
const auto A = std::make_shared<opset1::Parameter>(params.A.type,
params.A.shape);
const auto k = opset1::Constant::create(params.k.type,
params.k.shape,
params.k.data.data());
const auto B = std::make_shared<opset3::TopK>(A, k, params.axis, params.mode, params.sort);
const auto f = std::make_shared<Model>(OutputVector{B->output(1)}, ParameterVector{A});
return f;
}
};
TEST_P(ReferenceTopKTestSingleOutputV3, CompareWithRefs) {
Exec();
}
INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTestSingleOutputV3,
testing::ValuesIn(generateCombinedParamsSingleOutput()), ReferenceTopKTestSingleOutputV3::getTestCaseName);
TEST(ReferenceTopKTestInvalidV3, topk_v3_invalid_strings) {
const auto data = std::make_shared<opset1::Parameter>(element::f32, Shape{1, 2, 3});
const auto k = opset1::Constant::create(element::i64, Shape{}, {1});
EXPECT_THROW(opset3::TopK(data, k, 0, "max", "invalid_mode"), ngraph::CheckFailure);
EXPECT_THROW(opset3::TopK(data, k, 0, "invalid_sort", "index"), ngraph::CheckFailure);
}
TEST(ReferenceTopKTestInvalidV3, topk_v3_invalid_k) {
const auto data = std::make_shared<opset1::Parameter>(element::f32, Shape{1, 2, 3});
const auto k_non_scalar = opset1::Constant::create(element::i64, Shape{2}, {1, 2});
EXPECT_THROW(opset3::TopK(data, k_non_scalar, 0, "max", "index"), ngraph::NodeValidationFailure);
const auto k_float = opset1::Constant::create(element::f32, Shape{}, {1.0f});
EXPECT_THROW(opset3::TopK(data, k_float, 0, "max", "index"), ngraph::NodeValidationFailure);
const auto k_negative = opset1::Constant::create(element::i8, Shape{}, {-1});
EXPECT_THROW(opset3::TopK(data, k_negative, 0, "max", "index"), ngraph::NodeValidationFailure);
}
} // namespace
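
Most of the churn in this file is mechanical: the op::v0/op::v1 spellings are replaced by their opset1 aliases, ReferenceTopKTestV3 is renamed to ReferenceTopKTestBackend, and new *V3 fixtures that build the same graphs with opset3::TopK are appended. As a reminder of what those aliases resolve to (a sketch based on the usual opset header layout, not something shown in this diff), opset1::TopK is TopK-1 and opset3::TopK is TopK-3:

// Sketch: the opset namespaces are alias collections over the versioned ops (assumed, not shown here).
#include <type_traits>
#include "openvino/op/topk.hpp"
#include "openvino/opsets/opset1.hpp"
#include "openvino/opsets/opset3.hpp"

static_assert(std::is_same<ov::opset1::TopK, ov::op::v1::TopK>::value,
              "opset1::TopK is expected to alias TopK-1");
static_assert(std::is_same<ov::opset3::TopK, ov::op::v3::TopK>::value,
              "opset3::TopK is expected to alias TopK-3");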

View File

@ -85,6 +85,7 @@ struct format {
bs_fs_zyx_bsv16_fsv16, ///< format used for 3D blocked convolution (batch and features blocked by 16)
bs_fs_yx_bsv16_fsv16, ///< format used for 2D blocked convolution (batch and features blocked by 16)
bs_fs_yx_bsv4_fsv4, ///< format used for 2D blocked convolution (batch and features blocked by 4)
bs_fs_yx_bsv8_fsv4, ///< format used for 2D blocked convolution (batch and features blocked by 8 and 4)
bs_fs_yx_bsv4_fsv2, ///< format used for 2D blocked convolution (batch blocked by 4, features blocked by 2)
bs_fs_zyx_bsv4_fsv4, ///< format used for 3D blocked convolution (batch and features blocked by 4)
bs_fs_zyx_bsv4_fsv2, ///< format used for 3D blocked convolution (batch blocked by 4, features blocked by 2)
@ -255,6 +256,7 @@ struct format {
{ bs_fs_zyx_bsv16_fsv16, { 1, 1, 3, 0, "bfzyx", "bfxyz", {{0, 16 }, {1, 16}}}},
{ bs_fs_yx_bsv16_fsv16, { 1, 1, 2, 0, "bfyx", "bfxy?", {{0, 16 }, {1, 16}}}},
{ bs_fs_yx_bsv4_fsv4, { 1, 1, 2, 0, "bfyx", "bfxy?", {{0, 4 }, {1, 4}}}},
{ bs_fs_yx_bsv8_fsv4, { 1, 1, 2, 0, "bfyx", "bfxy?", {{0, 8 }, {1, 4}}}},
{ bs_fs_yx_bsv4_fsv2, { 1, 1, 2, 0, "bfyx", "bfxy?", {{0, 4 }, {1, 2}}}},
{ bs_fs_zyx_bsv4_fsv4, { 1, 1, 3, 0, "bfzyx", "bfxyz", {{0, 4 }, {1, 4}}}},
{ bs_fs_zyx_bsv4_fsv2, { 1, 1, 3, 0, "bfzyx", "bfxyz", {{0, 4 }, {1, 2}}}},

View File

@ -29,6 +29,7 @@ DataTensor::DataChannelArray DataTensor::dataChannelArray {{
{ DataLayout::bs_fs_zyx_bsv16_fsv16, { 0, 1, 2, -1, 3, 4 } },
{ DataLayout::bs_fs_yx_bsv16_fsv16, { 0, 1, -1, -1, 2, 3 } },
{ DataLayout::bs_fs_yx_bsv4_fsv4, { 0, 1, -1, -1, 2, 3 } },
{ DataLayout::bs_fs_yx_bsv8_fsv4, { 0, 1, -1, -1, 2, 3 } },
{ DataLayout::bs_fs_yx_bsv4_fsv2, { 0, 1, -1, -1, 2, 3 } },
{ DataLayout::bs_fs_yx_bsv32_fsv32, { 0, 1, -1, -1, 2, 3 } },
{ DataLayout::bs_fs_yx_bsv32_fsv16, { 0, 1, -1, -1, 2, 3 } },
@ -206,6 +207,11 @@ NDims DataTensor::GetSimpleDims(const std::vector<size_t>& d, DataLayout l) {
newDims[2] = RoundUp(newDims[2], 4);
newDims[3] = RoundUp(newDims[3], 4);
break;
case bs_fs_yx_bsv8_fsv4:
assert(newDims.size() == 4);
newDims[2] = RoundUp(newDims[2], 4);
newDims[3] = RoundUp(newDims[3], 8);
break;
case bs_fs_yx_bsv4_fsv2:
assert(newDims.size() == 4);
newDims[2] = RoundUp(newDims[2], 2);
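
Judging from the block sizes in the new branch above, newDims[2] holds the feature extent (padded to a multiple of 4, the fsv block) and newDims[3] the batch extent (padded to a multiple of 8, the bsv block). A small worked sketch of the resulting storage size, using an arbitrary 3x5x2x2 bfyx shape and a local stand-in for RoundUp:

// Worked sketch of the padding implied by the bs_fs_yx_bsv8_fsv4 branch above.
#include <cassert>
#include <cstddef>

static size_t RoundUp(size_t v, size_t m) { return (v + m - 1) / m * m; }

int main() {
    size_t b = 3, f = 5, y = 2, x = 2;           // logical bfyx extents (example values)
    size_t f_padded = RoundUp(f, 4);             // feature rounded to the fsv block -> 8
    size_t b_padded = RoundUp(b, 8);             // batch rounded to the bsv block   -> 8
    assert(f_padded == 8 && b_padded == 8);
    assert(b_padded * f_padded * y * x == 256);  // 256 stored elements for 60 logical ones
    return 0;
}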

View File

@ -39,6 +39,7 @@ enum DataLayout {
bs_fs_yx_bsv16_fsv16, // batch, feature, 2D spatial. Blocks of 16 batch and channels
bs_fs_zyx_bsv16_fsv16, // batch, feature, 3D spatial. Blocks of 16 batch and channels
bs_fs_yx_bsv4_fsv4, // batch, feature, 2D spatial. Blocks of 4 batch and 4 channels
bs_fs_yx_bsv8_fsv4, // batch, feature, 2D spatial. Blocks of 8 batch and 4 channels
bs_fs_yx_bsv4_fsv2, // batch, feature, 2D spatial. Blocks of 4 batch and 2 channels
bs_fs_yx_bsv32_fsv32, // batch, feature, 2D spatial. Blocks of 32 batch and 32 channels
bs_fs_yx_bsv32_fsv16, // batch, feature, 2D spatial. Blocks of 32 batch and 16 channels

View File

@ -196,15 +196,16 @@ KernelsData ConvolutionKernelBase::GetCommonKernelsData(const Params& params,
return {};
}
auto preferredWeightsLayout = GetPreferredWeightsLayout(newParams);
bool succeed = UpdateWeightsParams(newParams,
options,
GetPreferredWeightsLayout(newParams),
preferredWeightsLayout,
kd.weightsReorderParams,
GetSupportedKey(),
newParams.groups,
newParams.transposed);
bool bSupportedWeightsLayout = newParams.weights.GetLayout() == GetPreferredWeightsLayout(newParams);
bool bSupportedWeightsLayout = newParams.weights.GetLayout() == preferredWeightsLayout;
const bool bWeightsOK = bSupportedWeightsLayout || options.allowStaticInputReordering;
if (!succeed || !bWeightsOK) {

View File

@ -506,6 +506,22 @@ inline uint get_bs_fs_zyx_bsv_fsv_index(uint b, uint f, uint z, uint y, uint x,
CAT(prefix, _PAD_BEFORE_SIZE_X), \
CAT(prefix, _PAD_AFTER_SIZE_X), 4, 4)
#define GET_DATA_BS_FS_YX_BSV8_FSV4_INDEX(prefix, b, f, y, x) \
get_bs_fs_zyx_bsv_fsv_index( \
b, f, 0, y, x, \
CAT(prefix, _SIZE_X), \
CAT(prefix, _SIZE_Y), \
CAT(prefix, _SIZE_Z), \
CAT(prefix, _FEATURE_NUM), \
CAT(prefix, _PAD_BEFORE_FEATURE_NUM), \
CAT(prefix, _PAD_AFTER_FEATURE_NUM), \
CAT(prefix, _PAD_BEFORE_SIZE_Z), \
CAT(prefix, _PAD_AFTER_SIZE_Z), \
CAT(prefix, _PAD_BEFORE_SIZE_Y), \
CAT(prefix, _PAD_AFTER_SIZE_Y), \
CAT(prefix, _PAD_BEFORE_SIZE_X), \
CAT(prefix, _PAD_AFTER_SIZE_X), 8, 4)
#define GET_DATA_BS_FS_YX_BSV4_FSV2_INDEX(prefix, b, f, y, x) \
get_bs_fs_zyx_bsv_fsv_index( \
b, f, 0, y, x, \
@ -605,6 +621,23 @@ inline uint get_bs_fs_zyx_bsv_fsv_index(uint b, uint f, uint z, uint y, uint x,
CAT(prefix, _PAD_BEFORE_SIZE_X), \
CAT(prefix, _PAD_AFTER_SIZE_X), 4, 4)
#define GET_DATA_BS_FS_YX_BSV8_FSV4_INDEX_SAFE(prefix, b, f, y, x) \
get_bs_fs_zyx_bsv_fsv_index_safe( \
b, f, 0, y, x, \
CAT(prefix, _SIZE_X), \
CAT(prefix, _SIZE_Y), \
CAT(prefix, _SIZE_Z), \
CAT(prefix, _FEATURE_NUM), \
CAT(prefix, _BATCH_NUM), \
CAT(prefix, _PAD_BEFORE_FEATURE_NUM), \
CAT(prefix, _PAD_AFTER_FEATURE_NUM), \
CAT(prefix, _PAD_BEFORE_SIZE_Z), \
CAT(prefix, _PAD_AFTER_SIZE_Z), \
CAT(prefix, _PAD_BEFORE_SIZE_Y), \
CAT(prefix, _PAD_AFTER_SIZE_Y), \
CAT(prefix, _PAD_BEFORE_SIZE_X), \
CAT(prefix, _PAD_AFTER_SIZE_X), 8, 4)
#define GET_DATA_BS_FS_YX_BSV4_FSV2_INDEX_SAFE(prefix, b, f, y, x) \
get_bs_fs_zyx_bsv_fsv_index_safe( \
b, f, 0, y, x, \
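
The new GET_DATA_BS_FS_YX_BSV8_FSV4_INDEX macros simply route through get_bs_fs_zyx_bsv_fsv_index with inner block sizes 8 (batch) and 4 (feature). As intuition for what such a double-blocked offset looks like, here is a minimal host-side sketch for the unpadded case, assuming the conventional outer-batch-block, outer-feature-block, spatial, then 8x4 inner-block ordering; the real OpenCL helper additionally folds in the padding terms passed above:

// Host-side sketch of an unpadded bs_fs_yx_bsv8_fsv4 offset (assumed block ordering).
#include <cstddef>

inline size_t bsv8_fsv4_offset(size_t b, size_t f, size_t y, size_t x,
                               size_t F, size_t Y, size_t X) {
    const size_t bsv = 8, fsv = 4;
    const size_t f_blocks = (F + fsv - 1) / fsv;  // feature blocks, rounded up
    size_t idx = b / bsv;                         // outer batch block
    idx = idx * f_blocks + f / fsv;               // outer feature block
    idx = idx * Y + y;                            // spatial
    idx = idx * X + x;
    idx = idx * bsv + b % bsv;                    // position inside the 8x4 block: batch first,
    idx = idx * fsv + f % fsv;                    // then feature
    return idx;
}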

View File

@ -334,6 +334,7 @@ JitDefinitions DataTensorJitConstant::GetDefinitions() const {
layout == DataLayout::fs_b_yx_fsv32 ||
layout == DataLayout::bs_fs_yx_bsv16_fsv16 ||
layout == DataLayout::bs_fs_yx_bsv4_fsv4 ||
layout == DataLayout::bs_fs_yx_bsv8_fsv4 ||
layout == DataLayout::bs_fs_yx_bsv4_fsv2 ||
layout == DataLayout::bs_fs_yx_bsv32_fsv16 ||
layout == DataLayout::bs_fs_yx_bsv32_fsv32) {
@ -346,6 +347,7 @@ JitDefinitions DataTensorJitConstant::GetDefinitions() const {
layout == DataLayout::bs_fs_yx_bsv32_fsv32 ||
layout == DataLayout::bs_fs_yx_bsv32_fsv16 ||
layout == DataLayout::bs_fs_yx_bsv4_fsv4 ||
layout == DataLayout::bs_fs_yx_bsv8_fsv4 ||
layout == DataLayout::bs_fs_yx_bsv4_fsv2 ||
layout == DataLayout::bs_fs_yx_bsv16_fsv16)
safe_index_func_val = "GET_DATA_" + layout_str + "_INDEX_SAFE(" + _name + ", b, f, y, x)";

View File

@ -105,6 +105,7 @@ std::string toString(DataLayout l) {
case kernel_selector::DataLayout::bs_fs_yx_bsv16_fsv16: return "BS_FS_YX_BSV16_FSV16";
case kernel_selector::DataLayout::bs_fs_zyx_bsv16_fsv16: return "BS_FS_ZYX_BSV16_FSV16";
case kernel_selector::DataLayout::bs_fs_yx_bsv4_fsv4: return "BS_FS_YX_BSV4_FSV4";
case kernel_selector::DataLayout::bs_fs_yx_bsv8_fsv4: return "BS_FS_YX_BSV8_FSV4";
case kernel_selector::DataLayout::bs_fs_yx_bsv4_fsv2: return "BS_FS_YX_BSV4_FSV2";
case kernel_selector::DataLayout::bs_fs_yx_bsv32_fsv32: return "BS_FS_YX_BSV32_FSV32";
case kernel_selector::DataLayout::bs_fs_yx_bsv32_fsv16: return "BS_FS_YX_BSV32_FSV16";

View File

@ -125,7 +125,7 @@ binary_convolution_inst::typed_primitive_inst(network& network, binary_convoluti
"Only one-dimensional batch size are supported");
CLDNN_ERROR_LESS_THAN(node.id(),
"Weights feature maps number",
(input_inst.size.feature[0] + pad.feature[0]) / split,
input_inst.size.feature[0],
"input feature maps number",
filter_inst.size.feature[0],
"Weights/ifm mismatch");

View File

@ -97,7 +97,7 @@ layout convolution_inst::calc_output_layout(convolution_node const& node) {
input_layout.format == format::image_2d_weights_winograd_6x3_s1_xfbyb)
CLDNN_ERROR_MESSAGE(
node.id(),
"Input for convolution should not be in windograd weights format - it is reserved for weights only");
"Input for convolution should not be in winograd weights format - it is reserved for weights only");
if (input_layout.format == format::winograd_2x3_s1_data) {
CLDNN_ERROR_NOT_EQUAL(node.id(),
@ -369,10 +369,19 @@ convolution_inst::typed_primitive_inst(network& network, convolution_node const&
"Only one-dimensional batch size are supported");
CLDNN_ERROR_LESS_THAN(node.id(),
"Weights feature maps number",
(input_inst.size.feature[0] + pad.feature[0]) / split,
input_inst.size.feature[0],
"input feature maps number",
weights_ifm,
"Weights/ifm mismatch");
if (!argument.grouped_weights_shape && !format::is_grouped(filter_inst.format)) {
CLDNN_ERROR_NOT_EQUAL(node.id(),
"Weights feature maps number",
input_inst.size.feature[0],
"input feature maps number",
weights_ifm,
"Weights/ifm mismatch");
}
}
}
} // namespace cldnn

View File

@ -82,11 +82,11 @@ layout deconvolution_inst::calc_output_layout(deconvolution_node const& node) {
int32_t off_factor = -2;
size_t spatial_dims = cldnn::format::traits(input_layout.format).spatial_num;
CLDNN_ERROR_GREATER_THAN(node.id(),
"number of spatial dimensions",
spatial_dims,
"expected number of dimensions",
3,
"As for now, deconvolutions with more than 3 dimensions are not supported");
"number of spatial dimensions",
spatial_dims,
"expected number of dimensions",
3,
"As for now, deconvolutions with more than 3 dimensions are not supported");
int32_t x = off_factor * pad.spatial[0] + (input_layout.size.spatial[0] - 1) * strd.spatial[0] + filter_size.spatial[0];
int32_t y = 1;
@ -208,6 +208,7 @@ deconvolution_inst::typed_primitive_inst(network& network, deconvolution_node co
1,
"Spatial[0] of bias should be 1. Bias isn't 1D vector.");
}
CLDNN_ERROR_NOT_EQUAL(node.id(),
"deconvolution padding filling value",
node.get_output_layout().data_padding.filling_value(),
@ -240,10 +241,19 @@ deconvolution_inst::typed_primitive_inst(network& network, deconvolution_node co
"Only one-dimensional features are supported");
CLDNN_ERROR_LESS_THAN(node.id(),
"Weights feature maps number",
(input_inst.size.feature[0] + pad.feature[0]) / split,
input_inst.size.feature[0],
"input feature maps number",
weights_ifm,
"Weights/ifm mimsmatch");
"Weights/ifm mismatch");
if (!argument.grouped_weights_shape && !format::is_grouped(filter_inst.format)) {
CLDNN_ERROR_NOT_EQUAL(node.id(),
"Weights feature maps number",
input_inst.size.feature[0],
"input feature maps number",
weights_ifm,
"Weights/ifm mismatch");
}
}
}
} // namespace cldnn

View File

@ -536,7 +536,7 @@ void reorder_inputs::run(program& p, layout_optimizer& lo, reorder_factory& rf)
}
};
const auto reorder_input_deconvolution = [&p, &lo, &rf](typed_program_node<deconvolution>& deconv_node) {
const auto reorder_input_and_weights_deconvolution = [&p, &lo, &rf](typed_program_node<deconvolution>& deconv_node) {
auto& input = deconv_node.input();
auto input_layout = input.get_output_layout();
auto new_format = lo.get_preferred_format(deconv_node);
@ -547,14 +547,41 @@ void reorder_inputs::run(program& p, layout_optimizer& lo, reorder_factory& rf)
p.add_intermediate(reorder.first, deconv_node, 0, !reorder.second);
}
}
auto& weights = deconv_node.weights();
auto weights_layout = weights.get_output_layout();
if (!format::is_simple_data_format(weights_layout.format) && !weights.is_type<data>() && !weights.is_constant()) {
auto dims = weights_layout.format.dimension();
auto preferred_format = dims <= 4 ? format::bfyx : dims == 5 ? format::bfzyx : format::bfwzyx;
auto reorder = rf.get_reorder(weights.id(), weights_layout,
layout{ weights_layout.data_type, preferred_format, weights_layout.size });
if (reorder.first) {
p.add_intermediate(reorder.first, deconv_node, 1, !reorder.second);
}
}
};
const auto reorder_weights_convolution = [&p, &lo, &rf](typed_program_node<convolution>& conv_node) {
auto& weights = conv_node.weights();
auto weights_layout = weights.get_output_layout();
if (!format::is_simple_data_format(weights_layout.format) && !weights.is_type<data>() && !weights.is_constant()) {
auto dims = weights_layout.format.dimension();
auto preferred_format = dims <= 4 ? format::bfyx : dims == 5 ? format::bfzyx : format::bfwzyx;
auto reorder = rf.get_reorder(weights.id(), weights_layout,
layout{ weights_layout.data_type, preferred_format, weights_layout.size });
if (reorder.first) {
p.add_intermediate(reorder.first, conv_node, 1, !reorder.second);
}
}
};
for (auto& prim : p.get_processing_order()) {
program_helpers::do_for_types<detection_output, binary_convolution, deconvolution>(
program_helpers::do_for_types<detection_output, binary_convolution, deconvolution, convolution>(
*prim,
reorder_input_detection_output,
reorder_input_binary_convolution,
reorder_input_deconvolution);
reorder_input_and_weights_deconvolution,
reorder_weights_convolution);
}
for (auto n : p.get_processing_order()) {
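
Both new weight-reorder lambdas (for deconvolution and for convolution) fall back to the same rank-driven choice of a plain planar format when the weights sit in a non-simple data format and are not constant. Pulled out on its own — the function name here is illustrative, not from the patch, and the cldnn format enum is assumed to be in scope — the rule is:

// Rank-driven planar target for weight reorders, mirroring the ternary
// used inside both lambdas above.
cldnn::format planar_weights_format(size_t dims) {
    return dims <= 4 ? cldnn::format::bfyx
         : dims == 5 ? cldnn::format::bfzyx
                     : cldnn::format::bfwzyx;
}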

View File

@ -225,6 +225,11 @@ attach_convolution_impl::attach_convolution_impl() {
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::i8, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::f32, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::f16, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::i8, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::f32, format::bs_fs_yx_bsv4_fsv2),
std::make_tuple(data_types::f16, format::bs_fs_yx_bsv4_fsv2),
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv4_fsv2),

View File

@ -214,6 +214,13 @@ attach_eltwise_impl::attach_eltwise_impl() {
std::make_tuple(data_types::i32, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::i64, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::f32, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::f16, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::i8, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::i32, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::i64, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::f32, format::bs_fs_yx_bsv4_fsv2),
std::make_tuple(data_types::f16, format::bs_fs_yx_bsv4_fsv2),
std::make_tuple(data_types::i8, format::bs_fs_yx_bsv4_fsv2),

View File

@ -119,6 +119,11 @@ attach_concatenation_onednn::attach_concatenation_onednn() {
std::make_tuple(data_types::f16, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::i8, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::f32, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::f16, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::i8, format::bs_fs_yx_bsv8_fsv4),
});
}

View File

@ -256,6 +256,11 @@ attach_convolution_onednn::attach_convolution_onednn() {
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::i8, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::f32, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::f16, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::i8, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::f32, format::bs_fs_yx_bsv4_fsv2),
std::make_tuple(data_types::f16, format::bs_fs_yx_bsv4_fsv2),
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv4_fsv2),

View File

@ -199,6 +199,11 @@ attach_deconvolution_onednn::attach_deconvolution_onednn() {
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::i8, format::bs_fs_yx_bsv4_fsv4),
std::make_tuple(data_types::f32, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::f16, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::i8, format::bs_fs_yx_bsv8_fsv4),
std::make_tuple(data_types::f32, format::bs_fs_yx_bsv4_fsv2),
std::make_tuple(data_types::f16, format::bs_fs_yx_bsv4_fsv2),
std::make_tuple(data_types::u8, format::bs_fs_yx_bsv4_fsv2),

View File

@ -91,6 +91,7 @@ dnnl::memory::format_tag convert_data_format(cldnn::format fmt) {
case cldnn::format::bs_fs_yx_bsv16_fsv16: return dnnl::memory::format_tag::NChw16n16c;
case cldnn::format::bs_fs_yx_bsv32_fsv32: return dnnl::memory::format_tag::NChw32n32c;
case cldnn::format::bs_fs_yx_bsv4_fsv4: return dnnl::memory::format_tag::ABcd4a4b;
case cldnn::format::bs_fs_yx_bsv8_fsv4: return dnnl::memory::format_tag::ABcd8a4b;
case cldnn::format::bs_fs_yx_bsv4_fsv2: return dnnl::memory::format_tag::ABcd4a2b;
case cldnn::format::bs_fs_yx_bsv32_fsv16: return dnnl::memory::format_tag::NChw32n16c;
case cldnn::format::bs_fs_zyx_bsv16_fsv16: return dnnl::memory::format_tag::NCdhw16n16c;
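
The new bs_fs_yx_bsv8_fsv4 entry maps onto oneDNN's ABcd8a4b tag, where the trailing "8a4b" denotes inner blocks of 8 along the first dimension (batch) and 4 along the second (features). A hypothetical descriptor built directly with that tag — the dims and data type here are placeholders; only the tag itself comes from this change — would be:

// Illustrative oneDNN descriptor using the tag this mapping targets.
#include <dnnl.hpp>

dnnl::memory::desc make_bsv8_fsv4_desc(dnnl::memory::dim n, dnnl::memory::dim c,
                                       dnnl::memory::dim h, dnnl::memory::dim w) {
    return dnnl::memory::desc({n, c, h, w},
                              dnnl::memory::data_type::s8,
                              dnnl::memory::format_tag::ABcd8a4b);
}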

View File

@ -97,6 +97,8 @@ inline std::string fmt_to_str(format fmt) {
return "bs_fs_yx_bsv4_fsv2";
case format::bs_fs_yx_bsv4_fsv4:
return "bs_fs_yx_bsv4_fsv4";
case format::bs_fs_yx_bsv8_fsv4:
return "bs_fs_yx_bsv8_fsv4";
case format::bs_fs_yx_bsv32_fsv32:
return "bs_fs_yx_bsv32_fsv32";
case format::b_fs_zyx_fsv16:

View File

@ -136,6 +136,8 @@ kernel_selector::data_layout to_data_layout(format f) {
return kernel_selector::data_layout::bs_fs_yx_bsv32_fsv16;
case format::bs_fs_yx_bsv4_fsv4:
return kernel_selector::data_layout::bs_fs_yx_bsv4_fsv4;
case format::bs_fs_yx_bsv8_fsv4:
return kernel_selector::data_layout::bs_fs_yx_bsv8_fsv4;
case format::bs_fs_yx_bsv4_fsv2:
return kernel_selector::data_layout::bs_fs_yx_bsv4_fsv2;
case format::bs_fs_yx_bsv32_fsv32:
@ -193,6 +195,8 @@ cldnn::format from_data_layout(kernel_selector::data_layout l) {
return cldnn::format::bs_fs_yx_bsv4_fsv2;
case kernel_selector::data_layout::bs_fs_yx_bsv4_fsv4:
return cldnn::format::bs_fs_yx_bsv4_fsv4;
case kernel_selector::data_layout::bs_fs_yx_bsv8_fsv4:
return cldnn::format::bs_fs_yx_bsv8_fsv4;
case kernel_selector::data_layout::bs_fs_yx_bsv32_fsv32:
return cldnn::format::bs_fs_yx_bsv32_fsv32;
case kernel_selector::data_layout::nv12:

View File

@ -284,10 +284,11 @@ bool layout_optimizer::can_fuse_reorder(program_node& prev, program_node& next,
return true;
if (next.is_type<convolution>() &&
(fmt_prev == format::b_fs_yx_fsv4 || fmt_prev == format::bs_fs_yx_bsv4_fsv4) &&
(fmt_prev == format::b_fs_yx_fsv4 || fmt_prev == format::bs_fs_yx_bsv4_fsv4 || fmt_prev == format::bs_fs_yx_bsv8_fsv4) &&
((fmt_next == format::b_fs_yx_fsv32 && (prev_output_layout.size.feature[0] == 3 || prev_output_layout.size.feature[0] == 4)) ||
(fmt_next == format::bs_fs_yx_bsv32_fsv32 && (prev_output_layout.size.feature[0] == 3 || prev_output_layout.size.feature[0] == 4)) ||
(fmt_next == format::bs_fs_yx_bsv4_fsv4 && (prev_output_layout.size.feature[0] == 3 || prev_output_layout.size.feature[0] == 4)) ||
(fmt_next == format::bs_fs_yx_bsv8_fsv4 && (prev_output_layout.size.feature[0] == 3 || prev_output_layout.size.feature[0] == 4)) ||
(fmt_next == format::b_fs_yx_fsv16 && next_output_layout.size.feature[0] >= 16 &&
(prev_output_layout.size.feature[0] == 3 || (prev_output_layout.size.feature[0] == 4 && (prev_dt == data_types::u8 || prev_dt == data_types::i8))))))
return true;
@ -1269,6 +1270,7 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node, format
format::bs_fs_yx_bsv32_fsv16,
format::bs_fs_yx_bsv32_fsv32,
format::bs_fs_yx_bsv4_fsv4,
format::bs_fs_yx_bsv8_fsv4,
format::bs_fs_yx_bsv4_fsv2,
format::bs_fs_zyx_bsv4_fsv4,
format::bs_fs_zyx_bsv4_fsv2,
@ -1320,17 +1322,27 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node, format
impl_candidate = impl_types::ocl;
}
size_t eltw_dep = 0;
for (auto& fo : node.get_fused_primitives()) {
if (fo.node->is_type<eltwise>()) {
auto in_layout = node.get_dependency(fo.dep_start_idx).get_output_layout();
auto out_layout = node.get_output_layout();
auto in_dt = in_layout.data_type;
auto out_dt = out_layout.data_type;
if ((out_layout.count() == in_layout.count()) &&
(data_type_traits::is_floating_point(in_dt) || data_type_traits::is_floating_point(out_dt)) && in_dt != out_dt &&
fo.node->as<eltwise>().get_primitive()->needs_onednn_sum_post_op(in_layout)) {
impl_candidate = impl_types::ocl;
break;
if (fo.node->as<eltwise>().get_primitive()->needs_onednn_sum_post_op(in_layout)) {
if ((out_layout.count() == in_layout.count()) &&
(data_type_traits::is_floating_point(in_dt) || data_type_traits::is_floating_point(out_dt)) && in_dt != out_dt) {
impl_candidate = impl_types::ocl;
break;
}
if (in_layout.size == out_layout.size && in_layout.format == out_layout.format && in_layout.data_padding == out_layout.data_padding &&
data_type_traits::size_of(in_dt) == data_type_traits::size_of(out_dt)) {
if (eltw_dep > 0) {
impl_candidate = impl_types::ocl;
break;
}
eltw_dep = fo.dep_start_idx;
}
}
} else if (fo.node->is_type<activation>()) {
// Some activations aren't implemented in oneDNN
@ -1453,7 +1465,7 @@ format layout_optimizer::get_preferred_format(program_node& node) {
if (data_type_traits::is_floating_point(conv.get_output_layout().data_type) || ws.spatial[0] != 7 || conv.get_primitive()->groups > 1)
expected = format::bfyx;
else
expected = format::bs_fs_yx_bsv4_fsv4;
expected = format::bs_fs_yx_bsv8_fsv4;
auto conv_output_layout = conv.get_output_layout();
auto weights_layout = conv.weights(0).get_output_layout();

View File

@ -514,15 +514,17 @@ void network::allocate_primitives() {
can_reuse_eltwise_mem = true;
}
if (_primitives.find(eltw_in.id()) != _primitives.end() && _primitives.find(node->id()) != _primitives.end()) {
auto& eltw_inst = _primitives.at(eltw_in.id());
auto& prim_inst = _primitives.at(node->id());
auto eltw_mem_type = eltw_inst->output_memory().get_allocation_type();
auto prim_mem_type = prim_inst->output_memory().get_allocation_type();
if (!can_reuse_eltwise_mem) {
if (_primitives.find(eltw_in.id()) != _primitives.end() && _primitives.find(node->id()) != _primitives.end()) {
auto& eltw_inst = _primitives.at(eltw_in.id());
auto& prim_inst = _primitives.at(node->id());
auto eltw_mem_type = eltw_inst->output_memory().get_allocation_type();
auto prim_mem_type = prim_inst->output_memory().get_allocation_type();
// Keep lockable memory type for `prim_inst` output if needed
if (eltw_mem_type != prim_mem_type && eltw_mem_type != allocation_type::cl_mem && eltw_mem_type != allocation_type::usm_host)
can_reuse_eltwise_mem = false;
// Keep lockable memory type for `prim_inst` output if needed
if (eltw_mem_type != prim_mem_type && eltw_mem_type != allocation_type::cl_mem && eltw_mem_type != allocation_type::usm_host)
can_reuse_eltwise_mem = false;
}
}
if (fused_op.node->as<eltwise>().get_primitive()->needs_onednn_sum_post_op(eltw_in_layout) && !can_reuse_eltwise_mem) {

View File

@ -741,10 +741,10 @@ program_node& program::get_or_create(std::shared_ptr<primitive> prim) {
}
void program::add_intermediate(program_node& node,
program_node& next,
size_t prev_idx,
bool connect_int_node_with_old_dep,
bool move_usrs_of_prev_to_node) {
program_node& next,
size_t prev_idx,
bool connect_int_node_with_old_dep,
bool move_usrs_of_prev_to_node) {
if (connect_int_node_with_old_dep && !node.dependencies.empty())
throw std::invalid_argument(
"Node which is about to be added in between two other nodes should not have any existing dependencies");
@ -1112,8 +1112,8 @@ void program::remove_nodes(std::vector<program_node*>& to_remove) {
// TODO: break this function into number of smaller ones + add per-primitive fields (possibly use
// primitive_inst::to_string?)
void program::dump_program(const char* stage,
bool with_full_info,
std::function<bool(program_node const&)> const& filter) const {
bool with_full_info,
std::function<bool(program_node const&)> const& filter) const {
std::string path = get_dir_path(options);
if (path.empty() || !with_full_info) {
return;
@ -1230,7 +1230,7 @@ void program::save_pass_info(std::string pass_name) {
}
void program::add_optimized_primitive_info(primitive_id optimized_primitive_id,
std::vector<primitive_id> replaced_with_ids) {
std::vector<primitive_id> replaced_with_ids) {
for (auto& e : optimized) {
auto it = std::find_if(e.second.begin(), e.second.end(), [&optimized_primitive_id](const primitive_id& id) {
return optimized_primitive_id == id;

View File

@ -139,30 +139,25 @@ std::pair<bool, bool> program_helpers::are_layouts_identical(layout const& l1, l
return {false, false};
if (l1.get_linear_size() != l2.get_linear_size())
return {false, false};
if ((l1.format == format::b_fs_yx_fsv4 && l2.format != format::b_fs_yx_fsv4) ||
(l2.format == format::b_fs_yx_fsv4 && l1.format != format::b_fs_yx_fsv4) ||
(l1.format == format::fs_b_yx_fsv32 && l2.format != format::fs_b_yx_fsv32) ||
(l2.format == format::fs_b_yx_fsv32 && l1.format != format::fs_b_yx_fsv32) ||
(l1.format == format::b_fs_yx_fsv16 && l2.format != format::b_fs_yx_fsv16) ||
(l2.format == format::b_fs_yx_fsv16 && l1.format != format::b_fs_yx_fsv16) ||
(l1.format == format::b_fs_yx_fsv32 && l2.format != format::b_fs_yx_fsv32) ||
(l2.format == format::b_fs_yx_fsv32 && l1.format != format::b_fs_yx_fsv32) ||
(l1.format == format::b_fs_zyx_fsv32 && l2.format != format::b_fs_zyx_fsv32) ||
(l2.format == format::b_fs_zyx_fsv32 && l1.format != format::b_fs_zyx_fsv32) ||
(l1.format == format::b_fs_zyx_fsv16 && l2.format != format::b_fs_zyx_fsv16) ||
(l2.format == format::b_fs_zyx_fsv16 && l1.format != format::b_fs_zyx_fsv16) ||
(l1.format == format::bs_fs_yx_bsv4_fsv4 && l2.format != format::bs_fs_yx_bsv4_fsv4) ||
(l2.format == format::bs_fs_yx_bsv4_fsv4 && l1.format != format::bs_fs_yx_bsv4_fsv4) ||
(l1.format == format::bs_fs_yx_bsv4_fsv2 && l2.format != format::bs_fs_yx_bsv4_fsv2) ||
(l2.format == format::bs_fs_yx_bsv4_fsv2 && l1.format != format::bs_fs_yx_bsv4_fsv2) ||
(l1.format == format::bs_fs_yx_bsv32_fsv16 && l2.format != format::bs_fs_yx_bsv32_fsv16) ||
(l2.format == format::bs_fs_yx_bsv32_fsv16 && l1.format != format::bs_fs_yx_bsv32_fsv16) ||
(l1.format == format::bs_fs_yx_bsv32_fsv32 && l2.format != format::bs_fs_yx_bsv32_fsv32) ||
(l2.format == format::bs_fs_yx_bsv32_fsv32 && l1.format != format::bs_fs_yx_bsv32_fsv32) ||
(l1.format == format::bs_fs_yx_bsv16_fsv16 && l2.format != format::bs_fs_yx_bsv16_fsv16) ||
(l2.format == format::bs_fs_yx_bsv16_fsv16 && l1.format != format::bs_fs_yx_bsv16_fsv16) ||
(l1.format == format::bs_fs_zyx_bsv16_fsv16 && l2.format != format::bs_fs_zyx_bsv16_fsv16) ||
(l2.format == format::bs_fs_zyx_bsv16_fsv16 && l1.format != format::bs_fs_zyx_bsv16_fsv16))
auto check_format = [&l1, &l2](cldnn::format format) {
return (l1.format == format && l2.format != format) ||
(l2.format == format && l1.format != format);
};
if (check_format(format::b_fs_yx_fsv4) ||
check_format(format::fs_b_yx_fsv32) ||
check_format(format::b_fs_yx_fsv16) ||
check_format(format::b_fs_yx_fsv32) ||
check_format(format::b_fs_zyx_fsv32) ||
check_format(format::b_fs_zyx_fsv16) ||
check_format(format::bs_fs_yx_bsv4_fsv4) ||
check_format(format::bs_fs_yx_bsv8_fsv4) ||
check_format(format::bs_fs_yx_bsv4_fsv2) ||
check_format(format::bs_fs_yx_bsv32_fsv16) ||
check_format(format::bs_fs_yx_bsv32_fsv32) ||
check_format(format::bs_fs_yx_bsv16_fsv16) ||
check_format(format::bs_fs_zyx_bsv16_fsv16))
return {false, false};
auto l1_pitch = l1.get_pitches();
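
The check_format lambda collapses each pair of "l1 uses this format but l2 does not" clauses into a single call per format. The same idea written as a generic helper over a list of formats — a hedged sketch, not code from the patch — would be:

// True when exactly one of the two layouts uses any format from the list,
// which is the condition the chained check_format calls express above.
#include <initializer_list>

template <typename Format>
bool single_sided_format(Format lhs, Format rhs, std::initializer_list<Format> formats) {
    for (auto fmt : formats) {
        if ((lhs == fmt) != (rhs == fmt))
            return true;
    }
    return false;
}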

View File

@ -428,7 +428,8 @@ dnnl::post_ops program_node::try_optimize_post_ops(dnnl::post_ops& p_ops, const
// Ignore optimized operations for "previous" operation in our operation pair
while (type_is_any_optimized(prev_type) && cur_post_op_idx < post_ops_size - 1) {
prev_post_op_idx++;
cur_post_op_idx++;
if (prev_post_op_idx == cur_post_op_idx)
cur_post_op_idx++;
prev_type = cur_post_ops[prev_post_op_idx].op_type;
cur_type = cur_post_ops[cur_post_op_idx].op_type;
}

File diff suppressed because it is too large

View File

@ -681,7 +681,7 @@ TEST_P(conv_fp32_reorder_fsv16_to_bfyx_conv, basic) {
reorder("reorder_fsv16", "input", format::b_fs_yx_fsv16, data_types::f32),
convolution("conv_prim", "reorder_fsv16", { "weights" }, p.groups, p.stride, p.pad, p.dilation),
reorder("reorder_bfyx", "conv_prim", format::bfyx, data_types::f32),
convolution("conv_output", "reorder_bfyx", { "weights_dw" }, 1, dw_stride, p.pad, p.dilation),
convolution("conv_output", "reorder_bfyx", { "weights_dw" }, p.out_shape.feature[0], dw_stride, p.pad, p.dilation),
activation("activation", "conv_output", activation_func::abs),
reorder("reorder_output", "activation", p.default_format, data_types::f32)
);
@ -10059,7 +10059,7 @@ TEST_P(conv_fp32_reorder_bfyx_to_fsv32_conv_subtract, have_subtract_per_feature)
data("weights_dw", get_mem(dw_weights_layout, -127, 127)),
convolution("conv_prim", "input", { "weights" }, p.groups, p.stride, p.pad, p.dilation),
reorder("reorder_fsv32", "conv_prim", format::fs_b_yx_fsv32, data_types::f32, values_to_subtract),
convolution("conv_output", "reorder_fsv32", { "weights_dw" }, 1, dw_stride, p.pad, p.dilation),
convolution("conv_output", "reorder_fsv32", { "weights_dw" }, p.out_shape.feature[0], dw_stride, p.pad, p.dilation),
activation("activation", "conv_output", activation_func::abs)
);
@ -10088,7 +10088,7 @@ TEST_P(conv_fp32_reorder_bfyx_to_fsv32_conv_fused_activation, have_fused_activat
convolution("conv_prim", "input", { "weights" }, p.groups, p.stride, p.pad, p.dilation),
reorder("reorder_fsv32", "conv_prim", format::fs_b_yx_fsv32, data_types::f32),
activation("activation_quantize", "reorder_fsv32", activation_func::relu),
convolution("conv_output", "activation_quantize", { "weights_dw" }, 1, dw_stride, p.pad, p.dilation),
convolution("conv_output", "activation_quantize", { "weights_dw" }, p.out_shape.feature[0], dw_stride, p.pad, p.dilation),
activation("activation", "conv_output", activation_func::abs)
);
@ -10116,7 +10116,7 @@ TEST_P(conv_fp32_reorder_bfyx_to_fsv32_conv_data_padding, have_data_padding) {
data("weights_dw", get_mem(dw_weights_layout, -127, 127)),
convolution("conv_prim", "input", { "weights" }, p.groups, p.stride, p.pad, p.dilation),
reorder("reorder_fsv32", "conv_prim", layout(data_types::f32, format::fs_b_yx_fsv32, dw_tensor, padding{ {0, 0, 1, 1}, 0 })),
convolution("conv_output", "reorder_fsv32", { "weights_dw" }, 1, dw_stride, p.pad, p.dilation),
convolution("conv_output", "reorder_fsv32", { "weights_dw" }, p.out_shape.feature[0], dw_stride, p.pad, p.dilation),
activation("activation", "conv_output", activation_func::abs),
activation("activation2", "conv_prim", activation_func::abs),
eltwise("add_bias", { "activation", "activation2" }, eltwise_mode::sum)

View File

@ -43,7 +43,7 @@ TEST(memory_tests, DISABLED_network_creation_loop)
{
engine eng;
memory in = memory::allocate(eng, layout{ data_types::f32, format::bfyx,{ 1, 1, 1000, 1000 } });
memory in = memory::allocate(eng, layout{ data_types::f32, format::bfyx, { 1, 1, 1000, 1000 } });
topology tpl{
input_layout("in", in->get_layout()),
@ -66,7 +66,7 @@ TEST(memory_pool, basic_non_padded_relu_pipe) {
auto x_size = 1;
auto y_size = 1;
auto input = engine->allocate_memory({ data_types::f32, format::bfyx,{ tensor(spatial(x_size, y_size), feature(feature_num), batch(batch_num)) } });
auto input = engine->allocate_memory({ data_types::f32, format::bfyx, { tensor(spatial(x_size, y_size), feature(feature_num), batch(batch_num)) } });
topology topology;
topology.add(input_layout("input", input->get_layout()));
@ -86,7 +86,7 @@ TEST(memory_pool, basic_non_padded_relu_pipe) {
network.set_input_data("input", input);
auto outputs = network.execute();
EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t) 64);
EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t)64);
}
TEST(memory_pool, basic_non_padded_relu_and_pooling_pipe) {
@ -99,13 +99,13 @@ TEST(memory_pool, basic_non_padded_relu_and_pooling_pipe) {
auto x_size = 4;
auto y_size = 4;
auto input = engine->allocate_memory({ data_types::f32, format::bfyx,{ tensor(spatial(x_size, y_size), feature(feature_num), batch(batch_num)) } });
auto input = engine->allocate_memory({ data_types::f32, format::bfyx, { tensor(spatial(x_size, y_size), feature(feature_num), batch(batch_num)) } });
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(activation("relu", "input", activation_func::relu));
topology.add(activation("relu1", "relu", activation_func::relu));
topology.add(pooling("pool1", "relu1",pooling_mode::max, { 1,1,3,3 }, { 1,1,2,2 }));
topology.add(pooling("pool1", "relu1", pooling_mode::max, { 1, 1, 3, 3 }, { 1, 1, 2, 2 }));
topology.add(activation("relu2", "pool1", activation_func::relu));
topology.add(activation("relu3", "relu2", activation_func::relu));
topology.add(activation("relu4", "relu3", activation_func::relu));
@ -133,7 +133,7 @@ TEST(memory_pool, multi_outputs_network) {
auto x_size = 4;
auto y_size = 4;
auto input = engine->allocate_memory({ data_types::f32, format::bfyx,{ tensor(spatial(x_size, y_size), feature(feature_num), batch(batch_num)) } });
auto input = engine->allocate_memory({ data_types::f32, format::bfyx, { tensor(spatial(x_size, y_size), feature(feature_num), batch(batch_num)) } });
topology topology;
topology.add(input_layout("input", input->get_layout()));
@ -153,7 +153,7 @@ TEST(memory_pool, multi_outputs_network) {
network.set_input_data("input", input);
auto outputs = network.execute();
EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t)1536);
EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t) 1536);
}
TEST(memory_pool, oooq) {
@ -171,14 +171,14 @@ TEST(memory_pool, oooq) {
auto x_size = 4;
auto y_size = 4;
auto input = engine->allocate_memory({ data_types::f32, format::bfyx,{ tensor(spatial(x_size, y_size), feature(feature_num), batch(batch_num)) } });
auto input = engine->allocate_memory({ data_types::f32, format::bfyx, { tensor(spatial(x_size, y_size), feature(feature_num), batch(batch_num)) } });
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(activation("relu1", "input", activation_func::relu));
topology.add(activation("relu2", "input", activation_func::relu));
topology.add(activation("relu3", "input", activation_func::relu));
topology.add(concatenation("concat1", { "relu1", "relu2"},concatenation::along_f));
topology.add(concatenation("concat1", { "relu1", "relu2" },concatenation::along_f));
topology.add(activation("relu4", "concat1", activation_func::relu));
topology.add(activation("relu5", "relu3", activation_func::relu));
topology.add(concatenation("concat2", { "relu4", "relu5" }, concatenation::along_f));
@ -209,7 +209,7 @@ TEST(memory_pool, DISABLED_shared_mem_pool_same_topology_twice) {
auto inp_x_size = 4;
auto inp_y_size = 4;
auto input = engine->allocate_memory({ data_types::f32, format::bfyx,{ tensor(spatial(inp_x_size, inp_y_size), feature(feature_num), batch(batch_num)) } });
auto input = engine->allocate_memory({ data_types::f32, format::bfyx, { tensor(spatial(inp_x_size, inp_y_size), feature(feature_num), batch(batch_num)) } });
set_values(input,
{ 1.0f, 2.5f, 3.0f, 4.0f, 5.0f, 2.0f, 2.0f, 3.0f, 6.1f, 4.7f, 1.0f, 1.0f, 8.2f, 1.0f, 2.0f, 1.0f,
@ -227,7 +227,7 @@ TEST(memory_pool, DISABLED_shared_mem_pool_same_topology_twice) {
topology.add(activation("relu4", "concat1", activation_func::relu));
topology.add(activation("relu5", "relu3", activation_func::relu));
topology.add(concatenation("concat2", { "relu4", "relu5" }, concatenation::along_f));
topology.add(activation("relu6", "concat2", activation_func::linear, {1.0f, 0.5f}));
topology.add(activation("relu6", "concat2", activation_func::linear, { 1.0f, 0.5f }));
build_options bo;
bo.set_option(build_option::optimize_data(true));
@ -286,8 +286,8 @@ TEST(memory_pool, DISABLED_shared_mem_pool_same_topology_twice_weights) {
auto inp_x_size = 4;
auto inp_y_size = 4;
auto input= engine->allocate_memory({ data_types::f32, format::bfyx,{ tensor(spatial(inp_x_size, inp_y_size), feature(feature_num), batch(batch_num)) } });
auto weights = engine->allocate_memory({ data_types::f32,format::bfyx,{ 1, 1, 3, 2 } });
auto input= engine->allocate_memory({ data_types::f32, format::bfyx, { tensor(spatial(inp_x_size, inp_y_size), feature(feature_num), batch(batch_num)) } });
auto weights = engine->allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 3, 2 } });
std::vector<float> dummy_input_data_1 = {
/*f0 xy*/ 0.8f, 0.65f, 0.1f, 1.0f, 1.0f, 0.5f, 0.11f, 0.33f, 0.66f, 0.11f, 0.22f, 0.33f, 0.99f, 0.8f, 0.7f, 0.5f,
@ -373,10 +373,10 @@ TEST(memory_pool, shared_mem_pool_diff_batches) {
layout lay_batch_8 = { dt, fmt, { tensor(spatial(inp_x_size, inp_y_size), feature(feature_num), batch(batch_8)) }};
auto input_1 = engine->allocate_memory(lay_batch_1);
auto input_8 = engine->allocate_memory(lay_batch_8);
auto weights = engine->allocate_memory({ dt, fmt, { 1, 1, 3, 2 } });
auto weights = engine->allocate_memory({ dt, fmt, { 1, 3, 3, 2 } });
std::vector<float> dummy_input_data_1 = generate_random_1d<float>(batch_1*feature_num*inp_x_size*inp_y_size, 0, 1);
std::vector<float> dummy_input_data_8 = generate_random_1d<float>(batch_8*feature_num*inp_x_size*inp_y_size, 0, 1);
std::vector<float> dummy_input_data_1 = generate_random_1d<float>(batch_1 * feature_num * inp_x_size * inp_y_size, 0, 1);
std::vector<float> dummy_input_data_8 = generate_random_1d<float>(batch_8 * feature_num * inp_x_size * inp_y_size, 0, 1);
set_values(input_1, dummy_input_data_1);
set_values(input_8, dummy_input_data_8);
@ -396,14 +396,14 @@ TEST(memory_pool, shared_mem_pool_diff_batches) {
auto outputs = network_first.execute();
auto dev_info = engine->get_device_info();
EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t)3928);
EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t) 4744);
topo.change_input_layout("input", input_1->get_layout());//change input layout to batch=1
network network_second(*engine, topo, bo);
network_second.set_input_data("input", input_1);
auto outputs_second = network_second.execute();
EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t)4328);
EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t) 5912);
}
TEST(memory_pool, shared_dep_two_output) {
@ -459,20 +459,20 @@ TEST(memory_pool, non_opt_intermidate_opt_after) {
auto input_memory1 = engine.allocate_memory(input_layout1);
auto input_memory2 = engine.allocate_memory(input_layout2);
auto scale_memory = engine.allocate_memory(layout(cldnn::data_types::f32, cldnn::format::bfyx, { 1,1,1,1 }));
auto scale_memory = engine.allocate_memory(layout(cldnn::data_types::f32, cldnn::format::bfyx, { 1, 1, 1, 1 }));
auto data_memory = cldnn::data("scale_mem", scale_memory);
set_values(input_memory1, { 1.0f, 2.0f, 3.0f, 4.0f });
set_values(input_memory2, { 5.0f, 6.0f, 7.0f, 8.0f });
set_values(scale_memory, { 1.0f});
set_values(scale_memory, { 1.0f });
auto reshape_tensor = cldnn::tensor(8, 1, 1, 1);
auto input = cldnn::input_layout("input1", input_layout1);
auto input2 = cldnn::input_layout("input2", input_layout2);
auto concat = cldnn::concatenation("concat", { "input1", "input2" }, cldnn::concatenation::along_b);
auto reshape = cldnn::reshape("reshape", "concat", reshape_tensor);
auto crop1 = cldnn::crop("crop1", "reshape", { 1,1,1,1 }, { 0, 0, 0, 0 });
auto crop2 = cldnn::crop("crop2", "reshape", { 1,1,1,1 }, { 1, 0, 0, 0 });
auto crop1 = cldnn::crop("crop1", "reshape", { 1, 1, 1, 1 }, { 0, 0, 0, 0 });
auto crop2 = cldnn::crop("crop2", "reshape", { 1, 1, 1, 1 }, { 1, 0, 0, 0 });
auto eltwise1 = cldnn::scale("elt1", "crop1", "scale_mem");
auto eltwise2 = cldnn::scale("elt2", "crop2", "scale_mem");
@ -508,7 +508,7 @@ TEST(memory_pool, add_mem_dep_test) {
auto input_layout1 = layout(cldnn::data_types::f32, cldnn::format::bfyx, { 1, 2, 2, 2 });
auto input_memory1 = engine.allocate_memory(input_layout1);
auto scale_memory = engine.allocate_memory(layout(cldnn::data_types::f32, cldnn::format::bfyx, { 1,1,1,1 }));
auto scale_memory = engine.allocate_memory(layout(cldnn::data_types::f32, cldnn::format::bfyx, { 1, 1, 1, 1 }));
auto data_memory = cldnn::data("scale_mem", scale_memory);
set_values(input_memory1, { 1.0f, 2.0f, 3.0f, 4.0f,
@ -518,8 +518,8 @@ TEST(memory_pool, add_mem_dep_test) {
auto input = cldnn::input_layout("input1", input_layout1);
auto actv1 = cldnn::activation("input_activ1", "input1", activation_func::abs);
auto actv2 = cldnn::activation("input_activ2", "input1", activation_func::abs);
auto crop1 = cldnn::crop("crop1", "input_activ1", { 1,1,2,2 }, { 0, 0, 0, 0 });
auto crop2 = cldnn::crop("crop2", "input_activ2", { 1,1,2,2 }, { 0, 1, 0, 0 });
auto crop1 = cldnn::crop("crop1", "input_activ1", { 1, 1, 2, 2 }, { 0, 0, 0, 0 });
auto crop2 = cldnn::crop("crop2", "input_activ2", { 1, 1, 2, 2 }, { 0, 1, 0, 0 });
auto eltwise1 = cldnn::scale("elt1", "crop1", "scale_mem");
auto eltwise2 = cldnn::scale("elt2", "crop2", "scale_mem");
auto actv3 = cldnn::activation("out3", "elt1", activation_func::abs);

View File

@ -9,7 +9,7 @@ file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/
file(GLOB HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.h)
# create library
add_library(${TARGET_NAME} SHARED EXCLUDE_FROM_ALL ${HEADERS} ${SOURCES})
add_library(${TARGET_NAME} SHARED ${HEADERS} ${SOURCES})
# Find OpenCV components if exist
find_package(OpenCV COMPONENTS core imgproc imgcodecs QUIET)

View File

@ -1,31 +1,72 @@
#include "bmp_reader.h"
#include <memory.h>
#include <stdio.h>
#include <stdlib.h>
int readBmpImage(const char* fileName, BitMap* image) {
FILE* input = fopen(fileName, "rb");
size_t cnt;
int status = 0;
FILE* input = 0;
if (input == NULL) {
printf("[BMP] file %s is not opened\n", fileName);
return 1;
if (NULL == fileName || NULL == image) {
printf("[BMP] bad arguments\n");
status = -1;
goto Exit;
}
fread(&image->header.type, 2, 1, input);
memset(image, 0, sizeof(BitMap));
input = fopen(fileName, "rb");
if (input == NULL) {
printf("[BMP] file %s is not opened\n", fileName);
status = 1;
goto Exit;
}
cnt = fread(&image->header.type, sizeof(image->header.type), sizeof(unsigned char), input);
if (cnt != sizeof(image->header.type)) {
printf("[BMP] file read error\n");
status = 2;
goto Exit;
}
if (image->header.type != 'M' * 256 + 'B') {
printf("[BMP] file is not bmp type\n");
return 2;
status = 2;
goto Exit;
}
fread(&image->header.size, 4, 1, input);
fread(&image->header.reserved, 4, 1, input);
fread(&image->header.offset, 4, 1, input);
cnt = fread(&image->header.size, sizeof(image->header.size), sizeof(unsigned char), input);
if (cnt != sizeof(image->header.size)) {
printf("[BMP] file read error\n");
status = 2;
goto Exit;
}
fread(&image->infoHeader, sizeof(BmpInfoHeader), 1, input);
cnt = fread(&image->header.reserved, sizeof(image->header.reserved), sizeof(unsigned char), input);
if (cnt != sizeof(image->header.reserved)) {
printf("[BMP] file read error\n");
status = 2;
goto Exit;
}
cnt = fread(&image->header.offset, sizeof(image->header.offset), sizeof(unsigned char), input);
if (cnt != sizeof(image->header.offset)) {
printf("[BMP] file read error\n");
status = 2;
goto Exit;
}
cnt = fread(&image->infoHeader, sizeof(BmpInfoHeader), sizeof(unsigned char), input);
if (cnt != sizeof(image->header.offset)) {
printf("[BMP] file read error\n");
status = 2;
goto Exit;
}
image->width = image->infoHeader.width;
image->height = image->infoHeader.height;
image->height = abs(image->infoHeader.height);
if (image->infoHeader.bits != 24) {
printf("[BMP] 24bpp only supported. But input has: %d\n", image->infoHeader.bits);
@ -38,21 +79,49 @@ int readBmpImage(const char* fileName, BitMap* image) {
}
int padSize = image->width & 3;
size_t row_size = (size_t)image->width * 3;
char pad[3];
size_t size = image->width * image->height * 3;
size_t size = row_size * image->height;
image->data = malloc(sizeof(char) * size);
if (NULL == image->data) {
printf("[BMP] memory allocation failed\n");
return 5;
}
fseek(input, image->header.offset, 0);
if (0 != fseek(input, image->header.offset, SEEK_SET)) {
printf("[BMP] file seek error\n");
status = 2;
goto Exit;
}
// read row by row; BMP rows are stored bottom-up unless the height is negative, so flip while copying
int i;
for (i = 0; i < image->height; i++) {
unsigned int storeAt = image->infoHeader.height < 0 ? i : (unsigned int)image->height - 1 - i;
fread(image->data + image->width * 3 * storeAt, image->width * 3, 1, input);
fread(pad, padSize, 1, input);
cnt = fread(image->data + row_size * storeAt, row_size, sizeof(unsigned char), input);
if (cnt != row_size) {
printf("[BMP] file read error\n");
status = 2;
goto Exit;
}
cnt = fread(pad, padSize, sizeof(unsigned char), input);
if (cnt != padSize) {
printf("[BMP] file read error\n");
status = 2;
goto Exit;
}
}
fclose(input);
return 0;
Exit:
if (0 != status && NULL != image && NULL != image->data) {
free(image->data);
}
if (NULL != input) {
fclose(input);
}
return status;
}
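
After the rewrite every fread result is checked, and any failure funnels through the single Exit label, which frees the pixel buffer and closes the file before a non-zero status is returned. A hedged usage sketch — field names follow the BitMap struct used above, and the bmp_reader.h include path is assumed — looks like:

#include <stdio.h>
#include <stdlib.h>
#include "bmp_reader.h"   // assumed header declaring BitMap and readBmpImage

int load_and_release(const char* path) {
    BitMap image;
    int status = readBmpImage(path, &image);
    if (status != 0) {
        printf("readBmpImage(%s) failed with status %d\n", path, status);
        return status;            // on failure the reader already freed image.data
    }
    printf("loaded %dx%d BMP\n", (int)image.width, (int)image.height);
    free(image.data);             // on success the caller owns the pixel buffer
    return 0;
}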

View File

@ -13,7 +13,7 @@ source_group("src" FILES ${LIBRARY_SRC})
source_group("include" FILES ${LIBRARY_HEADERS})
# Create library file from sources.
add_library(${TARGET_NAME} SHARED EXCLUDE_FROM_ALL ${MAIN_SRC} ${LIBRARY_HEADERS})
add_library(${TARGET_NAME} SHARED ${MAIN_SRC} ${LIBRARY_HEADERS})
# Find OpenCV components if exist
find_package(OpenCV COMPONENTS core imgproc imgcodecs QUIET)

View File

@ -26,6 +26,32 @@ def create_onnx_model():
return make_model(graph, producer_name="ngraph ONNX Importer")
def create_onnx_model_with_subgraphs():
A = onnx.helper.make_tensor_value_info("A", onnx.TensorProto.FLOAT, [3])
B = onnx.helper.make_tensor_value_info("B", onnx.TensorProto.FLOAT, [3])
add_out = onnx.helper.make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, [3])
sub_out = onnx.helper.make_tensor_value_info("sub_out", onnx.TensorProto.FLOAT, [3])
add = onnx.helper.make_node("Add", inputs=["A", "B"], outputs=["add_out"])
sub = onnx.helper.make_node("Sub", inputs=["A", "B"], outputs=["sub_out"])
then_body = make_graph([add], "then_body", [], [add_out])
else_body = make_graph([sub], "else_body", [], [sub_out])
if_node = onnx.helper.make_node(
"If",
inputs=["cond"],
outputs=["res"],
then_branch=then_body,
else_branch=else_body
)
cond = onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, [])
res = onnx.helper.make_tensor_value_info("res", onnx.TensorProto.FLOAT, [3])
graph = make_graph([if_node], "graph", [cond, A, B], [res])
return make_model(graph, producer_name="ngraph ONNX Importer")
def run_function(function, *inputs, expected):
runtime = get_runtime()
computation = runtime.computation(function)
@ -37,15 +63,18 @@ def run_function(function, *inputs, expected):
fem = FrontEndManager()
onnx_model_filename = "model.onnx"
onnx_model_with_subgraphs_filename = "model_subgraphs.onnx"
ONNX_FRONTEND_NAME = "onnx"
def setup_module():
onnx.save_model(create_onnx_model(), onnx_model_filename)
onnx.save_model(create_onnx_model_with_subgraphs(), onnx_model_with_subgraphs_filename)
def teardown_module():
os.remove(onnx_model_filename)
os.remove(onnx_model_with_subgraphs_filename)
def skip_if_onnx_frontend_is_disabled():
@ -72,17 +101,29 @@ def test_convert():
run_function(function, a, b, expected=[expected])
def test_decode_and_convert():
@pytest.mark.parametrize("model_filename, inputs, expected", [
[onnx_model_filename,
[np.array([[1, 2], [3, 4]], dtype=np.float32),
np.array([[2, 3], [4, 5]], dtype=np.float32)],
np.array([[1.5, 5], [10.5, 18]], dtype=np.float32)],
[onnx_model_with_subgraphs_filename,
[np.array(False, dtype=bool),
np.array([1, 2, 3], dtype=np.float32),
np.array([2, 3, 5], dtype=np.float32)],
np.array([-1, -1, -2], dtype=np.float32)],
])
def test_decode_and_convert(model_filename, inputs, expected):
skip_if_onnx_frontend_is_disabled()
fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME)
assert fe
model = fe.load(onnx_model_filename)
model = fe.load(model_filename)
assert model
decoded_function = fe.decode(model)
assert decoded_function
for op in decoded_function.get_ordered_ops():
assert op.get_type_name() in ["Parameter", "Constant", "ONNXFrameworkNode",
"ONNXSubgraphFrameworkNode", "Result"]
@ -92,10 +133,7 @@ def test_decode_and_convert():
for op in decoded_function.get_ordered_ops():
assert op.get_type_name() not in ["ONNXFrameworkNode", "ONNXSubgraphFrameworkNode"]
a = np.array([[1, 2], [3, 4]], dtype=np.float32)
b = np.array([[2, 3], [4, 5]], dtype=np.float32)
expected = np.array([[1.5, 5], [10.5, 18]], dtype=np.float32)
run_function(decoded_function, a, b, expected=[expected])
run_function(decoded_function, *inputs, expected=[expected])
def test_load_by_model():

View File

@ -11,7 +11,7 @@ add_library(${TARGET_NAME} STATIC EXCLUDE_FROM_ALL ${LIBRARY_SRC} ${LIBRARY_HEAD
target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../..)
target_link_libraries(${TARGET_NAME} PUBLIC frontend_common interpreter_backend engines_test_util
target_link_libraries(${TARGET_NAME} PUBLIC frontend_common engines_test_util
ngraph cnpy commonTestUtils ngraph_test_util openvino::util)
target_compile_definitions(${TARGET_NAME}

View File

@ -0,0 +1,56 @@
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "Softmax"
attribute {
name: "axis"
i: 1
type: INT
}
}
name: "test_softmax_axis_1"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 4
}
dim {
dim_value: 5
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 4
}
dim {
dim_value: 5
}
}
}
}
}
}
opset_import {
version: 11
}

View File

@ -0,0 +1,56 @@
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "Softmax"
attribute {
name: "axis"
i: -1
type: INT
}
}
name: "test_softmax_axis_0"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 4
}
dim {
dim_value: 5
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 4
}
dim {
dim_value: 5
}
}
}
}
}
}
opset_import {
version: 11
}

View File

@ -0,0 +1,56 @@
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "Softmax"
attribute {
name: "axis"
i: -1
type: INT
}
}
name: "test_softmax_axis_0"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 4
}
dim {
dim_value: 5
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 4
}
dim {
dim_value: 5
}
}
}
}
}
}
opset_import {
version: 13
}

View File

@ -380,7 +380,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_initializer_wo_input) {
test_case.run();
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, onnx_expand_function) {
NGRAPH_TEST(${BACKEND_NAME}, onnx_expand_function) {
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/quantization/dynamicquantizelinear.onnx"));
@ -392,7 +392,7 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, onnx_expand_function) {
test_case.run();
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, onnx_expand_function_dependency_to_created_subgraph) {
NGRAPH_TEST(${BACKEND_NAME}, onnx_expand_function_dependency_to_created_subgraph) {
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/transformations/greater_or_equal.onnx"));
@ -403,7 +403,7 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, onnx_expand_function_dependency_to_created_sub
test_case.run();
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, onnx_expand_context_dependent_function) {
NGRAPH_TEST(${BACKEND_NAME}, onnx_expand_context_dependent_function) {
auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/transformations/softmax_crossentropy_consumed.onnx"));
@ -690,19 +690,24 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_softmax_1D) {
}
namespace {
// common input for all Softmax 3D test cases (Shape = {3,4,5})
// clang-format off
const std::vector<float> SOFTMAX_INPUT = {
2.75793882, -0.50841322, 0.82013929, -0.62409912, -0.96136118, 0.21004745, 1.38337255,
1.19030397, 2.0940445, -0.03551657, -0.78686039, 1.992782, 0.04300319, -0.29230777,
-0.56797112, -1.26732165, -0.61935399, 0.57670432, 0.92844898, 2.82469233,
2.75793882, -0.50841322, 0.82013929, -0.62409912, -0.96136118,
0.21004745, 1.38337255, 1.19030397, 2.0940445, -0.03551657,
-0.78686039, 1.992782, 0.04300319, -0.29230777, -0.56797112,
-1.26732165, -0.61935399, 0.57670432, 0.92844898, 2.82469233,
0.98721677, -0.05100663, -1.21178917, -0.17530157, 1.40051805, -0.13259761, -1.14313018,
0.2673723, -0.87996154, 1.29053106, 1.55, 0.8396538, 1.20729817, 0.23727845,
-0.89113606, -1.70909842, 0.26460363, -0.70566808, 2.383518, 1.07024615,
0.98721677, -0.05100663, -1.21178917, -0.17530157, 1.40051805,
-0.13259761, -1.14313018, 0.2673723, -0.87996154, 1.29053106,
1.55, 0.8396538, 1.20729817, 0.23727845, -0.89113606,
-1.70909842, 0.26460363, -0.70566808, 2.383518, 1.07024615,
-1.21722605, 0.82919357, 0.55765697, 0.12657686, 0.63432172, 0.75425957, -2.43721014,
-1.24478184, 2.65316853, 1.19509542, -0.95523998, 0.5149006, -0.01151649, 0.68327026,
-0.4589638, -0.46554745, 0.21055324, 0.39266729, 2.05098086, 1.83207919};
-1.21722605, 0.82919357, 0.55765697, 0.12657686, 0.63432172,
0.75425957, -2.43721014, -1.24478184, 2.65316853, 1.19509542,
-0.95523998, 0.5149006, -0.01151649, 0.68327026, -0.4589638,
-0.46554745, 0.21055324, 0.39266729, 2.05098086, 1.83207919};
} // namespace
// clang-format on
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_softmax_axis_0) {
auto function = onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/softmax_axis_0.onnx"));
@ -710,19 +715,24 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_softmax_axis_0) {
auto test_case = test::TestCase(function, s_device);
test_case.add_input<float>(SOFTMAX_INPUT);
// clang-format off
test_case.add_expected_output<float>(
Shape{3, 4, 5},
{0.09683057, 0.00369363, 0.01394559, 0.00329012, 0.00234823, 0.00757665, 0.02449322,
0.02019284, 0.04985249, 0.00592694, 0.00279593, 0.04505148, 0.00641108, 0.00458466,
0.00348007, 0.00172928, 0.00330577, 0.01093237, 0.01554086, 0.10351497,
{0.09683057, 0.00369363, 0.01394559, 0.00329012, 0.00234823,
0.00757665, 0.02449322, 0.02019284, 0.04985249, 0.00592694,
0.00279593, 0.04505148, 0.00641108, 0.00458466, 0.00348007,
0.00172928, 0.00330577, 0.01093237, 0.01554086, 0.10351497,
0.01648154, 0.00583583, 0.00182802, 0.00515374, 0.02491679, 0.00537859, 0.00195794,
0.00802367, 0.00254737, 0.0223216, 0.02893419, 0.0142204, 0.02053893, 0.00778581,
0.00251907, 0.00111174, 0.00800149, 0.0030324, 0.06658917, 0.0179084,
0.01648154, 0.00583583, 0.00182802, 0.00515374, 0.02491679,
0.00537859, 0.00195794, 0.00802367, 0.00254737, 0.0223216,
0.02893419, 0.0142204, 0.02053893, 0.00778581, 0.00251907,
0.00111174, 0.00800149, 0.0030324, 0.06658917, 0.0179084,
0.00181811, 0.01407243, 0.01072611, 0.0069699, 0.01158077, 0.01305647, 0.00053677,
0.0017687, 0.08719896, 0.02028982, 0.00236265, 0.01027717, 0.0060709, 0.01216173,
0.00388087, 0.00385541, 0.00758048, 0.00909469, 0.04775123, 0.03836337});
0.00181811, 0.01407243, 0.01072611, 0.0069699, 0.01158077,
0.01305647, 0.00053677, 0.0017687, 0.08719896, 0.02028982,
0.00236265, 0.01027717, 0.0060709, 0.01216173, 0.00388087,
0.00385541, 0.00758048, 0.00909469, 0.04775123, 0.03836337});
// clang-format on
test_case.run(6);
}
@ -733,35 +743,113 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_softmax_axis_1) {
auto test_case = test::TestCase(function, s_device);
test_case.add_input<float>(SOFTMAX_INPUT);
// clang-format off
test_case.add_expected_output<float>(
Shape{3, 4, 5},
{0.22757064, 0.00868076, 0.03277484, 0.00773243, 0.0055188, 0.0178066, 0.05756383,
0.04745709, 0.11716303, 0.01392945, 0.00657097, 0.10587974, 0.01506727, 0.01077484,
0.00817884, 0.00406413, 0.00776921, 0.0256932, 0.03652405, 0.24328028,
{0.22757064, 0.00868076, 0.03277484, 0.00773243, 0.0055188,
0.0178066, 0.05756383, 0.04745709, 0.11716303, 0.01392945,
0.00657097, 0.10587974, 0.01506727, 0.01077484, 0.00817884,
0.00406413, 0.00776921, 0.0256932, 0.03652405, 0.24328028,
0.06217413, 0.02201481, 0.00689594, 0.01944171, 0.09399488, 0.02028993, 0.00738604,
0.03026811, 0.00960958, 0.08420492, 0.10914991, 0.05364435, 0.07748005, 0.02937079,
0.0095028, 0.00419387, 0.03018442, 0.01143929, 0.2511977, 0.06755678,
0.06217413, 0.02201481, 0.00689594, 0.01944171, 0.09399488,
0.02028993, 0.00738604, 0.03026811, 0.00960958, 0.08420492,
0.10914991, 0.05364435, 0.07748005, 0.02937079, 0.0095028,
0.00419387, 0.03018442, 0.01143929, 0.2511977, 0.06755678,
0.00587593, 0.04548053, 0.0346656, 0.02252594, 0.03742775, 0.04219705, 0.00173478,
0.00571623, 0.2818174, 0.06557446, 0.00763582, 0.03321466, 0.01962049, 0.03930537,
0.01254255, 0.01246025, 0.02449929, 0.02939305, 0.15432668, 0.12398617});
0.00587593, 0.04548053, 0.0346656, 0.02252594, 0.03742775,
0.04219705, 0.00173478, 0.00571623, 0.2818174, 0.06557446,
0.00763582, 0.03321466, 0.01962049, 0.03930537, 0.01254255,
0.01246025, 0.02449929, 0.02939305, 0.15432668, 0.12398617});
// clang-format on
test_case.run(4);
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_softmax_invalid_axis_1D) {
ASSERT_THROW(
onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/softmax_invalid_axis_1D.onnx")),
ngraph::ngraph_error)
<< "Softmax model with invalid axis was successfully imported while it should have thrown.";
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_softmax_axis_1_opset11) {
auto function =
onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/softmax_axis_1_opset11.onnx"));
auto test_case = test::TestCase(function, s_device);
test_case.add_input<float>(SOFTMAX_INPUT);
// clang-format off
test_case.add_expected_output<float>(
Shape{3, 4, 5},
{0.88890495, 0.04825497, 0.27088348, 0.04490523, 0.02037154,
0.06955369, 0.31998834, 0.39223197, 0.68041159, 0.05141776,
0.02566661, 0.5885689, 0.12453075, 0.06257374, 0.03019055,
0.01587475, 0.0431878, 0.21235381, 0.21210944, 0.89802015,
0.31752626, 0.19442629, 0.0546935, 0.06279221, 0.36823282,
0.10362164, 0.06523066, 0.24006419, 0.03103672, 0.32987983,
0.55743381, 0.473766, 0.61451431, 0.09486084, 0.03722801,
0.02141829, 0.26657706, 0.090728, 0.81131024, 0.26465935,
0.08619648, 0.43343993, 0.3877785, 0.04523505, 0.15625437,
0.61900597, 0.01653285, 0.06394322, 0.56592636, 0.27376196,
0.11201305, 0.31654337, 0.21947994, 0.07893034, 0.05236297,
0.18278451, 0.23348385, 0.32879834, 0.30990825, 0.5176207});
// clang-format on
test_case.run(4);
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_softmax_invalid_axis_3D) {
ASSERT_THROW(
onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/softmax_invalid_axis_3D.onnx")),
ngraph::ngraph_error)
<< "Softmax model with invalid axis was successfully imported while it should have thrown.";
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_softmax_axis_negative_1_opset11) {
auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/softmax_axis_negative_1_opset11.onnx"));
auto test_case = test::TestCase(function);
test_case.add_input<float>(SOFTMAX_INPUT);
// clang-format off
test_case.add_expected_output<float>(
Shape{3, 4, 5},
{0.88890495, 0.04825497, 0.27088348, 0.04490523, 0.02037154,
0.06955369, 0.31998834, 0.39223197, 0.68041159, 0.05141776,
0.02566661, 0.5885689, 0.12453075, 0.06257374, 0.03019055,
0.01587475, 0.0431878, 0.21235381, 0.21210944, 0.89802015,
0.31752626, 0.19442629, 0.0546935, 0.06279221, 0.36823282,
0.10362164, 0.06523066, 0.24006419, 0.03103672, 0.32987983,
0.55743381, 0.473766, 0.61451431, 0.09486084, 0.03722801,
0.02141829, 0.26657706, 0.090728, 0.81131024, 0.26465935,
0.08619648, 0.43343993, 0.3877785, 0.04523505, 0.15625437,
0.61900597, 0.01653285, 0.06394322, 0.56592636, 0.27376196,
0.11201305, 0.31654337, 0.21947994, 0.07893034, 0.05236297,
0.18278451, 0.23348385, 0.32879834, 0.30990825, 0.5176207});
// clang-format on
test_case.run(6);
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_softmax_axis_negative_1_opset13) {
auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/softmax_axis_negative_1_opset13.onnx"));
auto test_case = test::TestCase(function);
test_case.add_input<float>(SOFTMAX_INPUT);
// clang-format off
test_case.add_expected_output<float>(
Shape{3, 4, 5},
{0.88890495, 0.04825497, 0.27088348, 0.04490523, 0.02037154,
0.06955369, 0.31998834, 0.39223197, 0.68041159, 0.05141776,
0.02566661, 0.5885689, 0.12453075, 0.06257374, 0.03019055,
0.01587475, 0.0431878, 0.21235381, 0.21210944, 0.89802015,
0.31752626, 0.19442629, 0.0546935, 0.06279221, 0.36823282,
0.10362164, 0.06523066, 0.24006419, 0.03103672, 0.32987983,
0.55743381, 0.473766, 0.61451431, 0.09486084, 0.03722801,
0.02141829, 0.26657706, 0.090728, 0.81131024, 0.26465935,
0.08619648, 0.43343993, 0.3877785, 0.04523505, 0.15625437,
0.61900597, 0.01653285, 0.06394322, 0.56592636, 0.27376196,
0.11201305, 0.31654337, 0.21947994, 0.07893034, 0.05236297,
0.18278451, 0.23348385, 0.32879834, 0.30990825, 0.5176207});
// clang-format on
test_case.run(6);
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_sub) {

View File

@ -199,9 +199,10 @@ void Graph::decode_to_framework_nodes() {
if (node.has_subgraphs()) {
const auto& subgraphs = node.get_subgraphs();
auto inputs = node.get_ng_inputs();
std::vector<std::shared_ptr<Function>> functions;
for (const auto& kv : subgraphs) {
auto& subgraph = kv.second;
subgraph->decode();
functions.push_back(subgraph->decode());
for (const auto& input : subgraph->get_inputs_from_parent()) {
const auto& name = input.get_node()->get_friendly_name();
if (std::find_if(inputs.begin(), inputs.end(), [&name](const Output<ngraph::Node>& n) -> bool {
@ -211,10 +212,9 @@ void Graph::decode_to_framework_nodes() {
}
}
}
framework_node =
std::make_shared<ngraph::frontend::ONNXSubgraphFrameworkNode>(shared_from_this(), node, inputs);
framework_node = std::make_shared<frontend::ONNXSubgraphFrameworkNode>(node, functions, inputs);
} else {
framework_node = std::make_shared<ngraph::frontend::ONNXFrameworkNode>(shared_from_this(), node);
framework_node = std::make_shared<frontend::ONNXFrameworkNode>(node);
}
OutputVector ng_nodes{framework_node->outputs()};
set_friendly_names(node, ng_nodes);
@ -240,7 +240,10 @@ std::shared_ptr<Function> Graph::create_function() {
std::shared_ptr<Function> Graph::decode() {
decode_to_framework_nodes();
return create_function();
auto function = create_function();
auto& rt_info = function->get_rt_info();
rt_info[ONNX_GRAPH_RT_ATTRIBUTE] = shared_from_this();
return function;
}
bool Graph::is_ng_node_in_cache(const std::string& name) const {
@ -399,7 +402,8 @@ void Subgraph::find_inputs_from_parent() {
for (const auto& out_name : node_proto.output()) {
if (m_cache->contains(out_name)) {
auto node_to_replace_input = m_cache->get_node(out_name).get_node();
if (!dynamic_cast<op::util::MultiSubGraphOp*>(node_to_replace_input))
if (!ov::is_type<op::util::MultiSubGraphOp>(node_to_replace_input) &&
!ov::is_type<frontend::ONNXSubgraphFrameworkNode>(node_to_replace_input))
continue;
auto inputs = node_to_replace_input->input_values();
for (size_t i = 0; i < inputs.size(); i++) {

View File

@ -121,6 +121,8 @@ inline std::ostream& operator<<(std::ostream& outs, const Graph& graph) {
return (outs << "<Graph: " << graph.get_name() << ">");
}
static const char* const ONNX_GRAPH_RT_ATTRIBUTE = "onnx_graph";
} // namespace onnx_import
} // namespace ngraph

View File

@ -21,10 +21,14 @@ namespace frontend {
NGRAPH_RTTI_DEFINITION(ONNXFrameworkNode, "ONNXFrameworkNode", 1);
std::shared_ptr<Node> ONNXFrameworkNode::clone_with_new_inputs(const OutputVector& inputs) const {
return std::make_shared<ONNXFrameworkNode>(m_graph, m_node, inputs);
return std::make_shared<ONNXFrameworkNode>(m_node, inputs);
}
NGRAPH_RTTI_DEFINITION(ONNXSubgraphFrameworkNode, "ONNXSubgraphFrameworkNode", 1);
std::shared_ptr<Node> ONNXSubgraphFrameworkNode::clone_with_new_inputs(const OutputVector& inputs) const {
return std::make_shared<ONNXSubgraphFrameworkNode>(m_node, m_functions, inputs);
}
} // namespace frontend
} // namespace ngraph

View File

@ -38,20 +38,16 @@ class ONNXFrameworkNode : public ov::op::util::FrameworkNode {
public:
NGRAPH_RTTI_DECLARATION;
ONNXFrameworkNode(std::shared_ptr<onnx_import::Graph> graph, const onnx_import::Node& node)
ONNXFrameworkNode(const onnx_import::Node& node)
: ov::op::util::FrameworkNode(node.get_ng_inputs(), node.get_outputs_size()),
m_node(node),
m_graph(graph) {}
m_node(node) {}
ONNXFrameworkNode(std::shared_ptr<onnx_import::Graph> graph,
const onnx_import::Node& node,
const OutputVector& inputs)
ONNXFrameworkNode(const onnx_import::Node& node, const OutputVector& inputs)
: ov::op::util::FrameworkNode(inputs, node.get_outputs_size()),
m_node(node),
m_graph(graph) {}
m_node(node) {}
OutputVector get_ng_nodes() const {
OutputVector ng_nodes{m_graph->make_ng_nodes(m_node)};
OutputVector get_ng_nodes(const std::shared_ptr<onnx_import::Graph>& graph) const {
OutputVector ng_nodes{graph->make_ng_nodes(m_node)};
if (ng_nodes.size() > get_output_size()) {
ng_nodes.resize(get_output_size());
}
@ -71,35 +67,31 @@ public:
protected:
onnx_import::Node m_node;
private:
std::shared_ptr<onnx_import::Graph> m_graph;
};
class ONNXSubgraphFrameworkNode : public ONNXFrameworkNode {
public:
NGRAPH_RTTI_DECLARATION;
ONNXSubgraphFrameworkNode(std::shared_ptr<onnx_import::Graph> graph,
const onnx_import::Node& node,
ONNXSubgraphFrameworkNode(const onnx_import::Node& node,
const std::vector<std::shared_ptr<Function>>& functions,
const OutputVector& inputs)
: ONNXFrameworkNode(graph, node, inputs) {}
: ONNXFrameworkNode(node, inputs),
m_functions(functions) {}
void infer_inputs_from_parent() {
for (auto& subgraph : m_node.get_subgraphs())
subgraph.second->infer_inputs_from_parent();
}
std::vector<std::shared_ptr<Function>> get_subgraph_functions() const {
std::vector<std::shared_ptr<Function>> ret;
for (const auto& kv : m_node.get_subgraphs()) {
auto& subgraph = kv.second;
ret.push_back(std::make_shared<Function>(subgraph->get_ng_outputs(),
subgraph->get_ng_parameters(),
subgraph->get_name()));
}
return ret;
const std::vector<std::shared_ptr<Function>>& get_subgraph_functions() const {
return m_functions;
}
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& inputs) const override;
private:
std::vector<std::shared_ptr<Function>> m_functions;
};
} // namespace frontend

View File

@ -37,17 +37,8 @@ OutputVector softmax(const Node& node) {
result = default_opset::Constant::create(data.get_element_type(), Shape{}, {1});
break;
}
case 1: {
// checks if the axis belongs to the allowed values set (-1 and 0 for 1D)
ngraph::normalize_axis(node.get_description(), axis, data.get_partial_shape().rank());
result = std::make_shared<default_opset::Softmax>(data, 0);
break;
}
default: {
const auto normalized_axis =
ngraph::normalize_axis(node.get_description(), axis, data.get_partial_shape().rank());
result = onnx_softmax(data, normalized_axis);
result = onnx_softmax(data, axis);
break;
}
}
@ -69,17 +60,8 @@ OutputVector softmax(const Node& node) {
result = default_opset::Constant::create(data.get_element_type(), Shape{}, {1});
break;
}
case 1: {
// checks if the axis belongs to the allowed values set (-1 and 0 for 1D)
ngraph::normalize_axis(node.get_description(), axis, data.get_partial_shape().rank());
result = std::make_shared<default_opset::Softmax>(data, 0);
break;
}
default: {
const auto normalized_axis =
ngraph::normalize_axis(node.get_description(), axis, data.get_partial_shape().rank());
result = std::make_shared<default_opset::Softmax>(data, normalized_axis);
result = std::make_shared<ov::op::v8::Softmax>(data, axis);
break;
}
}
@ -92,9 +74,8 @@ OutputVector softmax(const Node& node) {
const auto data = node.get_ng_inputs().at(0);
const auto axis = node.get_attribute_value<int64_t>("axis", -1);
const auto normalized_axis = ngraph::normalize_axis(node.get_description(), axis, data.get_partial_shape().rank());
return {std::make_shared<default_opset::Softmax>(data, normalized_axis)};
return {std::make_shared<ov::op::v8::Softmax>(data, axis)};
}
} // namespace set_13
} // namespace op

View File

@ -60,6 +60,12 @@ void apply_transformations(ONNX_NAMESPACE::ModelProto& model_proto, const std::s
} // namespace
void convert_decoded_function(std::shared_ptr<Function> function) {
auto& rt_info = function->get_rt_info();
auto it = rt_info.find(ONNX_GRAPH_RT_ATTRIBUTE);
OPENVINO_ASSERT(it != rt_info.end(),
"Could not find '" + std::string(ONNX_GRAPH_RT_ATTRIBUTE) +
"' attribute in decoded model. Model probably wasn't created by FrontEnd::decode function.");
auto onnx_graph = it->second.as<std::shared_ptr<onnx_import::Graph>>();
for (const auto& node : function->get_ordered_ops()) {
if (auto raw_node = std::dynamic_pointer_cast<frontend::ONNXFrameworkNode>(node)) {
if (auto subgraph_node = std::dynamic_pointer_cast<frontend::ONNXSubgraphFrameworkNode>(node)) {
@ -68,7 +74,7 @@ void convert_decoded_function(std::shared_ptr<Function> function) {
convert_decoded_function(function);
}
}
auto ng_nodes = raw_node->get_ng_nodes();
auto ng_nodes = raw_node->get_ng_nodes(onnx_graph);
replace_node(raw_node, ng_nodes);
} else {
// Have to revalidate node because new inputs can affect shape/type
@ -76,6 +82,7 @@ void convert_decoded_function(std::shared_ptr<Function> function) {
node->revalidate_and_infer_types();
}
}
rt_info.erase(it);
detail::remove_dangling_parameters(function);
detail::remove_dangling_results(function);
}

View File

@ -137,11 +137,12 @@ static void CreateConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ng
auto weightsName = inputs[1];
auto weights_node = op->get_input_node_shared_ptr(1);
// WA: For the cases like Const(weights)->Sub(zp)->Deconv.
bool hasConstantWeights = IsNodeOnConstPath(weights_node);
// WA: For the cases like Const(weights)->Sub(zp)->Deconv. And also for the cases with real runtime weights.
// Dimensions order of weights blob is IOYX, but
// the selected format is OIYX by default. So we need to swap (and transpose) I and O dimensions to match the format
// For a Constant node on the input, transpose is not needed because the data is transposed when the const node is created
if (IsNodeOnConstPath(weights_node) && std::dynamic_pointer_cast<ngraph::op::v0::Constant>(weights_node) == nullptr) {
if ((hasConstantWeights && std::dynamic_pointer_cast<ngraph::op::v0::Constant>(weights_node) == nullptr) || !hasConstantWeights) {
std::string permuteName = layerName + "_cldnn_weights_permute";
auto weights_rank = op->get_input_shape(1).size();
std::vector<uint16_t> permute_order(weights_rank);
@ -195,11 +196,12 @@ static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_p
auto weightsName = inputs[1];
auto weights_node = op->get_input_node_shared_ptr(1);
// WA: For the cases like Const(weights)->Sub(zp)->Deconv.
bool hasConstWeights = IsNodeOnConstPath(weights_node);
// WA: For the cases like Const(weights)->Sub(zp)->Deconv. And also for the cases with real runtime weights.
// Dimensions order of weights blob is IOYX, but
// the selected format is OIYX by default. So we need to swap I and O dimensions to match the format.
// For a Constant node on the input, transpose is not needed because the data is transposed when the const node is created
if (IsNodeOnConstPath(weights_node) && std::dynamic_pointer_cast<ngraph::op::v0::Constant>(weights_node) == nullptr) {
if ((hasConstWeights && std::dynamic_pointer_cast<ngraph::op::v0::Constant>(weights_node) == nullptr) || !hasConstWeights) {
std::string permuteName = layerName + "_cldnn_weights_permute";
auto weights_rank = op->get_input_shape(1).size();
std::vector<uint16_t> permute_order(weights_rank);
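The weights-permute workaround above is easier to see with concrete indices: the order vector built here swaps the first two axes (I and O) of the deconvolution weights and leaves the spatial axes in place. A minimal sketch of that index arithmetic, written in Python purely for illustration (the plugin code itself is C++; both hunks above build such an order for their respective weight layouts):

# Illustration only: the permute order that maps IOYX (or IOZYX) weights to OIYX (OIZYX).
def deconv_weights_permute_order(weights_rank):
    order = list(range(weights_rank))        # identity order [0, 1, 2, ...]
    order[0], order[1] = order[1], order[0]  # swap I and O
    return order

assert deconv_weights_permute_order(4) == [1, 0, 2, 3]     # IOYX  -> OIYX
assert deconv_weights_permute_order(5) == [1, 0, 2, 3, 4]  # IOZYX -> OIZYX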

View File

@ -346,7 +346,7 @@ bool IsNodeOnConstPath(const std::shared_ptr<ngraph::Node>& node) {
std::function<bool(const std::shared_ptr<ngraph::Node>&)> is_const_node = [&nodes_processed, &is_const_node](const std::shared_ptr<ngraph::Node>& node) {
if (nodes_processed.count(node)) return true;
nodes_processed.insert(node);
// If input is constant, then drop if from the processing list
// If input is constant, then drop it from the processing list
if (std::dynamic_pointer_cast<ngraph::op::v0::Constant>(node) != nullptr)
return true;
// If the node doesn't have any parents and it's not a constant, then we deal with dynamic path

View File

@ -0,0 +1,91 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include <tuple>
#include <vector>
#include <string>
#include <fstream>
#include <ie_core.hpp>
#include <ie_layouts.h>
#include "ngraph_functions/builders.hpp"
#include "base/import_export_base/import_export_base.hpp"
namespace LayerTestDefinitions {
class ImportBatchTest : public FuncTestUtils::ImportNetworkTestBase {
protected:
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 0.2f, -0.1f);
}
void SetUp() override {
InferenceEngine::Precision netPrecision;
std::vector<size_t> inputShape;
std::string _;
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, _) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
auto mul_const_1 = ngraph::builder::makeConstant<float>(ngPrc, { inputShape[1], 2048 },
CommonTestUtils::generate_float_numbers(2048 * inputShape[1], -0.1f, 0.1f), false);
auto matmul_1 = std::make_shared<ngraph::op::MatMul>(params[0], mul_const_1);
auto sigmoid_1 = std::make_shared<ngraph::op::Sigmoid>(matmul_1);
auto mul_const_2 = ngraph::builder::makeConstant<float>(ngPrc, { 2048, 3425 },
CommonTestUtils::generate_float_numbers(2048 * 3425, -0.1f, 0.1f), false);
auto matmul_2 = std::make_shared<ngraph::op::MatMul>(sigmoid_1, mul_const_2);
function = std::make_shared<ngraph::Function>(matmul_2, params, "ExportImportNetwork");
}
};
TEST_P(ImportBatchTest, CompareWithRefImpl) {
Run();
};
const std::vector<std::vector<size_t>> inputShapes = {
{1, 440},
{2, 440},
{4, 128}
};
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> exportConfigs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
}
};
const std::vector<std::map<std::string, std::string>> importConfigs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}
}
};
const std::vector<std::string> appHeader = {
""
};
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkBatchCase, ImportBatchTest,
::testing::Combine(
::testing::ValuesIn(inputShapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigs),
::testing::ValuesIn(appHeader)),
ImportBatchTest::getTestCaseName);
} // namespace LayerTestDefinitions

View File

@ -17,11 +17,12 @@ namespace LayerTestsDefinitions {
class ImportMultiInput : public FuncTestUtils::ImportNetworkTestBase {
protected:
void SetUp() override {
std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto input = ngraph::builder::makeParams(ngPrc, {{1, 10}, {1, 10}});
auto input = ngraph::builder::makeParams(ngPrc, {inputShape, inputShape});
auto mul1 = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD);
auto result = std::make_shared<ngraph::opset7::Result>(mul1);
@ -40,6 +41,10 @@ TEST_P(ImportMultiInputChanged, CompareWithRefImpl) {
TestRun(true);
};
const std::vector<std::vector<size_t>> inputShape = {
{1, 10}
};
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32
};
@ -98,6 +103,7 @@ const std::vector<std::map<std::string, std::string>> importConfigsUnchanged = {
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkGNA, ImportMultiInputUnchanged,
::testing::Combine(
::testing::ValuesIn(inputShape),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
@ -107,6 +113,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkGNA, ImportMultiInputUnchanged,
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkGNA, ImportMultiInputChanged,
::testing::Combine(
::testing::ValuesIn(inputShape),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),

View File

@ -52,6 +52,10 @@ TEST_P(ImportExportGNAModelChanged, ReshapePermuteConv) {
TestRun(true);
};
const std::vector<std::vector<size_t>> inputShapes = {
{1, 336}
};
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
@ -92,6 +96,7 @@ const std::vector<std::string> appHeaders = {
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkGNA, ImportExportGNAModelUnchanged,
::testing::Combine(
::testing::ValuesIn(inputShapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
@ -101,6 +106,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkGNA, ImportExportGNAModelUnchanged,
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkGNA, ImportExportGNAModelChanged,
::testing::Combine(
::testing::ValuesIn(inputShapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),

View File

@ -26,8 +26,11 @@ const std::vector<std::string> appHeaders = {
"APPLICATION_HEADER"
};
std::vector<size_t> inputShape = ngraph::Shape{1000};
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkCase, ImportNonZero,
::testing::Combine(
::testing::Values(inputShape),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::ValuesIn(exportConfigs),

View File

@ -9,6 +9,7 @@
#include <ie_core.hpp>
typedef std::tuple<
std::vector<size_t>, // Input Shape
InferenceEngine::Precision, // Network Precision
std::string, // Target Device
std::map<std::string, std::string>, // Export Configuration

View File

@ -9,14 +9,16 @@
namespace FuncTestUtils {
std::string ImportNetworkTestBase::getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj) {
std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> exportConfiguration;
std::map<std::string, std::string> importConfiguration;
std::string appHeader;
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration, appHeader) = obj.param;
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, appHeader) = obj.param;
std::ostringstream result;
result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice << "_";
for (auto const& configItem : exportConfiguration) {

View File

@ -10,10 +10,11 @@ namespace LayerTestsDefinitions {
void ImportNonZero::SetUp() {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
ngraph::Shape inputShape;
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
const auto parameter = std::make_shared<ngraph::opset5::Parameter>(ngPrc, ngraph::Shape{1000});
const auto parameter = std::make_shared<ngraph::opset5::Parameter>(ngPrc, inputShape);
const auto nonZero = std::make_shared<ngraph::opset5::NonZero>(parameter);
function = std::make_shared<ngraph::Function>(nonZero->outputs(), ngraph::ParameterVector{parameter}, "ExportImportNetwork");

View File

@ -9,11 +9,12 @@
namespace LayerTestsDefinitions {
void ImportReshapePermuteConv::SetUp() {
std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, { {1, 336} });
auto params = ngraph::builder::makeParams(ngPrc, { inputShape });
std::vector<size_t> outFormShapes1 = { 1, 1, 168, 2 };
auto pattern1 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1);

View File

@ -99,15 +99,15 @@ function filterTable() {
if (implementation != 0) {
if (implementation == 'ni') {
$("#report #data tr:not(:hidden)").filter(function () {
$(this).toggle($(this).find('td').hasClass("not_impl"))
$(this).toggle($(this).find('td').hasClass("value " + device + " not_impl"))
});
} else if (implementation == 'i') {
$("#report #data tr:not(:hidden)").filter(function () {
$(this).toggle($(this).find('td').hasClass("impl"));
$(this).toggle($(this).find('td').hasClass("value " + device + " impl"));
});
} else {
$("#report #data tr:not(:hidden)").filter(function () {
$(this).toggle(!$(this).find('td').hasClass("not_impl") && !$(this).find('td').hasClass("impl"));
$(this).toggle(!$(this).find('td').hasClass("value"));
});
}
}
@ -116,19 +116,19 @@ function filterTable() {
selector = [];
select.forEach(item => {
if (item == '100p') {
selector.push('.value:visible[crashed="0"][failed="0"][skipped="0"]');
selector.push('.value:visible[crashed="0"][failed="0"][skipped="0"][value!="---"]');
}
if (item == '100f') {
selector.push('.value:visible[passed="0"]');
selector.push('.value:visible[passed="0"][value!="---"]');
}
if (item == 'p') {
selector.push('.value:visible[passed!="0"]');
selector.push('.value:visible[passed!="0"][value!="---"]');
}
if (item == 'f') {
selector.push('.value:visible[failed!="0"]');
selector.push('.value:visible[failed!="0"][value!="---"]');
}
if (item == 'c') {
selector.push('.value:visible[crashed!="0"]');
selector.push('.value:visible[crashed!="0"][value!="---"]');
}
if (item == 's') {
selector.push('.value:visible[value!="---"][skipped!="0"]');

View File

@ -21,7 +21,6 @@ addIeTargetTest(
vpu_graph_transformer_test_static
mvnc
ngraph
interpreter_backend
inference_engine_lp_transformations # for ngraphFunctions
ADD_CPPLINT
LABELS

View File

@ -17,6 +17,7 @@ of all possible parameters can be found in the default_quantization_spec.json */
"engine": {
"type": "simplified",
"layout": "NCHW", // Layout of input data. Supported ["NCHW", "NHWC", "CHW", "CWH"] layout
"data_source": "PATH_TO_SOURCE" // You can specify path to directory with images. Also you can
// specify a template for file names to filter which images are loaded.
// Templates are unix style (this option is valid only in simplified mode)
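For reference, a simplified-mode engine section with these options can also be expressed as a plain Python dict, as the tests further below do; the values here are placeholders, and layout may be omitted so the loader infers it from the model input:

# Hypothetical simplified-mode engine config; the path and layout are placeholders.
engine_config = {
    "type": "simplified",
    "layout": "NCHW",                 # optional; inferred from the input shape when omitted
    "data_source": "/path/to/images"  # directory with images, or a unix-style file-name template
}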

View File

@ -26,6 +26,7 @@ def create_data_loader(config, model):
if tuple(in_node.shape) != (1, 3):
data_loader = ImageLoader(config)
data_loader.shape = in_node.shape
data_loader.get_layout(in_node)
return data_loader
if data_loader is None:

View File

@ -3,6 +3,7 @@
from cv2 import imread, IMREAD_GRAYSCALE
from openvino.runtime import Layout, Dimension # pylint: disable=E0611,E0401
from ..api.data_loader import DataLoader
from ..data_loaders.utils import prepare_image, collect_img_files
@ -14,6 +15,7 @@ class ImageLoader(DataLoader):
self._img_files = collect_img_files(config.data_source)
self._shape = None
self._layout = config.get('layout', None)
self._crop_central_fraction = config.get('central_fraction', None)
def __getitem__(self, idx):
@ -37,4 +39,29 @@ class ImageLoader(DataLoader):
if image is None:
raise Exception('Can not read the image: {}'.format(img_path))
return prepare_image(image, self.shape[-2:], self._crop_central_fraction)
return prepare_image(image, self._layout, self.shape[-2:], self._crop_central_fraction)
def get_layout(self, input_node):
if self._layout is not None:
if 'C' not in self._layout or 'H' not in self._layout or 'W' not in self._layout:
raise ValueError('Unexpected {} layout'.format(self._layout))
self._layout = Layout(self._layout)
return
layout_from_ir = input_node.graph.graph.get('layout', None)
if layout_from_ir is not None:
self._layout = Layout(layout_from_ir)
return
image_colors_dim = (Dimension(3), Dimension(1))
num_dims = len(self._shape)
if num_dims == 4:
if self._shape[1] in image_colors_dim:
self._layout = Layout("NCHW")
elif self._shape[3] in image_colors_dim:
self._layout = Layout("NHWC")
elif num_dims == 3:
if self._shape[0] in image_colors_dim:
self._layout = Layout("CHW")
elif self._shape[2] in image_colors_dim:
self._layout = Layout("HWC")
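The new get_layout falls back in three steps: an explicit layout string from the config, then the layout recorded in the IR, then a guess from the input shape, where a dimension of 3 or 1 in position 1 of a 4D shape means NCHW and in the last position NHWC (and analogously CHW/HWC for 3D). A rough standalone sketch of that last fallback, assuming plain integer tuples instead of Dimension objects:

# Sketch of the shape-based fallback only; the real loader works on openvino Dimension objects.
def guess_layout(shape):
    colors = (3, 1)
    if len(shape) == 4:
        if shape[1] in colors:
            return "NCHW"
        if shape[3] in colors:
            return "NHWC"
    elif len(shape) == 3:
        if shape[0] in colors:
            return "CHW"
        if shape[2] in colors:
            return "HWC"
    return None  # layout stays undetermined

assert guess_layout((1, 3, 224, 224)) == "NCHW"
assert guess_layout((1, 224, 224, 3)) == "NHWC"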

View File

@ -9,6 +9,7 @@ from pathlib import Path
import numpy as np
import cv2 as cv
from openvino.runtime import Layout # pylint: disable=E0611,E0401
from openvino.tools.pot.utils.logger import get_logger
logger = get_logger(__name__)
@ -34,12 +35,11 @@ def crop(image, central_fraction):
return image[start_height:start_height + dst_height, start_width:start_width + dst_width]
def prepare_image(image, dst_shape, central_fraction=None):
def prepare_image(image, layout, dst_shape, central_fraction=None):
if central_fraction:
image = crop(image, central_fraction)
if image.shape[-1] in [3, 1]:
if layout == Layout('NCHW') or layout == Layout('CHW'):
image = cv.resize(image, dst_shape[::-1])
return image.transpose(2, 0, 1)
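With the new layout argument, prepare_image transposes to channels-first only when the requested layout is NCHW or CHW; a brief usage sketch with dummy data (the import path is assumed from the relative import above):

# Usage sketch for the patched prepare_image; shapes only, no real image data.
import numpy as np
from openvino.runtime import Layout
from openvino.tools.pot.data_loaders.utils import prepare_image  # module path assumed

hwc = np.zeros((480, 640, 3), dtype=np.uint8)      # image as cv2 reads it, HWC order
chw = prepare_image(hwc, Layout('NCHW'), (224, 224))
print(chw.shape)                                    # expected: (3, 224, 224)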

View File

@ -44,3 +44,29 @@ def test_check_image(tmp_path, models, model_name, model_framework):
num_images_in_dir = len(os.listdir(path_image_data))
assert num_images_from_data_loader == num_images_in_dir
TEST_MODELS_LAYOUT = [('mobilenet-v2-pytorch', 'pytorch', 'NCHW', (3, 224, 224)),
('mobilenet-v2-pytorch', 'pytorch', 'NHWC', (224, 224, 3)),
('mobilenet-v2-pytorch', 'pytorch', None, (3, 224, 224)),
('mobilenet-v1-1.0-224-tf', 'tf', None, (224, 224, 3))]
@pytest.mark.parametrize(
'model_name, model_framework, layout, reference_shape', TEST_MODELS_LAYOUT,
ids=['{}_{}'.format(m[0], m[1]) for m in TEST_MODELS_LAYOUT])
def test_check_layout(tmp_path, models, model_name, model_framework, layout, reference_shape):
test_dir = Path(__file__).parent
path_image_data = os.path.join(test_dir, "data/image_data")
engine_config = Dict({"device": "CPU",
"type": "simplified",
"layout": layout,
"data_source": path_image_data})
model = models.get(model_name, model_framework, tmp_path)
model = load_model(model.model_params)
data_loader = create_data_loader(engine_config, model)
image = data_loader.item()
assert image.shape == reference_shape