Merge remote-tracking branch 'upstream/master' into layer_test_common

Author: Efode, Irina
Date:   2021-10-28 11:00:43 +03:00
Commit: 39ed74825d
332 changed files with 4581 additions and 3168 deletions

View File

@ -46,6 +46,9 @@
# `openvino::frontend::paddlepaddle`
# PaddlePaddle FrontEnd target (optional)
#
# `openvino::frontend::tensorflow`
# TensorFlow FrontEnd target (optional)
#
# Result variables:
# ------
#
@ -63,6 +66,9 @@
# `OpenVINO_Frontend_PaddlePaddle_FOUND`
# OpenVINO PaddlePaddle frontend is available
#
# `OpenVINO_Frontend_TensorFlow_FOUND`
# OpenVINO TensorFlow frontend is available
#
# `OpenVINO_Frontend_IR_FOUND`
# OpenVINO IR frontend is available
#
@ -170,6 +176,7 @@ set(${CMAKE_FIND_PACKAGE_NAME}_Runtime_FOUND ON)
set(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND @NGRAPH_ONNX_FRONTEND_ENABLE@)
set(${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND @NGRAPH_PDPD_FRONTEND_ENABLE@)
set(${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND @NGRAPH_TF_FRONTEND_ENABLE@)
set(${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND @NGRAPH_IR_FRONTEND_ENABLE@)
set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_ONNX_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND})
set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_PaddlePaddle_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND})

View File

@ -21,16 +21,14 @@ addIeTargetTest(
TEMPLATE
)
if(ENABLE_TEMPLATE_OPENCV_TESTS)
find_package(OpenCV QUIET COMPONENTS core imgproc)
find_package(OpenCV QUIET COMPONENTS core imgproc)
if(OpenCV_FOUND)
message("-- Reference preprocessing: OpenCV tests are enabled")
target_compile_definitions(${TARGET_NAME} PRIVATE OPENCV_TEMPLATE_TESTS)
target_link_libraries(${TARGET_NAME} PRIVATE opencv_imgproc opencv_core)
else()
message("-- Reference preprocessing: OpenCV tests are disabled")
endif()
if(OpenCV_FOUND)
message("-- Reference preprocessing: OpenCV tests are enabled")
target_compile_definitions(${TARGET_NAME} PRIVATE OPENCV_TEMPLATE_TESTS)
target_link_libraries(${TARGET_NAME} PRIVATE opencv_imgproc opencv_core)
else()
message("-- Reference preprocessing: OpenCV tests are disabled")
endif()
# [cmake:functional_tests]

View File

@ -0,0 +1,181 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/adaptive_avg_pool.hpp"
#include "openvino/op/constant.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
struct AdaptiveAvgPoolParams {
template <class IT>
AdaptiveAvgPoolParams(const Shape& input_shape,
const Shape& output_shape,
const element::Type& input_type,
const element::Type& output_type,
const std::vector<IT>& input_values,
const std::vector<IT>& output_values,
const Shape& adaptive_shape,
const std::vector<int64_t>& adaptive_values)
: m_input_shape(input_shape),
m_output_shape(output_shape),
m_input_type(input_type),
m_output_type(output_type),
m_input_data(CreateTensor(input_type, input_values)),
m_expected_data(CreateTensor(output_type, output_values)),
m_adaptive_shape(adaptive_shape),
m_adaptive_values(adaptive_values) {}
Shape m_input_shape;
Shape m_output_shape;
element::Type m_input_type;
element::Type m_output_type;
runtime::Tensor m_input_data;
runtime::Tensor m_expected_data;
Shape m_adaptive_shape;
std::vector<int64_t> m_adaptive_values;
};
class ReferenceAdaptiveAvgPoolLayerTest : public testing::TestWithParam<AdaptiveAvgPoolParams>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params.m_input_shape,
params.m_input_type,
params.m_adaptive_shape,
params.m_adaptive_values);
inputData = {params.m_input_data};
refOutData = {params.m_expected_data};
}
static std::string getTestCaseName(const testing::TestParamInfo<AdaptiveAvgPoolParams>& obj) {
auto params = obj.param;
std::ostringstream result;
result << "shape=" << params.m_input_shape << "_";
result << "iType=" << params.m_input_type << "_";
result << "shape=" << params.m_output_shape << "_";
result << "oType=" << params.m_output_type;
return result.str();
}
private:
static std::shared_ptr<Function> CreateFunction(const Shape& input_shape,
const element::Type& input_type,
const Shape& adaptive_shape,
const std::vector<int64_t> adaptive_values) {
const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
const auto out = op::v0::Constant::create<int64_t>(element::Type_t::i64, adaptive_shape, adaptive_values);
const auto adaptive_avg_pool = std::make_shared<op::v8::AdaptiveAvgPool>(in, out);
return std::make_shared<Function>(NodeVector{adaptive_avg_pool}, ParameterVector{in});
}
};
TEST_P(ReferenceAdaptiveAvgPoolLayerTest, AdaptiveAvgPoolWithHardcodedRefs) {
Exec();
}
template <element::Type_t IN_ET>
std::vector<AdaptiveAvgPoolParams> generateParamsForAdaptiveAvgPool() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<AdaptiveAvgPoolParams> params{
AdaptiveAvgPoolParams(ov::Shape{2, 3, 7},
ov::Shape{2, 3, 3},
IN_ET,
IN_ET,
std::vector<T>{0, 4, 1, 3, -2, -5, -2, -2, 1, -3, 1, -3, -4, 0, -2, 1, -1, -2, 3, -1, -3,
-1, -2, 3, 4, -3, -4, 1, 2, 0, -4, -5, -2, -2, -3, 2, 3, 1, -5, 2, -4, -2},
std::vector<T>{1.66666663,
0.66666669,
-3.,
-1.33333337,
-1.66666663,
-2.33333325,
-0.66666669,
0.,
-0.33333334,
0.,
1.33333337,
-2.,
-0.66666669,
-3.66666675,
-2.33333325,
2.,
-0.66666669,
-1.33333337},
ov::Shape{1},
{3}),
AdaptiveAvgPoolParams(
ov::Shape{1, 3, 7, 10},
ov::Shape{1, 3, 3, 3},
IN_ET,
IN_ET,
std::vector<T>{-2, -3, -4, 3, -5, 4, 0, -4, -2, -4, -5, 0, -3, 0, -2, 0, 0, -5, -4, -1, 3, -1, 0, -1,
0, -2, 0, 4, 1, 4, 0, -1, -4, 2, -2, -5, -1, -1, -2, 1, 2, -2, -1, 2, 0, -1, 0, -5,
4, 4, 3, 0, -4, -4, -4, -2, 0, 1, -2, -1, 4, -2, -4, 1, -1, -3, -4, -1, 1, -4,
-2, -4, -5, 0, -4, 3, 4, -5, -4, -2, 0, 2, -4, -3, 3, -1, 1, -4, -5, 4, 2, -5, 2, -3,
0, 4, 3, 3, 1, 2, -1, -4, 1, -3, -3, -2, 3, 4, -2, -5, 1, 4, 4, -2, 2, 1, -5, -2,
-5, 1, 1, -2, -3, -3, -1, -5, 1, -3, -5, -3, -4, -1, 4, -3, 4, -1, 4, 3, 1, 4,
-2, -4, -4, 4, -3, 4, 2, -3, -2, 4, -3, 0, 1, -4, 4, 4, 0, 3, -1, 3, 3, -5, 0, 3,
-3, 1, -2, 4, -5, -5, 1, 0, -1, 0, -3, -2, 0, -3, 3, -2, -2, 0, -3, 4, -1, 2, -2, 2,
-3, -1, -4, -2, 0, 2, 0, 2, 0, -3, 4, 3, -5, -3, -5, 1, -5, -3, -5, 4, -3, 3},
std::vector<T>{-1.08333337, -0.25000000, -0.91666669, -0.08333334, -0.66666669,
0.75000000, -0.41666666, -1.33333337, -0.58333331,
-1.66666663, 0.58333331, -0.16666667, -0.33333334, -0.41666666,
-0.16666667, -0.33333334, -0.66666669, -0.75000000,
-0.91666669, 0.83333331, -0.16666667, 0., -0.25000000,
-1.16666663, -1.41666663, -0.41666666, -0.08333334},
ov::Shape{2},
{3, 3}),
AdaptiveAvgPoolParams(
ov::Shape{2, 2, 3, 3, 3},
ov::Shape{2, 2, 2, 2, 2},
IN_ET,
IN_ET,
std::vector<T>{-5, 1, -3, -4, 4, -4, 3, -3, -1, 0, 0, -2, -4, 2, 0, -4, -5, -2, -4, -4, 0, -2, 3, -3, 4, -1, -4,
-1, -1, -5, 4, -1, -2, -3, 0, 4, -1, -5, -4, 1, 1, 4, -5, -5, -5, 4, -3, -3, -3, 4, 0, -3, -5, 1,
4, 2, 1, -5, -5, 1, 0, -4, -1, 2, -4, -2, 4, 3, 1, -3, -3, -2, -4, -3, -3, 3, -1, 1, 2, 2, -4,
-5, -4, 1, 3, -4, -1, 2, 4, -5, 0, 1, -2, 0, 0, -2, 3, -2, -5, -3, -5, -2, -1, 3, -2, 4, 3, -3},
std::vector<T>{-0.750, -0.250, -1.375, -1.125, -1.125, -0.500, -0.875, -1.250,
-0.375, -1.625, -1., -0.500, -0.250, -0.750, -1.875, -0.625,
0.125, -0.375, -1.625, -1.250, 0., -1., 0.875, -0.375,
-1.125, -1.375, 0.750, -1.875, -0.625, -1.125, 1.250, -1.},
ov::Shape{3},
{2, 2, 2}),
};
return params;
}
std::vector<AdaptiveAvgPoolParams> generateCombinedParamsForAdaptiveAvgPool() {
const std::vector<std::vector<AdaptiveAvgPoolParams>> allTypeParams{
generateParamsForAdaptiveAvgPool<element::Type_t::f32>(),
generateParamsForAdaptiveAvgPool<element::Type_t::f16>(),
generateParamsForAdaptiveAvgPool<element::Type_t::bf16>()
};
std::vector<AdaptiveAvgPoolParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
INSTANTIATE_TEST_SUITE_P(
smoke_AdaptiveAvgPool_With_Hardcoded_Refs,
ReferenceAdaptiveAvgPoolLayerTest,
::testing::ValuesIn(generateCombinedParamsForAdaptiveAvgPool()),
ReferenceAdaptiveAvgPoolLayerTest::getTestCaseName);
} // namespace
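The hard-coded expected values can be reproduced by hand. Below is a minimal standalone sketch (not part of the commit) for the first channel of the first test case; it assumes the usual adaptive-pooling window rule start = floor(i*L/out), end = ceil((i+1)*L/out), which is stated here as an assumption about the reference implementation but matches the values above.

// Standalone sketch: reproduces the first three AdaptiveAvgPool reference values.
#include <cstdio>
#include <vector>

int main() {
    const std::vector<float> channel0{0, 4, 1, 3, -2, -5, -2};  // first channel of the {2, 3, 7} input
    const size_t L = channel0.size(), out = 3;
    for (size_t i = 0; i < out; ++i) {
        const size_t start = i * L / out;                    // floor(i * L / out)
        const size_t end = ((i + 1) * L + out - 1) / out;    // ceil((i + 1) * L / out)
        float sum = 0;
        for (size_t j = start; j < end; ++j)
            sum += channel0[j];
        std::printf("out[%zu] = %f\n", i, sum / (end - start));  // 1.666667, 0.666667, -3
    }
}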

View File

@ -0,0 +1,217 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/adaptive_max_pool.hpp"
#include "openvino/op/constant.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
struct AdaptiveMaxPoolParams {
template <class IT>
AdaptiveMaxPoolParams(const Shape& input_shape,
const Shape& output_shape,
const element::Type& input_type,
const element::Type& output_type,
const std::vector<IT>& input_values,
const std::vector<IT>& output_values,
const std::vector<int64_t>& output_indices,
const Shape& adaptive_shape,
const std::vector<int64_t>& adaptive_values)
: m_input_shape(input_shape),
m_output_shape(output_shape),
m_input_type(input_type),
m_output_type(output_type),
m_input_data(CreateTensor(input_type, input_values)),
m_expected_data(CreateTensor(output_type, output_values)),
m_expected_indices(CreateTensor(element::Type_t::i64, output_indices)),
m_adaptive_shape(adaptive_shape),
m_adaptive_values(adaptive_values) {}
Shape m_input_shape;
Shape m_output_shape;
element::Type m_input_type;
element::Type m_output_type;
runtime::Tensor m_input_data;
runtime::Tensor m_expected_data;
runtime::Tensor m_expected_indices;
Shape m_adaptive_shape;
std::vector<int64_t> m_adaptive_values;
};
class ReferenceAdaptiveMaxPoolLayerTest : public testing::TestWithParam<AdaptiveMaxPoolParams>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params.m_input_shape,
params.m_input_type,
params.m_adaptive_shape,
params.m_adaptive_values);
inputData = {params.m_input_data};
refOutData = {params.m_expected_data, params.m_expected_indices};
}
static std::string getTestCaseName(const testing::TestParamInfo<AdaptiveMaxPoolParams>& obj) {
auto params = obj.param;
std::ostringstream result;
result << "shape=" << params.m_input_shape << "_";
result << "iType=" << params.m_input_type << "_";
result << "shape=" << params.m_output_shape << "_";
result << "oType=" << params.m_output_type;
return result.str();
}
private:
static std::shared_ptr<Function> CreateFunction(const Shape& input_shape,
const element::Type& input_type,
const Shape& adaptive_shape,
const std::vector<int64_t> adaptive_values) {
const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
const auto out = op::v0::Constant::create<int64_t>(element::Type_t::i64, adaptive_shape, adaptive_values);
const auto adaptive_max_pool = std::make_shared<op::v8::AdaptiveMaxPool>(in, out);
return std::make_shared<Function>(adaptive_max_pool->outputs(), ParameterVector{in});
}
};
TEST_P(ReferenceAdaptiveMaxPoolLayerTest, AdaptiveMaxPoolWithHardcodedRefs) {
Exec();
}
template <element::Type_t IN_ET>
std::vector<AdaptiveMaxPoolParams> generateParamsForAdaptiveMaxPoolWithExpectedResult() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<AdaptiveMaxPoolParams> params{
AdaptiveMaxPoolParams(
ov::Shape{2, 3, 7},
ov::Shape{2, 3, 3},
IN_ET,
IN_ET,
std::vector<T>{0, 4, 1, 3, -2, -5, -2, -2, 1, -3, 1, -3, -4, 0, -2, 1, -1, -2, 3, -1, -3,
-1, -2, 3, 4, -3, -4, 1, 2, 0, -4, -5, -2, -2, -3, 2, 3, 1, -5, 2, -4, -2},
std::vector<T>{4,
3,
-2,
1,
1,
0,
1,
3,
3,
3,
4,
1,
2,
-2,
-2,
3,
2,
2},
std::vector<int64_t>{1,
3,
4,
1,
3,
6,
1,
4,
4,
2,
3,
6,
0,
4,
4,
1,
4,
4},
ov::Shape{1},
std::vector<int64_t>{3}),
AdaptiveMaxPoolParams(
ov::Shape{1, 3, 7, 10},
ov::Shape{1, 3, 3, 3},
IN_ET,
IN_ET,
std::vector<T>{0, -2, -5, -5, 2, 3, 2, -3, 1, -2, -4, -1, -1, -1, 2, -4, 3, -5, -1, -1, 1, 2, 4, -2,
-3, -2, 0, -5, 2, -4, -1, -4, 4, 2, 1, -2, 2, -3, 0, 1, -3, 3, -1, 4, 0, 2, 0, 3,
4, -4, 1, 4, -1, -5, -2, 4, -3, 3, 2, 1, 0, 4, 2, -5, 2, -5, -2, -1, 4, 2,
0, 4, -2, 0, -5, -3, 4, -4, -2, -2, 2, 1, 4, 3, 2, -5, -4, -4, 0, 1, 4, -4, -3, 3,
3, 4, -2, -3, -4, -2, 0, 1, -1, 3, -2, 2, 0, -3, -1, -1, 0, 0, 2, 2, -2, 1, -3, 1,
2, 4, 3, -5, -4, 1, -4, 2, 0, -2, -5, 2, -3, -2, -3, -4, 2, -2, -4, 2, -4, -3,
1, -5, -1, -5, 2, 1, 3, 4, 3, 0, -5, 4, -3, -4, -1, 2, -4, 2, 0, -5, -3, 0, 2, -3,
-5, 3, -2, -1, -5, -4, -5, 0, -5, -1, -3, 3, 3, -4, -3, -4, -5, 4, -1, 1, -1, -4, 1, -3,
-4, -1, -2, -3, -5, 2, 2, -5, 1, 1, -5, -4, 0, 2, 4, 2, 0, 2, 4, 0, -5, 2},
std::vector<T>{4, 3, 3, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 3, 2, 4,
4, 3, 4, 4, 3, 3, 4, 4, 4},
std::vector<int64_t>{22, 5, 16, 22, 43, 48, 43, 43, 48,
1, 6, 6, 20, 25, 49, 50, 43, 49,
11, 6, 7, 41, 25, 36, 41, 66, 66},
ov::Shape{2},
std::vector<int64_t>{3, 3}),
AdaptiveMaxPoolParams(
ov::Shape{2, 2, 3, 3, 3},
ov::Shape{2, 2, 2, 2, 2},
IN_ET,
IN_ET,
std::vector<T>{-5, 1, -3, -4, 4, -4, 3, -3, -1, 0, 0, -2, -4, 2,
0, -4, -5, -2, -4, -4, 0, -2, 3, -3, 4, -1, -4,
-1, -1, -5, 4, -1, -2, -3, 0, 4, -1, -5, -4, 1, 1,
4, -5, -5, -5, 4, -3, -3, -3, 4, 0, -3, -5, 1,
4, 2, 1, -5, -5, 1, 0, -4, -1, 2, -4, -2, 4, 3,
1, -3, -3, -2, -4, -3, -3, 3, -1, 1, 2, 2, -4,
-5, -4, 1, 3, -4, -1, 2, 4, -5, 0, 1, -2, 0, 0,
-2, 3, -2, -5, -3, -5, -2, -1, 3, -2, 4, 3, -3},
std::vector<T>{4, 4, 4, 4, 3, 3, 4, 3,
4, 4, 4, 4, 4, 4, 4, 4,
4, 3, 4, 3, 4, 3, 4, 3,
3, 1, 4, 4, 3, 3, 4, 3},
std::vector<int64_t>{4, 4, 4, 4, 22, 22, 24, 22,
3, 14, 3, 8, 18, 14, 22, 14,
0, 13, 12, 13, 12, 13, 12, 13,
3, 2, 7, 7, 22, 22, 24, 22},
ov::Shape{3},
std::vector<int64_t>{2, 2, 2})
};
return params;
}
std::vector<AdaptiveMaxPoolParams> generateCombinedParamsForAdaptiveMaxPool() {
const std::vector<std::vector<AdaptiveMaxPoolParams>> allTypeParams{
generateParamsForAdaptiveMaxPoolWithExpectedResult<element::Type_t::f32>(),
generateParamsForAdaptiveMaxPoolWithExpectedResult<element::Type_t::f16>(),
generateParamsForAdaptiveMaxPoolWithExpectedResult<element::Type_t::bf16>(),
generateParamsForAdaptiveMaxPoolWithExpectedResult<element::Type_t::i64>(),
generateParamsForAdaptiveMaxPoolWithExpectedResult<element::Type_t::i32>(),
generateParamsForAdaptiveMaxPoolWithExpectedResult<element::Type_t::i16>(),
generateParamsForAdaptiveMaxPoolWithExpectedResult<element::Type_t::i8>(),
};
std::vector<AdaptiveMaxPoolParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
INSTANTIATE_TEST_SUITE_P(
smoke_AdaptiveMaxPool_With_Hardcoded_Refs,
ReferenceAdaptiveMaxPoolLayerTest,
::testing::ValuesIn(generateCombinedParamsForAdaptiveMaxPool()),
ReferenceAdaptiveMaxPoolLayerTest::getTestCaseName);
} // namespace
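Unlike the AdaptiveAvgPool test, this one checks two outputs: the pooled maxima and their flat indices within each channel's spatial dimensions (refOutData holds both tensors). A short standalone sketch for the first channel of the first case, under the same window-rule assumption as in the AdaptiveAvgPool note above:

// Standalone sketch: reproduces the first three value/index pairs of the first case.
#include <cstdio>
#include <vector>

int main() {
    const std::vector<float> channel0{0, 4, 1, 3, -2, -5, -2};  // first channel of the {2, 3, 7} input
    const size_t L = channel0.size(), out = 3;
    for (size_t i = 0; i < out; ++i) {
        const size_t start = i * L / out;
        const size_t end = ((i + 1) * L + out - 1) / out;
        size_t arg = start;                          // index of the maximum inside the window
        for (size_t j = start + 1; j < end; ++j)
            if (channel0[j] > channel0[arg]) arg = j;
        std::printf("value=%g index=%zu\n", channel0[arg], arg);  // (4,1) (3,3) (-2,4)
    }
}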

View File

@ -0,0 +1,271 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <numeric>
#include "base_reference_test.hpp"
#include "openvino/op/avg_pool.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
struct AvgPoolParams {
template <class IT>
AvgPoolParams(const Shape& input_shape,
const Shape& output_shape,
const element::Type& input_type,
const element::Type& output_type,
const std::vector<IT>& input_values,
const std::vector<IT>& output_values,
const Strides& strides,
const Shape& pads_begin,
const Shape& pads_end,
const Shape& kernel,
const bool exclude_pad,
const op::RoundingType& rounding_type,
const op::PadType& pad_type)
: m_input_shape(input_shape),
m_output_shape(output_shape),
m_input_type(input_type),
m_output_type(output_type),
m_input_data(CreateTensor(input_type, input_values)),
m_expected_data(CreateTensor(output_type, output_values)),
m_strides(strides),
m_pads_begin(pads_begin),
m_pads_end(pads_end),
m_kernel(kernel),
m_exclude_pad(exclude_pad),
m_rounding_type(rounding_type),
m_pad_type(pad_type) {}
Shape m_input_shape;
Shape m_output_shape;
element::Type m_input_type;
element::Type m_output_type;
runtime::Tensor m_input_data;
runtime::Tensor m_expected_data;
Strides m_strides;
Shape m_pads_begin;
Shape m_pads_end;
Shape m_kernel;
bool m_exclude_pad;
op::RoundingType m_rounding_type;
op::PadType m_pad_type;
};
class ReferenceAvgPoolLayerTest : public testing::TestWithParam<AvgPoolParams>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params.m_input_shape,
params.m_input_type,
params.m_strides,
params.m_pads_begin,
params.m_pads_end,
params.m_kernel,
params.m_exclude_pad,
params.m_rounding_type,
params.m_pad_type);
inputData = {params.m_input_data};
refOutData = {params.m_expected_data};
}
static std::string getTestCaseName(const testing::TestParamInfo<AvgPoolParams>& obj) {
auto params = obj.param;
std::ostringstream result;
result << "iShape=" << params.m_input_shape << "_";
result << "iType=" << params.m_input_type << "_";
result << "iShape=" << params.m_output_shape << "_";
result << "oType=" << params.m_output_type << "_";
result << "excludePad=" << params.m_exclude_pad << "_";
result << "roundingType=" << params.m_rounding_type << "_";
result << "padType=" << params.m_pad_type;
return result.str();
}
private:
static std::shared_ptr<Function> CreateFunction(const Shape& input_shape,
const element::Type& input_type,
const Strides& strides,
const Shape& pads_begin,
const Shape& pads_end,
const Shape& kernel,
const bool exclude_pad,
const op::RoundingType rounding_type,
const op::PadType pad_type) {
const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
const auto avgPool = std::make_shared<op::v1::AvgPool>(in,
strides,
pads_begin,
pads_end,
kernel,
exclude_pad,
rounding_type,
pad_type);
return std::make_shared<Function>(NodeVector{avgPool}, ParameterVector{in});
}
};
TEST_P(ReferenceAvgPoolLayerTest, AvgPoolWithHardcodedRefs) {
Exec();
}
template<typename T>
std::vector<T> getContinuousIncreasingValue(size_t elementSize, float startValue) {
std::vector<T> a(elementSize);
// std::iota fills a with startValue, startValue + 1, startValue + 2, ...
std::iota(std::begin(a), std::end(a), startValue);
return a;
}
template <element::Type_t IN_ET>
std::vector<AvgPoolParams> generateParamsForAvgPool() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<AvgPoolParams> params{
AvgPoolParams(ov::Shape{1, 1, 3, 3},
ov::Shape{1, 1, 2, 2},
IN_ET,
IN_ET,
std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8, 9},
std::vector<T>{3, 4, 6, 7},
Strides{1, 1},
Shape{0, 0},
Shape{0, 0},
Shape{2, 2},
true,
op::RoundingType::FLOOR,
op::PadType::NOTSET),
AvgPoolParams(ov::Shape{1, 1, 4, 4},
ov::Shape{1, 1, 2, 2},
IN_ET,
IN_ET,
std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
std::vector<T>{6, 7, 10, 11},
Strides{1, 1},
Shape{0, 0},
Shape{0, 0},
Shape{3, 3},
true,
op::RoundingType::CEIL,
op::PadType::NOTSET),
AvgPoolParams(ov::Shape{1, 1, 2, 2},
ov::Shape{1, 1, 3, 3},
IN_ET,
IN_ET,
std::vector<T>{1, 2, 3, 4},
std::vector<T>{1, 1.5, 2, 2, 2.5, 3, 3, 3.5, 4},
Strides{1, 1},
Shape{1, 1},
Shape{1, 1},
Shape{2, 2},
true,
op::RoundingType::CEIL,
op::PadType::NOTSET),
AvgPoolParams(ov::Shape{1, 1, 1, 5},
ov::Shape{1, 1, 1, 3},
IN_ET,
IN_ET,
std::vector<T>{1, 2, 3, 4, 5},
std::vector<T>{1.5, 3, 4.5},
Strides{1, 2},
Shape{1, 1},
Shape{1, 1},
Shape{3, 3},
true,
op::RoundingType::CEIL,
op::PadType::EXPLICIT),
AvgPoolParams(ov::Shape{1, 1, 1, 5},
ov::Shape{1, 1, 1, 3},
IN_ET,
IN_ET,
std::vector<T>{2.5, 2, 12, 4, 5},
std::vector<T>{0.5, 2, 1},
Strides{1, 2},
Shape{1, 1},
Shape{1, 1},
Shape{3, 3},
false,
op::RoundingType::CEIL,
op::PadType::EXPLICIT),
AvgPoolParams(ov::Shape{1, 1, 3, 3},
ov::Shape{1, 1, 3, 3},
IN_ET,
IN_ET,
std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8, 9},
std::vector<T>{3, 4, 2.25, 6, 7, 3.75, 3.75, 4.25, 2.25},
Strides{1, 1},
Shape{0, 0},
Shape{0, 0},
Shape{2, 2},
false,
op::RoundingType::CEIL,
op::PadType::SAME_UPPER),
AvgPoolParams(ov::Shape{1, 1, 3, 3},
ov::Shape{1, 1, 3, 3},
IN_ET,
IN_ET,
std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8, 9},
std::vector<T>{0.25, 0.75, 1.25, 1.25, 3, 4, 2.75, 6, 7},
Strides{1, 1},
Shape{0, 0},
Shape{0, 0},
Shape{2, 2},
false,
op::RoundingType::CEIL,
op::PadType::SAME_LOWER),
AvgPoolParams(ov::Shape{1, 1, 2, 2, 2},
ov::Shape{1, 1, 2, 2, 1},
IN_ET,
IN_ET,
std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8},
std::vector<T>{1.5, 3.5, 5.5, 7.5},
Strides{1, 1, 1},
Shape{0, 0, 0},
Shape{0, 0, 0},
Shape{1, 1, 2},
true,
op::RoundingType::CEIL,
op::PadType::VALID),
AvgPoolParams(ov::Shape{1, 1, 3, 3},
ov::Shape{1, 1, 3, 3},
IN_ET,
IN_ET,
getContinuousIncreasingValue<T>(1 * 1 * 3 * 3, 1),
std::vector<T>{1.0f, 2.5f, 0, 5.5f, 7.0f, 0, 0, 0, 0},
Strides{2, 2},
Shape{1, 1},
Shape{1, 1},
Shape{2, 2},
true,
op::RoundingType::CEIL,
op::PadType::NOTSET),
};
return params;
}
std::vector<AvgPoolParams> generateCombinedParamsForAvgPool() {
const std::vector<std::vector<AvgPoolParams>> allTypeParams{
generateParamsForAvgPool<element::Type_t::f32>(),
generateParamsForAvgPool<element::Type_t::f16>(),
generateParamsForAvgPool<element::Type_t::bf16>()
};
std::vector<AvgPoolParams> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
INSTANTIATE_TEST_SUITE_P(
smoke_AvgPool_With_Hardcoded_Refs,
ReferenceAvgPoolLayerTest,
::testing::ValuesIn(generateCombinedParamsForAvgPool()),
ReferenceAvgPoolLayerTest::getTestCaseName);
} // namespace
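The exclude_pad flag decides whether zero-padded positions count towards the averaging divisor. A standalone sketch (not part of the commit) for the padded {1, 2, 3, 4} case above, with kernel 2x2, stride 1 and one-pixel padding: with exclude_pad = true it reproduces the reference output 1 1.5 2 / 2 2.5 3 / 3 3.5 4, and flipping the flag makes every divisor 4.

// Standalone sketch: 2-D average pooling with and without exclude_pad.
#include <cstdio>

int main() {
    const float in[2][2] = {{1, 2}, {3, 4}};
    const bool exclude_pad = true;  // flip to false to divide by the full kernel size
    for (int oy = -1; oy <= 1; ++oy) {          // kernel top-left in input coordinates (pad = 1)
        for (int ox = -1; ox <= 1; ++ox) {
            float sum = 0;
            int count = 0;
            for (int ky = 0; ky < 2; ++ky)
                for (int kx = 0; kx < 2; ++kx) {
                    const int y = oy + ky, x = ox + kx;
                    if (y >= 0 && y < 2 && x >= 0 && x < 2) { sum += in[y][x]; ++count; }
                }
            std::printf("%g ", sum / (exclude_pad ? count : 4));
        }
        std::printf("\n");
    }
}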

View File

@ -0,0 +1,201 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "openvino/op/scatter_nd_update.hpp"
#include "openvino/op/constant.hpp"
#include "base_reference_test.hpp"
using namespace reference_tests;
using namespace ov;
namespace {
struct ScatterNDUpdateParams {
ScatterNDUpdateParams(const Tensor& dataTensor, const Tensor& indexTensor, const Tensor& updateTensor,
const Tensor& expectedTensor, const std::string& testcaseName = "") :
dataTensor(dataTensor), indexTensor(indexTensor), updateTensor(updateTensor),
expectedTensor(expectedTensor), testcaseName(testcaseName) {}
Tensor dataTensor;
Tensor indexTensor;
Tensor updateTensor;
Tensor expectedTensor;
std::string testcaseName;
};
class ReferenceScatterNDUpdateLayerTest : public testing::TestWithParam<ScatterNDUpdateParams>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params);
inputData = {params.dataTensor.data};
refOutData = {params.expectedTensor.data};
}
static std::string getTestCaseName(const testing::TestParamInfo<ScatterNDUpdateParams>& obj) {
auto param = obj.param;
std::ostringstream result;
result << "dType=" << param.dataTensor.type;
result << "_dShape=" << param.dataTensor.shape;
result << "_iType=" << param.indexTensor.type;
result << "_iShape=" << param.indexTensor.shape;
result << "_uType=" << param.updateTensor.type;
result << "_uShape=" << param.updateTensor.shape;
result << "_eType=" << param.expectedTensor.type;
if (param.testcaseName != "") {
result << "_eShape=" << param.expectedTensor.shape;
result << "_" << param.testcaseName;
} else {
result << "_eShape=" << param.expectedTensor.shape;
}
return result.str();
}
private:
static std::shared_ptr<Function> CreateFunction(const ScatterNDUpdateParams& params) {
const auto data = std::make_shared<op::v0::Parameter>(params.dataTensor.type, params.dataTensor.shape);
const auto indices = std::make_shared<op::v0::Constant>(params.indexTensor.type,
params.indexTensor.shape,
params.indexTensor.data.data());
const auto updates = std::make_shared<op::v0::Constant>(params.updateTensor.type,
params.updateTensor.shape,
params.updateTensor.data.data());
const auto scatter = std::make_shared<op::v3::ScatterNDUpdate>(data, indices, updates);
return std::make_shared<ov::Function>(NodeVector{scatter}, ParameterVector{data});
}
};
TEST_P(ReferenceScatterNDUpdateLayerTest, CompareWithRefs) {
Exec();
}
template <element::Type_t IN_ET, element::Type_t IU_ET>
std::vector<ScatterNDUpdateParams> generateScatterNDUpdateParams() {
using T = typename element_type_traits<IN_ET>::value_type;
using U = typename element_type_traits<IU_ET>::value_type;
std::vector<ScatterNDUpdateParams> scatterParams {
// scatter_nd_update_1x1
ScatterNDUpdateParams(Tensor({1}, IN_ET, std::vector<T>{1}),
Tensor({1}, IU_ET, std::vector<U>{0}),
Tensor({1}, IN_ET, std::vector<T>{20}),
Tensor({1}, IN_ET, std::vector<T>{20}),
"scatter_nd_update_1x1"),
// scatter_nd_update_2x2_by_1
ScatterNDUpdateParams(Tensor({2, 2}, IN_ET, std::vector<T>{1, 2, 3, 4}),
Tensor({2, 1}, IU_ET, std::vector<U>{1, 0}),
Tensor({2, 2}, IN_ET, std::vector<T>{10, 20, 30, 40}),
Tensor({2, 2}, IN_ET, std::vector<T>{30, 40, 10, 20}),
"scatter_nd_update_2x2_by_1"),
// scatter_nd_update_2x2_by_2
ScatterNDUpdateParams(Tensor({2, 2}, IN_ET, std::vector<T>{1, 2, 3, 4}),
Tensor({2, 2}, IU_ET, std::vector<U>{0, 0, 1, 1}),
Tensor({2}, IN_ET, std::vector<T>{10, 40}),
Tensor({2, 2}, IN_ET, std::vector<T>{10, 2, 3, 40}),
"scatter_nd_update_2x2_by_2"),
// scatter_nd_update_3x3_by_1
ScatterNDUpdateParams(Tensor({3, 3, 3}, IN_ET, std::vector<T>{11, 12, 13, 14, 15, 16, 17, 18, 19,
21, 22, 23, 24, 25, 26, 27, 28, 29,
31, 32, 33, 34, 35, 36, 37, 38, 39}),
Tensor({2, 1}, IU_ET, std::vector<U>{0, 2}),
Tensor({2, 3, 3}, IN_ET, std::vector<T>{91, 92, 93, 94, 95, 96, 97, 98, 99,
81, 82, 83, 84, 85, 86, 87, 88, 89}),
Tensor({3, 3, 3}, IN_ET, std::vector<T>{91, 92, 93, 94, 95, 96, 97, 98, 99,
21, 22, 23, 24, 25, 26, 27, 28, 29,
81, 82, 83, 84, 85, 86, 87, 88, 89}),
"scatter_nd_update_3x3_by_1"),
// scatter_nd_update_3x3_by_2v2
ScatterNDUpdateParams(Tensor({3, 3, 3}, IN_ET, std::vector<T>{11, 12, 13, 14, 15, 16, 17, 18, 19,
21, 22, 23, 24, 25, 26, 27, 28, 29,
31, 32, 33, 34, 35, 36, 37, 38, 39}),
Tensor({2, 2, 3}, IU_ET, std::vector<U>{0, 0, 0, 2, 2, 2, 1, 0, 0, 1, 2, 2}),
Tensor({2, 2}, IN_ET, std::vector<T>{91, 92, 81, 82}),
Tensor({3, 3, 3}, IN_ET, std::vector<T>{91, 12, 13, 14, 15, 16, 17, 18, 19,
81, 22, 23, 24, 25, 26, 27, 28, 82,
31, 32, 33, 34, 35, 36, 37, 38, 92}),
"scatter_nd_update_3x3_by_2v2"),
// scatter_nd_update_3x3_by_2
ScatterNDUpdateParams(Tensor({3, 3, 3}, IN_ET, std::vector<T>{11, 12, 13, 14, 15, 16, 17, 18, 19,
21, 22, 23, 24, 25, 26, 27, 28, 29,
31, 32, 33, 34, 35, 36, 37, 38, 39}),
Tensor({2, 2}, IU_ET, std::vector<U>{0, 0, 2, 2}),
Tensor({2, 3}, IN_ET, std::vector<T>{91, 92, 93, 87, 88, 89}),
Tensor({3, 3, 3}, IN_ET, std::vector<T>{91, 92, 93, 14, 15, 16, 17, 18, 19,
21, 22, 23, 24, 25, 26, 27, 28, 29,
31, 32, 33, 34, 35, 36, 87, 88, 89}),
"scatter_nd_update_3x3_by_2"),
// scatter_nd_update_3x3_by_3
ScatterNDUpdateParams(Tensor({3, 3, 3}, IN_ET, std::vector<T>{11, 12, 13, 14, 15, 16, 17, 18, 19,
21, 22, 23, 24, 25, 26, 27, 28, 29,
31, 32, 33, 34, 35, 36, 37, 38, 39}),
Tensor({2, 3}, IU_ET, std::vector<U>{0, 0, 0, 2, 2, 2}),
Tensor({2}, IN_ET, std::vector<T>{91, 99}),
Tensor({3, 3, 3}, IN_ET, std::vector<T>{91, 12, 13, 14, 15, 16, 17, 18, 19,
21, 22, 23, 24, 25, 26, 27, 28, 29,
31, 32, 33, 34, 35, 36, 37, 38, 99}),
"scatter_nd_update_3x3_by_3"),
// scatter_nd_update_1d_from_examples
ScatterNDUpdateParams(Tensor({8}, IN_ET, std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8}),
Tensor({4, 1}, IU_ET, std::vector<U>{4, 3, 1, 7}),
Tensor({4}, IN_ET, std::vector<T>{9, 10, 11, 12}),
Tensor({8}, IN_ET, std::vector<T>{1, 11, 3, 10, 9, 6, 7, 12}),
"scatter_nd_update_1d_from_examples"),
// scatter_nd_update_4x4_shape_from_examples
ScatterNDUpdateParams(Tensor({4, 4, 4}, IN_ET, std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1,
1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1,
8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8,
8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8}),
Tensor({2, 1}, IU_ET, std::vector<U>{0, 2}),
Tensor({2, 4, 4}, IN_ET, std::vector<T>{5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}),
Tensor({4, 4, 4}, IN_ET, std::vector<T>{5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1,
1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8}),
"scatter_nd_update_4x4_shape_from_examples"),
// scatter_nd_update_4x4_v2
ScatterNDUpdateParams(Tensor({4, 4, 4}, IN_ET, std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1,
1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1,
8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8,
8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8}),
Tensor({2, 2, 2}, IU_ET, std::vector<U>{0, 0, 2, 2, 1, 1, 3, 3}),
Tensor({2, 2, 4}, IN_ET, std::vector<T>{15, 16, 17, 18, 25, 26, 27, 28,
35, 36, 37, 38, 45, 46, 47, 58}),
Tensor({4, 4, 4}, IN_ET, std::vector<T>{15, 16, 17, 18, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1,
1, 2, 3, 4, 35, 36, 37, 38, 8, 7, 6, 5, 4, 3, 2, 1,
8, 7, 6, 5, 4, 3, 2, 1, 25, 26, 27, 28, 5, 6, 7, 8,
8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 45, 46, 47, 58}),
"scatter_nd_update_4x4_v2"),
};
return scatterParams;
}
std::vector<ScatterNDUpdateParams> generateScatterNDUpdateCombinedParams() {
const std::vector<std::vector<ScatterNDUpdateParams>> scatterTypeParams {
generateScatterNDUpdateParams<element::Type_t::i32, element::Type_t::i32>(),
generateScatterNDUpdateParams<element::Type_t::i64, element::Type_t::i32>(),
generateScatterNDUpdateParams<element::Type_t::u32, element::Type_t::i32>(),
generateScatterNDUpdateParams<element::Type_t::u64, element::Type_t::i32>(),
generateScatterNDUpdateParams<element::Type_t::f16, element::Type_t::i32>(),
generateScatterNDUpdateParams<element::Type_t::f32, element::Type_t::i32>(),
generateScatterNDUpdateParams<element::Type_t::boolean, element::Type_t::i32>(),
generateScatterNDUpdateParams<element::Type_t::i32, element::Type_t::i64>(),
generateScatterNDUpdateParams<element::Type_t::i64, element::Type_t::i64>(),
generateScatterNDUpdateParams<element::Type_t::u32, element::Type_t::i64>(),
generateScatterNDUpdateParams<element::Type_t::u64, element::Type_t::i64>(),
generateScatterNDUpdateParams<element::Type_t::f16, element::Type_t::i64>(),
generateScatterNDUpdateParams<element::Type_t::f32, element::Type_t::i64>(),
generateScatterNDUpdateParams<element::Type_t::boolean, element::Type_t::i64>(),
};
std::vector<ScatterNDUpdateParams> combinedParams;
for (const auto& params : scatterTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
INSTANTIATE_TEST_SUITE_P(smoke_ScatterNDUpdate_With_Hardcoded_Refs, ReferenceScatterNDUpdateLayerTest,
testing::ValuesIn(generateScatterNDUpdateCombinedParams()), ReferenceScatterNDUpdateLayerTest::getTestCaseName);
} // namespace
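ScatterNDUpdate copies the data tensor and then, for each index tuple, overwrites the slice it addresses with the matching update; with one-element index tuples the slices are single values. A standalone sketch (not part of the commit) reproducing the scatter_nd_update_1d_from_examples case above:

// Standalone sketch: 1-D ScatterNDUpdate semantics.
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> out{1, 2, 3, 4, 5, 6, 7, 8};     // starts as a copy of the data tensor
    const std::vector<int> indices{4, 3, 1, 7};        // indices tensor, shape {4, 1}
    const std::vector<int> updates{9, 10, 11, 12};     // updates tensor, shape {4}
    for (size_t i = 0; i < indices.size(); ++i)
        out[indices[i]] = updates[i];                  // each index addresses one element
    for (int v : out)
        std::printf("%d ", v);                         // 1 11 3 10 9 6 7 12
    std::printf("\n");
}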

View File

@ -26,6 +26,10 @@ public:
void SetUp() override {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
}
};
class PreprocessOpenCVReferenceTest_NV12 : public PreprocessOpenCVReferenceTest {
public:
void Validate() override {
threshold = 1.f;
abs_threshold = 1.f;
@ -40,7 +44,7 @@ public:
} // namespace
static std::shared_ptr<Function> create_simple_function_nv12(element::Type type, const PartialShape& shape) {
static std::shared_ptr<Function> create_simple_function(element::Type type, const PartialShape& shape) {
auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
data1->set_friendly_name("input1");
data1->get_output_tensor(0).set_names({"tensor_input1", "input1"});
@ -49,11 +53,11 @@ static std::shared_ptr<Function> create_simple_function_nv12(element::Type type,
op->set_friendly_name("Add0");
auto res = std::make_shared<op::v0::Result>(op);
res->set_friendly_name("Result1");
res->get_output_tensor(0).set_names({"tensor_output1", "Result1", "Convert1"});
res->get_output_tensor(0).set_names({"tensor_output1", "Result1"});
return std::make_shared<ov::Function>(ResultVector{res}, ParameterVector{data1});
}
TEST_F(PreprocessOpenCVReferenceTest, convert_nv12_full_color_range) {
TEST_F(PreprocessOpenCVReferenceTest_NV12, convert_nv12_full_color_range) {
size_t height = 64; // 64/2 = 32 values for R
size_t width = 64; // 64/2 = 32 values for G
int b_step = 5;
@ -64,7 +68,7 @@ TEST_F(PreprocessOpenCVReferenceTest, convert_nv12_full_color_range) {
auto full_height = height * b_dim;
auto func_shape = Shape{1, full_height, width, 3};
function = create_simple_function_nv12(element::u8, func_shape);
function = create_simple_function(element::u8, func_shape);
inputData.clear();
@ -90,10 +94,10 @@ TEST_F(PreprocessOpenCVReferenceTest, convert_nv12_full_color_range) {
Exec();
}
TEST_F(PreprocessOpenCVReferenceTest, convert_nv12_colored) {
TEST_F(PreprocessOpenCVReferenceTest_NV12, convert_nv12_colored) {
auto input_yuv = std::vector<uint8_t> {235, 81, 235, 81, 109, 184};
auto func_shape = Shape{1, 2, 2, 3};
function = create_simple_function_nv12(element::u8, func_shape);
function = create_simple_function(element::u8, func_shape);
inputData.clear();
@ -116,4 +120,136 @@ TEST_F(PreprocessOpenCVReferenceTest, convert_nv12_colored) {
Exec();
}
TEST_F(PreprocessOpenCVReferenceTest, resize_u8_simple_linear) {
auto input_shape = Shape{1, 1, 2, 2};
auto func_shape = Shape{1, 1, 1, 1};
auto input_img = std::vector<uint8_t> {5, 5, 5, 4};
function = create_simple_function(element::u8, func_shape);
inputData.clear();
function = PrePostProcessor().input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(2, 2))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW"))
)
.build(function);
const auto &param = function->get_parameters()[0];
inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data());
// Calculate reference expected values from OpenCV
cv::Mat cvPic = cv::Mat(2, 2, CV_8UC1, input_img.data());
cv::Mat cvPicResized;
cv::resize(cvPic, cvPicResized, cv::Size(1, 1), cv::INTER_NEAREST);
refOutData.emplace_back(param->get_element_type(), func_shape, cvPicResized.data);
// Exec now
Exec();
}
TEST_F(PreprocessOpenCVReferenceTest, resize_u8_large_picture_linear) {
threshold = 1.f;
abs_threshold = 1.f; // Some pixels still have deviations of 1 step
const size_t input_height = 50;
const size_t input_width = 50;
const size_t func_height = 37;
const size_t func_width = 31;
auto input_shape = Shape{1, 1, input_height, input_width};
auto func_shape = Shape{1, 1, func_height, func_width};
auto input_img = std::vector<uint8_t> (shape_size(input_shape));
std::default_random_engine random(0); // hard-coded seed to make test results predictable
std::uniform_int_distribution<int> distrib(0, 255);
for (std::size_t i = 0; i < shape_size(input_shape); i++) {
auto v = distrib(random);
input_img[i] = static_cast<uint8_t>(v);
}
function = create_simple_function(element::u8, func_shape);
inputData.clear();
function = PrePostProcessor().input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(input_height, input_width))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW"))
)
.build(function);
const auto &param = function->get_parameters()[0];
inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data());
// Calculate reference expected values from OpenCV
cv::Mat cvPic = cv::Mat(input_height, input_width, CV_8UC1, input_img.data());
cv::Mat cvPicResized;
cv::resize(cvPic, cvPicResized, cv::Size(func_width, func_height), cv::INTER_LINEAR_EXACT);
refOutData.emplace_back(param->get_element_type(), func_shape, cvPicResized.data);
// Exec now
Exec();
}
TEST_F(PreprocessOpenCVReferenceTest, resize_f32_large_picture_linear) {
threshold = 0.01f;
abs_threshold = 0.01f;
const size_t input_height = 50;
const size_t input_width = 50;
const size_t func_height = 37;
const size_t func_width = 31;
auto input_shape = Shape{1, 1, input_height, input_width};
auto func_shape = Shape{1, 1, func_height, func_width};
auto input_img = std::vector<float> (shape_size(input_shape));
std::default_random_engine random(0); // hard-coded seed to make test results predictable
std::uniform_int_distribution<int> distrib(0, 255);
for (std::size_t i = 0; i < shape_size(input_shape); i++) {
input_img[i] = static_cast<float>(distrib(random));
}
function = create_simple_function(element::f32, func_shape);
inputData.clear();
function = PrePostProcessor().input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(input_height, input_width))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW"))
)
.build(function);
const auto &param = function->get_parameters()[0];
inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data());
// Calculate reference expected values from OpenCV
cv::Mat cvPic = cv::Mat(input_height, input_width, CV_32FC1, input_img.data());
cv::Mat cvPicResized;
cv::resize(cvPic, cvPicResized, cv::Size(func_width, func_height), cv::INTER_LINEAR_EXACT);
refOutData.emplace_back(param->get_element_type(), func_shape, cvPicResized.data);
// Exec now
Exec();
}
TEST_F(PreprocessOpenCVReferenceTest, DISABLED_resize_f32_large_picture_cubic_small) {
const size_t input_height = 4;
const size_t input_width = 4;
const size_t func_height = 3;
const size_t func_width = 3;
auto input_shape = Shape{1, 1, input_height, input_width};
auto func_shape = Shape{1, 1, func_height, func_width};
auto element_type = element::f32;
auto input_img = std::vector<float> {1.f, 2.f, 3.f, 4.f, 4.f, 3.f, 2.f, 1.f, 1.f, 2.f, 3.f, 4.f, 4.f, 3.f, 2.f, 1.f};
function = create_simple_function(element_type, func_shape);
function = PrePostProcessor().input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(input_height, input_width))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_CUBIC))
.network(InputNetworkInfo().set_layout("NCHW"))
)
.build(function);
inputData.emplace_back(element_type, input_shape, input_img.data());
// Calculate reference expected values from OpenCV
cv::Mat cvPic = cv::Mat(input_height, input_width, CV_32FC1, input_img.data());
cv::Mat cvPicResized;
cv::resize(cvPic, cvPicResized, cv::Size(func_width, func_height), cv::INTER_CUBIC);
refOutData.emplace_back(element_type, func_shape, cvPicResized.data);
// Exec now
Exec();
}
#endif // OPENCV_TEMPLATE_TESTS

View File

@ -2,8 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
#
if(CMAKE_COMPILER_IS_GNUCC)
if(CMAKE_COMPILER_IS_GNUCXX)
ie_add_compiler_flags(-Wall)
ie_add_compiler_flags(-Wmissing-declarations)
endif()
add_subdirectory(preprocessing)
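Switching the check from CMAKE_COMPILER_IS_GNUCC (the C compiler) to CMAKE_COMPILER_IS_GNUCXX means the flags are actually applied when the C++ compiler is GCC. A minimal, self-contained illustration of what -Wmissing-declarations reports and why internal linkage avoids it (the helper names are hypothetical, and linking this flag to the static qualifiers added in the clDNN files below is an inference, not something stated in the commit):

// Compile with: g++ -Wall -Wmissing-declarations demo.cpp
void helper_without_prototype() {}              // warning: no previous declaration
static void helper_with_internal_linkage() {}   // fine: internal linkage, no prototype needed

int main() {
    helper_without_prototype();
    helper_with_internal_linkage();
}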

View File

@ -29,12 +29,13 @@ enum class eltwise_mode : int32_t;
} // namespace cldnn
#define REGISTER_FACTORY_IMPL(op_version, op_name) \
void __register ## _ ## op_name ## _ ## op_version(); \
void __register ## _ ## op_name ## _ ## op_version() { \
Program::RegisterFactory<ngraph::op::op_version::op_name>( \
[](Program& p, const std::shared_ptr<ngraph::Node>& op) { \
auto op_casted = std::dynamic_pointer_cast<ngraph::op::op_version::op_name>(op); \
if (!op_casted) \
IE_THROW() << "Invalid ngraph Node type passed into " << __PRETTY_FUNCTION__; \
IE_THROW() << "Invalid ngraph Node type passed into " << __PRETTY_FUNCTION__; \
Create##op_name##Op(p, op_casted); \
}); \
}
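The new first line of the macro forward-declares the registration function, so its definition no longer trips -Wmissing-declarations. A small self-contained sketch of the same token-pasting pattern (REGISTER_DEMO, its printf body, and main are illustrative stand-ins, not the plugin API; the double-underscore name only mirrors the original macro):

// Standalone sketch of the registration-macro mechanics.
#include <cstdio>

#define REGISTER_DEMO(op_version, op_name)                              \
    void __register ## _ ## op_name ## _ ## op_version();               \
    void __register ## _ ## op_name ## _ ## op_version() {              \
        std::printf("registered %s (%s)\n", #op_name, #op_version);     \
    }

REGISTER_DEMO(v1, BatchToSpace)  // defines and forward-declares __register_BatchToSpace_v1()

int main() {
    __register_BatchToSpace_v1();
}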

View File

@ -12,7 +12,7 @@
namespace CLDNNPlugin {
void CreateBatchToSpaceOp(Program& p, const std::shared_ptr<ngraph::op::v1::BatchToSpace>& op) {
static void CreateBatchToSpaceOp(Program& p, const std::shared_ptr<ngraph::op::v1::BatchToSpace>& op) {
p.ValidateInputs(op, {4});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -88,7 +88,7 @@ static void CreateCommonBroadcastOp(Program& p, const std::shared_ptr<ngraph::No
p.AddPrimitiveToProfiler(op);
}
void CreateBroadcastOp(Program& p, const std::shared_ptr<ngraph::op::v1::Broadcast>& op) {
static void CreateBroadcastOp(Program& p, const std::shared_ptr<ngraph::op::v1::Broadcast>& op) {
p.ValidateInputs(op, {2, 3});
if (op->get_broadcast_spec().m_type == ngraph::op::AutoBroadcastType::NONE && op->get_input_size() == 3) {
auto axis_mapping_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
@ -103,7 +103,7 @@ void CreateBroadcastOp(Program& p, const std::shared_ptr<ngraph::op::v1::Broadca
}
}
void CreateBroadcastOp(Program& p, const std::shared_ptr<ngraph::op::v3::Broadcast>& op) {
static void CreateBroadcastOp(Program& p, const std::shared_ptr<ngraph::op::v3::Broadcast>& op) {
p.ValidateInputs(op, {2, 3});
ngraph::AxisSet axis_mapping;
if (op->get_input_size() == 3) {

View File

@ -38,7 +38,7 @@ static cldnn::concatenation::concatenation_axis GetConcatAxis(int32_t axis, size
return cldnn::concatenation::concatenation_axis::along_f; // shouldn't get here
}
void CreateConcatOp(Program& p, const std::shared_ptr<ngraph::op::v0::Concat>& op) {
static void CreateConcatOp(Program& p, const std::shared_ptr<ngraph::op::v0::Concat>& op) {
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
auto concatPrim = cldnn::concatenation(

View File

@ -78,7 +78,7 @@ static cldnn::tensor getConstTensor(const ngraph::Shape constDims) {
return constTensor;
}
void CreateConstantOp(Program& p, const std::shared_ptr<ngraph::op::v0::Constant>& op) {
static void CreateConstantOp(Program& p, const std::shared_ptr<ngraph::op::v0::Constant>& op) {
auto constDims = op->get_shape();
cldnn::tensor constTensor = getConstTensor(constDims);

View File

@ -12,7 +12,7 @@
namespace CLDNNPlugin {
void CreateConvertLikeOp(Program& p, const std::shared_ptr<ngraph::op::v1::ConvertLike>& op) {
static void CreateConvertLikeOp(Program& p, const std::shared_ptr<ngraph::op::v1::ConvertLike>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -30,7 +30,7 @@ void CreateConvertLikeOp(Program& p, const std::shared_ptr<ngraph::op::v1::Conve
p.AddPrimitiveToProfiler(op);
}
void CreateConvertOp(Program& p, const std::shared_ptr<ngraph::op::v0::Convert>& op) {
static void CreateConvertOp(Program& p, const std::shared_ptr<ngraph::op::v0::Convert>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -61,7 +61,7 @@ static ConvoltuionParameters GetConvolutionParameters(const ngraph::CoordinateDi
return {stride, padding, dilation, groups};
}
void CreateGroupConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::GroupConvolution>& op) {
static void CreateGroupConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::GroupConvolution>& op) {
p.ValidateInputs(op, {2});
auto inputs = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -91,7 +91,7 @@ void CreateGroupConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::
p.AddPrimitiveToProfiler(op);
}
void CreateConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::Convolution>& op) {
static void CreateConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::Convolution>& op) {
p.ValidateInputs(op, {2});
auto inputs = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -120,7 +120,7 @@ void CreateConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::Convo
p.AddPrimitiveToProfiler(op);
}
void CreateConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ngraph::op::v1::ConvolutionBackpropData>& op) {
static void CreateConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ngraph::op::v1::ConvolutionBackpropData>& op) {
// 3rd input is an optional output shape
p.ValidateInputs(op, {2, 3});
auto inputs = p.GetInputPrimitiveIDs(op);
@ -176,7 +176,7 @@ void CreateConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ngraph::o
p.AddPrimitiveToProfiler(op);
}
void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ngraph::op::v1::GroupConvolutionBackpropData>& op) {
static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ngraph::op::v1::GroupConvolutionBackpropData>& op) {
p.ValidateInputs(op, {2});
auto inputs = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -233,7 +233,7 @@ void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ngra
p.AddPrimitiveToProfiler(op);
}
void CreateDeformableConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::DeformableConvolution>& op) {
static void CreateDeformableConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::DeformableConvolution>& op) {
p.ValidateInputs(op, {3});
auto inputs = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -303,7 +303,7 @@ void CreateDeformableConvolutionOp(Program& p, const std::shared_ptr<ngraph::op:
}
}
void CreateBinaryConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::BinaryConvolution>& op) {
static void CreateBinaryConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::BinaryConvolution>& op) {
p.ValidateInputs(op, {2});
auto inputs = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -17,7 +17,7 @@
namespace CLDNNPlugin {
void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngraph::Node>& op, bool ctc_merge_repeated) {
static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngraph::Node>& op, bool ctc_merge_repeated) {
p.ValidateInputs(op, {2, 3});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
@ -120,11 +120,11 @@ void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngraph::No
p.AddPrimitiveToProfiler(CTCGreedyDecoderLayerName, op);
}
void CreateCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngraph::op::v0::CTCGreedyDecoder>& op) {
static void CreateCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngraph::op::v0::CTCGreedyDecoder>& op) {
CreateCommonCTCGreedyDecoderOp(p, op, op->get_ctc_merge_repeated());
}
void CreateCTCGreedyDecoderSeqLenOp(Program& p, const std::shared_ptr<ngraph::op::v6::CTCGreedyDecoderSeqLen>& op) {
static void CreateCTCGreedyDecoderSeqLenOp(Program& p, const std::shared_ptr<ngraph::op::v6::CTCGreedyDecoderSeqLen>& op) {
CreateCommonCTCGreedyDecoderOp(p, op, op->get_merge_repeated());
}

View File

@ -41,7 +41,7 @@ static inline cldnn::cum_sum::cum_sum_axis GetCumSumAxis(int32_t axis, uint32_t
return cldnn::cum_sum::cum_sum_axis::along_f; // shouldn't get here
}
void CreateCumSumOp(Program& p, const std::shared_ptr<ngraph::op::v0::CumSum>& op) {
static void CreateCumSumOp(Program& p, const std::shared_ptr<ngraph::op::v0::CumSum>& op) {
p.ValidateInputs(op, {1, 2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -22,7 +22,7 @@ static cldnn::depth_to_space_mode GetDepthMode(ngraph::op::v0::DepthToSpace::Dep
return cldnn::depth_to_space_mode::blocks_first;
}
void CreateDepthToSpaceOp(Program& p, const std::shared_ptr<ngraph::op::v0::DepthToSpace>& op) {
static void CreateDepthToSpaceOp(Program& p, const std::shared_ptr<ngraph::op::v0::DepthToSpace>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -26,7 +26,7 @@ static cldnn::prior_box_code_type PriorBoxCodeFromString(const std::string& str)
return cldnn::prior_box_code_type::corner;
}
void CreateDetectionOutputOp(Program& p, const std::shared_ptr<ngraph::op::v0::DetectionOutput>& op) {
static void CreateDetectionOutputOp(Program& p, const std::shared_ptr<ngraph::op::v0::DetectionOutput>& op) {
p.ValidateInputs(op, {3});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -87,71 +87,71 @@ void CreateElementwiseOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cl
p.AddPrimitiveToProfiler(op);
}
void CreateAddOp(Program& p, const std::shared_ptr<ngraph::op::v1::Add>& op) {
static void CreateAddOp(Program& p, const std::shared_ptr<ngraph::op::v1::Add>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::sum);
}
void CreateMultiplyOp(Program& p, const std::shared_ptr<ngraph::op::v1::Multiply>& op) {
static void CreateMultiplyOp(Program& p, const std::shared_ptr<ngraph::op::v1::Multiply>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::prod);
}
void CreateMaximumOp(Program& p, const std::shared_ptr<ngraph::op::v1::Maximum>& op) {
static void CreateMaximumOp(Program& p, const std::shared_ptr<ngraph::op::v1::Maximum>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::max);
}
void CreateMinimumOp(Program& p, const std::shared_ptr<ngraph::op::v1::Minimum>& op) {
static void CreateMinimumOp(Program& p, const std::shared_ptr<ngraph::op::v1::Minimum>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::min);
}
void CreateSubtractOp(Program& p, const std::shared_ptr<ngraph::op::v1::Subtract>& op) {
static void CreateSubtractOp(Program& p, const std::shared_ptr<ngraph::op::v1::Subtract>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::sub);
}
void CreateDivideOp(Program& p, const std::shared_ptr<ngraph::op::v1::Divide>& op) {
static void CreateDivideOp(Program& p, const std::shared_ptr<ngraph::op::v1::Divide>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::div);
}
void CreateSquaredDifferenceOp(Program& p, const std::shared_ptr<ngraph::op::v0::SquaredDifference>& op) {
static void CreateSquaredDifferenceOp(Program& p, const std::shared_ptr<ngraph::op::v0::SquaredDifference>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::squared_diff);
}
void CreateEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::Equal>& op) {
static void CreateEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::Equal>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::eq);
}
void CreateNotEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::NotEqual>& op) {
static void CreateNotEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::NotEqual>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::ne);
}
void CreateLessOp(Program& p, const std::shared_ptr<ngraph::op::v1::Less>& op) {
static void CreateLessOp(Program& p, const std::shared_ptr<ngraph::op::v1::Less>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::lt);
}
void CreateLessEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::LessEqual>& op) {
static void CreateLessEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::LessEqual>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::le);
}
void CreateGreaterOp(Program& p, const std::shared_ptr<ngraph::op::v1::Greater>& op) {
static void CreateGreaterOp(Program& p, const std::shared_ptr<ngraph::op::v1::Greater>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::gt);
}
void CreateGreaterEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::GreaterEqual>& op) {
static void CreateGreaterEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::GreaterEqual>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::ge);
}
void CreateLogicalAndOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalAnd>& op) {
static void CreateLogicalAndOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalAnd>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::logic_and);
}
void CreateLogicalOrOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalOr>& op) {
static void CreateLogicalOrOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalOr>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::logic_or);
}
void CreateLogicalXorOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalXor>& op) {
static void CreateLogicalXorOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalXor>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::logic_xor);
}
void CreatePowerOp(Program& p, const std::shared_ptr<ngraph::op::v1::Power>& op) {
static void CreatePowerOp(Program& p, const std::shared_ptr<ngraph::op::v1::Power>& op) {
p.ValidateInputs(op, {2});
auto power_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
if (power_node) {
@ -166,11 +166,11 @@ void CreatePowerOp(Program& p, const std::shared_ptr<ngraph::op::v1::Power>& op)
CreateElementwiseOp(p, op, cldnn::eltwise_mode::pow);
}
void CreateFloorModOp(Program& p, const std::shared_ptr<ngraph::op::v1::FloorMod>& op) {
static void CreateFloorModOp(Program& p, const std::shared_ptr<ngraph::op::v1::FloorMod>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::floor_mod);
}
void CreateModOp(Program& p, const std::shared_ptr<ngraph::op::v1::Mod>& op) {
static void CreateModOp(Program& p, const std::shared_ptr<ngraph::op::v1::Mod>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::mod);
}

View File

@ -16,7 +16,7 @@
namespace CLDNNPlugin {
void CreateEmbeddingBagOffsetsSumOp(Program& p, const std::shared_ptr<ngraph::op::v3::EmbeddingBagOffsetsSum>& op) {
static void CreateEmbeddingBagOffsetsSumOp(Program& p, const std::shared_ptr<ngraph::op::v3::EmbeddingBagOffsetsSum>& op) {
p.ValidateInputs(op, {3, 4, 5});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -72,7 +72,7 @@ void CreateEmbeddingBagOffsetsSumOp(Program& p, const std::shared_ptr<ngraph::op
p.AddPrimitiveToProfiler(op);
}
void CreateEmbeddingBagPackedSumOp(Program& p, const std::shared_ptr<ngraph::op::v3::EmbeddingBagPackedSum>& op) {
static void CreateEmbeddingBagPackedSumOp(Program& p, const std::shared_ptr<ngraph::op::v3::EmbeddingBagPackedSum>& op) {
p.ValidateInputs(op, {2, 3});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -113,7 +113,7 @@ void CreateEmbeddingBagPackedSumOp(Program& p, const std::shared_ptr<ngraph::op:
p.AddPrimitiveToProfiler(op);
}
void CreateEmbeddingSegmentsSumOp(Program& p, const std::shared_ptr<ngraph::op::v3::EmbeddingSegmentsSum>& op) {
static void CreateEmbeddingSegmentsSumOp(Program& p, const std::shared_ptr<ngraph::op::v3::EmbeddingSegmentsSum>& op) {
p.ValidateInputs(op, {4, 5, 6});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -22,7 +22,7 @@ static inline std::string PadToString(ngraph::op::PadType pad) {
return "";
}
void CreateExtractImagePatchesOp(Program& p, const std::shared_ptr<ngraph::op::v3::ExtractImagePatches>& op) {
static void CreateExtractImagePatchesOp(Program& p, const std::shared_ptr<ngraph::op::v3::ExtractImagePatches>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -11,7 +11,7 @@
namespace CLDNNPlugin {
void CreateFakeQuantizeOp(Program& p, const std::shared_ptr<ngraph::op::v0::FakeQuantize>& op) {
static void CreateFakeQuantizeOp(Program& p, const std::shared_ptr<ngraph::op::v0::FakeQuantize>& op) {
p.ValidateInputs(op, {5});
std::string layerName = layer_type_name_ID(op);
auto inputPrimitives = p.GetInputPrimitiveIDs(op);

View File

@ -12,7 +12,7 @@
namespace CLDNNPlugin {
void CreateGatherTreeOp(Program& p, const std::shared_ptr<ngraph::op::v1::GatherTree>& op) {
static void CreateGatherTreeOp(Program& p, const std::shared_ptr<ngraph::op::v1::GatherTree>& op) {
p.ValidateInputs(op, {4});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -104,21 +104,21 @@ void CreateGatherOpBase(Program& p, const std::shared_ptr<T>& op, const int64_t
p.AddPrimitiveToProfiler(op);
}
void CreateGatherOp(Program& p, const std::shared_ptr<ngraph::op::v1::Gather>& op) {
static void CreateGatherOp(Program& p, const std::shared_ptr<ngraph::op::v1::Gather>& op) {
p.ValidateInputs(op, {2, 3});
CreateGatherOpBase<ngraph::op::v1::Gather>(p, op);
}
REGISTER_FACTORY_IMPL(v1, Gather);
void CreateGatherOp(Program& p, const std::shared_ptr<ngraph::op::v7::Gather>& op) {
static void CreateGatherOp(Program& p, const std::shared_ptr<ngraph::op::v7::Gather>& op) {
p.ValidateInputs(op, {2, 3, 4});
CreateGatherOpBase<ngraph::op::v7::Gather>(p, op, op->get_batch_dims());
}
REGISTER_FACTORY_IMPL(v7, Gather);
void CreateGatherOp(Program& p, const std::shared_ptr<ngraph::op::v8::Gather>& op) {
static void CreateGatherOp(Program& p, const std::shared_ptr<ngraph::op::v8::Gather>& op) {
p.ValidateInputs(op, {2, 3, 4});
CreateGatherOpBase<ngraph::op::v8::Gather>(p, op, op->get_batch_dims(), true);
}

View File

@ -40,7 +40,7 @@ static cldnn::gather_elements::gather_elements_axis GetGatherAxis(int axis, unsi
return cldnn::gather_elements::gather_elements_axis::along_f; // shouldn't get here
}
void CreateGatherElementsOp(Program& p, const std::shared_ptr<ngraph::op::v6::GatherElements>& op) {
static void CreateGatherElementsOp(Program& p, const std::shared_ptr<ngraph::op::v6::GatherElements>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -12,7 +12,7 @@
namespace CLDNNPlugin {
void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v5::GatherND>& op) {
static void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v5::GatherND>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);

View File

@ -11,7 +11,7 @@
namespace CLDNNPlugin {
void CreateGRNOp(Program& p, const std::shared_ptr<ngraph::op::v0::GRN>& op) {
static void CreateGRNOp(Program& p, const std::shared_ptr<ngraph::op::v0::GRN>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -100,7 +100,7 @@ static cldnn::resample::resample_axis GetInterpolationAxis(int32_t axis, uint32_
IE_THROW() << "Unsupported Interpolate axis: " << axis;
}
void CreateInterpolateOp(Program& p, const std::shared_ptr<ngraph::op::v4::Interpolate>& op) {
static void CreateInterpolateOp(Program& p, const std::shared_ptr<ngraph::op::v4::Interpolate>& op) {
p.ValidateInputs(op, {3, 4});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -69,7 +69,7 @@ static std::string GetExternalInputName(const int64_t body_parameter_index,
return {""};
}
void CreateLoopOp(Program& p, const std::shared_ptr<Loop>& op) {
static void CreateLoopOp(Program& p, const std::shared_ptr<Loop>& op) {
const std::string layerName = layer_type_name_ID(op);
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
const auto& loop_input_descs = op->get_input_descriptions();


@ -20,7 +20,7 @@ static cldnn::lrn_norm_region GetNormRegion(std::vector<int64_t> axis_value) {
}
}
void CreateLRNOp(Program& p, const std::shared_ptr<ngraph::op::v0::LRN>& op) {
static void CreateLRNOp(Program& p, const std::shared_ptr<ngraph::op::v0::LRN>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -52,7 +52,7 @@ static std::pair<ngraph::Shape, ngraph::Shape> get_aligned_shapes(const ngraph::
return {shape_a_aligned, shape_b_aligned};
}
void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::MatMul>& op) {
static void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::MatMul>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -31,7 +31,7 @@ static void CreateCommonMVNOp(Program& p, const std::shared_ptr<ngraph::Node>& o
p.AddPrimitiveToProfiler(op);
}
void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v0::MVN>& op) {
static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v0::MVN>& op) {
p.ValidateInputs(op, {1});
bool across_channels = op->get_across_channels();
@ -41,7 +41,7 @@ void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v0::MVN>& op) {
CreateCommonMVNOp(p, op, across_channels, normalize_variance, eps);
}
void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v6::MVN>& op) {
static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v6::MVN>& op) {
p.ValidateInputs(op, {2});
auto inConst = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));


@ -25,7 +25,7 @@ static bool GetCenterPointBox(ngraph::op::v5::NonMaxSuppression::BoxEncodingType
return false;
}
void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_ptr<ngraph::op::internal::NonMaxSuppressionIEInternal>& op) {
static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_ptr<ngraph::op::internal::NonMaxSuppressionIEInternal>& op) {
p.ValidateInputs(op, {2, 3, 4, 5, 6});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);


@ -13,7 +13,7 @@
namespace CLDNNPlugin {
void CreateNormalizeL2Op(Program& p, const std::shared_ptr<ngraph::op::v0::NormalizeL2>& op) {
static void CreateNormalizeL2Op(Program& p, const std::shared_ptr<ngraph::op::v0::NormalizeL2>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -12,7 +12,7 @@
namespace CLDNNPlugin {
void CreateOneHotOp(Program& p, const std::shared_ptr<ngraph::op::v1::OneHot>& op) {
static void CreateOneHotOp(Program& p, const std::shared_ptr<ngraph::op::v1::OneHot>& op) {
p.ValidateInputs(op, {4});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -40,7 +40,7 @@ static std::vector<int32_t> GetPermuteOrder(const ngraph::CoordinateDiff& ie_ord
return cldnn_order;
}
void CreatePadOp(Program& p, const std::shared_ptr<ngraph::op::v1::Pad>& op) {
static void CreatePadOp(Program& p, const std::shared_ptr<ngraph::op::v1::Pad>& op) {
p.ValidateInputs(op, {3, 4});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -16,7 +16,7 @@ using namespace InferenceEngine;
namespace CLDNNPlugin {
void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::Parameter>& op) {
static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::Parameter>& op) {
auto networkInputs = p.GetNetworkInputs();
if (networkInputs.find(op->get_friendly_name()) == networkInputs.end()) {
IE_THROW() << "Can't find input " << op->get_friendly_name() << " in InputsDataMap";


@ -57,7 +57,7 @@ static PoolingParameters GetPoolingParameters(const ngraph::Shape& kernel,
return {k, s, pb, pe};
}
void CreateAvgPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::AvgPool>& op) {
static void CreateAvgPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::AvgPool>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -77,7 +77,7 @@ void CreateAvgPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::AvgPool>&
p.AddPrimitiveToProfiler(op);
}
void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::MaxPool>& op) {
static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::MaxPool>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -12,7 +12,7 @@
namespace CLDNNPlugin {
void CreatePriorBoxClusteredOp(Program& p, const std::shared_ptr<ngraph::op::v0::PriorBoxClustered>& op) {
static void CreatePriorBoxClusteredOp(Program& p, const std::shared_ptr<ngraph::op::v0::PriorBoxClustered>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -61,7 +61,7 @@ void CreatePriorBoxClusteredOp(Program& p, const std::shared_ptr<ngraph::op::v0:
p.AddPrimitiveToProfiler(op);
}
void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v0::PriorBox>& op) {
static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v0::PriorBox>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -13,7 +13,7 @@
namespace CLDNNPlugin {
void CreateProposalOp(Program& p, const std::shared_ptr<ngraph::op::v0::Proposal>& op) {
static void CreateProposalOp(Program& p, const std::shared_ptr<ngraph::op::v0::Proposal>& op) {
p.ValidateInputs(op, {3});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);


@ -22,7 +22,7 @@
namespace CLDNNPlugin {
void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cldnn::reduce_mode mode, bool keep_dims) {
static void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cldnn::reduce_mode mode, bool keep_dims) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -127,39 +127,39 @@ void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cldnn::
}
}
void CreateReduceMaxOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceMax>& op) {
static void CreateReduceMaxOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceMax>& op) {
CreateReduceOp(p, op, cldnn::reduce_mode::max, op->get_keep_dims());
}
void CreateReduceLogicalAndOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceLogicalAnd>& op) {
static void CreateReduceLogicalAndOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceLogicalAnd>& op) {
CreateReduceOp(p, op, cldnn::reduce_mode::logical_and, op->get_keep_dims());
}
void CreateReduceLogicalOrOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceLogicalOr>& op) {
static void CreateReduceLogicalOrOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceLogicalOr>& op) {
CreateReduceOp(p, op, cldnn::reduce_mode::logical_or, op->get_keep_dims());
}
void CreateReduceMeanOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceMean>& op) {
static void CreateReduceMeanOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceMean>& op) {
CreateReduceOp(p, op, cldnn::reduce_mode::mean, op->get_keep_dims());
}
void CreateReduceMinOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceMin>& op) {
static void CreateReduceMinOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceMin>& op) {
CreateReduceOp(p, op, cldnn::reduce_mode::min, op->get_keep_dims());
}
void CreateReduceProdOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceProd>& op) {
static void CreateReduceProdOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceProd>& op) {
CreateReduceOp(p, op, cldnn::reduce_mode::prod, op->get_keep_dims());
}
void CreateReduceSumOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceSum>& op) {
static void CreateReduceSumOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceSum>& op) {
CreateReduceOp(p, op, cldnn::reduce_mode::sum, op->get_keep_dims());
}
void CreateReduceL1Op(Program& p, const std::shared_ptr<ngraph::op::v4::ReduceL1>& op) {
static void CreateReduceL1Op(Program& p, const std::shared_ptr<ngraph::op::v4::ReduceL1>& op) {
CreateReduceOp(p, op, cldnn::reduce_mode::l1, op->get_keep_dims());
}
void CreateReduceL2Op(Program& p, const std::shared_ptr<ngraph::op::v4::ReduceL2>& op) {
static void CreateReduceL2Op(Program& p, const std::shared_ptr<ngraph::op::v4::ReduceL2>& op) {
CreateReduceOp(p, op, cldnn::reduce_mode::l2, op->get_keep_dims());
}


@ -11,7 +11,7 @@
namespace CLDNNPlugin {
void CreateRegionYoloOp(Program& p, const std::shared_ptr<ngraph::op::v0::RegionYolo>& op) {
static void CreateRegionYoloOp(Program& p, const std::shared_ptr<ngraph::op::v0::RegionYolo>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -11,7 +11,7 @@
namespace CLDNNPlugin {
void CreateReorgYoloOp(Program& p, const std::shared_ptr<ngraph::op::v0::ReorgYolo>& op) {
static void CreateReorgYoloOp(Program& p, const std::shared_ptr<ngraph::op::v0::ReorgYolo>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -14,7 +14,7 @@
namespace CLDNNPlugin {
void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
static void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
p.ValidateInputs(op, {1, 2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -58,15 +58,15 @@ void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node>& op)
p.AddPrimitiveToProfiler(op);
}
void CreateReshapeOp(Program& p, const std::shared_ptr<ngraph::op::v1::Reshape>& op) {
static void CreateReshapeOp(Program& p, const std::shared_ptr<ngraph::op::v1::Reshape>& op) {
CreateCommonReshapeOp(p, op);
}
void CreateSqueezeOp(Program& p, const std::shared_ptr<ngraph::op::v0::Squeeze>& op) {
static void CreateSqueezeOp(Program& p, const std::shared_ptr<ngraph::op::v0::Squeeze>& op) {
CreateCommonReshapeOp(p, op);
}
void CreateUnsqueezeOp(Program& p, const std::shared_ptr<ngraph::op::v0::Unsqueeze>& op) {
static void CreateUnsqueezeOp(Program& p, const std::shared_ptr<ngraph::op::v0::Unsqueeze>& op) {
CreateCommonReshapeOp(p, op);
}


@ -13,7 +13,7 @@ using namespace InferenceEngine;
namespace CLDNNPlugin {
void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Result>& op) {
static void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Result>& op) {
OutputsDataMap networkOutputs = p.GetNetworkOutputs();
p.ValidateInputs(op, {1});


@ -11,7 +11,7 @@
namespace CLDNNPlugin {
void CreateReverseSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v0::ReverseSequence>& op) {
static void CreateReverseSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v0::ReverseSequence>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -16,7 +16,7 @@
#include "cldnn/primitives/concatenation.hpp"
namespace CLDNNPlugin {
cldnn::activation_func GetActivationFunc(std::string name) {
static cldnn::activation_func GetActivationFunc(std::string name) {
static const std::map<std::string, cldnn::activation_func> name_mapping = {
{"sigmoid", cldnn::activation_func::logistic},
{"tanh", cldnn::activation_func::hyperbolic_tan},
@ -61,7 +61,7 @@ void GetLSTMActivationParams(const std::shared_ptr<T>& op,
}
}
void CreateLSTMCellOp(Program& p, const std::shared_ptr<ngraph::op::v4::LSTMCell>& op) {
static void CreateLSTMCellOp(Program& p, const std::shared_ptr<ngraph::op::v4::LSTMCell>& op) {
p.ValidateInputs(op, {6});
int lstm_batch_size, lstm_input_size, lstm_hidden_size;
bool hasBias = true;
@ -199,7 +199,7 @@ void CreateLSTMCellOp(Program& p, const std::shared_ptr<ngraph::op::v4::LSTMCell
p.AddPrimitiveToProfiler(layerName, op, outputHiddenID);
}
void CreateLSTMSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v5::LSTMSequence>& op) {
static void CreateLSTMSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v5::LSTMSequence>& op) {
p.ValidateInputs(op, {7});
std::string layerName = layer_type_name_ID(op);


@ -24,7 +24,7 @@ static cldnn::pooling_mode GetPoolingMode(std::string method) {
return cldnn::pooling_mode::deformable_bilinear;
}
void CreateDeformablePSROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v1::DeformablePSROIPooling>& op) {
static void CreateDeformablePSROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v1::DeformablePSROIPooling>& op) {
p.ValidateInputs(op, {2, 3});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -63,7 +63,7 @@ void CreateDeformablePSROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op
p.AddPrimitiveToProfiler(op);
}
void CreatePSROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v0::PSROIPooling>& op) {
static void CreatePSROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v0::PSROIPooling>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -92,7 +92,7 @@ void CreatePSROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v0::PSRO
p.AddPrimitiveToProfiler(op);
}
void CreateROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v0::ROIPooling>& op) {
static void CreateROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v0::ROIPooling>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -41,7 +41,7 @@ static inline cldnn::scatter_elements_update::scatter_elements_update_axis GetSc
return cldnn::scatter_elements_update::scatter_elements_update_axis::along_f; // shouldn't get here
}
void CreateScatterElementsUpdateOp(Program& p, const std::shared_ptr<ngraph::op::v3::ScatterElementsUpdate>& op) {
static void CreateScatterElementsUpdateOp(Program& p, const std::shared_ptr<ngraph::op::v3::ScatterElementsUpdate>& op) {
p.ValidateInputs(op, {4});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -12,7 +12,7 @@
namespace CLDNNPlugin {
void CreateScatterNDUpdateOp(Program& p, const std::shared_ptr<ngraph::op::v3::ScatterNDUpdate>& op) {
static void CreateScatterNDUpdateOp(Program& p, const std::shared_ptr<ngraph::op::v3::ScatterNDUpdate>& op) {
p.ValidateInputs(op, {3});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -41,7 +41,7 @@ static inline cldnn::scatter_update::scatter_update_axis GetScatterUpdateAxis(in
return cldnn::scatter_update::scatter_update_axis::along_f; // shouldn't get here
}
void CreateScatterUpdateOp(Program& p, const std::shared_ptr<ngraph::op::v3::ScatterUpdate>& op) {
static void CreateScatterUpdateOp(Program& p, const std::shared_ptr<ngraph::op::v3::ScatterUpdate>& op) {
p.ValidateInputs(op, {4});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -13,7 +13,7 @@
namespace CLDNNPlugin {
void CreateSelectOp(Program& p, const std::shared_ptr<ngraph::op::v1::Select>& op) {
static void CreateSelectOp(Program& p, const std::shared_ptr<ngraph::op::v1::Select>& op) {
p.ValidateInputs(op, {3});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -11,7 +11,7 @@
namespace CLDNNPlugin {
void CreateShuffleChannelsOp(Program& p, const std::shared_ptr<ngraph::op::v0::ShuffleChannels>& op) {
static void CreateShuffleChannelsOp(Program& p, const std::shared_ptr<ngraph::op::v0::ShuffleChannels>& op) {
p.ValidateInputs(op, {1, 2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -35,7 +35,7 @@ static cldnn::softmax::dimension_t GetSoftmaxAxis(int64_t axis, size_t rank) {
return cldnn::softmax::normalize_fyx;
}
void CreateSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v1::Softmax>& op) {
static void CreateSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v1::Softmax>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -47,7 +47,7 @@ void CreateSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v1::Softmax>&
p.AddPrimitiveToProfiler(op);
}
void CreateLogSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v5::LogSoftmax>& op) {
static void CreateLogSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v5::LogSoftmax>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -12,7 +12,7 @@
namespace CLDNNPlugin {
void CreateSpaceToBatchOp(Program& p, const std::shared_ptr<ngraph::op::v1::SpaceToBatch>& op) {
static void CreateSpaceToBatchOp(Program& p, const std::shared_ptr<ngraph::op::v1::SpaceToBatch>& op) {
p.ValidateInputs(op, {4});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -20,7 +20,7 @@ static cldnn::space_to_depth::depth_mode GetDepthMode(ngraph::op::v0::SpaceToDep
return cldnn::space_to_depth::blocks_first;
}
void CreateSpaceToDepthOp(Program& p, const std::shared_ptr<ngraph::op::v0::SpaceToDepth>& op) {
static void CreateSpaceToDepthOp(Program& p, const std::shared_ptr<ngraph::op::v0::SpaceToDepth>& op) {
p.ValidateInputs(op, {1});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -12,7 +12,7 @@
namespace CLDNNPlugin {
void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@ -58,12 +58,12 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
p.InitProfileInfo(op->get_friendly_name(), op->get_type_name(), false, InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT);
}
void CreateSplitOp(Program& p, const std::shared_ptr<ngraph::op::v1::Split>& op) {
static void CreateSplitOp(Program& p, const std::shared_ptr<ngraph::op::v1::Split>& op) {
p.ValidateInputs(op, {2});
CreateCommonSplitOp(p, op);
}
void CreateVariadicSplitOp(Program& p, const std::shared_ptr<ngraph::op::v1::VariadicSplit>& op) {
static void CreateVariadicSplitOp(Program& p, const std::shared_ptr<ngraph::op::v1::VariadicSplit>& op) {
p.ValidateInputs(op, {3});
CreateCommonSplitOp(p, op);
}

View File

@ -14,7 +14,7 @@
namespace CLDNNPlugin {
void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v1::StridedSlice>& op) {
static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v1::StridedSlice>& op) {
p.ValidateInputs(op, {4});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -55,7 +55,7 @@ static void UpdateBackedge(std::vector<cldnn::loop::backedge_mapping>& back_edge
}
}
void CreateTensorIteratorOp(Program &p, const std::shared_ptr<TensorIterator> &op) {
static void CreateTensorIteratorOp(Program &p, const std::shared_ptr<TensorIterator> &op) {
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
// get body topology from ngraph function


@ -11,7 +11,7 @@
namespace CLDNNPlugin {
void CreateTileOp(Program& p, const std::shared_ptr<ngraph::op::v0::Tile>& op) {
static void CreateTileOp(Program& p, const std::shared_ptr<ngraph::op::v0::Tile>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -40,7 +40,7 @@ static cldnn::arg_max_min::axis_name GetAxis(int32_t axis, size_t in_rank) {
return cldnn::arg_max_min::axis_name::batch;
}
void CreateTopKOp(Program& p, const std::shared_ptr<ngraph::op::v1::TopK>& op) {
static void CreateTopKOp(Program& p, const std::shared_ptr<ngraph::op::v1::TopK>& op) {
p.ValidateInputs(op, {2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -12,7 +12,7 @@
namespace CLDNNPlugin {
void CreateTransposeOp(Program& p, const std::shared_ptr<ngraph::op::v1::Transpose>& op) {
static void CreateTransposeOp(Program& p, const std::shared_ptr<ngraph::op::v1::Transpose>& op) {
p.ValidateInputs(op, {1, 2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);


@ -54,24 +54,24 @@ void CreateUnaryEltwiseOp(Program& p, const std::shared_ptr<ngraph::Node>& op,
p.AddPrimitiveToProfiler(op);
}
void CreateTanhOp(Program& p, const std::shared_ptr<ngraph::op::v0::Tanh>& op) {
static void CreateTanhOp(Program& p, const std::shared_ptr<ngraph::op::v0::Tanh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::hyperbolic_tan, {});
}
void CreateEluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Elu>& op) {
static void CreateEluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Elu>& op) {
auto alpha = static_cast<float>(op->get_alpha());
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::elu, {alpha});
}
void CreateSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sigmoid>& op) {
static void CreateSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sigmoid>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::logistic, {});
}
void CreateReluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Relu>& op) {
static void CreateReluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Relu>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::relu, {});
}
void CreatePReluOp(Program& p, const std::shared_ptr<ngraph::op::v0::PRelu>& op) {
static void CreatePReluOp(Program& p, const std::shared_ptr<ngraph::op::v0::PRelu>& op) {
p.ValidateInputs(op, {2});
auto slope_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
@ -96,65 +96,65 @@ void CreatePReluOp(Program& p, const std::shared_ptr<ngraph::op::v0::PRelu>& op)
}
}
void CreateClampOp(Program& p, const std::shared_ptr<ngraph::op::v0::Clamp>& op) {
static void CreateClampOp(Program& p, const std::shared_ptr<ngraph::op::v0::Clamp>& op) {
float min = static_cast<float>(op->get_min());
float max = static_cast<float>(op->get_max());
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::clamp, {min, max});
}
void CreateExpOp(Program& p, const std::shared_ptr<ngraph::op::v0::Exp>& op) {
static void CreateExpOp(Program& p, const std::shared_ptr<ngraph::op::v0::Exp>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::exp, {});
}
void CreateLogicalNotOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalNot>& op) {
static void CreateLogicalNotOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalNot>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::negation, {});
}
void CreateAsinOp(Program& p, const std::shared_ptr<ngraph::op::v0::Asin>& op) {
static void CreateAsinOp(Program& p, const std::shared_ptr<ngraph::op::v0::Asin>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::asin, {});
}
void CreateAsinhOp(Program& p, const std::shared_ptr<ngraph::op::v3::Asinh>& op) {
static void CreateAsinhOp(Program& p, const std::shared_ptr<ngraph::op::v3::Asinh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::asinh, {});
}
void CreateAcosOp(Program& p, const std::shared_ptr<ngraph::op::v0::Acos>& op) {
static void CreateAcosOp(Program& p, const std::shared_ptr<ngraph::op::v0::Acos>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::acos, {});
}
void CreateAcoshOp(Program& p, const std::shared_ptr<ngraph::op::v3::Acosh>& op) {
static void CreateAcoshOp(Program& p, const std::shared_ptr<ngraph::op::v3::Acosh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::acosh, {});
}
void CreateAtanOp(Program& p, const std::shared_ptr<ngraph::op::v0::Atan>& op) {
static void CreateAtanOp(Program& p, const std::shared_ptr<ngraph::op::v0::Atan>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::atan, {});
}
void CreateAtanhOp(Program& p, const std::shared_ptr<ngraph::op::v3::Atanh>& op) {
static void CreateAtanhOp(Program& p, const std::shared_ptr<ngraph::op::v3::Atanh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::atanh, {});
}
void CreateAbsOp(Program& p, const std::shared_ptr<ngraph::op::v0::Abs>& op) {
static void CreateAbsOp(Program& p, const std::shared_ptr<ngraph::op::v0::Abs>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::abs, {});
}
void CreateFloorOp(Program& p, const std::shared_ptr<ngraph::op::v0::Floor>& op) {
static void CreateFloorOp(Program& p, const std::shared_ptr<ngraph::op::v0::Floor>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::floor, {});
}
void CreateCeilingOp(Program& p, const std::shared_ptr<ngraph::op::v0::Ceiling>& op) {
static void CreateCeilingOp(Program& p, const std::shared_ptr<ngraph::op::v0::Ceiling>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::ceil, {});
}
void CreateSqrtOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sqrt>& op) {
static void CreateSqrtOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sqrt>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sqrt, {});
}
void CreateErfOp(Program& p, const std::shared_ptr<ngraph::op::v0::Erf>& op) {
static void CreateErfOp(Program& p, const std::shared_ptr<ngraph::op::v0::Erf>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::erf, {});
}
void CreateHardSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v0::HardSigmoid>& op) {
static void CreateHardSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v0::HardSigmoid>& op) {
p.ValidateInputs(op, {3});
auto alpha_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
auto beta_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
@ -172,15 +172,15 @@ void CreateHardSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v0::HardS
}
}
void CreateLogOp(Program& p, const std::shared_ptr<ngraph::op::v0::Log>& op) {
static void CreateLogOp(Program& p, const std::shared_ptr<ngraph::op::v0::Log>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::log, {});
}
void CreateNegativeOp(Program& p, const std::shared_ptr<ngraph::op::v0::Negative>& op) {
static void CreateNegativeOp(Program& p, const std::shared_ptr<ngraph::op::v0::Negative>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::negative, {});
}
void CreateSeluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Selu>& op) {
static void CreateSeluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Selu>& op) {
p.ValidateInputs(op, {3});
auto alpha_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
auto lambda_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
@ -200,31 +200,31 @@ void CreateSeluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Selu>& op) {
}
}
void CreateSoftPlusOp(Program& p, const std::shared_ptr<ngraph::op::v4::SoftPlus>& op) {
static void CreateSoftPlusOp(Program& p, const std::shared_ptr<ngraph::op::v4::SoftPlus>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::softplus, {});
}
void CreateTanOp(Program& p, const std::shared_ptr<ngraph::op::v0::Tan>& op) {
static void CreateTanOp(Program& p, const std::shared_ptr<ngraph::op::v0::Tan>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::tan, {});
}
void CreateSinOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sin>& op) {
static void CreateSinOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sin>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sin, {});
}
void CreateSinhOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sinh>& op) {
static void CreateSinhOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sinh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sinh, {});
}
void CreateCosOp(Program& p, const std::shared_ptr<ngraph::op::v0::Cos>& op) {
static void CreateCosOp(Program& p, const std::shared_ptr<ngraph::op::v0::Cos>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::cos, {});
}
void CreateCoshOp(Program& p, const std::shared_ptr<ngraph::op::v0::Cosh>& op) {
static void CreateCoshOp(Program& p, const std::shared_ptr<ngraph::op::v0::Cosh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::cosh, {});
}
void CreateSwishOp(Program& p, const std::shared_ptr<ngraph::op::v4::Swish>& op) {
static void CreateSwishOp(Program& p, const std::shared_ptr<ngraph::op::v4::Swish>& op) {
p.ValidateInputs(op, {1, 2});
if (op->get_input_size() == 2) {
auto beta_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
@ -246,27 +246,27 @@ void CreateSwishOp(Program& p, const std::shared_ptr<ngraph::op::v4::Swish>& op)
}
}
void CreateHSwishOp(Program& p, const std::shared_ptr<ngraph::op::v4::HSwish>& op) {
static void CreateHSwishOp(Program& p, const std::shared_ptr<ngraph::op::v4::HSwish>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::hswish, {});
}
void CreateMishOp(Program& p, const std::shared_ptr<ngraph::op::v4::Mish>& op) {
static void CreateMishOp(Program& p, const std::shared_ptr<ngraph::op::v4::Mish>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::mish, {});
}
void CreateGeluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Gelu>& op) {
static void CreateGeluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Gelu>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::gelu, {});
}
void CreateSignOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sign>& op) {
static void CreateSignOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sign>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sign, {});
}
void CreateHSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v5::HSigmoid>& op) {
static void CreateHSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v5::HSigmoid>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::hsigmoid, {});
}
void CreateRoundOp(Program& p, const std::shared_ptr<ngraph::op::v5::Round>& op) {
static void CreateRoundOp(Program& p, const std::shared_ptr<ngraph::op::v5::Round>& op) {
auto func = cldnn::activation_func::none;
switch (op->get_mode()) {
case ngraph::op::v5::Round::RoundMode::HALF_TO_EVEN : func = cldnn::activation_func::round_half_to_even; break;


@ -8,6 +8,7 @@
#include <cmath>
#include "runtime/pwl.h"
#include "make_pwl.hpp"
#include "gna_slope_scale.h"
#include "dnn_types.h"
#include "backend/gna_types.h"


@ -19,6 +19,8 @@
#include <cmath>
#include <map>
namespace {
std::string GetLayerType(Gna2OperationType type) {
switch (type) {
case Gna2OperationTypeFullyConnectedAffine: return "Gna2OperationTypeFullyConnectedAffine";
@ -92,6 +94,8 @@ int32_t GetValue(const Gna2Tensor& tensor, const T & elementIndex) {
return intValue;
}
} // namespace
void WriteInputAndOutputTextGNAImpl(const Gna2Model & gnaModel, const std::string dumpFolderNameGNA, const std::string refFolderName) {
for (uint32_t i = 0; i < gnaModel.NumberOfOperations; i++) {
const auto & operation = gnaModel.Operations[i];
@ -161,6 +165,8 @@ void WriteInputAndOutputTextGNAImpl(const Gna2Model & gnaModel, const std::strin
}
}
namespace {
template<typename T>
static std::string GetName(T name, size_t index) {
return name;
@ -172,7 +178,7 @@ std::string GetName<>(std::vector<std::string> names, size_t index) {
}
template<class MapType>
static std::string FindInMapOrReturnUnknown(MapType map, typename MapType::key_type key, size_t index = 0) {
std::string FindInMapOrReturnUnknown(MapType map, typename MapType::key_type key, size_t index = 0) {
auto value = map.find(key);
if (value != map.end()) {
return GetName(value->second, index);
@ -180,8 +186,8 @@ static std::string FindInMapOrReturnUnknown(MapType map, typename MapType::key_t
return std::string {"unknown"};
}
static std::string GetOperandType(Gna2DataType type) {
const std::map<Gna2DataType, std::string> operandTypeMap = {
std::string GetOperandType(Gna2DataType type) {
static const std::map<Gna2DataType, std::string> operandTypeMap = {
{Gna2DataTypeNone, "Gna2DataTypeNone"},
{Gna2DataTypeBoolean, "Gna2DataTypeBoolean"},
{Gna2DataTypeInt4, "Gna2DataTypeInt4"},
@ -200,8 +206,8 @@ static std::string GetOperandType(Gna2DataType type) {
return FindInMapOrReturnUnknown(operandTypeMap, type);
}
static std::string GetOperandName(Gna2OperationType type, size_t index) {
const std::map<Gna2OperationType, std::vector<std::string>> operationOperandNamesMap = {
std::string GetOperandName(Gna2OperationType type, size_t index) {
static const std::map<Gna2OperationType, std::vector<std::string>> operationOperandNamesMap = {
{Gna2OperationTypeConvolution, {"inputs", "outputs", "filters", "biases", "activationFunction"}},
{Gna2OperationTypeCopy, {"inputs", "outputs"}},
{Gna2OperationTypeFullyConnectedAffine, {"inputs", "outputs", "weights", "biases", "activationFunction", "weightScaleFactors"}},
@ -213,8 +219,8 @@ static std::string GetOperandName(Gna2OperationType type, size_t index) {
return FindInMapOrReturnUnknown(operationOperandNamesMap, type, index);
}
static std::string GetBiasMode(Gna2BiasMode mode) {
const std::map<Gna2BiasMode, std::string> biasModeMap = {
std::string GetBiasMode(Gna2BiasMode mode) {
static const std::map<Gna2BiasMode, std::string> biasModeMap = {
{Gna2BiasModeDefault, "Gna2BiasModeDefault"},
{Gna2BiasModePerStride, "Gna2BiasModePerStride"},
{Gna2BiasModeGrouping, "Gna2BiasModeGrouping"}
@ -222,8 +228,8 @@ static std::string GetBiasMode(Gna2BiasMode mode) {
return FindInMapOrReturnUnknown(biasModeMap, mode);
}
static std::string GetPoolingMode(Gna2PoolingMode mode) {
const std::map<Gna2PoolingMode, std::string> poolingModeMap = {
std::string GetPoolingMode(Gna2PoolingMode mode) {
static const std::map<Gna2PoolingMode, std::string> poolingModeMap = {
{Gna2PoolingModeDisabled, "Gna2PoolingModeDisabled"},
{Gna2PoolingModeMax, "Gna2PoolingModeMax"},
{Gna2PoolingModeSum, "Gna2PoolingModeSum"}
@ -231,7 +237,7 @@ static std::string GetPoolingMode(Gna2PoolingMode mode) {
return FindInMapOrReturnUnknown(poolingModeMap, mode);
}
static void DumpShape(std::ostream& dumpFile, Gna2Shape* shape, const std::string paramName) {
void DumpShape(std::ostream& dumpFile, Gna2Shape* shape, const std::string paramName) {
dumpFile << "\tParameter name: " << paramName << ", ";
dumpFile << "parameter type: Gna2Shape\n";
dumpFile << "\t\tNumber of dimensions: " << shape->NumberOfDimensions;
@ -242,7 +248,7 @@ static void DumpShape(std::ostream& dumpFile, Gna2Shape* shape, const std::strin
dumpFile << "]\n";
}
static void DumpConvolutionParameters(std::ostream& dumpFile, void** parameters, size_t knownParamCount, const std::vector<std::string> paramNames) {
void DumpConvolutionParameters(std::ostream& dumpFile, void** parameters, size_t knownParamCount, const std::vector<std::string> paramNames) {
size_t i = 0;
while (i < knownParamCount) {
@ -263,12 +269,12 @@ static void DumpConvolutionParameters(std::ostream& dumpFile, void** parameters,
}
}
static void DumpCopyParameters(std::ostream& dumpFile, void** parameters, size_t knownParamCount, const std::vector<std::string> paramNames) {
void DumpCopyParameters(std::ostream& dumpFile, void** parameters, size_t knownParamCount, const std::vector<std::string> paramNames) {
Gna2Shape* subTensorShape = reinterpret_cast<Gna2Shape*>(parameters[CopyShapeParamIdx]);
DumpShape(dumpFile, subTensorShape, paramNames[CopyShapeParamIdx]);
}
static void DumpFCAffineParameters(std::ostream& dumpFile, void** parameters, size_t knownParamCount, const std::vector<std::string> paramNames) {
void DumpFCAffineParameters(std::ostream& dumpFile, void** parameters, size_t knownParamCount, const std::vector<std::string> paramNames) {
size_t i = 0;
while (i < knownParamCount) {
@ -285,13 +291,13 @@ static void DumpFCAffineParameters(std::ostream& dumpFile, void** parameters, si
}
}
static void DumpIntParameter(std::ostream& dumpFile, void** parameters, size_t knownParamCount, const std::vector<std::string> paramNames) {
void DumpIntParameter(std::ostream& dumpFile, void** parameters, size_t knownParamCount, const std::vector<std::string> paramNames) {
uint32_t* param = reinterpret_cast<uint32_t*>(parameters[0]);
if (param != nullptr)
dumpFile << "\tParameter name: " << paramNames[0] << ", value: " << *param << "\n";
}
static std::vector<std::string> GetParamaterNames(Gna2OperationType type) {
std::vector<std::string> GetParamaterNames(Gna2OperationType type) {
// This map must be aligned with dumpParamMap in this file
const std::map<Gna2OperationType, std::vector<std::string>> operationParamaterNamesMap = {
{Gna2OperationTypeConvolution, {"convolutionStride", "biasMode", "poolingMode", "poolingWindow", "poolingStride", "zeroPadding"}},
@ -306,7 +312,7 @@ static std::vector<std::string> GetParamaterNames(Gna2OperationType type) {
typedef void (*dumpParameters) (std::ostream&, void**, size_t, const std::vector<std::string>);
static dumpParameters GetParamDumpFunc(Gna2OperationType type) {
dumpParameters GetParamDumpFunc(Gna2OperationType type) {
// This map must be aligned with operationParamaterNamesMap in this file
static const std::map<Gna2OperationType, dumpParameters> dumpParamMap = {
{Gna2OperationTypeConvolution, DumpConvolutionParameters},
@ -318,7 +324,7 @@ static dumpParameters GetParamDumpFunc(Gna2OperationType type) {
return dumpParamMap.find(type) != dumpParamMap.end() ? dumpParamMap.find(type)->second : nullptr;
}
static void DumpPwl(std::ostream& dumpFile, const Gna2Tensor& activation) {
void DumpPwl(std::ostream& dumpFile, const Gna2Tensor& activation) {
const Gna2PwlSegment* const segments = static_cast<Gna2PwlSegment*>(activation.Data);
const uint32_t numberOfSegments = activation.Shape.Dimensions[0];
@ -342,7 +348,7 @@ static void DumpPwl(std::ostream& dumpFile, const Gna2Tensor& activation) {
}
}
static void DumpCompoundBias(std::ostream& dumpFile, const Gna2Tensor& tensor) {
void DumpCompoundBias(std::ostream& dumpFile, const Gna2Tensor& tensor) {
auto i = 0;
while (i < tensor.Shape.Dimensions[0]) {
@ -352,7 +358,7 @@ static void DumpCompoundBias(std::ostream& dumpFile, const Gna2Tensor& tensor) {
}
}
static void DumpCharArray(std::ostream& dumpFile, const char *carray, size_t count) {
void DumpCharArray(std::ostream& dumpFile, const char *carray, size_t count) {
auto i = 0;
while (*(carray + i) != 0 && i < count) {
dumpFile << *(carray + i) << " ";
@ -361,6 +367,8 @@ static void DumpCharArray(std::ostream& dumpFile, const char *carray, size_t co
dumpFile << "\n";
}
} // namespace
void DumpGna2Model(const Gna2Model& gnaModel, const std::string dumpFolderNameGNA, bool dumpData) {
std::stringstream dumpFileName;
uint32_t opsNo = gnaModel.NumberOfOperations;


@ -59,6 +59,8 @@ Gna2Tensor HelperGna2TensorInit3D(uint32_t x, uint32_t y, uint32_t z, Gna2DataTy
return t;
}
namespace {
Gna2DataType FromOvDataType(OvGnaType t) {
static const std::map< OvGnaType, Gna2DataType> m = {
{OvGnaTypeInt8, Gna2DataTypeInt8},
@ -85,6 +87,8 @@ Gna2Tensor HelperGna2TensorInit(OvGnaTensor tensor, void* data) {
return t;
}
} // namespace
Gna2Tensor * createGna2Tensor1D(uint32_t x, uint32_t byteSize, void* data) {
const auto input = reinterpret_cast<Gna2Tensor*>(gnaUserAllocator(sizeof(Gna2Tensor)));
IE_ASSERT(input != nullptr);


@ -117,7 +117,7 @@ uint32_t GNADeviceHelper::propagate(const uint32_t requestConfigId, Gna2Accelera
return reqId;
}
void enforceLegacyCnn(Gna2Operation& operation) {
inline void enforceLegacyCnn(Gna2Operation& operation) {
snprintf(
const_cast<char*>(operation.Operands[1]->Layout),
sizeof(operation.Operands[1]->Layout) / sizeof(char),


@ -2101,7 +2101,7 @@ void GNAGraphCompiler::PermutePrimitive(InferenceEngine::CNNLayerPtr layer) {
connectOutput(layer, ptr_outputs, num_data_bytes_out);
}
void SKIP(GNAGraphCompiler*, CNNLayerPtr) {}
inline void SKIP(GNAGraphCompiler*, CNNLayerPtr) {}
void GNAGraphCompiler::CreateLayerPrimitive(CNNLayerPtr layer) {
static const LayersBuilder layersBuilder[] = {


@ -1,196 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// gna_helper.cpp : various GNA-related utility functions
//
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <vector>
#include <sstream>
#include <string>
#include "backend/gna_types.h"
#include "gna_plugin_log.hpp"
#include "gna_lib_ver_selector.hpp"
void PrintMatrixInt16(const char *ptr_name, int16_t *ptr_matrix, int num_rows, int num_cols, int lda, float scale) {
printf("%s: %dx%d lda %d\n", ptr_name, num_rows, num_cols, lda);
for (int i = 0; i < num_rows; i++) {
for (int j = 0; j < num_cols; j++) {
printf("[%d,%d]: %e\n", i, j, *(ptr_matrix + i*lda + j) / scale);
}
}
}
void PrintMatrixInt32(char *ptr_name, int32_t *ptr_matrix, int num_rows, int num_cols, int lda, float scale) {
printf("%s: %dx%d lda %d\n", ptr_name, num_rows, num_cols, lda);
for (int i = 0; i < num_rows; i++) {
for (int j = 0; j < num_cols; j++) {
printf("[%d,%d]: %e\n", i, j, *(ptr_matrix + i*lda + j) / scale);
}
}
}
void PrintMatrixFloat32(char *ptr_name, float *ptr_matrix, int num_rows, int num_cols, int lda) {
#if (defined _WIN32 || defined _WIN64) && (_MSC_VER < 1900)
_set_output_format(_TWO_DIGIT_EXPONENT);
#endif
printf("%s: %dx%d lda %d\n", ptr_name, num_rows, num_cols, lda);
for (int i = 0; i < num_rows; i++) {
for (int j = 0; j < num_cols; j++) {
printf("[%d,%d]: %e\n", i, j, *(ptr_matrix + i*lda + j));
}
}
}
typedef struct {
std::string sName;
std::string sType; // if wgt/bias/filt/pwl is writeable, then do not write it to file
void *pAddress;
uint32_t nBytes;
} intel_memory_region_t;
void AddBufferEntry(std::vector<intel_memory_region_t> &vBuffer,
const std::string &sName,
const std::string &sType,
void *pBuffer,
uint32_t nBytes) {
if (pBuffer != NULL) {
intel_memory_region_t region;
region.sName = sName;
region.sType = sType;
region.pAddress = pBuffer;
region.nBytes = nBytes;
vBuffer.push_back(region);
}
}
std::string BufferNameFromAddress(std::vector<intel_memory_region_t> &vBuffer, void *pBuffer) {
std::stringstream ss;
std::string sAddr, sName;
void *pParentBuffer = pBuffer;
bool found = false;
bool found_persistent = false;
bool found_output = false;
for (uint32_t i = 0; i < vBuffer.size(); i++) {
uint8_t *pBufferStart = reinterpret_cast<uint8_t *>(pBuffer);
uint8_t *pEntryBufferStart = reinterpret_cast<uint8_t *>(vBuffer.at(i).pAddress);
uint8_t *pEntryBufferEnd = reinterpret_cast<uint8_t *>(vBuffer.at(i).pAddress) + vBuffer.at(i).nBytes;
if ((pBufferStart >= pEntryBufferStart) && (pBufferStart < pEntryBufferEnd)) {
found = true;
if (pBufferStart > pEntryBufferStart) {
pParentBuffer = pEntryBufferStart;
}
if ((vBuffer.at(i).sType.compare("pOutputs") == 0)
|| (vBuffer.at(i).sType.compare("pOutputsIntermediate") == 0)) {
found_output = true;
} else if (vBuffer.at(i).sType.compare("pWeights") == 0) {
sName = "wgt_";
found_persistent = true;
} else if (vBuffer.at(i).sType.compare("pBiases") == 0) {
sName = "bias_";
found_persistent = true;
} else if (vBuffer.at(i).sType.compare("pSegments") == 0) {
sName = "pwl_";
found_persistent = true;
}
}
}
if (found) {
if ((found_output) || (!found_persistent)) {
sName = "buf_";
}
ss << (int64_t) pParentBuffer;
sAddr = ss.str();
sName.append(sAddr);
} else {
fprintf(stderr, "Error: buffer address does not exist in BufferNameFromAddress!\n");
exit(EXIT_FAILURE);
}
return (sName);
}
uint32_t BufferOffsetFromAddress(std::vector<intel_memory_region_t> &vBuffer, void *pBuffer) {
uint32_t nOffsetBytes = 0;
for (uint32_t i = 0; i < vBuffer.size(); i++) {
uint8_t *pBufferStart = reinterpret_cast<uint8_t *>(pBuffer);
uint8_t *pEntryBufferStart = reinterpret_cast<uint8_t *>(vBuffer.at(i).pAddress);
uint8_t *pEntryBufferEnd = reinterpret_cast<uint8_t *>(vBuffer.at(i).pAddress) + vBuffer.at(i).nBytes;
if ((pBufferStart >= pEntryBufferStart) && (pBufferStart < pEntryBufferEnd)) {
if (pBufferStart > pEntryBufferStart) {
nOffsetBytes = (uint32_t) (pBufferStart - pEntryBufferStart);
}
}
}
return (nOffsetBytes);
}
std::string LayerName(gna_nnet_layer_t *pLayer) {
const auto nKind = pLayer->nLayerKind;
std::string sKind;
if (nKind == INTEL_AFFINE) {
sKind = "affine";
} else if (nKind == INTEL_AFFINE_DIAGONAL) {
sKind = "diagonal";
} else if (nKind == INTEL_INTERLEAVE) {
sKind = "interleave";
} else if (nKind == INTEL_DEINTERLEAVE) {
sKind = "deinterleave";
} else {
fprintf(stderr, "Error: nLayerKind not supported in LayerName()!\n");
exit(EXIT_FAILURE);
}
return (sKind);
}
uint32_t NumInputs(gna_nnet_layer_t *pLayer) {
const auto nKind = pLayer->nLayerKind;
uint32_t nInputs;
if ((nKind == INTEL_AFFINE) || (nKind == INTEL_AFFINE_DIAGONAL)) {
nInputs = pLayer->nInputRows;
} else if (nKind == INTEL_INTERLEAVE) {
nInputs = pLayer->nInputColumns;
} else if (nKind == INTEL_DEINTERLEAVE) {
nInputs = pLayer->nInputRows;
} else {
fprintf(stderr, "Error: nLayerKind not supported in NumInputs()!\n");
exit(EXIT_FAILURE);
}
return (nInputs);
}
uint32_t NumOutputs(gna_nnet_layer_t *pLayer) {
const auto nKind = pLayer->nLayerKind;
uint32_t nOutputs;
if ((nKind == INTEL_AFFINE) || (nKind == INTEL_AFFINE_DIAGONAL)) {
nOutputs = pLayer->nOutputRows;
} else if (nKind == INTEL_INTERLEAVE) {
nOutputs = pLayer->nOutputRows;
} else if (nKind == INTEL_DEINTERLEAVE) {
nOutputs = pLayer->nOutputColumns;
} else {
fprintf(stderr, "Error: nLayerKind not supported in NumInputs()!\n");
exit(EXIT_FAILURE);
}
return (nOutputs);
}
uint32_t NumGroupSize(gna_nnet_layer_t *pLayer) {
const auto nKind = pLayer->nLayerKind;
uint32_t nGroupSize;
if ((nKind == INTEL_AFFINE) || (nKind == INTEL_AFFINE_DIAGONAL)) {
nGroupSize = pLayer->nOutputColumns;
} else if (nKind == INTEL_INTERLEAVE) {
nGroupSize = pLayer->nOutputColumns;
} else if (nKind == INTEL_DEINTERLEAVE) {
nGroupSize = pLayer->nOutputRows;
} else {
fprintf(stderr, "Error: nLayerKind not supported in NumGroupSize()!\n");
exit(EXIT_FAILURE);
}
return (nGroupSize);
}


@ -69,7 +69,7 @@ union {
uint8_t c[2];
} constexpr static LECheck {1};
bool is_little_endian() {
inline bool is_little_endian() {
return LECheck.c[0] == 1;
}
@ -195,7 +195,7 @@ getOffsetFromBase(field, #field)
#if GNA_LIB_VER == 2
bool IsEmptyTensor(const Gna2Tensor& t) {
inline bool IsEmptyTensor(const Gna2Tensor& t) {
return t.Type == Gna2DataTypeNone &&
t.Data == nullptr &&
t.Layout[0] == '\0' &&
@ -203,7 +203,7 @@ bool IsEmptyTensor(const Gna2Tensor& t) {
t.Shape.NumberOfDimensions == 0;
}
const std::map<Gna2OperationType, std::vector<uint32_t>> GnaParamSize{
static const std::map<Gna2OperationType, std::vector<uint32_t>> GnaParamSize{
{Gna2OperationTypeFullyConnectedAffine, {sizeof(Gna2BiasMode), sizeof(uint32_t)}},
{Gna2OperationTypeConvolution, {
sizeof(Gna2Shape),


@ -77,7 +77,7 @@
#if GNA_LIB_VER == 2
#include <gna2-model-api.h>
uint32_t ToByteSize(const Gna2DataType type) {
inline uint32_t ToByteSize(const Gna2DataType type) {
switch (type) {
case Gna2DataTypeInt8:
case Gna2DataTypeUint8:


@ -48,6 +48,8 @@ void CNNFilter32(intel_dnn_component_t *component) {
}
}
namespace {
void CNNMaxPoolLegacy(intel_dnn_component_t *component, intel_dnn_number_type_t number_type, const bool sumPoolingOverRide) {
const uint32_t num_inputs = component->op.maxpool.inCHW[0] * component->op.maxpool.inCHW[1] * component->op.maxpool.inCHW[2];
const uint32_t in_c = component->op.maxpool.inCHW[0];
@ -128,14 +130,12 @@ void CNNMaxPoolLegacy(intel_dnn_component_t *component, intel_dnn_number_type_t
}
}
namespace {
// a1: fastest changing index
// A - size needed
template <typename T>
T getQubeIndex(T a1, T a2, T a3, T A2, T A3) {
return a1 * A2 * A3 + a2 * A3 + a3;
}
} // namespace
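As an aside, not part of the patch: for a row-major block with extents A1 x A2 x A3, the flat offset that getQubeIndex computes for coordinates (a1, a2, a3) is
\mathrm{flat}(a_1, a_2, a_3) = a_1 A_2 A_3 + a_2 A_3 + a_3, \qquad 0 \le a_i < A_i.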
float MaxPool2D32SingleHWC(const unsigned poolWinH, const unsigned poolWinW,
const float* input, const unsigned IH, const unsigned IW, const unsigned IC,
@ -184,8 +184,12 @@ void CNNMaxPool2DFloat(intel_dnn_component_t* component) {
}
}
} // namespace
#if GNA_LIB_VER == 2
namespace {
bool matchesPaddedArea(unsigned filterIndex, unsigned outputIndex, unsigned inputSize, unsigned paddingSize, unsigned stride) {
const auto paddedIndex = stride * outputIndex + filterIndex;
if (paddedIndex >= inputSize + 2 * paddingSize) {
@ -231,6 +235,8 @@ float CNN2DFilter32SingleHWC(const float bias, const float* filter, const unsign
return output;
}
} // namespace
void CNN2DFilter32(intel_dnn_component_t* component) {
float* ptr_filters = reinterpret_cast<float*>(component->op.conv2D.ptr_filters);
float* ptr_biases = reinterpret_cast<float*>(component->op.conv2D.ptr_biases);


@ -30,12 +30,12 @@
#include "round_float_define.hpp"
double first_deriv_tanh(const double x) { return(1.0 - tanh(x) * tanh(x)); }
double first_deriv_exp(const double x) { return(exp(x)); }
double first_deriv_log(const double x) { return(1.0 / x); }
double neglog(const double x) { return(-1.0*log(x)); }
double neghalflog(const double x) { return(-0.5*log(x)); }
double first_deriv_neglog(const double x) { return(-1.0 / x); }
double first_deriv_neghalflog(const double x) { return(-0.5 / x); }
inline double first_deriv_exp(const double x) { return(exp(x)); }
inline double first_deriv_log(const double x) { return(1.0 / x); }
inline double neglog(const double x) { return(-1.0*log(x)); }
inline double neghalflog(const double x) { return(-0.5*log(x)); }
inline double first_deriv_neglog(const double x) { return(-1.0 / x); }
inline double first_deriv_neghalflog(const double x) { return(-0.5 / x); }
double sigmoid(const double x) { return(0.5 * (1.0 + tanh(x / 2))); }
double first_deriv_sigmoid(const double x) { return(sigmoid(x) * (1.0 - sigmoid(x))); }
double softsign(const double x) { return(x / (1.0 + fabs(x))); }
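For reference, the two sigmoid lines above rely on standard identities (facts about the sigmoid, not something introduced by this diff):
\sigma(x) = \frac{1}{1 + e^{-x}} = \frac{1}{2}\Bigl(1 + \tanh\frac{x}{2}\Bigr), \qquad \sigma'(x) = \sigma(x)\,\bigl(1 - \sigma(x)\bigr),
which is exactly what sigmoid() and first_deriv_sigmoid() encode.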
@ -44,12 +44,12 @@ double relu(const double x) { if (x < 0) { return(0.0); } else { return(x); } }
double leaky_relu(const double x) { if (x < 0.0) { return(LEAKYRELU_SLOPE*x); } else { return(x); } }
double clipping(const double x, const double lbound, const double ubound) { return((x < lbound)?lbound:((x > ubound)?ubound:x)); }
double first_deriv_power(const double x, const std::tuple<double, double, double>& args) {
inline double first_deriv_power(const double x, const std::tuple<double, double, double>& args) {
//scale * exponent * (offset + scale * x)^(exponent - 1)
return (std::get<1>(args) * std::get<0>(args) * pow(std::get<2>(args) + std::get<1>(args) * x, std::get<0>(args) - 1));
}
double power(const double x, const std::tuple<double, double, double>& args) {
inline double power(const double x, const std::tuple<double, double, double>& args) {
return (pow(std::get<2>(args) + std::get<1>(args) * x, std::get<0>(args)));
}
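Reading the tuple as (exponent, scale, offset), which is what the two bodies above imply, the comment in first_deriv_power is the usual power rule:
p(x) = (\mathrm{offset} + \mathrm{scale}\,x)^{\mathrm{exponent}}, \qquad p'(x) = \mathrm{scale}\cdot\mathrm{exponent}\cdot(\mathrm{offset} + \mathrm{scale}\,x)^{\mathrm{exponent}-1}.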
@ -272,7 +272,7 @@ double calculate_error_pct(const DnnActivation& activation_type,
return(100.0 * fabs(offset) / (max_val - min_val));
}
double get_break_bound(const DnnActivation& activation_type) {
inline double get_break_bound(const DnnActivation& activation_type) {
double break_bound = 0.0;
switch (activation_type) {
case kActExp:
@ -287,7 +287,7 @@ double get_break_bound(const DnnActivation& activation_type) {
return break_bound;
}
bool split_search(const DnnActivation& activation_type,
inline bool split_search(const DnnActivation& activation_type,
const double l_bound,
const double u_bound) {
bool is_split = false;


@ -22,6 +22,8 @@ NGRAPH_RTTI_DEFINITION(HandleTransposesAroundMatMul, "HandleTransposesAroundMatM
NGRAPH_RTTI_DEFINITION(HandleTransposeBeforeMatMul, "HandleTransposeBeforeMatMul", 0);
NGRAPH_RTTI_DEFINITION(HandleTransposeAfterMatMul, "HandleTransposeAfterMatMul", 0);
namespace {
void ReplaceTransposeWithReshape(std::shared_ptr<ngraph::Node> transpose_node) {
auto shape = transpose_node->get_output_shape(0);
auto reshape_const = std::make_shared<ngraph::opset8::Constant>(ngraph::element::Type_t::i64,
@ -61,12 +63,14 @@ void InsertTranspose(std::shared_ptr<ngraph::Node> prev_node, const std::string&
}
}
static bool VerifyReshape(const ngraph::Output<ngraph::Node>& reshape_out) {
bool VerifyReshape(const ngraph::Output<ngraph::Node>& reshape_out) {
auto in_shape = reshape_out.get_node_shared_ptr()->get_input_shape(0);
auto out_shape = reshape_out.get_node_shared_ptr()->get_output_shape(0);
return in_shape[0] != out_shape[0];
}
} // namespace
HandleTransposeBeforeMatMul::HandleTransposeBeforeMatMul() {
auto constant = ngraph::pattern::wrap_type<ngraph::opset8::Constant>();
auto fq = ngraph::pattern::wrap_type<ngraph::opset8::FakeQuantize>({constant, ngraph::pattern::any_input(),

View File

@ -42,7 +42,7 @@ static bool shouldSplitCnn(const ngraph::Output<ngraph::Node>& node) {
return true;
}
std::shared_ptr<ngraph::Node> getConvForMatcher() {
static std::shared_ptr<ngraph::Node> getConvForMatcher() {
return ngraph::pattern::wrap_type<ngraph::opset7::Convolution>({ ngraph::pattern::any_input(),
ngraph::pattern::any_input() }, [](const ngraph::Output<ngraph::Node>& convolution) {
return shouldSplitCnn(convolution);


@ -18,7 +18,8 @@
#include "ie_itt.hpp"
#include "ngraph/opsets/opset6.hpp"
#include "ngraph/variant.hpp"
#include "openvino/pass/serialize.hpp"
#include "openvino/pass/manager.hpp"
#include "transformations/hash.hpp"
#include "transformations/rt_info/fused_names_attribute.hpp"
#include "transformations/rt_info/primitives_priority_attribute.hpp"
@ -29,7 +30,7 @@
namespace InferenceEngine {
template <typename T>
static std::size_t hash_combine(std::size_t seed, const T& a) {
static uint64_t hash_combine(uint64_t seed, const T& a) {
// Hash combine formula from boost
return seed ^ (std::hash<T>()(a) + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}
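For context on the std::size_t to uint64_t change above, here is a minimal self-contained sketch of the same boost-style combine with a fixed 64-bit accumulator; the file name and option value fed into it are purely illustrative and not part of the patch:

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

// Boost-style combine: folds the hash of `a` into `seed`.
// Keeping the accumulator at 64 bits makes the result width
// independent of the platform's std::size_t.
template <typename T>
static uint64_t hash_combine(uint64_t seed, const T& a) {
    return seed ^ (std::hash<T>()(a) + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

int main() {
    uint64_t seed = 0;
    seed = hash_combine(seed, std::string("model.xml"));  // e.g. a file path
    seed = hash_combine(seed, 42);                         // e.g. an option value
    std::cout << "combined hash: " << seed << "\n";
    return 0;
}

Widening the accumulator keeps the cache-key hash the same width on 32-bit and 64-bit builds, although the per-element std::hash values themselves remain implementation-defined.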
@ -39,34 +40,10 @@ static int32_t as_int32_t(T v) {
return static_cast<int32_t>(v);
}
class OstreamHashWrapper final : public std::streambuf {
std::size_t m_res = 0;
public:
std::size_t getResult() const {
return m_res;
}
std::streamsize xsputn(const char* s, std::streamsize n) override {
const std::int64_t* intS = (const std::int64_t*)s;
std::streamsize n64 = n / sizeof(std::int64_t);
std::streamsize i = 0;
// Using 64-bit values executes much faster than char
while (i++ < n64) {
m_res += *(intS++);
}
std::streamsize rest = n % sizeof(std::int64_t);
for (i = 0; i < rest; i++) {
m_res += s[n - rest + i];
}
return n;
}
};
//////////////////////////////////////////////////
std::string NetworkCompilationContext::calculateFileInfo(const std::string& filePath) {
size_t seed = 0;
uint64_t seed = 0;
auto absPath = filePath;
try {
absPath = FileUtils::absoluteFilePath(filePath);
@ -88,23 +65,17 @@ std::string NetworkCompilationContext::calculateFileInfo(const std::string& file
std::string NetworkCompilationContext::computeHash(const CNNNetwork& network,
const std::map<std::string, std::string>& compileOptions) {
OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::IE_LT, "NetworkCompilationContext::computeHash - CNN");
OstreamHashWrapper xmlHash;
OstreamHashWrapper binHash;
std::ostream xml(&xmlHash);
std::ostream bin(&binHash);
IE_ASSERT(network.getFunction());
// 1. Serialize
uint64_t seed = 0;
// 1. Calculate hash on function
CNNNetwork net(network);
ov::pass::Serialize serializer(xml, bin);
serializer.run_on_function(net.getFunction());
ov::pass::Manager m;
m.register_pass<ov::pass::Hash>(seed);
m.run_passes(net.getFunction());
// 2. Compute hash on serialized data and options
size_t seed = 0;
seed = hash_combine(seed, xmlHash.getResult());
seed = hash_combine(seed, binHash.getResult());
for (const auto& kvp : compileOptions) {
seed = hash_combine(seed, kvp.first + kvp.second);
}
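For context, the removed stream-based hashing (OstreamHashWrapper plus pass::Serialize) is replaced by a single graph pass. A minimal sketch of that pattern, assuming only what the hunk above shows (ov::pass::Manager, an ov::pass::Hash pass that writes into a uint64_t seed, and the hash_combine helper):

// Sketch, not part of the commit: hash a function, then fold the compile
// options into the same seed, as the updated computeHash does.
static uint64_t hashFunctionAndOptions(const std::shared_ptr<ngraph::Function>& f,
                                       const std::map<std::string, std::string>& options) {
    uint64_t seed = 0;
    ov::pass::Manager manager;
    manager.register_pass<ov::pass::Hash>(seed);  // the pass stores the graph hash in seed
    manager.run_passes(f);
    for (const auto& kvp : options)
        seed = hash_combine(seed, kvp.first + kvp.second);
    return seed;
}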
@ -163,7 +134,7 @@ std::string NetworkCompilationContext::computeHash(const CNNNetwork& network,
std::string NetworkCompilationContext::computeHash(const std::string& modelName,
const std::map<std::string, std::string>& compileOptions) {
OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::IE_LT, "NetworkCompilationContext::computeHash - ModelName");
size_t seed = 0;
uint64_t seed = 0;
try {
seed = hash_combine(seed, FileUtils::absoluteFilePath(modelName));
} catch (...) {

View File

@ -205,15 +205,19 @@ const SizeVector& Data::getDims() const {
namespace InferenceEngine {
INFERENCE_ENGINE_API_CPP(CNNLayerWeakPtr&) getCreatorLayer(const DataPtr& data) {
INFERENCE_ENGINE_API_CPP(CNNLayerWeakPtr&) getCreatorLayer(const DataPtr& data);
INFERENCE_ENGINE_API_CPP(std::map<std::string, CNNLayerPtr>&) getInputTo(const DataPtr& data);
INFERENCE_ENGINE_API_CPP(std::map<std::string, CNNLayerPtr>&) getInputTo(Data* data);
CNNLayerWeakPtr& getCreatorLayer(const DataPtr& data) {
return data->_impl->creatorLayer;
}
INFERENCE_ENGINE_API_CPP(std::map<std::string, CNNLayerPtr>&) getInputTo(const DataPtr& data) {
std::map<std::string, CNNLayerPtr>& getInputTo(const DataPtr& data) {
return data->_impl->inputTo;
}
INFERENCE_ENGINE_API_CPP(std::map<std::string, CNNLayerPtr>&) getInputTo(Data* data) {
std::map<std::string, CNNLayerPtr>& getInputTo(Data* data) {
return data->_impl->inputTo;
}

View File

@ -38,7 +38,7 @@ void __TBB_internal_restore_affinity(binding_handler* handler_ptr, int slot_num)
int __TBB_internal_get_default_concurrency(int numa_id, int core_type_id, int max_threads_per_core);
}
int get_processors_group_num() {
static int get_processors_group_num() {
# if defined(_WIN32) || defined(_WIN64)
SYSTEM_INFO si;
GetNativeSystemInfo(&si);
@ -57,7 +57,7 @@ int get_processors_group_num() {
return 1;
}
bool is_binding_environment_valid() {
static bool is_binding_environment_valid() {
# if defined(_WIN32) && !defined(_WIN64)
static bool result = [] {
// For 32-bit Windows applications, process affinity masks can only support up to 32 logical CPUs.
@ -79,7 +79,7 @@ static int* numa_nodes_indexes = nullptr;
static int core_types_count = 0;
static int* core_types_indexes = nullptr;
void initialize_system_topology() {
static void initialize_system_topology() {
static std::once_flag is_topology_initialized;
std::call_once(is_topology_initialized, [&] {
@ -120,7 +120,7 @@ void binding_observer::on_scheduler_exit(bool) {
detail::__TBB_internal_restore_affinity(my_binding_handler, tbb::this_task_arena::current_thread_index());
}
binding_oberver_ptr construct_binding_observer(tbb::task_arena& ta, int num_slots, const constraints& c) {
static binding_oberver_ptr construct_binding_observer(tbb::task_arena& ta, int num_slots, const constraints& c) {
binding_oberver_ptr observer{};
if (detail::is_binding_environment_valid() &&
((c.core_type >= 0 && info::core_types().size() > 1) || (c.numa_id >= 0 && info::numa_nodes().size() > 1) ||
@ -131,10 +131,8 @@ binding_oberver_ptr construct_binding_observer(tbb::task_arena& ta, int num_slot
return observer;
}
# endif /*USE_TBBBIND_2_4*/
# if TBB_NUMA_SUPPORT_PRESENT
tbb::task_arena::constraints convert_constraints(const custom::task_arena::constraints& c) {
# elif TBB_NUMA_SUPPORT_PRESENT
static tbb::task_arena::constraints convert_constraints(const custom::task_arena::constraints& c) {
tbb::task_arena::constraints result{};
# if TBB_HYBRID_CPUS_SUPPORT_PRESENT
result.core_type = c.core_type;

View File

@ -38,6 +38,8 @@ using namespace std;
using namespace InferenceEngine;
using namespace InferenceEngine::details;
namespace {
std::map<CNNLayer*, bool> getConstLayersMap(const CNNNetwork& network) {
std::map<CNNLayer*, bool> result;
@ -86,6 +88,8 @@ std::map<CNNLayer*, bool> getConstLayersMap(const CNNNetwork& network) {
return result;
}
} // namespace
CNNNetworkImpl::CNNNetworkImpl() {}
CNNNetworkImpl::CNNNetworkImpl(const CNNNetwork & cnnnetwork) {

View File

@ -96,7 +96,7 @@ std::string asString<float>(const float& value) {
} // namespace Builder
namespace InferenceEngine {
namespace details {
namespace {
// helper for adding creators with a specific exception
#define REQUIRED_IE_CONVERSION_CREATOR(type_name, ie_type_name)\
@ -348,12 +348,12 @@ public:
void on_adapter(const std::string& name, ::ngraph::ValueAccessor<std::vector<int32_t>>& adapter) override {
auto shape = adapter.get();
params[name] = joinVec(shape);
params[name] = details::joinVec(shape);
}
void on_adapter(const std::string& name, ::ngraph::ValueAccessor<std::vector<int64_t>>& adapter) override {
auto shape = adapter.get();
params[name] = joinVec(shape);
params[name] = details::joinVec(shape);
}
void on_adapter(const std::string& name, ::ngraph::ValueAccessor<double>& adapter) override {
@ -382,7 +382,7 @@ public:
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<float>>& adapter) override {
auto data = adapter.get();
params[name] = joinVec(data);
params[name] = details::joinVec(data);
}
void on_adapter(const std::string& name, ::ngraph::ValueAccessor<std::shared_ptr<ngraph::Function>>& adapter) override {
@ -396,7 +396,7 @@ private:
std::map<std::string, CreatorFor> creators;
};
void InferenceEngine::details::CNNLayerCreator::on_adapter(const std::string& name,
void CNNLayerCreator::on_adapter(const std::string& name,
::ngraph::ValueAccessor<void>& adapter) {
if (auto a = ::ngraph::as_type<::ngraph::AttributeAdapter<::ngraph::element::Type>>(&adapter)) {
auto type = static_cast<::ngraph::element::Type&>(*a);
@ -411,13 +411,13 @@ void InferenceEngine::details::CNNLayerCreator::on_adapter(const std::string& na
params[name] = dims;
} else if (auto a = ::ngraph::as_type<::ngraph::AttributeAdapter<::ngraph::Shape>>(&adapter)) {
auto shape = static_cast<::ngraph::Shape&>(*a);
params[name] = joinVec(shape);
params[name] = details::joinVec(shape);
} else if (auto a = ::ngraph::as_type<::ngraph::AttributeAdapter<::ngraph::Strides>>(&adapter)) {
auto shape = static_cast<::ngraph::Strides&>(*a);
params[name] = joinVec(shape);
params[name] = details::joinVec(shape);
} else if (auto a = ::ngraph::as_type<::ngraph::AttributeAdapter<std::vector<size_t>>>(& adapter)) {
auto data = a->get();
params[name] = joinVec(data);
params[name] = details::joinVec(data);
} else if (auto a = ::ngraph::as_type<::ngraph::AttributeAdapter<std::shared_ptr<::ngraph::Variable>>>(& adapter)) {
params[name] = a->get()->get_info().variable_id;
} else if (auto a = ::ngraph::as_type<::ngraph::AttributeAdapter<std::vector<std::shared_ptr<
@ -435,14 +435,14 @@ void InferenceEngine::details::CNNLayerCreator::on_adapter(const std::string& na
}
} else if (const auto& a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::element::TypeVector>>(& adapter)) {
const auto & attrs = a->get();
params[name] = joinVec(attrs);
params[name] = details::joinVec(attrs);
} else {
IE_THROW() << "Error converting ngraph to CNN network. "
"Attribute adapter can not be found for " << name << " parameter";
}
}
InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr<::ngraph::Node>& node): node(node) {
CNNLayerCreator::CNNLayerCreator(const std::shared_ptr<::ngraph::Node>& node): node(node) {
addSpecificCreator({"Parameter"}, [](const std::shared_ptr<::ngraph::Node>& node,
const std::map<std::string, std::string>& params) -> CNNLayerPtr {
LayerParams attrs = {node->get_friendly_name(), "Input",
@ -635,7 +635,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
res->params["pad_value"] = Builder::asString(castedLayer->get_pad_value());
const auto weightsNode = castedLayer->input(1).get_source_output().get_node_shared_ptr();
InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights);
addBlob(weightsNode, res, InferenceEngine::details::weights);
return res;
});
@ -1776,7 +1776,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
});
}
CNNLayerPtr InferenceEngine::details::CNNLayerCreator::create() {
CNNLayerPtr CNNLayerCreator::create() {
LayerParams attrs = {node->get_friendly_name(), node->description(),
details::convertPrecision(node->get_output_element_type(0))};
if (creators.find(node->description()) != creators.end())
@ -1787,6 +1787,9 @@ CNNLayerPtr InferenceEngine::details::CNNLayerCreator::create() {
return res;
}
} // namespace
namespace details {
void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function> &graph,
const CNNNetwork &network,
CNNNetworkImpl* cnnNetworkImpl,
@ -2165,7 +2168,7 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function> &graph,
const CNNNetwork &network,
bool keep_constant_inputs) {
auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>();
auto cnnNetworkImpl = std::make_shared<CNNNetworkImpl>();
convertFunctionToICNNNetwork(graph, network, cnnNetworkImpl.get(), keep_constant_inputs);
return cnnNetworkImpl;
}

View File

@ -24,7 +24,7 @@ using namespace InferenceEngine::details;
namespace InferenceEngine {
bool isForFakeQuantize(const CNNLayer& layer) {
inline bool isForFakeQuantize(const CNNLayer& layer) {
for (const DataPtr data : layer.outData) {
for (const auto it : getInputTo(data)) {
const CNNLayerPtr childLayer = it.second;

View File

@ -293,7 +293,7 @@ static RuleClassSet classifyOutputRules(const TensorIterator& ti) {
* @param master
* @param slave
*/
void CombineData(DataPtr& master, DataPtr& slave) {
static void CombineData(DataPtr& master, DataPtr& slave) {
for (auto& kvp : getInputTo(slave)) {
auto& slave_layer = kvp.second;
for (auto& slv_ins_wptr : slave_layer->insData) {
@ -325,7 +325,7 @@ void SaveOutputDataName(InferenceEngine::DataPtr in_data, InferenceEngine::DataP
* void SaveOutputDataName(InferenceEngine::DataPtr in_data, InferenceEngine::DataPtr out_data, NET &net), where
* NET = CNNNetwork
*/
void SaveOutputDataName(InferenceEngine::DataPtr in_data, InferenceEngine::DataPtr out_data, CNNNetwork& net) {
static void SaveOutputDataName(InferenceEngine::DataPtr in_data, InferenceEngine::DataPtr out_data, CNNNetwork& net) {
if (getInputTo(out_data).empty()) {
InferenceEngine::OutputsDataMap outputs_data_map = net.getOutputsInfo();
auto out_data_name = out_data->getName();
@ -522,7 +522,7 @@ bool convertToRNNSeq(CNNLayerPtr cur, const N& net) {
return true;
}
bool unrollTI(CNNLayerPtr cur, CNNNetwork& net) {
static bool unrollTI(CNNLayerPtr cur, CNNNetwork& net) {
IE_SUPPRESS_DEPRECATED_START
auto & icnnnet = static_cast<ICNNNetwork&>(net);
IE_SUPPRESS_DEPRECATED_END
@ -1282,7 +1282,7 @@ std::vector<CNNLayerPtr> TopolSort(const details::CNNSubnet& net) {
return details::CNNSubnetSortTopologically(net);
}
void restore_net_consistency(CNNNetwork& net) {
static void restore_net_consistency(CNNNetwork& net) {
IE_SUPPRESS_DEPRECATED_START
auto & icnnnet = static_cast<ICNNNetwork&>(net);
auto inet = dynamic_cast<details::CNNNetworkImpl*>(&icnnnet);

View File

@ -80,7 +80,7 @@ bool op::Eltwise::visit_attributes(AttributeVisitor &visitor) {
visitor.on_attribute("operation", eltwise_type);
return true;
}
std::ostream &operator<<(std::ostream &s, const ELTWISE_TYPE &type) {
std::ostream &ngraph::operator<<(std::ostream &s, const ELTWISE_TYPE &type) {
return s << as_string(type);
}

View File

@ -12,7 +12,7 @@
using namespace std;
using namespace ngraph;
element::Type getMaxBitwidth(const std::vector<element::Type>& types) {
inline element::Type getMaxBitwidth(const std::vector<element::Type>& types) {
if (types.empty()) {
return element::undefined;
}

View File

@ -13,6 +13,8 @@
#include <legacy/ngraph_ops/proposal_ie.hpp>
#include <ngraph/rt_info.hpp>
namespace {
bool convert_to_proposal_ie(std::shared_ptr<ngraph::op::v0::Proposal> proposal, bool infer_probs = false) {
ngraph::Output<ngraph::Node> last; // 2D tensor of size [1, 3-4] with im_info will be retrieved from this node
ngraph::NodeVector ops_to_replace, new_ops;
@ -46,6 +48,8 @@ bool convert_to_proposal_ie(std::shared_ptr<ngraph::op::v0::Proposal> proposal,
return true;
}
} // namespace
NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvertProposalToLegacyMatcher, "ConvertProposalToLegacyMatcher", 0);
ngraph::pass::ConvertProposalToLegacyMatcher::ConvertProposalToLegacyMatcher() {

View File

@ -16,6 +16,8 @@
using namespace ngraph;
namespace {
template <class T>
std::shared_ptr<Node> convert(const Output<Node> & data, std::shared_ptr<T> node, NodeVector & new_ops);
@ -149,6 +151,8 @@ matcher_pass_callback get_callback() {
};
}
} // namespace
NGRAPH_RTTI_DEFINITION(ngraph::pass::Reshape1DOps, "Reshape1DOps", 0);
NGRAPH_RTTI_DEFINITION(ngraph::pass::Reshape1DConvolution, "Reshape1DConvolution", 0);

View File

@ -22,6 +22,8 @@ namespace low_precision {
NGRAPH_RTTI_DEFINITION(AddTransformation, "AddTransformation", 0);
namespace {
std::shared_ptr<opset1::Subtract> replaceToSubtract(const std::shared_ptr<Node>& op) {
// TODO: separate this part to standalone transformation: AddToSubtractTransformation
// motivation:
@ -90,6 +92,8 @@ std::shared_ptr<opset1::Subtract> fuseWithSubtract(const std::shared_ptr<Node>&
return newSubtract;
}
} // namespace
AddTransformation::AddTransformation(const Params& params) : EltwiseBaseTransformation(params) {
auto matcher = ngraph::pattern::wrap_type<opset1::Add>();

View File

@ -37,6 +37,7 @@ FakeQuantizeDecompositionTransformation::FakeQuantizeDecompositionTransformation
}
namespace fq_decomposition {
namespace {
// get precision details, depends on:
// 1. FakeQuantize operation parameters (QuantizationDetails::getDetails & LayerTransformation::getPrecisionDetails)
@ -266,6 +267,7 @@ std::tuple<std::shared_ptr<Node>, std::shared_ptr<Node>> decomposeFakeQuantize(
return std::make_tuple(dequantize, newFQ);
}
} // namespace
} // namespace fq_decomposition
bool FakeQuantizeDecompositionTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher& m) {

View File

@ -38,6 +38,8 @@ FuseConvertTransformation::FuseConvertTransformation(const Params& params) : Lay
this->register_matcher(matcher, callback);
}
namespace {
std::shared_ptr<Node> removeConvertIfPossibleForSubtract(
const std::shared_ptr<opset1::Convert>& convert,
const std::shared_ptr<opset1::Subtract>& subtract) {
@ -56,6 +58,8 @@ std::shared_ptr<Node> removeConvertIfPossibleForSubtract(
return newSubtract;
}
} // namespace
bool FuseConvertTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
const auto op = m.get_match_root();
if (!canBeTransformed(context, op)) {

View File

@ -39,6 +39,7 @@ bool FuseFakeQuantizeTransformation::transform(TransformationContext& context, n
}
namespace fuse_fq {
namespace {
std::shared_ptr<Node> updateShape(std::shared_ptr<Node> op, const PartialShape& targetPShape) {
assert(targetPShape.is_static());
@ -111,6 +112,7 @@ bool eltwiseWithConstant(const std::shared_ptr<Node>& eltwise) {
return getDataNode(eltwise) != nullptr;
}
} // namespace
} // namespace fuse_fq
std::shared_ptr<opset1::FakeQuantize> FuseFakeQuantizeTransformation::handle(

View File

@ -18,6 +18,7 @@ using namespace ngraph;
NGRAPH_RTTI_DEFINITION(ngraph::pass::low_precision::PullReshapeThroughDequantization, "PullReshapeThroughDequantizationFusion", 0);
namespace pull_reshape_through_dequantization {
namespace {
std::shared_ptr<Node> moveThroughElementwise(const std::shared_ptr<Node>& reshape, const std::shared_ptr<Node>& elementwise) {
const auto reshapeValues = reshape->get_input_node_shared_ptr(1);
@ -87,6 +88,7 @@ void fuseConstant(const std::shared_ptr<Node>& reshape, const std::shared_ptr<No
copy_runtime_info({ constant, reshape }, newConstant);
}
} // namespace
} // namespace pull_reshape_through_dequantization
ngraph::pass::low_precision::PullReshapeThroughDequantization::PullReshapeThroughDequantization(

View File

@ -19,6 +19,7 @@ using namespace ngraph;
NGRAPH_RTTI_DEFINITION(ngraph::pass::low_precision::PullTransposeThroughDequantization, "PullTransposeThroughDequantization", 0);
namespace pull_transpose_through_dequantization {
namespace {
std::shared_ptr<Node> moveThroughElementwise(const std::shared_ptr<Node>& transpose, const std::shared_ptr<Node>& elementwise) {
const auto transposeValues = transpose->get_input_node_shared_ptr(1);
@ -85,6 +86,7 @@ void fuseConstant(const std::shared_ptr<Node>& transpose, const std::shared_ptr<
copy_runtime_info({ constant, transpose }, newConstant);
}
} // namespace
} // namespace pull_transpose_through_dequantization
ngraph::pass::low_precision::PullTransposeThroughDequantization::PullTransposeThroughDequantization(

View File

@ -54,6 +54,8 @@ ReshapeTransformation::ReshapeTransformation(const Params& params) : LayerTransf
this->register_matcher(m, callback);
}
namespace {
void reshapeDequantizationConstant(const std::shared_ptr<opset1::Reshape>& reshape) {
// Reshape dequantization operation Constant.
// 1. Calculate result dequantization Constant shape for broadcast based on original dequantization Constant shape and Reshape output.
@ -141,6 +143,8 @@ void reshapeDequantizationConstant(const std::shared_ptr<opset1::Reshape>& resha
}
}
} // namespace
bool ReshapeTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
std::shared_ptr<opset1::Reshape> reshape = ov::as_type_ptr<opset1::Reshape>(m.get_match_root());
if (NetworkHelper::isConstantPath(reshape)) {
@ -161,7 +165,7 @@ bool ReshapeTransformation::isPrecisionPreserved(std::shared_ptr<Node> op) const
return true;
}
size_t getLastNotBroadcastedDimension(const Shape& shape) {
inline size_t getLastNotBroadcastedDimension(const Shape& shape) {
for (int i = static_cast<int>(shape.size()) - 1; i >= 0; --i) {
if (shape[i] != 1ul) {
return i;
@ -170,7 +174,7 @@ size_t getLastNotBroadcastedDimension(const Shape& shape) {
return 0;
}
size_t getFirstChangedDimension(const PartialShape& shape1, const PartialShape& shape2) {
inline size_t getFirstChangedDimension(const PartialShape& shape1, const PartialShape& shape2) {
const size_t minSize = std::min(shape1.rank().get_length(), shape2.rank().get_length());
size_t i = 0;
for (; i < minSize; ++i) {

View File

@ -16,6 +16,8 @@ namespace low_precision {
NGRAPH_RTTI_DEFINITION(ngraph::pass::low_precision::StridedSliceTransformation, "StridedSliceTransformation", 0);
namespace {
std::shared_ptr<opset1::Constant> stridedSliceDeqConstant(
const std::shared_ptr<ngraph::Node> strSlice,
const std::shared_ptr<ngraph::Node> dequantizaitonConstant) {
@ -74,6 +76,8 @@ std::shared_ptr<opset1::Constant> stridedSliceDeqConstant(
return ov::as_type_ptr<opset1::Constant>(NetworkHelper::toScalarIfPossible(result));
}
} // namespace
StridedSliceTransformation::StridedSliceTransformation(const Params& params) : LayerTransformation(params) {
auto matcher = ngraph::pattern::wrap_type<opset1::StridedSlice>();

View File

@ -33,6 +33,8 @@ TransposeTransformation::TransposeTransformation(const Params& params) : LayerTr
this->register_matcher(m, callback);
}
namespace {
void transposeDequantizationConstant(std::shared_ptr<Node>& transpose) {
const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(transpose);
@ -79,6 +81,8 @@ void transposeDequantizationConstant(std::shared_ptr<Node>& transpose) {
}
}
} // namespace
bool TransposeTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
std::shared_ptr<Node> transpose = m.get_match_root();
if (!canBeTransformed(context, transpose)) {

View File

@ -486,7 +486,7 @@ void MKLDNNGraph::InitEdges() {
std::string convertName = edge->getParent()->getName() + "_" +
inDesc.getPrecision().name() + "_" + outDesc.getPrecision().name();
auto convertNode = std::make_shared<MKLDNNConvertNode>(inDesc.getShape().getStaticDims(), inDesc.getPrecision(), outDesc.getPrecision(),
auto convertNode = std::make_shared<MKLDNNConvertNode>(inDesc.getShape(), inDesc.getPrecision(), outDesc.getPrecision(),
convertName, this->getEngine(), this->weightsCache);
convertNode->setDescs(inDesc, outDesc);
InsertNode(edge, convertNode, true);

View File

@ -1609,7 +1609,30 @@ void MKLDNNGraphOptimizer::FusePerformedAsScaleShiftAndFakeQuantize(MKLDNNGraph
std::vector<float> scalesBuffer;
std::vector<float> shiftsBuffer;
parent->fillScalesAndShifts(parent->getParentEdgesAtPort(1 - getConstPort(parent))[0]->getParent().get(), scalesBuffer, shiftsBuffer, 1);
auto parentEltwise = std::dynamic_pointer_cast<MKLDNNEltwiseNode>(parent);
if (!parentEltwise) {
IE_THROW() << "Cannot cast " << parent->getName() << " to Eltwise node";
}
std::tie(scalesBuffer, shiftsBuffer) = parentEltwise->getScalesAndShifts(parent->getParentEdgesAtPort(1 - getConstPort(parent))[0]->getParent().get());
const auto &outputShape = child->getOutputShapeAtPort(0);
VectorDims outputDims = outputShape.getDims();
const size_t channelPos = outputDims.size() > 1 ? 1 : 0;
if (outputShape.isDynamic()) {
if (outputDims[channelPos] == Shape::UNDEFINED_DIM) {
if (scalesBuffer.size() > 1) {
outputDims[channelPos] = scalesBuffer.size();
} else if (shiftsBuffer.size() > 1) {
outputDims[channelPos] = shiftsBuffer.size();
} else {
return false;
}
}
}
scalesBuffer = makeAlignedBuffer(outputDims[channelPos], scalesBuffer, 1);
shiftsBuffer = makeAlignedBuffer(outputDims[channelPos], shiftsBuffer, 1);
for (int i = 0; i < scalesBuffer.size(); i++)
if (scalesBuffer[i] == 0.f)

View File

@ -503,8 +503,9 @@ void MKLDNNNode::execute(mkldnn::stream strm) {
}
void MKLDNNNode::executeDynamic(mkldnn::stream strm) {
if (needShapeInfer())
if (needShapeInfer()) {
redefineOutputMemory(shapeInfer());
}
if (needPrepareParams()) {
IE_ASSERT(inputShapesDefined()) << "Can't prepare params for " << getTypeStr() << " node with name: " << getName() <<
" since the input shapes are not defined.";
@ -1045,7 +1046,7 @@ Layout MKLDNNNode::getWeightsLayoutByDims(SizeVector dims, bool isGrouped) {
}
}
void MKLDNNNode::appendPostOps(mkldnn::post_ops& ops, bool initAsBinary, bool initBinaryMemory) {
void MKLDNNNode::appendPostOps(mkldnn::post_ops& ops, const VectorDims &postOpDims, int align, bool initAsBinary, bool initBinaryMemory) {
IE_THROW() << "Fusing of " << this->getType() << " operation is not implemented";
}
@ -1192,7 +1193,7 @@ bool MKLDNNNode::canBePerformedAsScaleShift(const MKLDNNNode *parentNode) const
if (i == fusingPort)
continue;
auto& weightShape = getInputShapeAtPort(i).getDims();
if (getParentEdgesAtPort(i)[0]->getParent()->getChildEdges().size() != 1 || !isPerTensorOrPerChannelBroadcastable(dataShape, weightShape))
if (getParentEdgesAtPort(i)[0]->getParent()->getChildEdges().size() != 1 || !isPerTensorOrPerChannelBroadcastable(dataShape, weightShape, true))
return false;
}
return true;
@ -1213,6 +1214,66 @@ bool MKLDNNNode::canBePerformedAsScaleShift(const MKLDNNNode *parentNode) const
|| isConvertablePowerStatic();
}
std::pair<std::vector<float>, std::vector<float>> MKLDNNNode::getScalesAndShifts(const MKLDNNNode *parentNode) const {
std::vector<float> scales, shifts;
const auto fillValuesFrom = [&](const MKLDNNNodePtr& constInput, std::vector<float>& buffer) {
auto *constInputNode = dynamic_cast<MKLDNNInputNode *>(constInput.get());
auto constBlob = constInputNode->getMemoryPtr();
const auto elementsCount = constBlob->GetDescWithType<BlockedMemoryDesc>()->getPaddedElementsCount();
buffer.resize(elementsCount);
cpu_convert(constBlob->GetPtr(),
&buffer[0],
MKLDNNExtensionUtils::DataTypeToIEPrecision(constBlob->GetDataType()),
Precision::FP32,
elementsCount);
};
const auto constPort = getParentEdgesAtPort(0)[0]->getParent().get() == parentNode ? 1 : 0;
if (one_of(getAlgorithm(), EltwiseMultiply, EltwiseDivide, EltwisePrelu)) {
fillValuesFrom(getParentEdgesAtPort(constPort)[0]->getParent(), scales);
} else if (one_of(getAlgorithm(), EltwiseAdd, EltwiseSubtract)) {
fillValuesFrom(getParentEdgesAtPort(constPort)[0]->getParent(), shifts);
} else if (one_of(getAlgorithm(), EltwiseMulAdd)) {
fillValuesFrom(getParentEdgesAtPort(1)[0]->getParent(), scales);
fillValuesFrom(getParentEdgesAtPort(2)[0]->getParent(), shifts);
} else if (one_of(getAlgorithm(), EltwisePowerStatic)) {
const auto power = dynamic_cast<const MKLDNNEltwiseNode *>(this);
if (!power) {
IE_THROW() << "Cannot cast " << getName() << " to MKLDNNEltwiseNode";
}
scales.push_back(power->getBeta());
shifts.push_back(power->getGamma());
} else {
IE_THROW() << "Can't fill scale and shifts for node: " << getName() << " with type: " << NameFromType(getType());
}
switch (getAlgorithm()) {
case EltwiseAdd: {
scales.resize(shifts.size(), 1.0f);
break;
}
case EltwiseSubtract: {
scales.resize(shifts.size(), 1.0f);
std::transform(shifts.begin(), shifts.end(), shifts.begin(), [](float shift){ return -1.0f * shift; });
break;
}
case EltwiseMultiply: {
shifts.resize(scales.size(), 0.0f);
break;
}
case EltwiseDivide: {
shifts.resize(scales.size(), 0.0f);
std::transform(scales.begin(), scales.end(), scales.begin(), [](float scale){ return 1.0f / scale; });
break;
}
default: break;
}
return {scales, shifts};
}
bool MKLDNNNode::inputShapesDefined() const {
for (size_t i = 0; i < getParentEdges().size(); i++) {
if (!getParentEdgesAtPort(i)[0]->getMemory().getDesc().isDefined())
@ -1307,86 +1368,6 @@ bool MKLDNNNode::canFuseSimpleOperation(const MKLDNNNodePtr& node) const {
return false;
}
void MKLDNNNode::fillScalesAndShifts(const MKLDNNNode *parentNode, std::vector<float> &scales, std::vector<float> &shifts, int align) {
scales.clear();
shifts.clear();
const auto fillValuesFrom = [&](const MKLDNNNodePtr& constInput, std::vector<float>& buffer) {
auto *constInputNode = dynamic_cast<MKLDNNInputNode *>(constInput.get());
auto constBlob = constInputNode->getMemoryPtr();
const auto elementsCount = constBlob->GetDescWithType<BlockedMemoryDesc>()->getPaddedElementsCount();
buffer.resize(elementsCount);
cpu_convert(constBlob->GetPtr(),
&buffer[0],
MKLDNNExtensionUtils::DataTypeToIEPrecision(constBlob->GetDataType()),
Precision::FP32,
elementsCount);
};
const size_t constPort = getParentEdgesAtPort(0)[0]->getParent().get() == parentNode ? 1 : 0;
if (one_of(getAlgorithm(), EltwiseMultiply, EltwiseDivide, EltwisePrelu)) {
fillValuesFrom(getParentEdgesAtPort(constPort)[0]->getParent(), scales);
} else if (one_of(getAlgorithm(), EltwiseAdd, EltwiseSubtract)) {
fillValuesFrom(getParentEdgesAtPort(constPort)[0]->getParent(), shifts);
} else if (one_of(getAlgorithm(), EltwiseMulAdd)) {
fillValuesFrom(getParentEdgesAtPort(1)[0]->getParent(), scales);
fillValuesFrom(getParentEdgesAtPort(2)[0]->getParent(), shifts);
} else if (one_of(getAlgorithm(), EltwisePowerStatic)) {
const auto power = dynamic_cast<const MKLDNNEltwiseNode *>(this);
if (!power) {
IE_THROW() << "Cannot cast " << getName() << " to MKLDNNEltwiseNode";
}
scales.push_back(power->getBeta());
shifts.push_back(power->getGamma());
} else {
IE_THROW() << "Can't fill scale and shifts for node: " << getName() << " with type: " << NameFromType(getType());
}
const size_t bufferSize = static_cast<size_t>(outputShapes[0].getStaticDims()[outputShapes[0].getRank() > 1 ? 1 : 0]);
if (align == -1) {
align = bufferSize;
}
const size_t bufferSizeAligned = rnd_up(bufferSize, static_cast<size_t>(align));
size_t initSize = scales.size();
if (initSize > 0) {
scales.resize(bufferSizeAligned, 0);
if (initSize == 1) {
std::fill(scales.begin() + 1, scales.begin() + bufferSize, scales[0]);
}
}
initSize = shifts.size();
if (initSize > 0) {
shifts.resize(bufferSizeAligned, 0);
if (initSize == 1) {
std::fill(shifts.begin() + 1, shifts.begin() + bufferSize, shifts[0]);
}
}
switch (getAlgorithm()) {
case EltwiseAdd: {
scales.resize(bufferSizeAligned, 1.0f);
break;
}
case EltwiseSubtract: {
scales.resize(bufferSizeAligned, 1.0f);
std::transform(shifts.begin(), shifts.end(), shifts.begin(), [](float shift){ return -1.0f * shift; });
break;
}
case EltwiseMultiply: {
shifts.resize(bufferSizeAligned, 0.0f);
break;
}
case EltwiseDivide: {
shifts.resize(bufferSizeAligned, 0.0f);
std::transform(scales.begin(), scales.end(), scales.begin(), [](float scale){ return 1.0f / scale; });
break;
}
default: break;
}
}
void MKLDNNNode::createShapeInferSubgraph(const std::shared_ptr<ngraph::Node>& op) {
ngraph::OutputVector inputsForShapeInfer;
for (size_t i = 0; i < inputShapes.size(); i++) {

View File

@ -556,10 +556,18 @@ public:
return outputShapes[port];
}
/**
* @brief Returns scales and shifts if the node can be executed as ScaleShift, otherwise throws an exception.
* If the node has only a scale or only a shift value, the missing values are filled with defaults,
* e.g. EltwiseAdd: shifts are filled from the constant input, scales are filled with the default value 1.0f
* @param parentNode
*     node from which the data comes
* @return pair of scales and shifts
*/
std::pair<std::vector<float>, std::vector<float>> getScalesAndShifts(const MKLDNNNode *parentNode) const;
protected:
bool canFuseSimpleOperation(const MKLDNNNodePtr& node) const;
// TODO [mandrono]: place outside of the node API
void fillScalesAndShifts(const MKLDNNNode *parentNode, std::vector<float> &scales, std::vector<float> &shifts, const int align = -1);
void setType(Type type) {
this->type = type;
@ -578,7 +586,7 @@ protected:
* Seed node should call this routine and pass its post operations list as parameter.
* @param ops List of fused post operations
*/
virtual void appendPostOps(mkldnn::post_ops& ops, bool initAsBinary = false, bool initBinaryMemory = false);
virtual void appendPostOps(mkldnn::post_ops& ops, const VectorDims &postOpDims, int align = -1, bool initAsBinary = false, bool initBinaryMemory = false);
virtual std::shared_ptr<mkldnn::primitive_attr> initPrimitiveAttr() const { return nullptr; }
typedef std::function<DnnlMemoryDescPtr (mkldnn::primitive_desc_iterator &primitive_desc_it, size_t idx)>

Some files were not shown because too many files have changed in this diff.