From 8a200d60f399c5677e8b1c26b1c41119b7faa83e Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 11 Aug 2021 09:21:37 +0300 Subject: [PATCH 01/19] Align python style with IE (#7007) --- .../ie_bridges/python/src/.clang-format | 13 +- .../openvino/inference_engine/ie_api_impl.cpp | 131 +++++++++++------- .../openvino/inference_engine/ie_api_impl.hpp | 28 ++-- .../offline_transformations_api_impl.cpp | 14 +- .../test_utils/test_utils_api_impl.cpp | 3 +- 5 files changed, 122 insertions(+), 67 deletions(-) diff --git a/inference-engine/ie_bridges/python/src/.clang-format b/inference-engine/ie_bridges/python/src/.clang-format index c93e6254b5b..ebe747b7838 100644 --- a/inference-engine/ie_bridges/python/src/.clang-format +++ b/inference-engine/ie_bridges/python/src/.clang-format @@ -1,6 +1,7 @@ BasedOnStyle: Google IndentWidth: 4 UseTab: Never +ColumnLimit: 120 Language: Cpp Standard: Cpp11 @@ -8,18 +9,20 @@ Standard: Cpp11 AccessModifierOffset: -4 AlignConsecutiveMacros: true AllowAllArgumentsOnNextLine: false +AllowAllConstructorInitializersOnNextLine: false AllowAllParametersOfDeclarationOnNextLine: false AllowShortFunctionsOnASingleLine: Empty AllowShortIfStatementsOnASingleLine: Never AllowShortLambdasOnASingleLine: Empty AllowShortLoopsOnASingleLine: false AlwaysBreakBeforeMultilineStrings: false -ColumnLimit: 160 -# Specialize this comment pragma in order to avoid changes in SEA copyrights +BinPackArguments: false +BinPackParameters: false CommentPragmas: '^#' DerivePointerAlignment: false FixNamespaceComments: true IndentCaseLabels: false -IndentPPDirectives: BeforeHash -SpaceBeforeCpp11BracedList: true -SpaceBeforeCtorInitializerColon: false \ No newline at end of file +IndentPPDirectives: AfterHash +ForEachMacros: + - foreach + - FOREACH_CHILD diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp index 1a6ae4f57ed..7aa289377d2 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp @@ -8,11 +8,17 @@ #include "ie_plugin_config.hpp" const std::string EXPORTED_NETWORK_NAME = "undefined"; -std::map precision_map = { - {"FP32", InferenceEngine::Precision::FP32}, {"FP64", InferenceEngine::Precision::FP64}, {"FP16", InferenceEngine::Precision::FP16}, - {"I8", InferenceEngine::Precision::I8}, {"I16", InferenceEngine::Precision::I16}, {"I32", InferenceEngine::Precision::I32}, - {"I64", InferenceEngine::Precision::I64}, {"U8", InferenceEngine::Precision::U8}, {"U16", InferenceEngine::Precision::U16}, - {"U32", InferenceEngine::Precision::U32}, {"U64", InferenceEngine::Precision::U64}}; +std::map precision_map = {{"FP32", InferenceEngine::Precision::FP32}, + {"FP64", InferenceEngine::Precision::FP64}, + {"FP16", InferenceEngine::Precision::FP16}, + {"I8", InferenceEngine::Precision::I8}, + {"I16", InferenceEngine::Precision::I16}, + {"I32", InferenceEngine::Precision::I32}, + {"I64", InferenceEngine::Precision::I64}, + {"U8", InferenceEngine::Precision::U8}, + {"U16", InferenceEngine::Precision::U16}, + {"U32", InferenceEngine::Precision::U32}, + {"U64", InferenceEngine::Precision::U64}}; std::map layout_map = {{"ANY", InferenceEngine::Layout::ANY}, {"NCHW", InferenceEngine::Layout::NCHW}, @@ -200,7 +206,8 @@ InferenceEnginePython::IENetwork InferenceEnginePython::read_network(std::string return 
InferenceEnginePython::IENetwork(std::make_shared(net)); } -InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr& cnn_network): actual(cnn_network) { +InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr& cnn_network) + : actual(cnn_network) { if (actual == nullptr) IE_THROW() << "IENetwork was not initialized."; name = actual->getName(); @@ -286,7 +293,9 @@ void InferenceEnginePython::IENetwork::reshape(const std::mapreshape(input_shapes); } -InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string& name, size_t num_requests): infer_requests(num_requests), name(name) { +InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string& name, size_t num_requests) + : infer_requests(num_requests), + name(name) { request_queue_ptr = std::make_shared(); } @@ -333,16 +342,19 @@ std::shared_ptr InferenceEnginePython::IEExe return actual; } -void InferenceEnginePython::InferRequestWrap::setBlob(const std::string& blob_name, const InferenceEngine::Blob::Ptr& blob_ptr) { +void InferenceEnginePython::InferRequestWrap::setBlob(const std::string& blob_name, + const InferenceEngine::Blob::Ptr& blob_ptr) { request_ptr.SetBlob(blob_name.c_str(), blob_ptr); } -void InferenceEnginePython::InferRequestWrap::setBlob(const std::string& blob_name, const InferenceEngine::Blob::Ptr& blob_ptr, +void InferenceEnginePython::InferRequestWrap::setBlob(const std::string& blob_name, + const InferenceEngine::Blob::Ptr& blob_ptr, const InferenceEngine::PreProcessInfo& info) { request_ptr.SetBlob(blob_name.c_str(), blob_ptr, info); } -const InferenceEngine::PreProcessInfo& InferenceEnginePython::InferRequestWrap::getPreProcess(const std::string& blob_name) { +const InferenceEngine::PreProcessInfo& InferenceEnginePython::InferRequestWrap::getPreProcess( + const std::string& blob_name) { return request_ptr.GetPreProcess(blob_name.c_str()); } @@ -392,7 +404,8 @@ int InferenceEnginePython::InferRequestWrap::wait(int64_t timeout) { return static_cast(code); } -std::map InferenceEnginePython::InferRequestWrap::getPerformanceCounts() { +std::map +InferenceEnginePython::InferRequestWrap::getPerformanceCounts() { std::map perf_counts = request_ptr.GetPerformanceCounts(); std::map perf_map; @@ -430,7 +443,8 @@ InferenceEnginePython::IECore::IECore(const std::string& xmlConfigFile) { actual = InferenceEngine::Core(xmlConfigFile); } -std::map InferenceEnginePython::IECore::getVersions(const std::string& deviceName) { +std::map InferenceEnginePython::IECore::getVersions( + const std::string& deviceName) { return actual.GetVersions(deviceName); } @@ -485,31 +499,38 @@ void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests) infer_request.request_queue_ptr = request_queue_ptr; infer_request.request_ptr = actual->CreateInferRequest(); - infer_request.request_ptr.SetCompletionCallback>( - [&](InferenceEngine::InferRequest request, InferenceEngine::StatusCode code) { - if (code != InferenceEngine::StatusCode::OK) { - IE_EXCEPTION_SWITCH(code, ExceptionType, - InferenceEngine::details::ThrowNow {} <<= - std::stringstream {} << IE_LOCATION << InferenceEngine::details::ExceptionTraits::string()); - } + infer_request.request_ptr + .SetCompletionCallback>( + [&](InferenceEngine::InferRequest request, InferenceEngine::StatusCode code) { + if (code != InferenceEngine::StatusCode::OK) { + IE_EXCEPTION_SWITCH(code, + ExceptionType, + InferenceEngine::details::ThrowNow{} <<= + std::stringstream{} + << IE_LOCATION + << InferenceEngine::details::ExceptionTraits::string()); + } - auto 
end_time = Time::now(); - auto execTime = std::chrono::duration_cast(end_time - infer_request.start_time); - infer_request.exec_time = static_cast(execTime.count()) * 0.000001; - infer_request.request_queue_ptr->setRequestIdle(infer_request.index); - if (infer_request.user_callback) { - infer_request.user_callback(infer_request.user_data, code); - } - }); + auto end_time = Time::now(); + auto execTime = std::chrono::duration_cast(end_time - infer_request.start_time); + infer_request.exec_time = static_cast(execTime.count()) * 0.000001; + infer_request.request_queue_ptr->setRequestIdle(infer_request.index); + if (infer_request.user_callback) { + infer_request.user_callback(infer_request.user_data, code); + } + }); } } -InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(const std::string& modelPath, const std::string& binPath) { +InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(const std::string& modelPath, + const std::string& binPath) { InferenceEngine::CNNNetwork net = actual.ReadNetwork(modelPath, binPath); return IENetwork(std::make_shared(net)); } -InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(const std::string& model, const uint8_t* bin, size_t bin_size) { +InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(const std::string& model, + const uint8_t* bin, + size_t bin_size) { InferenceEngine::MemoryBlob::Ptr weights_blob; if (bin_size != 0) { InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, {bin_size}, InferenceEngine::Layout::C); @@ -521,44 +542,58 @@ InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(cons return IENetwork(std::make_shared(net)); } -std::unique_ptr InferenceEnginePython::IECore::loadNetwork(IENetwork network, const std::string& deviceName, - const std::map& config, - int num_requests) { - auto exec_network = InferenceEnginePython::make_unique(network.name, num_requests); - exec_network->actual = std::make_shared(actual.LoadNetwork(*network.actual, deviceName, config)); +std::unique_ptr InferenceEnginePython::IECore::loadNetwork( + IENetwork network, + const std::string& deviceName, + const std::map& config, + int num_requests) { + auto exec_network = + InferenceEnginePython::make_unique(network.name, num_requests); + exec_network->actual = + std::make_shared(actual.LoadNetwork(*network.actual, deviceName, config)); exec_network->createInferRequests(num_requests); return exec_network; } -std::unique_ptr InferenceEnginePython::IECore::loadNetworkFromFile(const std::string& modelPath, - const std::string& deviceName, - const std::map& config, - int num_requests) { - auto exec_network = InferenceEnginePython::make_unique(modelPath, num_requests); - exec_network->actual = std::make_shared(actual.LoadNetwork(modelPath, deviceName, config)); +std::unique_ptr InferenceEnginePython::IECore::loadNetworkFromFile( + const std::string& modelPath, + const std::string& deviceName, + const std::map& config, + int num_requests) { + auto exec_network = + InferenceEnginePython::make_unique(modelPath, num_requests); + exec_network->actual = + std::make_shared(actual.LoadNetwork(modelPath, deviceName, config)); exec_network->createInferRequests(num_requests); return exec_network; } -std::unique_ptr InferenceEnginePython::IECore::importNetwork(const std::string& modelFIle, const std::string& deviceName, - const std::map& config, - int num_requests) { - auto exec_network = InferenceEnginePython::make_unique(EXPORTED_NETWORK_NAME, 
num_requests); - exec_network->actual = std::make_shared(actual.ImportNetwork(modelFIle, deviceName, config)); +std::unique_ptr InferenceEnginePython::IECore::importNetwork( + const std::string& modelFIle, + const std::string& deviceName, + const std::map& config, + int num_requests) { + auto exec_network = + InferenceEnginePython::make_unique(EXPORTED_NETWORK_NAME, num_requests); + exec_network->actual = + std::make_shared(actual.ImportNetwork(modelFIle, deviceName, config)); exec_network->createInferRequests(num_requests); return exec_network; } -std::map InferenceEnginePython::IECore::queryNetwork(InferenceEnginePython::IENetwork network, const std::string& deviceName, - const std::map& config) { +std::map InferenceEnginePython::IECore::queryNetwork( + InferenceEnginePython::IENetwork network, + const std::string& deviceName, + const std::map& config) { auto res = actual.QueryNetwork(*network.actual, deviceName, config); return res.supportedLayersMap; } -void InferenceEnginePython::IECore::setConfig(const std::map& config, const std::string& deviceName) { +void InferenceEnginePython::IECore::setConfig(const std::map& config, + const std::string& deviceName) { actual.SetConfig(config, deviceName); } diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp index 23d27474aff..4823b52287c 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp @@ -115,7 +115,9 @@ struct InferRequestWrap { void setBlob(const std::string& blob_name, const InferenceEngine::Blob::Ptr& blob_ptr); - void setBlob(const std::string& name, const InferenceEngine::Blob::Ptr& data, const InferenceEngine::PreProcessInfo& info); + void setBlob(const std::string& name, + const InferenceEngine::Blob::Ptr& data, + const InferenceEngine::PreProcessInfo& info); void setBatch(int size); @@ -160,13 +162,23 @@ struct IECore { std::map getVersions(const std::string& deviceName); InferenceEnginePython::IENetwork readNetwork(const std::string& modelPath, const std::string& binPath); InferenceEnginePython::IENetwork readNetwork(const std::string& model, const uint8_t* bin, size_t bin_size); - std::unique_ptr loadNetwork(IENetwork network, const std::string& deviceName, - const std::map& config, int num_requests); - std::unique_ptr loadNetworkFromFile(const std::string& modelPath, const std::string& deviceName, - const std::map& config, int num_requests); - std::unique_ptr importNetwork(const std::string& modelFIle, const std::string& deviceName, - const std::map& config, int num_requests); - std::map queryNetwork(IENetwork network, const std::string& deviceName, const std::map& config); + std::unique_ptr loadNetwork(IENetwork network, + const std::string& deviceName, + const std::map& config, + int num_requests); + std::unique_ptr loadNetworkFromFile( + const std::string& modelPath, + const std::string& deviceName, + const std::map& config, + int num_requests); + std::unique_ptr importNetwork( + const std::string& modelFIle, + const std::string& deviceName, + const std::map& config, + int num_requests); + std::map queryNetwork(IENetwork network, + const std::string& deviceName, + const std::map& config); void setConfig(const std::map& config, const std::string& deviceName = std::string()); void registerPlugin(const std::string& pluginName, const std::string& deviceName); void 
unregisterPlugin(const std::string& deviceName); diff --git a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.cpp index 183deaccfb3..8b213824a33 100644 --- a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.cpp +++ b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.cpp @@ -26,7 +26,8 @@ void InferenceEnginePython::ApplyPOTTransformations(InferenceEnginePython::IENet manager.run_passes(network.actual->getFunction()); } -void InferenceEnginePython::ApplyLowLatencyTransformation(InferenceEnginePython::IENetwork network, bool use_const_initializer) { +void InferenceEnginePython::ApplyLowLatencyTransformation(InferenceEnginePython::IENetwork network, + bool use_const_initializer) { ngraph::pass::Manager manager; manager.register_pass(use_const_initializer); manager.run_passes(network.actual->getFunction()); @@ -38,7 +39,9 @@ void InferenceEnginePython::ApplyPruningTransformation(InferenceEnginePython::IE manager.run_passes(network.actual->getFunction()); } -void InferenceEnginePython::GenerateMappingFile(InferenceEnginePython::IENetwork network, std::string path, bool extract_names) { +void InferenceEnginePython::GenerateMappingFile(InferenceEnginePython::IENetwork network, + std::string path, + bool extract_names) { ngraph::pass::Manager manager; manager.register_pass(path, extract_names); manager.run_passes(network.actual->getFunction()); @@ -47,9 +50,10 @@ void InferenceEnginePython::GenerateMappingFile(InferenceEnginePython::IENetwork void InferenceEnginePython::CheckAPI() { std::shared_ptr f; { - auto input = std::make_shared(ngraph::element::f32, ngraph::Shape {1, 1000, 4}); - auto reshape = std::make_shared(input, std::make_shared(input), true); - f = std::make_shared(ngraph::NodeVector {reshape}, ngraph::ParameterVector {input}); + auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); + auto reshape = + std::make_shared(input, std::make_shared(input), true); + f = std::make_shared(ngraph::NodeVector{reshape}, ngraph::ParameterVector{input}); } ngraph::pass::Manager m; m.register_pass(); diff --git a/inference-engine/ie_bridges/python/src/openvino/test_utils/test_utils_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/test_utils/test_utils_api_impl.cpp index bb289b9266a..f30a3e74d9c 100644 --- a/inference-engine/ie_bridges/python/src/openvino/test_utils/test_utils_api_impl.cpp +++ b/inference-engine/ie_bridges/python/src/openvino/test_utils/test_utils_api_impl.cpp @@ -7,6 +7,7 @@ #include #include -std::pair InferenceEnginePython::CompareNetworks(InferenceEnginePython::IENetwork lhs, InferenceEnginePython::IENetwork rhs) { +std::pair InferenceEnginePython::CompareNetworks(InferenceEnginePython::IENetwork lhs, + InferenceEnginePython::IENetwork rhs) { return compare_functions(lhs.actual->getFunction(), rhs.actual->getFunction(), true, true, false, true, true); } From e36f42b205bd2ef64f98c21a38d995760fa81018 Mon Sep 17 00:00:00 2001 From: Zhang Yi Date: Wed, 11 Aug 2021 14:22:04 +0800 Subject: [PATCH 02/19] [Frontend]Add Paddle Op Conversion Tests (#6982) * fix paddle model test * enable paddle ops tests * fix code style * remove useless log in paddle scripts --- .../test/frontend/paddlepaddle/op_fuzzy.cpp | 138 +++++++++- .../gen_scripts/generate_assign_value.py | 2 - 
.../test_models/gen_scripts/generate_bmm.py | 39 +++ .../generate_conv2d_combinations.py | 145 ++++++++++ .../gen_scripts/generate_conv2d_transpose.py | 144 ++++++++++ .../gen_scripts/generate_dropout.py | 47 ++++ .../gen_scripts/generate_elementwise_ops.py | 179 ++++++++++++ .../test_models/gen_scripts/generate_equal.py | 43 +++ .../gen_scripts/generate_expand_v2.py | 71 +++++ .../gen_scripts/generate_fill_constant.py | 97 +++++++ .../generate_fill_constant_batch_size_like.py | 39 +++ .../generate_flatten_contiguous_range.py | 38 +++ .../gen_scripts/generate_greater_equal.py | 60 ++++ .../gen_scripts/generate_hard_sigmoid.py | 40 +++ .../gen_scripts/generate_hard_swish.py | 39 +++ .../gen_scripts/generate_interpolate.py | 188 +++++++++++++ .../gen_scripts/generate_leaky_relu.py | 40 +++ .../test_models/gen_scripts/generate_log.py | 39 +++ .../gen_scripts/generate_logical_not.py | 44 +++ .../test_models/gen_scripts/generate_mul.py | 75 +++++ .../test_models/gen_scripts/generate_pad3d.py | 70 +++++ .../gen_scripts/generate_place_test_model.py | 1 - .../gen_scripts/generate_pool2d.py | 260 ++++++++++++++++++ .../test_models/gen_scripts/generate_pow.py | 95 +++++++ .../test_models/gen_scripts/generate_range.py | 45 +++ .../test_models/gen_scripts/generate_relu.py | 2 +- .../test_models/gen_scripts/generate_relu6.py | 40 +++ .../gen_scripts/generate_reshape.py | 78 ++++++ .../gen_scripts/generate_rnn_lstm.py | 66 +++++ .../test_models/gen_scripts/generate_scale.py | 93 +++++++ .../test_models/gen_scripts/generate_shape.py | 38 +++ .../gen_scripts/generate_sigmoid.py | 39 +++ .../test_models/gen_scripts/generate_slice.py | 39 +++ .../gen_scripts/generate_softmax.py | 45 +++ .../test_models/gen_scripts/generate_split.py | 115 ++++++++ .../gen_scripts/generate_squeeze.py | 38 +++ .../gen_scripts/generate_unsqueeze.py | 37 +++ .../gen_scripts/generate_yolo_box.py | 114 ++++++++ .../test_models/gen_scripts/save_model.py | 18 +- 39 files changed, 2725 insertions(+), 15 deletions(-) create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_bmm.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_combinations.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_transpose.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_dropout.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_equal.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_expand_v2.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant_batch_size_like.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_flatten_contiguous_range.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_greater_equal.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_sigmoid.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_swish.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_interpolate.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_leaky_relu.py 
create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_log.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_logical_not.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_mul.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pad3d.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pool2d.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pow.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_range.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu6.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_reshape.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_rnn_lstm.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_scale.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_shape.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_sigmoid.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_softmax.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_split.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_squeeze.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_unsqueeze.py create mode 100644 ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_yolo_box.py diff --git a/ngraph/test/frontend/paddlepaddle/op_fuzzy.cpp b/ngraph/test/frontend/paddlepaddle/op_fuzzy.cpp index c9915435b11..b04aca480e8 100644 --- a/ngraph/test/frontend/paddlepaddle/op_fuzzy.cpp +++ b/ngraph/test/frontend/paddlepaddle/op_fuzzy.cpp @@ -26,11 +26,147 @@ static const std::vector models{ std::string("assign_value_fp32"), std::string("assign_value_int32"), std::string("assign_value_int64"), + std::string("avgAdaptivePool2D_test1"), + std::string("avgPool_test1"), + std::string("avgPool_test10"), + std::string("avgPool_test11"), + std::string("avgPool_test2"), + std::string("avgPool_test3"), + std::string("avgPool_test4"), + std::string("avgPool_test5"), + // avgPool_test6, + std::string("avgPool_test7"), + std::string("avgPool_test8"), + std::string("avgPool_test9"), std::string("batch_norm_nchw"), std::string("batch_norm_nhwc"), + std::string("bilinear_downsample_false_0"), + std::string("bilinear_downsample_false_1"), + std::string("bilinear_downsample_true_0"), + std::string("bilinear_upsample_false_0"), + std::string("bilinear_upsample_false_1"), + std::string("bilinear_upsample_scales"), + std::string("bilinear_upsample_scales2"), + std::string("bilinear_upsample_true_0"), + std::string("bmm"), std::string("clip"), + std::string("conv2d_dilation_assymetric_pads_strides"), + std::string("conv2d_SAME_padding"), + std::string("conv2d_strides_assymetric_padding"), + std::string("conv2d_strides_no_padding"), + std::string("conv2d_strides_padding"), + std::string("conv2d_transpose_dilation_assymetric_pads_strides"), + // conv2d_transpose_SAME_padding(PDPD outputs wrong results), + std::string("conv2d_transpose_strides_assymetric_padding"), + 
std::string("conv2d_transpose_strides_no_padding"), + std::string("conv2d_transpose_strides_padding"), + std::string("conv2d_transpose_VALID_padding"), + std::string("conv2d_VALID_padding"), + std::string("depthwise_conv2d_convolution"), + std::string("depthwise_conv2d_transpose_convolution"), + std::string("dropout"), + std::string("dropout_upscale_in_train"), + std::string("elementwise_add1"), + std::string("elementwise_div1"), + std::string("elementwise_max1"), + std::string("elementwise_min1"), + std::string("elementwise_mul1"), + std::string("elementwise_pow1"), + std::string("elementwise_sub1"), + std::string("equal"), + std::string("expand_v2"), + std::string("expand_v2_tensor"), + std::string("expand_v2_tensor_list"), + std::string("fill_constant"), + std::string("fill_constant_batch_size_like"), + std::string("fill_constant_int32"), + std::string("fill_constant_int64"), + std::string("fill_constant_tensor"), + std::string("fill_constant_shape_tensor"), + std::string("fill_constant_shape_tensor_list"), + std::string("flatten_contiguous_range_test1"), + // greater_equal_big_int64(failure due to CPU inference), + std::string("greater_equal_float32"), + std::string("greater_equal_int32"), + std::string("greater_equal_int64"), + std::string("hard_sigmoid"), + std::string("hard_swish"), + std::string("leaky_relu"), + std::string("log"), + std::string("logical_not"), + std::string("matmul_xt"), + std::string("matmul_xt_yt"), + std::string("matmul_yt"), + std::string("maxAdaptivePool2D_test1"), + std::string("maxPool_test1"), + std::string("maxPool_test10"), + std::string("maxPool_test11"), + std::string("maxPool_test2"), + std::string("maxPool_test3"), + std::string("maxPool_test4"), + std::string("maxPool_test5"), + // maxPool_test6(nchw support is disabled now), + std::string("maxPool_test7"), + std::string("maxPool_test8"), + std::string("maxPool_test9"), + std::string("mul_fp32"), + std::string("nearest_downsample_false_0"), + std::string("nearest_downsample_false_1"), + std::string("nearest_upsample_false_0"), + std::string("nearest_upsample_false_1"), + std::string("pad3d_test1"), + std::string("pad3d_test2"), + std::string("pad3d_test3"), + // pad3d_test4, + std::string("pow_float32"), + std::string("pow_int32"), + std::string("pow_int64"), + // pow_int64_out_of_range(out of range of OV int64), + std::string("pow_y_tensor"), + std::string("range0"), + std::string("range1"), + std::string("range2"), std::string("relu"), -}; + std::string("relu6"), + std::string("relu6_1"), + std::string("reshape"), + std::string("reshape_tensor"), + std::string("reshape_tensor_list"), + std::string("rnn_lstm_layer_1_bidirectional"), + std::string("rnn_lstm_layer_1_forward"), + std::string("rnn_lstm_layer_2_bidirectional"), + std::string("rnn_lstm_layer_2_forward"), + std::string("scale_bias_after_float32"), + std::string("scale_bias_after_int32"), + std::string("scale_bias_after_int64"), + std::string("scale_bias_before_float32"), + std::string("scale_bias_before_int32"), + std::string("scale_bias_before_int64"), + std::string("scale_tensor_bias_after"), + std::string("scale_tensor_bias_before"), + std::string("shape"), + std::string("sigmoid"), + std::string("slice"), + std::string("slice_1d"), + std::string("softmax"), + std::string("softmax_minus"), + std::string("split_test1"), + std::string("split_test2"), + std::string("split_test3"), + std::string("split_test4"), + std::string("split_test5"), + std::string("split_test6"), + std::string("split_test_dim_int32"), + 
std::string("split_test_dim_int64"), + std::string("split_test_list"), + std::string("split_test_list_tensor"), + std::string("squeeze"), + std::string("squeeze_null_axes"), + std::string("unsqueeze"), + std::string("yolo_box_clip_box"), + std::string("yolo_box_default"), + std::string("yolo_box_scale_xy"), + std::string("yolo_box_uneven_wh")}; INSTANTIATE_TEST_SUITE_P( PDPDFuzzyOpTest, diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_assign_value.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_assign_value.py index 7d29574b2a9..10e5d5ea904 100644 --- a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_assign_value.py +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_assign_value.py @@ -27,8 +27,6 @@ def pdpd_assign_value(name, test_x): saveModel(name, exe, feedkeys=['x'], fetchlist=[result], inputs=[test_x], outputs=[outs[0]], target_dir=sys.argv[1]) - print(outs[0]) - def compare(): diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_bmm.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_bmm.py new file mode 100644 index 00000000000..92a468db917 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_bmm.py @@ -0,0 +1,39 @@ +import numpy as np +from save_model import saveModel +import sys + + +def pdpd_bmm(x1, x2): + import paddle as pdpd + + pdpd.enable_static() + node_x1 = pdpd.static.data(name='x1', shape=x1.shape, dtype=x1.dtype) + node_x2 = pdpd.static.data(name='x2', shape=x2.shape, dtype=x2.dtype) + bmm_node = pdpd.bmm(node_x1, node_x2) + result = pdpd.static.nn.batch_norm(bmm_node, use_global_stats=True) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x1': x1, 'x2': x2}, + fetch_list=[result]) + saveModel("bmm", exe, feedkeys=['x1', 'x2'], fetchlist=[result], + inputs=[x1, x2], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +if __name__ == "__main__": + input1 = np.array([[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]).astype(np.float32) + + input2 = np.ones([1, 5, 7]).astype('float32') + pdpd_result = pdpd_bmm(input1, input2) diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_combinations.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_combinations.py new file mode 100644 index 00000000000..e54643decfe --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_combinations.py @@ -0,0 +1,145 @@ +from save_model import saveModel +import numpy as np +import paddle as pdpd +import sys +pdpd.enable_static() + + +def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog): + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(start_prog) + outs = exe.run( + feed={'x': input_x}, + fetch_list=fetch_list, + program=main_prog) + + with pdpd.static.program_guard(main_prog, start_prog): + saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x], + outputs=[outs[0]], target_dir=sys.argv[1]) + + +def pdpd_conv2d(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True): + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + with pdpd.static.program_guard(main_program, startup_program): + data = pdpd.static.data(name='x', shape=input_shape, dtype='float32') + weight_attr = pdpd.ParamAttr(name="conv2d_weight", initializer=pdpd.nn.initializer.Assign(kernel)) + conv2d = pdpd.static.nn.conv2d(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4], + padding=padding, param_attr=weight_attr, dilation=dilation, stride=stride, groups=groups, use_cudnn=use_cudnn) + run_and_save_model(input_x, name, data, conv2d, main_program, startup_program) + + +if __name__ == "__main__": + + test_cases =[ + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_SAME_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": "SAME", + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_VALID_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": "VALID", + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + 
[30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_strides_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": 1, + "stride" : 2, + }, + { "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_strides_no_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": 0, + "stride" : 2, + }, + { "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_strides_assymetric_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": [1,1,0,1], + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_dilation_assymetric_pads_strides", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": [1, 1, 1, 2], + "stride" : [3, 1], + }, + { + "input_x": np.arange(27).astype(np.float32).reshape([1, 3, 3, 3]), + "name": "depthwise_conv2d_convolution", + "input_shape": [1, 3, 3, 3], + "kernel": np.ones([3, 1, 3, 3]).astype(np.float32), + "dilation": 1, + "padding": 1, + "stride": 1, + "groups": 3, + "use_cudnn": False + } + ] + for test in test_cases: + + pdpd_conv2d(test['input_x'], test['name'], test["input_shape"], + test['kernel'], test['dilation'], + test['padding'], + test['stride'], + 1 if "groups" not in test else test['groups'], + True if "use_cudnn" not in test else test['use_cudnn']) + + diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_transpose.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_transpose.py new file mode 100644 index 00000000000..79608128bc6 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_transpose.py @@ -0,0 +1,144 @@ +import numpy as np +import paddle as pdpd +pdpd.enable_static() +from save_model import saveModel +import sys + + +def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog): + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(start_prog) + outs = exe.run( + feed={'x': input_x}, + fetch_list=fetch_list, + program=main_prog) + with pdpd.static.program_guard(main_prog, start_prog): + saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x], + outputs=[outs[0]], target_dir=sys.argv[1]) + + +def pdpd_conv2d_transpose(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True): + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + with pdpd.static.program_guard(main_program, startup_program): + data = pdpd.static.data(name='x', 
shape=input_shape, dtype='float32') + weight_attr = pdpd.ParamAttr(name="conv2d_weight", initializer=pdpd.nn.initializer.Assign(kernel)) + conv2d = pdpd.static.nn.conv2d_transpose(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4], + padding=padding, param_attr=weight_attr, dilation=dilation, stride=stride, groups=groups, use_cudnn=use_cudnn) + run_and_save_model(input_x, name, data, conv2d, main_program, startup_program) + + +if __name__ == "__main__": + + test_cases =[ + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_SAME_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": "SAME", + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_VALID_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": "VALID", + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_strides_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": 1, + "stride" : 2, + }, + { "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_strides_no_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": 0, + "stride" : 2, + }, + { "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_strides_assymetric_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": [1,1,0,1], + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_dilation_assymetric_pads_strides", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": [1, 1, 1, 2], + "stride" : [3, 1], + }, + { + "input_x": np.arange(27).astype(np.float32).reshape([1, 
3, 3, 3]), + "name": "depthwise_conv2d_transpose_convolution", + "input_shape": [1, 3, 3, 3], + "kernel": np.ones([3, 1, 3, 3]).astype(np.float32), + "dilation": 1, + "padding": 1, + "stride": 1, + "groups": 3, + "use_cudnn": False + } + ] + for test in test_cases: + + pdpd_conv2d_transpose(test['input_x'], test['name'], test["input_shape"], + test['kernel'], test['dilation'], + test['padding'], + test['stride'], + 1 if "groups" not in test else test['groups'], + True if "use_cudnn" not in test else test['use_cudnn']) + + diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_dropout.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_dropout.py new file mode 100644 index 00000000000..6f40afdb1f2 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_dropout.py @@ -0,0 +1,47 @@ +# +# pool2d paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def pdpd_dropout(name : str, x, p, pdpd_attrs): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + out = pdpd.nn.functional.dropout(x=node_x, p=p, training=pdpd_attrs['training'], mode=pdpd_attrs['mode']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], + outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + p=0.5 + data = np.random.random(size=(3, 10, 3, 7)).astype('float32') + pdpd_attrs = { + 'training' : False, + 'mode' : "downscale_in_infer" + } + pdpd_attrs2 = { + 'training' : False, + 'mode' : "upscale_in_train" + } + pdpd_dropout("dropout", data, p, pdpd_attrs) + pdpd_dropout("dropout_upscale_in_train", data, p, pdpd_attrs2) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py new file mode 100644 index 00000000000..13a08af86ca --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py @@ -0,0 +1,179 @@ +# +# elementwise paddle model generator +# +import numpy as np +import sys +from save_model import saveModel + + +def elementwise_add(name : str, x, y, in_dtype): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype) + out = pdpd.fluid.layers.nn.elementwise_add(node_x, node_y) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def elementwise_sub(name : str, x, y, in_dtype): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype) + out = pdpd.fluid.layers.nn.elementwise_sub(node_x, node_y) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def elementwise_div(name : str, x, y, in_dtype): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + out = pdpd.fluid.layers.nn.elementwise_div(node_x, node_y) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def elementwise_mul(name : str, x, y, in_dtype): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + out = pdpd.fluid.layers.nn.elementwise_mul(node_x, node_y) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def elementwise_min(name : str, x, y, in_dtype): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + out = pdpd.fluid.layers.nn.elementwise_min(node_x, node_y) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def elementwise_max(name : str, x, y, in_dtype): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + out = pdpd.fluid.layers.nn.elementwise_max(node_x, node_y) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def elementwise_pow(name : str, x, y, in_dtype): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + out = pdpd.fluid.layers.nn.elementwise_pow(node_x, node_y) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + + in_dtype = 'float32' + data_x = np.array([2, 3, 4]).astype(in_dtype) + data_y = np.array([1, 5, 2]).astype(in_dtype) + + elementwise_add("elementwise_add1", data_x, data_y, in_dtype) + elementwise_sub("elementwise_sub1", data_x, data_y, in_dtype) + elementwise_div("elementwise_div1", data_x, data_y, in_dtype) + elementwise_mul("elementwise_mul1", data_x, data_y, in_dtype) + elementwise_min("elementwise_min1", data_x, data_y, in_dtype) + elementwise_max("elementwise_max1", data_x, data_y, in_dtype) + elementwise_pow("elementwise_pow1", data_x, data_y, in_dtype) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_equal.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_equal.py new file mode 100644 index 00000000000..b311bf89e89 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_equal.py @@ -0,0 +1,43 @@ +# +# pool2d paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def equal(name : str, x, y): + import paddle as pdpd + pdpd.enable_static() + + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + node_y = pdpd.static.data(name='y', shape=y.shape, dtype='float32') + + out = pdpd.equal(node_x, node_y) + out = pdpd.cast(out, np.float32) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], + inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + import paddle as pdpd + data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32) + data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32) + + equal("equal", data_x, data_y) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_expand_v2.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_expand_v2.py new file mode 100644 index 00000000000..ab5aacaacec --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_expand_v2.py @@ -0,0 +1,71 @@ +# +# expand_v2 paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + +data_type = 'float32' + + +def expand_v2(name:str, x, shape:list): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.expand(node_x, shape=shape, name='expand_v2') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def expand_v2_tensor(name:str, x, out_shape, use_tensor_in_list): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + if use_tensor_in_list: + out_shape[0] = pdpd.assign(np.array((out_shape[0],)).astype('int32')) + out = pdpd.expand(node_x, shape=out_shape, name='expand_v2') + else: + out_shape = np.array(out_shape).astype('int32') + node_shape = pdpd.assign(out_shape, output=None) + out = pdpd.expand(node_x, shape=node_shape, name='expand_v2') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.random.rand(1, 1, 6).astype(data_type) + + expand_v2("expand_v2", data, [2, 3, -1]) + expand_v2_tensor("expand_v2_tensor", data, [2, 3, -1], False) + expand_v2_tensor("expand_v2_tensor_list", data, [2, 3, -1], True) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant.py new file mode 100644 index 00000000000..feb8ce40300 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant.py @@ -0,0 +1,97 @@ +# +# fill_const paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + + +def fill_constant(name : str, shape : list, dtype, value): + pdpd.enable_static() + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + x1 = pdpd.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant') + x2 = pdpd.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant') + out = pdpd.add(pdpd.cast(x1, np.float32), pdpd.cast(x2, np.float32)) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + fetch_list=[out]) + + saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def fill_constant_tensor(name : str, shape : list, dtype, value): + pdpd.enable_static() + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_value = pdpd.static.data(name='value', shape=[1], dtype=dtype) + x1 = pdpd.fluid.layers.fill_constant(shape=shape, value=node_value, dtype=dtype, name='fill_constant1') + out = pdpd.cast(x1, np.float32) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={"value": value}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=["value"], fetchlist=[out], inputs=[np.array([value]).astype(dtype)], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def fill_constant_shape_tensor(name : str, shape, dtype, value): + pdpd.enable_static() + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_shape = pdpd.fluid.layers.fill_constant(shape=[2], value=shape, dtype='int32', name='shape') + x1 = pdpd.fluid.layers.fill_constant(shape=node_shape, value=value, dtype=dtype, name='fill_constant') + out = pdpd.cast(x1, np.float32) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + fetch_list=[out]) + + saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def fill_constant_shape_tensor_list(name : str, shape: list, dtype, value): + pdpd.enable_static() + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_shape = pdpd.fluid.layers.fill_constant(shape=[1], value=shape, dtype='int32', name='shape') + x1 = pdpd.fluid.layers.fill_constant(shape=[2, node_shape], value=value, dtype=dtype, name='fill_constant') + out = pdpd.cast(x1, np.float32) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + fetch_list=[out]) + + saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + fill_constant("fill_constant", [2, 3, 4], 'float32', 0.03) + fill_constant("fill_constant_int32", [2, 3, 4], "int32", 2) + fill_constant("fill_constant_int64", [2, 3, 4], "int64", 4) + fill_constant_tensor("fill_constant_tensor", [2, 3, 4], 'float32', 0.05) + fill_constant_shape_tensor("fill_constant_shape_tensor", 2, 'float32', 0.05) + fill_constant_shape_tensor_list("fill_constant_shape_tensor_list", 2, 'float32', 0.05) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant_batch_size_like.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant_batch_size_like.py new file mode 100644 index 00000000000..25bde96ad59 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant_batch_size_like.py @@ -0,0 +1,39 @@ +# +# fill_constant_batch_size_like paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + +data_type = 'float32' + +def fill_constant_batch_size_like(name : str, x, shape, dtype, value, input_dim_idx=0, output_dim_idx=0): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + like = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) + out = pdpd.fluid.layers.fill_constant_batch_size_like(input=like, shape=shape, \ + value=value, dtype=dtype, \ + output_dim_idx=output_dim_idx, input_dim_idx=input_dim_idx) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + x = np.random.rand(4, 3, 2).astype(data_type) + fill_constant_batch_size_like("fill_constant_batch_size_like", \ + x, [1, -1, 3], data_type, 0.03, 2, 1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_flatten_contiguous_range.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_flatten_contiguous_range.py new file mode 100644 index 00000000000..5d6274587f5 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_flatten_contiguous_range.py @@ -0,0 +1,38 @@ +# +# generate_flatten_contiguous_range paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + +def generate_flatten_contiguous_range(name : str, x, start_axis, stop_axis, in_dtype): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + out = pdpd.flatten(node_x, start_axis, stop_axis) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + # TODO: more type + in_dtype = 'float32' + data = np.random.randn(3, 2, 5, 4).astype(in_dtype) + start_axis = 1 + stop_axis = 2 + generate_flatten_contiguous_range("flatten_contiguous_range_test1", data, start_axis, stop_axis, in_dtype) + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_greater_equal.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_greater_equal.py new file mode 100644 index 00000000000..506e9ccb214 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_greater_equal.py @@ -0,0 +1,60 @@ +# +# greater_equal paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + + +def greater_equal(name : str, x, y, data_type, cast_to_fp32=False): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='input_x', shape=x.shape, dtype=data_type) + node_y = pdpd.static.data(name='input_y', shape=y.shape, dtype=data_type) + out = pdpd.fluid.layers.greater_equal(x=node_x, y=node_y, name='greater_equal') + # FuzzyTest framework doesn't support boolean so cast to fp32/int32 + + if cast_to_fp32: + data_type = "float32" + + out = pdpd.cast(out, data_type) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
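+        # Run the element-wise comparison; the cast above keeps the fetched result in a type the test framework can consume.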
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'input_x': x, 'input_y': y}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['input_x', 'input_y'], fetchlist=[out], + inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + + test_cases = [ + "float32", + "int32", + "int64" + ] + + for test in test_cases: + x = np.array([0, 1, 2, 3]).astype(test) + y = np.array([1, 0, 2, 4]).astype(test) + if test == "int64": + greater_equal("greater_equal_" + test, x, y, test, True) + else: + greater_equal("greater_equal_" + test, x, y, test, False) + + x = np.array([5000000000]).astype("int64") + y = np.array([2000000000]).astype("int64") + greater_equal("greater_equal_big_int64", x, y, test) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_sigmoid.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_sigmoid.py new file mode 100644 index 00000000000..2a9d8e55842 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_sigmoid.py @@ -0,0 +1,40 @@ +# +# hard_sigmoid paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + + +def hard_sigmoid(name: str, x, slope: float = 0.2, offset: float = 0.5, data_type='float32'): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) + out = pdpd.fluid.layers.hard_sigmoid(node_x, slope=slope, offset=offset, name='hard_sigmoid') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data_type = 'float32' + data = np.array([0, 1, 2, 3, 4, 5, 6, -10]).astype(data_type) + + hard_sigmoid("hard_sigmoid", data, 0.1, 0.6, data_type) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_swish.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_swish.py new file mode 100644 index 00000000000..bc1bec52f5b --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_swish.py @@ -0,0 +1,39 @@ +# +# sigmoid paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + + +def hard_swish(name: str, x, threshold=6.0, scale=6.0, offset=3.0, data_type='float32'): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.fluid.layers.hard_swish(node_x, threshold=threshold, scale=scale, offset=offset, name='hard_swish') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
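+        # main() exercises the op on the sample values [-6, 1, 6] defined below.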
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data_type = 'float32' + data = np.array([-6, 1, 6]).astype(data_type) + hard_swish("hard_swish", data, data_type='float32') + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_interpolate.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_interpolate.py new file mode 100644 index 00000000000..a8fa5114136 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_interpolate.py @@ -0,0 +1,188 @@ +import numpy as np +import paddle as pdpd +from paddle.nn.functional import interpolate +from save_model import saveModel +import sys +pdpd.enable_static() + + +def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog): + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(start_prog) + outs = exe.run( + feed={'x': input_x}, + fetch_list=fetch_list, + program=main_prog) + + with pdpd.static.program_guard(main_prog, start_prog): + saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x], + outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs + + +def pdpd_interpolate(x, sizes=None, scale_factor=None, mode='nearest', align_corners=True, + align_mode=0, data_format='NCHW', name=None): + pdpd.enable_static() + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + with pdpd.static.program_guard(main_program, startup_program): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + interp = interpolate(node_x, size=sizes, scale_factor=scale_factor, + mode=mode, align_corners=align_corners, align_mode=align_mode, + data_format=data_format, name=name) + out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + outs = run_and_save_model(x, name, node_x, out, main_program, startup_program) + return outs[0] + + +def resize_upsample_bilinear(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + + test_case = [{'name': 'bilinear_upsample_false_1', 'align_corners': False, 'align_mode': 1}, + {'name': 'bilinear_upsample_false_0', 'align_corners': False, 'align_mode': 0}, + {'name': 'bilinear_upsample_true_0', 'align_corners': True, 'align_mode': 0}] + + for test in test_case: + pdpd_result = pdpd_interpolate(data, [64, 64], None, mode='bilinear', align_corners=test['align_corners'], + align_mode=test['align_mode'], data_format='NCHW', name=test['name']) + + +def resize_downsample_bilinear(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + data_28 = data.reshape([1, 1, 2, 8]) + test_case = [{'name': 'bilinear_downsample_false_1', 'align_corners': False, 'align_mode': 1}, + {'name': 'bilinear_downsample_false_0', 'align_corners': False, 'align_mode': 0}, + {'name': 'bilinear_downsample_true_0', 'align_corners': True, 'align_mode': 0}] + + for test in test_case: + pdpd_result = pdpd_interpolate(data_28, [2, 4], None, mode='bilinear', align_corners=test['align_corners'], + align_mode=test['align_mode'], data_format='NCHW', name=test['name']) + +def resize_upsample_nearest(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], 
dtype=np.float32) + + test_case = [ + {'name': 'nearest_upsample_false_0', 'size': [64, 64], 'align_corners': False, 'align_mode': 0}, + {'name': 'nearest_upsample_false_1', 'size': [16, 64], 'align_corners': False, 'align_mode': 0} + ] + + for test in test_case: + pdpd_result = pdpd_interpolate(data, test['size'], None, mode='nearest', align_corners=test['align_corners'], + align_mode=test['align_mode'], data_format='NCHW', name=test['name']) + + +def resize_downsample_nearest(): + data = np.arange(0, 4096).astype(np.float32) + data_64 = data.reshape([1, 1, 64, 64]) + test_case = [ + {'name': 'nearest_downsample_false_0', 'size': [8, 8], 'align_corners': False, 'align_mode': 1}, + {'name': 'nearest_downsample_false_1', 'size': [4, 8], 'align_corners': False, 'align_mode': 1} + ] + + for test in test_case: + pdpd_result = pdpd_interpolate(data_64, test['size'], None, mode='nearest', align_corners=test['align_corners'], + align_mode=test['align_mode'], data_format='NCHW', name=test['name']) + + +def nearest_upsample_tensor_size(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + sizes = np.array([8, 8], dtype=np.int32) + pdpd.enable_static() + test_case = [{'name': 'nearest_upsample_tensor_size', 'align_corners': False, 'align_mode': 0}] + for test in test_case: + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + with pdpd.static.program_guard(main_program, startup_program): + node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32') + node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32') + interp = interpolate(node_x, size=node_sizes, scale_factor=None, + mode='nearest', align_corners=test['align_corners'], align_mode=test['align_mode'], + data_format='NCHW', name=test['name']) + out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(startup_program) + outs = exe.run( + feed={'x': data, 'sizes': sizes}, + fetch_list=out, + program=main_program) + saveModel(test['name'], exe, feedkeys=['x', 'sizes'], fetchlist=out, inputs=[data, sizes], outputs=[outs[0]], target_dir=sys.argv[1]) + + +def bilinear_upsample_tensor_size(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + sizes = np.array([8, 8], dtype="int32") + + test_case = [{'name': 'bilinear_upsample_tensor_size', 'align_corners': False, 'align_mode': 1}] + + for test in test_case: + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + with pdpd.static.program_guard(main_program, startup_program): + node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32') + node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32') + interp = interpolate(node_x, size=node_sizes, scale_factor=None, + mode='bilinear', align_corners=test['align_corners'], align_mode=test['align_mode'], + data_format='NCHW', name=test['name']) + out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(startup_program) + outs = exe.run( + feed={'x': data, 'sizes': sizes}, + fetch_list=out, + program=main_program) + saveModel(test['name'], exe, feedkeys=['x', 'sizes'], fetchlist=out, inputs=[data, sizes], outputs=[outs[0]], target_dir=sys.argv[1]) + + +def bilinear_upsample_scales(): + data = np.array([[[ + [1, 2, 3, 4], 
+ [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + + test_case = [{'name': 'bilinear_upsample_scales', 'align_corners': False, 'align_mode': 1, "scales": 2}, + {'name': 'bilinear_upsample_scales2', 'align_corners': False, 'align_mode': 1, "scales": [2, 2]}] + + for test in test_case: + pdpd_result = pdpd_interpolate(data, None, 2, mode='bilinear', align_corners=test['align_corners'], + align_mode=test['align_mode'], data_format='NCHW', name=test['name']) + + +if __name__ == "__main__": + resize_downsample_bilinear() + resize_upsample_bilinear() + resize_downsample_nearest() + resize_upsample_nearest() + nearest_upsample_tensor_size() + bilinear_upsample_tensor_size() + bilinear_upsample_scales() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_leaky_relu.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_leaky_relu.py new file mode 100644 index 00000000000..bc2a54741d7 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_leaky_relu.py @@ -0,0 +1,40 @@ +# +# leaky_relu paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + + +def leaky_relu(name: str, x, alpha: float = 0.02, data_type='float32'): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) + out = pdpd.fluid.layers.leaky_relu(node_x, alpha=alpha, name='leaky_relu') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data_type = 'float32' + data = np.array([-1, 2, 3]).astype(data_type) + + leaky_relu("leaky_relu", data, 0.03) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_log.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_log.py new file mode 100644 index 00000000000..6e269e03ed5 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_log.py @@ -0,0 +1,39 @@ +# +# log paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + + +def log(name: str, x, data_type='float32'): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.fluid.layers.log(node_x, name='log') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data_type = 'float32' + x = np.array([0, 1, 2, -10]).astype(data_type) + log("log", x) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_logical_not.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_logical_not.py new file mode 100644 index 00000000000..6e638738134 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_logical_not.py @@ -0,0 +1,44 @@ +# +# pool2d paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def equal_logical_not(name : str, x, y): + import paddle as pdpd + pdpd.enable_static() + + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + node_y = pdpd.static.data(name='y', shape=y.shape, dtype='float32') + + out = pdpd.equal(node_x, node_y) + out = pdpd.logical_not(out) + out = pdpd.cast(out, np.float32) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], + inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + import paddle as pdpd + data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32) + data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32) + + equal_logical_not("logical_not", data_x, data_y) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_mul.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_mul.py new file mode 100644 index 00000000000..1489fd80909 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_mul.py @@ -0,0 +1,75 @@ +import numpy as np +from save_model import saveModel +import sys + + +def pdpd_mul(name, x1, x2): + import paddle as pdpd + + pdpd.enable_static() + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x1 = pdpd.static.data(name='x1', shape=x1.shape, dtype=x1.dtype) + node_x2 = pdpd.static.data(name='x2', shape=x2.shape, dtype=x2.dtype) + bmm_node = pdpd.fluid.layers.mul(node_x1, node_x2) + result = pdpd.static.nn.batch_norm(bmm_node, use_global_stats=True) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
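+        # Run the mul followed by batch_norm and save the resulting model with its reference output.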
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x1': x1, 'x2': x2}, + fetch_list=[result]) + saveModel(name, exe, feedkeys=['x1', 'x2'], fetchlist=[result], inputs=[x1, x2], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def pdpd_matmul(name, x1, x2, x_transpose=False, y_transpose=False): + import paddle as pdpd + + pdpd.enable_static() + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x1 = pdpd.static.data(name='x1', shape=x1.shape, dtype=x1.dtype) + node_x2 = pdpd.static.data(name='x2', shape=x2.shape, dtype=x2.dtype) + mul_node = pdpd.fluid.layers.matmul(node_x1, node_x2, x_transpose, y_transpose) + result = pdpd.static.nn.batch_norm(mul_node, use_global_stats=True) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x1': x1, 'x2': x2}, + fetch_list=[result]) + saveModel(name, exe, feedkeys=['x1', 'x2'], fetchlist=[result], inputs=[x1, x2], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +if __name__ == "__main__": + input_2x5 = np.array([[1, 2, 3, 4, 5], + [6, 7, 8, 9, 10]]).astype(np.float32) + + input_5x3 = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9], + [10, 11, 12], + [13, 14, 15]]).astype(np.float32) + + input_5x2 = np.array([[1, 2], + [4, 5], + [7, 8], + [10, 11], + [13, 14]]).astype(np.float32) + + input_2x3 = np.array([[1, 2, 3], + [4, 5, 6]]).astype(np.float32) + + pdpd_result = pdpd_mul("mul_fp32", input_2x5, input_5x3) + + pdpd_matmul("matmul_xt", input_2x5, input_2x3, x_transpose=True, y_transpose=False) + pdpd_matmul("matmul_yt", input_2x3, input_5x3, x_transpose=False, y_transpose=True) + pdpd_matmul("matmul_xt_yt", input_2x5, input_5x2, x_transpose=True, y_transpose=True) diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pad3d.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pad3d.py new file mode 100644 index 00000000000..51b3a81f0e9 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pad3d.py @@ -0,0 +1,70 @@ +# +# pad3d paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + +def pad3d(name : str, x, in_dtype, pad, data_format, mode, value = 0): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + + if mode == 'constant': + pad_constant = pdpd.nn.Pad3D(padding=pad, mode=mode, value=value, data_format=data_format) + out = pad_constant(node_x) + else: + pad_other_mode = pdpd.nn.Pad3D(padding=pad, mode=mode, data_format=data_format) + out = pad_other_mode(node_x) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. 
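+        # Execute the chosen Pad3D configuration (constant/reflect/replicate) and export it for the frontend tests.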
+ exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + in_dtype = 'float32' + + input_shape = (1, 2, 3, 4, 5) + pad = [1, 2, 1, 1, 3, 4] + mode = 'constant' + data_format = 'NCDHW' + value = 100 + input_data = np.random.rand(*input_shape).astype(np.float32) + pad3d("pad3d_test1", input_data, in_dtype, pad, data_format, mode, value) + + input_shape = (2, 3, 4, 5, 6) + pad = [1, 2, 1, 1, 1, 2] + mode = "reflect" + data_format = 'NDHWC' + input_data = np.random.rand(*input_shape).astype(np.float32) + pad3d("pad3d_test2", input_data, in_dtype, pad, data_format, mode) + + input_shape = (2, 3, 4, 5, 6) + pad = [1, 2, 1, 1, 1, 2] + mode = "replicate" + data_format = 'NDHWC' + input_data = np.random.rand(*input_shape).astype(np.float32) + pad3d("pad3d_test3", input_data, in_dtype, pad, data_format, mode) + + # padding of type int feature only supported by PaddlePaddle 'develop' version(>=2.1.0) +# input_shape = (1, 2, 3, 4, 5) +# pad_int = 1 +# mode = 'constant' +# data_format= 'NCDHW' +# value = 100 +# input_data = np.random.rand(*input_shape).astype(np.float32) +# pad3d("pad3d_test4", input_data, in_dtype, pad_int, data_format, mode, value) + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_place_test_model.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_place_test_model.py index bd377275274..ef8dd32f0b3 100644 --- a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_place_test_model.py +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_place_test_model.py @@ -35,7 +35,6 @@ def pdpd_rnn_lstm(input_size, hidden_size, layers, direction): fetchlist=[y, h, c, relu_1, relu_2, relu_3], inputs=[np.ones([4, 3, input_size]).astype(np.float32)], outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1]) - print(outs[0]) return outs[0] diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pool2d.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pool2d.py new file mode 100644 index 00000000000..1f6c32e242b --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pool2d.py @@ -0,0 +1,260 @@ +# +# pool2d paddle model generator +# +import numpy as np +import sys +from save_model import saveModel + +data_type = 'float32' + +def pool2d(name : str, x, attrs : dict): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.fluid.layers.pool2d(node_x, + pool_size=attrs['pool_size'], + pool_type=attrs['pool_type'], + pool_stride=attrs['pool_stride'], + pool_padding=attrs['pool_padding'], + global_pooling=attrs['global_pooling'], + ceil_mode=attrs['ceil_mode'], + exclusive=attrs['exclusive'], + data_format=attrs['data_format']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
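+        # Evaluate the pooling configuration described by 'attrs' and dump the model together with its expected output.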
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def adaptive_pool2d(name : str, x, attrs : dict): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.fluid.layers.adaptive_pool2d( + input=node_x, + pool_size=attrs['pool_size'], + pool_type=attrs['pool_type'], + require_index=attrs['require_index']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + N, C, H, W = 2, 3, 4, 4 + data = np.arange(N*C*H*W).astype(data_type) + data_NCHW = data.reshape(N, C, H, W) + data_NHWC = data.reshape(N, H, W, C) + #print(data_NCHW, data_NCHW.shape) + + pooling_types = ['max', 'avg'] + + # pool2d + for i, pooling_type in enumerate(pooling_types): + # example 1: + # ceil_mode = False + pdpd_attrs = { + # input=data_NCHW, # shape: [2, 3, 8, 8] + 'pool_size' : [3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding' : [2,1], # it is same as pool_padding = [2,2,1,1] + 'global_pooling' : False, + 'ceil_mode' : False, + 'exclusive' : True, + 'data_format' : "NCHW" + } + # shape of out_1: [2, 3, 4, 3] + pool2d(pooling_type+'Pool_test1', data_NCHW, pdpd_attrs) + + # Cecilia: there is a bug of PaddlePaddle in this case. + # example 2: + # ceil_mode = True (different from example 1) + pdpd_attrs = { + #input=data_NCHW, + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[[0,0], [0,0], [2,2], [1,1]], # it is same as pool_padding = [2,2,1,1] + 'global_pooling':False, + 'ceil_mode':True, + 'exclusive':True, + 'data_format':"NCHW" + } + # shape of out_2: [2, 3, 4, 4] which is different from out_1 + pool2d(pooling_type+'Pool_test2', data_NCHW, pdpd_attrs) + + # example 3: + # pool_padding = "SAME" (different from example 1) + pdpd_attrs = { + #input=data_NCHW, + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':"SAME", + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + # shape of out_3: [2, 3, 3, 3] which is different from out_1 + pool2d(pooling_type+'Pool_test3', data_NCHW, pdpd_attrs) + + # example 4: + # pool_padding = "VALID" (different from example 1) + pdpd_attrs = { + #input=data_NCHW, + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':"VALID", + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + # shape of out_4: [2, 3, 2, 2] which is different from out_1 + pool2d(pooling_type+'Pool_test4', data_NCHW, pdpd_attrs) + + # example 5: + # global_pooling = True (different from example 1) + # It will be set pool_size = [8,8] and pool_padding = [0,0] actually. 
+ pdpd_attrs = { + #input=data_NCHW, + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[2,1], + 'global_pooling':True, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + # shape of out_5: [2, 3, 1, 1] which is different from out_1 + pool2d(pooling_type+'Pool_test5', data_NCHW, pdpd_attrs) + + # example 6: + # data_format = "NHWC" (different from example 1) + pdpd_attrs = { + #input=data_NHWC, # shape: [2, 8, 8, 3] + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[2,1], + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NHWC" + } + # shape of out_6: [2, 4, 3, 3] which is different from out_1 + pool2d(pooling_type+'Pool_test6', data_NHWC, pdpd_attrs) + + # example 7: + # pool_size is [9, 9] + pdpd_attrs = { + #input=data_NCHW, + 'pool_size':[9,9], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[[0,0], [0,0], [2,2], [1,1]], # it is same as pool_padding = [2,2,1,1] + 'global_pooling':False, + 'ceil_mode':True, + 'exclusive':True, + 'data_format':"NCHW" + } + pool2d(pooling_type+'Pool_test7', data_NCHW, pdpd_attrs) + + # example 8: + # pool_padding size is 1 + pdpd_attrs = { + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':2, + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + pool2d(pooling_type+'Pool_test8', data_NCHW, pdpd_attrs) + + #input data for test9 and test10 + N_data1, C_data1, H_data1, W_data1 = 2, 3, 8, 8 + data1 = np.arange(N_data1*C_data1*H_data1*W_data1).astype(data_type) + data1_NCHW = data1.reshape(N_data1, C_data1, H_data1, W_data1) + # example 9: + # pool_padding size is 4: [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] + pdpd_attrs = { + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[2, 1, 2, 1], + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + pool2d(pooling_type+'Pool_test9', data1_NCHW, pdpd_attrs) + + # example 10: + # input=data_NCHW and pool_padding is [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]] + pdpd_attrs = { + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[[0,0], [0,0], [2, 1], [2, 1]], + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + pool2d(pooling_type+'Pool_test10', data1_NCHW, pdpd_attrs) + + # example 11: + # input=data_NCHW and poolsize is the multiply by width & height. 
pool_padding is [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]] + pdpd_attrs = { + 'pool_size': 9, + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[[0,0], [0,0], [2, 1], [2, 1]], + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + pool2d(pooling_type+'Pool_test11', data1_NCHW, pdpd_attrs) + + + # adaptive_pool2d + for i, pooling_type in enumerate(pooling_types): + pdpd_attrs = { + 'pool_size': [3,3], + 'pool_type': pooling_type, + 'require_index': False + } + adaptive_pool2d(pooling_type+'AdaptivePool2D_test1', data_NCHW, pdpd_attrs) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pow.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pow.py new file mode 100644 index 00000000000..97e1df3ae72 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_pow.py @@ -0,0 +1,95 @@ +# +# pow paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + + +def pdpd_pow(name : str, x, y, data_type): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.fluid.layers.pow(node_x, y, name='pow') + #FuzzyTest supports int32 & float32 + if data_type == "int64": + out = pdpd.cast(out, "float32") + out = pdpd.cast(out, "float32") + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def pdpd_pow_tensor(name : str, x, y, data_type): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + node_y = pdpd.static.data(name='y', shape=y.shape, dtype=data_type) + out = pdpd.fluid.layers.pow(node_x, node_y, name='pow') + out = pdpd.cast(out, "float32") + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
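+        # Unlike pdpd_pow above, the exponent 'y' is a graph input here and is fed at run time.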
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], + inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + test_cases = [ + { + 'name': "float32", + 'x': np.array([0, 1, 2, -10]).astype("float32"), + 'y': np.array([1.5]).astype("float32"), + 'dtype': "float32", + }, + { + 'name': "int32", + 'x': np.array([0, 1, 2, -10]).astype("int32"), + 'y': np.array([2.0]).astype("float32"), + 'dtype': "int32" + }, + { + 'name': "int64", + 'x': np.array([0, 1, 2]).astype("int64"), + 'y': np.array([30.0]).astype("float32"), + 'dtype': "int64" + }, + { + 'name': "int64_out_of_range", + 'x': np.array([0, 1, 2]).astype("int64"), + 'y': np.array([40]).astype("float32"), + 'dtype': "int64" + } + ] + + for test in test_cases: + pdpd_pow("pow_" + test['name'], test['x'], test['y'], test['dtype']) + + x = np.array([0, 1, 2, -10]).astype("float32") + y = np.array([2.0]).astype("float32") + pdpd_pow_tensor("pow_y_tensor", x, y, 'float32') + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_range.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_range.py new file mode 100644 index 00000000000..c2d7a0b5715 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_range.py @@ -0,0 +1,45 @@ +# +# range paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def pdpd_range(name : str, x, start, end, step, out_type): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + # Range op only support fill_constant input, since dynamic op is not supported in ov + out = pdpd.fluid.layers.range(start, end, step, out_type) + out = pdpd.cast(out, np.float32) + out = pdpd.add(node_x, out) + #out = pdpd.cast(out, np.float32) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
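+        # The range itself is built from constants; adding it to the fed 'x' gives the exported model a runtime input.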
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + start = 1.5 + end = 10.5 + step = 2 + data = np.random.random([1, 5]).astype("float32") + out_type = ["float32", "int32", "int64"] + for i, dtype in enumerate(out_type): + pdpd_range("range"+str(i), data, start, end, step, dtype) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu.py index a0e892fcab4..6952bd27cd8 100644 --- a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu.py +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu.py @@ -35,4 +35,4 @@ def main(): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu6.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu6.py new file mode 100644 index 00000000000..af96e5e690e --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu6.py @@ -0,0 +1,40 @@ +# +# relu6 paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + + +def relu6(name: str, x, threshold: float = 6.0, data_type='float32'): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.fluid.layers.relu6(node_x, threshold=threshold, name='relu6') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data_type = 'float32' + data = np.array([-1, 1, 5]).astype(data_type) + relu6("relu6", data, 4) + relu6("relu6_1", data) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_reshape.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_reshape.py new file mode 100644 index 00000000000..3c2a7dd8f67 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_reshape.py @@ -0,0 +1,78 @@ +# +# reshape paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + +data_type = 'float32' + + +def reshape(name : str, x, out_shape): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.fluid.layers.reshape(x=node_x, shape=out_shape) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
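+        # Static variant: the target shape is passed as a plain Python list (see reshape_tensor below for the tensor form).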
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def reshape_tensor(name : str, x, out_shape, use_tensor_in_list): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + if use_tensor_in_list: + out_shape[0] = pdpd.assign(np.array((out_shape[0],)).astype('int32')) + out = pdpd.fluid.layers.reshape(x=node_x, shape=out_shape) + else: + out_shape = np.array(out_shape).astype('int32') + node_shape = pdpd.assign(out_shape) + out = pdpd.fluid.layers.reshape(x=node_x, shape=node_shape) + + out = pdpd.pow(out, 1) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ]]], dtype=np.float32) + out_shape = [1, 1, 2, 8] + reshape("reshape", data, out_shape) + reshape_tensor("reshape_tensor", data, out_shape, False) + reshape_tensor("reshape_tensor_list", data, out_shape, True) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_rnn_lstm.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_rnn_lstm.py new file mode 100644 index 00000000000..b6b4f5b265c --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_rnn_lstm.py @@ -0,0 +1,66 @@ +import numpy as np +from save_model import saveModel +import sys + + +def pdpd_rnn_lstm(input_size, hidden_size, layers, direction): + import paddle as pdpd + pdpd.enable_static() + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + + num_of_directions = 1 if direction == 'forward' else 2 + with pdpd.static.program_guard(main_program, startup_program): + + rnn = pdpd.nn.LSTM(input_size, hidden_size, layers, direction) + + data = pdpd.static.data(name='x', shape=[4, 3, input_size], dtype='float32') + prev_h = pdpd.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32) + prev_c = pdpd.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32) + + y, (h, c) = rnn(data, (prev_h, prev_c)) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(startup_program) + + outs = exe.run( + feed={'x': np.ones([4, 3, input_size]).astype(np.float32)}, + fetch_list=[y, h, c], + program=main_program) + saveModel("rnn_lstm_layer_" + str(layers) + '_' + str(direction), exe, feedkeys=['x'], + fetchlist=[y, h, c], inputs=[np.ones([4, 3, input_size]).astype(np.float32)], outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1]) + return outs[0] + + +if __name__ == "__main__": + + testCases = [ + { + 'input_size': 2, + 'hidden_size': 2, + 'layers': 1, + 'direction': 'forward', + }, + { + 'input_size': 2, + 'hidden_size': 2, + 'layers': 1, + 'direction': 'bidirectional', + }, + { + 'input_size': 2, + 'hidden_size': 2, + 'layers': 2, + 'direction': 'forward', + }, + { + 'input_size': 2, + 'hidden_size': 2, + 'layers': 
2, + 'direction': 'bidirectional', + } + ] + + for test in testCases: + pdpd_rnn_lstm(test['input_size'], test['hidden_size'], test['layers'], test['direction']) \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_scale.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_scale.py new file mode 100644 index 00000000000..7ceee084fa6 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_scale.py @@ -0,0 +1,93 @@ +# +# pool2d paddle model generator +# +import numpy as np +import sys +from save_model import saveModel + + +def pdpd_scale(name : str, x, scale, bias, attrs : dict, data_type): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.scale(x=node_x, scale=scale, bias=bias, + bias_after_scale=attrs['bias_after_scale']) + #FuzzyTest only support FP32 now, so cast result to fp32 + out = pdpd.cast(out, "float32") + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def pdpd_scale_tensor(name : str, x, scale, bias, attrs : dict, data_type): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + node_scale = pdpd.static.data(name='scale', shape=[1], dtype='float32') + out = pdpd.scale(x=node_x, scale=node_scale, bias=bias, + bias_after_scale=attrs['bias_after_scale']) + #FuzzyTest only support FP32 now, so cast result to fp32 + out = pdpd.cast(out, "float32") + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
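+        # 'scale' is fed at run time as a one-element tensor, exercising the tensor-scale variant of the op.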
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'scale': scale}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x', 'scale'], fetchlist=[out], inputs=[x, np.array([scale]).astype('float32')], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + scale = 2.0 + bias = 1.0 + data = np.random.random([2, 3]).astype("float32") + + test_cases = [ + "float32", + "int32", + "int64" + ] + + pdpd_attrs = { + 'bias_after_scale': True, + } + pdpd_scale_tensor("scale_tensor_bias_after", data, scale, bias, pdpd_attrs, 'float32') + + pdpd_attrs = { + 'bias_after_scale': False, + } + pdpd_scale_tensor("scale_tensor_bias_before", data, scale, bias, pdpd_attrs, 'float32') + + for test in test_cases: + data = np.random.random([2, 3]).astype(test) + pdpd_attrs = { + 'bias_after_scale': True, + } + pdpd_scale("scale_bias_after_" + test, data, scale, bias, pdpd_attrs, test) + + pdpd_attrs = { + 'bias_after_scale': False, + } + pdpd_scale("scale_bias_before_" + test, data, scale, bias, pdpd_attrs, test) + + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_shape.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_shape.py new file mode 100644 index 00000000000..35241487bba --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_shape.py @@ -0,0 +1,38 @@ +# +# pool2d paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def pdpd_shape(name : str, x): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + out = pdpd.shape(node_x) + out = pdpd.cast(out, np.float32) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + + data = np.random.random(size=(2, 3)).astype('float32') + pdpd_shape("shape", data) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_sigmoid.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_sigmoid.py new file mode 100644 index 00000000000..815a016907d --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_sigmoid.py @@ -0,0 +1,39 @@ +# +# sigmoid paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + + +def sigmoid(name: str, x, data_type): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.fluid.layers.sigmoid(node_x, name='sigmoid') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data_type = 'float32' + data = np.array([0, 1, -1]).astype(data_type) + sigmoid("sigmoid", data, data_type) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py new file mode 100644 index 00000000000..bcfabdd28c4 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py @@ -0,0 +1,39 @@ +# +# slice paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + +data_type = 'float32' + +def slice(name : str, x, axes : list, start : list, end : list): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) + out = pdpd.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(4, 3, 5).astype(data_type) + slice("slice", x, axes=[1, 2], start=(0, 1), end=(-1, 3)) + + x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(2, 30).astype(data_type) + slice("slice_1d", x, axes=[0], start=[0], end=[1]) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_softmax.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_softmax.py new file mode 100644 index 00000000000..f797a048606 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_softmax.py @@ -0,0 +1,45 @@ +# +# softmax paddle model generator +# +import numpy as np +import sys +from save_model import saveModel + + +def softmax(name: str, x, axis): + import paddle as pdpd + pdpd.enable_static() + + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + out = pdpd.nn.functional.softmax(x=node_x, axis=axis) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
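+    # Run softmax over the requested axis and store the model and reference outputs for the frontend tests.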
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.array( + [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + ).astype(np.float32) + + softmax("softmax", data, axis=1) + softmax("softmax_minus", data, axis=-1) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_split.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_split.py new file mode 100644 index 00000000000..8ddb4790126 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_split.py @@ -0,0 +1,115 @@ +# +# split paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def split(name : str, x, attrs : dict): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) + out = pdpd.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=attrs['axis']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + print("outputs: ", type(outs),len(outs)) + print("out: ", type(out), len(out)) + + saveModel(name, exe, feedkeys=['x'], fetchlist=out, inputs=[x], outputs=outs, target_dir=sys.argv[1]) + + return outs[0] + + +def split_dim_tensor(name : str, x, attrs : dict, dim): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) + dim_node = pdpd.assign(dim) + out = pdpd.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=dim_node) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + print("outputs: ", type(outs),len(outs)) + print("out: ", type(out), len(out)) + + saveModel(name, exe, feedkeys=['x'], fetchlist=out, inputs=[x], outputs=outs, target_dir=sys.argv[1]) + + return outs[0] + + +def split_test_list_tensor(name : str, x, attrs : dict): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) + section = attrs['num_or_sections'] + section[0] = pdpd.assign(np.array((section[0],)).astype('int32')) + out = pdpd.fluid.layers.split(node_x, num_or_sections=section, dim=attrs['axis']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
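+        # One section size is wrapped in pdpd.assign above, so the split sections are only partially static.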
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + print("outputs: ", type(outs),len(outs)) + print("out: ", type(out), len(out)) + + saveModel(name, exe, feedkeys=['x'], fetchlist=out, inputs=[x], outputs=outs, target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + # split + data_types = ['float32'] #TODOD: ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'] + num_or_sections = [3, [2, 3, 4], [2, 3, -1]] + axes = [1, -2] + + idx = 1 + for t in data_types: + for s in num_or_sections: + for i in axes: + pdpd_attrs = { + 'num_or_sections': s, + 'axis': i + } + data_NCHW = np.random.rand(3,9,5).astype(t) + split("split_test{}".format(idx), data_NCHW, pdpd_attrs) + idx+=1 + + split("split_test_list", data_NCHW, { + 'num_or_sections': [4, 5], + 'axis': 1}) + split_dim_tensor("split_test_dim_int32", data_NCHW, { + 'num_or_sections': 3}, np.array([1,]).astype('int32')) + split_dim_tensor("split_test_dim_int64", data_NCHW, { + 'num_or_sections': 3}, np.array([1,]).astype('int64')) + split_test_list_tensor("split_test_list_tensor", data_NCHW, { + 'num_or_sections': [4, 5], + 'axis': 1}) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_squeeze.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_squeeze.py new file mode 100644 index 00000000000..04eae5cf0b1 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_squeeze.py @@ -0,0 +1,38 @@ +# +# squeeze paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + +data_type = 'float32' + +def squeeze(name : str, x, axes : list): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) + out = pdpd.fluid.layers.squeeze(node_x, axes=axes, name='squeeze') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + data = np.random.rand(1, 3, 1, 4).astype(data_type) + + squeeze("squeeze", data, [0, -2]) + squeeze("squeeze_null_axes", data, []) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_unsqueeze.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_unsqueeze.py new file mode 100644 index 00000000000..e2fee0e97f5 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_unsqueeze.py @@ -0,0 +1,37 @@ +# +# unsqueeze paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + +data_type = 'float32' + +def unsqueeze(name : str, x, axes : list): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) + out = pdpd.fluid.layers.unsqueeze(node_x, axes = axes, name = 'unsqueeze') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + data = np.random.rand(5, 10).astype(data_type) + + unsqueeze("unsqueeze", data, [1]) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_yolo_box.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_yolo_box.py new file mode 100644 index 00000000000..f737068faf5 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_yolo_box.py @@ -0,0 +1,114 @@ +# +# pool2d paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + +def yolo_box(name : str, x, img_size, attrs : dict): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) + node_img_size = pdpd.static.data(name='img_size', shape=img_size.shape, dtype=img_size.dtype) + boxes, scores = pdpd.vision.ops.yolo_box(node_x, + node_img_size, + anchors=attrs['anchors'], + class_num=attrs['class_num'], + conf_thresh=attrs['conf_thresh'], + downsample_ratio=attrs['downsample_ratio'], + clip_bbox=attrs['clip_bbox'], + name=None, + scale_x_y=attrs['scale_x_y']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'img_size': img_size}, + fetch_list=[boxes, scores]) + + # Save inputs in order of ngraph function, to facilite Fuzzy test, + # which accepts inputs and outputs in this order as well. 
+ saveModel(name, exe, feedkeys=['x', 'img_size'], fetchlist=[boxes, scores], + inputs=[x, img_size], outputs=outs, target_dir=sys.argv[1]) + + return outs + + +def TEST1(): + # yolo_box + pdpd_attrs = { + 'name': "yolo_box_default", + 'anchors': [10, 13, 16, 30, 33, 23], + 'class_num': 2, + 'conf_thresh': 0.5, + 'downsample_ratio': 32, + 'clip_bbox': False, + 'scale_x_y': 1.0 + } + + pdpd_attrs_clip_box = { + 'name': "yolo_box_clip_box", + 'anchors': [10, 13, 16, 30, 33, 23], + 'class_num': 2, + 'conf_thresh': 0.5, + 'downsample_ratio': 32, + 'clip_bbox': True, + 'scale_x_y': 1.0 + } + + pdpd_attrs_scale_xy = { + 'name': "yolo_box_scale_xy", + 'anchors': [10, 13, 16, 30, 33, 23], + 'class_num': 2, + 'conf_thresh': 0.5, + 'downsample_ratio': 32, + 'clip_bbox': True, + 'scale_x_y': 1.2 + } + + pdpd_attrs_list = [pdpd_attrs, pdpd_attrs_clip_box, pdpd_attrs_scale_xy] + + N = 32 + num_anchors = int(len(pdpd_attrs['anchors'])//2) + x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), 13, 13) + imgsize_shape = (N, 2) + + data = np.random.random(x_shape).astype('float32') + data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32') + + for item in pdpd_attrs_list: + pred_pdpd = yolo_box(item['name'], data, data_ImSize, item) + + +def TEST2(): + # yolo_box uneven spatial width and height + pdpd_attrs = { + 'name': "yolo_box_uneven_wh", + 'anchors': [10, 13, 16, 30, 33, 23], + 'class_num': 2, + 'conf_thresh': 0.5, + 'downsample_ratio': 32, + 'clip_bbox': False, + 'scale_x_y': 1.0 + } + + N = 16 + SPATIAL_WIDTH = 13 + SPATIAL_HEIGHT = 9 + num_anchors = int(len(pdpd_attrs['anchors'])//2) + x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), SPATIAL_HEIGHT, SPATIAL_WIDTH) + imgsize_shape = (N, 2) + + data = np.random.random(x_shape).astype('float32') + data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32') + + pred_pdpd = yolo_box(pdpd_attrs['name'], data, data_ImSize, pdpd_attrs) + +if __name__ == "__main__": + TEST1() + TEST2() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/save_model.py b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/save_model.py index da3d102e1ef..95727ac9632 100644 --- a/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/save_model.py +++ b/ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/save_model.py @@ -29,26 +29,25 @@ def print_alike(arr): line += end #print(line) return line - + # print(print_array(arr, "}")) - print(print_array(arr, "}")) def saveModel(name, exe, feedkeys:list, fetchlist:list, inputs:list, outputs:list, target_dir:str): model_dir = os.path.join(target_dir, name) if not os.path.exists(model_dir): os.makedirs(model_dir) - print("\n\n------------- %s -----------\n" % (name)) + # print("\n\n------------- %s -----------\n" % (name)) for i, input in enumerate(inputs): - print("INPUT %s :" % (feedkeys[i]), input.shape, input.dtype, "\n") - print_alike(input) + # print("INPUT %s :" % (feedkeys[i]), input.shape, input.dtype, "\n") + # print_alike(input) np.save(os.path.join(model_dir, "input{}".format(i)), input) np.save(os.path.join(model_dir, "input{}.{}.{}".format(i, feedkeys[i], input.dtype)), input) - print("\n") + # print("\n") for i, output in enumerate(outputs): - print("OUTPUT %s :" % (fetchlist[i]),output.shape, output.dtype, "\n") - print_alike(output) + # print("OUTPUT %s :" % (fetchlist[i]),output.shape, output.dtype, "\n") + # print_alike(output) np.save(os.path.join(model_dir, "output{}".format(i)), output) # composited model + 
scattered model @@ -76,5 +75,4 @@ if __name__ == "__main__": [ [1, 2, 3], [4, 5, 6] - ]]]).astype(np.float32) - print_alike(x) \ No newline at end of file + ]]]).astype(np.float32) \ No newline at end of file From ff500b0bed6d5ed7cd0c10cec6c5492a269bfdc8 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 11 Aug 2021 09:38:43 +0300 Subject: [PATCH 03/19] Fixed documentation code style (#7008) --- docs/.clang-format | 13 ++-- docs/template_extension/cpu_kernel.cpp | 36 ++++++--- docs/template_extension/cpu_kernel.hpp | 6 +- docs/template_extension/extension.cpp | 43 ++++++----- docs/template_extension/extension.hpp | 3 +- docs/template_extension/fft_kernel.cpp | 15 ++-- docs/template_extension/fft_kernel.hpp | 6 +- docs/template_extension/fft_op.cpp | 2 +- docs/template_extension/fft_op.hpp | 2 +- docs/template_extension/op.cpp | 2 +- .../src/template_async_infer_request.cpp | 10 ++- .../src/template_async_infer_request.hpp | 6 +- docs/template_plugin/src/template_config.cpp | 3 +- docs/template_plugin/src/template_config.hpp | 4 +- .../src/template_executable_network.cpp | 53 +++++++++---- .../src/template_executable_network.hpp | 15 ++-- .../src/template_infer_request.cpp | 69 ++++++++++------- .../src/template_infer_request.hpp | 3 +- docs/template_plugin/src/template_plugin.cpp | 76 +++++++++++++------ docs/template_plugin/src/template_plugin.hpp | 17 +++-- .../preprocessing/mean_image_or_value.cpp | 5 +- .../preprocessing/preprocessing.cpp | 14 ++-- .../preprocessing/std_scale.cpp | 5 +- .../template_function_transformation.cpp | 6 +- .../template_pattern_transformation.cpp | 7 +- 25 files changed, 277 insertions(+), 144 deletions(-) diff --git a/docs/.clang-format b/docs/.clang-format index c93e6254b5b..ebe747b7838 100644 --- a/docs/.clang-format +++ b/docs/.clang-format @@ -1,6 +1,7 @@ BasedOnStyle: Google IndentWidth: 4 UseTab: Never +ColumnLimit: 120 Language: Cpp Standard: Cpp11 @@ -8,18 +9,20 @@ Standard: Cpp11 AccessModifierOffset: -4 AlignConsecutiveMacros: true AllowAllArgumentsOnNextLine: false +AllowAllConstructorInitializersOnNextLine: false AllowAllParametersOfDeclarationOnNextLine: false AllowShortFunctionsOnASingleLine: Empty AllowShortIfStatementsOnASingleLine: Never AllowShortLambdasOnASingleLine: Empty AllowShortLoopsOnASingleLine: false AlwaysBreakBeforeMultilineStrings: false -ColumnLimit: 160 -# Specialize this comment pragma in order to avoid changes in SEA copyrights +BinPackArguments: false +BinPackParameters: false CommentPragmas: '^#' DerivePointerAlignment: false FixNamespaceComments: true IndentCaseLabels: false -IndentPPDirectives: BeforeHash -SpaceBeforeCpp11BracedList: true -SpaceBeforeCtorInitializerColon: false \ No newline at end of file +IndentPPDirectives: AfterHash +ForEachMacros: + - foreach + - FOREACH_CHILD diff --git a/docs/template_extension/cpu_kernel.cpp b/docs/template_extension/cpu_kernel.cpp index b1d426b1582..84a57dbe9e9 100644 --- a/docs/template_extension/cpu_kernel.cpp +++ b/docs/template_extension/cpu_kernel.cpp @@ -22,7 +22,8 @@ OpImplementation::OpImplementation(const std::shared_ptr& node) { IE_THROW() << "Cannot create implementation for op with dynamic shapes!"; if (castedNode->get_input_shape(0).size() != 4 || castedNode->get_output_shape(0).size() != 4) IE_THROW() << "Operation supports only 4d tensors for input and output."; - if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32) + if (castedNode->get_input_element_type(0) != ngraph::element::f32 
|| + castedNode->get_output_element_type(0) != ngraph::element::f32) IE_THROW() << "Operation supports only FP32 tensors."; add = castedNode->getAddAttr(); inShape = castedNode->get_input_shape(0); @@ -34,9 +35,12 @@ OpImplementation::OpImplementation(const std::shared_ptr& node) { //! [cpu_implementation:ctor] //! [cpu_implementation:getSupportedConfigurations] -InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector& conf, - InferenceEngine::ResponseDesc* resp) noexcept { - auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) { +InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations( + std::vector& conf, + InferenceEngine::ResponseDesc* resp) noexcept { + auto createConfig = [](const InferenceEngine::SizeVector inShape, + const InferenceEngine::SizeVector& outShape, + bool planar) { InferenceEngine::LayerConfig config; config.dynBatchSupport = false; InferenceEngine::DataConfig inData; @@ -45,9 +49,11 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve // Allow any offset before data size_t offset((std::numeric_limits::max)()); if (planar) { - inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset}); + inData.desc = + InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset}); config.inConfs.push_back(inData); - outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset}); + outData.desc = + InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset}); config.outConfs.push_back(outData); } else { // Add blocked (nChw8c) format @@ -64,9 +70,11 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve InferenceEngine::SizeVector outBlkDims = outShape; outBlkDims[1] = div_up(outBlkDims[1], 8); outBlkDims.push_back(8); - inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset}); + inData.desc = + InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset}); config.inConfs.push_back(inData); - outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset}); + outData.desc = + InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset}); config.outConfs.push_back(outData); } return config; @@ -87,7 +95,8 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve //! [cpu_implementation:getSupportedConfigurations] //! [cpu_implementation:init] -InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept { +InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config, + InferenceEngine::ResponseDesc* resp) noexcept { try { if (config.inConfs.size() != 1 || config.outConfs.size() != 1) { IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!"; @@ -115,10 +124,13 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& //! [cpu_implementation:init] //! 
[cpu_implementation:execute] -InferenceEngine::StatusCode OpImplementation::execute(std::vector& inputs, std::vector& outputs, +InferenceEngine::StatusCode OpImplementation::execute(std::vector& inputs, + std::vector& outputs, InferenceEngine::ResponseDesc* resp) noexcept { - const float* src_data = inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); - float* dst_data = outputs[0]->buffer().as() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); + const float* src_data = + inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); + float* dst_data = + outputs[0]->buffer().as() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); for (size_t i = 0; i < inputs[0]->size(); i++) { dst_data[i] = src_data[i] + add; diff --git a/docs/template_extension/cpu_kernel.hpp b/docs/template_extension/cpu_kernel.hpp index 901d33093b5..9c71bdb0cef 100644 --- a/docs/template_extension/cpu_kernel.hpp +++ b/docs/template_extension/cpu_kernel.hpp @@ -16,8 +16,10 @@ public: explicit OpImplementation(const std::shared_ptr& node); InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc* resp) noexcept override; - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override; - InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, + InferenceEngine::ResponseDesc* resp) noexcept override; + InferenceEngine::StatusCode execute(std::vector& inputs, + std::vector& outputs, InferenceEngine::ResponseDesc* resp) noexcept override; private: diff --git a/docs/template_extension/extension.cpp b/docs/template_extension/extension.cpp index 7a0874f2bea..4c5885a090f 100644 --- a/docs/template_extension/extension.cpp +++ b/docs/template_extension/extension.cpp @@ -7,12 +7,12 @@ #include "cpu_kernel.hpp" #include "op.hpp" #ifdef OPENCV_IMPORT_ENABLED - #include "fft_kernel.hpp" - #include "fft_op.hpp" +# include "fft_kernel.hpp" +# include "fft_op.hpp" #endif #include #ifdef NGRAPH_ONNX_IMPORT_ENABLED - #include +# include #endif #include @@ -25,18 +25,24 @@ using namespace TemplateExtension; //! 
[extension:ctor] Extension::Extension() { #ifdef NGRAPH_ONNX_IMPORT_ENABLED - ngraph::onnx_import::register_operator(Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { - ngraph::OutputVector ng_inputs {node.get_ng_inputs()}; - int64_t add = node.get_attribute_value("add"); - return {std::make_shared(ng_inputs.at(0), add)}; - }); - #ifdef OPENCV_IMPORT_ENABLED - ngraph::onnx_import::register_operator(FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { - ngraph::OutputVector ng_inputs {node.get_ng_inputs()}; - bool inverse = node.get_attribute_value("inverse"); - return {std::make_shared(ng_inputs.at(0), inverse)}; - }); - #endif + ngraph::onnx_import::register_operator(Operation::type_info.name, + 1, + "custom_domain", + [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { + ngraph::OutputVector ng_inputs{node.get_ng_inputs()}; + int64_t add = node.get_attribute_value("add"); + return {std::make_shared(ng_inputs.at(0), add)}; + }); +# ifdef OPENCV_IMPORT_ENABLED + ngraph::onnx_import::register_operator(FFTOp::type_info.name, + 1, + "custom_domain", + [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { + ngraph::OutputVector ng_inputs{node.get_ng_inputs()}; + bool inverse = node.get_attribute_value("inverse"); + return {std::make_shared(ng_inputs.at(0), inverse)}; + }); +# endif #endif } //! [extension:ctor] @@ -45,9 +51,9 @@ Extension::Extension() { Extension::~Extension() { #ifdef NGRAPH_ONNX_IMPORT_ENABLED ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain"); - #ifdef OPENCV_IMPORT_ENABLED +# ifdef OPENCV_IMPORT_ENABLED ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain"); - #endif // OPENCV_IMPORT_ENABLED +# endif // OPENCV_IMPORT_ENABLED #endif // NGRAPH_ONNX_IMPORT_ENABLED } //! 
[extension:dtor] @@ -92,7 +98,8 @@ std::vector Extension::getImplTypes(const std::shared_ptr& node, const std::string& implType) { +InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr& node, + const std::string& implType) { if (implType == "CPU") { if (std::dynamic_pointer_cast(node)) { return std::make_shared(node); diff --git a/docs/template_extension/extension.hpp b/docs/template_extension/extension.hpp index 0cc3b5816fe..407719f4e3a 100644 --- a/docs/template_extension/extension.hpp +++ b/docs/template_extension/extension.hpp @@ -25,7 +25,8 @@ public: std::map getOpSets() override; std::vector getImplTypes(const std::shared_ptr& node) override; - InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr& node, const std::string& implType) override; + InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr& node, + const std::string& implType) override; }; } // namespace TemplateExtension diff --git a/docs/template_extension/fft_kernel.cpp b/docs/template_extension/fft_kernel.cpp index 3fcf71a8f64..b104af5a3da 100644 --- a/docs/template_extension/fft_kernel.cpp +++ b/docs/template_extension/fft_kernel.cpp @@ -21,14 +21,16 @@ FFTImpl::FFTImpl(const std::shared_ptr& node) { IE_THROW() << "Cannot create implementation for operation with incorrect number of inputs or outputs!"; if (castedNode->get_input_partial_shape(0).is_dynamic() || castedNode->get_output_partial_shape(0).is_dynamic()) IE_THROW() << "Cannot create implementation for op with dynamic shapes!"; - if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32) + if (castedNode->get_input_element_type(0) != ngraph::element::f32 || + castedNode->get_output_element_type(0) != ngraph::element::f32) IE_THROW() << "Operation supports only FP32 tensors."; inpShape = castedNode->get_input_shape(0); outShape = castedNode->get_output_shape(0); inverse = castedNode->inverse; } -InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc* resp) noexcept { +InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector& conf, + InferenceEngine::ResponseDesc* resp) noexcept { std::vector inDataConfig; std::vector outDataConfig; InferenceEngine::SizeVector order(inpShape.size()); @@ -55,7 +57,8 @@ InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vectorbuffer()); } -InferenceEngine::StatusCode FFTImpl::execute(std::vector& inputs, std::vector& outputs, +InferenceEngine::StatusCode FFTImpl::execute(std::vector& inputs, + std::vector& outputs, InferenceEngine::ResponseDesc* resp) noexcept { cv::Mat inp = infEngineBlobToMat(inputs[0]); cv::Mat out = infEngineBlobToMat(outputs[0]); @@ -95,7 +99,8 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector components = {cv::Mat(h, w, CV_32F, inp.ptr(i, 0)), cv::Mat(h, w, CV_32F, inp.ptr(i, 1))}; + std::vector components = {cv::Mat(h, w, CV_32F, inp.ptr(i, 0)), + cv::Mat(h, w, CV_32F, inp.ptr(i, 1))}; cv::merge(components, complex); if (!inverse) diff --git a/docs/template_extension/fft_kernel.hpp b/docs/template_extension/fft_kernel.hpp index f3283288861..8de1e841590 100644 --- a/docs/template_extension/fft_kernel.hpp +++ b/docs/template_extension/fft_kernel.hpp @@ -16,8 +16,10 @@ public: explicit FFTImpl(const std::shared_ptr& node); InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc* resp) noexcept override; - 
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override; - InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, + InferenceEngine::ResponseDesc* resp) noexcept override; + InferenceEngine::StatusCode execute(std::vector& inputs, + std::vector& outputs, InferenceEngine::ResponseDesc* resp) noexcept override; private: diff --git a/docs/template_extension/fft_op.cpp b/docs/template_extension/fft_op.cpp index b71a06bc746..028c3bf9399 100644 --- a/docs/template_extension/fft_op.cpp +++ b/docs/template_extension/fft_op.cpp @@ -9,7 +9,7 @@ using namespace TemplateExtension; constexpr ngraph::NodeTypeInfo FFTOp::type_info; -FFTOp::FFTOp(const ngraph::Output& inp, bool _inverse): Op({inp}) { +FFTOp::FFTOp(const ngraph::Output& inp, bool _inverse) : Op({inp}) { constructor_validate_and_infer_types(); inverse = _inverse; } diff --git a/docs/template_extension/fft_op.hpp b/docs/template_extension/fft_op.hpp index 2e79888cfd3..7914a1c2083 100644 --- a/docs/template_extension/fft_op.hpp +++ b/docs/template_extension/fft_op.hpp @@ -11,7 +11,7 @@ namespace TemplateExtension { class FFTOp : public ngraph::op::Op { public: - static constexpr ngraph::NodeTypeInfo type_info {"FFT", 0}; + static constexpr ngraph::NodeTypeInfo type_info{"FFT", 0}; const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; } diff --git a/docs/template_extension/op.cpp b/docs/template_extension/op.cpp index ec53c2ca26c..f451d9c5cac 100644 --- a/docs/template_extension/op.cpp +++ b/docs/template_extension/op.cpp @@ -9,7 +9,7 @@ using namespace TemplateExtension; //! [op:ctor] NGRAPH_RTTI_DEFINITION(TemplateExtension::Operation, "Template", 0); -Operation::Operation(const ngraph::Output& arg, int64_t add): Op({arg}), add(add) { +Operation::Operation(const ngraph::Output& arg, int64_t add) : Op({arg}), add(add) { constructor_validate_and_infer_types(); } //! [op:ctor] diff --git a/docs/template_plugin/src/template_async_infer_request.cpp b/docs/template_plugin/src/template_async_infer_request.cpp index bcdd3b6f2a2..f4033f23c3b 100644 --- a/docs/template_plugin/src/template_async_infer_request.cpp +++ b/docs/template_plugin/src/template_async_infer_request.cpp @@ -9,10 +9,13 @@ using namespace TemplatePlugin; // ! [async_infer_request:ctor] -TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor, +TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, + const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor, const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) - : AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), _inferRequest(inferRequest), _waitExecutor(waitExecutor) { + : AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), + _inferRequest(inferRequest), + _waitExecutor(waitExecutor) { // In current implementation we have CPU only tasks and no needs in 2 executors // So, by default single stage pipeline is created. // This stage executes InferRequest::Infer() using cpuTaskExecutor. 
@@ -23,7 +26,8 @@ TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest: if (remoteDevice) { _pipeline = {{cpuTaskExecutor, [this] { - OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::PreprocessingAndStartPipeline"); + OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, + "TemplateAsyncInferRequest::PreprocessingAndStartPipeline"); _inferRequest->inferPreprocess(); _inferRequest->startPipeline(); }}, diff --git a/docs/template_plugin/src/template_async_infer_request.hpp b/docs/template_plugin/src/template_async_infer_request.hpp index 942f71a616f..52250e86afd 100644 --- a/docs/template_plugin/src/template_async_infer_request.hpp +++ b/docs/template_plugin/src/template_async_infer_request.hpp @@ -13,8 +13,10 @@ namespace TemplatePlugin { // ! [async_infer_request:header] class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault { public: - TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& taskExecutor, - const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor); + TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, + const InferenceEngine::ITaskExecutor::Ptr& taskExecutor, + const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, + const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor); ~TemplateAsyncInferRequest(); diff --git a/docs/template_plugin/src/template_config.cpp b/docs/template_plugin/src/template_config.cpp index 3d9d4e488fe..8c2dc7ebbbe 100644 --- a/docs/template_plugin/src/template_config.cpp +++ b/docs/template_plugin/src/template_config.cpp @@ -23,7 +23,8 @@ Configuration::Configuration(const ConfigMap& config, const Configuration& defau if (TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) == key) { _streamsExecutorConfig.SetConfig(CONFIG_KEY(CPU_THROUGHPUT_STREAMS), value); - } else if (streamExecutorConfigKeys.end() != std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) { + } else if (streamExecutorConfigKeys.end() != + std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) { _streamsExecutorConfig.SetConfig(key, value); } else if (CONFIG_KEY(DEVICE_ID) == key) { deviceId = std::stoi(value); diff --git a/docs/template_plugin/src/template_config.hpp b/docs/template_plugin/src/template_config.hpp index d49bf491327..ce5cb39595f 100644 --- a/docs/template_plugin/src/template_config.hpp +++ b/docs/template_plugin/src/template_config.hpp @@ -21,7 +21,9 @@ struct Configuration { Configuration& operator=(const Configuration&) = default; Configuration& operator=(Configuration&&) = default; - explicit Configuration(const ConfigMap& config, const Configuration& defaultCfg = {}, const bool throwOnUnsupported = true); + explicit Configuration(const ConfigMap& config, + const Configuration& defaultCfg = {}, + const bool throwOnUnsupported = true); InferenceEngine::Parameter Get(const std::string& name) const; diff --git a/docs/template_plugin/src/template_executable_network.cpp b/docs/template_plugin/src/template_executable_network.cpp index 4aba4622e50..1231f4970c2 100644 --- a/docs/template_plugin/src/template_executable_network.cpp +++ b/docs/template_plugin/src/template_executable_network.cpp @@ -18,8 +18,10 @@ using namespace TemplatePlugin; // ! 
[executable_network:ctor_cnnnetwork] TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap, - const Configuration& cfg, const Plugin::Ptr& plugin) + const InferenceEngine::InputsDataMap& inputInfoMap, + const InferenceEngine::OutputsDataMap& outputsInfoMap, + const Configuration& cfg, + const Plugin::Ptr& plugin) : InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation _cfg(cfg), _plugin(plugin) { @@ -40,7 +42,11 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr(&dataSize), sizeof(dataSize)); if (0 != dataSize) { dataBlob = InferenceEngine::make_shared_blob( - InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {static_cast(dataSize)}, InferenceEngine::Layout::C)); + InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, + {static_cast(dataSize)}, + InferenceEngine::Layout::C)); dataBlob->allocate(); model.read(dataBlob->buffer(), dataSize); } @@ -84,7 +92,8 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream& model, const // ! [executable_network:map_graph] // forward declaration -std::shared_ptr TransformNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, +std::shared_ptr TransformNetwork(const std::shared_ptr& function, + const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap); void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr& function, @@ -117,29 +126,36 @@ void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptrgetIdleCPUStreamsExecutor(streamsExecutorConfig); - // NOTE: callback Executor is not configured. So callback will be called in the thread of the last stage of inference request pipeline - // _callbackExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateCallbackExecutor"}); + // NOTE: callback Executor is not configured. So callback will be called in the thread of the last stage of + // inference request pipeline _callbackExecutor = + // InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateCallbackExecutor"}); } // ! [executable_network:init_executor] // ! [executable_network:create_infer_request_impl] -InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs, - InferenceEngine::OutputsDataMap networkOutputs) { - return std::make_shared(networkInputs, networkOutputs, std::static_pointer_cast(shared_from_this())); +InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl( + InferenceEngine::InputsDataMap networkInputs, + InferenceEngine::OutputsDataMap networkOutputs) { + return std::make_shared(networkInputs, + networkOutputs, + std::static_pointer_cast(shared_from_this())); } // ! [executable_network:create_infer_request_impl] // ! 
[executable_network:create_infer_request] InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() { auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs); - return std::make_shared(std::static_pointer_cast(internalRequest), _taskExecutor, _plugin->_waitExecutor, + return std::make_shared(std::static_pointer_cast(internalRequest), + _taskExecutor, + _plugin->_waitExecutor, _callbackExecutor); } // ! [executable_network:create_infer_request] @@ -154,11 +170,16 @@ InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const st InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string& name) const { // TODO: return more supported values for metrics if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) { - IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector {METRIC_KEY(NETWORK_NAME), METRIC_KEY(SUPPORTED_METRICS), - METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)}); + IE_SET_METRIC_RETURN(SUPPORTED_METRICS, + std::vector{METRIC_KEY(NETWORK_NAME), + METRIC_KEY(SUPPORTED_METRICS), + METRIC_KEY(SUPPORTED_CONFIG_KEYS), + METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)}); } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { - std::vector configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)}; - auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys(); + std::vector configKeys = {CONFIG_KEY(DEVICE_ID), + CONFIG_KEY(PERF_COUNT), + TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)}; + auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys(); for (auto&& configKey : streamExecutorConfigKeys) { configKeys.emplace_back(configKey); } diff --git a/docs/template_plugin/src/template_executable_network.hpp b/docs/template_plugin/src/template_executable_network.hpp index a68df02f958..f75c59411bf 100644 --- a/docs/template_plugin/src/template_executable_network.hpp +++ b/docs/template_plugin/src/template_executable_network.hpp @@ -23,16 +23,20 @@ class Plugin; // ! 
[executable_network:header] class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault { public: - ExecutableNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, - const InferenceEngine::OutputsDataMap& outputsInfoMap, const Configuration& cfg, const std::shared_ptr& plugin); + ExecutableNetwork(const std::shared_ptr& function, + const InferenceEngine::InputsDataMap& inputInfoMap, + const InferenceEngine::OutputsDataMap& outputsInfoMap, + const Configuration& cfg, + const std::shared_ptr& plugin); ExecutableNetwork(std::istream& model, const Configuration& cfg, const std::shared_ptr& plugin); // Methods from a base class ExecutableNetworkThreadSafeDefault void Export(std::ostream& model) override; - InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs, - InferenceEngine::OutputsDataMap networkOutputs) override; + InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl( + InferenceEngine::InputsDataMap networkInputs, + InferenceEngine::OutputsDataMap networkOutputs) override; InferenceEngine::IInferRequestInternal::Ptr CreateInferRequest() override; InferenceEngine::Parameter GetMetric(const std::string& name) const override; InferenceEngine::Parameter GetConfig(const std::string& name) const override; @@ -40,7 +44,8 @@ public: private: friend class TemplateInferRequest; - void CompileNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, + void CompileNetwork(const std::shared_ptr& function, + const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap); void InitExecutor(); diff --git a/docs/template_plugin/src/template_infer_request.cpp b/docs/template_plugin/src/template_infer_request.cpp index 20c47bfd19e..2f9d0446ca1 100644 --- a/docs/template_plugin/src/template_infer_request.cpp +++ b/docs/template_plugin/src/template_infer_request.cpp @@ -23,19 +23,25 @@ using namespace InferenceEngine; using Time = std::chrono::high_resolution_clock; // ! 
[infer_request:ctor] -TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs, +TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, + const InferenceEngine::OutputsDataMap& networkOutputs, const std::shared_ptr& executableNetwork) - : IInferRequestInternal(networkInputs, networkOutputs), _executableNetwork(executableNetwork) { + : IInferRequestInternal(networkInputs, networkOutputs), + _executableNetwork(executableNetwork) { // TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1)); std::string name = _executableNetwork->_function->get_friendly_name() + "_Req" + requestID; _profilingTask = { - openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Preprocess"), - openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Postprocess"), - openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_StartPipline"), - openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_WaitPipline"), + openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + + "_Preprocess"), + openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + + "_Postprocess"), + openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + + "_StartPipline"), + openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + + "_WaitPipline"), }; _executable = _executableNetwork->_plugin->_backend->compile(_executableNetwork->_function); @@ -60,7 +66,10 @@ void TemplateInferRequest::allocateDeviceBuffers() { } template -static void AllocateImpl(const BlobDataMap& userDataMap, BlobMap& userBlobMap, BlobMap& deviceBlobMap, GetNetworkPrecisionF&& GetNetworkPrecision, +static void AllocateImpl(const BlobDataMap& userDataMap, + BlobMap& userBlobMap, + BlobMap& deviceBlobMap, + GetNetworkPrecisionF&& GetNetworkPrecision, bool isInputBlob = true) { for (auto&& userData : userDataMap) { const auto& dims = userData.second->getTensorDesc().getDims(); @@ -95,7 +104,9 @@ void TemplateInferRequest::allocateBlobs() { }); auto&& results = _executableNetwork->_function->get_results(); AllocateImpl( - _networkOutputs, _outputs, _networkOutputBlobs, + _networkOutputs, + _outputs, + _networkOutputBlobs, [&](const std::string& blobName) { return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type(); }, @@ -114,8 +125,10 @@ void TemplateInferRequest::InferImpl() { template static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { - ngraph::runtime::reference::convert(InferenceEngine::as(src)->rmap().as(), - InferenceEngine::as(dst)->wmap().as(), src->size()); + ngraph::runtime::reference::convert( + InferenceEngine::as(src)->rmap().as(), + InferenceEngine::as(dst)->wmap().as(), + src->size()); } static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { @@ -128,8 +141,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { blobCopy(src, dst); } break; default: { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " - << 
dst->getTensorDesc().getPrecision(); + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() + << " to " << dst->getTensorDesc().getPrecision(); } } } break; @@ -141,8 +154,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { blobCopy(src, dst); } break; default: { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " - << dst->getTensorDesc().getPrecision(); + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() + << " to " << dst->getTensorDesc().getPrecision(); } } } break; @@ -154,8 +167,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { blobCopy(src, dst); } break; default: { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " - << dst->getTensorDesc().getPrecision(); + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() + << " to " << dst->getTensorDesc().getPrecision(); } } } break; @@ -167,8 +180,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { blobCopy(src, dst); } break; default: { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " - << dst->getTensorDesc().getPrecision(); + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() + << " to " << dst->getTensorDesc().getPrecision(); } } } break; @@ -180,8 +193,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { blobCopy(src, dst); } break; default: { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " - << dst->getTensorDesc().getPrecision(); + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() + << " to " << dst->getTensorDesc().getPrecision(); } } } break; @@ -193,8 +206,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { blobCopy(src, dst); } break; default: { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " - << dst->getTensorDesc().getPrecision(); + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() + << " to " << dst->getTensorDesc().getPrecision(); } } } break; @@ -206,8 +219,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { blobCopy(src, dst); } break; default: { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " - << dst->getTensorDesc().getPrecision(); + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() + << " to " << dst->getTensorDesc().getPrecision(); } } } break; @@ -230,7 +243,9 @@ void TemplateInferRequest::inferPreprocess() { const auto& parameterShape = parameter->get_shape(); const auto& parameterType = parameter->get_element_type(); _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor( - parameterType, parameterShape, InferenceEngine::as(networkInput.second)->rmap().as()); + parameterType, + parameterShape, + InferenceEngine::as(networkInput.second)->rmap().as()); } for (auto&& output : _outputs) { auto outputBlob = output.second; @@ -243,7 +258,9 @@ void 
TemplateInferRequest::inferPreprocess() { const auto& resultShape = result->get_shape(); const auto& resultType = result->get_element_type(); _outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor( - resultType, resultShape, InferenceEngine::as(networkOutput)->wmap().as()); + resultType, + resultShape, + InferenceEngine::as(networkOutput)->wmap().as()); } _durations[Preprocess] = Time::now() - start; } diff --git a/docs/template_plugin/src/template_infer_request.hpp b/docs/template_plugin/src/template_infer_request.hpp index ca92c76bbbd..0e1b904ccdb 100644 --- a/docs/template_plugin/src/template_infer_request.hpp +++ b/docs/template_plugin/src/template_infer_request.hpp @@ -26,7 +26,8 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal { public: typedef std::shared_ptr Ptr; - TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs, + TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, + const InferenceEngine::OutputsDataMap& networkOutputs, const std::shared_ptr& executableNetwork); ~TemplateInferRequest(); diff --git a/docs/template_plugin/src/template_plugin.cpp b/docs/template_plugin/src/template_plugin.cpp index c92918983cd..20eb3fc240d 100644 --- a/docs/template_plugin/src/template_plugin.cpp +++ b/docs/template_plugin/src/template_plugin.cpp @@ -38,7 +38,8 @@ Plugin::Plugin() { _backend = ngraph::runtime::Backend::create("INTERPRETER"); // create default stream executor with a given name - _waitExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateWaitExecutor"}); + _waitExecutor = + InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateWaitExecutor"}); } // ! [plugin:ctor] @@ -54,7 +55,8 @@ Plugin::~Plugin() { // ! [plugin:transform_network] -std::shared_ptr TransformNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, +std::shared_ptr TransformNetwork(const std::shared_ptr& function, + const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap) { // 1. Copy ngraph::Function first to apply some transformations which modify original ngraph::Function auto transformedNetwork = ngraph::clone_function(*function); @@ -70,13 +72,15 @@ std::shared_ptr TransformNetwork(const std::shared_ptrget_parameters()) { if (param->get_element_type() == ngraph::element::f16 && - inputInfoMap.at(param->get_friendly_name())->getTensorDesc().getPrecision() != InferenceEngine::Precision::FP16) { + inputInfoMap.at(param->get_friendly_name())->getTensorDesc().getPrecision() != + InferenceEngine::Precision::FP16) { needF16toF32 = true; break; } } if (needF16toF32) - passManager.register_pass(precisions_array {{ngraph::element::f16, ngraph::element::f32}}); + passManager.register_pass( + precisions_array{{ngraph::element::f16, ngraph::element::f32}}); // Example: register plugin specific transformation passManager.register_pass(); passManager.register_pass(); @@ -92,32 +96,41 @@ std::shared_ptr TransformNetwork(const std::shared_ptr(network.getFunction(), networkInputs, networkOutputs, fullConfig, + auto fullConfig = Configuration{config, _cfg}; + return std::make_shared(network.getFunction(), + networkInputs, + networkOutputs, + fullConfig, std::static_pointer_cast(shared_from_this())); } // ! [plugin:load_exe_network_impl] // ! 
[plugin:import_network] -InferenceEngine::IExecutableNetworkInternal::Ptr Plugin::ImportNetwork(std::istream& modelStream, const std::map& config) { +InferenceEngine::IExecutableNetworkInternal::Ptr Plugin::ImportNetwork( + std::istream& modelStream, + const std::map& config) { OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::ImportNetwork"); - auto fullConfig = Configuration {config, _cfg}; - return std::make_shared(modelStream, fullConfig, std::static_pointer_cast(shared_from_this())); + auto fullConfig = Configuration{config, _cfg}; + return std::make_shared(modelStream, + fullConfig, + std::static_pointer_cast(shared_from_this())); } // ! [plugin:import_network] // ! [plugin:query_network] -InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) const { +InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork& network, + const ConfigMap& config) const { OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::QueryNetwork"); - Configuration fullConfig {config, _cfg, false}; + Configuration fullConfig{config, _cfg, false}; auto function = network.getFunction(); // 1. First of all we should store initial input operation set @@ -160,7 +173,8 @@ InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine:: // 5. If some housekeeping nodes were not added - add them. if (InferenceEngine::details::contains(supported, node->get_friendly_name())) { for (auto&& inputNodeOutput : node->input_values()) { - if (ngraph::op::is_constant(inputNodeOutput.get_node()) || ngraph::op::is_parameter(inputNodeOutput.get_node())) { + if (ngraph::op::is_constant(inputNodeOutput.get_node()) || + ngraph::op::is_parameter(inputNodeOutput.get_node())) { supported.emplace(inputNodeOutput.get_node()->get_friendly_name()); } } @@ -175,11 +189,14 @@ InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine:: // 6. Eliminate subgraphs that consist of housekeeping nodes only if (ngraph::op::is_constant(node) || ngraph::op::is_parameter(node)) { - if (!InferenceEngine::details::contains(supported, node->output(0).get_target_inputs().begin()->get_node()->get_friendly_name())) { + if (!InferenceEngine::details::contains( + supported, + node->output(0).get_target_inputs().begin()->get_node()->get_friendly_name())) { supported.erase(node->get_friendly_name()); } } else if (ngraph::op::is_output(node)) { - if (!InferenceEngine::details::contains(supported, node->input_values().begin()->get_node()->get_friendly_name())) { + if (!InferenceEngine::details::contains(supported, + node->input_values().begin()->get_node()->get_friendly_name())) { supported.erase(node->get_friendly_name()); } } @@ -204,27 +221,36 @@ void Plugin::AddExtension(const InferenceEngine::IExtensionPtr& /*extension*/) { // ! [plugin:set_config] void Plugin::SetConfig(const ConfigMap& config) { - _cfg = Configuration {config, _cfg}; + _cfg = Configuration{config, _cfg}; } // ! [plugin:set_config] // ! [plugin:get_config] -InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map& /*options*/) const { +InferenceEngine::Parameter Plugin::GetConfig( + const std::string& name, + const std::map& /*options*/) const { return _cfg.Get(name); } // ! [plugin:get_config] // ! 
[plugin:get_metric] -InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map& options) const { +InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, + const std::map& options) const { if (METRIC_KEY(SUPPORTED_METRICS) == name) { - std::vector supportedMetrics = {METRIC_KEY(AVAILABLE_DEVICES), METRIC_KEY(SUPPORTED_METRICS), - METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(FULL_DEVICE_NAME), - METRIC_KEY(IMPORT_EXPORT_SUPPORT), METRIC_KEY(DEVICE_ARCHITECTURE), - METRIC_KEY(OPTIMIZATION_CAPABILITIES), METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)}; + std::vector supportedMetrics = {METRIC_KEY(AVAILABLE_DEVICES), + METRIC_KEY(SUPPORTED_METRICS), + METRIC_KEY(SUPPORTED_CONFIG_KEYS), + METRIC_KEY(FULL_DEVICE_NAME), + METRIC_KEY(IMPORT_EXPORT_SUPPORT), + METRIC_KEY(DEVICE_ARCHITECTURE), + METRIC_KEY(OPTIMIZATION_CAPABILITIES), + METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)}; IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics); } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { - std::vector configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)}; - auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys(); + std::vector configKeys = {CONFIG_KEY(DEVICE_ID), + CONFIG_KEY(PERF_COUNT), + TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)}; + auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys(); for (auto&& configKey : streamExecutorConfigKeys) { if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) { configKeys.emplace_back(configKey); @@ -251,7 +277,7 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std: } else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) { // TODO: fill with actual values using uint = unsigned int; - IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint {1}, uint {1}, uint {1})); + IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1})); } else { IE_THROW(NotFound) << "Unsupported device metric: " << name; } diff --git a/docs/template_plugin/src/template_plugin.hpp b/docs/template_plugin/src/template_plugin.hpp index 71c37410ea7..c0c7625330c 100644 --- a/docs/template_plugin/src/template_plugin.hpp +++ b/docs/template_plugin/src/template_plugin.hpp @@ -23,12 +23,19 @@ public: void SetConfig(const std::map& config) override; InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network, const std::map& config) const override; - InferenceEngine::IExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, - const std::map& config) override; + InferenceEngine::IExecutableNetworkInternal::Ptr LoadExeNetworkImpl( + const InferenceEngine::CNNNetwork& network, + const std::map& config) override; void AddExtension(const std::shared_ptr& extension) override; - InferenceEngine::Parameter GetConfig(const std::string& name, const std::map& options) const override; - InferenceEngine::Parameter GetMetric(const std::string& name, const std::map& options) const override; - InferenceEngine::IExecutableNetworkInternal::Ptr ImportNetwork(std::istream& model, const std::map& config) override; + InferenceEngine::Parameter GetConfig( + const std::string& name, + const std::map& options) const override; + InferenceEngine::Parameter GetMetric( + const std::string& name, + const std::map& options) const override; + 
InferenceEngine::IExecutableNetworkInternal::Ptr ImportNetwork( + std::istream& model, + const std::map& config) override; private: friend class ExecutableNetwork; diff --git a/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp b/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp index 39fd7942387..ef9a66cdea5 100644 --- a/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp +++ b/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp @@ -28,7 +28,10 @@ ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap& inputInfoMap) { } auto mean_const = it->second; - NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32, "Mean for ", param->get_friendly_name(), " must have f32 type"); + NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32, + "Mean for ", + param->get_friendly_name(), + " must have f32 type"); auto copy_param = param->clone_with_new_inputs({}); auto sub = std::make_shared(copy_param, mean_const); diff --git a/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp b/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp index 1d391d8f49a..f9557b4277b 100644 --- a/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp +++ b/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp @@ -12,7 +12,8 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::AddPreprocessing, "AddPreprocessing", 0); -ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap): m_inputInfoMap(inputInfoMap) {} +ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap) + : m_inputInfoMap(inputInfoMap) {} bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr f) { ngraph::pass::AddMeanSubtract::MeanMap meanMap; @@ -39,10 +40,12 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptrmeanData; - NGRAPH_CHECK(meanImage->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32, - "Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData"); + NGRAPH_CHECK( + meanImage->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32, + "Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData"); } else { - NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(), "TensorDesc for PreProcessChannel::meanData must be equal"); + NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(), + "TensorDesc for PreProcessChannel::meanData must be equal"); } } } @@ -52,7 +55,8 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptrsecond; - NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, "Scale for ", param->get_friendly_name(), " must have f32 type"); + NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, + "Scale for ", + param->get_friendly_name(), + " must have f32 type"); auto copy_param = param->clone_with_new_inputs({}); auto div = std::make_shared(copy_param, it->second); diff --git a/docs/template_plugin/src/transformations/template_function_transformation.cpp b/docs/template_plugin/src/transformations/template_function_transformation.cpp index 2470f1d23d6..4199b81eac3 100644 --- a/docs/template_plugin/src/transformations/template_function_transformation.cpp +++ b/docs/template_plugin/src/transformations/template_function_transformation.cpp 
@@ -24,7 +24,8 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr input = node->input(0); Output output = node->output(0); - if (input.get_partial_shape().is_static() && output.get_partial_shape().is_static() && output.get_target_inputs().size() == 1) { + if (input.get_partial_shape().is_static() && output.get_partial_shape().is_static() && + output.get_target_inputs().size() == 1) { nodes.push_back(node); } } @@ -32,7 +33,8 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptrget_type_info().name << std::endl << "Name: " << node->get_friendly_name() << std::endl; + std::cout << "Type: " << node->get_type_info().name << std::endl + << "Name: " << node->get_friendly_name() << std::endl; } // Return false because we didn't change nGraph Function diff --git a/docs/template_plugin/src/transformations/template_pattern_transformation.cpp b/docs/template_plugin/src/transformations/template_pattern_transformation.cpp index 010db33d465..60de44c5f80 100644 --- a/docs/template_plugin/src/transformations/template_pattern_transformation.cpp +++ b/docs/template_plugin/src/transformations/template_pattern_transformation.cpp @@ -33,7 +33,9 @@ ngraph::pass::DecomposeDivideMatcher::DecomposeDivideMatcher() { } // Decompose Divide into Multiply with Power operations - auto pow = std::make_shared(div->input_value(1), opset3::Constant::create(div->get_input_element_type(1), Shape {1}, {-1})); + auto pow = std::make_shared( + div->input_value(1), + opset3::Constant::create(div->get_input_element_type(1), Shape{1}, {-1})); auto mul = std::make_shared(div->input_value(0), pow); @@ -70,7 +72,8 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() { auto& node_to_output = m.get_pattern_value_map(); // Create new Relu operation and add register it for additional execution - auto new_relu = register_new_node(node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0)); + auto new_relu = + register_new_node(node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0)); // Copy runtime info attributes to newly created operation ngraph::copy_runtime_info(m.get_matched_nodes(), new_relu); From 0834ae2e6d59dac549924fc6f0df99ddf1734085 Mon Sep 17 00:00:00 2001 From: Elizaveta Lobanova Date: Wed, 11 Aug 2021 10:10:33 +0300 Subject: [PATCH 04/19] [GNA] Support bias and FQ in SwapInputMatMul transformation (#6996) * [GNA] Support bias and FQ in SwapInputMatMul transformation * Updated opset for transformation and removed debug info --- .../src/gna_plugin/gna_plugin.cpp | 2 + .../transformations/swap_input_matmul_gna.cpp | 212 +++++++++---- .../transformations/swap_input_matmul_gna.hpp | 14 +- .../transformations/gna_swap_input_matmul.cpp | 283 +++++++++--------- 4 files changed, 307 insertions(+), 204 deletions(-) diff --git a/inference-engine/src/gna_plugin/gna_plugin.cpp b/inference-engine/src/gna_plugin/gna_plugin.cpp index 5e069154763..55ebcc460dd 100644 --- a/inference-engine/src/gna_plugin/gna_plugin.cpp +++ b/inference-engine/src/gna_plugin/gna_plugin.cpp @@ -704,6 +704,8 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) { manager.register_pass(); manager.register_pass(); manager.register_pass(); + manager.register_pass(); + manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); diff --git a/inference-engine/src/gna_plugin/transformations/swap_input_matmul_gna.cpp b/inference-engine/src/gna_plugin/transformations/swap_input_matmul_gna.cpp index fdfcfc254d4..2db8e10620c 100644 --- 
a/inference-engine/src/gna_plugin/transformations/swap_input_matmul_gna.cpp +++ b/inference-engine/src/gna_plugin/transformations/swap_input_matmul_gna.cpp @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include #include @@ -20,75 +20,163 @@ using namespace GNAPluginNS; NGRAPH_RTTI_DEFINITION(SwapInputMatMul, "SwapInputMatMul", 0); +NGRAPH_RTTI_DEFINITION(SwapInputMatMulWithBias, "SwapInputMatMulWithBias", 0); +NGRAPH_RTTI_DEFINITION(SwapInputMatMulWithFq, "SwapInputMatMulWithFq", 0); + +static void SwapAndTransposeInputs(std::shared_ptr matmul_node, + std::shared_ptr add, + std::shared_ptr bias, + std::shared_ptr fq) { + auto create_transpose = + [](ngraph::Output node, const std::string& transpose_name) -> std::shared_ptr { + ngraph::Shape output_shape = node.get_node_shared_ptr()->get_shape(); + + std::vector transpose_order(output_shape.size()); + std::iota(transpose_order.begin(), transpose_order.end(), 0); + std::swap(*(transpose_order.end() - 1), *(transpose_order.end() - 2)); + + auto transpose = std::make_shared( + node, ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape {transpose_order.size()}, transpose_order)); + transpose->set_friendly_name(transpose_name); + return transpose; + }; + + ngraph::NodeVector new_ops; + + gnalog() << "Swap and transpose inputs for " << matmul_node->get_friendly_name() << "\n"; + std::shared_ptr new_matmul = std::make_shared( + matmul_node->input(1).get_source_output(), matmul_node->input(0).get_source_output(), + !matmul_node->get_transpose_b(), !matmul_node->get_transpose_a()); + new_matmul->set_friendly_name(matmul_node->get_friendly_name() + "/swap_inputs"); + new_ops.push_back(new_matmul); + + std::shared_ptr old_root_node = matmul_node; + if (bias != nullptr) { + // output of MatMul will be transposed comparing with original one, so the bias should be transposed too + if (bias->get_output_shape(0).size() > 1) { + bias = create_transpose(bias, bias->get_friendly_name() + "/transpose"); + new_ops.push_back(bias); + } + + new_matmul = std::make_shared(new_matmul, bias); + old_root_node = add; + new_ops.push_back(new_matmul); + } + + if (fq != nullptr) { + new_matmul = fq->clone_with_new_inputs({new_matmul, fq->input_value(1), fq->input_value(2), + fq->input_value(3), fq->input_value(4)}); + old_root_node = fq; + new_ops.push_back(new_matmul); + } + + auto output = create_transpose(new_matmul, matmul_node->get_friendly_name()); + new_ops.push_back(output); + + ngraph::copy_runtime_info(matmul_node, new_ops); + ngraph::replace_node(old_root_node, output); +} SwapInputMatMul::SwapInputMatMul() { MATCHER_SCOPE(SwapInputMatMul); - auto constant = ngraph::pattern::wrap_type({}, ngraph::pattern::rank_equals(2)); - auto fake_quantize = ngraph::pattern::wrap_type({constant, - ngraph::pattern::wrap_type(), - ngraph::pattern::wrap_type(), - ngraph::pattern::wrap_type(), - ngraph::pattern::wrap_type()}); + auto constant = ngraph::pattern::wrap_type({}, [](const ngraph::Output& node) { + auto shape = node.get_node_shared_ptr()->get_output_shape(0); + if (shape.size() != 2 || shape[0] < 8 || ((shape[0] % 8 != 0 || shape[1] % 8 != 0))) { + return false; + } + return true; + }); + auto fake_quantize = ngraph::pattern::wrap_type({constant, + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type()}); auto matmul_input = std::make_shared(ngraph::OutputVector{constant, fake_quantize}); - auto matmul = ngraph::pattern::wrap_type({matmul_input, 
ngraph::pattern::any_input()}, + auto matmul = ngraph::pattern::wrap_type({matmul_input, ngraph::pattern::any_input()}, ngraph::pattern::has_static_shape()); - ngraph::matcher_pass_callback callback = [this](ngraph::pattern::Matcher& m) { - auto matmul = std::dynamic_pointer_cast(m.get_match_root()); - if (!matmul) { - return false; - } - - auto input_a = matmul->input(0).get_source_output(); - auto input_b = matmul->input(1).get_source_output(); - - ngraph::Shape shape_input_a = input_a.get_shape(); - - auto create_transpose = [this](ngraph::Output node, const std::string& transpose_name) -> std::shared_ptr { - ngraph::Shape output_shape = node.get_node_shared_ptr()->get_shape(); - - std::vector transpose_order(output_shape.size()); - std::iota(transpose_order.begin(), transpose_order.end(), 0); - std::swap(*(transpose_order.end() - 1), *(transpose_order.end() - 2)); - - auto transpose = register_new_node( - node, ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape {transpose_order.size()}, transpose_order)); - transpose->set_friendly_name(transpose_name); - return transpose; - }; - - ngraph::NodeVector new_ops; - - if (shape_input_a[0] < 8 || ((shape_input_a[0] % 8 != 0 || shape_input_a[1] % 8 != 0))) { - return false; - } - - gnalog() << "Swap and transpose inputs for " << matmul->get_friendly_name() << "\n"; - auto new_matmul = std::make_shared(input_b, input_a, !matmul->get_transpose_b(), !matmul->get_transpose_a()); - new_matmul->set_friendly_name(matmul->get_friendly_name() + "/swap_inputs"); - new_ops.push_back(new_matmul); - - if (!matmul->get_output_target_inputs(0).empty()) { - auto matmul_out = matmul->get_output_target_inputs(0).begin()->get_node()->shared_from_this(); - if (std::dynamic_pointer_cast(matmul_out) != nullptr) { - ngraph::copy_runtime_info(matmul, new_ops); - ngraph::replace_node(matmul, new_matmul); - auto consumers = matmul_out->output(0).get_target_inputs(); - auto traspose_output = create_transpose(matmul_out, matmul->get_friendly_name()); - for (auto input : consumers) { - input.replace_source_output(traspose_output); - } - return true; - } - } - - auto traspose_output = create_transpose(new_matmul, matmul->get_friendly_name()); - new_ops.push_back(traspose_output); - - ngraph::copy_runtime_info(matmul, new_ops); - ngraph::replace_node(matmul, traspose_output); + ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + auto matmul_node = std::dynamic_pointer_cast(pattern_map.at(matmul).get_node_shared_ptr()); + IE_ASSERT(matmul_node != nullptr); + SwapAndTransposeInputs(matmul_node, nullptr, nullptr, nullptr); return true; }; auto m = std::make_shared(matmul, matcher_name); this->register_matcher(m, callback); +} + +SwapInputMatMulWithBias::SwapInputMatMulWithBias() { + MATCHER_SCOPE(SwapInputMatMulWithBias); + auto constant = ngraph::pattern::wrap_type({}, [](const ngraph::Output& node) { + auto shape = node.get_node_shared_ptr()->get_output_shape(0); + if (shape.size() != 2 || shape[0] < 8 || ((shape[0] % 8 != 0 || shape[1] % 8 != 0))) { + return false; + } + return true; + }); + auto fake_quantize = ngraph::pattern::wrap_type({constant, + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type()}); + auto matmul_input = std::make_shared(ngraph::OutputVector{constant, fake_quantize}); + auto matmul = ngraph::pattern::wrap_type({matmul_input, ngraph::pattern::any_input()}, + 
ngraph::pattern::has_static_shape()); + auto bias = ngraph::pattern::wrap_type(); + auto add = ngraph::pattern::wrap_type({matmul, bias}); + + ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + auto matmul_node = std::dynamic_pointer_cast(pattern_map.at(matmul).get_node_shared_ptr()); + IE_ASSERT(matmul_node != nullptr); + SwapAndTransposeInputs(matmul_node, pattern_map.at(add).get_node_shared_ptr(), + pattern_map.at(bias).get_node_shared_ptr(), nullptr); + return true; + }; + + auto m = std::make_shared(add, matcher_name); + this->register_matcher(m, callback); +} + +SwapInputMatMulWithFq::SwapInputMatMulWithFq() { + MATCHER_SCOPE(SwapInputMatMulWithFq); + auto constant = ngraph::pattern::wrap_type({}, [](const ngraph::Output& node) { + auto shape = node.get_node_shared_ptr()->get_output_shape(0); + if (shape.size() != 2 || shape[0] < 8 || ((shape[0] % 8 != 0 || shape[1] % 8 != 0))) { + return false; + } + return true; + }); + auto fake_quantize = ngraph::pattern::wrap_type({constant, + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type()}); + auto matmul_input = std::make_shared(ngraph::OutputVector{constant, fake_quantize}); + auto matmul = ngraph::pattern::wrap_type({matmul_input, ngraph::pattern::any_input()}, + ngraph::pattern::has_static_shape()); + auto bias = ngraph::pattern::wrap_type(); + auto add = ngraph::pattern::wrap_type({matmul, bias}); + auto matmul_out = std::make_shared(ngraph::OutputVector{add, matmul}); + auto out_fq = ngraph::pattern::wrap_type({matmul_out, + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type(), + ngraph::pattern::wrap_type()}); + + ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + auto matmul_node = std::dynamic_pointer_cast(pattern_map.at(matmul).get_node_shared_ptr()); + IE_ASSERT(matmul_node != nullptr); + auto add_it = pattern_map.find(add); + auto add_node = (add_it == std::end(pattern_map) ? nullptr : add_it->second.get_node_shared_ptr()); + auto bias_it = pattern_map.find(bias); + auto bias_node = (bias_it == std::end(pattern_map) ? 
nullptr : bias_it->second.get_node_shared_ptr()); + SwapAndTransposeInputs(matmul_node, add_node, bias_node, pattern_map.at(out_fq).get_node_shared_ptr()); + return true; + }; + + auto m = std::make_shared(out_fq, matcher_name); + this->register_matcher(m, callback); } \ No newline at end of file diff --git a/inference-engine/src/gna_plugin/transformations/swap_input_matmul_gna.hpp b/inference-engine/src/gna_plugin/transformations/swap_input_matmul_gna.hpp index 66816868915..c9604f8b7c2 100644 --- a/inference-engine/src/gna_plugin/transformations/swap_input_matmul_gna.hpp +++ b/inference-engine/src/gna_plugin/transformations/swap_input_matmul_gna.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2020 Intel Corporation +// Copyright (C) 2020-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -16,4 +16,16 @@ public: NGRAPH_RTTI_DECLARATION; SwapInputMatMul(); }; + +class SwapInputMatMulWithBias: public ngraph::pass::MatcherPass { +public: + NGRAPH_RTTI_DECLARATION; + SwapInputMatMulWithBias(); +}; + +class SwapInputMatMulWithFq: public ngraph::pass::MatcherPass { +public: + NGRAPH_RTTI_DECLARATION; + SwapInputMatMulWithFq(); +}; } // namespace GNAPluginNS \ No newline at end of file diff --git a/inference-engine/tests/unit/gna/ngraph/transformations/gna_swap_input_matmul.cpp b/inference-engine/tests/unit/gna/ngraph/transformations/gna_swap_input_matmul.cpp index 2a80bb9f847..184f0fac937 100644 --- a/inference-engine/tests/unit/gna/ngraph/transformations/gna_swap_input_matmul.cpp +++ b/inference-engine/tests/unit/gna/ngraph/transformations/gna_swap_input_matmul.cpp @@ -8,164 +8,165 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include -#include +#include #include #include namespace testing { -TEST(TransformationTests, SwapInputMatMulTestValidConstShape) { - std::shared_ptr func(nullptr), reference_func(nullptr); - const ngraph::Shape data_shape{8, 8}; +static std::shared_ptr CreateMatMulFunction(const ngraph::Shape& input1_shape, + const ngraph::Shape& input2_shape, + const ngraph::Shape& bias_shape, + bool withBias, + bool withWeightsFq, + bool withOutFq, + bool swappedInputs) { + auto input_params = std::make_shared(ngraph::element::i64, input2_shape); - { - auto input_params = std::make_shared(ngraph::element::i64, data_shape); - - auto constant = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 8}, {1}); - auto matmul_operation = std::make_shared(constant, input_params); - - auto result = std::make_shared(matmul_operation); - func = std::make_shared(ngraph::ResultVector{result}, - ngraph::ParameterVector{input_params}); - - reference_func = ngraph::clone_function(*func); - - ngraph::pass::Manager m; - m.register_pass(); - m.register_pass(); - m.run_passes(func); - ASSERT_NO_THROW(check_rt_info(func)); + auto constant = ngraph::opset8::Constant::create(ngraph::element::i64, input1_shape, {1}); + std::shared_ptr const_input = constant; + if (withWeightsFq) { + auto input_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + const_input = std::make_shared(const_input, input_low, input_high, + output_low, output_high, 11); } + auto matmul = swappedInputs ? 
std::make_shared(input_params, const_input, true, true) : + std::make_shared(const_input, input_params); - const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES); - const FunctionsComparator::Result result = func_comparator(func, reference_func); - ASSERT_TRUE(result.valid); -} - -TEST(TransformationTests, SwapInputMatMulTest) { - std::shared_ptr func(nullptr), reference_func(nullptr); - const ngraph::Shape data_shape{8, 8}; - - { - auto input_params = std::make_shared(ngraph::element::i64, data_shape); - - auto constant = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{16, 8}, {1}); - auto matmul_operation = std::make_shared(constant, input_params); - - auto result = std::make_shared(matmul_operation); - func = std::make_shared(ngraph::ResultVector{result}, - ngraph::ParameterVector{input_params}); - - ngraph::pass::Manager m; - m.register_pass(); - m.register_pass(); - m.run_passes(func); - ASSERT_NO_THROW(check_rt_info(func)); - } - - { - auto input_params = std::make_shared(ngraph::element::i64, data_shape); - - auto constant = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{16, 8}, {1}); - auto matmul_operation = std::make_shared(input_params, constant, 1, 1); - - auto transpose_order = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, - std::vector{1, 0}); - auto transpose_operation = std::make_shared(matmul_operation, transpose_order); - - auto result = std::make_shared(transpose_operation); - reference_func = std::make_shared(ngraph::ResultVector{result}, - ngraph::ParameterVector{input_params}); - } - - const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES); - const FunctionsComparator::Result result = func_comparator(func, reference_func); - ASSERT_TRUE(result.valid); -} - -TEST(TransformationTests, SwapInputMatMulTestFakeQuantize) { - std::shared_ptr func(nullptr), reference_func(nullptr); - const ngraph::Shape data_shape{8, 8}; - - { - auto input_params = std::make_shared(ngraph::element::i64, data_shape); - - auto constant = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{16, 8}, {1}); - - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - auto fake_quantize_op = std::make_shared(constant, input_low, - input_high, output_low, - output_high, 11); - auto matmul_operation = std::make_shared(fake_quantize_op, input_params); - - auto result = std::make_shared(matmul_operation); - func = std::make_shared(ngraph::ResultVector{result}, - ngraph::ParameterVector{input_params}); - - ngraph::pass::Manager m; - m.register_pass(); - m.register_pass(); - m.run_passes(func); - ASSERT_NO_THROW(check_rt_info(func)); - } - - { - auto input_params = std::make_shared(ngraph::element::i64, data_shape); - - auto constant = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{16, 8}, {1}); - - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = 
ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - auto fake_quantize_op = std::make_shared(constant, input_low, - input_high, output_low, - output_high, 11); - auto matmul_operation = std::make_shared(input_params, fake_quantize_op, 1 , 1); - - auto transpose_order = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, + std::shared_ptr final_node = matmul; + if (withBias) { + auto bias = ngraph::opset8::Constant::create(ngraph::element::i64, bias_shape, {1}); + std::shared_ptr bias_node = bias; + if (swappedInputs && bias_shape.size() > 1) { + auto transpose_order = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}); - auto transpose_operation = std::make_shared(matmul_operation, transpose_order); - - auto result = std::make_shared(transpose_operation); - reference_func = std::make_shared(ngraph::ResultVector{result}, - ngraph::ParameterVector{input_params}); + bias_node = std::make_shared(bias_node, transpose_order); + } + final_node = std::make_shared(matmul, bias_node); } + if (withOutFq) { + auto input_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + final_node = std::make_shared(final_node, input_low, input_high, + output_low, output_high, 11); + } + + if (swappedInputs) { + auto transpose_order = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, + std::vector{1, 0}); + final_node = std::make_shared(final_node, transpose_order); + } + + auto result = std::make_shared(final_node); + return std::make_shared(ngraph::ResultVector{result}, + ngraph::ParameterVector{input_params}); +} + +static void Execute(std::shared_ptr function, std::shared_ptr reference_function) { + ngraph::pass::Manager m; + m.register_pass(); + m.register_pass(); + m.register_pass(); + m.register_pass(); + m.run_passes(function); + ASSERT_NO_THROW(check_rt_info(function)); + const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES); - const FunctionsComparator::Result result = func_comparator(func, reference_func); + const FunctionsComparator::Result result = func_comparator(function, reference_function); ASSERT_TRUE(result.valid); } -TEST(TransformationTests, SwapInputMatMulTestRank1) { - std::shared_ptr func(nullptr), reference_func(nullptr); - const ngraph::Shape data_shape{8, 8}; +typedef std::tuple< + std::vector, // constant input shape, non-const input shape, bias shape + bool, // with bias + bool, // with weights FakeQuantize + bool // with output FakeQuantize +> SwapInputMatmulParams; - { - auto input_params = std::make_shared(ngraph::element::i64, data_shape); +static std::string getTestCaseName(testing::TestParamInfo obj) { + std::vector shapes; + bool withBias, withWeightsFq, withOutFq; + std::tie(shapes, withBias, withWeightsFq, withOutFq) = obj.param; - auto constant = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{8}, {1}); - auto matmul_operation = std::make_shared(constant, input_params); - - auto result = std::make_shared(matmul_operation); - func = 
std::make_shared(ngraph::ResultVector{result}, - ngraph::ParameterVector{input_params}); - - reference_func = ngraph::clone_function(*func); - - ngraph::pass::Manager m; - m.register_pass(); - m.register_pass(); - m.run_passes(func); - ASSERT_NO_THROW(check_rt_info(func)); - } - - const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES); - const FunctionsComparator::Result result = func_comparator(func, reference_func); - ASSERT_TRUE(result.valid); + std::ostringstream result; + result << "IS1=" << shapes[0] << "_"; + result << "IS2=" << shapes[1] << "_"; + result << "BS=" << shapes[2] << "_"; + result << "bias=" << withBias << "_"; + result << "wFQ=" << withWeightsFq << "_"; + result << "oFQ=" << withOutFq; + return result.str(); } +class SwapInputMatmul : public CommonTestUtils::TestsCommon, + public ::testing::WithParamInterface { +public: + void SetUp() override { + std::vector shapes; + bool withBias, withWeightsFq, withOutFq; + std::tie(shapes, withBias, withWeightsFq, withOutFq) = this->GetParam(); + + function = CreateMatMulFunction(shapes[0], shapes[1], shapes[2], withBias, withWeightsFq, withOutFq, false); + reference_function = CreateMatMulFunction(shapes[0], shapes[1], shapes[2], withBias, withWeightsFq, + withOutFq, true); + } +public: + std::shared_ptr function, reference_function; +}; + +class SwapInputMatmulNotApplied : public CommonTestUtils::TestsCommon, + public ::testing::WithParamInterface { +public: + void SetUp() override { + std::vector shapes; + bool withBias, withWeightsFq, withOutFq; + std::tie(shapes, withBias, withWeightsFq, withOutFq) = this->GetParam(); + + function = CreateMatMulFunction(shapes[0], shapes[1], shapes[2], withBias, withWeightsFq, withOutFq, false); + reference_function = ngraph::clone_function(*function); + } +public: + std::shared_ptr function, reference_function; +}; + +TEST_P(SwapInputMatmul, CompareFunctions) { + Execute(function, reference_function); +} + +TEST_P(SwapInputMatmulNotApplied, CompareFunctions) { + Execute(function, reference_function); +} + +const std::vector> input_shapes_applied = { + {{16, 8}, {8, 8}, {16, 8}}, + {{16, 8}, {8, 8}, {1}}, +}; + +const std::vector> input_shapes_not_applied = { + {{1, 8}, {8, 8}, {1, 8}}, + {{8}, {8, 8}, {8}} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_swap_input_matmul, SwapInputMatmul, + ::testing::Combine( + ::testing::ValuesIn(input_shapes_applied), + ::testing::ValuesIn(std::vector{false, true}), + ::testing::ValuesIn(std::vector{false, true}), + ::testing::ValuesIn(std::vector{false, true})), + getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_swap_input_matmul, SwapInputMatmulNotApplied, + ::testing::Combine( + ::testing::ValuesIn(input_shapes_not_applied), + ::testing::ValuesIn(std::vector{false, true}), + ::testing::ValuesIn(std::vector{false, true}), + ::testing::ValuesIn(std::vector{false, true})), + getTestCaseName); + } // namespace testing From 9d9a24c914972eb705a6cf2856d0fa991904d10e Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Wed, 11 Aug 2021 10:10:56 +0300 Subject: [PATCH 05/19] [LPT] Some LP Transformations improvements (#6434) * [LPT] LayerTransformation::canBeTransformed: replaced legacy code * [LPT] NetworkHelper::moveDequantizationAfter refactoring * [LPT] ReshapeTransformation improvement * [LPT] Squeeze/UnsqueezeTransformation improvement --- .../src/network_helper.cpp | 16 ++++++++-------- .../src/squeeze.cpp | 7 ++++++- .../src/unsqueeze.cpp | 9 ++++++++- 3 files changed, 22 insertions(+), 10 
deletions(-) diff --git a/inference-engine/src/low_precision_transformations/src/network_helper.cpp b/inference-engine/src/low_precision_transformations/src/network_helper.cpp index 879bd24dc04..47b624236b0 100644 --- a/inference-engine/src/low_precision_transformations/src/network_helper.cpp +++ b/inference-engine/src/low_precision_transformations/src/network_helper.cpp @@ -1560,14 +1560,14 @@ NetworkHelper::InsertDequantizationResult NetworkHelper::moveDequantizationAfter if (updatePrecision) { op->set_overridden_output_type(newOperation->get_input_element_type(0)); } else if (dequantization.multiply) { - op->set_overridden_output_type(dequantization.multiply->get_input_element_type(1)); + op->set_overridden_output_type(dequantization.multiplyConstant->get_element_type()); } else if (dequantization.subtract) { - op->set_overridden_output_type(dequantization.subtract->get_input_element_type(1)); + op->set_overridden_output_type(dequantization.subtractConstant->get_element_type()); } std::dynamic_pointer_cast(newOperation)->validate_and_infer_types(); } - const element::Type deqPrecision = dequantization.multiply->get_input_node_shared_ptr(1)->get_output_element_type(0); + const element::Type deqPrecision = dequantization.multiplyConstant->get_element_type(); const bool shouldConvert = (newOperation->get_output_element_type(0) != deqPrecision); auto parent = newOperation; @@ -1582,11 +1582,11 @@ NetworkHelper::InsertDequantizationResult NetworkHelper::moveDequantizationAfter if (moveSubtract && (dequantization.subtract != nullptr)) { if (dequantization.subtractConvert == nullptr) { const element::Type parentPrecision = parent->get_output_element_type(0); - if (parentPrecision.bitwidth() < dequantization.subtractConstant->output(0).get_element_type().bitwidth()) { + if (parentPrecision.bitwidth() < dequantization.subtractConstant->get_element_type().bitwidth()) { THROW_IE_LPT_EXCEPTION(*parent) << "unexpected precisions: on data " << parent->get_friendly_name() << ":" << parentPrecision << ", subtract dequantization constant " << dequantization.subtractConstant->get_friendly_name() << ":" << - dequantization.subtractConstant->output(0).get_element_type(); + dequantization.subtractConstant->get_element_type(); } parent = std::make_shared>( @@ -1604,12 +1604,12 @@ NetworkHelper::InsertDequantizationResult NetworkHelper::moveDequantizationAfter } if (dequantization.multiply != nullptr) { - auto multiplyConstant = dequantization.multiply->get_input_node_shared_ptr(1); + auto multiplyConstant = dequantization.multiplyConstant; const element::Type parentPrecision = parent->get_output_element_type(0); - if (parentPrecision.bitwidth() < multiplyConstant->output(0).get_element_type().bitwidth()) { + if (parentPrecision.bitwidth() < multiplyConstant->get_element_type().bitwidth()) { THROW_IE_LPT_EXCEPTION(*parent) << "unexpected precisions: on data " << parent->get_friendly_name() << ":" << parentPrecision << - ", multiply dequantization constant " << multiplyConstant->get_friendly_name() << ":" << multiplyConstant->output(0).get_element_type(); + ", multiply dequantization constant " << multiplyConstant->get_friendly_name() << ":" << multiplyConstant->get_element_type(); } parent = std::make_shared>( diff --git a/inference-engine/src/low_precision_transformations/src/squeeze.cpp b/inference-engine/src/low_precision_transformations/src/squeeze.cpp index 8ecad0adea4..919364d1bbf 100644 --- a/inference-engine/src/low_precision_transformations/src/squeeze.cpp +++ 
b/inference-engine/src/low_precision_transformations/src/squeeze.cpp @@ -42,9 +42,14 @@ bool SqueezeTransformation::transform(TransformationContext& context, ngraph::pa const std::shared_ptr& dequantizationOpConstant, const ngraph::PartialShape& inputShape) { const size_t inputRankValue = inputShape.rank().get_length(); - if (dequantizationOpConstant->get_shape().size() == inputRankValue) { + const auto constantShape = dequantizationOpConstant->get_shape(); + if (shape_size(constantShape) == 1ul) { + return NetworkHelper::toScalar(dequantizationOpConstant); + } + if (constantShape.size() == inputRankValue) { return as_type_ptr(fold(dequantizationOpConstant, squeeze->get_input_node_shared_ptr(1))); } + return dequantizationOpConstant; }; diff --git a/inference-engine/src/low_precision_transformations/src/unsqueeze.cpp b/inference-engine/src/low_precision_transformations/src/unsqueeze.cpp index b03046e2253..d2f0636c832 100644 --- a/inference-engine/src/low_precision_transformations/src/unsqueeze.cpp +++ b/inference-engine/src/low_precision_transformations/src/unsqueeze.cpp @@ -42,12 +42,19 @@ bool UnsqueezeTransformation::transform(TransformationContext& context, ngraph:: const std::shared_ptr& dequantizationOpConstant, const ngraph::PartialShape& inputShape) { const size_t inputRankValue = inputShape.rank().get_length(); - if (dequantizationOpConstant->get_shape().size() == inputRankValue) { + const auto constantShape = dequantizationOpConstant->get_shape(); + if (shape_size(constantShape) == 1ul) { + return NetworkHelper::toScalar(dequantizationOpConstant); + } + + if (constantShape.size() == inputRankValue) { return as_type_ptr(fold(dequantizationOpConstant, unsqueeze->get_input_node_shared_ptr(1))); } + return dequantizationOpConstant; }; + const std::shared_ptr unsqueeze = NetworkHelper::separateInStandaloneBranch(m.get_match_root()); FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(unsqueeze); From 5292de53380199de45fd6a0fe9630b072e2de8e7 Mon Sep 17 00:00:00 2001 From: Daria Ilina Date: Wed, 11 Aug 2021 11:33:52 +0300 Subject: [PATCH 06/19] Reorganize work with openvino/thirdparty (Copied from PR6744) (#6880) * Copied PR6744 * Added CMakeLists.txt into gflags * Return gflags into .gitmodules * Added CMakeLists.txt into gtest * fix: remove extra gflags and gtests from .gitmodules * Change syntax in set gtest_targets * Make gtest/CMakeLists.txt as in PR6744 for pre-commit experiment * Update gtest/CMakeLists.txt * Change version of gtest repo * Return gtest info into .gitmodules * Update gflags version to currently used --- .gitmodules | 8 +-- inference-engine/samples/CMakeLists.txt | 14 +---- tests/stress_tests/common/CMakeLists.txt | 30 ++++------ .../src/timetests_helper/CMakeLists.txt | 18 +----- thirdparty/CMakeLists.txt | 60 +------------------ thirdparty/gflags/CMakeLists.txt | 13 ++++ thirdparty/{ => gflags}/gflags | 0 thirdparty/gtest/CMakeLists.txt | 55 +++++++++++++++++ thirdparty/{ => gtest}/gtest | 0 9 files changed, 91 insertions(+), 107 deletions(-) create mode 100644 thirdparty/gflags/CMakeLists.txt rename thirdparty/{ => gflags}/gflags (100%) create mode 100644 thirdparty/gtest/CMakeLists.txt rename thirdparty/{ => gtest}/gtest (100%) diff --git a/.gitmodules b/.gitmodules index 0b76a4b239e..095f3968264 100644 --- a/.gitmodules +++ b/.gitmodules @@ -18,12 +18,12 @@ path = thirdparty/ade url = https://github.com/opencv/ade.git ignore = dirty -[submodule "thirdparty/gflags"] - path = thirdparty/gflags +[submodule "thirdparty/gflags/gflags"] + 
path = thirdparty/gflags/gflags url = https://github.com/gflags/gflags.git ignore = dirty -[submodule "thirdparty/gtest"] - path = thirdparty/gtest +[submodule "thirdparty/gtest/gtest"] + path = thirdparty/gtest/gtest url = https://github.com/openvinotoolkit/googletest.git ignore = dirty [submodule "thirdparty/ocl/icd_loader"] diff --git a/inference-engine/samples/CMakeLists.txt b/inference-engine/samples/CMakeLists.txt index bccc7be715b..3e42fa84f2e 100644 --- a/inference-engine/samples/CMakeLists.txt +++ b/inference-engine/samples/CMakeLists.txt @@ -113,19 +113,7 @@ endif() if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/gflags" AND NOT DEFINED OpenVINO_SOURCE_DIR) - function(add_gflags) - # common gflags settings - set(GFLAGS_IS_SUBPROJECT TRUE) - set(HAVE_SYS_STAT_H 1) - set(HAVE_INTTYPES_H 1) - set(INTTYPES_FORMAT C99) - set(BUILD_TESTING OFF) - set(BUILD_SHARED_LIBS OFF) - - add_subdirectory(thirdparty/gflags EXCLUDE_FROM_ALL) - set_target_properties(gflags_nothreads_static PROPERTIES FOLDER thirdparty) - endfunction() - add_gflags() + add_subdirectory(thirdparty/gflags EXCLUDE_FROM_ALL) endif() if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/zlib") diff --git a/tests/stress_tests/common/CMakeLists.txt b/tests/stress_tests/common/CMakeLists.txt index 2ced4a865ca..32a1ca455a9 100644 --- a/tests/stress_tests/common/CMakeLists.txt +++ b/tests/stress_tests/common/CMakeLists.txt @@ -11,28 +11,22 @@ add_library(${TARGET_NAME} STATIC ${SRC} ${HDR}) target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) -if(EXISTS "${OpenVINO_SOURCE_DIR}/thirdparty/gflags") - function(add_gflags) - set(GFLAGS_IS_SUBPROJECT TRUE) - set(HAVE_SYS_STAT_H 1) - set(HAVE_INTTYPES_H 1) - set(INTTYPES_FORMAT C99) - set(BUILD_TESTING OFF) - set(BUILD_SHARED_LIBS OFF) - add_subdirectory(${OpenVINO_SOURCE_DIR}/thirdparty/gflags - ${CMAKE_CURRENT_BINARY_DIR}/gflags_build - EXCLUDE_FROM_ALL) - set_target_properties(gflags_nothreads_static PROPERTIES FOLDER thirdparty) - endfunction() - add_gflags() -endif() +add_subdirectory(${OpenVINO_SOURCE_DIR}/thirdparty/gflags + ${CMAKE_CURRENT_BINARY_DIR}/gflags_build + EXCLUDE_FROM_ALL) +add_subdirectory(${OpenVINO_SOURCE_DIR}/thirdparty/gtest + ${CMAKE_CURRENT_BINARY_DIR}/gtest_build + EXCLUDE_FROM_ALL) +add_subdirectory(${OpenVINO_SOURCE_DIR}/thirdparty/pugixml + ${CMAKE_CURRENT_BINARY_DIR}/pugixml_build + EXCLUDE_FROM_ALL) add_subdirectory("${OpenVINO_SOURCE_DIR}/tests/lib" tests_shared_lib) target_link_libraries(${TARGET_NAME} PUBLIC - IE::gtest - IE::pugixml + gtest + pugixml gflags tests_shared_lib PRIVATE - IE::gtest_main) + gtest_main) diff --git a/tests/time_tests/src/timetests_helper/CMakeLists.txt b/tests/time_tests/src/timetests_helper/CMakeLists.txt index 04142ef9768..5609449c848 100644 --- a/tests/time_tests/src/timetests_helper/CMakeLists.txt +++ b/tests/time_tests/src/timetests_helper/CMakeLists.txt @@ -8,20 +8,8 @@ file (GLOB SRC *.cpp) add_library(${TARGET_NAME} STATIC ${SRC}) target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_SOURCE_DIR}/include") -if(EXISTS "${OpenVINO_SOURCE_DIR}/thirdparty/gflags") - function(add_gflags) - set(GFLAGS_IS_SUBPROJECT TRUE) - set(HAVE_SYS_STAT_H 1) - set(HAVE_INTTYPES_H 1) - set(INTTYPES_FORMAT C99) - set(BUILD_TESTING OFF) - set(BUILD_SHARED_LIBS OFF) - add_subdirectory(${OpenVINO_SOURCE_DIR}/thirdparty/gflags - ${CMAKE_CURRENT_BINARY_DIR}/gflags_build - EXCLUDE_FROM_ALL) - set_target_properties(gflags_nothreads_static PROPERTIES FOLDER thirdparty) - endfunction() - add_gflags() -endif() 
+add_subdirectory(${OpenVINO_SOURCE_DIR}/thirdparty/gflags + ${CMAKE_CURRENT_BINARY_DIR}/gflags_build + EXCLUDE_FROM_ALL) target_link_libraries(${TARGET_NAME} gflags) diff --git a/thirdparty/CMakeLists.txt b/thirdparty/CMakeLists.txt index fa89cefc5c7..93a1c3684de 100644 --- a/thirdparty/CMakeLists.txt +++ b/thirdparty/CMakeLists.txt @@ -54,72 +54,18 @@ openvino_developer_export_targets(COMPONENT openvino_common TARGETS ade fluid) # Gflags # -function(add_gflags) - # common gflags settings - set(GFLAGS_IS_SUBPROJECT TRUE) - set(HAVE_SYS_STAT_H 1) - set(HAVE_INTTYPES_H 1) - set(INTTYPES_FORMAT C99) - set(BUILD_TESTING OFF) - set(BUILD_SHARED_LIBS OFF) - - add_subdirectory(gflags EXCLUDE_FROM_ALL) - - set_target_properties(gflags_nothreads_static PROPERTIES FOLDER thirdparty) - openvino_developer_export_targets(COMPONENT openvino_common TARGETS gflags) -endfunction() - -add_gflags() +add_subdirectory(gflags EXCLUDE_FROM_ALL) +openvino_developer_export_targets(COMPONENT openvino_common TARGETS gflags) # # Google Tests framework # -function(add_gtest_libraries) - set(gtest_force_shared_crt ON CACHE BOOL "disable static CRT for google test") - - set(BUILD_SHARED_LIBS OFF) - set(INSTALL_GTEST OFF CACHE BOOL "" FORCE) +if(NGRAPH_UNIT_TEST_ENABLE OR ENABLE_TESTS) add_subdirectory(gtest EXCLUDE_FROM_ALL) - get_target_property(gtest_include_dirs gtest INTERFACE_INCLUDE_DIRECTORIES) - set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${gtest_include_dirs}") - - get_target_property(gmock_include_dirs gtest INTERFACE_INCLUDE_DIRECTORIES) - set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${gmock_include_dirs};${gmock_SOURCE_DIR}/include") - - set(gtest_targets gtest gtest_main gmock gmock_main) - - foreach(target IN LISTS gtest_targets) - # If we have specified /Z7 option, remove -Zi option which comes from gtest - if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - get_target_property(_target_cxx_flags ${target} COMPILE_OPTIONS) - if(_target_cxx_flags) - if(CMAKE_CXX_FLAGS_DEBUG MATCHES ".+/Z7.+" OR CMAKE_CXX_FLAGS_RELWITHDEBINFO MATCHES ".+/Z7.+") - string(REPLACE "-Zi" " " _target_cxx_flags ${_target_cxx_flags}) - message(STATUS "Removing -Zi flag from target " ${target}) - set_target_properties(${target} PROPERTIES COMPILE_OPTIONS "${_target_cxx_flags}") - endif() - endif() - elseif(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG OR - CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - target_compile_options(${target} PRIVATE -Wno-undef) - if(CMAKE_COMPILER_IS_GNUCXX) - target_compile_options(${target} PRIVATE -Wno-deprecated-copy) - endif() - endif() - ov_disable_all_warnings(${target}) - endforeach() - - set_target_properties(gtest gtest_main gmock gmock_main - PROPERTIES FOLDER thirdparty) - openvino_developer_export_targets(COMPONENT inference_engine_tests TARGETS gmock gmock_main gtest gtest_main) -endfunction() - -if(NGRAPH_UNIT_TEST_ENABLE OR ENABLE_TESTS) - add_gtest_libraries() endif() # diff --git a/thirdparty/gflags/CMakeLists.txt b/thirdparty/gflags/CMakeLists.txt new file mode 100644 index 00000000000..c77e7dfee67 --- /dev/null +++ b/thirdparty/gflags/CMakeLists.txt @@ -0,0 +1,13 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(GFLAGS_IS_SUBPROJECT TRUE) +set(HAVE_SYS_STAT_H 1) +set(HAVE_INTTYPES_H 1) +set(INTTYPES_FORMAT C99) +set(BUILD_TESTING OFF) +set(BUILD_SHARED_LIBS OFF) + +add_subdirectory(gflags EXCLUDE_FROM_ALL) +set_target_properties(gflags_nothreads_static PROPERTIES FOLDER thirdparty) 
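With the wrapper directories now carrying their own CMakeLists.txt, a consumer no longer needs the inlined add_gflags()/add_gtest_libraries() helpers that the rest of this patch removes; a plain add_subdirectory call is enough. A minimal sketch of the resulting usage, mirroring the stress/time-test CMake changes above (the target name my_component is hypothetical, not something this patch adds):

```cmake
# Pull the bundled gflags in through its wrapper directory (sketch only;
# "my_component" is a hypothetical consumer target, not part of the patch).
add_subdirectory(${OpenVINO_SOURCE_DIR}/thirdparty/gflags
                 ${CMAKE_CURRENT_BINARY_DIR}/gflags_build
                 EXCLUDE_FROM_ALL)

add_executable(my_component main.cpp)
target_link_libraries(my_component PRIVATE gflags)
```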
diff --git a/thirdparty/gflags b/thirdparty/gflags/gflags similarity index 100% rename from thirdparty/gflags rename to thirdparty/gflags/gflags diff --git a/thirdparty/gtest/CMakeLists.txt b/thirdparty/gtest/CMakeLists.txt new file mode 100644 index 00000000000..b1f1a339e70 --- /dev/null +++ b/thirdparty/gtest/CMakeLists.txt @@ -0,0 +1,55 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# +# Adds compiler flags to C / C++ sources +# +macro(add_compiler_flags) + foreach(flag ${ARGN}) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}") + endforeach() +endmacro() + +# +# Google Tests framework +# +set(gtest_force_shared_crt ON CACHE BOOL "disable static CRT for google test") + +set(BUILD_SHARED_LIBS OFF) +set(INSTALL_GTEST OFF CACHE BOOL "" FORCE) +add_subdirectory(gtest EXCLUDE_FROM_ALL) + +get_target_property(gtest_include_dirs gtest INTERFACE_INCLUDE_DIRECTORIES) +set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${gtest_include_dirs}") + +get_target_property(gmock_include_dirs gtest INTERFACE_INCLUDE_DIRECTORIES) +set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${gmock_include_dirs};${gmock_SOURCE_DIR}/include") + +set(gtest_targets gtest gtest_main gmock gmock_main) + +foreach(target IN LISTS gtest_targets) + +# If we have specified /Z7 option, remove -Zi option which comes from gtest + if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + get_target_property(_target_cxx_flags ${target} COMPILE_OPTIONS) + if(_target_cxx_flags) + if(CMAKE_CXX_FLAGS_DEBUG MATCHES ".+/Z7.+" OR CMAKE_CXX_FLAGS_RELWITHDEBINFO MATCHES ".+/Z7.+") + string(REPLACE "-Zi" " " _target_cxx_flags ${_target_cxx_flags}) + message(STATUS "Removing -Zi flag from target " ${target}) + set_target_properties(${target} PROPERTIES COMPILE_OPTIONS "${_target_cxx_flags}") + endif() + endif() + elseif(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG OR + CMAKE_CXX_COMPILER_ID STREQUAL "Intel") + target_compile_options(${target} PRIVATE -Wno-undef) + if(CMAKE_COMPILER_IS_GNUCXX) + target_compile_options(${target} PRIVATE -Wno-deprecated-copy) + endif() + endif() + ov_disable_all_warnings(${target}) +endforeach() + +set_target_properties(gtest gtest_main gmock gmock_main + PROPERTIES FOLDER thirdparty) diff --git a/thirdparty/gtest b/thirdparty/gtest/gtest similarity index 100% rename from thirdparty/gtest rename to thirdparty/gtest/gtest From 289df8db2708f006153be2de73646664e3da36ee Mon Sep 17 00:00:00 2001 From: Bartek Szmelczynski Date: Wed, 11 Aug 2021 12:04:30 +0200 Subject: [PATCH 07/19] Revise equal (#6605) * update spec, init backend file for equal op * add backend, visitors, serialize SLT tests * add backend test to manifest cause of mismatch of output type with cpu plugin * add equal to list of trusted ops and to cmakelist file * refactor backend tests to the new template * refactor spec * remove external link in numpy broadcast and update example * remove comparison.in.cpp file and related tests from manifest * fix example * remove redundant arguments * refactor backend tests * add pdpd broadcast to the spec, and different precison to SLT test * add precisions to SLT cpu * remove unsupported type from SLT * revert the deletion of comparison.in.cpp file * remove visitors test, since it will be added in the other PR * remove equal from CMakeLists.txt * refactor links in the spec * revert unwanted changes * remove equal from unit test manifest * revert links modification in spec * add 
namespace * split SSLTs for comaprison ops into seperate files * fix SSLTs names * add missing new lines * udpate output type in spec * rafactor numeric backend test to template * merge numeric template tests into equal --- docs/ops/comparison/Equal_1.md | 62 +++++++++--------- .../tests/functional/op_reference/equal.cpp | 42 ++++++++++++ .../single_layer/comparison_ops.hpp | 16 +++++ .../serialization/single_layer/equal.cpp | 63 ++++++++++++++++++ .../serialization/single_layer/greater.cpp | 63 ++++++++++++++++++ .../serialization/single_layer/greater_eq.cpp | 63 ++++++++++++++++++ .../serialization/single_layer/less.cpp | 63 ++++++++++++++++++ .../serialization/single_layer/less_eq.cpp | 63 ++++++++++++++++++ .../serialization/single_layer/not_equal.cpp | 63 ++++++++++++++++++ .../single_layer_tests/comparison.cpp | 3 + .../layer_tests_summary/utils/constants.py | 1 + ngraph/test/CMakeLists.txt | 1 - ngraph/test/backend/numeric.in.cpp | 65 ------------------- ngraph/test/runtime/ie/unit_test.manifest | 5 -- 14 files changed, 473 insertions(+), 100 deletions(-) create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/comparison_ops.hpp create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/equal.cpp create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/greater.cpp create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/greater_eq.cpp create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/less.cpp create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/less_eq.cpp create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/not_equal.cpp delete mode 100644 ngraph/test/backend/numeric.in.cpp diff --git a/docs/ops/comparison/Equal_1.md b/docs/ops/comparison/Equal_1.md index f72d3302665..9bdd3361c26 100644 --- a/docs/ops/comparison/Equal_1.md +++ b/docs/ops/comparison/Equal_1.md @@ -4,35 +4,10 @@ **Category**: Comparison binary operation -**Short description**: *Equal* performs element-wise comparison operation with two given tensors applying multi-directional broadcast rules. - -**Attributes**: - -* *auto_broadcast* - - * **Description**: specifies rules used for auto-broadcasting of input tensors. - * **Range of values**: - * *none* - no auto-broadcasting is allowed, all input shapes should match - * *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in ONNX docs. - * **Type**: string - * **Default value**: "numpy" - * **Required**: *no* - -**Inputs** - -* **1**: A tensor of type *T*. **Required.** -* **2**: A tensor of type *T*. **Required.** - -**Outputs** - -* **1**: The result of element-wise comparison operation. A tensor of type boolean. - -**Types** - -* *T*: arbitrary supported type. +**Short description**: *Equal* performs element-wise comparison operation with two given input tensors applying multi-directional broadcast rules specified in the *auto_broadcast* attribute. **Detailed description** -Before performing arithmetic operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value. 
+Before performing arithmetic operation, input tensors *a* and *b* are broadcasted if their shapes are different and *auto_broadcast* attributes is not *none*. Broadcasting is performed according to *auto_broadcast* value. After broadcasting *Equal* does the following with the input tensors *a* and *b*: @@ -40,12 +15,40 @@ After broadcasting *Equal* does the following with the input tensors *a* and *b* o_{i} = a_{i} == b_{i} \f] +**Attributes**: + +* *auto_broadcast* + + * **Description**: specifies rules used for auto-broadcasting of input tensors. + * **Range of values**: + * *none* - no auto-broadcasting is allowed, all input shapes should match, + * *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md), + * *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md). + * **Type**: string + * **Default value**: "numpy" + * **Required**: *no* + +**Inputs** + +* **1**: A tensor of type *T* and arbitrary shape. **Required.** +* **2**: A tensor of type *T* and arbitrary shape. **Required.** + +**Outputs** + +* **1**: The result of element-wise **comparison** operation applied to the input tensors. A tensor of type *T_BOOL* and the same shape equal to broadcasted shape of two inputs. + +**Types** + +* *T*: arbitrary supported type. +* *T_BOOL*: `boolean`. + **Examples** -*Example 1* +*Example 1: no broadcast* ```xml + 256 @@ -65,9 +68,10 @@ o_{i} = a_{i} == b_{i} ``` -*Example 2: broadcast* +*Example 2: numpy broadcast* ```xml + 8 diff --git a/docs/template_plugin/tests/functional/op_reference/equal.cpp b/docs/template_plugin/tests/functional/op_reference/equal.cpp index 01cd430be72..c4a2ba2b0f5 100644 --- a/docs/template_plugin/tests/functional/op_reference/equal.cpp +++ b/docs/template_plugin/tests/functional/op_reference/equal.cpp @@ -75,6 +75,48 @@ std::vector generateComparisonCombinedParams() { INSTANTIATE_TEST_SUITE_P(smoke_Comparison_With_Hardcoded_Refs, ReferenceComparisonLayerTest, ::testing::ValuesIn(generateComparisonCombinedParams()), ReferenceComparisonLayerTest::getTestCaseName); + +template +std::vector generateNumericParams(const element::Type& type) { + using T = typename element_type_traits::value_type; + std::vector compParams { + Builder {} + .compType(ComparisonTypes::EQUAL) + .input1({{4}, type, std::vector {-2.5f, 25.5f, 2.25f, NAN}}) + .input2({{4}, type, std::vector {10.0f, 5.0f, 2.25f, 10.0f}}) + .expected({{4}, element::boolean, std::vector {0, 0, 1, 0, }}), + Builder {} + .compType(ComparisonTypes::EQUAL) + .input1({{2, 3}, type, std::vector {0.0f, NAN, NAN, 1.0f, 21.0f, -INFINITY}}) + .input2({{2, 3}, type, std::vector {1.0f, NAN, 23.0f, 1.0f, 19.0f, 21.0f}}) + .expected({{2, 3}, element::boolean, std::vector {0, 0, 0, 1, 0, 0}}), + Builder {} + .compType(ComparisonTypes::EQUAL) + .input1({{1}, type, std::vector {INFINITY}}) + .input2({{1}, type, std::vector {INFINITY}}) + .expected({{1}, element::boolean, std::vector {1}}), + Builder {} + .compType(ComparisonTypes::EQUAL) + .input1({{5}, type, std::vector {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}}) + .input2({{5}, type, std::vector {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}}) + .expected({{5}, element::boolean, std::vector {0, 0, 1, 0, 0}})}; + return compParams; +} + +std::vector generateNumericCombinedParams() { + const std::vector> compTypeParams { + generateNumericParams(element::f16), + generateNumericParams(element::f32)}; + std::vector 
combinedParams; + + for (const auto& params : compTypeParams) { + combinedParams.insert(combinedParams.end(), params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_Numeric_With_Hardcoded_Refs, ReferenceComparisonLayerTest, ::testing::ValuesIn(generateNumericCombinedParams()), + ReferenceComparisonLayerTest::getTestCaseName); } // namespace } // namespace ComparisonOpsRefTestDefinitions } // namespace reference_tests diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/comparison_ops.hpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/comparison_ops.hpp new file mode 100644 index 00000000000..76f982d1961 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/comparison_ops.hpp @@ -0,0 +1,16 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_layer/comparison.hpp" + +struct ComparisionOpsData { + const std::map, std::vector>> inputShapes; + const std::vector inputsPrecisions; + const std::vector secondInputTypes; + const std::map additional_config; + const ngraph::helpers::ComparisonTypes opType; + const InferenceEngine::Precision ieInputPrecision; + const InferenceEngine::Precision ieOutputPrecision; + const std::string deviceName; +}; diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/equal.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/equal.cpp new file mode 100644 index 00000000000..403a941674c --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/equal.cpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "comparison_ops.hpp" + +using namespace LayerTestsDefinitions; +using namespace LayerTestsDefinitions::ComparisonParams; + +namespace { +TEST_P(ComparisonLayerTest, Serialize) { + Serialize(); + } + +ComparisionOpsData data = { + // inputsShape + { + {{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}}, + {{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}}, + {{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}}, + {{1, 3, 20}, {{20}, {2, 1, 1}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {2, 1, 3, 4}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {141, 1, 3, 4}}}, + {{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}}, + }, + // inputsPrecisions + { + InferenceEngine::Precision::FP64, + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::I32, + InferenceEngine::Precision::U32, + InferenceEngine::Precision::BOOL, + }, + // secondIinputsType + { + ngraph::helpers::InputLayerType::CONSTANT, + ngraph::helpers::InputLayerType::PARAMETER, + }, + // additionalConfig + {}, + // opType + ngraph::helpers::ComparisonTypes::EQUAL, + // ieInputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // ieOutputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // deviceName + CommonTestUtils::DEVICE_CPU, +}; + +const auto SerializeEqualTestParams = ::testing::Combine( + ::testing::ValuesIn(CommonTestUtils::combineParams(data.inputShapes)), + ::testing::ValuesIn(data.inputsPrecisions), + ::testing::Values(data.opType), + ::testing::ValuesIn(data.secondInputTypes), + ::testing::Values(data.ieInputPrecision), + ::testing::Values(data.ieOutputPrecision), + ::testing::Values(data.deviceName), + 
::testing::Values(data.additional_config)); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ComparisonLayerTest, SerializeEqualTestParams, ComparisonLayerTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/greater.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/greater.cpp new file mode 100644 index 00000000000..bf41077b0d7 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/greater.cpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "comparison_ops.hpp" + +using namespace LayerTestsDefinitions; +using namespace LayerTestsDefinitions::ComparisonParams; + +namespace { +TEST_P(ComparisonLayerTest, Serialize) { + Serialize(); + } + +ComparisionOpsData data = { + // inputsShape + { + {{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}}, + {{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}}, + {{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}}, + {{1, 3, 20}, {{20}, {2, 1, 1}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {2, 1, 3, 4}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {141, 1, 3, 4}}}, + {{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}}, + }, + // inputsPrecisions + { + InferenceEngine::Precision::FP64, + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::I32, + InferenceEngine::Precision::U32, + InferenceEngine::Precision::BOOL, + }, + // secondIinputsType + { + ngraph::helpers::InputLayerType::CONSTANT, + ngraph::helpers::InputLayerType::PARAMETER, + }, + // additionalConfig + {}, + // opType + ngraph::helpers::ComparisonTypes::GREATER, + // ieInputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // ieOutputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // deviceName + CommonTestUtils::DEVICE_CPU, +}; + +const auto SerializeGreaterTestParams = ::testing::Combine( + ::testing::ValuesIn(CommonTestUtils::combineParams(data.inputShapes)), + ::testing::ValuesIn(data.inputsPrecisions), + ::testing::Values(data.opType), + ::testing::ValuesIn(data.secondInputTypes), + ::testing::Values(data.ieInputPrecision), + ::testing::Values(data.ieOutputPrecision), + ::testing::Values(data.deviceName), + ::testing::Values(data.additional_config)); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ComparisonLayerTest, SerializeGreaterTestParams, ComparisonLayerTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/greater_eq.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/greater_eq.cpp new file mode 100644 index 00000000000..cca32aff10b --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/greater_eq.cpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "comparison_ops.hpp" + +using namespace LayerTestsDefinitions; +using namespace LayerTestsDefinitions::ComparisonParams; + +namespace { +TEST_P(ComparisonLayerTest, Serialize) { + Serialize(); + } + +ComparisionOpsData data = { + // inputsShape + { + {{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}}, + {{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}}, + {{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}}, + {{1, 3, 20}, {{20}, {2, 1, 1}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {2, 1, 3, 
4}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {141, 1, 3, 4}}}, + {{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}}, + }, + // inputsPrecisions + { + InferenceEngine::Precision::FP64, + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::I32, + InferenceEngine::Precision::U32, + InferenceEngine::Precision::BOOL, + }, + // secondIinputsType + { + ngraph::helpers::InputLayerType::CONSTANT, + ngraph::helpers::InputLayerType::PARAMETER, + }, + // additionalConfig + {}, + // opType + ngraph::helpers::ComparisonTypes::GREATER_EQUAL, + // ieInputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // ieOutputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // deviceName + CommonTestUtils::DEVICE_CPU, +}; + +const auto SerializeGreaterEqualTestParams = ::testing::Combine( + ::testing::ValuesIn(CommonTestUtils::combineParams(data.inputShapes)), + ::testing::ValuesIn(data.inputsPrecisions), + ::testing::Values(data.opType), + ::testing::ValuesIn(data.secondInputTypes), + ::testing::Values(data.ieInputPrecision), + ::testing::Values(data.ieOutputPrecision), + ::testing::Values(data.deviceName), + ::testing::Values(data.additional_config)); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ComparisonLayerTest, SerializeGreaterEqualTestParams, ComparisonLayerTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/less.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/less.cpp new file mode 100644 index 00000000000..3fc91e633d9 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/less.cpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "comparison_ops.hpp" + +using namespace LayerTestsDefinitions; +using namespace LayerTestsDefinitions::ComparisonParams; + +namespace { +TEST_P(ComparisonLayerTest, Serialize) { + Serialize(); + } + +ComparisionOpsData data = { + // inputsShape + { + {{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}}, + {{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}}, + {{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}}, + {{1, 3, 20}, {{20}, {2, 1, 1}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {2, 1, 3, 4}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {141, 1, 3, 4}}}, + {{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}}, + }, + // inputsPrecisions + { + InferenceEngine::Precision::FP64, + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::I32, + InferenceEngine::Precision::U32, + InferenceEngine::Precision::BOOL, + }, + // secondIinputsType + { + ngraph::helpers::InputLayerType::CONSTANT, + ngraph::helpers::InputLayerType::PARAMETER, + }, + // additionalConfig + {}, + // opType + ngraph::helpers::ComparisonTypes::LESS, + // ieInputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // ieOutputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // deviceName + CommonTestUtils::DEVICE_CPU, +}; + +const auto SerializeLessTestParams = ::testing::Combine( + ::testing::ValuesIn(CommonTestUtils::combineParams(data.inputShapes)), + ::testing::ValuesIn(data.inputsPrecisions), + ::testing::Values(data.opType), + ::testing::ValuesIn(data.secondInputTypes), + ::testing::Values(data.ieInputPrecision), + ::testing::Values(data.ieOutputPrecision), + ::testing::Values(data.deviceName), + 
::testing::Values(data.additional_config)); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ComparisonLayerTest, SerializeLessTestParams, ComparisonLayerTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/less_eq.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/less_eq.cpp new file mode 100644 index 00000000000..3f0db85dee2 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/less_eq.cpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "comparison_ops.hpp" + +using namespace LayerTestsDefinitions; +using namespace LayerTestsDefinitions::ComparisonParams; + +namespace { +TEST_P(ComparisonLayerTest, Serialize) { + Serialize(); + } + +ComparisionOpsData data = { + // inputsShape + { + {{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}}, + {{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}}, + {{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}}, + {{1, 3, 20}, {{20}, {2, 1, 1}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {2, 1, 3, 4}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {141, 1, 3, 4}}}, + {{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}}, + }, + // inputsPrecisions + { + InferenceEngine::Precision::FP64, + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::I32, + InferenceEngine::Precision::U32, + InferenceEngine::Precision::BOOL, + }, + // secondIinputsType + { + ngraph::helpers::InputLayerType::CONSTANT, + ngraph::helpers::InputLayerType::PARAMETER, + }, + // additionalConfig + {}, + // opType + ngraph::helpers::ComparisonTypes::LESS_EQUAL, + // ieInputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // ieOutputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // deviceName + CommonTestUtils::DEVICE_CPU, +}; + +const auto SerializeLessEqualTestParams = ::testing::Combine( + ::testing::ValuesIn(CommonTestUtils::combineParams(data.inputShapes)), + ::testing::ValuesIn(data.inputsPrecisions), + ::testing::Values(data.opType), + ::testing::ValuesIn(data.secondInputTypes), + ::testing::Values(data.ieInputPrecision), + ::testing::Values(data.ieOutputPrecision), + ::testing::Values(data.deviceName), + ::testing::Values(data.additional_config)); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ComparisonLayerTest, SerializeLessEqualTestParams, ComparisonLayerTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/not_equal.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/not_equal.cpp new file mode 100644 index 00000000000..c034c4230be --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/not_equal.cpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "comparison_ops.hpp" + +using namespace LayerTestsDefinitions; +using namespace LayerTestsDefinitions::ComparisonParams; + +namespace { +TEST_P(ComparisonLayerTest, Serialize) { + Serialize(); + } + +ComparisionOpsData data = { + // inputsShape + { + {{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}}, + {{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}}, + {{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}}, + {{1, 3, 20}, {{20}, {2, 1, 1}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {2, 1, 
3, 4}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {141, 1, 3, 4}}}, + {{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}}, + }, + // inputsPrecisions + { + InferenceEngine::Precision::FP64, + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::I32, + InferenceEngine::Precision::U32, + InferenceEngine::Precision::BOOL, + }, + // secondIinputsType + { + ngraph::helpers::InputLayerType::CONSTANT, + ngraph::helpers::InputLayerType::PARAMETER, + }, + // additionalConfig + {}, + // opType + ngraph::helpers::ComparisonTypes::NOT_EQUAL, + // ieInputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // ieOutputPrecision + InferenceEngine::Precision::UNSPECIFIED, + // deviceName + CommonTestUtils::DEVICE_CPU, +}; + +const auto SerializeNotEqualTestParams = ::testing::Combine( + ::testing::ValuesIn(CommonTestUtils::combineParams(data.inputShapes)), + ::testing::ValuesIn(data.inputsPrecisions), + ::testing::Values(data.opType), + ::testing::ValuesIn(data.secondInputTypes), + ::testing::Values(data.ieInputPrecision), + ::testing::Values(data.ieOutputPrecision), + ::testing::Values(data.deviceName), + ::testing::Values(data.additional_config)); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ComparisonLayerTest, SerializeNotEqualTestParams, ComparisonLayerTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/comparison.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/comparison.cpp index ad6c1c897b1..5c8ce8fb8b4 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/comparison.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/comparison.cpp @@ -22,6 +22,9 @@ std::map, std::vector>> inputShapes = { std::vector inputsPrecisions = { InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::I32, + InferenceEngine::Precision::BOOL, }; std::vector comparisonOpTypes = { diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py index f847e870647..c653bf4fe8c 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py @@ -31,6 +31,7 @@ VERIFIED_OP_REFERENCES = [ 'DepthToSpace-1', 'DetectionOutput-1', 'Divide-1', + 'Equal-1', 'Erf-1', 'ExperimentalDetectronDetectionOutput-6', 'ExperimentalDetectronGenerateProposalsSingleImage-6', diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 1b0ee82ce08..32c1cf043aa 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -475,7 +475,6 @@ set(MULTI_TEST_SRC backend/normalize_l2.in.cpp backend/non_max_suppression.in.cpp backend/non_zero.in.cpp - backend/numeric.in.cpp backend/one_hot.in.cpp backend/pad.in.cpp backend/parameter_as_output.in.cpp diff --git a/ngraph/test/backend/numeric.in.cpp b/ngraph/test/backend/numeric.in.cpp deleted file mode 100644 index f85f336d16c..00000000000 --- a/ngraph/test/backend/numeric.in.cpp +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include 
"util/engine/test_engines.hpp" -#include "util/test_case.hpp" -#include "util/test_control.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; -using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); - -NGRAPH_TEST(${BACKEND_NAME}, numeric_float_nan) -{ - Shape shape{5}; - auto A = op::Constant::create(element::f32, shape, {-2.5f, 25.5f, 2.25f, NAN, 6.0f}); - auto B = op::Constant::create(element::f32, shape, {10.0f, 5.0f, 2.25f, 10.0f, NAN}); - auto f = make_shared(make_shared(A, B), ParameterVector{}); - - auto test_case = test::TestCase(f); - test_case.add_expected_output(shape, {false, false, true, false, false}); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, numeric_double_nan) -{ - Shape shape{5}; - auto A = op::Constant::create(element::f64, shape, {-2.5f, 25.5f, 2.25f, NAN, 6.0f}); - auto B = op::Constant::create(element::f64, shape, {10.0f, 5.0f, 2.25f, 10.0f, NAN}); - auto f = make_shared(make_shared(A, B), ParameterVector{}); - - auto test_case = test::TestCase(f); - test_case.add_expected_output(shape, {false, false, true, false, false}); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, numeric_float_inf) -{ - Shape shape{5}; - auto A = op::Constant::create(element::f32, shape, {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}); - auto B = op::Constant::create(element::f32, shape, {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}); - auto f = make_shared(make_shared(A, B), ParameterVector{}); - - auto test_case = test::TestCase(f); - test_case.add_expected_output(shape, {false, false, true, false, false}); - test_case.run(); -} - -NGRAPH_TEST(${BACKEND_NAME}, numeric_double_inf) -{ - Shape shape{5}; - auto A = op::Constant::create(element::f64, shape, {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}); - auto B = op::Constant::create(element::f64, shape, {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}); - auto f = make_shared(make_shared(A, B), ParameterVector{}); - - auto test_case = test::TestCase(f); - test_case.add_expected_output(shape, {false, false, true, false, false}); - test_case.run(); -} diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest index 2f7fd7c7c4b..a3a0d74ad1f 100644 --- a/ngraph/test/runtime/ie/unit_test.manifest +++ b/ngraph/test/runtime/ie/unit_test.manifest @@ -248,10 +248,6 @@ onnx_size_op_graph_middle shape_of_vector shape_of_matrix shape_of_5d -numeric_float_nan -numeric_float_inf -numeric_double_nan -numeric_double_inf fake_quantize_pdpd IE_GPU.fake_quantize @@ -421,7 +417,6 @@ max_pool_2d_1channel_1image_overpadded grn_2d_with_bias erf divide_adjoint_stability -equal notequal greater greatereq From 51d511c8ac37c23f840b203b0ec32defde0121a9 Mon Sep 17 00:00:00 2001 From: Patryk Elszkowski Date: Wed, 11 Aug 2021 12:06:25 +0200 Subject: [PATCH 08/19] remove `v0` namespace from reference implementation of fake quantize. (#6977) * remove `v0` namespace from reference implementation of fake quantize. 
* fix ngraph check message --- .../runtime/reference/fake_quantize.hpp | 128 +++++++++--------- .../runtime/interpreter/evaluates_map.cpp | 26 ++-- 2 files changed, 74 insertions(+), 80 deletions(-) diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/fake_quantize.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/fake_quantize.hpp index c9c635991a9..f503c9f67b0 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/fake_quantize.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/fake_quantize.hpp @@ -194,76 +194,70 @@ namespace ngraph } } // namespace fake_quantize_details - inline namespace v0 + + template + void fake_quantize(const T* const arg, + const T* const in_low, + const T* const in_high, + const T* const out_low, + const T* const out_high, + T* const out, + const Shape& arg_shape, + const Shape& in_low_shape, + const Shape& in_high_shape, + const Shape& out_low_shape, + const Shape& out_high_shape, + size_t levels, + const op::AutoBroadcastSpec& broadcast) { - template - void fake_quantize(const T* const arg, - const T* const in_low, - const T* const in_high, - const T* const out_low, - const T* const out_high, - T* const out, - const Shape& arg_shape, - const Shape& in_low_shape, - const Shape& in_high_shape, - const Shape& out_low_shape, - const Shape& out_high_shape, - size_t levels, - const op::AutoBroadcastSpec& broadcast) + using namespace fake_quantize_details; + + if (shape_size(in_low_shape) == 1 && shape_size(in_high_shape) == 1 && + shape_size(out_low_shape) == 1 && shape_size(out_high_shape) == 1) { - using namespace fake_quantize_details; - - if (shape_size(in_low_shape) == 1 && shape_size(in_high_shape) == 1 && - shape_size(out_low_shape) == 1 && shape_size(out_high_shape) == 1) + const size_t arg_size = shape_size(arg_shape); + const auto q = [=](const T& a) { + return quantize(a, *in_low, *in_high, *out_low, *out_high, levels); + }; + for (size_t i = 0; i < arg_size; ++i) { - const size_t arg_size = shape_size(arg_shape); - const auto q = [=](const T& a) { - return quantize(a, *in_low, *in_high, *out_low, *out_high, levels); - }; - for (size_t i = 0; i < arg_size; ++i) - { - out[i] = q(arg[i]); - } - } - else - { - NGRAPH_CHECK(in_low_shape.size() <= arg_shape.size() && - in_high_shape.size() <= arg_shape.size() && - out_low_shape.size() <= arg_shape.size() && - out_high_shape.size() <= arg_shape.size(), - "Tensors with inout\\output ranges should have rank less or " - "equal to data tensor rank equal to ", - arg_shape.size()); - - const QuantizationBound in_low_bound( - in_low, in_low_shape, arg_shape, broadcast); - const QuantizationBound in_high_bound( - in_high, in_high_shape, arg_shape, broadcast); - const QuantizationBound out_low_bound( - out_low, out_low_shape, arg_shape, broadcast); - const QuantizationBound out_high_bound( - out_high, out_high_shape, arg_shape, broadcast); - - std::vector current_dim(arg_shape.size(), 0); - const auto arg_shape_size = shape_size(arg_shape); - for (size_t index = 0; index < arg_shape_size; ++index) - { - const T in_low_val = in_low_bound.get_value(current_dim, index); - const T in_high_val = in_high_bound.get_value(current_dim, index); - const T out_low_val = out_low_bound.get_value(current_dim, index); - const T out_high_val = out_high_bound.get_value(current_dim, index); - - out[index] = quantize(arg[index], - in_low_val, - in_high_val, - out_low_val, - out_high_val, - levels); - increment_current_dim(current_dim, arg_shape); - } + out[i] = q(arg[i]); } } - } // 
namespace v0 - } // namespace reference - } // namespace runtime + else + { + NGRAPH_CHECK(in_low_shape.size() <= arg_shape.size() && + in_high_shape.size() <= arg_shape.size() && + out_low_shape.size() <= arg_shape.size() && + out_high_shape.size() <= arg_shape.size(), + "Tensors with input\\output ranges should have rank less or " + "equal to data tensor rank equal to ", + arg_shape.size()); + + const QuantizationBound in_low_bound( + in_low, in_low_shape, arg_shape, broadcast); + const QuantizationBound in_high_bound( + in_high, in_high_shape, arg_shape, broadcast); + const QuantizationBound out_low_bound( + out_low, out_low_shape, arg_shape, broadcast); + const QuantizationBound out_high_bound( + out_high, out_high_shape, arg_shape, broadcast); + + std::vector current_dim(arg_shape.size(), 0); + const auto arg_shape_size = shape_size(arg_shape); + for (size_t index = 0; index < arg_shape_size; ++index) + { + const T in_low_val = in_low_bound.get_value(current_dim, index); + const T in_high_val = in_high_bound.get_value(current_dim, index); + const T out_low_val = out_low_bound.get_value(current_dim, index); + const T out_high_val = out_high_bound.get_value(current_dim, index); + + out[index] = quantize( + arg[index], in_low_val, in_high_val, out_low_val, out_high_val, levels); + increment_current_dim(current_dim, arg_shape); + } + } + } + } // namespace reference + } // namespace runtime } // namespace ngraph diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp b/ngraph/test/runtime/interpreter/evaluates_map.cpp index 42572cc3f4b..6f410d90abf 100644 --- a/ngraph/test/runtime/interpreter/evaluates_map.cpp +++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp @@ -2439,19 +2439,19 @@ namespace const HostTensorVector& inputs) { using T = typename element_type_traits::value_type; - runtime::reference::v0::fake_quantize(inputs[0]->get_data_ptr(), - inputs[1]->get_data_ptr(), - inputs[2]->get_data_ptr(), - inputs[3]->get_data_ptr(), - inputs[4]->get_data_ptr(), - outputs[0]->get_data_ptr(), - op->get_input_shape(0), - op->get_input_shape(1), - op->get_input_shape(2), - op->get_input_shape(3), - op->get_input_shape(4), - op->get_levels(), - op->get_auto_broadcast()); + runtime::reference::fake_quantize(inputs[0]->get_data_ptr(), + inputs[1]->get_data_ptr(), + inputs[2]->get_data_ptr(), + inputs[3]->get_data_ptr(), + inputs[4]->get_data_ptr(), + outputs[0]->get_data_ptr(), + op->get_input_shape(0), + op->get_input_shape(1), + op->get_input_shape(2), + op->get_input_shape(3), + op->get_input_shape(4), + op->get_levels(), + op->get_auto_broadcast()); return true; } From c986ce09ceaff1958021973292cc0c2c5bbfb8ad Mon Sep 17 00:00:00 2001 From: Dmitrii Khurtin Date: Wed, 11 Aug 2021 14:14:47 +0300 Subject: [PATCH 09/19] [GNA] Transpose bias (#6759) * transpose bias * removed bias transpose; added bias validation predicate to pattern * fixed after review; added handling of the case bias_output_shape.size() == 1 and bias_output_shape.at(0) > 1 * moved bias shape size check to matcher pattern; replaced loop with algorithm --- ...onvert_matmul_to_pointwise_convolution.cpp | 42 +++++- .../tests/unit/gna/CMakeLists.txt | 2 + ...onvert_matmul_to_pointwise_convolution.cpp | 140 +++++++++++------- 3 files changed, 126 insertions(+), 58 deletions(-) diff --git a/inference-engine/src/gna_plugin/transformations/convert_matmul_to_pointwise_convolution.cpp b/inference-engine/src/gna_plugin/transformations/convert_matmul_to_pointwise_convolution.cpp index f96ed1dab0e..e9157b77613 100644 --- 
a/inference-engine/src/gna_plugin/transformations/convert_matmul_to_pointwise_convolution.cpp +++ b/inference-engine/src/gna_plugin/transformations/convert_matmul_to_pointwise_convolution.cpp @@ -19,6 +19,20 @@ NGRAPH_RTTI_DEFINITION(ConvertMatmulToPointWiseConvolution, "ConvertMatmulToPoin NGRAPH_RTTI_DEFINITION(ConvertMatmulWithBiasToPointWiseConvolution, "ConvertMatmulWithBiasToPointWiseConvolution", 0); NGRAPH_RTTI_DEFINITION(ConvertMatmulWithFqToPointWiseConvolution, "ConvertMatmulWithFqToPointWiseConvolution", 0); +static bool BiasValidation(const ngraph::Output& output) { + auto bias_output_shape = output.get_node()->get_output_shape(0); + if (bias_output_shape.size() > 4) { + gnalog() << "bias output shape (" << output.get_node()->get_friendly_name() << ") is more than 4\n"; + return false; + } + + if (bias_output_shape.size() == 1) { + return true; + } + + return std::count_if(bias_output_shape.begin(), bias_output_shape.end(), [](size_t el){ return el > 1; }) < 2; +} + static std::tuple VerifyAndGetConvParams(std::shared_ptr matmul_node) { auto input1_shape = matmul_node->get_input_shape(0); auto input2_shape = matmul_node->get_input_shape(1); @@ -83,10 +97,24 @@ static bool Convert(std::shared_ptr matmul_node, ngraph::copy_runtime_info(transpose_before, conv_node); std::shared_ptr root_node = matmul_node; - if (bias != nullptr) { - conv_node = std::make_shared(conv_node, bias); - ngraph::copy_runtime_info(transpose_before, conv_node); - root_node = add; + if (bias) { + auto bias_output_shape = bias->get_output_shape(0); + std::shared_ptr new_bias = bias; + if (bias_output_shape.size() > 1 || bias_output_shape.at(0) > 1) { + std::vector axes(4, 1); + auto iter = std::find_if(bias_output_shape.begin(), bias_output_shape.end(), [](size_t value) { return value > 1; }); + if (iter != bias_output_shape.end()) { + axes.at(1) = *iter; + } + new_bias = std::make_shared( + bias->get_output_element_type(0), + axes, + std::dynamic_pointer_cast(bias)->get_data_ptr()); + } + + conv_node = std::make_shared(conv_node, new_bias); + ngraph::copy_runtime_info(transpose_before, conv_node); + root_node = add; } if (fq != nullptr) { @@ -146,7 +174,7 @@ ConvertMatmulWithBiasToPointWiseConvolution::ConvertMatmulWithBiasToPointWiseCon ngraph::pattern::wrap_type()}); auto second_input = std::make_shared(ngraph::OutputVector{const_input, const_fq}); auto matmul = ngraph::pattern::wrap_type({ngraph::pattern::any_input(), second_input}); - auto bias = ngraph::pattern::wrap_type(); + auto bias = ngraph::pattern::wrap_type(BiasValidation); auto add = ngraph::pattern::wrap_type({matmul, bias}); ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher &m) { @@ -169,7 +197,7 @@ ConvertMatmulWithFqToPointWiseConvolution::ConvertMatmulWithFqToPointWiseConvolu ngraph::pattern::wrap_type()}); auto second_input = std::make_shared(ngraph::OutputVector{const_input, const_fq}); auto matmul = ngraph::pattern::wrap_type({ngraph::pattern::any_input(), second_input}); - auto bias = ngraph::pattern::wrap_type(); + auto bias = ngraph::pattern::wrap_type(BiasValidation); auto add = ngraph::pattern::wrap_type({matmul, bias}); auto matmul_out = std::make_shared(ngraph::OutputVector{add, matmul}); auto out_fq = ngraph::pattern::wrap_type({matmul_out, @@ -190,4 +218,4 @@ ConvertMatmulWithFqToPointWiseConvolution::ConvertMatmulWithFqToPointWiseConvolu auto m = std::make_shared(out_fq, matcher_name); this->register_matcher(m, callback); -} \ No newline at end of file +} diff --git 
a/inference-engine/tests/unit/gna/CMakeLists.txt b/inference-engine/tests/unit/gna/CMakeLists.txt index 99b9461e61a..76a2a1ac94f 100644 --- a/inference-engine/tests/unit/gna/CMakeLists.txt +++ b/inference-engine/tests/unit/gna/CMakeLists.txt @@ -8,6 +8,8 @@ addIeTargetTest( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} LINK_LIBRARIES + PRIVATE + ngraphFunctions gmock commonTestUtils_s GNAPlugin_test_static diff --git a/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_matmul_to_pointwise_convolution.cpp b/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_matmul_to_pointwise_convolution.cpp index 6439c5214e2..d352a52c36e 100644 --- a/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_matmul_to_pointwise_convolution.cpp +++ b/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_matmul_to_pointwise_convolution.cpp @@ -14,6 +14,7 @@ #include #include #include +#include "ngraph_functions/builders.hpp" namespace testing { @@ -119,6 +120,7 @@ void CreateMatMul::updateGraph(Graph& graph) { graph.output = matmul_node; } +template class CreateAdd : public CreateGraphDecorator { public: CreateAdd(CreateGraphDecoratorPtr prev_builder = nullptr) : CreateGraphDecorator(std::move(prev_builder)) {} @@ -126,8 +128,18 @@ protected: void updateGraph(Graph&) override; }; -void CreateAdd::updateGraph(Graph& graph) { - auto bias = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); +template +void CreateAdd::updateGraph(Graph& graph) { + std::vector axes(1, 1); + if (std::is_same, std::integral_constant>::value) { + auto shape = graph.output->get_output_shape(0); + if (std::is_same, std::integral_constant>::value) { + axes.resize(shape.size(), 1); + } + axes.back() = shape.back(); + } + + auto bias = ngraph::builder::makeConstant(ngraph::element::i64, axes, {}, true); auto add_node = std::make_shared(graph.output, bias); graph.output = add_node; } @@ -155,7 +167,8 @@ Graph createTransformedGraph(const ngraph::Shape& input_data_shape = ngraph::Sha // ------------------------------------------------------------------------------------------------------------ -Graph createReferenceGraph(bool addConstFakeQuantizeNode, bool insertAddNode, bool addOutFakeQuantizeNode) { +template +Graph createReferenceGraph() { Graph graph; graph.input_params = std::make_shared(ngraph::element::i64, @@ -173,8 +186,9 @@ Graph createReferenceGraph(bool addConstFakeQuantizeNode, bool insertAddNode, bo auto transpose_before = std::make_shared(reshape_before, const_transpose_before); std::shared_ptr parent_node = constant_node; - if (addConstFakeQuantizeNode) + if (std::is_same, std::integral_constant>::value) { parent_node = createFakeQuantizeNode(constant_node); + } auto weights_reshape_const = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, ngraph::Shape{8, 8, 1, 1}); @@ -189,15 +203,21 @@ Graph createReferenceGraph(bool addConstFakeQuantizeNode, bool insertAddNode, bo ngraph::op::PadType::VALID); parent_node = conv_node; + if (std::is_same, std::integral_constant>::value) { + std::vector axes(1, 1); + if (std::is_same, std::integral_constant>::value) { + axes.resize(4, 1); + axes[1] = 8; + } - if (insertAddNode) { - auto bias = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto bias = ngraph::builder::makeConstant(ngraph::element::i64, axes, {}, true); auto add_node = std::make_shared(parent_node, bias); parent_node = add_node; } - if (addOutFakeQuantizeNode) + if (std::is_same, 
std::integral_constant>::value) { parent_node = createFakeQuantizeNode(parent_node); + } auto const_transpose_after = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, @@ -254,47 +274,65 @@ TEST_P(ConvertMatmulToPointWiseConvolutionFixture, CompareFunctions) { execute_test(function, reference_function, pass_manager); } +namespace { + constexpr bool AddConstFakeQuantizeNode = true; + constexpr bool InsertAddNode = true; + constexpr bool OneDimensional = true; + constexpr bool OneChannel = true; + constexpr bool AddOutFakeQuantizeNode = true; +} + INSTANTIATE_TEST_SUITE_P(ConvertMatmulToPointWiseConvolutionTestSuite, ConvertMatmulToPointWiseConvolutionFixture, - ::testing::Values(std::make_tuple(createTransformedGraph(), - createReferenceGraph(false /* addConstFakeQuantizeNode */, - false /* insertAddNode */, - false /* addOutFakeQuantizeNode */), - createPassManager()), - std::make_tuple(createTransformedGraph(), - createReferenceGraph(true /* addConstFakeQuantizeNode */, - false /* insertAddNode */, - false /* addOutFakeQuantizeNode */), - createPassManager()), - std::make_tuple(createTransformedGraph(), - createReferenceGraph(false /* addConstFakeQuantizeNode */, - true /* insertAddNode */, - false /* addOutFakeQuantizeNode */), - createPassManager()), - std::make_tuple(createTransformedGraph(), - createReferenceGraph(true /* addConstFakeQuantizeNode */, - true /* insertAddNode */, - false /* addOutFakeQuantizeNode */), - createPassManager()), - std::make_tuple(createTransformedGraph(), - createReferenceGraph(false /* addConstFakeQuantizeNode */, - true /* insertAddNode */, - true /* addOutFakeQuantizeNode */), - createPassManager()), - std::make_tuple(createTransformedGraph(), - createReferenceGraph(true /* addConstFakeQuantizeNode */, - true /* insertAddNode */, - true /* addOutFakeQuantizeNode */), - createPassManager()), - std::make_tuple(createTransformedGraph(), - createReferenceGraph(false /* addConstFakeQuantizeNode */, - false /* insertAddNode */, - true /* addOutFakeQuantizeNode */), - createPassManager()), - std::make_tuple(createTransformedGraph(), - createReferenceGraph(true /* addConstFakeQuantizeNode */, - false /* insertAddNode */, - true /* addOutFakeQuantizeNode */), - createPassManager()))); + ::testing::Values( + std::make_tuple( + createTransformedGraph(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul, CreateFakeQuantize>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul, CreateFakeQuantize>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul, CreateFakeQuantize>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul>(), + createReferenceGraph(), + createPassManager()), + 
std::make_tuple(createTransformedGraph, CreateMatMul, CreateFakeQuantize>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul, CreateFakeQuantize>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph, CreateMatMul, CreateFakeQuantize>(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph(), + createReferenceGraph(), + createPassManager()), + std::make_tuple(createTransformedGraph(), + createReferenceGraph(), + createPassManager()))); // ------------------------------------------------------------------------------------------------------- @@ -373,19 +411,19 @@ std::vector transform_types = { CreateMatMul, CreateFakeQuantize>(), FixtureData::create, CreateMatMul>(), FixtureData::create, CreateMatMul, CreateFakeQuantize>(), FixtureData::create, CreateMatMul>(), FixtureData::create, CreateMatMul, CreateFakeQuantize>(), FixtureData::create Date: Wed, 11 Aug 2021 14:47:29 +0300 Subject: [PATCH 10/19] Update samples style (#6998) * Align clang config with IE * Apply code style * Update code style for c samples * Fixed style for c samples --- .../ie_bridges/c/samples/.clang-format | 13 +- .../opencv_c_wrapper/opencv_c_wrapper.cpp | 12 +- .../opencv_c_wrapper/opencv_c_wrapper.h | 24 +- .../object_detection_sample_ssd/c_w_dirent.h | 70 +++--- .../object_detection_sample_ssd/main.c | 38 ++- .../object_detection_sample_ssd.h | 14 +- inference-engine/samples/.clang-format | 13 +- .../samples/benchmark_app/benchmark_app.hpp | 154 +++++++------ .../benchmark_app/infer_request_wrap.hpp | 16 +- .../samples/benchmark_app/inputs_filling.cpp | 156 ++++++++++--- .../samples/benchmark_app/inputs_filling.hpp | 4 +- .../samples/benchmark_app/main.cpp | 180 +++++++++------ .../benchmark_app/statistics_report.hpp | 8 +- .../samples/benchmark_app/utils.cpp | 32 ++- .../samples/benchmark_app/utils.hpp | 33 ++- .../classification_sample_async.h | 12 +- .../classification_sample_async/main.cpp | 15 +- .../common/format_reader/MnistUbyte.cpp | 3 +- .../common/format_reader/format_reader.h | 22 +- .../common/format_reader/format_reader_ptr.h | 2 +- .../common/format_reader/opencv_wrapper.cpp | 13 +- .../common/format_reader/opencv_wrapper.h | 10 +- .../utils/include/samples/args_helper.hpp | 10 +- .../include/samples/classification_results.h | 18 +- .../common/utils/include/samples/common.hpp | 172 +++++++++----- .../include/samples/console_progress.hpp | 8 +- .../utils/include/samples/csv_dumper.hpp | 2 +- .../utils/include/samples/ocv_common.hpp | 4 +- .../include/samples/os/windows/w_dirent.h | 72 +++--- .../samples/common/utils/src/args_helper.cpp | 59 +++-- .../samples/common/utils/src/slog.cpp | 2 +- .../samples/hello_classification/main.cpp | 26 ++- .../hello_nv12_input_classification/main.cpp | 27 ++- .../samples/hello_query_device/main.cpp | 3 +- .../samples/hello_reshape_ssd/main.cpp | 12 +- .../reshape_ssd_extension.hpp | 19 +- .../ngraph_function_creation_sample/main.cpp | 141 ++++++++---- .../ngraph_function_creation_sample.hpp | 10 +- .../object_detection_sample_ssd/main.cpp | 40 +++- .../object_detection_sample_ssd.h | 11 +- .../samples/speech_sample/fileutils.cpp | 47 +++- .../samples/speech_sample/fileutils.hpp | 59 ++++- .../samples/speech_sample/main.cpp | 216 ++++++++++++------ .../samples/speech_sample/speech_sample.hpp | 66 +++--- .../samples/style_transfer_sample/main.cpp | 22 +- .../style_transfer_sample.h | 14 +- 46 files changed, 1240 insertions(+), 664 
deletions(-) diff --git a/inference-engine/ie_bridges/c/samples/.clang-format b/inference-engine/ie_bridges/c/samples/.clang-format index c93e6254b5b..ebe747b7838 100644 --- a/inference-engine/ie_bridges/c/samples/.clang-format +++ b/inference-engine/ie_bridges/c/samples/.clang-format @@ -1,6 +1,7 @@ BasedOnStyle: Google IndentWidth: 4 UseTab: Never +ColumnLimit: 120 Language: Cpp Standard: Cpp11 @@ -8,18 +9,20 @@ Standard: Cpp11 AccessModifierOffset: -4 AlignConsecutiveMacros: true AllowAllArgumentsOnNextLine: false +AllowAllConstructorInitializersOnNextLine: false AllowAllParametersOfDeclarationOnNextLine: false AllowShortFunctionsOnASingleLine: Empty AllowShortIfStatementsOnASingleLine: Never AllowShortLambdasOnASingleLine: Empty AllowShortLoopsOnASingleLine: false AlwaysBreakBeforeMultilineStrings: false -ColumnLimit: 160 -# Specialize this comment pragma in order to avoid changes in SEA copyrights +BinPackArguments: false +BinPackParameters: false CommentPragmas: '^#' DerivePointerAlignment: false FixNamespaceComments: true IndentCaseLabels: false -IndentPPDirectives: BeforeHash -SpaceBeforeCpp11BracedList: true -SpaceBeforeCtorInitializerColon: false \ No newline at end of file +IndentPPDirectives: AfterHash +ForEachMacros: + - foreach + - FOREACH_CHILD diff --git a/inference-engine/ie_bridges/c/samples/common/opencv_c_wrapper/opencv_c_wrapper.cpp b/inference-engine/ie_bridges/c/samples/common/opencv_c_wrapper/opencv_c_wrapper.cpp index b8be78e0191..a4a0782fc0e 100644 --- a/inference-engine/ie_bridges/c/samples/common/opencv_c_wrapper/opencv_c_wrapper.cpp +++ b/inference-engine/ie_bridges/c/samples/common/opencv_c_wrapper/opencv_c_wrapper.cpp @@ -24,8 +24,8 @@ int image_add_rectangles(c_mat_t* img, rectangle_t rects[], int classes[], int n #else - #include - #include +# include +# include int image_read(const char* img_path, c_mat_t* img) { if (img_path == nullptr || img == nullptr) { @@ -102,9 +102,11 @@ int image_free(c_mat_t* img) { int image_add_rectangles(c_mat_t* img, rectangle_t rects[], int classes[], int num, int thickness) { int colors_num = 21; color_t colors[21] = {// colors to be used for bounding boxes - {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, {153, 153, 153}, {30, 170, 250}, - {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, - {70, 0, 0}, {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, {81, 0, 81}}; + {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, + {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, + {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, + {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, + {81, 0, 81}}; for (int i = 0; i < num; i++) { int x = rects[i].x_min; diff --git a/inference-engine/ie_bridges/c/samples/common/opencv_c_wrapper/opencv_c_wrapper.h b/inference-engine/ie_bridges/c/samples/common/opencv_c_wrapper/opencv_c_wrapper.h index c250cd362a6..1516ff3edb0 100644 --- a/inference-engine/ie_bridges/c/samples/common/opencv_c_wrapper/opencv_c_wrapper.h +++ b/inference-engine/ie_bridges/c/samples/common/opencv_c_wrapper/opencv_c_wrapper.h @@ -6,23 +6,23 @@ #include #ifdef __cplusplus - #define OPENCV_C_EXTERN extern "C" +# define OPENCV_C_EXTERN extern "C" #else - #define OPENCV_C_EXTERN +# define OPENCV_C_EXTERN #endif #if defined(__GNUC__) && (__GNUC__ < 4) - #define OPENCV_C_WRAPPER(...) 
OPENCV_C_EXTERN __VA_ARGS__ +# define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __VA_ARGS__ #else - #if defined(_WIN32) - #ifdef opencv_c_wrapper_EXPORTS - #define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __declspec(dllexport) __VA_ARGS__ __cdecl - #else - #define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __declspec(dllimport) __VA_ARGS__ __cdecl - #endif - #else - #define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __attribute__((visibility("default"))) __VA_ARGS__ - #endif +# if defined(_WIN32) +# ifdef opencv_c_wrapper_EXPORTS +# define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __declspec(dllexport) __VA_ARGS__ __cdecl +# else +# define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __declspec(dllimport) __VA_ARGS__ __cdecl +# endif +# else +# define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __attribute__((visibility("default"))) __VA_ARGS__ +# endif #endif /** diff --git a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/c_w_dirent.h b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/c_w_dirent.h index 259019d3021..d8cfc821e2b 100644 --- a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/c_w_dirent.h +++ b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/c_w_dirent.h @@ -6,43 +6,43 @@ #if defined(_WIN32) - #ifndef WIN32_LEAN_AND_MEAN - #define WIN32_LEAN_AND_MEAN - #define WIN32_LEAN_AND_MEAN_UNDEF - #endif +# ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN_UNDEF +# endif - #ifndef NOMINMAX - #define NOMINMAX - #define NOMINMAX_UNDEF - #endif +# ifndef NOMINMAX +# define NOMINMAX +# define NOMINMAX_UNDEF +# endif - #if defined(_M_IX86) && !defined(_X86_) && !defined(_AMD64_) - #define _X86_ - #endif +# if defined(_M_IX86) && !defined(_X86_) && !defined(_AMD64_) +# define _X86_ +# endif - #if defined(_M_X64) && !defined(_X86_) && !defined(_AMD64_) - #define _AMD64_ - #endif +# if defined(_M_X64) && !defined(_X86_) && !defined(_AMD64_) +# define _AMD64_ +# endif - #if defined(_M_ARM) && !defined(_ARM_) && !defined(_ARM64_) - #define _ARM_ - #endif +# if defined(_M_ARM) && !defined(_ARM_) && !defined(_ARM64_) +# define _ARM_ +# endif - #if defined(_M_ARM64) && !defined(_ARM_) && !defined(_ARM64_) - #define _ARM64_ - #endif +# if defined(_M_ARM64) && !defined(_ARM_) && !defined(_ARM64_) +# define _ARM64_ +# endif - // clang-format off +// clang-format off #include #include #include #include #include - // clang-format on +// clang-format on - // Copied from linux libc sys/stat.h: - #define S_ISREG(m) (((m)&S_IFMT) == S_IFREG) - #define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR) +// Copied from linux libc sys/stat.h: +# define S_ISREG(m) (((m)&S_IFMT) == S_IFREG) +# define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR) /// @brief structure to store directory names typedef struct dirent { @@ -171,19 +171,19 @@ static void closedir(DIR* dp) { free(dp); } - #ifdef WIN32_LEAN_AND_MEAN_UNDEF - #undef WIN32_LEAN_AND_MEAN - #undef WIN32_LEAN_AND_MEAN_UNDEF - #endif +# ifdef WIN32_LEAN_AND_MEAN_UNDEF +# undef WIN32_LEAN_AND_MEAN +# undef WIN32_LEAN_AND_MEAN_UNDEF +# endif - #ifdef NOMINMAX_UNDEF - #undef NOMINMAX_UNDEF - #undef NOMINMAX - #endif +# ifdef NOMINMAX_UNDEF +# undef NOMINMAX_UNDEF +# undef NOMINMAX +# endif #else - #include - #include +# include +# include #endif \ No newline at end of file diff --git a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/main.c b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/main.c index cdaf1fefe5d..850a407daa2 100644 --- 
a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/main.c +++ b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/main.c @@ -12,9 +12,9 @@ #include "object_detection_sample_ssd.h" #ifdef _WIN32 - #include "c_w_dirent.h" +# include "c_w_dirent.h" #else - #include +# include #endif #define MAX_IMAGES 20 @@ -346,7 +346,10 @@ int main(int argc, char** argv) { goto err; for (i = 0; i < ver.num_vers; ++i) { printf(" %s\n", ver.versions[i].device_name); - printf(" %s version ......... %zu.%zu\n", ver.versions[i].description, ver.versions[i].major, ver.versions[i].minor); + printf(" %s version ......... %zu.%zu\n", + ver.versions[i].description, + ver.versions[i].major, + ver.versions[i].minor); printf(" Build ......... %s\n", ver.versions[i].build_number); } ie_core_versions_free(&ver); @@ -360,7 +363,8 @@ int main(int argc, char** argv) { printf("%sCustom extension loaded: %s\n", info, custom_ex_library_msg); } - if (custom_plugin_cfg_msg && (strcmp(device_name, "GPU") == 0 || strcmp(device_name, "MYRIAD") == 0 || strcmp(device_name, "HDDL") == 0)) { + if (custom_plugin_cfg_msg && + (strcmp(device_name, "GPU") == 0 || strcmp(device_name, "MYRIAD") == 0 || strcmp(device_name, "HDDL") == 0)) { // Config for device plugin custom extension is loaded from an .xml // description ie_config_t cfg = {"CONFIG_FILE", custom_plugin_cfg_msg, NULL}; @@ -480,7 +484,12 @@ int main(int argc, char** argv) { for (j = 0; j < resized_img.mat_data_size; ++j) resized_img.mat_data[j] = img.mat_data[j]; } else { - printf("%sImage is resized from (%d, %d) to (%zu, %zu)\n", warn, img.mat_width, img.mat_height, input_width, input_height); + printf("%sImage is resized from (%d, %d) to (%zu, %zu)\n", + warn, + img.mat_width, + img.mat_height, + input_width, + input_height); if (image_resize(&img, &resized_img, (int)input_width, (int)input_height) == -1) { printf("%sImage %s cannot be resized!\n", warn, file_paths[i]); @@ -623,7 +632,8 @@ int main(int argc, char** argv) { for (ch = 0; ch < num_channels; ++ch) { /** [images stride + channels stride + pixel id ] all in bytes * **/ - data[image_id * image_size * num_channels + ch * image_size + pid] = images[image_id].mat_data[pid * num_channels + ch]; + data[image_id * image_size * num_channels + ch * image_size + pid] = + images[image_id].mat_data[pid * num_channels + ch]; } } image_free(&images[image_id]); @@ -704,7 +714,15 @@ int main(int argc, char** argv) { int xmax = (int)(detection[curProposal * objectSize + 5] * originalImages[image_id].mat_width); int ymax = (int)(detection[curProposal * objectSize + 6] * originalImages[image_id].mat_height); - printf("[%d, %d] element, prob = %f (%d, %d)-(%d, %d) batch id : %d", curProposal, label, confidence, xmin, ymin, xmax, ymax, image_id); + printf("[%d, %d] element, prob = %f (%d, %d)-(%d, %d) batch id : %d", + curProposal, + label, + confidence, + xmin, + ymin, + xmax, + ymax, + image_id); if (confidence > 0.5) { /** Drawing only objects with >50% probability **/ @@ -722,7 +740,11 @@ int main(int argc, char** argv) { int batch_id; for (batch_id = 0; batch_id < batchSize; ++batch_id) { if (object_num[batch_id] > 0) { - image_add_rectangles(&originalImages[batch_id], boxes[batch_id], classes[batch_id], object_num[batch_id], 2); + image_add_rectangles(&originalImages[batch_id], + boxes[batch_id], + classes[batch_id], + object_num[batch_id], + 2); } const char* out = "out_"; char str_num[16] = {0}; diff --git 
a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h index ec3a1b98fbd..a7d99f8902c 100644 --- a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h +++ b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h @@ -16,14 +16,16 @@ static const char* model_message = "Required. Path to an .xml file with a traine static const char* image_message = "Required. Path to one or more images or folder with images."; /// @brief message for assigning cnn calculation to device -static const char* target_device_message = "Optional. Specify the target device to infer. " - "Default value is CPU. Use \"-d HETERO:\" format to specify " - "HETERO plugin. " - "Sample will look for a suitable plugin for device specified."; +static const char* target_device_message = + "Optional. Specify the target device to infer. " + "Default value is CPU. Use \"-d HETERO:\" format to specify " + "HETERO plugin. " + "Sample will look for a suitable plugin for device specified."; /// @brief message for plugin custom kernels desc -static const char* custom_plugin_config_message = "Required for GPU, MYRIAD, HDDL custom kernels. " - "Absolute path to the .xml config file with the kernels descriptions."; +static const char* custom_plugin_config_message = + "Required for GPU, MYRIAD, HDDL custom kernels. " + "Absolute path to the .xml config file with the kernels descriptions."; /// @brief message for user extension library argument static const char* custom_ex_library_message = "Required for CPU plugin custom layers. " diff --git a/inference-engine/samples/.clang-format b/inference-engine/samples/.clang-format index c93e6254b5b..ebe747b7838 100644 --- a/inference-engine/samples/.clang-format +++ b/inference-engine/samples/.clang-format @@ -1,6 +1,7 @@ BasedOnStyle: Google IndentWidth: 4 UseTab: Never +ColumnLimit: 120 Language: Cpp Standard: Cpp11 @@ -8,18 +9,20 @@ Standard: Cpp11 AccessModifierOffset: -4 AlignConsecutiveMacros: true AllowAllArgumentsOnNextLine: false +AllowAllConstructorInitializersOnNextLine: false AllowAllParametersOfDeclarationOnNextLine: false AllowShortFunctionsOnASingleLine: Empty AllowShortIfStatementsOnASingleLine: Never AllowShortLambdasOnASingleLine: Empty AllowShortLoopsOnASingleLine: false AlwaysBreakBeforeMultilineStrings: false -ColumnLimit: 160 -# Specialize this comment pragma in order to avoid changes in SEA copyrights +BinPackArguments: false +BinPackParameters: false CommentPragmas: '^#' DerivePointerAlignment: false FixNamespaceComments: true IndentCaseLabels: false -IndentPPDirectives: BeforeHash -SpaceBeforeCpp11BracedList: true -SpaceBeforeCtorInitializerColon: false \ No newline at end of file +IndentPPDirectives: AfterHash +ForEachMacros: + - foreach + - FOREACH_CHILD diff --git a/inference-engine/samples/benchmark_app/benchmark_app.hpp b/inference-engine/samples/benchmark_app/benchmark_app.hpp index c21222f9a96..a752152ec22 100644 --- a/inference-engine/samples/benchmark_app/benchmark_app.hpp +++ b/inference-engine/samples/benchmark_app/benchmark_app.hpp @@ -14,28 +14,33 @@ static const char help_message[] = "Print a usage message"; /// @brief message for images argument -static const char input_message[] = "Optional. Path to a folder with images and/or binaries or to specific image or binary file."; +static const char input_message[] = + "Optional. 
Path to a folder with images and/or binaries or to specific image or binary file."; /// @brief message for model argument -static const char model_message[] = "Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob files with " - "a trained compiled model."; +static const char model_message[] = + "Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob files with " + "a trained compiled model."; /// @brief message for execution mode static const char api_message[] = "Optional. Enable Sync/Async API. Default value is \"async\"."; /// @brief message for assigning cnn calculation to device -static const char target_device_message[] = "Optional. Specify a target device to infer on (the list of available devices is shown below). " - "Default value is CPU. Use \"-d HETERO:\" format to specify " - "HETERO plugin. " - "Use \"-d MULTI:\" format to specify MULTI plugin. " - "The application looks for a suitable plugin for the specified device."; +static const char target_device_message[] = + "Optional. Specify a target device to infer on (the list of available devices is shown below). " + "Default value is CPU. Use \"-d HETERO:\" format to specify " + "HETERO plugin. " + "Use \"-d MULTI:\" format to specify MULTI plugin. " + "The application looks for a suitable plugin for the specified device."; /// @brief message for iterations count -static const char iterations_count_message[] = "Optional. Number of iterations. " - "If not specified, the number of iterations is calculated depending on a device."; +static const char iterations_count_message[] = + "Optional. Number of iterations. " + "If not specified, the number of iterations is calculated depending on a device."; /// @brief message for requests count -static const char infer_requests_count_message[] = "Optional. Number of infer requests. Default value is determined automatically for device."; +static const char infer_requests_count_message[] = + "Optional. Number of infer requests. Default value is determined automatically for device."; /// @brief message for execution time static const char execution_time_message[] = "Optional. Time in seconds to execute topology."; @@ -45,86 +50,101 @@ static const char infer_num_threads_message[] = "Optional. Number of threads to "(including HETERO and MULTI cases)."; /// @brief message for #streams for CPU inference -static const char infer_num_streams_message[] = "Optional. Number of streams to use for inference on the CPU, GPU or MYRIAD devices " - "(for HETERO and MULTI device cases use format :,: or just " - "). " - "Default value is determined automatically for a device.Please note that although the " - "automatic selection " - "usually provides a reasonable performance, it still may be non - optimal for some cases, " - "especially for " - "very small networks. See sample's README for more details. " - "Also, using nstreams>1 is inherently throughput-oriented option, " - "while for the best-latency estimations the number of streams should be set to 1."; +static const char infer_num_streams_message[] = + "Optional. Number of streams to use for inference on the CPU, GPU or MYRIAD devices " + "(for HETERO and MULTI device cases use format :,: or just " + "). " + "Default value is determined automatically for a device.Please note that although the " + "automatic selection " + "usually provides a reasonable performance, it still may be non - optimal for some cases, " + "especially for " + "very small networks. See sample's README for more details. 
" + "Also, using nstreams>1 is inherently throughput-oriented option, " + "while for the best-latency estimations the number of streams should be set to 1."; /// @brief message for latency percentile settings static const char infer_latency_percentile_message[] = - "Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value is 50 (median)."; + "Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value " + "is 50 (median)."; /// @brief message for enforcing of BF16 execution where it is possible -static const char enforce_bf16_message[] = "Optional. By default floating point operations execution in bfloat16 precision are enforced " - "if supported by platform.\n" - " 'true' - enable bfloat16 regardless of platform support\n" - " 'false' - disable bfloat16 regardless of platform support"; +static const char enforce_bf16_message[] = + "Optional. By default floating point operations execution in bfloat16 precision are enforced " + "if supported by platform.\n" + " 'true' - enable bfloat16 regardless of platform support\n" + " 'false' - disable bfloat16 regardless of platform support"; /// @brief message for user library argument -static const char custom_cpu_library_message[] = "Required for CPU custom layers. Absolute path to a shared library with the kernels " - "implementations."; +static const char custom_cpu_library_message[] = + "Required for CPU custom layers. Absolute path to a shared library with the kernels " + "implementations."; /// @brief message for clDNN custom kernels desc -static const char custom_cldnn_message[] = "Required for GPU custom kernels. Absolute path to an .xml file with the kernels description."; +static const char custom_cldnn_message[] = + "Required for GPU custom kernels. Absolute path to an .xml file with the kernels description."; -static const char batch_size_message[] = "Optional. Batch size value. If not specified, the batch size value is determined from " - "Intermediate Representation."; +static const char batch_size_message[] = + "Optional. Batch size value. If not specified, the batch size value is determined from " + "Intermediate Representation."; // @brief message for CPU threads pinning option static const char infer_threads_pinning_message[] = "Optional. Explicit inference threads binding options (leave empty to let the OpenVINO to make a choice):\n" "\t\t\t\tenabling threads->cores pinning(\"YES\", which is already default for any conventional CPU), \n" - "\t\t\t\tletting the runtime to decide on the threads->different core types(\"HYBRID_AWARE\", which is default on the hybrid CPUs) \n" + "\t\t\t\tletting the runtime to decide on the threads->different core types(\"HYBRID_AWARE\", which is default on " + "the hybrid CPUs) \n" "\t\t\t\tthreads->(NUMA)nodes(\"NUMA\") or \n" "\t\t\t\tcompletely disable(\"NO\") CPU inference threads pinning"; // @brief message for stream_output option -static const char stream_output_message[] = "Optional. Print progress as a plain text. When specified, an interactive progress bar is " - "replaced with a " - "multiline output."; +static const char stream_output_message[] = + "Optional. Print progress as a plain text. When specified, an interactive progress bar is " + "replaced with a " + "multiline output."; // @brief message for report_type option -static const char report_type_message[] = "Optional. Enable collecting statistics report. 
\"no_counters\" report contains " - "configuration options specified, resulting FPS and latency. \"average_counters\" " - "report extends \"no_counters\" report and additionally includes average PM " - "counters values for each layer from the network. \"detailed_counters\" report " - "extends \"average_counters\" report and additionally includes per-layer PM " - "counters and latency for each executed infer request."; +static const char report_type_message[] = + "Optional. Enable collecting statistics report. \"no_counters\" report contains " + "configuration options specified, resulting FPS and latency. \"average_counters\" " + "report extends \"no_counters\" report and additionally includes average PM " + "counters values for each layer from the network. \"detailed_counters\" report " + "extends \"average_counters\" report and additionally includes per-layer PM " + "counters and latency for each executed infer request."; // @brief message for report_folder option static const char report_folder_message[] = "Optional. Path to a folder where statistics report is stored."; // @brief message for exec_graph_path option -static const char exec_graph_path_message[] = "Optional. Path to a file where to store executable graph information serialized."; +static const char exec_graph_path_message[] = + "Optional. Path to a file where to store executable graph information serialized."; // @brief message for progress bar option -static const char progress_message[] = "Optional. Show progress bar (can affect performance measurement). Default values is " - "\"false\"."; +static const char progress_message[] = + "Optional. Show progress bar (can affect performance measurement). Default values is " + "\"false\"."; // @brief message for performance counters option static const char pc_message[] = "Optional. Report performance counters."; #ifdef USE_OPENCV // @brief message for load config option -static const char load_config_message[] = "Optional. Path to XML/YAML/JSON file to load custom IE parameters." - " Please note, command line parameters have higher priority then parameters from configuration " - "file."; +static const char load_config_message[] = + "Optional. Path to XML/YAML/JSON file to load custom IE parameters." + " Please note, command line parameters have higher priority then parameters from configuration " + "file."; // @brief message for dump config option -static const char dump_config_message[] = "Optional. Path to XML/YAML/JSON file to dump IE parameters, which were set by application."; +static const char dump_config_message[] = + "Optional. Path to XML/YAML/JSON file to dump IE parameters, which were set by application."; #endif -static const char shape_message[] = "Optional. Set shape for input. For example, \"input1[1,3,224,224],input2[1,4]\" or " - "\"[1,3,224,224]\"" - " in case of one input size."; +static const char shape_message[] = + "Optional. Set shape for input. For example, \"input1[1,3,224,224],input2[1,4]\" or " + "\"[1,3,224,224]\"" + " in case of one input size."; -static const char layout_message[] = "Optional. Prompts how network layouts should be treated by application. " - "For example, \"input1[NCHW],input2[NC]\" or \"[NCHW]\" in case of one input size."; +static const char layout_message[] = + "Optional. Prompts how network layouts should be treated by application. " + "For example, \"input1[NCHW],input2[NC]\" or \"[NCHW]\" in case of one input size."; // @brief message for enabling caching static const char cache_dir_message[] = "Optional. 
Enables caching of loaded models to specified directory. " @@ -139,21 +159,25 @@ static const char gna_qb_message[] = "Optional. Weight bits for quantization: 8 static constexpr char inputs_precision_message[] = "Optional. Specifies precision for all input layers of the network."; -static constexpr char outputs_precision_message[] = "Optional. Specifies precision for all output layers of the network."; +static constexpr char outputs_precision_message[] = + "Optional. Specifies precision for all output layers of the network."; -static constexpr char iop_message[] = "Optional. Specifies precision for input and output layers by name.\n" - " Example: -iop \"input:FP16, output:FP16\".\n" - " Notice that quotes are required.\n" - " Overwrites precision from ip and op options for " - "specified layers."; +static constexpr char iop_message[] = + "Optional. Specifies precision for input and output layers by name.\n" + " Example: -iop \"input:FP16, output:FP16\".\n" + " Notice that quotes are required.\n" + " Overwrites precision from ip and op options for " + "specified layers."; -static constexpr char input_image_scale_message[] = "Optional. Scale values to be used for the input image per channel.\n" - "Values to be provided in the [R, G, B] format. Can be defined for desired input of the model.\n" - "Example: -iscale data[255,255,255],info[255,255,255]\n"; +static constexpr char input_image_scale_message[] = + "Optional. Scale values to be used for the input image per channel.\n" + "Values to be provided in the [R, G, B] format. Can be defined for desired input of the model.\n" + "Example: -iscale data[255,255,255],info[255,255,255]\n"; -static constexpr char input_image_mean_message[] = "Optional. Mean values to be used for the input image per channel.\n" - "Values to be provided in the [R, G, B] format. Can be defined for desired input of the model,\n" - "Example: -imean data[255,255,255],info[255,255,255]\n"; +static constexpr char input_image_mean_message[] = + "Optional. Mean values to be used for the input image per channel.\n" + "Values to be provided in the [R, G, B] format. Can be defined for desired input of the model,\n" + "Example: -imean data[255,255,255],info[255,255,255]\n"; /// @brief Define flag for showing help message
DEFINE_bool(h, false, help_message); diff --git a/inference-engine/samples/benchmark_app/infer_request_wrap.hpp b/inference-engine/samples/benchmark_app/infer_request_wrap.hpp index 741b2ad7f13..5e15f597e7e 100644 --- a/inference-engine/samples/benchmark_app/infer_request_wrap.hpp +++ b/inference-engine/samples/benchmark_app/infer_request_wrap.hpp @@ -23,7 +23,8 @@ typedef std::chrono::nanoseconds ns; typedef std::function QueueCallbackFunction; -/// @brief Wrapper class for InferenceEngine::InferRequest. Handles asynchronous callbacks and calculates execution time. +/// @brief Wrapper class for InferenceEngine::InferRequest. Handles asynchronous callbacks and calculates execution +/// time. class InferReqWrap final { public: using Ptr = std::shared_ptr; @@ -31,7 +32,9 @@ public: ~InferReqWrap() = default; explicit InferReqWrap(InferenceEngine::ExecutableNetwork& net, size_t id, QueueCallbackFunction callbackQueue) - : _request(net.CreateInferRequest()), _id(id), _callbackQueue(callbackQueue) { + : _request(net.CreateInferRequest()), + _id(id), + _callbackQueue(callbackQueue) { _request.SetCompletionCallback([&]() { _endTime = Time::now(); _callbackQueue(_id, getExecutionTimeInMilliseconds()); @@ -79,8 +82,10 @@ class InferRequestsQueue final { public: InferRequestsQueue(InferenceEngine::ExecutableNetwork& net, size_t nireq) { for (size_t id = 0; id < nireq; id++) { - requests.push_back( - std::make_shared(net, id, std::bind(&InferRequestsQueue::putIdleRequest, this, std::placeholders::_1, std::placeholders::_2))); + requests.push_back(std::make_shared( + net, + id, + std::bind(&InferRequestsQueue::putIdleRequest, this, std::placeholders::_1, std::placeholders::_2))); _idleIds.push(id); } resetTimes(); @@ -90,7 +95,8 @@ public: // So it should be released before any context that the request can use inside internal asynchronous tasks // For example all members of InferRequestsQueue would be destroyed before `requests` vector // So requests can try to use this members from `putIdleRequest()` that would be called from request callback - // To avoid this we should move this vector declaration after all members declaration or just clear it manually in destructor + // To avoid this we should move this vector declaration after all members declaration or just clear it manually + // in destructor requests.clear(); } diff --git a/inference-engine/samples/benchmark_app/inputs_filling.cpp b/inference-engine/samples/benchmark_app/inputs_filling.cpp index eadd4eceeae..d50567caf98 100644 --- a/inference-engine/samples/benchmark_app/inputs_filling.cpp +++ b/inference-engine/samples/benchmark_app/inputs_filling.cpp @@ -16,14 +16,15 @@ using namespace InferenceEngine; #ifdef USE_OPENCV -static const std::vector supported_image_extensions = {"bmp", "dib", "jpeg", "jpg", "jpe", "jp2", "png", - "pbm", "pgm", "ppm", "sr", "ras", "tiff", "tif"}; +static const std::vector supported_image_extensions = + {"bmp", "dib", "jpeg", "jpg", "jpe", "jp2", "png", "pbm", "pgm", "ppm", "sr", "ras", "tiff", "tif"}; #else static const std::vector supported_image_extensions = {"bmp"}; #endif static const std::vector supported_binary_extensions = {"bin"}; -std::vector filterFilesByExtensions(const std::vector& filePaths, const std::vector& extensions) { +std::vector filterFilesByExtensions(const std::vector& filePaths, + const std::vector& extensions) { std::vector filtered; auto getExtension = [](const std::string& name) { auto extensionPosition = name.rfind('.', name.size()); @@ -40,8 +41,13 @@ std::vector 
filterFilesByExtensions(const std::vector& } template -void fillBlobImage(Blob::Ptr& inputBlob, const std::vector& filePaths, const size_t& batchSize, const benchmark_app::InputInfo& app_info, - const size_t& requestId, const size_t& inputId, const size_t& inputSize) { +void fillBlobImage(Blob::Ptr& inputBlob, + const std::vector& filePaths, + const size_t& batchSize, + const benchmark_app::InputInfo& app_info, + const size_t& requestId, + const size_t& inputId, + const size_t& inputSize) { MemoryBlob::Ptr minput = as(inputBlob); if (!minput) { IE_THROW() << "We expect inputBlob to be inherited from MemoryBlob in " @@ -57,7 +63,8 @@ void fillBlobImage(Blob::Ptr& inputBlob, const std::vector& filePat std::vector> vreader; vreader.reserve(batchSize); - for (size_t i = 0ULL, inputIndex = requestId * batchSize * inputSize + inputId; i < batchSize; i++, inputIndex += inputSize) { + for (size_t i = 0ULL, inputIndex = requestId * batchSize * inputSize + inputId; i < batchSize; + i++, inputIndex += inputSize) { inputIndex %= filePaths.size(); slog::info << "Prepare image " << filePaths[inputIndex] << slog::endl; @@ -88,11 +95,13 @@ void fillBlobImage(Blob::Ptr& inputBlob, const std::vector& filePat for (size_t ch = 0; ch < numChannels; ++ch) { /** [images stride + channels stride + pixel id ] all in * bytes **/ - size_t offset = imageId * numChannels * width * height + (((app_info.layout == "NCHW") || (app_info.layout == "CHW")) - ? (ch * width * height + h * width + w) - : (h * width * numChannels + w * numChannels + ch)); + size_t offset = imageId * numChannels * width * height + + (((app_info.layout == "NCHW") || (app_info.layout == "CHW")) + ? (ch * width * height + h * width + w) + : (h * width * numChannels + w * numChannels + ch)); inputBlobData[offset] = - (static_cast(vreader.at(imageId).get()[h * width * numChannels + w * numChannels + ch]) - static_cast(app_info.mean[ch])) / + (static_cast(vreader.at(imageId).get()[h * width * numChannels + w * numChannels + ch]) - + static_cast(app_info.mean[ch])) / static_cast(app_info.scale[ch]); } } @@ -101,7 +110,11 @@ void fillBlobImage(Blob::Ptr& inputBlob, const std::vector& filePat } template -void fillBlobBinary(Blob::Ptr& inputBlob, const std::vector& filePaths, const size_t& batchSize, const size_t& requestId, const size_t& inputId, +void fillBlobBinary(Blob::Ptr& inputBlob, + const std::vector& filePaths, + const size_t& batchSize, + const size_t& requestId, + const size_t& inputId, const size_t& inputSize) { MemoryBlob::Ptr minput = as(inputBlob); if (!minput) { @@ -114,7 +127,8 @@ void fillBlobBinary(Blob::Ptr& inputBlob, const std::vector& filePa auto minputHolder = minput->wmap(); auto inputBlobData = minputHolder.as(); - for (size_t i = 0ULL, inputIndex = requestId * batchSize * inputSize + inputId; i < batchSize; i++, inputIndex += inputSize) { + for (size_t i = 0ULL, inputIndex = requestId * batchSize * inputSize + inputId; i < batchSize; + i++, inputIndex += inputSize) { inputIndex %= filePaths.size(); slog::info << "Prepare binary file " << filePaths[inputIndex] << slog::endl; @@ -140,12 +154,15 @@ void fillBlobBinary(Blob::Ptr& inputBlob, const std::vector& filePa } template -using uniformDistribution = - typename std::conditional::value, std::uniform_real_distribution, - typename std::conditional::value, std::uniform_int_distribution, void>::type>::type; +using uniformDistribution = typename std::conditional< + std::is_floating_point::value, + std::uniform_real_distribution, + typename std::conditional::value, 
std::uniform_int_distribution, void>::type>::type; template -void fillBlobRandom(Blob::Ptr& inputBlob, T rand_min = std::numeric_limits::min(), T rand_max = std::numeric_limits::max()) { +void fillBlobRandom(Blob::Ptr& inputBlob, + T rand_min = std::numeric_limits::min(), + T rand_max = std::numeric_limits::max()) { MemoryBlob::Ptr minput = as(inputBlob); if (!minput) { IE_THROW() << "We expect inputBlob to be inherited from MemoryBlob in " @@ -191,14 +208,17 @@ void fillBlobImInfo(Blob::Ptr& inputBlob, const size_t& batchSize, std::pair& inputFiles, const size_t& batchSize, benchmark_app::InputsInfo& app_inputs_info, +void fillBlobs(const std::vector& inputFiles, + const size_t& batchSize, + benchmark_app::InputsInfo& app_inputs_info, std::vector requests) { std::vector> input_image_sizes; for (auto& item : app_inputs_info) { if (item.second.isImage()) { input_image_sizes.push_back(std::make_pair(item.second.width(), item.second.height())); } - slog::info << "Network input '" << item.first << "' precision " << item.second.precision << ", dimensions (" << item.second.layout << "): "; + slog::info << "Network input '" << item.first << "' precision " << item.second.precision << ", dimensions (" + << item.second.layout << "): "; for (const auto& i : item.second.shape) { slog::info << i << " "; } @@ -232,10 +252,11 @@ void fillBlobs(const std::vector& inputFiles, const size_t& batchSi "extensions: " << ss.str() << slog::endl; } else if (binaryToBeUsed > binaryFiles.size()) { - slog::warn << "Some binary input files will be duplicated: " << binaryToBeUsed << " files are required but only " << binaryFiles.size() - << " are provided" << slog::endl; + slog::warn << "Some binary input files will be duplicated: " << binaryToBeUsed + << " files are required but only " << binaryFiles.size() << " are provided" << slog::endl; } else if (binaryToBeUsed < binaryFiles.size()) { - slog::warn << "Some binary input files will be ignored: only " << binaryToBeUsed << " are required from " << binaryFiles.size() << slog::endl; + slog::warn << "Some binary input files will be ignored: only " << binaryToBeUsed << " are required from " + << binaryFiles.size() << slog::endl; } imageFiles = filterFilesByExtensions(inputFiles, supported_image_extensions); @@ -254,10 +275,11 @@ void fillBlobs(const std::vector& inputFiles, const size_t& batchSi "extensions: " << ss.str() << slog::endl; } else if (imagesToBeUsed > imageFiles.size()) { - slog::warn << "Some image input files will be duplicated: " << imagesToBeUsed << " files are required but only " << imageFiles.size() - << " are provided" << slog::endl; + slog::warn << "Some image input files will be duplicated: " << imagesToBeUsed + << " files are required but only " << imageFiles.size() << " are provided" << slog::endl; } else if (imagesToBeUsed < imageFiles.size()) { - slog::warn << "Some image input files will be ignored: only " << imagesToBeUsed << " are required from " << imageFiles.size() << slog::endl; + slog::warn << "Some image input files will be ignored: only " << imagesToBeUsed << " are required from " + << imageFiles.size() << slog::endl; } } @@ -274,15 +296,45 @@ void fillBlobs(const std::vector& inputFiles, const size_t& batchSi if (!imageFiles.empty()) { // Fill with Images if (precision == InferenceEngine::Precision::FP32) { - fillBlobImage(inputBlob, imageFiles, batchSize, app_info, requestId, imageInputId++, imageInputCount); + fillBlobImage(inputBlob, + imageFiles, + batchSize, + app_info, + requestId, + imageInputId++, + imageInputCount); } else if 
(precision == InferenceEngine::Precision::FP16) { - fillBlobImage(inputBlob, imageFiles, batchSize, app_info, requestId, imageInputId++, imageInputCount); + fillBlobImage(inputBlob, + imageFiles, + batchSize, + app_info, + requestId, + imageInputId++, + imageInputCount); } else if (precision == InferenceEngine::Precision::I32) { - fillBlobImage(inputBlob, imageFiles, batchSize, app_info, requestId, imageInputId++, imageInputCount); + fillBlobImage(inputBlob, + imageFiles, + batchSize, + app_info, + requestId, + imageInputId++, + imageInputCount); } else if (precision == InferenceEngine::Precision::I64) { - fillBlobImage(inputBlob, imageFiles, batchSize, app_info, requestId, imageInputId++, imageInputCount); + fillBlobImage(inputBlob, + imageFiles, + batchSize, + app_info, + requestId, + imageInputId++, + imageInputCount); } else if (precision == InferenceEngine::Precision::U8) { - fillBlobImage(inputBlob, imageFiles, batchSize, app_info, requestId, imageInputId++, imageInputCount); + fillBlobImage(inputBlob, + imageFiles, + batchSize, + app_info, + requestId, + imageInputId++, + imageInputCount); } else { IE_THROW() << "Input precision is not supported for " << item.first; } @@ -292,15 +344,41 @@ void fillBlobs(const std::vector& inputFiles, const size_t& batchSi if (!binaryFiles.empty()) { // Fill with binary files if (precision == InferenceEngine::Precision::FP32) { - fillBlobBinary(inputBlob, binaryFiles, batchSize, requestId, binaryInputId++, binaryInputCount); + fillBlobBinary(inputBlob, + binaryFiles, + batchSize, + requestId, + binaryInputId++, + binaryInputCount); } else if (precision == InferenceEngine::Precision::FP16) { - fillBlobBinary(inputBlob, binaryFiles, batchSize, requestId, binaryInputId++, binaryInputCount); + fillBlobBinary(inputBlob, + binaryFiles, + batchSize, + requestId, + binaryInputId++, + binaryInputCount); } else if (precision == InferenceEngine::Precision::I32) { - fillBlobBinary(inputBlob, binaryFiles, batchSize, requestId, binaryInputId++, binaryInputCount); + fillBlobBinary(inputBlob, + binaryFiles, + batchSize, + requestId, + binaryInputId++, + binaryInputCount); } else if (precision == InferenceEngine::Precision::I64) { - fillBlobBinary(inputBlob, binaryFiles, batchSize, requestId, binaryInputId++, binaryInputCount); - } else if ((precision == InferenceEngine::Precision::U8) || (precision == InferenceEngine::Precision::BOOL)) { - fillBlobBinary(inputBlob, binaryFiles, batchSize, requestId, binaryInputId++, binaryInputCount); + fillBlobBinary(inputBlob, + binaryFiles, + batchSize, + requestId, + binaryInputId++, + binaryInputCount); + } else if ((precision == InferenceEngine::Precision::U8) || + (precision == InferenceEngine::Precision::BOOL)) { + fillBlobBinary(inputBlob, + binaryFiles, + batchSize, + requestId, + binaryInputId++, + binaryInputCount); } else { IE_THROW() << "Input precision is not supported for " << item.first; } @@ -310,7 +388,8 @@ void fillBlobs(const std::vector& inputFiles, const size_t& batchSi if (app_info.isImageInfo() && (input_image_sizes.size() == 1)) { // Most likely it is image info: fill with image information auto image_size = input_image_sizes.at(0); - slog::info << "Fill input '" << item.first << "' with image size " << image_size.first << "x" << image_size.second << slog::endl; + slog::info << "Fill input '" << item.first << "' with image size " << image_size.first << "x" + << image_size.second << slog::endl; if (precision == InferenceEngine::Precision::FP32) { fillBlobImInfo(inputBlob, batchSize, image_size); } else if 
(precision == InferenceEngine::Precision::FP16) { @@ -326,8 +405,9 @@ void fillBlobs(const std::vector& inputFiles, const size_t& batchSi } } // Fill random - slog::info << "Fill input '" << item.first << "' with random values (" << std::string((app_info.isImage() ? "image" : "some binary data")) - << " is expected)" << slog::endl; + slog::info << "Fill input '" << item.first << "' with random values (" + << std::string((app_info.isImage() ? "image" : "some binary data")) << " is expected)" + << slog::endl; if (precision == InferenceEngine::Precision::FP32) { fillBlobRandom(inputBlob); } else if (precision == InferenceEngine::Precision::FP16) { diff --git a/inference-engine/samples/benchmark_app/inputs_filling.hpp b/inference-engine/samples/benchmark_app/inputs_filling.hpp index 000d613db59..42a0fcf54fc 100644 --- a/inference-engine/samples/benchmark_app/inputs_filling.hpp +++ b/inference-engine/samples/benchmark_app/inputs_filling.hpp @@ -11,5 +11,7 @@ #include "infer_request_wrap.hpp" #include "utils.hpp" -void fillBlobs(const std::vector& inputFiles, const size_t& batchSize, benchmark_app::InputsInfo& app_inputs_info, +void fillBlobs(const std::vector& inputFiles, + const size_t& batchSize, + benchmark_app::InputsInfo& app_inputs_info, std::vector requests); \ No newline at end of file diff --git a/inference-engine/samples/benchmark_app/main.cpp b/inference-engine/samples/benchmark_app/main.cpp index da2b77a0ce9..18aa66e0a45 100644 --- a/inference-engine/samples/benchmark_app/main.cpp +++ b/inference-engine/samples/benchmark_app/main.cpp @@ -60,8 +60,10 @@ bool ParseAndCheckCommandLine(int argc, char* argv[]) { throw std::logic_error("Incorrect API. Please set -api option to `sync` or `async` value."); } - if (!FLAGS_report_type.empty() && FLAGS_report_type != noCntReport && FLAGS_report_type != averageCntReport && FLAGS_report_type != detailedCntReport) { - std::string err = "only " + std::string(noCntReport) + "/" + std::string(averageCntReport) + "/" + std::string(detailedCntReport) + + if (!FLAGS_report_type.empty() && FLAGS_report_type != noCntReport && FLAGS_report_type != averageCntReport && + FLAGS_report_type != detailedCntReport) { + std::string err = "only " + std::string(noCntReport) + "/" + std::string(averageCntReport) + "/" + + std::string(detailedCntReport) + " report types are supported (invalid -report_type option value)"; throw std::logic_error(err); } @@ -73,8 +75,9 @@ bool ParseAndCheckCommandLine(int argc, char* argv[]) { bool isNetworkCompiled = fileExt(FLAGS_m) == "blob"; bool isPrecisionSet = !(FLAGS_ip.empty() && FLAGS_op.empty() && FLAGS_iop.empty()); if (isNetworkCompiled && isPrecisionSet) { - std::string err = std::string("Cannot set precision for a compiled network. ") + std::string("Please re-compile your network with required precision " - "using compile_tool"); + std::string err = std::string("Cannot set precision for a compiled network. 
") + + std::string("Please re-compile your network with required precision " + "using compile_tool"); throw std::logic_error(err); } @@ -83,17 +86,18 @@ bool ParseAndCheckCommandLine(int argc, char* argv[]) { static void next_step(const std::string additional_info = "") { static size_t step_id = 0; - static const std::map step_names = {{1, "Parsing and validating input arguments"}, - {2, "Loading Inference Engine"}, - {3, "Setting device configuration"}, - {4, "Reading network files"}, - {5, "Resizing network to match image sizes and given batch"}, - {6, "Configuring input of the model"}, - {7, "Loading the model to the device"}, - {8, "Setting optimal runtime parameters"}, - {9, "Creating infer requests and filling input blobs with images"}, - {10, "Measuring performance"}, - {11, "Dumping statistics report"}}; + static const std::map step_names = { + {1, "Parsing and validating input arguments"}, + {2, "Loading Inference Engine"}, + {3, "Setting device configuration"}, + {4, "Reading network files"}, + {5, "Resizing network to match image sizes and given batch"}, + {6, "Configuring input of the model"}, + {7, "Loading the model to the device"}, + {8, "Setting optimal runtime parameters"}, + {9, "Creating infer requests and filling input blobs with images"}, + {10, "Measuring performance"}, + {11, "Dumping statistics report"}}; step_id++; if (step_names.count(step_id) == 0) @@ -140,13 +144,16 @@ int main(int argc, char* argv[]) { } } if (!FLAGS_report_type.empty()) { - statistics = std::make_shared(StatisticsReport::Config {FLAGS_report_type, FLAGS_report_folder}); + statistics = + std::make_shared(StatisticsReport::Config{FLAGS_report_type, FLAGS_report_folder}); statistics->addParameters(StatisticsReport::Category::COMMAND_LINE_PARAMETERS, command_line_arguments); } auto isFlagSetInCommandLine = [&command_line_arguments](const std::string& name) { - return (std::find_if(command_line_arguments.begin(), command_line_arguments.end(), [name](const std::pair& p) { - return p.first == name; - }) != command_line_arguments.end()); + return (std::find_if(command_line_arguments.begin(), + command_line_arguments.end(), + [name](const std::pair& p) { + return p.first == name; + }) != command_line_arguments.end()); }; std::string device_name = FLAGS_d; @@ -213,13 +220,17 @@ int main(int argc, char* argv[]) { if (isFlagSetInCommandLine("pc")) { // set to user defined value device_config[CONFIG_KEY(PERF_COUNT)] = FLAGS_pc ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO); - } else if (device_config.count(CONFIG_KEY(PERF_COUNT)) && (device_config.at(CONFIG_KEY(PERF_COUNT)) == "YES")) { - slog::warn << "Performance counters for " << device << " device is turned on. To print results use -pc option." << slog::endl; + } else if (device_config.count(CONFIG_KEY(PERF_COUNT)) && + (device_config.at(CONFIG_KEY(PERF_COUNT)) == "YES")) { + slog::warn << "Performance counters for " << device + << " device is turned on. To print results use -pc option." << slog::endl; } else if (FLAGS_report_type == detailedCntReport || FLAGS_report_type == averageCntReport) { - slog::warn << "Turn on performance counters for " << device << " device since report type is " << FLAGS_report_type << "." << slog::endl; + slog::warn << "Turn on performance counters for " << device << " device since report type is " + << FLAGS_report_type << "." 
<< slog::endl; device_config[CONFIG_KEY(PERF_COUNT)] = CONFIG_VALUE(YES); } else if (!FLAGS_exec_graph_path.empty()) { - slog::warn << "Turn on performance counters for " << device << " device due to execution graph dumping." << slog::endl; + slog::warn << "Turn on performance counters for " << device << " device due to execution graph dumping." + << slog::endl; device_config[CONFIG_KEY(PERF_COUNT)] = CONFIG_VALUE(YES); } else { // set to default value @@ -231,8 +242,10 @@ int main(int argc, char* argv[]) { const std::string key = device + "_THROUGHPUT_STREAMS"; if (device_nstreams.count(device)) { // set to user defined value - std::vector supported_config_keys = ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); - if (std::find(supported_config_keys.begin(), supported_config_keys.end(), key) == supported_config_keys.end()) { + std::vector supported_config_keys = + ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + if (std::find(supported_config_keys.begin(), supported_config_keys.end(), key) == + supported_config_keys.end()) { throw std::logic_error("Device " + device + " doesn't support config key '" + key + "'! " + "Please specify -nstreams for correct devices in format " ":,:" + @@ -267,8 +280,10 @@ int main(int argc, char* argv[]) { // set to user defined value device_config[CONFIG_KEY(CPU_BIND_THREAD)] = FLAGS_pin; } else if (!device_config.count(CONFIG_KEY(CPU_BIND_THREAD))) { - if ((device_name.find("MULTI") != std::string::npos) && (device_name.find("GPU") != std::string::npos)) { - slog::warn << "Turn off threads pinning for " << device << " device since multi-scenario with GPU device is used." << slog::endl; + if ((device_name.find("MULTI") != std::string::npos) && + (device_name.find("GPU") != std::string::npos)) { + slog::warn << "Turn off threads pinning for " << device + << " device since multi-scenario with GPU device is used." << slog::endl; device_config[CONFIG_KEY(CPU_BIND_THREAD)] = CONFIG_VALUE(NO); } } @@ -279,7 +294,8 @@ int main(int argc, char* argv[]) { // for GPU execution, more throughput-oriented execution via streams setThroughputStreams(); - if ((device_name.find("MULTI") != std::string::npos) && (device_name.find("CPU") != std::string::npos)) { + if ((device_name.find("MULTI") != std::string::npos) && + (device_name.find("CPU") != std::string::npos)) { slog::warn << "Turn on GPU throttling. 
Multi-device execution with " "the CPU + GPU performs best with GPU throttling hint, " << "which releases another CPU thread (that is otherwise " @@ -299,9 +315,11 @@ int main(int argc, char* argv[]) { if (isFlagSetInCommandLine("nthreads")) device_config[GNA_CONFIG_KEY(LIB_N_THREADS)] = std::to_string(FLAGS_nthreads); } else { - std::vector supported_config_keys = ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + std::vector supported_config_keys = + ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); auto supported = [&](const std::string& key) { - return std::find(std::begin(supported_config_keys), std::end(supported_config_keys), key) != std::end(supported_config_keys); + return std::find(std::begin(supported_config_keys), std::end(supported_config_keys), key) != + std::end(supported_config_keys); }; if (supported(CONFIG_KEY(CPU_THREADS_NUM)) && isFlagSetInCommandLine("nthreads")) { device_config[CONFIG_KEY(CPU_THREADS_NUM)] = std::to_string(FLAGS_nthreads); @@ -351,7 +369,8 @@ int main(int argc, char* argv[]) { auto duration_ms = double_to_string(get_total_ms_time(startTime)); slog::info << "Load network took " << duration_ms << " ms" << slog::endl; if (statistics) - statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {{"load network time (ms)", duration_ms}}); + statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, + {{"load network time (ms)", duration_ms}}); if (batchSize == 0) { batchSize = 1; } @@ -367,7 +386,8 @@ int main(int argc, char* argv[]) { auto duration_ms = double_to_string(get_total_ms_time(startTime)); slog::info << "Read network took " << duration_ms << " ms" << slog::endl; if (statistics) - statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {{"read network time (ms)", duration_ms}}); + statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, + {{"read network time (ms)", duration_ms}}); const InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); if (inputInfo.empty()) { @@ -380,7 +400,13 @@ int main(int argc, char* argv[]) { batchSize = cnnNetwork.getBatchSize(); // Parse input shapes if specified bool reshape = false; - app_inputs_info = getInputsInfo(FLAGS_shape, FLAGS_layout, FLAGS_b, FLAGS_iscale, FLAGS_imean, inputInfo, reshape); + app_inputs_info = getInputsInfo(FLAGS_shape, + FLAGS_layout, + FLAGS_b, + FLAGS_iscale, + FLAGS_imean, + inputInfo, + reshape); if (reshape) { InferenceEngine::ICNNNetwork::InputShapes shapes = {}; for (auto& item : app_inputs_info) @@ -391,13 +417,15 @@ int main(int argc, char* argv[]) { duration_ms = double_to_string(get_total_ms_time(startTime)); slog::info << "Reshape network took " << duration_ms << " ms" << slog::endl; if (statistics) - statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {{"reshape network time (ms)", duration_ms}}); + statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, + {{"reshape network time (ms)", duration_ms}}); } // use batch size according to provided layout and shapes batchSize = (!FLAGS_layout.empty()) ? getBatchSize(app_inputs_info) : cnnNetwork.getBatchSize(); topology_name = cnnNetwork.getName(); - slog::info << (FLAGS_b != 0 ? "Network batch size was changed to: " : "Network batch size: ") << batchSize << slog::endl; + slog::info << (FLAGS_b != 0 ? "Network batch size was changed to: " : "Network batch size: ") << batchSize + << slog::endl; // ----------------- 6. 
Configuring inputs and outputs // ---------------------------------------------------------------------- @@ -424,7 +452,8 @@ int main(int argc, char* argv[]) { duration_ms = double_to_string(get_total_ms_time(startTime)); slog::info << "Load network took " << duration_ms << " ms" << slog::endl; if (statistics) - statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {{"load network time (ms)", duration_ms}}); + statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, + {{"load network time (ms)", duration_ms}}); } else { next_step(); slog::info << "Skipping the step for compiled network" << slog::endl; @@ -440,8 +469,14 @@ int main(int argc, char* argv[]) { auto duration_ms = double_to_string(get_total_ms_time(startTime)); slog::info << "Import network took " << duration_ms << " ms" << slog::endl; if (statistics) - statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {{"import network time (ms)", duration_ms}}); - app_inputs_info = getInputsInfo(FLAGS_shape, FLAGS_layout, FLAGS_b, FLAGS_iscale, FLAGS_imean, exeNetwork.GetInputsInfo()); + statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, + {{"import network time (ms)", duration_ms}}); + app_inputs_info = getInputsInfo(FLAGS_shape, + FLAGS_layout, + FLAGS_b, + FLAGS_iscale, + FLAGS_imean, + exeNetwork.GetInputsInfo()); if (batchSize == 0) { batchSize = 1; } @@ -479,8 +514,8 @@ int main(int argc, char* argv[]) { if ((niter > 0) && (FLAGS_api == "async")) { niter = ((niter + nireq - 1) / nireq) * nireq; if (FLAGS_niter != niter) { - slog::warn << "Number of iterations was aligned by request number from " << FLAGS_niter << " to " << niter << " using number of requests " - << nireq << slog::endl; + slog::warn << "Number of iterations was aligned by request number from " << FLAGS_niter << " to " + << niter << " using number of requests " << nireq << slog::endl; } } @@ -496,23 +531,25 @@ int main(int argc, char* argv[]) { uint64_t duration_nanoseconds = getDurationInNanoseconds(duration_seconds); if (statistics) { - statistics->addParameters(StatisticsReport::Category::RUNTIME_CONFIG, - { - {"topology", topology_name}, - {"target device", device_name}, - {"API", FLAGS_api}, - {"precision", std::string(precision.name())}, - {"batch size", std::to_string(batchSize)}, - {"number of iterations", std::to_string(niter)}, - {"number of parallel infer requests", std::to_string(nireq)}, - {"duration (ms)", std::to_string(getDurationInMilliseconds(duration_seconds))}, - }); + statistics->addParameters( + StatisticsReport::Category::RUNTIME_CONFIG, + { + {"topology", topology_name}, + {"target device", device_name}, + {"API", FLAGS_api}, + {"precision", std::string(precision.name())}, + {"batch size", std::to_string(batchSize)}, + {"number of iterations", std::to_string(niter)}, + {"number of parallel infer requests", std::to_string(nireq)}, + {"duration (ms)", std::to_string(getDurationInMilliseconds(duration_seconds))}, + }); for (auto& nstreams : device_nstreams) { std::stringstream ss; ss << "number of " << nstreams.first << " streams"; - statistics->addParameters(StatisticsReport::Category::RUNTIME_CONFIG, { - {ss.str(), nstreams.second}, - }); + statistics->addParameters(StatisticsReport::Category::RUNTIME_CONFIG, + { + {ss.str(), nstreams.second}, + }); } } @@ -576,7 +613,8 @@ int main(int argc, char* argv[]) { auto duration_ms = double_to_string(inferRequestsQueue.getLatencies()[0]); slog::info << "First inference took " << duration_ms << " ms" << slog::endl; if (statistics) 
- statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {{"first inference time (ms)", duration_ms}}); + statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, + {{"first inference time (ms)", duration_ms}}); inferRequestsQueue.resetTimes(); auto startTime = Time::now(); @@ -587,7 +625,8 @@ int main(int argc, char* argv[]) { * executed in the same conditions **/ ProgressBar progressBar(progressBarTotalCount, FLAGS_stream_output, FLAGS_progress); - while ((niter != 0LL && iteration < niter) || (duration_nanoseconds != 0LL && (uint64_t)execTime < duration_nanoseconds) || + while ((niter != 0LL && iteration < niter) || + (duration_nanoseconds != 0LL && (uint64_t)execTime < duration_nanoseconds) || (FLAGS_api == "async" && iteration % nireq != 0)) { inferRequest = inferRequestsQueue.getIdleRequest(); if (!inferRequest) { @@ -629,13 +668,15 @@ int main(int argc, char* argv[]) { double latency = getMedianValue(inferRequestsQueue.getLatencies(), FLAGS_latency_percentile); double totalDuration = inferRequestsQueue.getDurationInMilliseconds(); - double fps = (FLAGS_api == "sync") ? batchSize * 1000.0 / latency : batchSize * 1000.0 * iteration / totalDuration; + double fps = + (FLAGS_api == "sync") ? batchSize * 1000.0 / latency : batchSize * 1000.0 * iteration / totalDuration; if (statistics) { - statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, { - {"total execution time (ms)", double_to_string(totalDuration)}, - {"total number of iterations", std::to_string(iteration)}, - }); + statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, + { + {"total execution time (ms)", double_to_string(totalDuration)}, + {"total number of iterations", std::to_string(iteration)}, + }); if (device_name.find("MULTI") == std::string::npos) { std::string latency_label; if (FLAGS_latency_percentile == 50) { @@ -643,11 +684,13 @@ int main(int argc, char* argv[]) { } else { latency_label = "latency (" + std::to_string(FLAGS_latency_percentile) + " percentile) (ms)"; } - statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, { - {latency_label, double_to_string(latency)}, - }); + statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, + { + {latency_label, double_to_string(latency)}, + }); } - statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {{"throughput", double_to_string(fps)}}); + statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, + {{"throughput", double_to_string(fps)}}); } progressBar.finish(); @@ -707,9 +750,10 @@ int main(int argc, char* argv[]) { slog::err << ex.what() << slog::endl; if (statistics) { - statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, { - {"error", ex.what()}, - }); + statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, + { + {"error", ex.what()}, + }); statistics->dump(); } diff --git a/inference-engine/samples/benchmark_app/statistics_report.hpp b/inference-engine/samples/benchmark_app/statistics_report.hpp index 1110cf78522..29ec04f4492 100644 --- a/inference-engine/samples/benchmark_app/statistics_report.hpp +++ b/inference-engine/samples/benchmark_app/statistics_report.hpp @@ -35,14 +35,14 @@ public: EXECUTION_RESULTS, }; - explicit StatisticsReport(Config config): _config(std::move(config)) { + explicit StatisticsReport(Config config) : _config(std::move(config)) { _separator = #if defined _WIN32 || defined __CYGWIN__ - #if defined UNICODE +# if defined UNICODE L"\\"; - #else +# else "\\"; - 
#endif +# endif #else "/"; #endif diff --git a/inference-engine/samples/benchmark_app/utils.cpp b/inference-engine/samples/benchmark_app/utils.cpp index 66deb5bad31..c98e8c1b94e 100644 --- a/inference-engine/samples/benchmark_app/utils.cpp +++ b/inference-engine/samples/benchmark_app/utils.cpp @@ -16,7 +16,7 @@ // clang-format on #ifdef USE_OPENCV - #include +# include #endif namespace benchmark_app { @@ -54,8 +54,13 @@ size_t InputInfo::depth() const { } // namespace benchmark_app uint32_t deviceDefaultDeviceDurationInSeconds(const std::string& device) { - static const std::map deviceDefaultDurationInSeconds {{"CPU", 60}, {"GPU", 60}, {"VPU", 60}, {"MYRIAD", 60}, - {"HDDL", 60}, {"FPGA", 120}, {"UNKNOWN", 120}}; + static const std::map deviceDefaultDurationInSeconds{{"CPU", 60}, + {"GPU", 60}, + {"VPU", 60}, + {"MYRIAD", 60}, + {"HDDL", 60}, + {"FPGA", 120}, + {"UNKNOWN", 120}}; uint32_t duration = 0; for (const auto& deviceDurationInSeconds : deviceDefaultDurationInSeconds) { if (device.find(deviceDurationInSeconds.first) != std::string::npos) { @@ -63,16 +68,18 @@ uint32_t deviceDefaultDeviceDurationInSeconds(const std::string& device) { } } if (duration == 0) { - const auto unknownDeviceIt = - find_if(deviceDefaultDurationInSeconds.begin(), deviceDefaultDurationInSeconds.end(), [](std::pair deviceDuration) { - return deviceDuration.first == "UNKNOWN"; - }); + const auto unknownDeviceIt = find_if(deviceDefaultDurationInSeconds.begin(), + deviceDefaultDurationInSeconds.end(), + [](std::pair deviceDuration) { + return deviceDuration.first == "UNKNOWN"; + }); if (unknownDeviceIt == deviceDefaultDurationInSeconds.end()) { throw std::logic_error("UNKNOWN device was not found in the device duration list"); } duration = unknownDeviceIt->second; - slog::warn << "Default duration " << duration << " seconds for unknown device '" << device << "' is used" << slog::endl; + slog::warn << "Default duration " << duration << " seconds for unknown device '" << device << "' is used" + << slog::endl; } return duration; } @@ -112,7 +119,8 @@ std::vector parseDevices(const std::string& device_string) { return devices; } -std::map parseNStreamsValuePerDevice(const std::vector& devices, const std::string& values_string) { +std::map parseNStreamsValuePerDevice(const std::vector& devices, + const std::string& values_string) { // Format: :,: or just std::map result; auto device_value_strings = split(values_string, ','); @@ -125,7 +133,8 @@ std::map parseNStreamsValuePerDevice(const std::vector if (it != devices.end()) { result[device_name] = nstreams; } else { - throw std::logic_error("Can't set nstreams value " + std::string(nstreams) + " for device '" + device_name + "'! Incorrect device name!"); + throw std::logic_error("Can't set nstreams value " + std::string(nstreams) + " for device '" + + device_name + "'! 
Incorrect device name!"); } } else if (device_value_vec.size() == 1) { auto value = device_value_vec.at(0); @@ -172,7 +181,8 @@ std::string getShapesString(const InferenceEngine::ICNNNetwork::InputShapes& sha return ss.str(); } -std::map> parseScaleOrMean(const std::string& scale_mean, const benchmark_app::InputsInfo& inputs_info) { +std::map> parseScaleOrMean(const std::string& scale_mean, + const benchmark_app::InputsInfo& inputs_info) { // Format: data:[255,255,255],info[255,255,255] std::map> return_value; diff --git a/inference-engine/samples/benchmark_app/utils.hpp b/inference-engine/samples/benchmark_app/utils.hpp index 4452556b3c4..0440648a63b 100644 --- a/inference-engine/samples/benchmark_app/utils.hpp +++ b/inference-engine/samples/benchmark_app/utils.hpp @@ -29,14 +29,17 @@ using InputsInfo = std::map; std::vector parseDevices(const std::string& device_string); uint32_t deviceDefaultDeviceDurationInSeconds(const std::string& device); -std::map parseNStreamsValuePerDevice(const std::vector& devices, const std::string& values_string); +std::map parseNStreamsValuePerDevice(const std::vector& devices, + const std::string& values_string); std::string getShapesString(const InferenceEngine::ICNNNetwork::InputShapes& shapes); size_t getBatchSize(const benchmark_app::InputsInfo& inputs_info); std::vector split(const std::string& s, char delim); -std::map> parseScaleOrMean(const std::string& scale_mean, const benchmark_app::InputsInfo& inputs_info); +std::map> parseScaleOrMean(const std::string& scale_mean, + const benchmark_app::InputsInfo& inputs_info); template -std::map parseInputParameters(const std::string parameter_string, const std::map& input_info) { +std::map parseInputParameters(const std::string parameter_string, + const std::map& input_info) { // Parse parameter string like "input0[value0],input1[value1]" or "[value]" (applied to all // inputs) std::map return_value; @@ -67,8 +70,12 @@ std::map parseInputParameters(const std::string parame } template -benchmark_app::InputsInfo getInputsInfo(const std::string& shape_string, const std::string& layout_string, const size_t batch_size, - const std::string& scale_string, const std::string& mean_string, const std::map& input_info, +benchmark_app::InputsInfo getInputsInfo(const std::string& shape_string, + const std::string& layout_string, + const size_t batch_size, + const std::string& scale_string, + const std::string& mean_string, + const std::map& input_info, bool& reshape_required) { std::map shape_map = parseInputParameters(shape_string, input_info); std::map layout_map = parseInputParameters(layout_string, input_info); @@ -134,10 +141,20 @@ benchmark_app::InputsInfo getInputsInfo(const std::string& shape_string, const s } template -benchmark_app::InputsInfo getInputsInfo(const std::string& shape_string, const std::string& layout_string, const size_t batch_size, - const std::string& scale_string, const std::string& mean_string, const std::map& input_info) { +benchmark_app::InputsInfo getInputsInfo(const std::string& shape_string, + const std::string& layout_string, + const size_t batch_size, + const std::string& scale_string, + const std::string& mean_string, + const std::map& input_info) { bool reshape_required = false; - return getInputsInfo(shape_string, layout_string, batch_size, scale_string, mean_string, input_info, reshape_required); + return getInputsInfo(shape_string, + layout_string, + batch_size, + scale_string, + mean_string, + input_info, + reshape_required); } #ifdef USE_OPENCV diff --git 
a/inference-engine/samples/classification_sample_async/classification_sample_async.h b/inference-engine/samples/classification_sample_async/classification_sample_async.h index 454acba6554..0ccc9e375f5 100644 --- a/inference-engine/samples/classification_sample_async/classification_sample_async.h +++ b/inference-engine/samples/classification_sample_async/classification_sample_async.h @@ -17,13 +17,15 @@ static const char help_message[] = "Print a usage message."; static const char model_message[] = "Required. Path to an .xml file with a trained model."; /// @brief message for images argument -static const char image_message[] = "Required. Path to a folder with images or path to an image files: a .ubyte file for LeNet" - " and a .bmp file for the other networks."; +static const char image_message[] = + "Required. Path to a folder with images or path to an image files: a .ubyte file for LeNet" + " and a .bmp file for the other networks."; /// @brief message for assigning cnn calculation to device -static const char target_device_message[] = "Optional. Specify the target device to infer on (the list of available devices is shown below). " - "Default value is CPU. Use \"-d HETERO:\" format to specify HETERO plugin. " - "Sample will look for a suitable plugin for device specified."; +static const char target_device_message[] = + "Optional. Specify the target device to infer on (the list of available devices is shown below). " + "Default value is CPU. Use \"-d HETERO:\" format to specify HETERO plugin. " + "Sample will look for a suitable plugin for device specified."; /// @brief message for top results number static const char ntop_message[] = "Optional. Number of top results. Default value is 10."; diff --git a/inference-engine/samples/classification_sample_async/main.cpp b/inference-engine/samples/classification_sample_async/main.cpp index 197162f3620..87b3ba94734 100644 --- a/inference-engine/samples/classification_sample_async/main.cpp +++ b/inference-engine/samples/classification_sample_async/main.cpp @@ -100,7 +100,8 @@ int main(int argc, char* argv[]) { // Config for device plugin custom extension is loaded from an .xml // description ie.SetConfig({{PluginConfigParams::KEY_CONFIG_FILE, FLAGS_c}}, FLAGS_d); - slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c << slog::endl; + slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c + << slog::endl; } // ----------------------------------------------------------------------------------------------------- @@ -142,8 +143,8 @@ int main(int argc, char* argv[]) { continue; } /** Store image data **/ - std::shared_ptr data( - reader->getData(inputInfoItem.second->getTensorDesc().getDims()[3], inputInfoItem.second->getTensorDesc().getDims()[2])); + std::shared_ptr data(reader->getData(inputInfoItem.second->getTensorDesc().getDims()[3], + inputInfoItem.second->getTensorDesc().getDims()[2])); if (data != nullptr) { imagesData.push_back(data); validImageNames.push_back(i); @@ -203,7 +204,8 @@ int main(int argc, char* argv[]) { for (size_t ch = 0; ch < num_channels; ++ch) { /** [images stride + channels stride + pixel id ] all in * bytes **/ - data[image_id * image_size * num_channels + ch * image_size + pid] = imagesData.at(image_id).get()[pid * num_channels + ch]; + data[image_id * image_size * num_channels + ch * image_size + pid] = + imagesData.at(image_id).get()[pid * num_channels + ch]; } } } @@ -255,8 +257,9 @@ int main(int argc, char* argv[]) { /** Validating 
-nt value **/ const size_t resultsCnt = outputBlob->size() / batchSize; if (FLAGS_nt > resultsCnt || FLAGS_nt < 1) { - slog::warn << "-nt " << FLAGS_nt << " is not available for this network (-nt should be less than " << resultsCnt + 1 - << " and more than 0)\n Maximal value " << resultsCnt << " will be used." << slog::endl; + slog::warn << "-nt " << FLAGS_nt << " is not available for this network (-nt should be less than " + << resultsCnt + 1 << " and more than 0)\n Maximal value " << resultsCnt + << " will be used." << slog::endl; FLAGS_nt = resultsCnt; } diff --git a/inference-engine/samples/common/format_reader/MnistUbyte.cpp b/inference-engine/samples/common/format_reader/MnistUbyte.cpp index 1aee331739c..105c35da69e 100644 --- a/inference-engine/samples/common/format_reader/MnistUbyte.cpp +++ b/inference-engine/samples/common/format_reader/MnistUbyte.cpp @@ -42,7 +42,8 @@ MnistUbyte::MnistUbyte(const std::string& filename) { n_cols = reverseInt(n_cols); _width = (size_t)n_cols; if (number_of_images > 1) { - std::cout << "[MNIST] Warning: number_of_images in mnist file equals " << number_of_images << ". Only a first image will be read." << std::endl; + std::cout << "[MNIST] Warning: number_of_images in mnist file equals " << number_of_images + << ". Only a first image will be read." << std::endl; } size_t size = _width * _height * 1; diff --git a/inference-engine/samples/common/format_reader/format_reader.h b/inference-engine/samples/common/format_reader/format_reader.h index 0a7c14fbaae..0be86d4fcd0 100644 --- a/inference-engine/samples/common/format_reader/format_reader.h +++ b/inference-engine/samples/common/format_reader/format_reader.h @@ -14,19 +14,19 @@ #include #if defined(_WIN32) - #ifdef IMPLEMENT_FORMAT_READER - #define FORMAT_READER_API(type) extern "C" __declspec(dllexport) type - #else - #define FORMAT_READER_API(type) extern "C" type - #endif +# ifdef IMPLEMENT_FORMAT_READER +# define FORMAT_READER_API(type) extern "C" __declspec(dllexport) type +# else +# define FORMAT_READER_API(type) extern "C" type +# endif #elif (__GNUC__ >= 4) - #ifdef IMPLEMENT_FORMAT_READER - #define FORMAT_READER_API(type) extern "C" __attribute__((visibility("default"))) type - #else - #define FORMAT_READER_API(type) extern "C" type - #endif +# ifdef IMPLEMENT_FORMAT_READER +# define FORMAT_READER_API(type) extern "C" __attribute__((visibility("default"))) type +# else +# define FORMAT_READER_API(type) extern "C" type +# endif #else - #define FORMAT_READER_API(TYPE) extern "C" TYPE +# define FORMAT_READER_API(TYPE) extern "C" TYPE #endif namespace FormatReader { diff --git a/inference-engine/samples/common/format_reader/format_reader_ptr.h b/inference-engine/samples/common/format_reader/format_reader_ptr.h index ad026c3c148..507e0087f45 100644 --- a/inference-engine/samples/common/format_reader/format_reader_ptr.h +++ b/inference-engine/samples/common/format_reader/format_reader_ptr.h @@ -16,7 +16,7 @@ namespace FormatReader { class ReaderPtr { public: - explicit ReaderPtr(const char* imageName): reader(CreateFormatReader(imageName)) {} + explicit ReaderPtr(const char* imageName) : reader(CreateFormatReader(imageName)) {} /** * @brief dereference operator overload * @return Reader diff --git a/inference-engine/samples/common/format_reader/opencv_wrapper.cpp b/inference-engine/samples/common/format_reader/opencv_wrapper.cpp index 35838fe0c92..326ea165990 100644 --- a/inference-engine/samples/common/format_reader/opencv_wrapper.cpp +++ 
b/inference-engine/samples/common/format_reader/opencv_wrapper.cpp @@ -3,12 +3,12 @@ // #ifdef USE_OPENCV - #include "opencv_wrapper.h" +# include "opencv_wrapper.h" - #include - #include - #include - #include +# include +# include +# include +# include using namespace std; using namespace FormatReader; @@ -32,7 +32,8 @@ std::shared_ptr OCVReader::getData(size_t width = 0, size_t heigh size_t iw = img.size().width; size_t ih = img.size().height; if (width != iw || height != ih) { - slog::warn << "Image is resized from (" << iw << ", " << ih << ") to (" << width << ", " << height << ")" << slog::endl; + slog::warn << "Image is resized from (" << iw << ", " << ih << ") to (" << width << ", " << height << ")" + << slog::endl; } cv::resize(img, resized, cv::Size(width, height)); } diff --git a/inference-engine/samples/common/format_reader/opencv_wrapper.h b/inference-engine/samples/common/format_reader/opencv_wrapper.h index 227811c3257..bbafd42fbda 100644 --- a/inference-engine/samples/common/format_reader/opencv_wrapper.h +++ b/inference-engine/samples/common/format_reader/opencv_wrapper.h @@ -9,13 +9,13 @@ #pragma once #ifdef USE_OPENCV - #include +# include - #include - #include - #include +# include +# include +# include - #include "register.h" +# include "register.h" namespace FormatReader { /** diff --git a/inference-engine/samples/common/utils/include/samples/args_helper.hpp b/inference-engine/samples/common/utils/include/samples/args_helper.hpp index fa8b1a5392d..f216f370f5d 100644 --- a/inference-engine/samples/common/utils/include/samples/args_helper.hpp +++ b/inference-engine/samples/common/utils/include/samples/args_helper.hpp @@ -27,8 +27,14 @@ void readInputFilesArguments(std::vector& files, const std::string& */ void parseInputFilesArguments(std::vector& files); -void processPrecision(InferenceEngine::CNNNetwork& network, const std::string& ip, const std::string& op, const std::string& iop); +void processPrecision(InferenceEngine::CNNNetwork& network, + const std::string& ip, + const std::string& op, + const std::string& iop); -void processLayout(InferenceEngine::CNNNetwork& network, const std::string& il, const std::string& ol, const std::string& iol); +void processLayout(InferenceEngine::CNNNetwork& network, + const std::string& il, + const std::string& ol, + const std::string& iol); void printInputAndOutputsInfo(const InferenceEngine::CNNNetwork& network); diff --git a/inference-engine/samples/common/utils/include/samples/classification_results.h b/inference-engine/samples/common/utils/include/samples/classification_results.h index 1a8ea4306c3..2e2a0a286ec 100644 --- a/inference-engine/samples/common/utils/include/samples/classification_results.h +++ b/inference-engine/samples/common/utils/include/samples/classification_results.h @@ -78,9 +78,12 @@ private: batchData += offset; std::iota(std::begin(indexes), std::end(indexes), 0); - std::partial_sort(std::begin(indexes), std::begin(indexes) + n, std::end(indexes), [&batchData](unsigned l, unsigned r) { - return batchData[l] > batchData[r]; - }); + std::partial_sort(std::begin(indexes), + std::begin(indexes) + n, + std::end(indexes), + [&batchData](unsigned l, unsigned r) { + return batchData[l] > batchData[r]; + }); for (unsigned j = 0; j < n; j++) { output.at(i * n + j) = indexes.at(j); } @@ -123,7 +126,10 @@ private: } public: - explicit ClassificationResultT(InferenceEngine::Blob::Ptr output_blob, std::vector image_names = {}, size_t batch_size = 1, size_t num_of_top = 10, + explicit 
ClassificationResultT(InferenceEngine::Blob::Ptr output_blob, + std::vector image_names = {}, + size_t batch_size = 1, + size_t num_of_top = 10, std::vector labels = {}) : _nTop(num_of_top), _outBlob(std::move(output_blob)), @@ -164,8 +170,8 @@ public: // locked memory holder should be alive all time while access to its buffer happens const auto result = moutputHolder - .as::value_type*>()[_results.at(id) + - image_id * (_outBlob->size() / _batchSize)]; + .as::value_type*>() + [_results.at(id) + image_id * (_outBlob->size() / _batchSize)]; std::cout << std::setw(static_cast(_classidStr.length())) << std::left << _results.at(id) << " "; std::cout << std::left << std::setw(static_cast(_probabilityStr.length())) << std::fixed << result; diff --git a/inference-engine/samples/common/utils/include/samples/common.hpp b/inference-engine/samples/common/utils/include/samples/common.hpp index c4e1ead4a93..0c2dad4af3a 100644 --- a/inference-engine/samples/common/utils/include/samples/common.hpp +++ b/inference-engine/samples/common/utils/include/samples/common.hpp @@ -24,11 +24,11 @@ #include #ifndef UNUSED - #if defined(_MSC_VER) && !defined(__clang__) - #define UNUSED - #else - #define UNUSED __attribute__((unused)) - #endif +# if defined(_MSC_VER) && !defined(__clang__) +# define UNUSED +# else +# define UNUSED __attribute__((unused)) +# endif #endif /** @@ -46,7 +46,8 @@ inline void ltrim(std::string& s) { * @param s - string to trim */ inline void rtrim(std::string& s) { - s.erase(std::find_if(s.rbegin(), s.rend(), + s.erase(std::find_if(s.rbegin(), + s.rend(), [](int c) { return !std::isspace(c); }) @@ -130,7 +131,7 @@ public: * @param g - value for green channel * @param b - value for blue channel */ - Color(unsigned char r, unsigned char g, unsigned char b): _r(r), _g(g), _b(b) {} + Color(unsigned char r, unsigned char g, unsigned char b) : _r(r), _g(g), _b(b) {} inline unsigned char red() { return _r; @@ -157,9 +158,11 @@ public: static UNUSED void writeOutputBmp(std::vector> data, size_t classesNum, std::ostream& outFile) { unsigned int seed = (unsigned int)time(NULL); // Known colors for training classes from Cityscape dataset - static std::vector colors = {{128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, {153, 153, 153}, {30, 170, 250}, - {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, - {70, 0, 0}, {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, {81, 0, 81}}; + static std::vector colors = { + {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, {153, 153, 153}, + {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, {180, 130, 70}, {60, 20, 220}, + {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, + {32, 11, 119}, {0, 74, 111}, {81, 0, 81}}; while (classesNum > colors.size()) { static std::mt19937 rng(seed); @@ -171,13 +174,17 @@ static UNUSED void writeOutputBmp(std::vector> data, size_t unsigned char file[14] = { 'B', 'M', // magic - 0, 0, 0, + 0, + 0, + 0, 0, // size in bytes 0, 0, // app data 0, 0, // app data - 40 + 14, 0, 0, + 40 + 14, + 0, + 0, 0 // start of data offset }; unsigned char info[40] = { @@ -262,13 +269,17 @@ static UNUSED bool writeOutputBmp(std::string name, unsigned char* data, size_t unsigned char file[14] = { 'B', 'M', // magic - 0, 0, 0, + 0, + 0, + 0, 0, // size in bytes 0, 0, // app data 0, 0, // app data - 40 + 14, 0, 0, + 40 + 14, + 0, + 0, 0 // start of data offset }; unsigned char info[40] 
= { @@ -342,11 +353,18 @@ static UNUSED bool writeOutputBmp(std::string name, unsigned char* data, size_t * @param classes - vector of classes * @param thickness - thickness of a line (in pixels) to be used for bounding boxes */ -static UNUSED void addRectangles(unsigned char* data, size_t height, size_t width, std::vector rectangles, std::vector classes, int thickness = 1) { +static UNUSED void addRectangles(unsigned char* data, + size_t height, + size_t width, + std::vector rectangles, + std::vector classes, + int thickness = 1) { std::vector colors = {// colors to be used for bounding boxes - {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, {153, 153, 153}, {30, 170, 250}, - {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, - {70, 0, 0}, {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, {81, 0, 81}}; + {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, + {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, + {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, + {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, + {81, 0, 81}}; if (rectangles.size() % 4 != 0 || rectangles.size() / 4 != classes.size()) { return; } @@ -430,13 +448,17 @@ static UNUSED bool writeOutputBmp(unsigned char* data, size_t height, size_t wid unsigned char file[14] = { 'B', 'M', // magic - 0, 0, 0, + 0, + 0, + 0, 0, // size in bytes 0, 0, // app data 0, 0, // app data - 40 + 14, 0, 0, + 40 + 14, + 0, + 0, 0 // start of data offset }; unsigned char info[40] = { @@ -515,8 +537,11 @@ static std::vector& performanceMap, std::ostream& stream, - std::string deviceName, bool bshowHeader = true) { +static UNUSED void printPerformanceCounts( + const std::map& performanceMap, + std::ostream& stream, + std::string deviceName, + bool bshowHeader = true) { long long totalTime = 0; // Print performance counts if (bshowHeader) { @@ -560,12 +585,16 @@ static UNUSED void printPerformanceCounts(const std::map getMapFullDevicesNames(InferenceEngine::Core& ie, std::vector devices) { +inline std::map getMapFullDevicesNames(InferenceEngine::Core& ie, + std::vector devices) { std::map devicesMap; InferenceEngine::Parameter p; for (std::string& deviceName : devices) { @@ -608,8 +637,20 @@ public: float xmin, xmax, ymin, ymax, prob; bool difficult; - DetectedObject(int _objectType, float _xmin, float _ymin, float _xmax, float _ymax, float _prob, bool _difficult = false) - : objectType(_objectType), xmin(_xmin), xmax(_xmax), ymin(_ymin), ymax(_ymax), prob(_prob), difficult(_difficult) {} + DetectedObject(int _objectType, + float _xmin, + float _ymin, + float _xmax, + float _ymax, + float _prob, + bool _difficult = false) + : objectType(_objectType), + xmin(_xmin), + xmax(_xmax), + ymin(_ymin), + ymax(_ymax), + prob(_prob), + difficult(_difficult) {} DetectedObject(const DetectedObject& other) = default; @@ -617,10 +658,18 @@ public: // Add small space to eliminate empty squares float epsilon = 0; // 1e-5f; - DetectedObject detectedObject1(detectedObject1_.objectType, (detectedObject1_.xmin - epsilon), (detectedObject1_.ymin - epsilon), - (detectedObject1_.xmax - epsilon), (detectedObject1_.ymax - epsilon), detectedObject1_.prob); - DetectedObject detectedObject2(detectedObject2_.objectType, (detectedObject2_.xmin + epsilon), (detectedObject2_.ymin + epsilon), - (detectedObject2_.xmax), (detectedObject2_.ymax), detectedObject2_.prob); + DetectedObject 
detectedObject1(detectedObject1_.objectType, + (detectedObject1_.xmin - epsilon), + (detectedObject1_.ymin - epsilon), + (detectedObject1_.xmax - epsilon), + (detectedObject1_.ymax - epsilon), + detectedObject1_.prob); + DetectedObject detectedObject2(detectedObject2_.objectType, + (detectedObject2_.xmin + epsilon), + (detectedObject2_.ymin + epsilon), + (detectedObject2_.xmax), + (detectedObject2_.ymax), + detectedObject2_.prob); if (detectedObject1.objectType != detectedObject2.objectType) { // objects are different, so the result is 0 @@ -657,8 +706,10 @@ public: } // union - float square1 = (addendum + detectedObject1.xmax - detectedObject1.xmin) * (addendum + detectedObject1.ymax - detectedObject1.ymin); - float square2 = (addendum + detectedObject2.xmax - detectedObject2.xmin) * (addendum + detectedObject2.ymax - detectedObject2.ymin); + float square1 = (addendum + detectedObject1.xmax - detectedObject1.xmin) * + (addendum + detectedObject1.ymax - detectedObject1.ymin); + float square2 = (addendum + detectedObject2.xmax - detectedObject2.xmin) * + (addendum + detectedObject2.ymax - detectedObject2.ymin); float unn = square1 + square2 - intr; @@ -666,7 +717,13 @@ public: } DetectedObject scale(float scale_x, float scale_y) const { - return DetectedObject(objectType, xmin * scale_x, ymin * scale_y, xmax * scale_x, ymax * scale_y, prob, difficult); + return DetectedObject(objectType, + xmin * scale_x, + ymin * scale_y, + xmax * scale_x, + ymax * scale_y, + prob, + difficult); } }; @@ -675,7 +732,9 @@ public: const std::list alist; const bool check_probs; - explicit ImageDescription(const std::list& _alist, bool _check_probs = false): alist(_alist), check_probs(_check_probs) {} + explicit ImageDescription(const std::list& _alist, bool _check_probs = false) + : alist(_alist), + check_probs(_check_probs) {} static float ioUMultiple(const ImageDescription& detectedObjects, const ImageDescription& desiredObjects) { const ImageDescription *detectedObjectsSmall, *detectedObjectsBig; @@ -755,7 +814,7 @@ private: } public: - explicit AveragePrecisionCalculator(double _threshold): threshold(_threshold) {} + explicit AveragePrecisionCalculator(double _threshold) : threshold(_threshold) {} // gt_bboxes -> des // bboxes -> det @@ -763,7 +822,7 @@ public: void consumeImage(const ImageDescription& detectedObjects, const ImageDescription& desiredObjects) { // Collecting IoU values std::vector visited(desiredObjects.alist.size(), false); - std::vector bboxes {std::begin(detectedObjects.alist), std::end(detectedObjects.alist)}; + std::vector bboxes{std::begin(detectedObjects.alist), std::end(detectedObjects.alist)}; std::sort(bboxes.begin(), bboxes.end(), SortBBoxDescend); for (auto&& detObj : bboxes) { @@ -882,10 +941,15 @@ public: * @param width - width of the rectangle * @param detectedObjects - vector of detected objects */ -static UNUSED void addRectangles(unsigned char* data, size_t height, size_t width, std::vector detectedObjects) { - std::vector colors = {{128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, {153, 153, 153}, {30, 170, 250}, - {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, - {70, 0, 0}, {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, {81, 0, 81}}; +static UNUSED void addRectangles(unsigned char* data, + size_t height, + size_t width, + std::vector detectedObjects) { + std::vector colors = {{128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, + {153, 
153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, + {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, + {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, + {81, 0, 81}}; for (size_t i = 0; i < detectedObjects.size(); i++) { int cls = detectedObjects[i].objectType % colors.size(); @@ -923,10 +987,11 @@ inline std::size_t getTensorWidth(const InferenceEngine::TensorDesc& desc) { const auto& layout = desc.getLayout(); const auto& dims = desc.getDims(); const auto& size = dims.size(); - if ((size >= 2) && (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || layout == InferenceEngine::Layout::NCDHW || - layout == InferenceEngine::Layout::NDHWC || layout == InferenceEngine::Layout::OIHW || layout == InferenceEngine::Layout::GOIHW || - layout == InferenceEngine::Layout::OIDHW || layout == InferenceEngine::Layout::GOIDHW || layout == InferenceEngine::Layout::CHW || - layout == InferenceEngine::Layout::HW)) { + if ((size >= 2) && (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || + layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC || + layout == InferenceEngine::Layout::OIHW || layout == InferenceEngine::Layout::GOIHW || + layout == InferenceEngine::Layout::OIDHW || layout == InferenceEngine::Layout::GOIDHW || + layout == InferenceEngine::Layout::CHW || layout == InferenceEngine::Layout::HW)) { // Regardless of layout, dimensions are stored in fixed order return dims.back(); } else { @@ -939,10 +1004,11 @@ inline std::size_t getTensorHeight(const InferenceEngine::TensorDesc& desc) { const auto& layout = desc.getLayout(); const auto& dims = desc.getDims(); const auto& size = dims.size(); - if ((size >= 2) && (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || layout == InferenceEngine::Layout::NCDHW || - layout == InferenceEngine::Layout::NDHWC || layout == InferenceEngine::Layout::OIHW || layout == InferenceEngine::Layout::GOIHW || - layout == InferenceEngine::Layout::OIDHW || layout == InferenceEngine::Layout::GOIDHW || layout == InferenceEngine::Layout::CHW || - layout == InferenceEngine::Layout::HW)) { + if ((size >= 2) && (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || + layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC || + layout == InferenceEngine::Layout::OIHW || layout == InferenceEngine::Layout::GOIHW || + layout == InferenceEngine::Layout::OIDHW || layout == InferenceEngine::Layout::GOIDHW || + layout == InferenceEngine::Layout::CHW || layout == InferenceEngine::Layout::HW)) { // Regardless of layout, dimensions are stored in fixed order return dims.at(size - 2); } else { @@ -953,8 +1019,9 @@ inline std::size_t getTensorHeight(const InferenceEngine::TensorDesc& desc) { inline std::size_t getTensorChannels(const InferenceEngine::TensorDesc& desc) { const auto& layout = desc.getLayout(); - if (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || layout == InferenceEngine::Layout::NCDHW || - layout == InferenceEngine::Layout::NDHWC || layout == InferenceEngine::Layout::C || layout == InferenceEngine::Layout::CHW || + if (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || + layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC || + layout == InferenceEngine::Layout::C || layout == InferenceEngine::Layout::CHW || layout == 
InferenceEngine::Layout::NC || layout == InferenceEngine::Layout::CN) { // Regardless of layout, dimensions are stored in fixed order const auto& dims = desc.getDims(); @@ -982,8 +1049,9 @@ inline std::size_t getTensorChannels(const InferenceEngine::TensorDesc& desc) { inline std::size_t getTensorBatch(const InferenceEngine::TensorDesc& desc) { const auto& layout = desc.getLayout(); - if (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || layout == InferenceEngine::Layout::NCDHW || - layout == InferenceEngine::Layout::NDHWC || layout == InferenceEngine::Layout::NC || layout == InferenceEngine::Layout::CN) { + if (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || + layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC || + layout == InferenceEngine::Layout::NC || layout == InferenceEngine::Layout::CN) { // Regardless of layout, dimensions are stored in fixed order const auto& dims = desc.getDims(); switch (desc.getLayoutByDims(dims)) { diff --git a/inference-engine/samples/common/utils/include/samples/console_progress.hpp b/inference-engine/samples/common/utils/include/samples/console_progress.hpp index efe45d43d08..20aca64cdf1 100644 --- a/inference-engine/samples/common/utils/include/samples/console_progress.hpp +++ b/inference-engine/samples/common/utils/include/samples/console_progress.hpp @@ -29,9 +29,13 @@ public: * @param _total - maximum value that is correspondent to 100% * @param _detalization - number of symbols(.) to use to represent progress */ - explicit ConsoleProgress(size_t _total, bool _stream_output = false, size_t _percent_to_update = DEFAULT_PERCENT_TO_UPDATE_PROGRESS, + explicit ConsoleProgress(size_t _total, + bool _stream_output = false, + size_t _percent_to_update = DEFAULT_PERCENT_TO_UPDATE_PROGRESS, size_t _detalization = DEFAULT_DETALIZATION) - : total(_total), detalization(_detalization), percent_to_update(_percent_to_update) { + : total(_total), + detalization(_detalization), + percent_to_update(_percent_to_update) { stream_output = _stream_output; if (total == 0) { total = 1; diff --git a/inference-engine/samples/common/utils/include/samples/csv_dumper.hpp b/inference-engine/samples/common/utils/include/samples/csv_dumper.hpp index 807f7c02295..251c9b880bc 100644 --- a/inference-engine/samples/common/utils/include/samples/csv_dumper.hpp +++ b/inference-engine/samples/common/utils/include/samples/csv_dumper.hpp @@ -36,7 +36,7 @@ public: * @param enabled - True if dumping is enabled by default. * @param name - name of file to dump to. File won't be created if first parameter is false. 
*/ - explicit CsvDumper(bool enabled = true, const std::string& name = ""): canDump(enabled) { + explicit CsvDumper(bool enabled = true, const std::string& name = "") : canDump(enabled) { if (!canDump) { return; } diff --git a/inference-engine/samples/common/utils/include/samples/ocv_common.hpp b/inference-engine/samples/common/utils/include/samples/ocv_common.hpp index 7f2de431ebe..0f5d27bc5a8 100644 --- a/inference-engine/samples/common/utils/include/samples/ocv_common.hpp +++ b/inference-engine/samples/common/utils/include/samples/ocv_common.hpp @@ -70,7 +70,9 @@ static UNUSED InferenceEngine::Blob::Ptr wrapMat2Blob(const cv::Mat& mat) { if (!is_dense) IE_THROW() << "Doesn't support conversion from not dense cv::Mat"; - InferenceEngine::TensorDesc tDesc(InferenceEngine::Precision::U8, {1, channels, height, width}, InferenceEngine::Layout::NHWC); + InferenceEngine::TensorDesc tDesc(InferenceEngine::Precision::U8, + {1, channels, height, width}, + InferenceEngine::Layout::NHWC); return InferenceEngine::make_shared_blob(tDesc, mat.data); } diff --git a/inference-engine/samples/common/utils/include/samples/os/windows/w_dirent.h b/inference-engine/samples/common/utils/include/samples/os/windows/w_dirent.h index 5352a8f8b13..908f2bff4af 100644 --- a/inference-engine/samples/common/utils/include/samples/os/windows/w_dirent.h +++ b/inference-engine/samples/common/utils/include/samples/os/windows/w_dirent.h @@ -6,43 +6,43 @@ #if defined(_WIN32) - #ifndef WIN32_LEAN_AND_MEAN - #define WIN32_LEAN_AND_MEAN - #define WIN32_LEAN_AND_MEAN_UNDEF - #endif +# ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN_UNDEF +# endif - #ifndef NOMINMAX - #define NOMINMAX - #define NOMINMAX_UNDEF - #endif +# ifndef NOMINMAX +# define NOMINMAX +# define NOMINMAX_UNDEF +# endif - #if defined(_M_IX86) && !defined(_X86_) && !defined(_AMD64_) - #define _X86_ - #endif +# if defined(_M_IX86) && !defined(_X86_) && !defined(_AMD64_) +# define _X86_ +# endif - #if defined(_M_X64) && !defined(_X86_) && !defined(_AMD64_) - #define _AMD64_ - #endif +# if defined(_M_X64) && !defined(_X86_) && !defined(_AMD64_) +# define _AMD64_ +# endif - #if defined(_M_ARM) && !defined(_ARM_) && !defined(_ARM64_) - #define _ARM_ - #endif +# if defined(_M_ARM) && !defined(_ARM_) && !defined(_ARM64_) +# define _ARM_ +# endif - #if defined(_M_ARM64) && !defined(_ARM_) && !defined(_ARM64_) - #define _ARM64_ - #endif +# if defined(_M_ARM64) && !defined(_ARM_) && !defined(_ARM64_) +# define _ARM64_ +# endif - // clang-format off +// clang-format off #include #include #include #include #include - // clang-format on +// clang-format on - // Copied from linux libc sys/stat.h: - #define S_ISREG(m) (((m)&S_IFMT) == S_IFREG) - #define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR) +// Copied from linux libc sys/stat.h: +# define S_ISREG(m) (((m)&S_IFMT) == S_IFREG) +# define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR) /// @brief structure to store directory names struct dirent { @@ -79,7 +79,7 @@ public: DIR& operator=(const DIR& other) = delete; DIR& operator=(DIR&& other) = delete; - explicit DIR(const char* dirPath): next(nullptr) { + explicit DIR(const char* dirPath) : next(nullptr) { std::string ws = dirPath; if (endsWith(ws, "\\")) ws += "*"; @@ -158,19 +158,19 @@ static void closedir(DIR* dp) { delete dp; } - #ifdef WIN32_LEAN_AND_MEAN_UNDEF - #undef WIN32_LEAN_AND_MEAN - #undef WIN32_LEAN_AND_MEAN_UNDEF - #endif +# ifdef WIN32_LEAN_AND_MEAN_UNDEF +# undef WIN32_LEAN_AND_MEAN +# undef WIN32_LEAN_AND_MEAN_UNDEF +# endif - #ifdef 
NOMINMAX_UNDEF - #undef NOMINMAX_UNDEF - #undef NOMINMAX - #endif +# ifdef NOMINMAX_UNDEF +# undef NOMINMAX_UNDEF +# undef NOMINMAX +# endif #else - #include - #include +# include +# include #endif diff --git a/inference-engine/samples/common/utils/src/args_helper.cpp b/inference-engine/samples/common/utils/src/args_helper.cpp index 8ec1678f57b..43bf96d86a6 100644 --- a/inference-engine/samples/common/utils/src/args_helper.cpp +++ b/inference-engine/samples/common/utils/src/args_helper.cpp @@ -11,9 +11,9 @@ #include #ifdef _WIN32 - #include +# include #else - #include +# include #endif /** @@ -142,10 +142,18 @@ InferenceEngine::Precision getPrecision(std::string value, const supported_preci InferenceEngine::Precision getPrecision(const std::string& value) { static const supported_precisions_t supported_precisions = { - {"FP32", InferenceEngine::Precision::FP32}, {"FP16", InferenceEngine::Precision::FP16}, {"BF16", InferenceEngine::Precision::BF16}, - {"U64", InferenceEngine::Precision::U64}, {"I64", InferenceEngine::Precision::I64}, {"U32", InferenceEngine::Precision::U32}, - {"I32", InferenceEngine::Precision::I32}, {"U16", InferenceEngine::Precision::U16}, {"I16", InferenceEngine::Precision::I16}, - {"U8", InferenceEngine::Precision::U8}, {"I8", InferenceEngine::Precision::I8}, {"BOOL", InferenceEngine::Precision::BOOL}, + {"FP32", InferenceEngine::Precision::FP32}, + {"FP16", InferenceEngine::Precision::FP16}, + {"BF16", InferenceEngine::Precision::BF16}, + {"U64", InferenceEngine::Precision::U64}, + {"I64", InferenceEngine::Precision::I64}, + {"U32", InferenceEngine::Precision::U32}, + {"I32", InferenceEngine::Precision::I32}, + {"U16", InferenceEngine::Precision::U16}, + {"I16", InferenceEngine::Precision::I16}, + {"U8", InferenceEngine::Precision::U8}, + {"I8", InferenceEngine::Precision::I8}, + {"BOOL", InferenceEngine::Precision::BOOL}, }; return getPrecision(value, supported_precisions); @@ -176,7 +184,10 @@ void setPrecisions(const InferenceEngine::CNNNetwork& network, const std::string } // namespace -void processPrecision(InferenceEngine::CNNNetwork& network, const std::string& ip, const std::string& op, const std::string& iop) { +void processPrecision(InferenceEngine::CNNNetwork& network, + const std::string& ip, + const std::string& op, + const std::string& iop) { if (!ip.empty()) { const auto user_precision = getPrecision(ip); for (auto&& layer : network.getInputsInfo()) { @@ -213,20 +224,27 @@ InferenceEngine::Layout getLayout(std::string value, const supported_layouts_t& InferenceEngine::Layout getLayout(const std::string& value) { static const supported_layouts_t supported_layouts = { - {"NCDHW", InferenceEngine::Layout::NCDHW}, {"NDHWC", InferenceEngine::Layout::NDHWC}, {"NCHW", InferenceEngine::Layout::NCHW}, - {"NHWC", InferenceEngine::Layout::NHWC}, {"CHW", InferenceEngine::Layout::CHW}, {"HWC", InferenceEngine::Layout::HWC}, - {"NC", InferenceEngine::Layout::NC}, {"C", InferenceEngine::Layout::C}, + {"NCDHW", InferenceEngine::Layout::NCDHW}, + {"NDHWC", InferenceEngine::Layout::NDHWC}, + {"NCHW", InferenceEngine::Layout::NCHW}, + {"NHWC", InferenceEngine::Layout::NHWC}, + {"CHW", InferenceEngine::Layout::CHW}, + {"HWC", InferenceEngine::Layout::HWC}, + {"NC", InferenceEngine::Layout::NC}, + {"C", InferenceEngine::Layout::C}, }; return getLayout(value, supported_layouts); } bool isMatchLayoutToDims(InferenceEngine::Layout layout, size_t dimension) { - static const matchLayoutToDims_t matchLayoutToDims = { - {static_cast(InferenceEngine::Layout::NCDHW), 5}, 
{static_cast(InferenceEngine::Layout::NDHWC), 5}, - {static_cast(InferenceEngine::Layout::NCHW), 4}, {static_cast(InferenceEngine::Layout::NHWC), 4}, - {static_cast(InferenceEngine::Layout::CHW), 3}, {static_cast(InferenceEngine::Layout::NC), 2}, - {static_cast(InferenceEngine::Layout::C), 1}}; + static const matchLayoutToDims_t matchLayoutToDims = {{static_cast(InferenceEngine::Layout::NCDHW), 5}, + {static_cast(InferenceEngine::Layout::NDHWC), 5}, + {static_cast(InferenceEngine::Layout::NCHW), 4}, + {static_cast(InferenceEngine::Layout::NHWC), 4}, + {static_cast(InferenceEngine::Layout::CHW), 3}, + {static_cast(InferenceEngine::Layout::NC), 2}, + {static_cast(InferenceEngine::Layout::C), 1}}; const auto dims = matchLayoutToDims.find(static_cast(layout)); if (dims == matchLayoutToDims.end()) { @@ -269,7 +287,10 @@ void setLayouts(const InferenceEngine::CNNNetwork& network, const std::string io } // namespace -void processLayout(InferenceEngine::CNNNetwork& network, const std::string& il, const std::string& ol, const std::string& iol) { +void processLayout(InferenceEngine::CNNNetwork& network, + const std::string& il, + const std::string& ol, + const std::string& iol) { if (!il.empty()) { const auto layout = getLayout(il); for (auto&& layer : network.getInputsInfo()) { @@ -296,10 +317,12 @@ void processLayout(InferenceEngine::CNNNetwork& network, const std::string& il, void printInputAndOutputsInfo(const InferenceEngine::CNNNetwork& network) { std::cout << "Network inputs:" << std::endl; for (auto&& layer : network.getInputsInfo()) { - std::cout << " " << layer.first << " : " << layer.second->getPrecision() << " / " << layer.second->getLayout() << std::endl; + std::cout << " " << layer.first << " : " << layer.second->getPrecision() << " / " + << layer.second->getLayout() << std::endl; } std::cout << "Network outputs:" << std::endl; for (auto&& layer : network.getOutputsInfo()) { - std::cout << " " << layer.first << " : " << layer.second->getPrecision() << " / " << layer.second->getLayout() << std::endl; + std::cout << " " << layer.first << " : " << layer.second->getPrecision() << " / " + << layer.second->getLayout() << std::endl; } } diff --git a/inference-engine/samples/common/utils/src/slog.cpp b/inference-engine/samples/common/utils/src/slog.cpp index 05cb0aece8b..4edf48e61ef 100644 --- a/inference-engine/samples/common/utils/src/slog.cpp +++ b/inference-engine/samples/common/utils/src/slog.cpp @@ -12,7 +12,7 @@ LogStream info("INFO", std::cout); LogStream warn("WARNING", std::cout); LogStream err("ERROR", std::cerr); -LogStream::LogStream(const std::string& prefix, std::ostream& log_stream): _prefix(prefix), _new_line(true) { +LogStream::LogStream(const std::string& prefix, std::ostream& log_stream) : _prefix(prefix), _new_line(true) { _log_stream = &log_stream; } diff --git a/inference-engine/samples/hello_classification/main.cpp b/inference-engine/samples/hello_classification/main.cpp index 858a87ead5b..1be7dbf6a37 100644 --- a/inference-engine/samples/hello_classification/main.cpp +++ b/inference-engine/samples/hello_classification/main.cpp @@ -18,15 +18,15 @@ using namespace InferenceEngine; * @brief Define names based depends on Unicode path support */ #if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) - #define tcout std::wcout - #define file_name_t std::wstring - #define imread_t imreadW - #define ClassificationResult_t ClassificationResultW +# define tcout std::wcout +# define file_name_t std::wstring +# define imread_t imreadW +# define ClassificationResult_t 
ClassificationResultW #else - #define tcout std::cout - #define file_name_t std::string - #define imread_t cv::imread - #define ClassificationResult_t ClassificationResult +# define tcout std::cout +# define file_name_t std::string +# define imread_t cv::imread +# define ClassificationResult_t ClassificationResult #endif #if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) @@ -43,7 +43,9 @@ cv::Mat imreadW(std::wstring input_image_path) { std::size_t file_size = input_image_stream.tellg(); input_image_stream.seekg(0, std::ios::beg); std::vector buffer(0); - std::copy(std::istreambuf_iterator(input_image_stream), std::istreambuf_iterator(), std::back_inserter(buffer)); + std::copy(std::istreambuf_iterator(input_image_stream), + std::istreambuf_iterator(), + std::back_inserter(buffer)); image = cv::imdecode(cv::Mat(1, file_size, CV_8UC1, &buffer[0]), cv::IMREAD_COLOR); } else { tcout << "Input file '" << input_image_path << "' processing error" << std::endl; @@ -83,12 +85,12 @@ int main(int argc, char* argv[]) { return EXIT_FAILURE; } - const file_name_t input_model {argv[1]}; - const file_name_t input_image_path {argv[2]}; + const file_name_t input_model{argv[1]}; + const file_name_t input_image_path{argv[2]}; #if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) const std::string device_name = simpleConvert(argv[3]); #else - const std::string device_name {argv[3]}; + const std::string device_name{argv[3]}; #endif // ----------------------------------------------------------------------------------------------------- diff --git a/inference-engine/samples/hello_nv12_input_classification/main.cpp b/inference-engine/samples/hello_nv12_input_classification/main.cpp index 59ec0329cce..9de3fa2bd68 100644 --- a/inference-engine/samples/hello_nv12_input_classification/main.cpp +++ b/inference-engine/samples/hello_nv12_input_classification/main.cpp @@ -16,9 +16,9 @@ #include #include #ifdef _WIN32 - #include +# include #else - #include +# include #endif using namespace InferenceEngine; @@ -147,8 +147,12 @@ std::vector readInputBlobs(std::vector& data, size_t width, // logical height // Create tensor descriptors for Y and UV blobs - const InferenceEngine::TensorDesc y_plane_desc(InferenceEngine::Precision::U8, {1, 1, height, width}, InferenceEngine::Layout::NHWC); - const InferenceEngine::TensorDesc uv_plane_desc(InferenceEngine::Precision::U8, {1, 2, height / 2, width / 2}, InferenceEngine::Layout::NHWC); + const InferenceEngine::TensorDesc y_plane_desc(InferenceEngine::Precision::U8, + {1, 1, height, width}, + InferenceEngine::Layout::NHWC); + const InferenceEngine::TensorDesc uv_plane_desc(InferenceEngine::Precision::U8, + {1, 2, height / 2, width / 2}, + InferenceEngine::Layout::NHWC); const size_t offset = width * height; std::vector blobs; @@ -177,13 +181,15 @@ std::vector readInputBlobs(std::vector& data, size_t width, bool isBatchedBlobSupported(const Core& ie, const std::string& device_name) { const std::vector supported_metrics = ie.GetMetric(device_name, METRIC_KEY(SUPPORTED_METRICS)); - if (std::find(supported_metrics.begin(), supported_metrics.end(), METRIC_KEY(OPTIMIZATION_CAPABILITIES)) == supported_metrics.end()) { + if (std::find(supported_metrics.begin(), supported_metrics.end(), METRIC_KEY(OPTIMIZATION_CAPABILITIES)) == + supported_metrics.end()) { return false; } const std::vector optimization_caps = ie.GetMetric(device_name, METRIC_KEY(OPTIMIZATION_CAPABILITIES)); - return std::find(optimization_caps.begin(), optimization_caps.end(), METRIC_VALUE(BATCHED_BLOB)) != 
optimization_caps.end(); + return std::find(optimization_caps.begin(), optimization_caps.end(), METRIC_VALUE(BATCHED_BLOB)) != + optimization_caps.end(); } /** @@ -194,15 +200,16 @@ int main(int argc, char* argv[]) { // ------------------------------ Parsing and validation input // arguments------------------------------ if (argc != 5) { - std::cout << "Usage : " << argv[0] << " " << std::endl; + std::cout << "Usage : " << argv[0] << " " + << std::endl; return EXIT_FAILURE; } - const std::string input_model {argv[1]}; - const std::string input_image_path {argv[2]}; + const std::string input_model{argv[1]}; + const std::string input_image_path{argv[2]}; size_t input_width = 0, input_height = 0; std::tie(input_width, input_height) = parseImageSize(argv[3]); - const std::string device_name {argv[4]}; + const std::string device_name{argv[4]}; // ----------------------------------------------------------------------------------------------------- // ------------------------------ Read image names diff --git a/inference-engine/samples/hello_query_device/main.cpp b/inference-engine/samples/hello_query_device/main.cpp index 2fb140ce386..2f692071db4 100644 --- a/inference-engine/samples/hello_query_device/main.cpp +++ b/inference-engine/samples/hello_query_device/main.cpp @@ -125,7 +125,8 @@ int main(int argc, char* argv[]) { } } - if (std::find(supportedMetrics.begin(), supportedMetrics.end(), METRIC_KEY(SUPPORTED_CONFIG_KEYS)) != supportedMetrics.end()) { + if (std::find(supportedMetrics.begin(), supportedMetrics.end(), METRIC_KEY(SUPPORTED_CONFIG_KEYS)) != + supportedMetrics.end()) { std::cout << "\tSUPPORTED_CONFIG_KEYS (default values): " << std::endl; std::vector supportedConfigKeys = ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); for (auto&& configKey : supportedConfigKeys) { diff --git a/inference-engine/samples/hello_reshape_ssd/main.cpp b/inference-engine/samples/hello_reshape_ssd/main.cpp index e625e068659..f91782688d1 100644 --- a/inference-engine/samples/hello_reshape_ssd/main.cpp +++ b/inference-engine/samples/hello_reshape_ssd/main.cpp @@ -21,10 +21,10 @@ int main(int argc, char* argv[]) { std::cout << "Usage : " << argv[0] << " " << std::endl; return EXIT_FAILURE; } - const std::string input_model {argv[1]}; - const std::string input_image_path {argv[2]}; - const std::string device_name {argv[3]}; - const size_t batch_size {std::stoul(argv[4])}; + const std::string input_model{argv[1]}; + const std::string input_image_path{argv[2]}; + const std::string device_name{argv[3]}; + const size_t batch_size{std::stoul(argv[4])}; // ----------------------------------------------------------------------------------------------------- // --------------------------- Step 1. 
Initialize inference engine core @@ -177,8 +177,8 @@ int main(int argc, char* argv[]) { std::ostringstream conf; conf << ":" << std::fixed << std::setprecision(3) << confidence; cv::rectangle(image, cv::Point2f(xmin, ymin), cv::Point2f(xmax, ymax), cv::Scalar(0, 0, 255)); - std::cout << "[" << cur_proposal << "," << label << "] element, prob = " << confidence << ", bbox = (" << xmin << "," << ymin << ")-(" << xmax - << "," << ymax << ")" + std::cout << "[" << cur_proposal << "," << label << "] element, prob = " << confidence << ", bbox = (" + << xmin << "," << ymin << ")-(" << xmax << "," << ymax << ")" << ", batch id = " << image_id << std::endl; } } diff --git a/inference-engine/samples/hello_reshape_ssd/reshape_ssd_extension.hpp b/inference-engine/samples/hello_reshape_ssd/reshape_ssd_extension.hpp index 1e6ae59bf6f..4c2c0cbe1bd 100644 --- a/inference-engine/samples/hello_reshape_ssd/reshape_ssd_extension.hpp +++ b/inference-engine/samples/hello_reshape_ssd/reshape_ssd_extension.hpp @@ -14,7 +14,7 @@ class CustomReLUImpl : public InferenceEngine::ILayerExecImpl { public: - explicit CustomReLUImpl(const std::shared_ptr& node): _node(node) {} + explicit CustomReLUImpl(const std::shared_ptr& node) : _node(node) {} InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc* /*resp*/) noexcept override { @@ -44,15 +44,19 @@ public: return InferenceEngine::OK; } - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& /*config*/, InferenceEngine::ResponseDesc* /*resp*/) noexcept override { + InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& /*config*/, + InferenceEngine::ResponseDesc* /*resp*/) noexcept override { return InferenceEngine::StatusCode::OK; } - InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::StatusCode execute(std::vector& inputs, + std::vector& outputs, InferenceEngine::ResponseDesc* /*resp*/) noexcept override { static bool wasCalled = false; if (!wasCalled) { - std::cout << "Running " + std::string(CUSTOM_RELU_TYPE) + " kernel for the first time (next messages won't be printed)" << std::endl; + std::cout << "Running " + std::string(CUSTOM_RELU_TYPE) + + " kernel for the first time (next messages won't be printed)" + << std::endl; wasCalled = true; } for (size_t i = 0; i < inputs.size(); i++) { @@ -80,13 +84,13 @@ private: class CustomReluOp : public ngraph::op::Op { public: - static constexpr ngraph::NodeTypeInfo type_info {CUSTOM_RELU_TYPE, 0}; + static constexpr ngraph::NodeTypeInfo type_info{CUSTOM_RELU_TYPE, 0}; const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; } CustomReluOp() = default; - explicit CustomReluOp(const ngraph::Output& arg): Op({arg}) { + explicit CustomReluOp(const ngraph::Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); } @@ -134,7 +138,8 @@ public: return {"CPU"}; } - InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr& node, const std::string& implType) override { + InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr& node, + const std::string& implType) override { if (impls.find(node->description()) == impls.end() || implType != "CPU") return nullptr; return impls[node->description()](node); diff --git a/inference-engine/samples/ngraph_function_creation_sample/main.cpp b/inference-engine/samples/ngraph_function_creation_sample/main.cpp index 6cb1e104305..8afc44783d7 100644 --- a/inference-engine/samples/ngraph_function_creation_sample/main.cpp +++ 
b/inference-engine/samples/ngraph_function_creation_sample/main.cpp @@ -113,107 +113,151 @@ std::shared_ptr createNgraphFunction() { "classification network."; // -------input------ - std::vector padBegin {0, 0}; - std::vector padEnd {0, 0}; + std::vector padBegin{0, 0}; + std::vector padEnd{0, 0}; - auto paramNode = std::make_shared(element::Type_t::f32, Shape(std::vector {{64, 1, 28, 28}})); + auto paramNode = std::make_shared(element::Type_t::f32, Shape(std::vector{{64, 1, 28, 28}})); paramNode->set_friendly_name("Parameter"); // -------convolution 1---- - auto convFirstShape = Shape {20, 1, 5, 5}; + auto convFirstShape = Shape{20, 1, 5, 5}; std::shared_ptr convolutionFirstConstantNode = std::make_shared(element::Type_t::f32, convFirstShape, weightsPtr->cbuffer().as()); std::shared_ptr convolutionNodeFirst = - std::make_shared(paramNode->output(0), convolutionFirstConstantNode->output(0), Strides(SizeVector {1, 1}), - CoordinateDiff(padBegin), CoordinateDiff(padEnd), Strides(SizeVector {1, 1})); + std::make_shared(paramNode->output(0), + convolutionFirstConstantNode->output(0), + Strides(SizeVector{1, 1}), + CoordinateDiff(padBegin), + CoordinateDiff(padEnd), + Strides(SizeVector{1, 1})); // -------Add-------------- - auto addFirstShape = Shape {1, 20, 1, 1}; + auto addFirstShape = Shape{1, 20, 1, 1}; auto offset = shape_size(convFirstShape) * sizeof(float); std::shared_ptr addFirstConstantNode = - std::make_shared(element::Type_t::f32, addFirstShape, (weightsPtr->cbuffer().as() + offset)); + std::make_shared(element::Type_t::f32, + addFirstShape, + (weightsPtr->cbuffer().as() + offset)); - std::shared_ptr addNodeFirst = std::make_shared(convolutionNodeFirst->output(0), addFirstConstantNode->output(0)); + std::shared_ptr addNodeFirst = + std::make_shared(convolutionNodeFirst->output(0), addFirstConstantNode->output(0)); // -------MAXPOOL---------- - Shape padBeginShape {0, 0}; - Shape padEndShape {0, 0}; + Shape padBeginShape{0, 0}; + Shape padEndShape{0, 0}; - std::shared_ptr maxPoolingNodeFirst = - std::make_shared(addNodeFirst->output(0), std::vector {2, 2}, padBeginShape, padEndShape, std::vector {2, 2}, - op::RoundingType::CEIL, op::PadType::EXPLICIT); + std::shared_ptr maxPoolingNodeFirst = std::make_shared(addNodeFirst->output(0), + std::vector{2, 2}, + padBeginShape, + padEndShape, + std::vector{2, 2}, + op::RoundingType::CEIL, + op::PadType::EXPLICIT); // -------convolution 2---- - auto convSecondShape = Shape {50, 20, 5, 5}; + auto convSecondShape = Shape{50, 20, 5, 5}; offset += shape_size(addFirstShape) * sizeof(float); std::shared_ptr convolutionSecondConstantNode = - std::make_shared(element::Type_t::f32, convSecondShape, (weightsPtr->cbuffer().as() + offset)); + std::make_shared(element::Type_t::f32, + convSecondShape, + (weightsPtr->cbuffer().as() + offset)); std::shared_ptr convolutionNodeSecond = - std::make_shared(maxPoolingNodeFirst->output(0), convolutionSecondConstantNode->output(0), Strides({1, 1}), - CoordinateDiff(padBegin), CoordinateDiff(padEnd), Strides({1, 1})); + std::make_shared(maxPoolingNodeFirst->output(0), + convolutionSecondConstantNode->output(0), + Strides({1, 1}), + CoordinateDiff(padBegin), + CoordinateDiff(padEnd), + Strides({1, 1})); // -------Add 2------------ - auto addSecondShape = Shape {1, 50, 1, 1}; + auto addSecondShape = Shape{1, 50, 1, 1}; offset += shape_size(convSecondShape) * sizeof(float); std::shared_ptr addSecondConstantNode = - std::make_shared(element::Type_t::f32, addSecondShape, (weightsPtr->cbuffer().as() + offset)); + 
std::make_shared(element::Type_t::f32, + addSecondShape, + (weightsPtr->cbuffer().as() + offset)); - std::shared_ptr addNodeSecond = std::make_shared(convolutionNodeSecond->output(0), addSecondConstantNode->output(0)); + std::shared_ptr addNodeSecond = + std::make_shared(convolutionNodeSecond->output(0), addSecondConstantNode->output(0)); // -------MAXPOOL 2-------- - std::shared_ptr maxPoolingNodeSecond = std::make_shared(addNodeSecond->output(0), Strides {2, 2}, padBeginShape, padEndShape, - Shape {2, 2}, op::RoundingType::CEIL, op::PadType::EXPLICIT); + std::shared_ptr maxPoolingNodeSecond = std::make_shared(addNodeSecond->output(0), + Strides{2, 2}, + padBeginShape, + padEndShape, + Shape{2, 2}, + op::RoundingType::CEIL, + op::PadType::EXPLICIT); // -------Reshape---------- - auto reshapeFirstShape = Shape {2}; + auto reshapeFirstShape = Shape{2}; auto reshapeOffset = shape_size(addSecondShape) * sizeof(float) + offset; std::shared_ptr reshapeFirstConstantNode = - std::make_shared(element::Type_t::i64, reshapeFirstShape, (weightsPtr->cbuffer().as() + reshapeOffset)); + std::make_shared(element::Type_t::i64, + reshapeFirstShape, + (weightsPtr->cbuffer().as() + reshapeOffset)); - std::shared_ptr reshapeFirstNode = std::make_shared(maxPoolingNodeSecond->output(0), reshapeFirstConstantNode->output(0), true); + std::shared_ptr reshapeFirstNode = + std::make_shared(maxPoolingNodeSecond->output(0), reshapeFirstConstantNode->output(0), true); // -------MatMul 1--------- - auto matMulFirstShape = Shape {500, 800}; + auto matMulFirstShape = Shape{500, 800}; offset = shape_size(reshapeFirstShape) * sizeof(int64_t) + reshapeOffset; std::shared_ptr matMulFirstConstantNode = - std::make_shared(element::Type_t::f32, matMulFirstShape, (weightsPtr->cbuffer().as() + offset)); + std::make_shared(element::Type_t::f32, + matMulFirstShape, + (weightsPtr->cbuffer().as() + offset)); - std::shared_ptr matMulFirstNode = std::make_shared(reshapeFirstNode->output(0), matMulFirstConstantNode->output(0), false, true); + std::shared_ptr matMulFirstNode = + std::make_shared(reshapeFirstNode->output(0), matMulFirstConstantNode->output(0), false, true); // -------Add 3------------ - auto addThirdShape = Shape {1, 500}; + auto addThirdShape = Shape{1, 500}; offset += shape_size(matMulFirstShape) * sizeof(float); std::shared_ptr addThirdConstantNode = - std::make_shared(element::Type_t::f32, addThirdShape, (weightsPtr->cbuffer().as() + offset)); + std::make_shared(element::Type_t::f32, + addThirdShape, + (weightsPtr->cbuffer().as() + offset)); - std::shared_ptr addThirdNode = std::make_shared(matMulFirstNode->output(0), addThirdConstantNode->output(0)); + std::shared_ptr addThirdNode = + std::make_shared(matMulFirstNode->output(0), addThirdConstantNode->output(0)); // -------Relu------------- std::shared_ptr reluNode = std::make_shared(addThirdNode->output(0)); // -------Reshape 2-------- - auto reshapeSecondShape = Shape {2}; + auto reshapeSecondShape = Shape{2}; std::shared_ptr reshapeSecondConstantNode = - std::make_shared(element::Type_t::i64, reshapeSecondShape, (weightsPtr->cbuffer().as() + reshapeOffset)); + std::make_shared(element::Type_t::i64, + reshapeSecondShape, + (weightsPtr->cbuffer().as() + reshapeOffset)); - std::shared_ptr reshapeSecondNode = std::make_shared(reluNode->output(0), reshapeSecondConstantNode->output(0), true); + std::shared_ptr reshapeSecondNode = + std::make_shared(reluNode->output(0), reshapeSecondConstantNode->output(0), true); // -------MatMul 2--------- - auto matMulSecondShape = 
Shape {10, 500}; + auto matMulSecondShape = Shape{10, 500}; offset += shape_size(addThirdShape) * sizeof(float); std::shared_ptr matMulSecondConstantNode = - std::make_shared(element::Type_t::f32, matMulSecondShape, (weightsPtr->cbuffer().as() + offset)); + std::make_shared(element::Type_t::f32, + matMulSecondShape, + (weightsPtr->cbuffer().as() + offset)); - std::shared_ptr matMulSecondNode = std::make_shared(reshapeSecondNode->output(0), matMulSecondConstantNode->output(0), false, true); + std::shared_ptr matMulSecondNode = + std::make_shared(reshapeSecondNode->output(0), matMulSecondConstantNode->output(0), false, true); // -------Add 4------------ - auto add4Shape = Shape {1, 10}; + auto add4Shape = Shape{1, 10}; offset += shape_size(matMulSecondShape) * sizeof(float); - std::shared_ptr add4ConstantNode = std::make_shared(element::Type_t::f32, add4Shape, (weightsPtr->cbuffer().as() + offset)); + std::shared_ptr add4ConstantNode = + std::make_shared(element::Type_t::f32, + add4Shape, + (weightsPtr->cbuffer().as() + offset)); - std::shared_ptr add4Node = std::make_shared(matMulSecondNode->output(0), add4ConstantNode->output(0)); + std::shared_ptr add4Node = + std::make_shared(matMulSecondNode->output(0), add4ConstantNode->output(0)); // -------softMax---------- std::shared_ptr softMaxNode = std::make_shared(add4Node->output(0), 1); @@ -221,7 +265,8 @@ std::shared_ptr createNgraphFunction() { // -------ngraph function-- auto result_full = std::make_shared(softMaxNode->output(0)); - std::shared_ptr fnPtr = std::make_shared(result_full, ngraph::ParameterVector {paramNode}, "lenet"); + std::shared_ptr fnPtr = + std::make_shared(result_full, ngraph::ParameterVector{paramNode}, "lenet"); return fnPtr; } @@ -294,8 +339,8 @@ int main(int argc, char* argv[]) { continue; } /** Store image data **/ - std::shared_ptr data( - reader->getData(inputInfoItem.second->getTensorDesc().getDims()[3], inputInfoItem.second->getTensorDesc().getDims()[2])); + std::shared_ptr data(reader->getData(inputInfoItem.second->getTensorDesc().getDims()[3], + inputInfoItem.second->getTensorDesc().getDims()[2])); if (data.get() != nullptr) { imagesData.push_back(data); } @@ -384,7 +429,8 @@ int main(int argc, char* argv[]) { for (size_t ch = 0; ch < num_channels; ++ch) { /** [images stride + channels stride + pixel id ] all in * bytes **/ - data[image_id * image_size * num_channels + ch * image_size + pid] = imagesData.at(image_id).get()[pid * num_channels + ch]; + data[image_id * image_size * num_channels + ch * image_size + pid] = + imagesData.at(image_id).get()[pid * num_channels + ch]; } } } @@ -407,8 +453,9 @@ int main(int argc, char* argv[]) { /** Validating -nt value **/ const size_t resultsCnt = outputBlob->size() / batchSize; if (FLAGS_nt > resultsCnt || FLAGS_nt < 1) { - slog::warn << "-nt " << FLAGS_nt << " is not available for this network (-nt should be less than " << resultsCnt + 1 - << " and more than 0).\n Maximal value " << resultsCnt << " will be used."; + slog::warn << "-nt " << FLAGS_nt << " is not available for this network (-nt should be less than " + << resultsCnt + 1 << " and more than 0).\n Maximal value " << resultsCnt + << " will be used."; FLAGS_nt = resultsCnt; } diff --git a/inference-engine/samples/ngraph_function_creation_sample/ngraph_function_creation_sample.hpp b/inference-engine/samples/ngraph_function_creation_sample/ngraph_function_creation_sample.hpp index fc7726d963b..89230b1c66a 100644 --- a/inference-engine/samples/ngraph_function_creation_sample/ngraph_function_creation_sample.hpp 
+++ b/inference-engine/samples/ngraph_function_creation_sample/ngraph_function_creation_sample.hpp @@ -14,15 +14,17 @@ static const char help_message[] = "Print a usage message."; /// @brief message for images argument -static const char input_message[] = "Required. Path to a folder with images or path to image files. Support ubyte files only."; +static const char input_message[] = + "Required. Path to a folder with images or path to image files. Support ubyte files only."; /// @brief message for model argument static const char model_message[] = "Required. Path to a .bin file with weights for the trained model."; /// @brief message for assigning cnn calculation to device -static const char target_device_message[] = "Optional. Specify the target device to infer on (the list of available devices is shown below). " - "Default value is CPU. Use \"-d HETERO:\" format to specify HETERO plugin. " - "Sample will look for a suitable plugin for device specified."; +static const char target_device_message[] = + "Optional. Specify the target device to infer on (the list of available devices is shown below). " + "Default value is CPU. Use \"-d HETERO:\" format to specify HETERO plugin. " + "Sample will look for a suitable plugin for device specified."; /// @brief message for top results number static const char ntop_message[] = "Number of top results. The default value is 10."; diff --git a/inference-engine/samples/object_detection_sample_ssd/main.cpp b/inference-engine/samples/object_detection_sample_ssd/main.cpp index 1c7b6a00132..e403ee2bd25 100644 --- a/inference-engine/samples/object_detection_sample_ssd/main.cpp +++ b/inference-engine/samples/object_detection_sample_ssd/main.cpp @@ -100,7 +100,8 @@ int main(int argc, char* argv[]) { // Config for device plugin custom extension is loaded from an .xml // description ie.SetConfig({{PluginConfigParams::KEY_CONFIG_FILE, FLAGS_c}}, FLAGS_d); - slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c << slog::endl; + slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c + << slog::endl; } // ----------------------------------------------------------------------------------------------------- @@ -157,7 +158,8 @@ int main(int argc, char* argv[]) { Precision inputPrecision = Precision::FP32; item.second->setPrecision(inputPrecision); - if ((item.second->getTensorDesc().getDims()[1] != 3 && item.second->getTensorDesc().getDims()[1] != 6)) { + if ((item.second->getTensorDesc().getDims()[1] != 3 && + item.second->getTensorDesc().getDims()[1] != 6)) { throw std::logic_error("Invalid input info. 
Should be 3 or 6 values length"); } } @@ -182,7 +184,8 @@ int main(int argc, char* argv[]) { if (auto ngraphFunction = network.getFunction()) { for (const auto& out : outputsInfo) { for (const auto& op : ngraphFunction->get_ops()) { - if (op->get_type_info() == ngraph::op::DetectionOutput::type_info && op->get_friendly_name() == out.second->getName()) { + if (op->get_type_info() == ngraph::op::DetectionOutput::type_info && + op->get_friendly_name() == out.second->getName()) { outputName = out.first; outputInfo = out.second; break; @@ -239,7 +242,8 @@ int main(int argc, char* argv[]) { } /** Store image data **/ std::shared_ptr originalData(reader->getData()); - std::shared_ptr data(reader->getData(inputInfo->getTensorDesc().getDims()[3], inputInfo->getTensorDesc().getDims()[2])); + std::shared_ptr data( + reader->getData(inputInfo->getTensorDesc().getDims()[3], inputInfo->getTensorDesc().getDims()[2])); if (data.get() != nullptr) { originalImagesData.push_back(originalData); imagesData.push_back(data); @@ -253,7 +257,9 @@ int main(int argc, char* argv[]) { size_t batchSize = network.getBatchSize(); slog::info << "Batch size is " << std::to_string(batchSize) << slog::endl; if (batchSize != imagesData.size()) { - slog::warn << "Number of images " + std::to_string(imagesData.size()) + " doesn't match batch size " + std::to_string(batchSize) << slog::endl; + slog::warn << "Number of images " + std::to_string(imagesData.size()) + " doesn't match batch size " + + std::to_string(batchSize) + << slog::endl; batchSize = std::min(batchSize, imagesData.size()); slog::warn << "Number of images to be processed is " << std::to_string(batchSize) << slog::endl; } @@ -288,7 +294,8 @@ int main(int argc, char* argv[]) { for (size_t ch = 0; ch < num_channels; ++ch) { /** [images stride + channels stride + pixel id ] all in * bytes **/ - data[image_id * image_size * num_channels + ch * image_size + pid] = imagesData.at(image_id).get()[pid * num_channels + ch]; + data[image_id * image_size * num_channels + ch * image_size + pid] = + imagesData.at(image_id).get()[pid * num_channels + ch]; } } } @@ -312,8 +319,10 @@ int main(int argc, char* argv[]) { float* p = minput2Holder.as::value_type*>(); for (size_t image_id = 0; image_id < std::min(imagesData.size(), batchSize); ++image_id) { - p[image_id * imInfoDim + 0] = static_cast(inputsInfo[imageInputName]->getTensorDesc().getDims()[2]); - p[image_id * imInfoDim + 1] = static_cast(inputsInfo[imageInputName]->getTensorDesc().getDims()[3]); + p[image_id * imInfoDim + 0] = + static_cast(inputsInfo[imageInputName]->getTensorDesc().getDims()[2]); + p[image_id * imInfoDim + 1] = + static_cast(inputsInfo[imageInputName]->getTensorDesc().getDims()[3]); for (size_t k = 2; k < imInfoDim; k++) { p[image_id * imInfoDim + k] = 1.0f; // all scale factors are set to 1.0 } @@ -359,8 +368,8 @@ int main(int argc, char* argv[]) { auto xmax = static_cast(detection[curProposal * objectSize + 5] * imageWidths[image_id]); auto ymax = static_cast(detection[curProposal * objectSize + 6] * imageHeights[image_id]); - std::cout << "[" << curProposal << "," << label << "] element, prob = " << confidence << " (" << xmin << "," << ymin << ")-(" << xmax << "," - << ymax << ")" + std::cout << "[" << curProposal << "," << label << "] element, prob = " << confidence << " (" << xmin + << "," << ymin << ")-(" << xmax << "," << ymax << ")" << " batch id : " << image_id; if (confidence > 0.5) { @@ -376,10 +385,17 @@ int main(int argc, char* argv[]) { } for (size_t batch_id = 0; batch_id < batchSize; 
++batch_id) { - addRectangles(originalImagesData[batch_id].get(), imageHeights[batch_id], imageWidths[batch_id], boxes[batch_id], classes[batch_id], + addRectangles(originalImagesData[batch_id].get(), + imageHeights[batch_id], + imageWidths[batch_id], + boxes[batch_id], + classes[batch_id], BBOX_THICKNESS); const std::string image_path = "out_" + std::to_string(batch_id) + ".bmp"; - if (writeOutputBmp(image_path, originalImagesData[batch_id].get(), imageHeights[batch_id], imageWidths[batch_id])) { + if (writeOutputBmp(image_path, + originalImagesData[batch_id].get(), + imageHeights[batch_id], + imageWidths[batch_id])) { slog::info << "Image " + image_path + " created!" << slog::endl; } else { throw std::logic_error(std::string("Can't create a file: ") + image_path); diff --git a/inference-engine/samples/object_detection_sample_ssd/object_detection_sample_ssd.h b/inference-engine/samples/object_detection_sample_ssd/object_detection_sample_ssd.h index fea030e8c4b..24e39feb4b1 100644 --- a/inference-engine/samples/object_detection_sample_ssd/object_detection_sample_ssd.h +++ b/inference-engine/samples/object_detection_sample_ssd/object_detection_sample_ssd.h @@ -23,11 +23,12 @@ static const char model_message[] = "Required. Path to an .xml file with a train static const char image_message[] = "Required. Path to an image."; /// @brief message for assigning cnn calculation to device -static const char target_device_message[] = "Optional. Specify the target device to infer on (the list of available devices is shown " - "below). " - "Default value is CPU. Use \"-d HETERO:\" format to specify " - "HETERO plugin. " - "Sample will look for a suitable plugin for device specified."; +static const char target_device_message[] = + "Optional. Specify the target device to infer on (the list of available devices is shown " + "below). " + "Default value is CPU. Use \"-d HETERO:\" format to specify " + "HETERO plugin. " + "Sample will look for a suitable plugin for device specified."; /// @brief message for plugin custom kernels desc static const char custom_plugin_cfg_message[] = "Required for GPU, MYRIAD, HDDL custom kernels. 
" diff --git a/inference-engine/samples/speech_sample/fileutils.cpp b/inference-engine/samples/speech_sample/fileutils.cpp index 102cca25297..d661443d541 100644 --- a/inference-engine/samples/speech_sample/fileutils.cpp +++ b/inference-engine/samples/speech_sample/fileutils.cpp @@ -4,7 +4,10 @@ #include "fileutils.hpp" -void ArkFile::GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, uint32_t* ptrNumArrays, uint32_t* ptrNumMemoryBytes) { +void ArkFile::GetFileInfo(const char* fileName, + uint32_t numArrayToFindSize, + uint32_t* ptrNumArrays, + uint32_t* ptrNumMemoryBytes) { uint32_t numArrays = 0; uint32_t numMemoryBytes = 0; @@ -40,8 +43,13 @@ void ArkFile::GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, uin *ptrNumMemoryBytes = numMemoryBytes; } -void ArkFile::LoadFile(const char* fileName, uint32_t arrayIndex, std::string& ptrName, std::vector& memory, uint32_t* ptrNumRows, - uint32_t* ptrNumColumns, uint32_t* ptrNumBytesPerElement) { +void ArkFile::LoadFile(const char* fileName, + uint32_t arrayIndex, + std::string& ptrName, + std::vector& memory, + uint32_t* ptrNumRows, + uint32_t* ptrNumColumns, + uint32_t* ptrNumBytesPerElement) { std::ifstream in_file(fileName, std::ios::binary); if (in_file.good()) { uint32_t i = 0; @@ -64,7 +72,8 @@ void ArkFile::LoadFile(const char* fileName, uint32_t arrayIndex, std::string& p std::getline(in_file, ptrName, '\0'); // read variable length name followed by space and NUL std::getline(in_file, line, '\4'); // read "BFM" followed by space and control-D if (line.compare("BFM ") != 0) { - throw std::runtime_error(std::string("Cannot find array specifier in file %s in LoadFile()!\n") + fileName); + throw std::runtime_error(std::string("Cannot find array specifier in file %s in LoadFile()!\n") + + fileName); } in_file.read(reinterpret_cast(ptrNumRows), sizeof(uint32_t)); // read number of rows std::getline(in_file, line, '\4'); // read control-D @@ -80,7 +89,12 @@ void ArkFile::LoadFile(const char* fileName, uint32_t arrayIndex, std::string& p *ptrNumBytesPerElement = sizeof(float); } -void ArkFile::SaveFile(const char* fileName, bool shouldAppend, std::string name, void* ptrMemory, uint32_t numRows, uint32_t numColumns) { +void ArkFile::SaveFile(const char* fileName, + bool shouldAppend, + std::string name, + void* ptrMemory, + uint32_t numRows, + uint32_t numColumns) { std::ios_base::openmode mode = std::ios::binary; if (shouldAppend) { mode |= std::ios::app; @@ -101,7 +115,10 @@ void ArkFile::SaveFile(const char* fileName, bool shouldAppend, std::string name } } -void NumpyFile::GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, uint32_t* ptrNumArrays, uint32_t* ptrNumMemoryBytes) { +void NumpyFile::GetFileInfo(const char* fileName, + uint32_t numArrayToFindSize, + uint32_t* ptrNumArrays, + uint32_t* ptrNumMemoryBytes) { uint32_t numArrays = 0; uint32_t numMemoryBytes = 0; @@ -122,8 +139,13 @@ void NumpyFile::GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, u } } -void NumpyFile::LoadFile(const char* fileName, uint32_t arrayIndex, std::string& ptrName, std::vector& memory, uint32_t* ptrNumRows, - uint32_t* ptrNumColumns, uint32_t* ptrNumBytesPerElement) { +void NumpyFile::LoadFile(const char* fileName, + uint32_t arrayIndex, + std::string& ptrName, + std::vector& memory, + uint32_t* ptrNumRows, + uint32_t* ptrNumColumns, + uint32_t* ptrNumBytesPerElement) { cnpy::npz_t my_npz1 = cnpy::npz_load(fileName); auto it = my_npz1.begin(); std::advance(it, arrayIndex); @@ -143,9 +165,14 @@ void 
NumpyFile::LoadFile(const char* fileName, uint32_t arrayIndex, std::string& } } -void NumpyFile::SaveFile(const char* fileName, bool shouldAppend, std::string name, void* ptrMemory, uint32_t numRows, uint32_t numColumns) { +void NumpyFile::SaveFile(const char* fileName, + bool shouldAppend, + std::string name, + void* ptrMemory, + uint32_t numRows, + uint32_t numColumns) { std::string mode; shouldAppend ? mode = "a" : mode = "w"; - std::vector shape {numRows, numColumns}; + std::vector shape{numRows, numColumns}; cnpy::npz_save(fileName, name, reinterpret_cast(ptrMemory), shape, mode); } diff --git a/inference-engine/samples/speech_sample/fileutils.hpp b/inference-engine/samples/speech_sample/fileutils.hpp index b437c0a7af3..9928b7d956d 100644 --- a/inference-engine/samples/speech_sample/fileutils.hpp +++ b/inference-engine/samples/speech_sample/fileutils.hpp @@ -11,12 +11,25 @@ /// @brief Interface to work with files like input and output class BaseFile { public: - virtual void LoadFile(const char* fileName, uint32_t arrayIndex, std::string& ptrName, std::vector& memory, uint32_t* ptrNumRows, - uint32_t* ptrNumColumns, uint32_t* ptrNumBytesPerElement) = 0; + virtual void LoadFile(const char* fileName, + uint32_t arrayIndex, + std::string& ptrName, + std::vector& memory, + uint32_t* ptrNumRows, + uint32_t* ptrNumColumns, + uint32_t* ptrNumBytesPerElement) = 0; - virtual void SaveFile(const char* fileName, bool shouldAppend, std::string name, void* ptrMemory, uint32_t numRows, uint32_t numColumns) = 0; + virtual void SaveFile(const char* fileName, + bool shouldAppend, + std::string name, + void* ptrMemory, + uint32_t numRows, + uint32_t numColumns) = 0; - virtual void GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, uint32_t* ptrNumArrays, uint32_t* ptrNumMemoryBytes) = 0; + virtual void GetFileInfo(const char* fileName, + uint32_t numArrayToFindSize, + uint32_t* ptrNumArrays, + uint32_t* ptrNumMemoryBytes) = 0; }; /// @brief Responsible to work with .ark files @@ -30,7 +43,10 @@ public: * @param ptrNumMemoryBytes pointer to specific number of memory bytes * @return none. */ - void GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, uint32_t* ptrNumArrays, uint32_t* ptrNumMemoryBytes) override; + void GetFileInfo(const char* fileName, + uint32_t numArrayToFindSize, + uint32_t* ptrNumArrays, + uint32_t* ptrNumMemoryBytes) override; /** * @brief Load Kaldi ARK speech feature vector file @@ -43,7 +59,12 @@ public: * @param ptrNumBytesPerElement pointer to number bytes per element (size of float by default) * @return none. */ - void LoadFile(const char* fileName, uint32_t arrayIndex, std::string& ptrName, std::vector& memory, uint32_t* ptrNumRows, uint32_t* ptrNumColumns, + void LoadFile(const char* fileName, + uint32_t arrayIndex, + std::string& ptrName, + std::vector& memory, + uint32_t* ptrNumRows, + uint32_t* ptrNumColumns, uint32_t* ptrNumBytesPerElement) override; /** @@ -56,7 +77,12 @@ public: * @param numColumns number of columns * @return none. */ - void SaveFile(const char* fileName, bool shouldAppend, std::string name, void* ptrMemory, uint32_t numRows, uint32_t numColumns) override; + void SaveFile(const char* fileName, + bool shouldAppend, + std::string name, + void* ptrMemory, + uint32_t numRows, + uint32_t numColumns) override; }; /// @brief Responsible to work with .npz files @@ -70,7 +96,10 @@ public: * @param ptrNumMemoryBytes pointer to specific number of memory bytes * @return none. 
*/ - void GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, uint32_t* ptrNumArrays, uint32_t* ptrNumMemoryBytes) override; + void GetFileInfo(const char* fileName, + uint32_t numArrayToFindSize, + uint32_t* ptrNumArrays, + uint32_t* ptrNumMemoryBytes) override; /** * @brief Load Numpy* uncompressed NPZ speech feature vector file @@ -83,7 +112,12 @@ public: * @param ptrNumBytesPerElement pointer to number bytes per element (size of float by default) * @return none. */ - void LoadFile(const char* fileName, uint32_t arrayIndex, std::string& ptrName, std::vector& memory, uint32_t* ptrNumRows, uint32_t* ptrNumColumns, + void LoadFile(const char* fileName, + uint32_t arrayIndex, + std::string& ptrName, + std::vector& memory, + uint32_t* ptrNumRows, + uint32_t* ptrNumColumns, uint32_t* ptrNumBytesPerElement) override; /** @@ -96,5 +130,10 @@ public: * @param numColumns number of columns * @return none. */ - void SaveFile(const char* fileName, bool shouldAppend, std::string name, void* ptrMemory, uint32_t numRows, uint32_t numColumns) override; + void SaveFile(const char* fileName, + bool shouldAppend, + std::string name, + void* ptrMemory, + uint32_t numRows, + uint32_t numColumns) override; }; diff --git a/inference-engine/samples/speech_sample/main.cpp b/inference-engine/samples/speech_sample/main.cpp index 57db61a8e9e..f2366ae7ab9 100644 --- a/inference-engine/samples/speech_sample/main.cpp +++ b/inference-engine/samples/speech_sample/main.cpp @@ -152,7 +152,11 @@ void UpdateScoreError(score_error_t* error, score_error_t* totalError) { * @param numColumns - number columns in score error arrays * @return none. */ -void CompareScores(float* ptrScoreArray, void* ptrRefScoreArray, score_error_t* scoreError, uint32_t numRows, uint32_t numColumns) { +void CompareScores(float* ptrScoreArray, + void* ptrRefScoreArray, + score_error_t* scoreError, + uint32_t numRows, + uint32_t numColumns) { uint32_t numErrors = 0; ClearScoreError(scoreError); @@ -194,31 +198,32 @@ void CompareScores(float* ptrScoreArray, void* ptrRefScoreArray, score_error_t* * @return error */ float StdDevError(score_error_t error) { - return (sqrt(error.sumSquaredError / error.numScores - (error.sumError / error.numScores) * (error.sumError / error.numScores))); + return (sqrt(error.sumSquaredError / error.numScores - + (error.sumError / error.numScores) * (error.sumError / error.numScores))); } #if !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) && !defined(_M_ARM64) - #ifdef _WIN32 - #include - #include - #else +# ifdef _WIN32 +# include +# include +# else - #include +# include - #endif +# endif inline void native_cpuid(unsigned int* eax, unsigned int* ebx, unsigned int* ecx, unsigned int* edx) { size_t level = *eax; - #ifdef _WIN32 +# ifdef _WIN32 int regs[4] = {static_cast(*eax), static_cast(*ebx), static_cast(*ecx), static_cast(*edx)}; __cpuid(regs, level); *eax = static_cast(regs[0]); *ebx = static_cast(regs[1]); *ecx = static_cast(regs[2]); *edx = static_cast(regs[3]); - #else +# else __get_cpuid(level, eax, ebx, ecx, edx); - #endif +# endif } /** @@ -295,8 +300,12 @@ void printReferenceCompareResults(score_error_t const& totalError, size_t frames * @param numberOfFramesOnHw number of frames delivered to GNA HW * @return none. 
*/ -void printPerformanceCounters(std::map const& utterancePerfMap, size_t numberOfFrames, - std::ostream& stream, std::string fullDeviceName, const uint64_t numberOfFramesOnHw) { +void printPerformanceCounters( + std::map const& utterancePerfMap, + size_t numberOfFrames, + std::ostream& stream, + std::string fullDeviceName, + const uint64_t numberOfFramesOnHw) { #if !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) && !defined(_M_ARM64) stream << std::endl << "Performance counts:" << std::endl; stream << std::setw(10) << std::right << "" @@ -340,7 +349,8 @@ void printPerformanceCounters(std::map& perfCounters) { +void getPerformanceCounters(InferenceEngine::InferRequest& request, + std::map& perfCounters) { auto retPerfCounters = request.GetPerformanceCounts(); for (const auto& pair : retPerfCounters) { @@ -356,11 +366,13 @@ void getPerformanceCounters(InferenceEngine::InferRequest& request, std::map const& perfCounters, - std::map& totalPerfCounters, uint64_t& totalRunsOnHw) { + std::map& totalPerfCounters, + uint64_t& totalRunsOnHw) { auto runOnHw = false; for (const auto& pair : perfCounters) { totalPerfCounters[pair.first].realTime_uSec += pair.second.realTime_uSec; - runOnHw |= pair.second.realTime_uSec > 0; // if realTime is above zero, that means that a primitive was executed on the device + runOnHw |= pair.second.realTime_uSec > + 0; // if realTime is above zero, that means that a primitive was executed on the device } totalRunsOnHw += runOnHw; } @@ -380,7 +392,8 @@ std::vector ParseScaleFactors(const std::string& str) { while (getline(stream, outStr, ',')) { auto floatScaleFactor = std::stof(outStr); if (floatScaleFactor <= 0.0f) { - throw std::logic_error("Scale factor for input #" + std::to_string(i) + " (counting from zero) is out of range (must be positive)."); + throw std::logic_error("Scale factor for input #" + std::to_string(i) + + " (counting from zero) is out of range (must be positive)."); } scaleFactorInput.push_back(outStr); i++; @@ -509,7 +522,8 @@ bool ParseAndCheckCommandLine(int argc, char* argv[]) { */ int main(int argc, char* argv[]) { try { - // ------------------------------ Get Inference Engine version ------------------------------------------------------ + // ------------------------------ Get Inference Engine version + // ------------------------------------------------------ slog::info << "InferenceEngine: " << GetInferenceEngineVersion() << slog::endl; // ------------------------------ Parsing and validation of input arguments --------------------------------- @@ -546,7 +560,8 @@ int main(int argc, char* argv[]) { if (numUtterances == 0) { numUtterances = currentNumUtterances; } else if (currentNumUtterances != numUtterances) { - throw std::logic_error("Incorrect input files. Number of utterance must be the same for all input files"); + throw std::logic_error( + "Incorrect input files. Number of utterance must be the same for all input files"); } numBytesThisUtterance.push_back(currentNumBytesThisUtterance); } @@ -574,7 +589,8 @@ int main(int argc, char* argv[]) { std::cout << ie.GetVersions(deviceStr) << std::endl; // ----------------------------------------------------------------------------------------------------- - // --------------------------- Step 2. Read a model in OpenVINO Intermediate Representation (.xml and .bin files) + // --------------------------- Step 2. 
Read a model in OpenVINO Intermediate Representation (.xml and .bin + // files) slog::info << "Loading network files:" << slog::endl << FLAGS_m << slog::endl; uint32_t batchSize = (FLAGS_cw_r > 0 || FLAGS_cw_l > 0) ? 1 : (uint32_t)FLAGS_bs; @@ -586,7 +602,8 @@ int main(int argc, char* argv[]) { // ------------------------------------------------------------------------------------------------- // --------------------------- Set batch size --------------------------------------------------- - /** Set batch size. Unlike in imaging, batching in time (rather than space) is done for speech recognition. **/ + /** Set batch size. Unlike in imaging, batching in time (rather than space) is done for speech recognition. + * **/ network.setBatchSize(batchSize); slog::info << "Batch size is " << std::to_string(network.getBatchSize()) << slog::endl; } @@ -598,8 +615,10 @@ int main(int argc, char* argv[]) { std::map gnaPluginConfig; std::map genericPluginConfig; if (useGna) { - std::string gnaDevice = useHetero ? FLAGS_d.substr(FLAGS_d.find("GNA"), FLAGS_d.find(",") - FLAGS_d.find("GNA")) : FLAGS_d; - gnaPluginConfig[GNAConfigParams::KEY_GNA_DEVICE_MODE] = gnaDevice.find("_") == std::string::npos ? "GNA_AUTO" : gnaDevice; + std::string gnaDevice = + useHetero ? FLAGS_d.substr(FLAGS_d.find("GNA"), FLAGS_d.find(",") - FLAGS_d.find("GNA")) : FLAGS_d; + gnaPluginConfig[GNAConfigParams::KEY_GNA_DEVICE_MODE] = + gnaDevice.find("_") == std::string::npos ? "GNA_AUTO" : gnaDevice; } if (FLAGS_pc) { @@ -608,18 +627,22 @@ int main(int argc, char* argv[]) { if (FLAGS_q.compare("user") == 0) { if (!FLAGS_rg.empty()) { - slog::warn << "Custom scale factor will be ignored - using scale factor from provided imported gna model: " << FLAGS_rg << slog::endl; + slog::warn + << "Custom scale factor will be ignored - using scale factor from provided imported gna model: " + << FLAGS_rg << slog::endl; } else { auto scaleFactorInput = ParseScaleFactors(FLAGS_sf); if (numInputFiles != scaleFactorInput.size()) { - std::string errMessage("Incorrect command line for multiple inputs: " + std::to_string(scaleFactorInput.size()) + - " scale factors provided for " + std::to_string(numInputFiles) + " input files."); + std::string errMessage( + "Incorrect command line for multiple inputs: " + std::to_string(scaleFactorInput.size()) + + " scale factors provided for " + std::to_string(numInputFiles) + " input files."); throw std::logic_error(errMessage); } for (size_t i = 0; i < scaleFactorInput.size(); ++i) { slog::info << "For input " << i << " using scale factor of " << scaleFactorInput[i] << slog::endl; - std::string scaleFactorConfigKey = GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_") + std::to_string(i); + std::string scaleFactorConfigKey = + GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_") + std::to_string(i); gnaPluginConfig[scaleFactorConfigKey] = scaleFactorInput[i]; } } @@ -635,10 +658,19 @@ int main(int argc, char* argv[]) { uint32_t numArrays(0), numBytes(0), numFrames(0), numFrameElements(0), numBytesPerElement(0); file->GetFileInfo(inputFileName, 0, &numArrays, &numBytes); ptrFeatures.resize(numBytes); - file->LoadFile(inputFileName, 0, name, ptrFeatures, &numFrames, &numFrameElements, &numBytesPerElement); - auto floatScaleFactor = ScaleFactorForQuantization(ptrFeatures.data(), MAX_VAL_2B_FEAT, numFrames * numFrameElements); - slog::info << "Using scale factor of " << floatScaleFactor << " calculated from first utterance." 
<< slog::endl; - std::string scaleFactorConfigKey = GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_") + std::to_string(i); + file->LoadFile(inputFileName, + 0, + name, + ptrFeatures, + &numFrames, + &numFrameElements, + &numBytesPerElement); + auto floatScaleFactor = + ScaleFactorForQuantization(ptrFeatures.data(), MAX_VAL_2B_FEAT, numFrames * numFrameElements); + slog::info << "Using scale factor of " << floatScaleFactor << " calculated from first utterance." + << slog::endl; + std::string scaleFactorConfigKey = + GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_") + std::to_string(i); gnaPluginConfig[scaleFactorConfigKey] = std::to_string(floatScaleFactor); } } @@ -652,7 +684,8 @@ int main(int argc, char* argv[]) { gnaPluginConfig[GNAConfigParams::KEY_GNA_EXEC_TARGET] = FLAGS_exec_target; gnaPluginConfig[GNAConfigParams::KEY_GNA_COMPILE_TARGET] = FLAGS_compile_target; - gnaPluginConfig[GNAConfigParams::KEY_GNA_LIB_N_THREADS] = std::to_string((FLAGS_cw_r > 0 || FLAGS_cw_l > 0) ? 1 : FLAGS_nthreads); + gnaPluginConfig[GNAConfigParams::KEY_GNA_LIB_N_THREADS] = + std::to_string((FLAGS_cw_r > 0 || FLAGS_cw_l > 0) ? 1 : FLAGS_nthreads); gnaPluginConfig[GNA_CONFIG_KEY(COMPACT_MODE)] = CONFIG_VALUE(NO); gnaPluginConfig[GNA_CONFIG_KEY(PWL_MAX_ERROR_PERCENT)] = std::to_string(FLAGS_pwl_me); // ----------------------------------------------------------------------------------------------------- @@ -678,7 +711,8 @@ int main(int argc, char* argv[]) { for (const auto& outBlobName : output_names) { int pos_layer = outBlobName.rfind(":"); if (pos_layer == -1) { - throw std::logic_error(std::string("Output ") + std::string(outBlobName) + std::string(" doesn't have a port")); + throw std::logic_error(std::string("Output ") + std::string(outBlobName) + + std::string(" doesn't have a port")); } outputs.push_back(outBlobName.substr(0, pos_layer)); try { @@ -728,8 +762,9 @@ int main(int argc, char* argv[]) { } // --------------------------------------------------------------------------------------------------------- - // --------------------------- Step 3. Configure input & output -------------------------------------------------- - // This step executed after creating infer request to check input/output layers mentioned via -iname and -oname args + // --------------------------- Step 3. 
Configure input & output + // -------------------------------------------------- This step executed after creating infer request to check + // input/output layers mentioned via -iname and -oname args // --------------------------- Prepare input blobs ----------------------------------------------------- /** Taking information about all topology inputs **/ ConstInputsDataMap cInputInfo = executableNet.GetInputsInfo(); @@ -741,8 +776,8 @@ int main(int argc, char* argv[]) { std::vector inputNameBlobs = ConvertStrToVector(FLAGS_iname); if (inputNameBlobs.size() != cInputInfo.size()) { std::string errMessage(std::string("Number of network inputs ( ") + std::to_string(cInputInfo.size()) + - " ) is not equal to the number of inputs entered in the -iname argument ( " + std::to_string(inputNameBlobs.size()) + - " )."); + " ) is not equal to the number of inputs entered in the -iname argument ( " + + std::to_string(inputNameBlobs.size()) + " )."); throw std::logic_error(errMessage); } for (const auto& input : inputNameBlobs) { @@ -842,9 +877,12 @@ int main(int argc, char* argv[]) { uint32_t numFrames(0), n(0); std::vector numFrameElementsInput; - uint32_t numFramesReference(0), numFrameElementsReference(0), numBytesPerElementReference(0), numBytesReferenceScoreThisUtterance(0); - auto dims = outputs.empty() ? cOutputInfo.rbegin()->second->getDims() : cOutputInfo[outputs[next_output]]->getDims(); - const auto numScoresPerFrame = std::accumulate(std::begin(dims), std::end(dims), size_t {1}, std::multiplies()); + uint32_t numFramesReference(0), numFrameElementsReference(0), numBytesPerElementReference(0), + numBytesReferenceScoreThisUtterance(0); + auto dims = outputs.empty() ? cOutputInfo.rbegin()->second->getDims() + : cOutputInfo[outputs[next_output]]->getDims(); + const auto numScoresPerFrame = + std::accumulate(std::begin(dims), std::end(dims), size_t{1}, std::multiplies()); slog::info << "Number scores per frame : " << numScoresPerFrame << slog::endl; @@ -856,13 +894,18 @@ int main(int argc, char* argv[]) { uint32_t currentNumFrames(0), currentNumFrameElementsInput(0), currentNumBytesPerElementInput(0); file->GetFileInfo(inputFilename, utteranceIndex, &n, &numBytesThisUtterance[i]); ptrUtterance.resize(numBytesThisUtterance[i]); - file->LoadFile(inputFilename, utteranceIndex, uttName, ptrUtterance, ¤tNumFrames, ¤tNumFrameElementsInput, + file->LoadFile(inputFilename, + utteranceIndex, + uttName, + ptrUtterance, + ¤tNumFrames, + ¤tNumFrameElementsInput, ¤tNumBytesPerElementInput); if (numFrames == 0) { numFrames = currentNumFrames; } else if (numFrames != currentNumFrames) { - std::string errMessage("Number of frames in input files is different: " + std::to_string(numFrames) + " and " + - std::to_string(currentNumFrames)); + std::string errMessage("Number of frames in input files is different: " + + std::to_string(numFrames) + " and " + std::to_string(currentNumFrames)); throw std::logic_error(errMessage); } @@ -873,7 +916,8 @@ int main(int argc, char* argv[]) { int i = 0; for (auto& ptrInputBlob : ptrInputBlobs) { if (ptrInputBlob->size() != numFrameElementsInput[i++] * batchSize) { - throw std::logic_error("network input size(" + std::to_string(ptrInputBlob->size()) + ") mismatch to input file size (" + + throw std::logic_error("network input size(" + std::to_string(ptrInputBlob->size()) + + ") mismatch to input file size (" + std::to_string(numFrameElementsInput[i - 1] * batchSize) + ")"); } } @@ -891,10 +935,18 @@ int main(int argc, char* argv[]) { throw std::logic_error("Invalid Reference 
Scores file"); } std::string refUtteranceName; - fileReferenceScores->GetFileInfo(reference_name_files[next_output].c_str(), utteranceIndex, &n, &numBytesReferenceScoreThisUtterance); + fileReferenceScores->GetFileInfo(reference_name_files[next_output].c_str(), + utteranceIndex, + &n, + &numBytesReferenceScoreThisUtterance); ptrReferenceScores.resize(numBytesReferenceScoreThisUtterance); - fileReferenceScores->LoadFile(reference_name_files[next_output].c_str(), utteranceIndex, refUtteranceName, ptrReferenceScores, - &numFramesReference, &numFrameElementsReference, &numBytesPerElementReference); + fileReferenceScores->LoadFile(reference_name_files[next_output].c_str(), + utteranceIndex, + refUtteranceName, + ptrReferenceScores, + &numFramesReference, + &numFrameElementsReference, + &numBytesPerElementReference); } double totalTime = 0.0; @@ -914,7 +966,7 @@ int main(int argc, char* argv[]) { size_t frameIndex = 0; uint32_t numFramesFile = numFrames; numFrames += FLAGS_cw_l + FLAGS_cw_r; - uint32_t numFramesThisBatch {batchSize}; + uint32_t numFramesThisBatch{batchSize}; auto t0 = Time::now(); auto t1 = t0; @@ -934,11 +986,13 @@ int main(int argc, char* argv[]) { if (frameIndex == numFrames) { numFramesThisBatch = 1; } else { - numFramesThisBatch = (numFrames - frameIndex < batchSize) ? (numFrames - frameIndex) : batchSize; + numFramesThisBatch = + (numFrames - frameIndex < batchSize) ? (numFrames - frameIndex) : batchSize; } /* waits until inference result becomes available */ if (inferRequest.frameIndex != -1) { - StatusCode code = inferRequest.inferRequest.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); + StatusCode code = + inferRequest.inferRequest.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); if (code != StatusCode::OK) { if (!useHetero) @@ -946,23 +1000,27 @@ int main(int argc, char* argv[]) { if (code != StatusCode::INFER_NOT_STARTED) continue; } - // --------------------------- Step 8. Process output part 1 ------------------------------------------------------- + // --------------------------- Step 8. 
Process output part 1 + // ------------------------------------------------------- ConstOutputsDataMap newOutputInfo; if (inferRequest.frameIndex >= 0) { if (!FLAGS_o.empty()) { /* Prepare output data for save to file in future */ - outputFrame = &ptrScores.front() + numScoresPerFrame * sizeof(float) * (inferRequest.frameIndex); + outputFrame = &ptrScores.front() + + numScoresPerFrame * sizeof(float) * (inferRequest.frameIndex); if (!outputs.empty()) { newOutputInfo[outputs[next_output]] = cOutputInfo[outputs[next_output]]; } else { newOutputInfo = cOutputInfo; } - Blob::Ptr outputBlob = inferRequest.inferRequest.GetBlob(newOutputInfo.rbegin()->first); + Blob::Ptr outputBlob = + inferRequest.inferRequest.GetBlob(newOutputInfo.rbegin()->first); MemoryBlob::CPtr moutput = as(outputBlob); if (!moutput) { - throw std::logic_error("We expect output to be inherited from MemoryBlob, " - "but in fact we were not able to cast output to MemoryBlob"); + throw std::logic_error( + "We expect output to be inherited from MemoryBlob, " + "but in fact we were not able to cast output to MemoryBlob"); } // locked memory holder should be alive all time while access to its buffer happens auto moutputHolder = moutput->rmap(); @@ -976,17 +1034,23 @@ int main(int argc, char* argv[]) { } else { newOutputInfo = cOutputInfo; } - Blob::Ptr outputBlob = inferRequest.inferRequest.GetBlob(newOutputInfo.rbegin()->first); + Blob::Ptr outputBlob = + inferRequest.inferRequest.GetBlob(newOutputInfo.rbegin()->first); MemoryBlob::CPtr moutput = as(outputBlob); if (!moutput) { - throw std::logic_error("We expect output to be inherited from MemoryBlob, " - "but in fact we were not able to cast output to MemoryBlob"); + throw std::logic_error( + "We expect output to be inherited from MemoryBlob, " + "but in fact we were not able to cast output to MemoryBlob"); } // locked memory holder should be alive all time while access to its buffer happens auto moutputHolder = moutput->rmap(); - CompareScores(moutputHolder.as(), - &ptrReferenceScores[inferRequest.frameIndex * numFrameElementsReference * numBytesPerElementReference], - &frameError, inferRequest.numFramesThisBatch, numFrameElementsReference); + CompareScores( + moutputHolder.as(), + &ptrReferenceScores[inferRequest.frameIndex * numFrameElementsReference * + numBytesPerElementReference], + &frameError, + inferRequest.numFramesThisBatch, + numFrameElementsReference); UpdateScoreError(&frameError, &totalError); } if (FLAGS_pc) { @@ -1004,7 +1068,8 @@ int main(int argc, char* argv[]) { continue; } - // --------------------------- Step 6. Prepare input -------------------------------------------------------- + // --------------------------- Step 6. 
Prepare input + // -------------------------------------------------------- ptrInputBlobs.clear(); if (FLAGS_iname.empty()) { for (auto& input : cInputInfo) { @@ -1026,7 +1091,8 @@ int main(int argc, char* argv[]) { for (size_t i = 0; i < numInputFiles; ++i) { MemoryBlob::Ptr minput = as(ptrInputBlobs[i]); if (!minput) { - std::string errMessage("We expect ptrInputBlobs[" + std::to_string(i) + "] to be inherited from MemoryBlob, " + + std::string errMessage("We expect ptrInputBlobs[" + std::to_string(i) + + "] to be inherited from MemoryBlob, " + "but in fact we were not able to cast input blob to MemoryBlob"); throw std::logic_error(errMessage); } @@ -1050,8 +1116,9 @@ int main(int argc, char* argv[]) { if (idx > 0 && idx < static_cast(numFramesFile)) { inputFrame[j] += sizeof(float) * numFrameElementsInput[j] * numFramesThisBatch; } else if (idx >= static_cast(numFramesFile)) { - inputFrame[j] = - &ptrUtterances[j].front() + (numFramesFile - 1) * sizeof(float) * numFrameElementsInput[j] * numFramesThisBatch; + inputFrame[j] = &ptrUtterances[j].front() + (numFramesFile - 1) * sizeof(float) * + numFrameElementsInput[j] * + numFramesThisBatch; } else if (idx <= 0) { inputFrame[j] = &ptrUtterances[j].front(); } @@ -1079,7 +1146,8 @@ int main(int argc, char* argv[]) { } // ----------------------------------------------------------------------------------------------------- - // --------------------------- Step 8. Process output part 2 ------------------------------------------------------- + // --------------------------- Step 8. Process output part 2 + // ------------------------------------------------------- if (!FLAGS_o.empty()) { auto exOutputScoresFile = fileExt(FLAGS_o); @@ -1092,16 +1160,26 @@ int main(int argc, char* argv[]) { } /* Save output data to file */ bool shouldAppend = (utteranceIndex == 0) ? false : true; - fileOutput->SaveFile(output_name_files[next_output].c_str(), shouldAppend, uttName, &ptrScores.front(), numFramesFile, numScoresPerFrame); + fileOutput->SaveFile(output_name_files[next_output].c_str(), + shouldAppend, + uttName, + &ptrScores.front(), + numFramesFile, + numScoresPerFrame); } /** Show performance results **/ std::cout << "Total time in Infer (HW and SW):\t" << totalTime << " ms" << std::endl; std::cout << "Frames in utterance:\t\t\t" << numFrames << " frames" << std::endl; - std::cout << "Average Infer time per frame:\t\t" << totalTime / static_cast(numFrames) << " ms" << std::endl; + std::cout << "Average Infer time per frame:\t\t" << totalTime / static_cast(numFrames) << " ms" + << std::endl; if (FLAGS_pc) { // print performance results - printPerformanceCounters(utterancePerfMap, frameIndex, std::cout, getFullDeviceName(ie, FLAGS_d), totalNumberOfRunsOnHw); + printPerformanceCounters(utterancePerfMap, + frameIndex, + std::cout, + getFullDeviceName(ie, FLAGS_d), + totalNumberOfRunsOnHw); } if (!FLAGS_r.empty()) { // print statistical score error diff --git a/inference-engine/samples/speech_sample/speech_sample.hpp b/inference-engine/samples/speech_sample/speech_sample.hpp index 66d3b24a4c5..8a15b5669f8 100644 --- a/inference-engine/samples/speech_sample/speech_sample.hpp +++ b/inference-engine/samples/speech_sample/speech_sample.hpp @@ -14,24 +14,27 @@ static const char help_message[] = "Print a usage message."; /// @brief message for images argument -static const char input_message[] = "Required. Paths to input files. Example of usage: or or ."; +static const char input_message[] = + "Required. Paths to input files. 
Example of usage: or or ."; /// @brief message for model argument static const char model_message[] = "Required. Path to an .xml file with a trained model (required if -rg is missing)."; /// @brief message for assigning cnn calculation to device -static const char target_device_message[] = "Optional. Specify a target device to infer on. CPU, GPU, MYRIAD, GNA_AUTO, GNA_HW, " - "GNA_HW_WITH_SW_FBACK, GNA_SW_FP32, " - "GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU" - " as a secondary (e.g. HETERO:GNA,CPU) are supported. " - "The sample will look for a suitable plugin for device specified."; +static const char target_device_message[] = + "Optional. Specify a target device to infer on. CPU, GPU, MYRIAD, GNA_AUTO, GNA_HW, " + "GNA_HW_WITH_SW_FBACK, GNA_SW_FP32, " + "GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU" + " as a secondary (e.g. HETERO:GNA,CPU) are supported. " + "The sample will look for a suitable plugin for device specified."; /// @brief message for execution target -static const char execution_target_message[] = "Optional. Specify GNA execution target generation. " - "May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. " - "By default, generation corresponds to the GNA HW available in the system " - "or the latest fully supported generation by the software. " - "See the GNA Plugin's GNA_EXEC_TARGET config option description."; +static const char execution_target_message[] = + "Optional. Specify GNA execution target generation. " + "May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. " + "By default, generation corresponds to the GNA HW available in the system " + "or the latest fully supported generation by the software. " + "See the GNA Plugin's GNA_EXEC_TARGET config option description."; /// @brief message for execution target static const char compile_target_message[] = "Optional. Specify GNA compile target generation. " @@ -48,34 +51,41 @@ static const char custom_cpu_library_message[] = "Required for CPU plugin custom "Absolute path to a shared library with the kernels implementations."; /// @brief message for score output argument -static const char output_message[] = "Optional. Output file name to save scores. Example of usage: or "; +static const char output_message[] = + "Optional. Output file name to save scores. Example of usage: or "; /// @brief message for reference score file argument -static const char reference_score_message[] = "Optional. Read reference score file and compare scores. Example of usage: or "; +static const char reference_score_message[] = + "Optional. Read reference score file and compare scores. Example of usage: or "; /// @brief message for read GNA model argument -static const char read_gna_model_message[] = "Read GNA model from file using path/filename provided (required if -m is missing)."; +static const char read_gna_model_message[] = + "Read GNA model from file using path/filename provided (required if -m is missing)."; /// @brief message for write GNA model argument static const char write_gna_model_message[] = "Optional. Write GNA model to file using path/filename provided."; /// @brief message for write GNA embedded model argument -static const char write_embedded_model_message[] = "Optional. Write GNA embedded model to file using path/filename provided."; +static const char write_embedded_model_message[] = + "Optional. 
Write GNA embedded model to file using path/filename provided."; /// @brief message for write GNA embedded model generation argument -static const char write_embedded_model_generation_message[] = "Optional. GNA generation configuration string for embedded export." - "Can be GNA1 (default) or GNA3."; +static const char write_embedded_model_generation_message[] = + "Optional. GNA generation configuration string for embedded export." + "Can be GNA1 (default) or GNA3."; /// @brief message for quantization argument -static const char quantization_message[] = "Optional. Input quantization mode: static (default), dynamic, or user (use with -sf)."; +static const char quantization_message[] = + "Optional. Input quantization mode: static (default), dynamic, or user (use with -sf)."; /// @brief message for quantization bits argument static const char quantization_bits_message[] = "Optional. Weight bits for quantization: 8 or 16 (default)"; /// @brief message for scale factor argument -static const char scale_factor_message[] = "Optional. User-specified input scale factor for quantization (use with -q user). " - "If the network contains multiple inputs, provide scale factors by separating them with " - "commas."; +static const char scale_factor_message[] = + "Optional. User-specified input scale factor for quantization (use with -q user). " + "If the network contains multiple inputs, provide scale factors by separating them with " + "commas."; /// @brief message for batch size argument static const char batch_size_message[] = "Optional. Batch size 1-8 (default 1)"; @@ -85,14 +95,16 @@ static const char infer_num_threads_message[] = "Optional. Number of threads to " inference requests on the GNA."; /// @brief message for left context window argument -static const char context_window_message_l[] = "Optional. Number of frames for left context windows (default is 0). " - "Works only with context window networks." - " If you use the cw_l or cw_r flag, then batch size and nthreads arguments are ignored."; +static const char context_window_message_l[] = + "Optional. Number of frames for left context windows (default is 0). " + "Works only with context window networks." + " If you use the cw_l or cw_r flag, then batch size and nthreads arguments are ignored."; /// @brief message for right context window argument -static const char context_window_message_r[] = "Optional. Number of frames for right context windows (default is 0). " - "Works only with context window networks." - " If you use the cw_r or cw_l flag, then batch size and nthreads arguments are ignored."; +static const char context_window_message_r[] = + "Optional. Number of frames for right context windows (default is 0). " + "Works only with context window networks." + " If you use the cw_r or cw_l flag, then batch size and nthreads arguments are ignored."; /// @brief message for output layer names static const char output_layer_names_message[] = "Optional. Layer names for output blobs. 
" diff --git a/inference-engine/samples/style_transfer_sample/main.cpp b/inference-engine/samples/style_transfer_sample/main.cpp index 2ffa42fce4a..9224232ed4b 100644 --- a/inference-engine/samples/style_transfer_sample/main.cpp +++ b/inference-engine/samples/style_transfer_sample/main.cpp @@ -89,7 +89,8 @@ int main(int argc, char* argv[]) { // Config for device plugin custom extension is loaded from an .xml // description ie.SetConfig({{PluginConfigParams::KEY_CONFIG_FILE, FLAGS_c}}, "GPU"); - slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c << slog::endl; + slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c + << slog::endl; } // ----------------------------------------------------------------------------------------------------- @@ -130,8 +131,8 @@ int main(int argc, char* argv[]) { continue; } /** Store image data **/ - std::shared_ptr data( - reader->getData(inputInfoItem.second->getTensorDesc().getDims()[3], inputInfoItem.second->getTensorDesc().getDims()[2])); + std::shared_ptr data(reader->getData(inputInfoItem.second->getTensorDesc().getDims()[3], + inputInfoItem.second->getTensorDesc().getDims()[2])); if (data.get() != nullptr) { imagesData.push_back(data); } @@ -151,7 +152,8 @@ int main(int argc, char* argv[]) { // BlobMap outputBlobs; std::string firstOutputName; - const float meanValues[] = {static_cast(FLAGS_mean_val_r), static_cast(FLAGS_mean_val_g), + const float meanValues[] = {static_cast(FLAGS_mean_val_r), + static_cast(FLAGS_mean_val_g), static_cast(FLAGS_mean_val_b)}; for (auto& item : outputInfo) { @@ -241,16 +243,20 @@ int main(int argc, char* argv[]) { size_t W = moutput->getTensorDesc().getDims()[3]; size_t nPixels = W * H; - slog::info << "Output size [N,C,H,W]: " << num_images << ", " << num_channels << ", " << H << ", " << W << slog::endl; + slog::info << "Output size [N,C,H,W]: " << num_images << ", " << num_channels << ", " << H << ", " << W + << slog::endl; { std::vector data_img(nPixels * num_channels); for (size_t n = 0; n < num_images; n++) { for (size_t i = 0; i < nPixels; i++) { - data_img[i * num_channels] = static_cast(output_data[i + n * nPixels * num_channels] + meanValues[0]); - data_img[i * num_channels + 1] = static_cast(output_data[(i + nPixels) + n * nPixels * num_channels] + meanValues[1]); - data_img[i * num_channels + 2] = static_cast(output_data[(i + 2 * nPixels) + n * nPixels * num_channels] + meanValues[2]); + data_img[i * num_channels] = + static_cast(output_data[i + n * nPixels * num_channels] + meanValues[0]); + data_img[i * num_channels + 1] = + static_cast(output_data[(i + nPixels) + n * nPixels * num_channels] + meanValues[1]); + data_img[i * num_channels + 2] = + static_cast(output_data[(i + 2 * nPixels) + n * nPixels * num_channels] + meanValues[2]); float temp = data_img[i * num_channels]; data_img[i * num_channels] = data_img[i * num_channels + 2]; diff --git a/inference-engine/samples/style_transfer_sample/style_transfer_sample.h b/inference-engine/samples/style_transfer_sample/style_transfer_sample.h index c787e0470b5..4fc8dd9069c 100644 --- a/inference-engine/samples/style_transfer_sample/style_transfer_sample.h +++ b/inference-engine/samples/style_transfer_sample/style_transfer_sample.h @@ -20,11 +20,12 @@ static const char image_message[] = "Path to a folder with images or paths to im static const char model_message[] = "Required. 
Path to an .xml file with a trained model."; /// @brief message for assigning cnn calculation to device -static const char target_device_message[] = "Optional. Specify the target device to infer on (the list of available devices is shown " - "below). " - "Default value is CPU. Use \"-d HETERO:\" format to specify " - "HETERO plugin. " - "Sample will look for a suitable plugin for device specified"; +static const char target_device_message[] = + "Optional. Specify the target device to infer on (the list of available devices is shown " + "below). " + "Default value is CPU. Use \"-d HETERO:\" format to specify " + "HETERO plugin. " + "Sample will look for a suitable plugin for device specified"; /// @brief message for plugin custom kernels desc static const char custom_plugin_cfg_message[] = "Required for GPU, MYRIAD, HDDL custom kernels. " @@ -35,7 +36,8 @@ static const char custom_ex_library_message[] = "Required for CPU plugin custom "Absolute path to a shared library with the kernels implementations."; /// @brief message for mean values arguments -static const char preprocess_data_message[] = "Mean values. Required if the model needs mean values for preprocessing and postprocessing."; +static const char preprocess_data_message[] = + "Mean values. Required if the model needs mean values for preprocessing and postprocessing."; /// @brief Define flag for showing help message
DEFINE_bool(h, false, help_message); From 22f29f092ce9e83f931508586ebcb82ad9937a6d Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 11 Aug 2021 15:32:05 +0300 Subject: [PATCH 11/19] Remove nolint comments (#7020) * Remove nolint comments * Fixed comment --- .../src/inference_engine/CMakeLists.txt | 4 +- .../include/ie/details/ie_pre_allocator.hpp | 4 +- .../src/inference_engine/include/ie/ie_api.h | 2 +- .../inference_engine/include/ie/ie_common.h | 2 +- .../include/ie/ie_parallel.hpp | 2 +- .../include/ie/ie_parameter.hpp | 4 +- .../include/ie/ie_precision.hpp | 2 +- .../src/inference_engine/src/file_utils.cpp | 108 +++++++++--------- 8 files changed, 65 insertions(+), 63 deletions(-) diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index 609b8a781a3..e79a5709366 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -142,8 +142,6 @@ if (TBBBIND_2_4_FOUND) target_link_libraries(${TARGET_NAME}_obj PRIVATE ${TBBBIND_2_4_IMPORTED_TARGETS}) endif() -add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}_obj) - # Create shared library file from object library add_library(${TARGET_NAME} SHARED @@ -152,6 +150,8 @@ add_library(${TARGET_NAME} SHARED ${vs_version_file} $) +add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${IE_STATIC_DEPENDENT_FILES} ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${PUBLIC_HEADERS}) + ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} INCLUDE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/openvino" ADDITIONAL_INCLUDE_DIRECTORIES $) diff --git a/inference-engine/src/inference_engine/include/ie/details/ie_pre_allocator.hpp b/inference-engine/src/inference_engine/include/ie/details/ie_pre_allocator.hpp index 949a31b646d..12428f4078a 100644 --- a/inference-engine/src/inference_engine/include/ie/details/ie_pre_allocator.hpp +++ b/inference-engine/src/inference_engine/include/ie/details/ie_pre_allocator.hpp @@ -37,7 +37,7 @@ public: /** * @brief The PreAllocator class does not utilize this function */ - void unlock(void*) noexcept override {} // NOLINT + void unlock(void*) noexcept override {} /** * @brief Returns a pointer to preallocated memory @@ -55,7 +55,7 @@ public: * @brief The PreAllocator class cannot release the handle * @return false */ - bool free(void*) noexcept override { // NOLINT + bool free(void*) noexcept override { return false; } }; diff --git a/inference-engine/src/inference_engine/include/ie/ie_api.h b/inference-engine/src/inference_engine/include/ie/ie_api.h index 538795b32be..eea1cc8dfe0 100644 --- a/inference-engine/src/inference_engine/include/ie/ie_api.h +++ b/inference-engine/src/inference_engine/include/ie/ie_api.h @@ -119,7 +119,7 @@ IE_DO_PRAGMA(warning(disable : 1786)) # else # define INFERENCE_PLUGIN_API(type) extern "C" type # endif -#elif (__GNUC__ >= 4) // NOLINT +#elif (__GNUC__ >= 4) # ifdef IMPLEMENT_INFERENCE_ENGINE_PLUGIN # define INFERENCE_PLUGIN_API(type) extern "C" __attribute__((visibility("default"))) type # else diff --git a/inference-engine/src/inference_engine/include/ie/ie_common.h b/inference-engine/src/inference_engine/include/ie/ie_common.h index 0a4a7857437..cb6ead29a92 100644 --- a/inference-engine/src/inference_engine/include/ie/ie_common.h +++ b/inference-engine/src/inference_engine/include/ie/ie_common.h @@ -439,7 +439,7 @@ struct ThrowNow final { #ifdef NDEBUG # define IE_ASSERT(EXPRESSION) \ if (!(EXPRESSION)) \ - IE_THROW(GeneralError) << " AssertionFailed: 
" << #EXPRESSION // NOLINT + IE_THROW(GeneralError) << " AssertionFailed: " << #EXPRESSION #else /** * @private diff --git a/inference-engine/src/inference_engine/include/ie/ie_parallel.hpp b/inference-engine/src/inference_engine/include/ie/ie_parallel.hpp index 2411f077663..95494d46951 100644 --- a/inference-engine/src/inference_engine/include/ie/ie_parallel.hpp +++ b/inference-engine/src/inference_engine/include/ie/ie_parallel.hpp @@ -110,7 +110,7 @@ inline int parallel_get_env_threads() { } #elif IE_THREAD == IE_THREAD_SEQ -# include // NOLINT +# include inline int parallel_get_env_threads() { return 1; } diff --git a/inference-engine/src/inference_engine/include/ie/ie_parameter.hpp b/inference-engine/src/inference_engine/include/ie/ie_parameter.hpp index 9aedae54cef..f68637b5e0d 100644 --- a/inference-engine/src/inference_engine/include/ie/ie_parameter.hpp +++ b/inference-engine/src/inference_engine/include/ie/ie_parameter.hpp @@ -62,7 +62,7 @@ public: template ::type, Parameter>::value && !std::is_abstract::type>::value>::type> - Parameter(T&& parameter) { // NOLINT + Parameter(T&& parameter) { static_assert(!std::is_same::type, Parameter>::value, "To prevent recursion"); ptr = new RealData::type>(std::forward(parameter)); } @@ -72,7 +72,7 @@ public: * * @param str char array */ - Parameter(const char* str) : Parameter(std::string(str)) {} // NOLINT + Parameter(const char* str) : Parameter(std::string(str)) {} /** * @brief Destructor diff --git a/inference-engine/src/inference_engine/include/ie/ie_precision.hpp b/inference-engine/src/inference_engine/include/ie/ie_precision.hpp index 29c84b88eef..9cd256355dc 100644 --- a/inference-engine/src/inference_engine/include/ie/ie_precision.hpp +++ b/inference-engine/src/inference_engine/include/ie/ie_precision.hpp @@ -67,7 +67,7 @@ public: * @brief Constructor with specified precision * @param value A value of ePrecision to create an object from */ - Precision(const Precision::ePrecision value) { // NOLINT + Precision(const Precision::ePrecision value) { precisionInfo = getPrecisionInfo(value); } diff --git a/inference-engine/src/inference_engine/src/file_utils.cpp b/inference-engine/src/inference_engine/src/file_utils.cpp index e31e1fe29c8..3a72787063d 100644 --- a/inference-engine/src/inference_engine/src/file_utils.cpp +++ b/inference-engine/src/inference_engine/src/file_utils.cpp @@ -7,90 +7,91 @@ #include #ifdef __MACH__ -#include -#include +# include +# include #endif #include #include #include + #include "ie_common.h" #ifndef _WIN32 -# include -# include -# include -# ifdef ENABLE_UNICODE_PATH_SUPPORT -# include -# include -# endif +# include +# include +# include +# ifdef ENABLE_UNICODE_PATH_SUPPORT +# include +# include +# endif #else -# if defined(WINAPI_FAMILY) && !WINAPI_PARTITION_DESKTOP -# error "Only WINAPI_PARTITION_DESKTOP is supported, because of GetModuleHandleEx[A|W]" -# endif -# ifndef NOMINMAX -# define NOMINMAX -# endif -# include +# if defined(WINAPI_FAMILY) && !WINAPI_PARTITION_DESKTOP +# error "Only WINAPI_PARTITION_DESKTOP is supported, because of GetModuleHandleEx[A|W]" +# endif +# ifndef NOMINMAX +# define NOMINMAX +# endif +# include #endif #ifdef _WIN32 -#include +# include // Copied from linux libc sys/stat.h: -# define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +# define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR) /// @brief Windows-specific 'mkdir' wrapper -#define makedir(dir) _mkdir(dir) +# define makedir(dir) _mkdir(dir) /// @brief Max length of absolute file path -#define MAX_ABS_PATH _MAX_PATH +# define 
MAX_ABS_PATH _MAX_PATH /// @brief Get absolute file path, returns NULL in case of error -#define get_absolute_path(result, path) _fullpath(result, path.c_str(), MAX_ABS_PATH) +# define get_absolute_path(result, path) _fullpath(result, path.c_str(), MAX_ABS_PATH) /// @brief Windows-specific 'stat' wrapper -#define stat _stat +# define stat _stat #else -#include +# include /// @brief mkdir wrapper -#define makedir(dir) mkdir(dir, 0755) +# define makedir(dir) mkdir(dir, 0755) /// @brief Max length of absolute file path -#define MAX_ABS_PATH PATH_MAX +# define MAX_ABS_PATH PATH_MAX /// @brief Get absolute file path, returns NULL in case of error -#define get_absolute_path(result, path) realpath(path.c_str(), result) +# define get_absolute_path(result, path) realpath(path.c_str(), result) #endif #ifdef ENABLE_UNICODE_PATH_SUPPORT std::string FileUtils::wStringtoMBCSstringChar(const std::wstring& wstr) { -#ifdef _WIN32 - int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL); // NOLINT +# ifdef _WIN32 + int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL); std::string strTo(size_needed, 0); - WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL); // NOLINT + WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL); return strTo; -#else +# else std::wstring_convert> wstring_decoder; return wstring_decoder.to_bytes(wstr); -#endif +# endif } std::wstring FileUtils::multiByteCharToWString(const char* str) { -#ifdef _WIN32 +# ifdef _WIN32 int strSize = static_cast(std::strlen(str)); int size_needed = MultiByteToWideChar(CP_UTF8, 0, str, strSize, NULL, 0); std::wstring wstrTo(size_needed, 0); MultiByteToWideChar(CP_UTF8, 0, str, strSize, &wstrTo[0], size_needed); return wstrTo; -#else +# else std::wstring_convert> wstring_encoder; std::wstring result = wstring_encoder.from_bytes(str); return result; -#endif +# endif } #endif // ENABLE_UNICODE_PATH_SUPPORT @@ -123,7 +124,7 @@ std::string FileUtils::absoluteFilePath(const std::string& filePath) { return absolutePath; } -bool FileUtils::directoryExists(const std::string &path) { +bool FileUtils::directoryExists(const std::string& path) { struct stat sb; if (stat(path.c_str(), &sb) == 0 && S_ISDIR(sb.st_mode)) { @@ -145,8 +146,7 @@ void FileUtils::createDirectoryRecursive(const std::string& dirPath) { int err = makedir(dirPath.c_str()); if (err != 0 && errno != EEXIST) { // TODO: in case of exception it may be needed to remove all created sub-directories - IE_THROW() << "Couldn't create directory [" - << dirPath << "], err=" << strerror(errno) << ")"; + IE_THROW() << "Couldn't create directory [" << dirPath << "], err=" << strerror(errno) << ")"; } } @@ -154,7 +154,7 @@ namespace InferenceEngine { namespace { -template > +template > std::basic_string getPathName(const std::basic_string& s) { size_t i = s.rfind(FileUtils::FileTraits::FileSeparator, s.length()); if (i != std::string::npos) { @@ -171,50 +171,52 @@ static std::string getIELibraryPathA() { CHAR ie_library_path[MAX_PATH]; HMODULE hm = NULL; if (!GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, - reinterpret_cast(getIELibraryPath), &hm)) { + reinterpret_cast(getIELibraryPath), + &hm)) { IE_THROW() << "GetModuleHandle returned " << GetLastError(); } GetModuleFileNameA(hm, (LPSTR)ie_library_path, sizeof(ie_library_path)); return getPathName(std::string(ie_library_path)); 
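The path helpers above reduce to the usual stat()/mkdir() pattern hidden behind the makedir and S_ISDIR wrappers; a minimal POSIX-only sketch of the same idea, with error handling collapsed to a bool, could look like:

#include <cerrno>
#include <string>

#include <sys/stat.h>
#include <sys/types.h>

// Sketch of the directoryExists / createDirectoryRecursive pattern (POSIX variant only).
static bool dir_exists(const std::string& path) {
    struct stat sb;
    return stat(path.c_str(), &sb) == 0 && S_ISDIR(sb.st_mode);
}

static bool ensure_dir_recursive(const std::string& path) {
    if (path.empty() || dir_exists(path))
        return true;
    const size_t pos = path.rfind('/');
    if (pos != std::string::npos && !ensure_dir_recursive(path.substr(0, pos)))
        return false;  // create parent directories before the leaf directory
    return mkdir(path.c_str(), 0755) == 0 || errno == EEXIST;
}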
#elif defined(__APPLE__) || defined(__linux__) -# ifdef USE_STATIC_IE -# ifdef __APPLE__ +# ifdef USE_STATIC_IE +# ifdef __APPLE__ Dl_info info; dladdr(reinterpret_cast(getIELibraryPath), &info); std::string path = getPathName(std::string(info.dli_fname)).c_str(); -# else +# else char result[PATH_MAX]; ssize_t count = readlink("/proc/self/exe", result, PATH_MAX); std::string path = getPathName(std::string(result, (count > 0) ? count : 0)); -# endif // __APPLE__ - return FileUtils::makePath(path, std::string( "lib")); -# else +# endif // __APPLE__ + return FileUtils::makePath(path, std::string("lib")); +# else Dl_info info; dladdr(reinterpret_cast(getIELibraryPath), &info); return getPathName(std::string(info.dli_fname)).c_str(); -# endif // USE_STATIC_IE +# endif // USE_STATIC_IE #else -# error "Unsupported OS" +# error "Unsupported OS" #endif // _WIN32 } #ifdef ENABLE_UNICODE_PATH_SUPPORT std::wstring getIELibraryPathW() { -#ifdef _WIN32 +# ifdef _WIN32 WCHAR ie_library_path[MAX_PATH]; HMODULE hm = NULL; if (!GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, - reinterpret_cast(getIELibraryPath), &hm)) { + reinterpret_cast(getIELibraryPath), + &hm)) { IE_THROW() << "GetModuleHandle returned " << GetLastError(); } GetModuleFileNameW(hm, (LPWSTR)ie_library_path, sizeof(ie_library_path) / sizeof(ie_library_path[0])); return getPathName(std::wstring(ie_library_path)); -#elif defined(__linux__) || defined(__APPLE__) +# elif defined(__linux__) || defined(__APPLE__) return ::FileUtils::multiByteCharToWString(getIELibraryPathA().c_str()); -#else -# error "Unsupported OS" -#endif +# else +# error "Unsupported OS" +# endif } #endif // ENABLE_UNICODE_PATH_SUPPORT From 99f9a90ca55dd6d5bd21a118df02988c49b70d6d Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 11 Aug 2021 16:15:53 +0300 Subject: [PATCH 12/19] Fix code style artifacts (#6997) * Fixed artifacts * Break code style * Revert "Break code style" This reverts commit 71ee638cd0244f8f4e9d26f81b5ca206be8c010a. * Added -j8 for fix_all --- .github/workflows/code_style.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml index 29ec8446c48..8f30c986361 100644 --- a/.github/workflows/code_style.yml +++ b/.github/workflows/code_style.yml @@ -34,7 +34,7 @@ jobs: - name: Create code style diff if: failure() run: | - cmake --build . --target clang_format_fix_all + cmake --build . 
--target clang_format_fix_all -j8 git diff > code_style_diff.diff working-directory: build @@ -42,7 +42,7 @@ jobs: if: failure() with: name: code_style_diff - path: code_style_diff.diff + path: build/code_style_diff.diff ShellCheck: runs-on: ubuntu-18.04 From 3186d4ed6ef0aa1c17f8244866b1ab4d3d366e50 Mon Sep 17 00:00:00 2001 From: Mateusz Tabaka Date: Wed, 11 Aug 2021 18:01:27 +0200 Subject: [PATCH 13/19] Merge onnx_importer to onnx_ngraph_frontend (#6825) * Merge onnx_importer to onnx_ngraph_frontend * address review comments * use onnx_ngraph_frontend in docs/template_extension * link onnx_ngraph_frontend instead of onnx_importer * switch onnxruntime branch to rel-1.8.1_ov_2022.1 in https://github.com/intel/onnxruntime * Remove pip install cmake since the 1.8.1 release doesnt require it * add alias * set IMPORTED_GLOBAL Co-authored-by: Ilya Lavrenov --- .ci/azure/linux.yml | 1 - .ci/azure/linux_onnxruntime.yml | 1 - .ci/openvino-onnx/Dockerfile | 1 - cmake/coverage.cmake | 15 +--- cmake/features.cmake | 7 +- cmake/templates/ngraphConfig.cmake.in | 27 ++++-- cmake/test_model_zoo.cmake | 4 +- docs/CMakeLists.txt | 2 +- .../IE_DG/Extensibility_DG/Custom_ONNX_Ops.md | 7 +- docs/onnx_custom_op/CMakeLists.txt | 4 +- docs/snippets/CMakeLists.txt | 4 +- docs/template_extension/CMakeLists.txt | 8 +- docs/template_extension/extension.cpp | 8 +- inference-engine/src/CMakeLists.txt | 2 +- inference-engine/src/readers/CMakeLists.txt | 2 +- .../src/readers/onnx_reader/CMakeLists.txt | 2 +- .../inference_engine/CMakeLists.txt | 6 +- .../ir_serialization/custom_ops.cpp | 2 +- .../ir_serialization/deterministicity.cpp | 2 +- .../ir_serialization/serialize.cpp | 2 +- .../functional/plugin/cpu/CMakeLists.txt | 2 +- inference-engine/tests/unit/CMakeLists.txt | 2 +- .../unit/frontends/onnx_import/CMakeLists.txt | 2 +- ngraph/frontend/CMakeLists.txt | 2 +- ngraph/frontend/onnx/CMakeLists.txt | 5 +- ngraph/frontend/onnx/frontend/CMakeLists.txt | 36 ++++++-- .../include/onnx_import/core/node.hpp | 2 +- .../include/onnx_import}/core/null_node.hpp | 4 +- .../include/onnx_import/core/operator_set.hpp | 0 .../include/onnx_import/onnx.hpp | 8 +- .../onnx_import}/onnx_importer_visibility.hpp | 2 +- .../include/onnx_import/onnx_utils.hpp | 2 +- .../src/core/attribute.cpp | 2 +- .../src/core/attribute.hpp | 0 .../src/core/graph.cpp | 2 +- .../src/core/graph.hpp | 2 +- .../src/core/graph_cache.cpp | 0 .../src/core/graph_cache.hpp | 0 .../src/core/model.cpp | 2 +- .../src}/core/model.hpp | 0 .../src/core/node.cpp | 2 +- .../src/core/null_node.cpp | 2 +- .../src/core/sparse_tensor.hpp | 0 .../src/core/tensor.hpp | 0 .../src/core/transform.cpp | 2 +- .../src/core/transform.hpp | 0 .../src/core/value_info.hpp | 0 .../src/default_opset.hpp | 0 .../src/detail/subgraph_extraction.cpp | 0 .../src/detail/subgraph_extraction.hpp | 2 +- .../src/edge_mapper.cpp | 2 +- .../src}/edge_mapper.hpp | 17 ++-- .../{onnx_import => frontend}/src/editor.cpp | 6 +- .../onnx_editor => frontend/src}/editor.hpp | 4 +- .../src}/editor_types.hpp | 8 +- .../src/exceptions.cpp | 0 .../src/exceptions.hpp | 0 .../frontend/onnx/frontend/src/frontend.cpp | 5 +- .../onnx/frontend/src/input_model.hpp | 2 +- .../{onnx_import => frontend}/src/onnx.cpp | 2 +- .../src/onnx_framework_node.cpp | 0 .../src/onnx_framework_node.hpp | 0 .../src/onnx_utils.cpp | 0 .../{onnx_import => frontend}/src/op/abs.hpp | 0 .../{onnx_import => frontend}/src/op/acos.hpp | 0 .../src/op/acosh.hpp | 0 .../{onnx_import => frontend}/src/op/add.cpp | 0 .../{onnx_import => 
frontend}/src/op/add.hpp | 0 .../src/op/affine.cpp | 0 .../src/op/affine.hpp | 0 .../{onnx_import => frontend}/src/op/and.hpp | 0 .../src/op/argmax.cpp | 0 .../src/op/argmax.hpp | 0 .../src/op/argmin.cpp | 0 .../src/op/argmin.hpp | 0 .../{onnx_import => frontend}/src/op/asin.hpp | 0 .../src/op/asinh.hpp | 0 .../{onnx_import => frontend}/src/op/atan.hpp | 0 .../src/op/atanh.hpp | 0 .../src/op/average_pool.cpp | 0 .../src/op/average_pool.hpp | 0 .../src/op/batch_norm.cpp | 2 +- .../src/op/batch_norm.hpp | 0 .../src/op/bitshift.cpp | 0 .../src/op/bitshift.hpp | 0 .../{onnx_import => frontend}/src/op/cast.cpp | 0 .../{onnx_import => frontend}/src/op/cast.hpp | 0 .../{onnx_import => frontend}/src/op/ceil.hpp | 0 .../{onnx_import => frontend}/src/op/clip.cpp | 2 +- .../{onnx_import => frontend}/src/op/clip.hpp | 0 .../src/op/compress.cpp | 0 .../src/op/compress.hpp | 0 .../src/op/concat.cpp | 0 .../src/op/concat.hpp | 0 .../src/op/constant.cpp | 0 .../src/op/constant.hpp | 0 .../src/op/constant_fill.cpp | 0 .../src/op/constant_fill.hpp | 0 .../src/op/constant_of_shape.cpp | 0 .../src/op/constant_of_shape.hpp | 0 .../{onnx_import => frontend}/src/op/conv.cpp | 0 .../{onnx_import => frontend}/src/op/conv.hpp | 0 .../src/op/conv_integer.cpp | 0 .../src/op/conv_integer.hpp | 0 .../src/op/conv_transpose.cpp | 0 .../src/op/conv_transpose.hpp | 0 .../{onnx_import => frontend}/src/op/cos.cpp | 0 .../{onnx_import => frontend}/src/op/cos.hpp | 0 .../{onnx_import => frontend}/src/op/cosh.cpp | 0 .../{onnx_import => frontend}/src/op/cosh.hpp | 0 .../src/op/cum_sum.cpp | 0 .../src/op/cum_sum.hpp | 0 .../src/op/depth_to_space.cpp | 0 .../src/op/depth_to_space.hpp | 0 .../src/op/dequantize_linear.cpp | 2 +- .../src/op/dequantize_linear.hpp | 0 .../{onnx_import => frontend}/src/op/div.hpp | 0 .../src/op/dropout.cpp | 2 +- .../src/op/dropout.hpp | 0 .../src/op/einsum.cpp | 0 .../src/op/einsum.hpp | 0 .../{onnx_import => frontend}/src/op/elu.cpp | 0 .../{onnx_import => frontend}/src/op/elu.hpp | 0 .../src/op/equal.hpp | 0 .../{onnx_import => frontend}/src/op/erf.hpp | 0 .../{onnx_import => frontend}/src/op/exp.hpp | 0 .../src/op/expand.cpp | 0 .../src/op/expand.hpp | 0 .../src/op/eye_like.cpp | 0 .../src/op/eye_like.hpp | 0 .../src/op/flatten.cpp | 0 .../src/op/flatten.hpp | 0 .../src/op/floor.hpp | 0 .../src/op/gather.hpp | 0 .../src/op/gather_elements.hpp | 0 .../src/op/gather_nd.cpp | 0 .../src/op/gather_nd.hpp | 0 .../{onnx_import => frontend}/src/op/gemm.cpp | 0 .../{onnx_import => frontend}/src/op/gemm.hpp | 0 .../src/op/global_average_pool.cpp | 0 .../src/op/global_average_pool.hpp | 0 .../src/op/global_max_pool.cpp | 0 .../src/op/global_max_pool.hpp | 0 .../src/op/greater.hpp | 0 .../{onnx_import => frontend}/src/op/gru.cpp | 2 +- .../{onnx_import => frontend}/src/op/gru.hpp | 0 .../src/op/hard_sigmoid.cpp | 0 .../src/op/hard_sigmoid.hpp | 0 .../src/op/hardmax.cpp | 0 .../src/op/hardmax.hpp | 0 .../src/op/identity.hpp | 0 .../src/op/image_scaler.cpp | 0 .../src/op/image_scaler.hpp | 0 .../src/op/instance_norm.cpp | 0 .../src/op/instance_norm.hpp | 0 .../src/op/leaky_relu.cpp | 0 .../src/op/leaky_relu.hpp | 0 .../{onnx_import => frontend}/src/op/less.hpp | 0 .../{onnx_import => frontend}/src/op/log.cpp | 0 .../{onnx_import => frontend}/src/op/log.hpp | 0 .../src/op/log_softmax.cpp | 0 .../src/op/log_softmax.hpp | 0 .../{onnx_import => frontend}/src/op/loop.cpp | 2 +- .../{onnx_import => frontend}/src/op/loop.hpp | 0 .../src/op/lp_norm.cpp | 0 .../src/op/lp_norm.hpp | 0 .../src/op/lp_pool.cpp | 0 
.../src/op/lp_pool.hpp | 0 .../{onnx_import => frontend}/src/op/lrn.cpp | 0 .../{onnx_import => frontend}/src/op/lrn.hpp | 0 .../{onnx_import => frontend}/src/op/lstm.cpp | 2 +- .../{onnx_import => frontend}/src/op/lstm.hpp | 0 .../src/op/matmul.hpp | 0 .../{onnx_import => frontend}/src/op/max.hpp | 0 .../src/op/max_pool.cpp | 2 +- .../src/op/max_pool.hpp | 0 .../{onnx_import => frontend}/src/op/mean.cpp | 0 .../{onnx_import => frontend}/src/op/mean.hpp | 0 .../src/op/mean_variance_normalization.cpp | 0 .../src/op/mean_variance_normalization.hpp | 0 .../{onnx_import => frontend}/src/op/min.hpp | 0 .../{onnx_import => frontend}/src/op/mod.cpp | 0 .../{onnx_import => frontend}/src/op/mod.hpp | 0 .../{onnx_import => frontend}/src/op/mul.hpp | 0 .../{onnx_import => frontend}/src/op/neg.hpp | 0 .../src/op/non_max_suppression.cpp | 2 +- .../src/op/non_max_suppression.hpp | 0 .../src/op/non_zero.cpp | 0 .../src/op/non_zero.hpp | 0 .../{onnx_import => frontend}/src/op/not.hpp | 0 .../src/op/onehot.cpp | 0 .../src/op/onehot.hpp | 0 .../{onnx_import => frontend}/src/op/or.hpp | 0 .../deformable_conv_2d.cpp | 0 .../deformable_conv_2d.hpp | 0 .../org.openvinotoolkit/detection_output.cpp | 0 .../org.openvinotoolkit/detection_output.hpp | 0 .../detection_output.cpp | 0 .../detection_output.hpp | 0 .../generate_proposals_single_image.cpp | 0 .../generate_proposals_single_image.hpp | 0 .../prior_grid_generator.cpp | 0 .../prior_grid_generator.hpp | 0 .../roi_feature_extractor.cpp | 0 .../roi_feature_extractor.hpp | 0 .../experimental_detectron/topk_rios.cpp | 0 .../experimental_detectron/topk_rios.hpp | 0 .../op/org.openvinotoolkit/fake_quantize.cpp | 0 .../op/org.openvinotoolkit/fake_quantize.hpp | 0 .../src/op/org.openvinotoolkit/group_norm.cpp | 0 .../src/op/org.openvinotoolkit/group_norm.hpp | 0 .../src/op/org.openvinotoolkit/normalize.cpp | 0 .../src/op/org.openvinotoolkit/normalize.hpp | 0 .../src/op/org.openvinotoolkit/prior_box.cpp | 0 .../src/op/org.openvinotoolkit/prior_box.hpp | 0 .../src/op/org.openvinotoolkit/swish.cpp | 0 .../src/op/org.openvinotoolkit/swish.hpp | 0 .../{onnx_import => frontend}/src/op/pad.cpp | 0 .../{onnx_import => frontend}/src/op/pad.hpp | 0 .../{onnx_import => frontend}/src/op/pow.cpp | 0 .../{onnx_import => frontend}/src/op/pow.hpp | 0 .../src/op/prelu.cpp | 0 .../src/op/prelu.hpp | 0 .../src/op/quant_conv.cpp | 0 .../src/op/quant_conv.hpp | 0 .../src/op/quantize_linear.cpp | 0 .../src/op/quantize_linear.hpp | 0 .../src/op/range.cpp | 0 .../src/op/range.hpp | 0 .../src/op/reciprocal.cpp | 0 .../src/op/reciprocal.hpp | 0 .../src/op/reduce.cpp | 0 .../src/op/reduce.hpp | 0 .../{onnx_import => frontend}/src/op/relu.hpp | 0 .../src/op/reshape.cpp | 0 .../src/op/reshape.hpp | 0 .../src/op/resize.cpp | 0 .../src/op/resize.hpp | 0 .../src/op/reverse_sequence.cpp | 0 .../src/op/reverse_sequence.hpp | 0 .../{onnx_import => frontend}/src/op/rnn.cpp | 0 .../{onnx_import => frontend}/src/op/rnn.hpp | 0 .../src/op/roi_align.cpp | 0 .../src/op/roi_align.hpp | 0 .../src/op/round.cpp | 0 .../src/op/round.hpp | 0 .../src/op/scatter_elements.cpp | 0 .../src/op/scatter_elements.hpp | 0 .../src/op/scatter_nd.cpp | 0 .../src/op/scatter_nd.hpp | 0 .../{onnx_import => frontend}/src/op/selu.cpp | 0 .../{onnx_import => frontend}/src/op/selu.hpp | 0 .../src/op/shape.cpp | 0 .../src/op/shape.hpp | 0 .../src/op/shrink.cpp | 0 .../src/op/shrink.hpp | 0 .../src/op/sigmoid.hpp | 0 .../{onnx_import => frontend}/src/op/sign.hpp | 0 .../{onnx_import => frontend}/src/op/sin.hpp | 0 .../{onnx_import => 
frontend}/src/op/sinh.hpp | 0 .../{onnx_import => frontend}/src/op/size.cpp | 0 .../{onnx_import => frontend}/src/op/size.hpp | 0 .../src/op/slice.cpp | 2 +- .../src/op/slice.hpp | 0 .../src/op/softmax.cpp | 0 .../src/op/softmax.hpp | 0 .../src/op/softplus.cpp | 0 .../src/op/softplus.hpp | 0 .../src/op/softsign.cpp | 0 .../src/op/softsign.hpp | 0 .../src/op/space_to_depth.cpp | 0 .../src/op/space_to_depth.hpp | 0 .../src/op/split.cpp | 0 .../src/op/split.hpp | 0 .../{onnx_import => frontend}/src/op/sqrt.hpp | 0 .../src/op/squeeze.cpp | 0 .../src/op/squeeze.hpp | 0 .../{onnx_import => frontend}/src/op/sub.hpp | 0 .../{onnx_import => frontend}/src/op/sum.hpp | 0 .../{onnx_import => frontend}/src/op/tan.hpp | 0 .../{onnx_import => frontend}/src/op/tanh.hpp | 0 .../src/op/thresholded_relu.cpp | 0 .../src/op/thresholded_relu.hpp | 0 .../{onnx_import => frontend}/src/op/tile.cpp | 0 .../{onnx_import => frontend}/src/op/tile.hpp | 0 .../{onnx_import => frontend}/src/op/topk.cpp | 0 .../{onnx_import => frontend}/src/op/topk.hpp | 0 .../src/op/transpose.cpp | 0 .../src/op/transpose.hpp | 0 .../src/op/unsqueeze.cpp | 0 .../src/op/unsqueeze.hpp | 0 .../src/op/upsample.cpp | 0 .../src/op/upsample.hpp | 0 .../src/op/where.hpp | 0 .../{onnx_import => frontend}/src/op/xor.hpp | 0 .../src/ops_bridge.cpp | 0 .../src/ops_bridge.hpp | 0 ngraph/frontend/onnx/frontend/src/place.hpp | 7 +- .../{onnx_import => frontend}/src/precomp.hpp | 0 .../src/utils/arg_min_max_factory.cpp | 0 .../src/utils/arg_min_max_factory.hpp | 0 .../src/utils/common.cpp | 0 .../src/utils/common.hpp | 0 .../src/utils/convpool.cpp | 0 .../src/utils/convpool.hpp | 0 .../src/utils/onnx_internal.cpp | 6 +- .../src}/utils/onnx_internal.hpp | 7 +- .../src/utils/pooling_factory.cpp | 0 .../src/utils/pooling_factory.hpp | 0 .../src/utils/provenance_tag.cpp | 0 .../src/utils/provenance_tag.hpp | 0 .../src/utils/recurrent.cpp | 2 +- .../src/utils/recurrent.hpp | 0 .../src/utils/reshape.cpp | 0 .../src/utils/reshape.hpp | 0 .../src/utils/tensor_external_data.cpp | 0 .../src/utils/tensor_external_data.hpp | 0 .../src/utils/variadic.hpp | 0 .../frontend/onnx/onnx_import/CMakeLists.txt | 87 ------------------- .../onnx/onnx_import/src/core/model.hpp | 85 ------------------ ngraph/python/BUILDING.md | 4 +- ngraph/python/CMakeLists.txt | 4 +- ngraph/python/setup.py | 4 +- ngraph/python/src/pyngraph/pyngraph.cpp | 4 +- .../tests/test_frontend/test_frontend_onnx.py | 7 +- .../test_frontend_onnx_editor.py | 33 +++---- ngraph/test/CMakeLists.txt | 15 ++-- ngraph/test/onnx/onnx_editor.cpp | 2 +- ngraph/test/onnx/onnx_import.in.cpp | 2 +- .../test/onnx/onnx_import_org_openvino.in.cpp | 2 +- .../test/onnx/onnx_import_with_editor.in.cpp | 2 +- ngraph/test/onnx/onnx_test_utils.in.cpp | 2 +- ngraph/test/util/CMakeLists.txt | 4 +- thirdparty/CMakeLists.txt | 4 +- 334 files changed, 188 insertions(+), 351 deletions(-) rename ngraph/frontend/onnx/{onnx_import => frontend}/include/onnx_import/core/node.hpp (99%) rename ngraph/frontend/onnx/{onnx_import/src => frontend/include/onnx_import}/core/null_node.hpp (92%) rename ngraph/frontend/onnx/{onnx_import => frontend}/include/onnx_import/core/operator_set.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/include/onnx_import/onnx.hpp (90%) rename ngraph/frontend/onnx/{onnx_import/include/onnx_import/utils => frontend/include/onnx_import}/onnx_importer_visibility.hpp (75%) rename ngraph/frontend/onnx/{onnx_import => frontend}/include/onnx_import/onnx_utils.hpp (97%) rename 
ngraph/frontend/onnx/{onnx_import => frontend}/src/core/attribute.cpp (96%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/attribute.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/graph.cpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/graph.hpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/graph_cache.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/graph_cache.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/model.cpp (99%) rename ngraph/frontend/onnx/{onnx_import/include/onnx_import => frontend/src}/core/model.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/node.cpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/null_node.cpp (95%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/sparse_tensor.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/tensor.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/transform.cpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/transform.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/core/value_info.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/default_opset.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/detail/subgraph_extraction.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/detail/subgraph_extraction.hpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/edge_mapper.cpp (99%) rename ngraph/frontend/onnx/{onnx_import/include/onnx_editor => frontend/src}/edge_mapper.hpp (87%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/editor.cpp (99%) rename ngraph/frontend/onnx/{onnx_import/include/onnx_editor => frontend/src}/editor.hpp (99%) rename ngraph/frontend/onnx/{onnx_import/include/onnx_editor => frontend/src}/editor_types.hpp (96%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/exceptions.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/exceptions.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/onnx.cpp (97%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/onnx_framework_node.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/onnx_framework_node.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/onnx_utils.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/abs.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/acos.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/acosh.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/add.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/add.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/affine.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/affine.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/and.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/argmax.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/argmax.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/argmin.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/argmin.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/asin.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/asinh.hpp (100%) 
rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/atan.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/atanh.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/average_pool.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/average_pool.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/batch_norm.cpp (98%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/batch_norm.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/bitshift.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/bitshift.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/cast.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/cast.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/ceil.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/clip.cpp (98%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/clip.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/compress.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/compress.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/concat.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/concat.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/constant.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/constant.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/constant_fill.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/constant_fill.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/constant_of_shape.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/constant_of_shape.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/conv.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/conv.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/conv_integer.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/conv_integer.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/conv_transpose.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/conv_transpose.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/cos.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/cos.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/cosh.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/cosh.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/cum_sum.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/cum_sum.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/depth_to_space.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/depth_to_space.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/dequantize_linear.cpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/dequantize_linear.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/div.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/dropout.cpp (98%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/dropout.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/einsum.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => 
frontend}/src/op/einsum.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/elu.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/elu.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/equal.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/erf.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/exp.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/expand.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/expand.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/eye_like.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/eye_like.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/flatten.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/flatten.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/floor.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/gather.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/gather_elements.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/gather_nd.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/gather_nd.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/gemm.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/gemm.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/global_average_pool.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/global_average_pool.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/global_max_pool.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/global_max_pool.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/greater.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/gru.cpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/gru.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/hard_sigmoid.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/hard_sigmoid.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/hardmax.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/hardmax.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/identity.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/image_scaler.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/image_scaler.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/instance_norm.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/instance_norm.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/leaky_relu.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/leaky_relu.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/less.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/log.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/log.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/log_softmax.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/log_softmax.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/loop.cpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/loop.hpp (100%) rename 
ngraph/frontend/onnx/{onnx_import => frontend}/src/op/lp_norm.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/lp_norm.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/lp_pool.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/lp_pool.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/lrn.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/lrn.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/lstm.cpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/lstm.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/matmul.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/max.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/max_pool.cpp (93%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/max_pool.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/mean.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/mean.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/mean_variance_normalization.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/mean_variance_normalization.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/min.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/mod.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/mod.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/mul.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/neg.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/non_max_suppression.cpp (98%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/non_max_suppression.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/non_zero.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/non_zero.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/not.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/onehot.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/onehot.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/or.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/deformable_conv_2d.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/deformable_conv_2d.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/detection_output.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/detection_output.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => 
frontend}/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/fake_quantize.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/fake_quantize.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/group_norm.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/group_norm.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/normalize.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/normalize.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/prior_box.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/prior_box.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/swish.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/org.openvinotoolkit/swish.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/pad.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/pad.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/pow.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/pow.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/prelu.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/prelu.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/quant_conv.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/quant_conv.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/quantize_linear.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/quantize_linear.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/range.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/range.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/reciprocal.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/reciprocal.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/reduce.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/reduce.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/relu.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/reshape.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/reshape.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/resize.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/resize.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/reverse_sequence.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/reverse_sequence.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => 
frontend}/src/op/rnn.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/rnn.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/roi_align.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/roi_align.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/round.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/round.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/scatter_elements.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/scatter_elements.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/scatter_nd.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/scatter_nd.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/selu.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/selu.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/shape.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/shape.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/shrink.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/shrink.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/sigmoid.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/sign.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/sin.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/sinh.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/size.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/size.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/slice.cpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/slice.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/softmax.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/softmax.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/softplus.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/softplus.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/softsign.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/softsign.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/space_to_depth.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/space_to_depth.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/split.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/split.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/sqrt.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/squeeze.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/squeeze.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/sub.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/sum.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/tan.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/tanh.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/thresholded_relu.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/thresholded_relu.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/tile.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/tile.hpp (100%) rename 
ngraph/frontend/onnx/{onnx_import => frontend}/src/op/topk.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/topk.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/transpose.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/transpose.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/unsqueeze.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/unsqueeze.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/upsample.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/upsample.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/where.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/op/xor.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/ops_bridge.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/ops_bridge.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/precomp.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/arg_min_max_factory.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/arg_min_max_factory.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/common.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/common.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/convpool.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/convpool.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/onnx_internal.cpp (97%) rename ngraph/frontend/onnx/{onnx_import/include/onnx_import => frontend/src}/utils/onnx_internal.hpp (89%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/pooling_factory.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/pooling_factory.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/provenance_tag.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/provenance_tag.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/recurrent.cpp (99%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/recurrent.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/reshape.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/reshape.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/tensor_external_data.cpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/tensor_external_data.hpp (100%) rename ngraph/frontend/onnx/{onnx_import => frontend}/src/utils/variadic.hpp (100%) delete mode 100644 ngraph/frontend/onnx/onnx_import/CMakeLists.txt delete mode 100644 ngraph/frontend/onnx/onnx_import/src/core/model.hpp diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 2b9dda46708..40f07b58922 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -117,7 +117,6 @@ jobs: -DPYTHON_EXECUTABLE=/usr/bin/python3.8 -DENABLE_WHEEL=ON -DENABLE_TESTS=ON - -DNGRAPH_ONNX_IMPORT_ENABLE=ON -DNGRAPH_ONNX_FRONTEND_ENABLE=ON -DENABLE_FASTER_BUILD=ON -DENABLE_STRICT_DEPENDENCIES=OFF diff --git a/.ci/azure/linux_onnxruntime.yml b/.ci/azure/linux_onnxruntime.yml index 0229c37c0b0..932fb76d5cc 100644 --- a/.ci/azure/linux_onnxruntime.yml +++ b/.ci/azure/linux_onnxruntime.yml @@ -94,7 +94,6 @@ jobs: -DENABLE_PROFILING_ITT=OFF -DENABLE_SAMPLES=OFF -DENABLE_SPEECH_DEMO=OFF - -DNGRAPH_ONNX_IMPORT_ENABLE=ON 
-DNGRAPH_ONNX_FRONTEND_ENABLE=ON -DNGRAPH_DEBUG_ENABLE=OFF $(REPO_DIR) diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile index 31559822562..ca2cbd8afbe 100644 --- a/.ci/openvino-onnx/Dockerfile +++ b/.ci/openvino-onnx/Dockerfile @@ -68,7 +68,6 @@ RUN cmake .. \ -DENABLE_SPEECH_DEMO=OFF \ -DENABLE_PYTHON=ON \ -DPYTHON_EXECUTABLE=/usr/bin/python3 \ - -DNGRAPH_ONNX_IMPORT_ENABLE=ON \ -DNGRAPH_ONNX_FRONTEND_ENABLE=ON \ -DNGRAPH_DEBUG_ENABLE=OFF \ -DCMAKE_INSTALL_PREFIX=/openvino/dist \ diff --git a/cmake/coverage.cmake b/cmake/coverage.cmake index 4d8976e0a80..137acbb8993 100644 --- a/cmake/coverage.cmake +++ b/cmake/coverage.cmake @@ -90,17 +90,10 @@ ie_coverage_extract(INPUT "openvino" OUTPUT "ngraph" ie_coverage_genhtml(INFO_FILE "ngraph" PREFIX "${OV_COVERAGE_BASE_DIRECTORY}") -if(NGRAPH_ONNX_IMPORT_ENABLE) - ie_coverage_extract(INPUT "openvino" OUTPUT "onnx_importer" - PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/onnx_common*" - "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/onnx_import*") - ie_coverage_genhtml(INFO_FILE "onnx_importer" - PREFIX "${OV_COVERAGE_BASE_DIRECTORY}") -endif() - if(NGRAPH_ONNX_FRONTEND_ENABLE) - ie_coverage_extract(INPUT "openvino" OUTPUT "onnx_ngraph_frontend" - PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/frontend*") - ie_coverage_genhtml(INFO_FILE "onnx_ngraph_frontend" + ie_coverage_extract(INPUT "openvino" OUTPUT "onnx" + PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/*" + "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/*") + ie_coverage_genhtml(INFO_FILE "onnx" PREFIX "${OV_COVERAGE_BASE_DIRECTORY}") endif() diff --git a/cmake/features.cmake b/cmake/features.cmake index 26bf48f3824..3df1af5ef91 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -122,13 +122,12 @@ else() set(protoc_available ON) endif() -ie_dependent_option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" ON "protoc_available" OFF) -ie_dependent_option(NGRAPH_ONNX_FRONTEND_ENABLE "Enable ONNX FrontEnd" OFF "NGRAPH_ONNX_IMPORT_ENABLE" OFF) +ie_dependent_option(NGRAPH_ONNX_FRONTEND_ENABLE "Enable ONNX FrontEnd" ON "protoc_available" OFF) ie_dependent_option(NGRAPH_PDPD_FRONTEND_ENABLE "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF) ie_dependent_option(NGRAPH_USE_PROTOBUF_LITE "Compiles and links with protobuf-lite" ON - "NGRAPH_ONNX_IMPORT_ENABLE" OFF) + "NGRAPH_ONNX_FRONTEND_ENABLE" OFF) ie_dependent_option(NGRAPH_USE_SYSTEM_PROTOBUF "Use system protobuf" OFF - "NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE" OFF) + "NGRAPH_ONNX_FRONTEND_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE" OFF) ie_dependent_option(NGRAPH_UNIT_TEST_ENABLE "Enables ngraph unit tests" ON "ENABLE_TESTS;NOT ANDROID" OFF) ie_dependent_option(NGRAPH_UNIT_TEST_BACKENDS_ENABLE "Control the building of unit tests using backends" ON "NGRAPH_UNIT_TEST_ENABLE" OFF) diff --git a/cmake/templates/ngraphConfig.cmake.in b/cmake/templates/ngraphConfig.cmake.in index e9c943f37d1..b945d0148aa 100644 --- a/cmake/templates/ngraphConfig.cmake.in +++ b/cmake/templates/ngraphConfig.cmake.in @@ -28,9 +28,8 @@ # # ngraph::frontend_manager - nGraph frontend manager # -# ngraph_onnx_importer_FOUND - True if the system has onnx_importer library -# ngraph::onnx_importer - ONNX importer target (optional) -# ONNX_IMPORTER_LIBRARIES - ONNX importer libraries +# ngraph_onnx_ngraph_frontend_FOUND - True if the system has onnx_ngraph_frontend library +# ngraph::onnx_ngraph_frontend - ONNX FrontEnd target (optional) # # 
ngraph_paddlepaddle_frontend_FOUND - True if the system has PDPD frontend
 #    ngraph::paddlepaddle_ngraph_frontend - nGraph PDPD frontend (optional)
@@ -38,18 +37,30 @@
 @PACKAGE_INIT@
 
+function(set_imported_global target)
+    get_target_property(IS_IMPORTED_GLOBAL ${target} IMPORTED_GLOBAL)
+    if (NOT IS_IMPORTED_GLOBAL)
+        set_target_properties(${target} PROPERTIES IMPORTED_GLOBAL TRUE)
+    endif()
+endfunction()
+
 if(NOT TARGET ngraph)
     include("${CMAKE_CURRENT_LIST_DIR}/ngraphTargets.cmake")
+    set_imported_global(ngraph::ngraph)
+    set_imported_global(ngraph::frontend_manager)
 endif()
 
 set(ngraph_ngraph_FOUND ON)
 set(NGRAPH_LIBRARIES ngraph::ngraph)
 
-set(ngraph_onnx_importer_FOUND @NGRAPH_ONNX_IMPORT_ENABLE@)
-if(ngraph_onnx_importer_FOUND)
-    set(ONNX_IMPORTER_LIBRARIES ngraph::onnx_importer)
+set(ngraph_onnx_ngraph_frontend_FOUND @NGRAPH_ONNX_FRONTEND_ENABLE@)
+if (ngraph_onnx_ngraph_frontend_FOUND AND NOT TARGET onnx_ngraph_frontend AND NOT TARGET ngraph::onnx_importer)
+    set_imported_global(ngraph::onnx_ngraph_frontend)
+    add_library(ngraph::onnx_importer ALIAS ngraph::onnx_ngraph_frontend)
+endif()
+
+set(ngraph_paddlepaddle_frontend_FOUND @NGRAPH_PDPD_FRONTEND_ENABLE@)
+if(ngraph_paddlepaddle_frontend_FOUND AND NOT TARGET paddlepaddle_ngraph_frontend)
+    set_imported_global(ngraph::paddlepaddle_ngraph_frontend)
 endif()
 
-set(ngraph_paddlepaddle_frontend_FOUND @NGRAPH_PDPD_FRONTEND_ENABLE@)
-
 check_required_components(ngraph)
diff --git a/cmake/test_model_zoo.cmake b/cmake/test_model_zoo.cmake
index c3f158626cd..580cab35ec4 100644
--- a/cmake/test_model_zoo.cmake
+++ b/cmake/test_model_zoo.cmake
@@ -17,7 +17,7 @@ function(ov_model_convert SRC DST OUT)
         get_filename_component(name_we "${in_file}" NAME_WE)
         set(model_source_dir "${SRC}/${rel_dir}")
 
-        if(NOT NGRAPH_ONNX_IMPORT_ENABLE AND ext MATCHES "^\\.(onnx|prototxt)$")
+        if(NOT NGRAPH_ONNX_FRONTEND_ENABLE AND ext MATCHES "^\\.(onnx|prototxt)$")
             # don't copy / process ONNX / prototxt files
             continue()
         endif()
@@ -78,7 +78,7 @@ ov_model_convert("${OpenVINO_SOURCE_DIR}/${rel_path}"
                  ie_onnx_import_out_files)
 
 if(ENABLE_TESTS)
-    if(NGRAPH_ONNX_IMPORT_ENABLE AND ENABLE_REQUIREMENTS_INSTALL)
+    if(NGRAPH_ONNX_FRONTEND_ENABLE AND ENABLE_REQUIREMENTS_INSTALL)
         find_package(PythonInterp 3 REQUIRED)
 
         get_filename_component(PYTHON_EXEC_DIR ${PYTHON_EXECUTABLE} DIRECTORY)
diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt
index c9859464ee1..15eabf321ab 100644
--- a/docs/CMakeLists.txt
+++ b/docs/CMakeLists.txt
@@ -25,7 +25,7 @@ if(NOT ENABLE_DOCKER)
         set(InferenceEngine_DIR ${CMAKE_BINARY_DIR})
     endif()
 
-    if(NGRAPH_ONNX_IMPORT_ENABLE)
+    if(NGRAPH_ONNX_FRONTEND_ENABLE)
         add_subdirectory(onnx_custom_op)
     endif()
     add_subdirectory(template_extension)
diff --git a/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md b/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md
index 252d67df81f..a9a9841cac4 100644
--- a/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md
+++ b/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md
@@ -50,10 +50,9 @@ The example below demonstrates how to unregister an operator from the destructor
 ## Requirements for Building with CMake
 
-A program that uses the `register_operator` functionality requires `ngraph` and `onnx_importer` libraries in addition to the Inference Engine.
-The `onnx_importer` is a component of the `ngraph` package , so `find_package(ngraph REQUIRED COMPONENTS onnx_importer)` can find both.
-The `ngraph` package exposes two variables, `${NGRAPH_LIBRARIES}` and `${ONNX_IMPORTER_LIBRARIES}`, which reference the `ngraph` and `onnx_importer` libraries.
-Those variables need to be passed to the `target_link_libraries` command in the CMakeLists.txt file.
+A program that uses the `register_operator` functionality requires `ngraph::ngraph` and `ngraph::onnx_ngraph_frontend` libraries in addition to the Inference Engine.
+The `onnx_ngraph_frontend` is a component of the `ngraph` package, so `find_package(ngraph REQUIRED COMPONENTS onnx_ngraph_frontend)` can find both.
+Those libraries need to be passed to the `target_link_libraries` command in the CMakeLists.txt file.
 
 See CMakeLists.txt below for reference:
 
 @snippet onnx_custom_op/CMakeLists.txt cmake:onnx_custom_op
diff --git a/docs/onnx_custom_op/CMakeLists.txt b/docs/onnx_custom_op/CMakeLists.txt
index f38ead369d8..09d6635ee92 100644
--- a/docs/onnx_custom_op/CMakeLists.txt
+++ b/docs/onnx_custom_op/CMakeLists.txt
@@ -7,11 +7,11 @@ set(CMAKE_CXX_STANDARD 11)
 
 set(TARGET_NAME "onnx_custom_op")
 
-find_package(ngraph REQUIRED COMPONENTS onnx_importer)
+find_package(ngraph REQUIRED COMPONENTS onnx_ngraph_frontend)
 
 add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp onnx_custom_op.hpp)
 
-target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} ${ONNX_IMPORTER_LIBRARIES})
+target_link_libraries(${TARGET_NAME} PUBLIC ngraph::ngraph ngraph::onnx_ngraph_frontend)
 # [cmake:onnx_custom_op]
 
 # Enable code style check
diff --git a/docs/snippets/CMakeLists.txt b/docs/snippets/CMakeLists.txt
index 48edae1e832..9edc3e4f327 100644
--- a/docs/snippets/CMakeLists.txt
+++ b/docs/snippets/CMakeLists.txt
@@ -44,8 +44,8 @@ if(OpenCV_FOUND)
     target_link_libraries(${TARGET_NAME} PRIVATE opencv_core)
 endif()
 
-if(NGRAPH_ONNX_IMPORT_ENABLE)
-    target_link_libraries(${TARGET_NAME} PRIVATE onnx_importer)
+if(NGRAPH_ONNX_FRONTEND_ENABLE)
+    target_link_libraries(${TARGET_NAME} PRIVATE onnx_ngraph_frontend)
 endif()
 
 if(NOT MSVC)
diff --git a/docs/template_extension/CMakeLists.txt b/docs/template_extension/CMakeLists.txt
index a6e7527e55f..230323768e0 100644
--- a/docs/template_extension/CMakeLists.txt
+++ b/docs/template_extension/CMakeLists.txt
@@ -7,7 +7,7 @@ set(CMAKE_CXX_STANDARD 11)
 
 set(TARGET_NAME "template_extension")
 
-find_package(ngraph REQUIRED OPTIONAL_COMPONENTS onnx_importer)
+find_package(ngraph REQUIRED OPTIONAL_COMPONENTS onnx_ngraph_frontend)
 find_package(InferenceEngine REQUIRED)
 find_package(OpenCV QUIET COMPONENTS core)
 
@@ -28,9 +28,9 @@ target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_EXTENSION_
 
 target_link_libraries(${TARGET_NAME} PRIVATE IE::inference_engine ${NGRAPH_LIBRARIES})
 
-if (ngraph_onnx_importer_FOUND)
-    target_link_libraries(${TARGET_NAME} PRIVATE ${ONNX_IMPORTER_LIBRARIES})
-    target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_IMPORT_ENABLED)
+if (ngraph_onnx_ngraph_frontend_FOUND)
+    target_link_libraries(${TARGET_NAME} PRIVATE ngraph::onnx_ngraph_frontend)
+    target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_FRONTEND_ENABLED)
 endif()
 # [cmake:extension]
diff --git a/docs/template_extension/extension.cpp b/docs/template_extension/extension.cpp
index 4c5885a090f..d3be82d1120 100644
--- a/docs/template_extension/extension.cpp
+++ b/docs/template_extension/extension.cpp
@@ -11,7 +11,7 @@
 # include "fft_op.hpp"
 #endif
 #include
-#ifdef NGRAPH_ONNX_IMPORT_ENABLED
+#ifdef NGRAPH_ONNX_FRONTEND_ENABLED
 # include
 #endif
 
@@ -24,7 +24,7 @@ using namespace TemplateExtension;
 
 //!
[extension:ctor] Extension::Extension() { -#ifdef NGRAPH_ONNX_IMPORT_ENABLED +#ifdef NGRAPH_ONNX_FRONTEND_ENABLED ngraph::onnx_import::register_operator(Operation::type_info.name, 1, "custom_domain", @@ -49,12 +49,12 @@ Extension::Extension() { //! [extension:dtor] Extension::~Extension() { -#ifdef NGRAPH_ONNX_IMPORT_ENABLED +#ifdef NGRAPH_ONNX_FRONTEND_ENABLED ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain"); # ifdef OPENCV_IMPORT_ENABLED ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain"); # endif // OPENCV_IMPORT_ENABLED -#endif // NGRAPH_ONNX_IMPORT_ENABLED +#endif // NGRAPH_ONNX_FRONTEND_ENABLED } //! [extension:dtor] diff --git a/inference-engine/src/CMakeLists.txt b/inference-engine/src/CMakeLists.txt index cd78cd959ec..5f3959223fb 100644 --- a/inference-engine/src/CMakeLists.txt +++ b/inference-engine/src/CMakeLists.txt @@ -52,6 +52,6 @@ add_custom_target(ie_libraries ALL inference_engine_ir_v7_reader inference_engine_ir_reader inference_engine_lp_transformations inference_engine_snippets) -if(NGRAPH_ONNX_IMPORT_ENABLE) +if(NGRAPH_ONNX_FRONTEND_ENABLE) add_dependencies(ie_libraries inference_engine_onnx_reader) endif() diff --git a/inference-engine/src/readers/CMakeLists.txt b/inference-engine/src/readers/CMakeLists.txt index b8f1ff7301d..139a515f3fa 100644 --- a/inference-engine/src/readers/CMakeLists.txt +++ b/inference-engine/src/readers/CMakeLists.txt @@ -18,6 +18,6 @@ add_cpplint_target(${TARGET_NAME}_cpplint FOR_SOURCES ${reader_api_hpp}) add_subdirectory(ir_reader) add_subdirectory(ir_reader_v7) -if(NGRAPH_ONNX_IMPORT_ENABLE) +if(NGRAPH_ONNX_FRONTEND_ENABLE) add_subdirectory(onnx_reader) endif() diff --git a/inference-engine/src/readers/onnx_reader/CMakeLists.txt b/inference-engine/src/readers/onnx_reader/CMakeLists.txt index 3973b1361fa..b5b409f99c7 100644 --- a/inference-engine/src/readers/onnx_reader/CMakeLists.txt +++ b/inference-engine/src/readers/onnx_reader/CMakeLists.txt @@ -23,7 +23,7 @@ target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_PLUGIN) -target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_reader_api onnx_importer inference_engine) +target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_reader_api onnx_ngraph_frontend inference_engine) ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) diff --git a/inference-engine/tests/functional/inference_engine/CMakeLists.txt b/inference-engine/tests/functional/inference_engine/CMakeLists.txt index 6b556455897..56258e51437 100644 --- a/inference-engine/tests/functional/inference_engine/CMakeLists.txt +++ b/inference-engine/tests/functional/inference_engine/CMakeLists.txt @@ -34,7 +34,7 @@ set(DEPENDENCIES test_model_zoo ) -if (NOT NGRAPH_ONNX_IMPORT_ENABLE) +if (NOT NGRAPH_ONNX_FRONTEND_ENABLE) list(APPEND EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/onnx_reader") endif() @@ -56,9 +56,9 @@ addIeTargetTest( set_ie_threading_interface_for(${TARGET_NAME}) -if(NGRAPH_ONNX_IMPORT_ENABLE) +if(NGRAPH_ONNX_FRONTEND_ENABLE) target_compile_definitions(${TARGET_NAME} PRIVATE - NGRAPH_ONNX_IMPORT_ENABLE + NGRAPH_ONNX_FRONTEND_ENABLE ONNX_TEST_MODELS="${TEST_MODEL_ZOO}/onnx_reader/models/") add_dependencies(${TARGET_NAME} inference_engine_onnx_reader) endif() diff --git a/inference-engine/tests/functional/inference_engine/ir_serialization/custom_ops.cpp 
b/inference-engine/tests/functional/inference_engine/ir_serialization/custom_ops.cpp index 08de701d499..0c38683a081 100644 --- a/inference-engine/tests/functional/inference_engine/ir_serialization/custom_ops.cpp +++ b/inference-engine/tests/functional/inference_engine/ir_serialization/custom_ops.cpp @@ -61,7 +61,7 @@ TEST_F(CustomOpsSerializationTest, CustomOpUser_MO) { ASSERT_TRUE(success) << message; } -#ifdef NGRAPH_ONNX_IMPORT_ENABLE +#ifdef NGRAPH_ONNX_FRONTEND_ENABLE TEST_F(CustomOpsSerializationTest, CustomOpUser_ONNXImporter) { const std::string model = CommonTestUtils::getModelFromTestModelZoo( diff --git a/inference-engine/tests/functional/inference_engine/ir_serialization/deterministicity.cpp b/inference-engine/tests/functional/inference_engine/ir_serialization/deterministicity.cpp index 1a010a15c87..ddcc0149532 100644 --- a/inference-engine/tests/functional/inference_engine/ir_serialization/deterministicity.cpp +++ b/inference-engine/tests/functional/inference_engine/ir_serialization/deterministicity.cpp @@ -47,7 +47,7 @@ protected: } }; -#ifdef NGRAPH_ONNX_IMPORT_ENABLE +#ifdef NGRAPH_ONNX_FRONTEND_ENABLE TEST_F(SerializationDeterministicityTest, BasicModel) { const std::string model = CommonTestUtils::getModelFromTestModelZoo( diff --git a/inference-engine/tests/functional/inference_engine/ir_serialization/serialize.cpp b/inference-engine/tests/functional/inference_engine/ir_serialization/serialize.cpp index 49c9baa9f1e..5bec0de3dd0 100644 --- a/inference-engine/tests/functional/inference_engine/ir_serialization/serialize.cpp +++ b/inference-engine/tests/functional/inference_engine/ir_serialization/serialize.cpp @@ -78,7 +78,7 @@ INSTANTIATE_TEST_SUITE_P(IRSerialization, SerializationTest, std::make_tuple("loop_2d_add.xml", "loop_2d_add.bin"), std::make_tuple("nms5_dynamism.xml", "nms5_dynamism.bin"))); -#ifdef NGRAPH_ONNX_IMPORT_ENABLE +#ifdef NGRAPH_ONNX_FRONTEND_ENABLE INSTANTIATE_TEST_SUITE_P(ONNXSerialization, SerializationTest, testing::Values(std::make_tuple("add_abc.onnx", ""), diff --git a/inference-engine/tests/functional/plugin/cpu/CMakeLists.txt b/inference-engine/tests/functional/plugin/cpu/CMakeLists.txt index c857007f60c..cde17b0a209 100644 --- a/inference-engine/tests/functional/plugin/cpu/CMakeLists.txt +++ b/inference-engine/tests/functional/plugin/cpu/CMakeLists.txt @@ -11,7 +11,7 @@ target_link_libraries(cpuSpecificRtInfo PRIVATE ngraph) set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} ${IE_MAIN_SOURCE_DIR}/src/mkldnn_plugin) set(DEPENDENCIES MKLDNNPlugin AutoPlugin) set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo) -if (NGRAPH_ONNX_IMPORT_ENABLE) +if (NGRAPH_ONNX_FRONTEND_ENABLE) list(APPEND INCLUDES "${OpenVINO_SOURCE_DIR}/docs/onnx_custom_op") list(APPEND LINK_LIBRARIES onnx_custom_op) list(APPEND DEPENDENCIES template_extension onnx_custom_op) diff --git a/inference-engine/tests/unit/CMakeLists.txt b/inference-engine/tests/unit/CMakeLists.txt index f729ae89e1b..6bc4ac995be 100644 --- a/inference-engine/tests/unit/CMakeLists.txt +++ b/inference-engine/tests/unit/CMakeLists.txt @@ -24,6 +24,6 @@ if (ENABLE_MYRIAD) add_subdirectory(vpu) endif () -if(NGRAPH_ONNX_IMPORT_ENABLE) +if(NGRAPH_ONNX_FRONTEND_ENABLE) add_subdirectory(frontends/onnx_import) endif() diff --git a/inference-engine/tests/unit/frontends/onnx_import/CMakeLists.txt b/inference-engine/tests/unit/frontends/onnx_import/CMakeLists.txt index 6b44002c684..b7a8f3ae100 100644 --- a/inference-engine/tests/unit/frontends/onnx_import/CMakeLists.txt +++ 
b/inference-engine/tests/unit/frontends/onnx_import/CMakeLists.txt @@ -11,7 +11,7 @@ addIeTargetTest( gtest gtest_main commonTestUtils - onnx_importer + onnx_ngraph_frontend DEFINES ONNX_MODELS_DIR=\"${TEST_MODEL_ZOO}/onnx_import\" ADD_CPPLINT diff --git a/ngraph/frontend/CMakeLists.txt b/ngraph/frontend/CMakeLists.txt index 7bf186dd1ad..ef1bed270f1 100644 --- a/ngraph/frontend/CMakeLists.txt +++ b/ngraph/frontend/CMakeLists.txt @@ -6,7 +6,7 @@ set(FRONTEND_INSTALL_INCLUDE "${NGRAPH_INSTALL_INCLUDE}/ngraph/frontend") add_subdirectory(frontend_manager) -if (NGRAPH_ONNX_IMPORT_ENABLE) +if (NGRAPH_ONNX_FRONTEND_ENABLE) add_subdirectory(onnx) endif() diff --git a/ngraph/frontend/onnx/CMakeLists.txt b/ngraph/frontend/onnx/CMakeLists.txt index 5bf43f04931..fe8f7c31233 100644 --- a/ngraph/frontend/onnx/CMakeLists.txt +++ b/ngraph/frontend/onnx/CMakeLists.txt @@ -3,7 +3,4 @@ # add_subdirectory(onnx_common) -add_subdirectory(onnx_import) -if (NGRAPH_ONNX_FRONTEND_ENABLE) - add_subdirectory(frontend) -endif() +add_subdirectory(frontend) diff --git a/ngraph/frontend/onnx/frontend/CMakeLists.txt b/ngraph/frontend/onnx/frontend/CMakeLists.txt index 2daed8156b3..f348a690ae3 100644 --- a/ngraph/frontend/onnx/frontend/CMakeLists.txt +++ b/ngraph/frontend/onnx/frontend/CMakeLists.txt @@ -2,11 +2,23 @@ # SPDX-License-Identifier: Apache-2.0 # -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) -file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) - +set(ONNX_OPSET_VERSION 13 CACHE INTERNAL "Supported version of ONNX operator set") set(ONNX_FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) +file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp) +file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${ONNX_FRONTEND_INCLUDE_DIR}/*.hpp) + +# Remove disabled ops +list(REMOVE_ITEM LIBRARY_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/src/op/conv_integer.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/op/quant_conv.cpp + ) +list(REMOVE_ITEM LIBRARY_HEADERS + ${CMAKE_CURRENT_SOURCE_DIR}/src/op/conv_integer.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/op/quant_conv.hpp + ) + # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj @@ -20,29 +32,35 @@ add_library(ngraph::onnx_ngraph_frontend ALIAS onnx_ngraph_frontend) add_clang_format_target(onnx_ngraph_frontend_clang FOR_TARGETS onnx_ngraph_frontend) -ov_ncc_naming_style(FOR_TARGET onnx_ngraph_frontend - INCLUDE_DIRECTORY "${ONNX_FRONTEND_INCLUDE_DIR}" - ADDITIONAL_INCLUDE_DIRECTORIES - $) +# TODO: fix empty class name +#ov_ncc_naming_style(FOR_TARGET onnx_ngraph_frontend +# INCLUDE_DIRECTORY "${ONNX_FRONTEND_INCLUDE_DIR}" +# ADDITIONAL_INCLUDE_DIRECTORIES +# $) if(COMMAND ie_add_vs_version_file) ie_add_vs_version_file(NAME onnx_ngraph_frontend FILEDESCRIPTION "nGraph ONNX frontend library") endif() -target_link_libraries(onnx_ngraph_frontend PRIVATE onnx_importer frontend_manager) +target_link_libraries(onnx_ngraph_frontend PUBLIC ngraph PRIVATE frontend_manager ngraph::builder onnx_common inference_engine_transformations) target_include_directories(onnx_ngraph_frontend PUBLIC $ $) - target_include_directories(onnx_ngraph_frontend PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) +target_compile_definitions(onnx_ngraph_frontend PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION}) +if(NGRAPH_USE_PROTOBUF_LITE) + target_compile_definitions(onnx_ngraph_frontend PRIVATE NGRAPH_USE_PROTOBUF_LITE) +endif() + install(TARGETS 
onnx_ngraph_frontend EXPORT ngraphTargets RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) install(DIRECTORY ${ONNX_FRONTEND_INCLUDE_DIR}/onnx_frontend + ${ONNX_FRONTEND_INCLUDE_DIR}/onnx_import DESTINATION ${FRONTEND_INSTALL_INCLUDE} COMPONENT ngraph_dev FILES_MATCHING PATTERN "*.hpp") diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_import/core/node.hpp b/ngraph/frontend/onnx/frontend/include/onnx_import/core/node.hpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/include/onnx_import/core/node.hpp rename to ngraph/frontend/onnx/frontend/include/onnx_import/core/node.hpp index 1fefd73bada..e943863442a 100644 --- a/ngraph/frontend/onnx/onnx_import/include/onnx_import/core/node.hpp +++ b/ngraph/frontend/onnx/frontend/include/onnx_import/core/node.hpp @@ -9,7 +9,7 @@ #include "ngraph/except.hpp" #include "ngraph/node.hpp" -#include "onnx_import/utils/onnx_importer_visibility.hpp" +#include "onnx_import/onnx_importer_visibility.hpp" namespace ONNX_NAMESPACE { diff --git a/ngraph/frontend/onnx/onnx_import/src/core/null_node.hpp b/ngraph/frontend/onnx/frontend/include/onnx_import/core/null_node.hpp similarity index 92% rename from ngraph/frontend/onnx/onnx_import/src/core/null_node.hpp rename to ngraph/frontend/onnx/frontend/include/onnx_import/core/null_node.hpp index dd75770488c..72671635080 100644 --- a/ngraph/frontend/onnx/onnx_import/src/core/null_node.hpp +++ b/ngraph/frontend/onnx/frontend/include/onnx_import/core/null_node.hpp @@ -7,7 +7,7 @@ #include #include "ngraph/node.hpp" -#include "onnx_import/utils/onnx_importer_visibility.hpp" +#include "onnx_import/onnx_importer_visibility.hpp" namespace ngraph { @@ -31,7 +31,7 @@ namespace ngraph /// /// More: /// https://github.com/onnx/onnx/blob/master/docs/IR.md#optional-inputs-and-outputs - class NullNode : public ngraph::Node + class ONNX_IMPORTER_API NullNode : public ngraph::Node { public: static constexpr NodeTypeInfo type_info{"NullNode", 0}; diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_import/core/operator_set.hpp b/ngraph/frontend/onnx/frontend/include/onnx_import/core/operator_set.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/include/onnx_import/core/operator_set.hpp rename to ngraph/frontend/onnx/frontend/include/onnx_import/core/operator_set.hpp diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_import/onnx.hpp b/ngraph/frontend/onnx/frontend/include/onnx_import/onnx.hpp similarity index 90% rename from ngraph/frontend/onnx/onnx_import/include/onnx_import/onnx.hpp rename to ngraph/frontend/onnx/frontend/include/onnx_import/onnx.hpp index 54ee83d6708..a8f3d1ea605 100644 --- a/ngraph/frontend/onnx/onnx_import/include/onnx_import/onnx.hpp +++ b/ngraph/frontend/onnx/frontend/include/onnx_import/onnx.hpp @@ -11,7 +11,7 @@ #include #include "ngraph/function.hpp" -#include "utils/onnx_importer_visibility.hpp" +#include "onnx_importer_visibility.hpp" /// \brief Top level nGraph namespace. namespace ngraph @@ -72,12 +72,6 @@ namespace ngraph /// \return An nGraph function that represents a single output from the created graph. 
ONNX_IMPORTER_API std::shared_ptr import_onnx_model(const std::string& file_path); - - /// \brief Converts a nGraph function (onnx model decoded to function with - /// ONNXFrameworkNode(s)) - /// to a complete function with actual compute operations - ONNX_IMPORTER_API - void convert_decoded_function(std::shared_ptr function); } // namespace onnx_import } // namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_import/utils/onnx_importer_visibility.hpp b/ngraph/frontend/onnx/frontend/include/onnx_import/onnx_importer_visibility.hpp similarity index 75% rename from ngraph/frontend/onnx/onnx_import/include/onnx_import/utils/onnx_importer_visibility.hpp rename to ngraph/frontend/onnx/frontend/include/onnx_import/onnx_importer_visibility.hpp index 7187fc2bb08..da104a3cba2 100644 --- a/ngraph/frontend/onnx/onnx_import/include/onnx_import/utils/onnx_importer_visibility.hpp +++ b/ngraph/frontend/onnx/frontend/include/onnx_import/onnx_importer_visibility.hpp @@ -4,7 +4,7 @@ #include "ngraph/visibility.hpp" -#ifdef onnx_importer_EXPORTS // defined if we are building the ONNX_IMPORTER +#ifdef onnx_ngraph_frontend_EXPORTS #define ONNX_IMPORTER_API NGRAPH_HELPER_DLL_EXPORT #else #define ONNX_IMPORTER_API NGRAPH_HELPER_DLL_IMPORT diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_import/onnx_utils.hpp b/ngraph/frontend/onnx/frontend/include/onnx_import/onnx_utils.hpp similarity index 97% rename from ngraph/frontend/onnx/onnx_import/include/onnx_import/onnx_utils.hpp rename to ngraph/frontend/onnx/frontend/include/onnx_import/onnx_utils.hpp index c3998c3c24f..d6a45c064c6 100644 --- a/ngraph/frontend/onnx/onnx_import/include/onnx_import/onnx_utils.hpp +++ b/ngraph/frontend/onnx/frontend/include/onnx_import/onnx_utils.hpp @@ -8,7 +8,7 @@ #include #include "onnx_import/core/operator_set.hpp" -#include "utils/onnx_importer_visibility.hpp" +#include "onnx_importer_visibility.hpp" namespace ngraph { diff --git a/ngraph/frontend/onnx/onnx_import/src/core/attribute.cpp b/ngraph/frontend/onnx/frontend/src/core/attribute.cpp similarity index 96% rename from ngraph/frontend/onnx/onnx_import/src/core/attribute.cpp rename to ngraph/frontend/onnx/frontend/src/core/attribute.cpp index 204959b2fac..e273e3a1b7d 100644 --- a/ngraph/frontend/onnx/onnx_import/src/core/attribute.cpp +++ b/ngraph/frontend/onnx/frontend/src/core/attribute.cpp @@ -4,8 +4,8 @@ #include "core/attribute.hpp" #include "core/graph.hpp" +#include "core/model.hpp" #include "ngraph/log.hpp" -#include "onnx_import/core/model.hpp" namespace ngraph { diff --git a/ngraph/frontend/onnx/onnx_import/src/core/attribute.hpp b/ngraph/frontend/onnx/frontend/src/core/attribute.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/core/attribute.hpp rename to ngraph/frontend/onnx/frontend/src/core/attribute.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/core/graph.cpp b/ngraph/frontend/onnx/frontend/src/core/graph.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/core/graph.cpp rename to ngraph/frontend/onnx/frontend/src/core/graph.cpp index c844e89ede1..6dbc1162bb7 100644 --- a/ngraph/frontend/onnx/onnx_import/src/core/graph.cpp +++ b/ngraph/frontend/onnx/frontend/src/core/graph.cpp @@ -8,7 +8,6 @@ #include #include "core/graph.hpp" -#include "core/null_node.hpp" #include "core/value_info.hpp" #include "default_opset.hpp" #include "exceptions.hpp" @@ -17,6 +16,7 @@ #include "ngraph/provenance.hpp" #include "onnx_framework_node.hpp" #include "onnx_import/core/node.hpp" +#include 
"onnx_import/core/null_node.hpp" #include "utils/common.hpp" #include "utils/provenance_tag.hpp" diff --git a/ngraph/frontend/onnx/onnx_import/src/core/graph.hpp b/ngraph/frontend/onnx/frontend/src/core/graph.hpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/core/graph.hpp rename to ngraph/frontend/onnx/frontend/src/core/graph.hpp index fea67c3e146..e676a27d938 100644 --- a/ngraph/frontend/onnx/onnx_import/src/core/graph.hpp +++ b/ngraph/frontend/onnx/frontend/src/core/graph.hpp @@ -10,8 +10,8 @@ #include #include "core/graph_cache.hpp" +#include "core/model.hpp" #include "ngraph/op/parameter.hpp" -#include "onnx_import/core/model.hpp" #include "onnx_import/core/operator_set.hpp" namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/src/core/graph_cache.cpp b/ngraph/frontend/onnx/frontend/src/core/graph_cache.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/core/graph_cache.cpp rename to ngraph/frontend/onnx/frontend/src/core/graph_cache.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/core/graph_cache.hpp b/ngraph/frontend/onnx/frontend/src/core/graph_cache.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/core/graph_cache.hpp rename to ngraph/frontend/onnx/frontend/src/core/graph_cache.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/core/model.cpp b/ngraph/frontend/onnx/frontend/src/core/model.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/core/model.cpp rename to ngraph/frontend/onnx/frontend/src/core/model.cpp index c7c0993eda1..736c5f7da3e 100644 --- a/ngraph/frontend/onnx/onnx_import/src/core/model.cpp +++ b/ngraph/frontend/onnx/frontend/src/core/model.cpp @@ -4,9 +4,9 @@ #include +#include "core/model.hpp" #include "ngraph/log.hpp" #include "onnx_framework_node.hpp" -#include "onnx_import/core/model.hpp" #include "ops_bridge.hpp" namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_import/core/model.hpp b/ngraph/frontend/onnx/frontend/src/core/model.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/include/onnx_import/core/model.hpp rename to ngraph/frontend/onnx/frontend/src/core/model.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/core/node.cpp b/ngraph/frontend/onnx/frontend/src/core/node.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/core/node.cpp rename to ngraph/frontend/onnx/frontend/src/core/node.cpp index fbdcd8d2164..111d7b66e67 100644 --- a/ngraph/frontend/onnx/onnx_import/src/core/node.cpp +++ b/ngraph/frontend/onnx/frontend/src/core/node.cpp @@ -6,9 +6,9 @@ #include "core/attribute.hpp" #include "core/graph.hpp" -#include "core/null_node.hpp" #include "core/tensor.hpp" #include "onnx_import/core/node.hpp" +#include "onnx_import/core/null_node.hpp" namespace ngraph { diff --git a/ngraph/frontend/onnx/onnx_import/src/core/null_node.cpp b/ngraph/frontend/onnx/frontend/src/core/null_node.cpp similarity index 95% rename from ngraph/frontend/onnx/onnx_import/src/core/null_node.cpp rename to ngraph/frontend/onnx/frontend/src/core/null_node.cpp index eaaa1307b6c..45d0db6c160 100644 --- a/ngraph/frontend/onnx/onnx_import/src/core/null_node.cpp +++ b/ngraph/frontend/onnx/frontend/src/core/null_node.cpp @@ -5,7 +5,7 @@ #include #include "ngraph/node.hpp" -#include "null_node.hpp" +#include "onnx_import/core/null_node.hpp" namespace ngraph { diff --git a/ngraph/frontend/onnx/onnx_import/src/core/sparse_tensor.hpp b/ngraph/frontend/onnx/frontend/src/core/sparse_tensor.hpp similarity 
index 100% rename from ngraph/frontend/onnx/onnx_import/src/core/sparse_tensor.hpp rename to ngraph/frontend/onnx/frontend/src/core/sparse_tensor.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/core/tensor.hpp b/ngraph/frontend/onnx/frontend/src/core/tensor.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/core/tensor.hpp rename to ngraph/frontend/onnx/frontend/src/core/tensor.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/core/transform.cpp b/ngraph/frontend/onnx/frontend/src/core/transform.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/core/transform.cpp rename to ngraph/frontend/onnx/frontend/src/core/transform.cpp index 30ce7d78b83..119a602ef30 100644 --- a/ngraph/frontend/onnx/onnx_import/src/core/transform.cpp +++ b/ngraph/frontend/onnx/frontend/src/core/transform.cpp @@ -5,8 +5,8 @@ #include #include +#include "core/model.hpp" #include "core/transform.hpp" -#include "onnx_import/core/model.hpp" #include "ngraph/file_util.hpp" #include "ops_bridge.hpp" diff --git a/ngraph/frontend/onnx/onnx_import/src/core/transform.hpp b/ngraph/frontend/onnx/frontend/src/core/transform.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/core/transform.hpp rename to ngraph/frontend/onnx/frontend/src/core/transform.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/core/value_info.hpp b/ngraph/frontend/onnx/frontend/src/core/value_info.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/core/value_info.hpp rename to ngraph/frontend/onnx/frontend/src/core/value_info.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/default_opset.hpp b/ngraph/frontend/onnx/frontend/src/default_opset.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/default_opset.hpp rename to ngraph/frontend/onnx/frontend/src/default_opset.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/detail/subgraph_extraction.cpp b/ngraph/frontend/onnx/frontend/src/detail/subgraph_extraction.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/detail/subgraph_extraction.cpp rename to ngraph/frontend/onnx/frontend/src/detail/subgraph_extraction.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/detail/subgraph_extraction.hpp b/ngraph/frontend/onnx/frontend/src/detail/subgraph_extraction.hpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/detail/subgraph_extraction.hpp rename to ngraph/frontend/onnx/frontend/src/detail/subgraph_extraction.hpp index 90763194847..0f2099c50b5 100644 --- a/ngraph/frontend/onnx/onnx_import/src/detail/subgraph_extraction.hpp +++ b/ngraph/frontend/onnx/frontend/src/detail/subgraph_extraction.hpp @@ -10,7 +10,7 @@ #include #include -#include "onnx_editor/editor_types.hpp" +#include "editor_types.hpp" namespace ONNX_NAMESPACE { diff --git a/ngraph/frontend/onnx/onnx_import/src/edge_mapper.cpp b/ngraph/frontend/onnx/frontend/src/edge_mapper.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/edge_mapper.cpp rename to ngraph/frontend/onnx/frontend/src/edge_mapper.cpp index c42316034cd..df8f909621e 100644 --- a/ngraph/frontend/onnx/onnx_import/src/edge_mapper.cpp +++ b/ngraph/frontend/onnx/frontend/src/edge_mapper.cpp @@ -5,9 +5,9 @@ #include #include +#include "edge_mapper.hpp" #include "ngraph/check.hpp" #include "ngraph/except.hpp" -#include "onnx_editor/edge_mapper.hpp" using namespace ngraph; using namespace ngraph::onnx_editor; diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_editor/edge_mapper.hpp 
b/ngraph/frontend/onnx/frontend/src/edge_mapper.hpp similarity index 87% rename from ngraph/frontend/onnx/onnx_import/include/onnx_editor/edge_mapper.hpp rename to ngraph/frontend/onnx/frontend/src/edge_mapper.hpp index 9c5e2e45f42..8f844028a21 100644 --- a/ngraph/frontend/onnx/onnx_import/include/onnx_editor/edge_mapper.hpp +++ b/ngraph/frontend/onnx/frontend/src/edge_mapper.hpp @@ -8,7 +8,7 @@ #include #include -#include "onnx_editor/editor_types.hpp" +#include "editor_types.hpp" namespace ONNX_NAMESPACE { @@ -26,7 +26,7 @@ namespace ngraph class EdgeMapper { public: - ONNX_IMPORTER_API EdgeMapper() = default; + EdgeMapper() = default; /// \brief Creates an edge mapper based on a GraphProto object. /// @@ -51,8 +51,7 @@ namespace ngraph /// /// \param input An EditorInput helper structure created based on a input name /// or a input index. - ONNX_IMPORTER_API InputEdge find_input_edge(const EditorNode& node, - const EditorInput& input) const; + InputEdge find_input_edge(const EditorNode& node, const EditorInput& input) const; /// \brief Returns an OutputEdge based on a node (node name or output name) /// and an output (output name or output index). @@ -69,8 +68,7 @@ namespace ngraph /// /// \param output An EditorOutput helper structure created based on a output name /// or a output index. - ONNX_IMPORTER_API OutputEdge find_output_edge(const EditorNode& node, - const EditorOutput& output) const; + OutputEdge find_output_edge(const EditorNode& node, const EditorOutput& output) const; /// \brief Returns an OutputEdge based on a output name. /// @@ -78,7 +76,7 @@ namespace ngraph /// /// \param output_name A node output name. /// - ONNX_IMPORTER_API OutputEdge find_output_edge(const std::string& output_name) const; + OutputEdge find_output_edge(const std::string& output_name) const; /// \brief Returns a vector of InputEdges which consume an output of a node /// determined by provided output name. @@ -87,8 +85,7 @@ namespace ngraph /// /// \param output_name A node output name. /// - ONNX_IMPORTER_API std::vector - find_output_consumers(const std::string& output_name) const; + std::vector find_output_consumers(const std::string& output_name) const; /// \brief Returns true if a provided node is correct (exists in a graph) /// and is not ambiguous (identification of an ONNX node can be ambiguous @@ -97,7 +94,7 @@ namespace ngraph /// \param node An EditorNode helper structure created based on a node name /// or a node output name. /// - ONNX_IMPORTER_API bool is_correct_and_unambiguous_node(const EditorNode& node) const; + bool is_correct_and_unambiguous_node(const EditorNode& node) const; /// \brief Returns true if a provided tensor name is correct (exists in a graph). 
/// diff --git a/ngraph/frontend/onnx/onnx_import/src/editor.cpp b/ngraph/frontend/onnx/frontend/src/editor.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/editor.cpp rename to ngraph/frontend/onnx/frontend/src/editor.cpp index 81737b8a9ed..507a8c7ad3b 100644 --- a/ngraph/frontend/onnx/onnx_import/src/editor.cpp +++ b/ngraph/frontend/onnx/frontend/src/editor.cpp @@ -7,12 +7,12 @@ #include #include "detail/subgraph_extraction.hpp" +#include "edge_mapper.hpp" +#include "editor.hpp" #include "ngraph/log.hpp" #include "onnx_common/parser.hpp" #include "onnx_common/utils.hpp" -#include "onnx_editor/edge_mapper.hpp" -#include "onnx_editor/editor.hpp" -#include "onnx_import/utils/onnx_internal.hpp" +#include "utils/onnx_internal.hpp" using namespace ngraph; using namespace ngraph::onnx_editor; diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_editor/editor.hpp b/ngraph/frontend/onnx/frontend/src/editor.hpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/include/onnx_editor/editor.hpp rename to ngraph/frontend/onnx/frontend/src/editor.hpp index 8eda1e19ee8..50358332962 100644 --- a/ngraph/frontend/onnx/onnx_import/include/onnx_editor/editor.hpp +++ b/ngraph/frontend/onnx/frontend/src/editor.hpp @@ -8,12 +8,12 @@ #include #include +#include "editor_types.hpp" #include "ngraph/function.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/partial_shape.hpp" #include "ngraph/type/element_type.hpp" -#include "onnx_editor/editor.hpp" -#include "onnx_editor/editor_types.hpp" +#include "onnx_import/onnx_importer_visibility.hpp" namespace ngraph { diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_editor/editor_types.hpp b/ngraph/frontend/onnx/frontend/src/editor_types.hpp similarity index 96% rename from ngraph/frontend/onnx/onnx_import/include/onnx_editor/editor_types.hpp rename to ngraph/frontend/onnx/frontend/src/editor_types.hpp index 9c2fccd2078..16e4828236c 100644 --- a/ngraph/frontend/onnx/onnx_import/include/onnx_editor/editor_types.hpp +++ b/ngraph/frontend/onnx/frontend/src/editor_types.hpp @@ -7,8 +7,6 @@ #include #include -#include "onnx_import/utils/onnx_importer_visibility.hpp" - namespace ngraph { enum class EdgeType @@ -87,7 +85,7 @@ namespace ngraph /// /// The optional argument "new_input_name" can be used to set a custom input name /// which can be created during cutting a graph. 
- struct ONNX_IMPORTER_API EditorInput + struct EditorInput { EditorInput() = delete; EditorInput(std::string input_name, std::string new_input_name = "") @@ -112,7 +110,7 @@ namespace ngraph /// ----(in_A)----> | test_node | /// +-----------+ ---(out2)---> /// You can indicate out2 as EditorOutput("out2") or EditorOutput(1) - struct ONNX_IMPORTER_API EditorOutput + struct EditorOutput { EditorOutput() = delete; EditorOutput(std::string output_name) @@ -137,7 +135,7 @@ namespace ngraph /// You can indicate test_node by name as EditorNode("test_node") /// or by assigned output as EditorNode(EditorOutput("out1")) /// or EditorNode(EditorOutput("out2")) - struct ONNX_IMPORTER_API EditorNode + struct EditorNode { EditorNode(std::string node_name) : m_node_name{std::move(node_name)} diff --git a/ngraph/frontend/onnx/onnx_import/src/exceptions.cpp b/ngraph/frontend/onnx/frontend/src/exceptions.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/exceptions.cpp rename to ngraph/frontend/onnx/frontend/src/exceptions.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/exceptions.hpp b/ngraph/frontend/onnx/frontend/src/exceptions.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/exceptions.hpp rename to ngraph/frontend/onnx/frontend/src/exceptions.hpp diff --git a/ngraph/frontend/onnx/frontend/src/frontend.cpp b/ngraph/frontend/onnx/frontend/src/frontend.cpp index cad2fa337ce..96d2dde5d33 100644 --- a/ngraph/frontend/onnx/frontend/src/frontend.cpp +++ b/ngraph/frontend/onnx/frontend/src/frontend.cpp @@ -7,6 +7,7 @@ #include #include #include +#include using namespace ngraph; using namespace ngraph::frontend; @@ -19,7 +20,7 @@ extern "C" ONNX_FRONTEND_API FrontEndVersion GetAPIVersion() extern "C" ONNX_FRONTEND_API void* GetFrontEndData() { FrontEndPluginInfo* res = new FrontEndPluginInfo(); - res->m_name = "onnx"; + res->m_name = "onnx_experimental"; res->m_creator = []() { return std::make_shared(); }; return res; } @@ -44,7 +45,7 @@ std::shared_ptr FrontEndONNX::convert(InputModel::Ptr model) c void FrontEndONNX::convert(std::shared_ptr partially_converted) const { - onnx_import::convert_decoded_function(partially_converted); + onnx_import::detail::convert_decoded_function(partially_converted); } std::shared_ptr FrontEndONNX::decode(InputModel::Ptr model) const diff --git a/ngraph/frontend/onnx/frontend/src/input_model.hpp b/ngraph/frontend/onnx/frontend/src/input_model.hpp index 47d632e657a..1447db19438 100644 --- a/ngraph/frontend/onnx/frontend/src/input_model.hpp +++ b/ngraph/frontend/onnx/frontend/src/input_model.hpp @@ -4,8 +4,8 @@ #pragma once +#include #include -#include namespace ngraph { diff --git a/ngraph/frontend/onnx/onnx_import/src/onnx.cpp b/ngraph/frontend/onnx/frontend/src/onnx.cpp similarity index 97% rename from ngraph/frontend/onnx/onnx_import/src/onnx.cpp rename to ngraph/frontend/onnx/frontend/src/onnx.cpp index 35d12346c04..7fc75a75168 100644 --- a/ngraph/frontend/onnx/onnx_import/src/onnx.cpp +++ b/ngraph/frontend/onnx/frontend/src/onnx.cpp @@ -9,8 +9,8 @@ #include "ngraph/except.hpp" #include "onnx_common/parser.hpp" #include "onnx_import/onnx.hpp" -#include "onnx_import/utils/onnx_internal.hpp" #include "ops_bridge.hpp" +#include "utils/onnx_internal.hpp" namespace ngraph { diff --git a/ngraph/frontend/onnx/onnx_import/src/onnx_framework_node.cpp b/ngraph/frontend/onnx/frontend/src/onnx_framework_node.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/onnx_framework_node.cpp rename to 
ngraph/frontend/onnx/frontend/src/onnx_framework_node.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/onnx_framework_node.hpp b/ngraph/frontend/onnx/frontend/src/onnx_framework_node.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/onnx_framework_node.hpp rename to ngraph/frontend/onnx/frontend/src/onnx_framework_node.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/onnx_utils.cpp b/ngraph/frontend/onnx/frontend/src/onnx_utils.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/onnx_utils.cpp rename to ngraph/frontend/onnx/frontend/src/onnx_utils.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/abs.hpp b/ngraph/frontend/onnx/frontend/src/op/abs.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/abs.hpp rename to ngraph/frontend/onnx/frontend/src/op/abs.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/acos.hpp b/ngraph/frontend/onnx/frontend/src/op/acos.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/acos.hpp rename to ngraph/frontend/onnx/frontend/src/op/acos.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/acosh.hpp b/ngraph/frontend/onnx/frontend/src/op/acosh.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/acosh.hpp rename to ngraph/frontend/onnx/frontend/src/op/acosh.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/add.cpp b/ngraph/frontend/onnx/frontend/src/op/add.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/add.cpp rename to ngraph/frontend/onnx/frontend/src/op/add.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/add.hpp b/ngraph/frontend/onnx/frontend/src/op/add.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/add.hpp rename to ngraph/frontend/onnx/frontend/src/op/add.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/affine.cpp b/ngraph/frontend/onnx/frontend/src/op/affine.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/affine.cpp rename to ngraph/frontend/onnx/frontend/src/op/affine.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/affine.hpp b/ngraph/frontend/onnx/frontend/src/op/affine.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/affine.hpp rename to ngraph/frontend/onnx/frontend/src/op/affine.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/and.hpp b/ngraph/frontend/onnx/frontend/src/op/and.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/and.hpp rename to ngraph/frontend/onnx/frontend/src/op/and.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/argmax.cpp b/ngraph/frontend/onnx/frontend/src/op/argmax.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/argmax.cpp rename to ngraph/frontend/onnx/frontend/src/op/argmax.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/argmax.hpp b/ngraph/frontend/onnx/frontend/src/op/argmax.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/argmax.hpp rename to ngraph/frontend/onnx/frontend/src/op/argmax.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/argmin.cpp b/ngraph/frontend/onnx/frontend/src/op/argmin.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/argmin.cpp rename to ngraph/frontend/onnx/frontend/src/op/argmin.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/argmin.hpp b/ngraph/frontend/onnx/frontend/src/op/argmin.hpp similarity index 100% rename from 
ngraph/frontend/onnx/onnx_import/src/op/argmin.hpp rename to ngraph/frontend/onnx/frontend/src/op/argmin.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/asin.hpp b/ngraph/frontend/onnx/frontend/src/op/asin.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/asin.hpp rename to ngraph/frontend/onnx/frontend/src/op/asin.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/asinh.hpp b/ngraph/frontend/onnx/frontend/src/op/asinh.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/asinh.hpp rename to ngraph/frontend/onnx/frontend/src/op/asinh.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/atan.hpp b/ngraph/frontend/onnx/frontend/src/op/atan.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/atan.hpp rename to ngraph/frontend/onnx/frontend/src/op/atan.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/atanh.hpp b/ngraph/frontend/onnx/frontend/src/op/atanh.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/atanh.hpp rename to ngraph/frontend/onnx/frontend/src/op/atanh.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/average_pool.cpp b/ngraph/frontend/onnx/frontend/src/op/average_pool.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/average_pool.cpp rename to ngraph/frontend/onnx/frontend/src/op/average_pool.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/average_pool.hpp b/ngraph/frontend/onnx/frontend/src/op/average_pool.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/average_pool.hpp rename to ngraph/frontend/onnx/frontend/src/op/average_pool.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/batch_norm.cpp b/ngraph/frontend/onnx/frontend/src/op/batch_norm.cpp similarity index 98% rename from ngraph/frontend/onnx/onnx_import/src/op/batch_norm.cpp rename to ngraph/frontend/onnx/frontend/src/op/batch_norm.cpp index 81cfb910c74..84f5500310a 100644 --- a/ngraph/frontend/onnx/onnx_import/src/op/batch_norm.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/batch_norm.cpp @@ -5,9 +5,9 @@ #include #include -#include "core/null_node.hpp" #include "default_opset.hpp" #include "exceptions.hpp" +#include "onnx_import/core/null_node.hpp" #include "op/batch_norm.hpp" namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/src/op/batch_norm.hpp b/ngraph/frontend/onnx/frontend/src/op/batch_norm.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/batch_norm.hpp rename to ngraph/frontend/onnx/frontend/src/op/batch_norm.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/bitshift.cpp b/ngraph/frontend/onnx/frontend/src/op/bitshift.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/bitshift.cpp rename to ngraph/frontend/onnx/frontend/src/op/bitshift.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/bitshift.hpp b/ngraph/frontend/onnx/frontend/src/op/bitshift.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/bitshift.hpp rename to ngraph/frontend/onnx/frontend/src/op/bitshift.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/cast.cpp b/ngraph/frontend/onnx/frontend/src/op/cast.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/cast.cpp rename to ngraph/frontend/onnx/frontend/src/op/cast.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/cast.hpp b/ngraph/frontend/onnx/frontend/src/op/cast.hpp similarity index 100% rename from 
ngraph/frontend/onnx/onnx_import/src/op/cast.hpp rename to ngraph/frontend/onnx/frontend/src/op/cast.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/ceil.hpp b/ngraph/frontend/onnx/frontend/src/op/ceil.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/ceil.hpp rename to ngraph/frontend/onnx/frontend/src/op/ceil.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/clip.cpp b/ngraph/frontend/onnx/frontend/src/op/clip.cpp similarity index 98% rename from ngraph/frontend/onnx/onnx_import/src/op/clip.cpp rename to ngraph/frontend/onnx/frontend/src/op/clip.cpp index 819af6fc579..2f8c6cdea1d 100644 --- a/ngraph/frontend/onnx/onnx_import/src/op/clip.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/clip.cpp @@ -5,9 +5,9 @@ #include #include -#include "core/null_node.hpp" #include "default_opset.hpp" #include "ngraph/builder/make_constant.hpp" +#include "onnx_import/core/null_node.hpp" #include "op/clip.hpp" namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/src/op/clip.hpp b/ngraph/frontend/onnx/frontend/src/op/clip.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/clip.hpp rename to ngraph/frontend/onnx/frontend/src/op/clip.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/compress.cpp b/ngraph/frontend/onnx/frontend/src/op/compress.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/compress.cpp rename to ngraph/frontend/onnx/frontend/src/op/compress.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/compress.hpp b/ngraph/frontend/onnx/frontend/src/op/compress.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/compress.hpp rename to ngraph/frontend/onnx/frontend/src/op/compress.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/concat.cpp b/ngraph/frontend/onnx/frontend/src/op/concat.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/concat.cpp rename to ngraph/frontend/onnx/frontend/src/op/concat.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/concat.hpp b/ngraph/frontend/onnx/frontend/src/op/concat.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/concat.hpp rename to ngraph/frontend/onnx/frontend/src/op/concat.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/constant.cpp b/ngraph/frontend/onnx/frontend/src/op/constant.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/constant.cpp rename to ngraph/frontend/onnx/frontend/src/op/constant.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/constant.hpp b/ngraph/frontend/onnx/frontend/src/op/constant.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/constant.hpp rename to ngraph/frontend/onnx/frontend/src/op/constant.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/constant_fill.cpp b/ngraph/frontend/onnx/frontend/src/op/constant_fill.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/constant_fill.cpp rename to ngraph/frontend/onnx/frontend/src/op/constant_fill.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/constant_fill.hpp b/ngraph/frontend/onnx/frontend/src/op/constant_fill.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/constant_fill.hpp rename to ngraph/frontend/onnx/frontend/src/op/constant_fill.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/constant_of_shape.cpp b/ngraph/frontend/onnx/frontend/src/op/constant_of_shape.cpp similarity index 100% rename from 
ngraph/frontend/onnx/onnx_import/src/op/constant_of_shape.cpp rename to ngraph/frontend/onnx/frontend/src/op/constant_of_shape.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/constant_of_shape.hpp b/ngraph/frontend/onnx/frontend/src/op/constant_of_shape.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/constant_of_shape.hpp rename to ngraph/frontend/onnx/frontend/src/op/constant_of_shape.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/conv.cpp b/ngraph/frontend/onnx/frontend/src/op/conv.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/conv.cpp rename to ngraph/frontend/onnx/frontend/src/op/conv.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/conv.hpp b/ngraph/frontend/onnx/frontend/src/op/conv.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/conv.hpp rename to ngraph/frontend/onnx/frontend/src/op/conv.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/conv_integer.cpp b/ngraph/frontend/onnx/frontend/src/op/conv_integer.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/conv_integer.cpp rename to ngraph/frontend/onnx/frontend/src/op/conv_integer.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/conv_integer.hpp b/ngraph/frontend/onnx/frontend/src/op/conv_integer.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/conv_integer.hpp rename to ngraph/frontend/onnx/frontend/src/op/conv_integer.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/conv_transpose.cpp b/ngraph/frontend/onnx/frontend/src/op/conv_transpose.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/conv_transpose.cpp rename to ngraph/frontend/onnx/frontend/src/op/conv_transpose.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/conv_transpose.hpp b/ngraph/frontend/onnx/frontend/src/op/conv_transpose.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/conv_transpose.hpp rename to ngraph/frontend/onnx/frontend/src/op/conv_transpose.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/cos.cpp b/ngraph/frontend/onnx/frontend/src/op/cos.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/cos.cpp rename to ngraph/frontend/onnx/frontend/src/op/cos.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/cos.hpp b/ngraph/frontend/onnx/frontend/src/op/cos.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/cos.hpp rename to ngraph/frontend/onnx/frontend/src/op/cos.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/cosh.cpp b/ngraph/frontend/onnx/frontend/src/op/cosh.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/cosh.cpp rename to ngraph/frontend/onnx/frontend/src/op/cosh.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/cosh.hpp b/ngraph/frontend/onnx/frontend/src/op/cosh.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/cosh.hpp rename to ngraph/frontend/onnx/frontend/src/op/cosh.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/cum_sum.cpp b/ngraph/frontend/onnx/frontend/src/op/cum_sum.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/cum_sum.cpp rename to ngraph/frontend/onnx/frontend/src/op/cum_sum.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/cum_sum.hpp b/ngraph/frontend/onnx/frontend/src/op/cum_sum.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/cum_sum.hpp rename to 
ngraph/frontend/onnx/frontend/src/op/cum_sum.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/depth_to_space.cpp b/ngraph/frontend/onnx/frontend/src/op/depth_to_space.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/depth_to_space.cpp rename to ngraph/frontend/onnx/frontend/src/op/depth_to_space.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/depth_to_space.hpp b/ngraph/frontend/onnx/frontend/src/op/depth_to_space.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/depth_to_space.hpp rename to ngraph/frontend/onnx/frontend/src/op/depth_to_space.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/dequantize_linear.cpp b/ngraph/frontend/onnx/frontend/src/op/dequantize_linear.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/op/dequantize_linear.cpp rename to ngraph/frontend/onnx/frontend/src/op/dequantize_linear.cpp index 0d4826e3edd..70864849bfb 100644 --- a/ngraph/frontend/onnx/onnx_import/src/op/dequantize_linear.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/dequantize_linear.cpp @@ -5,13 +5,13 @@ #include #include -#include "core/null_node.hpp" #include "default_opset.hpp" #include "ngraph/axis_set.hpp" #include "ngraph/builder/make_constant.hpp" #include "ngraph/op/convert.hpp" #include "ngraph/shape.hpp" #include "ngraph/validation_util.hpp" +#include "onnx_import/core/null_node.hpp" #include "op/dequantize_linear.hpp" #include "utils/common.hpp" diff --git a/ngraph/frontend/onnx/onnx_import/src/op/dequantize_linear.hpp b/ngraph/frontend/onnx/frontend/src/op/dequantize_linear.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/dequantize_linear.hpp rename to ngraph/frontend/onnx/frontend/src/op/dequantize_linear.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/div.hpp b/ngraph/frontend/onnx/frontend/src/op/div.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/div.hpp rename to ngraph/frontend/onnx/frontend/src/op/div.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/dropout.cpp b/ngraph/frontend/onnx/frontend/src/op/dropout.cpp similarity index 98% rename from ngraph/frontend/onnx/onnx_import/src/op/dropout.cpp rename to ngraph/frontend/onnx/frontend/src/op/dropout.cpp index 6666e67c10f..b94bc7fa3f3 100644 --- a/ngraph/frontend/onnx/onnx_import/src/op/dropout.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/dropout.cpp @@ -4,11 +4,11 @@ #include -#include "core/null_node.hpp" #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/log.hpp" #include "ngraph/node.hpp" +#include "onnx_import/core/null_node.hpp" #include "op/dropout.hpp" namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/src/op/dropout.hpp b/ngraph/frontend/onnx/frontend/src/op/dropout.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/dropout.hpp rename to ngraph/frontend/onnx/frontend/src/op/dropout.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/einsum.cpp b/ngraph/frontend/onnx/frontend/src/op/einsum.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/einsum.cpp rename to ngraph/frontend/onnx/frontend/src/op/einsum.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/einsum.hpp b/ngraph/frontend/onnx/frontend/src/op/einsum.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/einsum.hpp rename to ngraph/frontend/onnx/frontend/src/op/einsum.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/elu.cpp 
b/ngraph/frontend/onnx/frontend/src/op/elu.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/elu.cpp rename to ngraph/frontend/onnx/frontend/src/op/elu.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/elu.hpp b/ngraph/frontend/onnx/frontend/src/op/elu.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/elu.hpp rename to ngraph/frontend/onnx/frontend/src/op/elu.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/equal.hpp b/ngraph/frontend/onnx/frontend/src/op/equal.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/equal.hpp rename to ngraph/frontend/onnx/frontend/src/op/equal.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/erf.hpp b/ngraph/frontend/onnx/frontend/src/op/erf.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/erf.hpp rename to ngraph/frontend/onnx/frontend/src/op/erf.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/exp.hpp b/ngraph/frontend/onnx/frontend/src/op/exp.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/exp.hpp rename to ngraph/frontend/onnx/frontend/src/op/exp.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/expand.cpp b/ngraph/frontend/onnx/frontend/src/op/expand.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/expand.cpp rename to ngraph/frontend/onnx/frontend/src/op/expand.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/expand.hpp b/ngraph/frontend/onnx/frontend/src/op/expand.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/expand.hpp rename to ngraph/frontend/onnx/frontend/src/op/expand.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/eye_like.cpp b/ngraph/frontend/onnx/frontend/src/op/eye_like.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/eye_like.cpp rename to ngraph/frontend/onnx/frontend/src/op/eye_like.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/eye_like.hpp b/ngraph/frontend/onnx/frontend/src/op/eye_like.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/eye_like.hpp rename to ngraph/frontend/onnx/frontend/src/op/eye_like.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/flatten.cpp b/ngraph/frontend/onnx/frontend/src/op/flatten.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/flatten.cpp rename to ngraph/frontend/onnx/frontend/src/op/flatten.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/flatten.hpp b/ngraph/frontend/onnx/frontend/src/op/flatten.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/flatten.hpp rename to ngraph/frontend/onnx/frontend/src/op/flatten.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/floor.hpp b/ngraph/frontend/onnx/frontend/src/op/floor.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/floor.hpp rename to ngraph/frontend/onnx/frontend/src/op/floor.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/gather.hpp b/ngraph/frontend/onnx/frontend/src/op/gather.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/gather.hpp rename to ngraph/frontend/onnx/frontend/src/op/gather.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/gather_elements.hpp b/ngraph/frontend/onnx/frontend/src/op/gather_elements.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/gather_elements.hpp rename to ngraph/frontend/onnx/frontend/src/op/gather_elements.hpp diff --git 
a/ngraph/frontend/onnx/onnx_import/src/op/gather_nd.cpp b/ngraph/frontend/onnx/frontend/src/op/gather_nd.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/gather_nd.cpp rename to ngraph/frontend/onnx/frontend/src/op/gather_nd.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/gather_nd.hpp b/ngraph/frontend/onnx/frontend/src/op/gather_nd.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/gather_nd.hpp rename to ngraph/frontend/onnx/frontend/src/op/gather_nd.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/gemm.cpp b/ngraph/frontend/onnx/frontend/src/op/gemm.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/gemm.cpp rename to ngraph/frontend/onnx/frontend/src/op/gemm.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/gemm.hpp b/ngraph/frontend/onnx/frontend/src/op/gemm.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/gemm.hpp rename to ngraph/frontend/onnx/frontend/src/op/gemm.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/global_average_pool.cpp b/ngraph/frontend/onnx/frontend/src/op/global_average_pool.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/global_average_pool.cpp rename to ngraph/frontend/onnx/frontend/src/op/global_average_pool.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/global_average_pool.hpp b/ngraph/frontend/onnx/frontend/src/op/global_average_pool.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/global_average_pool.hpp rename to ngraph/frontend/onnx/frontend/src/op/global_average_pool.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/global_max_pool.cpp b/ngraph/frontend/onnx/frontend/src/op/global_max_pool.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/global_max_pool.cpp rename to ngraph/frontend/onnx/frontend/src/op/global_max_pool.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/global_max_pool.hpp b/ngraph/frontend/onnx/frontend/src/op/global_max_pool.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/global_max_pool.hpp rename to ngraph/frontend/onnx/frontend/src/op/global_max_pool.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/greater.hpp b/ngraph/frontend/onnx/frontend/src/op/greater.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/greater.hpp rename to ngraph/frontend/onnx/frontend/src/op/greater.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/gru.cpp b/ngraph/frontend/onnx/frontend/src/op/gru.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/op/gru.cpp rename to ngraph/frontend/onnx/frontend/src/op/gru.cpp index 397095e5be0..cf485256133 100644 --- a/ngraph/frontend/onnx/onnx_import/src/op/gru.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/gru.cpp @@ -5,11 +5,11 @@ #include #include -#include "core/null_node.hpp" #include "default_opset.hpp" #include "ngraph/builder/reshape.hpp" #include "ngraph/builder/split.hpp" #include "ngraph/shape.hpp" +#include "onnx_import/core/null_node.hpp" #include "op/gru.hpp" #include "utils/recurrent.hpp" diff --git a/ngraph/frontend/onnx/onnx_import/src/op/gru.hpp b/ngraph/frontend/onnx/frontend/src/op/gru.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/gru.hpp rename to ngraph/frontend/onnx/frontend/src/op/gru.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/hard_sigmoid.cpp b/ngraph/frontend/onnx/frontend/src/op/hard_sigmoid.cpp similarity 
index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/hard_sigmoid.cpp rename to ngraph/frontend/onnx/frontend/src/op/hard_sigmoid.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/hard_sigmoid.hpp b/ngraph/frontend/onnx/frontend/src/op/hard_sigmoid.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/hard_sigmoid.hpp rename to ngraph/frontend/onnx/frontend/src/op/hard_sigmoid.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/hardmax.cpp b/ngraph/frontend/onnx/frontend/src/op/hardmax.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/hardmax.cpp rename to ngraph/frontend/onnx/frontend/src/op/hardmax.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/hardmax.hpp b/ngraph/frontend/onnx/frontend/src/op/hardmax.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/hardmax.hpp rename to ngraph/frontend/onnx/frontend/src/op/hardmax.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/identity.hpp b/ngraph/frontend/onnx/frontend/src/op/identity.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/identity.hpp rename to ngraph/frontend/onnx/frontend/src/op/identity.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/image_scaler.cpp b/ngraph/frontend/onnx/frontend/src/op/image_scaler.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/image_scaler.cpp rename to ngraph/frontend/onnx/frontend/src/op/image_scaler.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/image_scaler.hpp b/ngraph/frontend/onnx/frontend/src/op/image_scaler.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/image_scaler.hpp rename to ngraph/frontend/onnx/frontend/src/op/image_scaler.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/instance_norm.cpp b/ngraph/frontend/onnx/frontend/src/op/instance_norm.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/instance_norm.cpp rename to ngraph/frontend/onnx/frontend/src/op/instance_norm.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/instance_norm.hpp b/ngraph/frontend/onnx/frontend/src/op/instance_norm.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/instance_norm.hpp rename to ngraph/frontend/onnx/frontend/src/op/instance_norm.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/leaky_relu.cpp b/ngraph/frontend/onnx/frontend/src/op/leaky_relu.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/leaky_relu.cpp rename to ngraph/frontend/onnx/frontend/src/op/leaky_relu.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/leaky_relu.hpp b/ngraph/frontend/onnx/frontend/src/op/leaky_relu.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/leaky_relu.hpp rename to ngraph/frontend/onnx/frontend/src/op/leaky_relu.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/less.hpp b/ngraph/frontend/onnx/frontend/src/op/less.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/less.hpp rename to ngraph/frontend/onnx/frontend/src/op/less.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/log.cpp b/ngraph/frontend/onnx/frontend/src/op/log.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/log.cpp rename to ngraph/frontend/onnx/frontend/src/op/log.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/log.hpp b/ngraph/frontend/onnx/frontend/src/op/log.hpp similarity index 100% rename from 
ngraph/frontend/onnx/onnx_import/src/op/log.hpp rename to ngraph/frontend/onnx/frontend/src/op/log.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/log_softmax.cpp b/ngraph/frontend/onnx/frontend/src/op/log_softmax.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/log_softmax.cpp rename to ngraph/frontend/onnx/frontend/src/op/log_softmax.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/log_softmax.hpp b/ngraph/frontend/onnx/frontend/src/op/log_softmax.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/log_softmax.hpp rename to ngraph/frontend/onnx/frontend/src/op/log_softmax.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/loop.cpp b/ngraph/frontend/onnx/frontend/src/op/loop.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/op/loop.cpp rename to ngraph/frontend/onnx/frontend/src/op/loop.cpp index dbe4f68d8c9..05938b1c587 100644 --- a/ngraph/frontend/onnx/onnx_import/src/op/loop.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/loop.cpp @@ -8,12 +8,12 @@ #include #include "core/graph.hpp" -#include "core/null_node.hpp" #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/function.hpp" #include "ngraph/log.hpp" #include "ngraph/op/util/op_types.hpp" +#include "onnx_import/core/null_node.hpp" #include "utils/reshape.hpp" namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/src/op/loop.hpp b/ngraph/frontend/onnx/frontend/src/op/loop.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/loop.hpp rename to ngraph/frontend/onnx/frontend/src/op/loop.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/lp_norm.cpp b/ngraph/frontend/onnx/frontend/src/op/lp_norm.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/lp_norm.cpp rename to ngraph/frontend/onnx/frontend/src/op/lp_norm.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/lp_norm.hpp b/ngraph/frontend/onnx/frontend/src/op/lp_norm.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/lp_norm.hpp rename to ngraph/frontend/onnx/frontend/src/op/lp_norm.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/lp_pool.cpp b/ngraph/frontend/onnx/frontend/src/op/lp_pool.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/lp_pool.cpp rename to ngraph/frontend/onnx/frontend/src/op/lp_pool.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/lp_pool.hpp b/ngraph/frontend/onnx/frontend/src/op/lp_pool.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/lp_pool.hpp rename to ngraph/frontend/onnx/frontend/src/op/lp_pool.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/lrn.cpp b/ngraph/frontend/onnx/frontend/src/op/lrn.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/lrn.cpp rename to ngraph/frontend/onnx/frontend/src/op/lrn.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/lrn.hpp b/ngraph/frontend/onnx/frontend/src/op/lrn.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/lrn.hpp rename to ngraph/frontend/onnx/frontend/src/op/lrn.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/lstm.cpp b/ngraph/frontend/onnx/frontend/src/op/lstm.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/op/lstm.cpp rename to ngraph/frontend/onnx/frontend/src/op/lstm.cpp index 8ac038e1221..be935de5ca6 100644 --- a/ngraph/frontend/onnx/onnx_import/src/op/lstm.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/lstm.cpp @@ 
-9,7 +9,6 @@ #include #include -#include "core/null_node.hpp" #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/builder/reshape.hpp" @@ -23,6 +22,7 @@ #include "ngraph/opsets/opset3.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" +#include "onnx_import/core/null_node.hpp" #include "op/lstm.hpp" namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/src/op/lstm.hpp b/ngraph/frontend/onnx/frontend/src/op/lstm.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/lstm.hpp rename to ngraph/frontend/onnx/frontend/src/op/lstm.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/matmul.hpp b/ngraph/frontend/onnx/frontend/src/op/matmul.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/matmul.hpp rename to ngraph/frontend/onnx/frontend/src/op/matmul.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/max.hpp b/ngraph/frontend/onnx/frontend/src/op/max.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/max.hpp rename to ngraph/frontend/onnx/frontend/src/op/max.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/max_pool.cpp b/ngraph/frontend/onnx/frontend/src/op/max_pool.cpp similarity index 93% rename from ngraph/frontend/onnx/onnx_import/src/op/max_pool.cpp rename to ngraph/frontend/onnx/frontend/src/op/max_pool.cpp index 2507af1459e..da107c48cc0 100644 --- a/ngraph/frontend/onnx/onnx_import/src/op/max_pool.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/max_pool.cpp @@ -4,8 +4,8 @@ #include -#include "core/null_node.hpp" #include "ngraph/log.hpp" +#include "onnx_import/core/null_node.hpp" #include "op/max_pool.hpp" #include "utils/pooling_factory.hpp" diff --git a/ngraph/frontend/onnx/onnx_import/src/op/max_pool.hpp b/ngraph/frontend/onnx/frontend/src/op/max_pool.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/max_pool.hpp rename to ngraph/frontend/onnx/frontend/src/op/max_pool.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/mean.cpp b/ngraph/frontend/onnx/frontend/src/op/mean.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/mean.cpp rename to ngraph/frontend/onnx/frontend/src/op/mean.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/mean.hpp b/ngraph/frontend/onnx/frontend/src/op/mean.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/mean.hpp rename to ngraph/frontend/onnx/frontend/src/op/mean.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/mean_variance_normalization.cpp b/ngraph/frontend/onnx/frontend/src/op/mean_variance_normalization.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/mean_variance_normalization.cpp rename to ngraph/frontend/onnx/frontend/src/op/mean_variance_normalization.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/mean_variance_normalization.hpp b/ngraph/frontend/onnx/frontend/src/op/mean_variance_normalization.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/mean_variance_normalization.hpp rename to ngraph/frontend/onnx/frontend/src/op/mean_variance_normalization.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/min.hpp b/ngraph/frontend/onnx/frontend/src/op/min.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/min.hpp rename to ngraph/frontend/onnx/frontend/src/op/min.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/mod.cpp b/ngraph/frontend/onnx/frontend/src/op/mod.cpp similarity index 100% rename from 
ngraph/frontend/onnx/onnx_import/src/op/mod.cpp rename to ngraph/frontend/onnx/frontend/src/op/mod.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/mod.hpp b/ngraph/frontend/onnx/frontend/src/op/mod.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/mod.hpp rename to ngraph/frontend/onnx/frontend/src/op/mod.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/mul.hpp b/ngraph/frontend/onnx/frontend/src/op/mul.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/mul.hpp rename to ngraph/frontend/onnx/frontend/src/op/mul.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/neg.hpp b/ngraph/frontend/onnx/frontend/src/op/neg.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/neg.hpp rename to ngraph/frontend/onnx/frontend/src/op/neg.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/non_max_suppression.cpp b/ngraph/frontend/onnx/frontend/src/op/non_max_suppression.cpp similarity index 98% rename from ngraph/frontend/onnx/onnx_import/src/op/non_max_suppression.cpp rename to ngraph/frontend/onnx/frontend/src/op/non_max_suppression.cpp index 2c5da242582..ca96e0356c4 100644 --- a/ngraph/frontend/onnx/onnx_import/src/op/non_max_suppression.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/non_max_suppression.cpp @@ -4,11 +4,11 @@ #include -#include "core/null_node.hpp" #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/op/non_max_suppression.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "onnx_import/core/null_node.hpp" #include "op/non_max_suppression.hpp" #include "utils/reshape.hpp" diff --git a/ngraph/frontend/onnx/onnx_import/src/op/non_max_suppression.hpp b/ngraph/frontend/onnx/frontend/src/op/non_max_suppression.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/non_max_suppression.hpp rename to ngraph/frontend/onnx/frontend/src/op/non_max_suppression.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/non_zero.cpp b/ngraph/frontend/onnx/frontend/src/op/non_zero.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/non_zero.cpp rename to ngraph/frontend/onnx/frontend/src/op/non_zero.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/non_zero.hpp b/ngraph/frontend/onnx/frontend/src/op/non_zero.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/non_zero.hpp rename to ngraph/frontend/onnx/frontend/src/op/non_zero.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/not.hpp b/ngraph/frontend/onnx/frontend/src/op/not.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/not.hpp rename to ngraph/frontend/onnx/frontend/src/op/not.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/onehot.cpp b/ngraph/frontend/onnx/frontend/src/op/onehot.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/onehot.cpp rename to ngraph/frontend/onnx/frontend/src/op/onehot.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/onehot.hpp b/ngraph/frontend/onnx/frontend/src/op/onehot.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/onehot.hpp rename to ngraph/frontend/onnx/frontend/src/op/onehot.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/or.hpp b/ngraph/frontend/onnx/frontend/src/op/or.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/or.hpp rename to ngraph/frontend/onnx/frontend/src/op/or.hpp diff --git 
a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/deformable_conv_2d.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/deformable_conv_2d.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/deformable_conv_2d.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/deformable_conv_2d.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/detection_output.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/detection_output.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/detection_output.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/detection_output.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp diff --git 
a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/fake_quantize.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/fake_quantize.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/fake_quantize.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/fake_quantize.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp diff 
--git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/group_norm.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/group_norm.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/group_norm.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/group_norm.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/normalize.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/normalize.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/normalize.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/normalize.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/prior_box.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/prior_box.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/prior_box.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/prior_box.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/swish.cpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/swish.cpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/swish.hpp b/ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/org.openvinotoolkit/swish.hpp rename to ngraph/frontend/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/pad.cpp b/ngraph/frontend/onnx/frontend/src/op/pad.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/pad.cpp rename to ngraph/frontend/onnx/frontend/src/op/pad.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/pad.hpp b/ngraph/frontend/onnx/frontend/src/op/pad.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/pad.hpp rename to ngraph/frontend/onnx/frontend/src/op/pad.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/pow.cpp b/ngraph/frontend/onnx/frontend/src/op/pow.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/pow.cpp rename to ngraph/frontend/onnx/frontend/src/op/pow.cpp diff --git 
a/ngraph/frontend/onnx/onnx_import/src/op/pow.hpp b/ngraph/frontend/onnx/frontend/src/op/pow.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/pow.hpp rename to ngraph/frontend/onnx/frontend/src/op/pow.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/prelu.cpp b/ngraph/frontend/onnx/frontend/src/op/prelu.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/prelu.cpp rename to ngraph/frontend/onnx/frontend/src/op/prelu.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/prelu.hpp b/ngraph/frontend/onnx/frontend/src/op/prelu.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/prelu.hpp rename to ngraph/frontend/onnx/frontend/src/op/prelu.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/quant_conv.cpp b/ngraph/frontend/onnx/frontend/src/op/quant_conv.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/quant_conv.cpp rename to ngraph/frontend/onnx/frontend/src/op/quant_conv.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/quant_conv.hpp b/ngraph/frontend/onnx/frontend/src/op/quant_conv.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/quant_conv.hpp rename to ngraph/frontend/onnx/frontend/src/op/quant_conv.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/quantize_linear.cpp b/ngraph/frontend/onnx/frontend/src/op/quantize_linear.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/quantize_linear.cpp rename to ngraph/frontend/onnx/frontend/src/op/quantize_linear.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/quantize_linear.hpp b/ngraph/frontend/onnx/frontend/src/op/quantize_linear.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/quantize_linear.hpp rename to ngraph/frontend/onnx/frontend/src/op/quantize_linear.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/range.cpp b/ngraph/frontend/onnx/frontend/src/op/range.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/range.cpp rename to ngraph/frontend/onnx/frontend/src/op/range.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/range.hpp b/ngraph/frontend/onnx/frontend/src/op/range.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/range.hpp rename to ngraph/frontend/onnx/frontend/src/op/range.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/reciprocal.cpp b/ngraph/frontend/onnx/frontend/src/op/reciprocal.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/reciprocal.cpp rename to ngraph/frontend/onnx/frontend/src/op/reciprocal.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/reciprocal.hpp b/ngraph/frontend/onnx/frontend/src/op/reciprocal.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/reciprocal.hpp rename to ngraph/frontend/onnx/frontend/src/op/reciprocal.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/reduce.cpp b/ngraph/frontend/onnx/frontend/src/op/reduce.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/reduce.cpp rename to ngraph/frontend/onnx/frontend/src/op/reduce.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/reduce.hpp b/ngraph/frontend/onnx/frontend/src/op/reduce.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/reduce.hpp rename to ngraph/frontend/onnx/frontend/src/op/reduce.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/relu.hpp b/ngraph/frontend/onnx/frontend/src/op/relu.hpp 
similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/relu.hpp rename to ngraph/frontend/onnx/frontend/src/op/relu.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/reshape.cpp b/ngraph/frontend/onnx/frontend/src/op/reshape.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/reshape.cpp rename to ngraph/frontend/onnx/frontend/src/op/reshape.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/reshape.hpp b/ngraph/frontend/onnx/frontend/src/op/reshape.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/reshape.hpp rename to ngraph/frontend/onnx/frontend/src/op/reshape.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/resize.cpp b/ngraph/frontend/onnx/frontend/src/op/resize.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/resize.cpp rename to ngraph/frontend/onnx/frontend/src/op/resize.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/resize.hpp b/ngraph/frontend/onnx/frontend/src/op/resize.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/resize.hpp rename to ngraph/frontend/onnx/frontend/src/op/resize.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/reverse_sequence.cpp b/ngraph/frontend/onnx/frontend/src/op/reverse_sequence.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/reverse_sequence.cpp rename to ngraph/frontend/onnx/frontend/src/op/reverse_sequence.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/reverse_sequence.hpp b/ngraph/frontend/onnx/frontend/src/op/reverse_sequence.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/reverse_sequence.hpp rename to ngraph/frontend/onnx/frontend/src/op/reverse_sequence.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/rnn.cpp b/ngraph/frontend/onnx/frontend/src/op/rnn.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/rnn.cpp rename to ngraph/frontend/onnx/frontend/src/op/rnn.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/rnn.hpp b/ngraph/frontend/onnx/frontend/src/op/rnn.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/rnn.hpp rename to ngraph/frontend/onnx/frontend/src/op/rnn.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/roi_align.cpp b/ngraph/frontend/onnx/frontend/src/op/roi_align.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/roi_align.cpp rename to ngraph/frontend/onnx/frontend/src/op/roi_align.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/roi_align.hpp b/ngraph/frontend/onnx/frontend/src/op/roi_align.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/roi_align.hpp rename to ngraph/frontend/onnx/frontend/src/op/roi_align.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/round.cpp b/ngraph/frontend/onnx/frontend/src/op/round.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/round.cpp rename to ngraph/frontend/onnx/frontend/src/op/round.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/round.hpp b/ngraph/frontend/onnx/frontend/src/op/round.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/round.hpp rename to ngraph/frontend/onnx/frontend/src/op/round.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/scatter_elements.cpp b/ngraph/frontend/onnx/frontend/src/op/scatter_elements.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/scatter_elements.cpp rename to 
ngraph/frontend/onnx/frontend/src/op/scatter_elements.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/scatter_elements.hpp b/ngraph/frontend/onnx/frontend/src/op/scatter_elements.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/scatter_elements.hpp rename to ngraph/frontend/onnx/frontend/src/op/scatter_elements.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/scatter_nd.cpp b/ngraph/frontend/onnx/frontend/src/op/scatter_nd.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/scatter_nd.cpp rename to ngraph/frontend/onnx/frontend/src/op/scatter_nd.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/scatter_nd.hpp b/ngraph/frontend/onnx/frontend/src/op/scatter_nd.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/scatter_nd.hpp rename to ngraph/frontend/onnx/frontend/src/op/scatter_nd.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/selu.cpp b/ngraph/frontend/onnx/frontend/src/op/selu.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/selu.cpp rename to ngraph/frontend/onnx/frontend/src/op/selu.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/selu.hpp b/ngraph/frontend/onnx/frontend/src/op/selu.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/selu.hpp rename to ngraph/frontend/onnx/frontend/src/op/selu.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/shape.cpp b/ngraph/frontend/onnx/frontend/src/op/shape.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/shape.cpp rename to ngraph/frontend/onnx/frontend/src/op/shape.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/shape.hpp b/ngraph/frontend/onnx/frontend/src/op/shape.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/shape.hpp rename to ngraph/frontend/onnx/frontend/src/op/shape.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/shrink.cpp b/ngraph/frontend/onnx/frontend/src/op/shrink.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/shrink.cpp rename to ngraph/frontend/onnx/frontend/src/op/shrink.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/shrink.hpp b/ngraph/frontend/onnx/frontend/src/op/shrink.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/shrink.hpp rename to ngraph/frontend/onnx/frontend/src/op/shrink.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/sigmoid.hpp b/ngraph/frontend/onnx/frontend/src/op/sigmoid.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/sigmoid.hpp rename to ngraph/frontend/onnx/frontend/src/op/sigmoid.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/sign.hpp b/ngraph/frontend/onnx/frontend/src/op/sign.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/sign.hpp rename to ngraph/frontend/onnx/frontend/src/op/sign.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/sin.hpp b/ngraph/frontend/onnx/frontend/src/op/sin.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/sin.hpp rename to ngraph/frontend/onnx/frontend/src/op/sin.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/sinh.hpp b/ngraph/frontend/onnx/frontend/src/op/sinh.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/sinh.hpp rename to ngraph/frontend/onnx/frontend/src/op/sinh.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/size.cpp b/ngraph/frontend/onnx/frontend/src/op/size.cpp similarity index 100% 
rename from ngraph/frontend/onnx/onnx_import/src/op/size.cpp rename to ngraph/frontend/onnx/frontend/src/op/size.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/size.hpp b/ngraph/frontend/onnx/frontend/src/op/size.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/size.hpp rename to ngraph/frontend/onnx/frontend/src/op/size.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/slice.cpp b/ngraph/frontend/onnx/frontend/src/op/slice.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/op/slice.cpp rename to ngraph/frontend/onnx/frontend/src/op/slice.cpp index e344a849390..69a57b64caa 100644 --- a/ngraph/frontend/onnx/onnx_import/src/op/slice.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/slice.cpp @@ -6,13 +6,13 @@ #include #include -#include "core/null_node.hpp" #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/node.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/util/op_types.hpp" #include "ngraph/validation_util.hpp" +#include "onnx_import/core/null_node.hpp" #include "op/gather.hpp" #include "utils/common.hpp" diff --git a/ngraph/frontend/onnx/onnx_import/src/op/slice.hpp b/ngraph/frontend/onnx/frontend/src/op/slice.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/slice.hpp rename to ngraph/frontend/onnx/frontend/src/op/slice.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/softmax.cpp b/ngraph/frontend/onnx/frontend/src/op/softmax.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/softmax.cpp rename to ngraph/frontend/onnx/frontend/src/op/softmax.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/softmax.hpp b/ngraph/frontend/onnx/frontend/src/op/softmax.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/softmax.hpp rename to ngraph/frontend/onnx/frontend/src/op/softmax.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/softplus.cpp b/ngraph/frontend/onnx/frontend/src/op/softplus.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/softplus.cpp rename to ngraph/frontend/onnx/frontend/src/op/softplus.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/softplus.hpp b/ngraph/frontend/onnx/frontend/src/op/softplus.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/softplus.hpp rename to ngraph/frontend/onnx/frontend/src/op/softplus.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/softsign.cpp b/ngraph/frontend/onnx/frontend/src/op/softsign.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/softsign.cpp rename to ngraph/frontend/onnx/frontend/src/op/softsign.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/softsign.hpp b/ngraph/frontend/onnx/frontend/src/op/softsign.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/softsign.hpp rename to ngraph/frontend/onnx/frontend/src/op/softsign.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/space_to_depth.cpp b/ngraph/frontend/onnx/frontend/src/op/space_to_depth.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/space_to_depth.cpp rename to ngraph/frontend/onnx/frontend/src/op/space_to_depth.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/space_to_depth.hpp b/ngraph/frontend/onnx/frontend/src/op/space_to_depth.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/space_to_depth.hpp rename to ngraph/frontend/onnx/frontend/src/op/space_to_depth.hpp diff --git 
a/ngraph/frontend/onnx/onnx_import/src/op/split.cpp b/ngraph/frontend/onnx/frontend/src/op/split.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/split.cpp rename to ngraph/frontend/onnx/frontend/src/op/split.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/split.hpp b/ngraph/frontend/onnx/frontend/src/op/split.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/split.hpp rename to ngraph/frontend/onnx/frontend/src/op/split.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/sqrt.hpp b/ngraph/frontend/onnx/frontend/src/op/sqrt.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/sqrt.hpp rename to ngraph/frontend/onnx/frontend/src/op/sqrt.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/squeeze.cpp b/ngraph/frontend/onnx/frontend/src/op/squeeze.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/squeeze.cpp rename to ngraph/frontend/onnx/frontend/src/op/squeeze.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/squeeze.hpp b/ngraph/frontend/onnx/frontend/src/op/squeeze.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/squeeze.hpp rename to ngraph/frontend/onnx/frontend/src/op/squeeze.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/sub.hpp b/ngraph/frontend/onnx/frontend/src/op/sub.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/sub.hpp rename to ngraph/frontend/onnx/frontend/src/op/sub.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/sum.hpp b/ngraph/frontend/onnx/frontend/src/op/sum.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/sum.hpp rename to ngraph/frontend/onnx/frontend/src/op/sum.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/tan.hpp b/ngraph/frontend/onnx/frontend/src/op/tan.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/tan.hpp rename to ngraph/frontend/onnx/frontend/src/op/tan.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/tanh.hpp b/ngraph/frontend/onnx/frontend/src/op/tanh.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/tanh.hpp rename to ngraph/frontend/onnx/frontend/src/op/tanh.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/thresholded_relu.cpp b/ngraph/frontend/onnx/frontend/src/op/thresholded_relu.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/thresholded_relu.cpp rename to ngraph/frontend/onnx/frontend/src/op/thresholded_relu.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/thresholded_relu.hpp b/ngraph/frontend/onnx/frontend/src/op/thresholded_relu.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/thresholded_relu.hpp rename to ngraph/frontend/onnx/frontend/src/op/thresholded_relu.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/tile.cpp b/ngraph/frontend/onnx/frontend/src/op/tile.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/tile.cpp rename to ngraph/frontend/onnx/frontend/src/op/tile.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/tile.hpp b/ngraph/frontend/onnx/frontend/src/op/tile.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/tile.hpp rename to ngraph/frontend/onnx/frontend/src/op/tile.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/topk.cpp b/ngraph/frontend/onnx/frontend/src/op/topk.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/topk.cpp rename to 
ngraph/frontend/onnx/frontend/src/op/topk.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/topk.hpp b/ngraph/frontend/onnx/frontend/src/op/topk.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/topk.hpp rename to ngraph/frontend/onnx/frontend/src/op/topk.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/transpose.cpp b/ngraph/frontend/onnx/frontend/src/op/transpose.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/transpose.cpp rename to ngraph/frontend/onnx/frontend/src/op/transpose.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/transpose.hpp b/ngraph/frontend/onnx/frontend/src/op/transpose.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/transpose.hpp rename to ngraph/frontend/onnx/frontend/src/op/transpose.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/unsqueeze.cpp b/ngraph/frontend/onnx/frontend/src/op/unsqueeze.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/unsqueeze.cpp rename to ngraph/frontend/onnx/frontend/src/op/unsqueeze.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/unsqueeze.hpp b/ngraph/frontend/onnx/frontend/src/op/unsqueeze.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/unsqueeze.hpp rename to ngraph/frontend/onnx/frontend/src/op/unsqueeze.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/upsample.cpp b/ngraph/frontend/onnx/frontend/src/op/upsample.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/upsample.cpp rename to ngraph/frontend/onnx/frontend/src/op/upsample.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/upsample.hpp b/ngraph/frontend/onnx/frontend/src/op/upsample.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/upsample.hpp rename to ngraph/frontend/onnx/frontend/src/op/upsample.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/where.hpp b/ngraph/frontend/onnx/frontend/src/op/where.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/where.hpp rename to ngraph/frontend/onnx/frontend/src/op/where.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/op/xor.hpp b/ngraph/frontend/onnx/frontend/src/op/xor.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/op/xor.hpp rename to ngraph/frontend/onnx/frontend/src/op/xor.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/ops_bridge.cpp b/ngraph/frontend/onnx/frontend/src/ops_bridge.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/ops_bridge.cpp rename to ngraph/frontend/onnx/frontend/src/ops_bridge.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/ops_bridge.hpp b/ngraph/frontend/onnx/frontend/src/ops_bridge.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/ops_bridge.hpp rename to ngraph/frontend/onnx/frontend/src/ops_bridge.hpp diff --git a/ngraph/frontend/onnx/frontend/src/place.hpp b/ngraph/frontend/onnx/frontend/src/place.hpp index c356e1e8f54..b868461c635 100644 --- a/ngraph/frontend/onnx/frontend/src/place.hpp +++ b/ngraph/frontend/onnx/frontend/src/place.hpp @@ -4,9 +4,9 @@ #pragma once -#include +#include #include -#include +#include namespace ngraph { @@ -53,7 +53,8 @@ namespace ngraph class PlaceTensorONNX : public Place { public: - PlaceTensorONNX(const std::string& name, std::shared_ptr editor); + PlaceTensorONNX(const std::string& name, + std::shared_ptr editor); std::vector get_names() const override; diff --git 
a/ngraph/frontend/onnx/onnx_import/src/precomp.hpp b/ngraph/frontend/onnx/frontend/src/precomp.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/precomp.hpp rename to ngraph/frontend/onnx/frontend/src/precomp.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/arg_min_max_factory.cpp b/ngraph/frontend/onnx/frontend/src/utils/arg_min_max_factory.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/arg_min_max_factory.cpp rename to ngraph/frontend/onnx/frontend/src/utils/arg_min_max_factory.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/arg_min_max_factory.hpp b/ngraph/frontend/onnx/frontend/src/utils/arg_min_max_factory.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/arg_min_max_factory.hpp rename to ngraph/frontend/onnx/frontend/src/utils/arg_min_max_factory.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/common.cpp b/ngraph/frontend/onnx/frontend/src/utils/common.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/common.cpp rename to ngraph/frontend/onnx/frontend/src/utils/common.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/common.hpp b/ngraph/frontend/onnx/frontend/src/utils/common.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/common.hpp rename to ngraph/frontend/onnx/frontend/src/utils/common.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/convpool.cpp b/ngraph/frontend/onnx/frontend/src/utils/convpool.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/convpool.cpp rename to ngraph/frontend/onnx/frontend/src/utils/convpool.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/convpool.hpp b/ngraph/frontend/onnx/frontend/src/utils/convpool.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/convpool.hpp rename to ngraph/frontend/onnx/frontend/src/utils/convpool.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/onnx_internal.cpp b/ngraph/frontend/onnx/frontend/src/utils/onnx_internal.cpp similarity index 97% rename from ngraph/frontend/onnx/onnx_import/src/utils/onnx_internal.cpp rename to ngraph/frontend/onnx/frontend/src/utils/onnx_internal.cpp index ce1943df013..197a6428dde 100644 --- a/ngraph/frontend/onnx/onnx_import/src/utils/onnx_internal.cpp +++ b/ngraph/frontend/onnx/frontend/src/utils/onnx_internal.cpp @@ -5,11 +5,11 @@ #include #include "core/graph.hpp" -#include "core/null_node.hpp" +#include "core/model.hpp" #include "core/transform.hpp" #include "onnx_framework_node.hpp" -#include "onnx_import/core/model.hpp" -#include "onnx_import/utils/onnx_internal.hpp" +#include "onnx_import/core/null_node.hpp" +#include "utils/onnx_internal.hpp" namespace ngraph { diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_import/utils/onnx_internal.hpp b/ngraph/frontend/onnx/frontend/src/utils/onnx_internal.hpp similarity index 89% rename from ngraph/frontend/onnx/onnx_import/include/onnx_import/utils/onnx_internal.hpp rename to ngraph/frontend/onnx/frontend/src/utils/onnx_internal.hpp index 6f9adcf5c64..feba7be2316 100644 --- a/ngraph/frontend/onnx/onnx_import/include/onnx_import/utils/onnx_internal.hpp +++ b/ngraph/frontend/onnx/frontend/src/utils/onnx_internal.hpp @@ -8,7 +8,6 @@ #include #include "ngraph/function.hpp" -#include "onnx_import/utils/onnx_importer_visibility.hpp" namespace ONNX_NAMESPACE { @@ -48,11 +47,15 @@ namespace ngraph /// external files. 
/// /// \return A nGraph function with ONNXFrameworkNodes - ONNX_IMPORTER_API std::shared_ptr decode_to_framework_nodes(std::shared_ptr model_proto, const std::string& model_path); + /// \brief Converts a nGraph function (onnx model decoded to function with + /// ONNXFrameworkNode(s)) + /// to a complete function with actual compute operations + /// + /// \return A nGraph function. void convert_decoded_function(std::shared_ptr function); } // namespace detail } // namespace onnx_import diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/pooling_factory.cpp b/ngraph/frontend/onnx/frontend/src/utils/pooling_factory.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/pooling_factory.cpp rename to ngraph/frontend/onnx/frontend/src/utils/pooling_factory.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/pooling_factory.hpp b/ngraph/frontend/onnx/frontend/src/utils/pooling_factory.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/pooling_factory.hpp rename to ngraph/frontend/onnx/frontend/src/utils/pooling_factory.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/provenance_tag.cpp b/ngraph/frontend/onnx/frontend/src/utils/provenance_tag.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/provenance_tag.cpp rename to ngraph/frontend/onnx/frontend/src/utils/provenance_tag.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/provenance_tag.hpp b/ngraph/frontend/onnx/frontend/src/utils/provenance_tag.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/provenance_tag.hpp rename to ngraph/frontend/onnx/frontend/src/utils/provenance_tag.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/recurrent.cpp b/ngraph/frontend/onnx/frontend/src/utils/recurrent.cpp similarity index 99% rename from ngraph/frontend/onnx/onnx_import/src/utils/recurrent.cpp rename to ngraph/frontend/onnx/frontend/src/utils/recurrent.cpp index fc9409413ed..d95a38ee799 100644 --- a/ngraph/frontend/onnx/onnx_import/src/utils/recurrent.cpp +++ b/ngraph/frontend/onnx/frontend/src/utils/recurrent.cpp @@ -6,13 +6,13 @@ #include #include -#include "core/null_node.hpp" #include "default_opset.hpp" #include "ngraph/builder/autobroadcast.hpp" #include "ngraph/builder/reshape.hpp" #include "ngraph/builder/split.hpp" #include "ngraph/check.hpp" #include "ngraph/enum_names.hpp" +#include "onnx_import/core/null_node.hpp" #include "utils/recurrent.hpp" namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/recurrent.hpp b/ngraph/frontend/onnx/frontend/src/utils/recurrent.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/recurrent.hpp rename to ngraph/frontend/onnx/frontend/src/utils/recurrent.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/reshape.cpp b/ngraph/frontend/onnx/frontend/src/utils/reshape.cpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/reshape.cpp rename to ngraph/frontend/onnx/frontend/src/utils/reshape.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/reshape.hpp b/ngraph/frontend/onnx/frontend/src/utils/reshape.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/reshape.hpp rename to ngraph/frontend/onnx/frontend/src/utils/reshape.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/tensor_external_data.cpp b/ngraph/frontend/onnx/frontend/src/utils/tensor_external_data.cpp similarity index 100% rename from 
ngraph/frontend/onnx/onnx_import/src/utils/tensor_external_data.cpp rename to ngraph/frontend/onnx/frontend/src/utils/tensor_external_data.cpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/tensor_external_data.hpp b/ngraph/frontend/onnx/frontend/src/utils/tensor_external_data.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/tensor_external_data.hpp rename to ngraph/frontend/onnx/frontend/src/utils/tensor_external_data.hpp diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/variadic.hpp b/ngraph/frontend/onnx/frontend/src/utils/variadic.hpp similarity index 100% rename from ngraph/frontend/onnx/onnx_import/src/utils/variadic.hpp rename to ngraph/frontend/onnx/frontend/src/utils/variadic.hpp diff --git a/ngraph/frontend/onnx/onnx_import/CMakeLists.txt b/ngraph/frontend/onnx/onnx_import/CMakeLists.txt deleted file mode 100644 index d7ee771e855..00000000000 --- a/ngraph/frontend/onnx/onnx_import/CMakeLists.txt +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -set(ONNX_OPSET_VERSION 13 CACHE INTERNAL "Supported version of ONNX operator set") - -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) -file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp) -file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) - -# Remove disabled ops -list(REMOVE_ITEM LIBRARY_SRC - ${CMAKE_CURRENT_SOURCE_DIR}/src/op/conv_integer.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/src/op/quant_conv.cpp - ) -list(REMOVE_ITEM LIBRARY_HEADERS - ${CMAKE_CURRENT_SOURCE_DIR}/src/op/conv_integer.hpp - ${CMAKE_CURRENT_SOURCE_DIR}/src/op/quant_conv.hpp - ) - -set(ONNX_IMPORT_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) - -# Create named folders for the sources within the .vcproj -# Empty name lists them directly under the .vcproj - -source_group("src" FILES ${LIBRARY_SRC}) -source_group("include" FILES ${LIBRARY_HEADERS}) -source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS}) - -# Create shared library -add_library(onnx_importer SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}) -add_library(ngraph::onnx_importer ALIAS onnx_importer) - -# TOD: fix empty class name -# ov_ncc_naming_style(FOR_TARGET onnx_importer -# INCLUDE_DIRECTORY "${ONNX_IMPORT_INCLUDE_DIR}" -# DEFINITIONS -# $ -# ADDITIONAL_INCLUDE_DIRECTORIES -# $) - -add_clang_format_target(onnx_importer_clang FOR_TARGETS onnx_importer) - -if(COMMAND ie_add_vs_version_file) - ie_add_vs_version_file(NAME onnx_importer - FILEDESCRIPTION "nGraph ONNX importer library") -endif() - -if(COMMAND ie_faster_build) - ie_faster_build(onnx_importer - UNITY - PCH PRIVATE "src/precomp.hpp" - ) -endif() - -target_link_libraries(onnx_importer PRIVATE onnx_common ngraph::builder inference_engine_transformations - PUBLIC ngraph) - -target_include_directories(onnx_importer PUBLIC $ - $) - -target_include_directories(onnx_importer PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) - -target_compile_definitions(onnx_importer PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION}) - -target_include_directories(onnx_importer PUBLIC $) -target_include_directories(onnx_importer PRIVATE ${ONNX_EDITOR_SRC_DIR}) - -if(NGRAPH_USE_PROTOBUF_LITE) - target_compile_definitions(onnx_importer PRIVATE NGRAPH_USE_PROTOBUF_LITE) -endif() - -if(OV_COMPILER_IS_CLANG) - target_compile_options(onnx_importer PRIVATE -Wno-undef) -endif() - -install(TARGETS onnx_importer EXPORT ngraphTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} 
COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) - -install(DIRECTORY ${ONNX_IMPORT_INCLUDE_DIR}/onnx_import - DESTINATION ${FRONTEND_INSTALL_INCLUDE} - COMPONENT ngraph_dev - FILES_MATCHING PATTERN "*.hpp") - -export(TARGETS onnx_importer NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") diff --git a/ngraph/frontend/onnx/onnx_import/src/core/model.hpp b/ngraph/frontend/onnx/onnx_import/src/core/model.hpp deleted file mode 100644 index 82a6e0c4f10..00000000000 --- a/ngraph/frontend/onnx/onnx_import/src/core/model.hpp +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "onnx_import/core/operator_set.hpp" - -namespace ngraph -{ - namespace onnx_import - { - /// \brief Type of container which stores opset version and domain in ONNX format - using OpsetImports = - ::google::protobuf::RepeatedPtrField; - - std::string get_node_domain(const ONNX_NAMESPACE::NodeProto& node_proto); - - std::int64_t get_opset_version(const ONNX_NAMESPACE::ModelProto& model_proto, - const std::string& domain); - - class Model - { - public: - Model() = delete; - explicit Model(std::shared_ptr model_proto); - - Model(const Model&) = delete; - Model(Model&&) = delete; - - Model& operator=(const Model&) = delete; - Model& operator=(Model&&) = delete; - - const std::string& get_producer_name() const { return m_model_proto->producer_name(); } - const ONNX_NAMESPACE::GraphProto& get_graph() const { return m_model_proto->graph(); } - std::int64_t get_model_version() const { return m_model_proto->model_version(); } - const OpsetImports& get_opset_imports() const; - const std::string& get_producer_version() const - { - return m_model_proto->producer_version(); - } - - /// \brief Access an operator object by its type name and domain name - /// The function will return the operator object if it exists, or report an error - /// in case of domain or operator absence. - /// \param name type name of the operator object, - /// \param domain domain name of the operator object. - /// \return Reference to the operator object. - /// \throw error::UnknownDomain there is no operator set defined for the given - /// domain, - /// \throw error::UnknownOperator the given operator type name does not exist in - /// operator set. - const Operator& get_operator(const std::string& name, const std::string& domain) const; - - /// \brief Check availability of operator base on NodeProto. - /// \return `true` if the operator is available, otherwise it returns `false`. - bool is_operator_available(const ONNX_NAMESPACE::NodeProto& node_proto) const; - - /// \brief Enable operators from provided domain to use by this model. - /// - /// \note This function makes visible all currently registered in provided domain - /// operators for use in this model. - /// - /// \param[in] domain The domain name. - /// - void enable_opset_domain(const std::string& domain); - - private: - const std::shared_ptr m_model_proto; - std::unordered_map m_opset; - }; - - inline std::ostream& operator<<(std::ostream& outs, const Model& model) - { - return (outs << ""); - } - - } // namespace onnx_import - -} // namespace ngraph diff --git a/ngraph/python/BUILDING.md b/ngraph/python/BUILDING.md index 18e395fc9d8..57a923d5cc6 100644 --- a/ngraph/python/BUILDING.md +++ b/ngraph/python/BUILDING.md @@ -48,7 +48,7 @@ set the mentioned flags to `ON`. 
Note the `CMAKE_INSTALL_PREFIX`, which defaults -DENABLE_OPENCV=OFF \ -DENABLE_VPU=OFF \ -DENABLE_PYTHON=ON \ - -DNGRAPH_ONNX_IMPORT_ENABLE=ON \ + -DNGRAPH_ONNX_FRONTEND_ENABLE=ON \ -DCMAKE_INSTALL_PREFIX="${OPENVINO_BASEDIR}/openvino_dist" make -j 4 @@ -110,7 +110,7 @@ cmake .. ^ -DENABLE_CLDNN=OFF ^ -DENABLE_OPENCV=OFF ^ -DENABLE_VPU=OFF ^ - -DNGRAPH_ONNX_IMPORT_ENABLE=ON ^ + -DNGRAPH_ONNX_FRONTEND_ENABLE=ON ^ -DENABLE_PYTHON=ON ^ -DCMAKE_CXX_COMPILER="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.27.29110\bin\Hostx64\x64" diff --git a/ngraph/python/CMakeLists.txt b/ngraph/python/CMakeLists.txt index b7667422781..7de595a5211 100644 --- a/ngraph/python/CMakeLists.txt +++ b/ngraph/python/CMakeLists.txt @@ -61,8 +61,8 @@ target_include_directories(_${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR} target_link_libraries(_${PROJECT_NAME} PRIVATE ngraph::ngraph ngraph::frontend_manager) -if (TARGET ngraph::onnx_importer) - add_dependencies(_${PROJECT_NAME} ngraph::onnx_importer) +if (TARGET ngraph::onnx_ngraph_frontend) + add_dependencies(_${PROJECT_NAME} ngraph::onnx_ngraph_frontend) endif() if(NGRAPH_UNIT_TEST_ENABLE) diff --git a/ngraph/python/setup.py b/ngraph/python/setup.py index d81df94f09a..5aae74ceb1e 100644 --- a/ngraph/python/setup.py +++ b/ngraph/python/setup.py @@ -23,7 +23,7 @@ OPENVINO_ROOT_DIR = os.path.normpath(os.path.join(PYNGRAPH_ROOT_DIR, "../..")) # Change current working dircectory to ngraph/python os.chdir(PYNGRAPH_ROOT_DIR) -NGRAPH_LIBS = ["ngraph", "onnx_importer"] +NGRAPH_LIBS = ["ngraph", "onnx_ngraph_frontend"] packages = [ "ngraph", @@ -157,7 +157,7 @@ class BuildCMakeExt(build_ext): self.spawn(["cmake", "-H" + root_dir, "-B" + self.build_temp, "-DCMAKE_BUILD_TYPE={}".format(self.config), "-DENABLE_PYTHON=ON", - "-DNGRAPH_ONNX_IMPORT_ENABLE=ON"] + ext_args) + "-DNGRAPH_ONNX_FRONTEND_ENABLE=ON"] + ext_args) self.announce("Building binaries", level=3) diff --git a/ngraph/python/src/pyngraph/pyngraph.cpp b/ngraph/python/src/pyngraph/pyngraph.cpp index c401a683654..8ef56758721 100644 --- a/ngraph/python/src/pyngraph/pyngraph.cpp +++ b/ngraph/python/src/pyngraph/pyngraph.cpp @@ -13,7 +13,7 @@ #include "pyngraph/node_factory.hpp" #include "pyngraph/node_input.hpp" #include "pyngraph/node_output.hpp" -#if defined(NGRAPH_ONNX_IMPORT_ENABLE) +#if defined(NGRAPH_ONNX_FRONTEND_ENABLE) #include "pyngraph/onnx_import/onnx_import.hpp" #endif #include "pyngraph/dimension.hpp" @@ -66,7 +66,7 @@ PYBIND11_MODULE(_pyngraph, m) regclass_pyngraph_op_Constant(m_op); regclass_pyngraph_op_Parameter(m_op); regclass_pyngraph_op_Result(m_op); -#if defined(NGRAPH_ONNX_IMPORT_ENABLE) +#if defined(NGRAPH_ONNX_FRONTEND_ENABLE) regmodule_pyngraph_onnx_import(m); #endif regmodule_pyngraph_op_util(m_op); diff --git a/ngraph/python/tests/test_frontend/test_frontend_onnx.py b/ngraph/python/tests/test_frontend/test_frontend_onnx.py index e55f665b883..4290c7de2ce 100644 --- a/ngraph/python/tests/test_frontend/test_frontend_onnx.py +++ b/ngraph/python/tests/test_frontend/test_frontend_onnx.py @@ -37,6 +37,7 @@ def run_function(function, *inputs, expected): fem = FrontEndManager() onnx_model_filename = "model.onnx" +ONNX_FRONTEND_NAME = "onnx_experimental" def setup_module(): @@ -49,14 +50,14 @@ def teardown_module(): def skip_if_onnx_frontend_is_disabled(): front_ends = fem.get_available_front_ends() - if "onnx" not in front_ends: + if ONNX_FRONTEND_NAME not in front_ends: pytest.skip() def test_convert(): skip_if_onnx_frontend_is_disabled() - fe = 
fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load(onnx_model_filename) @@ -74,7 +75,7 @@ def test_convert(): def test_decode_and_convert(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load(onnx_model_filename) diff --git a/ngraph/python/tests/test_frontend/test_frontend_onnx_editor.py b/ngraph/python/tests/test_frontend/test_frontend_onnx_editor.py index 260d5d68d54..ad5acbeb686 100644 --- a/ngraph/python/tests/test_frontend/test_frontend_onnx_editor.py +++ b/ngraph/python/tests/test_frontend/test_frontend_onnx_editor.py @@ -166,6 +166,7 @@ def create_test_onnx_models(): fem = FrontEndManager() test_models_names = [] +ONNX_FRONTEND_NAME = "onnx_experimental" def setup_module(): @@ -182,7 +183,7 @@ def teardown_module(): def skip_if_onnx_frontend_is_disabled(): front_ends = fem.get_available_front_ends() - if "onnx" not in front_ends: + if ONNX_FRONTEND_NAME not in front_ends: pytest.skip() @@ -234,7 +235,7 @@ def compare_functions(current, expected): # noqa: C901 the function is too comp def test_extract_subgraph(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -255,7 +256,7 @@ def test_extract_subgraph(): def test_extract_subgraph_2(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -275,7 +276,7 @@ def test_extract_subgraph_2(): def test_extract_subgraph_3(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -296,7 +297,7 @@ def test_extract_subgraph_3(): def test_extract_subgraph_4(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -320,7 +321,7 @@ def test_extract_subgraph_4(): def test_override_all_outputs(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -340,7 +341,7 @@ def test_override_all_outputs(): def test_override_all_outputs_2(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -359,7 +360,7 @@ def test_override_all_outputs_2(): def test_override_all_inputs(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -382,7 +383,7 @@ def test_override_all_inputs(): def test_override_all_inputs_exceptions(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -404,7 +405,7 @@ def test_override_all_inputs_exceptions(): def test_is_input_output(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = 
fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -430,7 +431,7 @@ def test_is_input_output(): def test_set_partial_shape(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -453,7 +454,7 @@ def test_set_partial_shape(): def test_get_partial_shape(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -476,7 +477,7 @@ def test_get_partial_shape(): def test_get_inputs(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -486,7 +487,7 @@ def test_get_inputs(): def test_get_outputs(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -498,7 +499,7 @@ def test_get_outputs(): def test_is_equal(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") @@ -531,7 +532,7 @@ def test_is_equal(): def test_get_place_by_tensor_name(): skip_if_onnx_frontend_is_disabled() - fe = fem.load_by_framework(framework="onnx") + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) assert fe model = fe.load("input_model.onnx") diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 32c1cf043aa..5cb76c46257 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -530,7 +530,7 @@ set(MULTI_TEST_SRC backend/zero_sized.in.cpp ) -if (NGRAPH_ONNX_IMPORT_ENABLE) +if (NGRAPH_ONNX_FRONTEND_ENABLE) list(APPEND MULTI_TEST_SRC onnx/onnx_import.in.cpp onnx/onnx_import_controlflow.in.cpp @@ -550,7 +550,7 @@ if (NGRAPH_ONNX_IMPORT_ENABLE) onnx/onnx_tensor_names.cpp) endif() -if (NGRAPH_ONNX_IMPORT_ENABLE) +if (NGRAPH_ONNX_FRONTEND_ENABLE) list(APPEND SRC onnx/onnx_editor.cpp) list(APPEND MULTI_TEST_SRC onnx/onnx_test_utils.in.cpp @@ -601,7 +601,7 @@ target_link_libraries(unit-test PRIVATE ngraph_test_util # Protobuf-lite does not support parsing files from prototxt format # Since most of the onnx models are stored in this format it have to be disabled -if (NGRAPH_ONNX_IMPORT_ENABLE) +if (NGRAPH_ONNX_FRONTEND_ENABLE) # It's needed by onnx_import_library.cpp and onnx_import_exceptions.cpp tests to include onnx_pb.h. # Not linking statically to libprotobuf (linked into libonnx) avoids false-failing onnx_editor tests. 
target_include_directories(unit-test @@ -617,11 +617,10 @@ if (OV_COMPILER_IS_CLANG) target_compile_options(unit-test PRIVATE -Wno-undef -Wno-reserved-id-macro) endif() -if (NGRAPH_ONNX_IMPORT_ENABLE) - get_target_property(ONNX_IMPORTER_SRC_DIR onnx_importer SOURCE_DIR) - target_include_directories(unit-test PRIVATE ${ONNX_IMPORTER_SRC_DIR}/src) - - target_link_libraries(unit-test PRIVATE onnx_importer) +if (NGRAPH_ONNX_FRONTEND_ENABLE) + get_target_property(ONNX_FRONTEND_SRC_DIR onnx_ngraph_frontend SOURCE_DIR) + target_include_directories(unit-test PRIVATE ${ONNX_FRONTEND_SRC_DIR}/src) + target_link_libraries(unit-test PRIVATE onnx_ngraph_frontend) if (LINUX) target_link_options(unit-test PRIVATE -Wl,--exclude-libs,ALL) elseif(APPLE) diff --git a/ngraph/test/onnx/onnx_editor.cpp b/ngraph/test/onnx/onnx_editor.cpp index b6b905ba91b..b08d1a37a5d 100644 --- a/ngraph/test/onnx/onnx_editor.cpp +++ b/ngraph/test/onnx/onnx_editor.cpp @@ -11,7 +11,7 @@ #include "ngraph/file_util.hpp" #include "ngraph/op/util/op_types.hpp" #include "ngraph/opsets/opset1.hpp" -#include "onnx_editor/editor.hpp" +#include "editor.hpp" #include "onnx_import/onnx.hpp" #include "util/engine/interpreter_engine.hpp" #include "util/onnx_test_util.hpp" diff --git a/ngraph/test/onnx/onnx_import.in.cpp b/ngraph/test/onnx/onnx_import.in.cpp index f4eb93c1e93..e3c92ff5ed2 100644 --- a/ngraph/test/onnx/onnx_import.in.cpp +++ b/ngraph/test/onnx/onnx_import.in.cpp @@ -22,7 +22,7 @@ #endif // clang-format on -#include "core/null_node.hpp" +#include "onnx_import/core/null_node.hpp" #include "gtest/gtest.h" #include "onnx_import/onnx.hpp" #include "onnx_import/onnx_utils.hpp" diff --git a/ngraph/test/onnx/onnx_import_org_openvino.in.cpp b/ngraph/test/onnx/onnx_import_org_openvino.in.cpp index 46fec97913e..e7336449c6c 100644 --- a/ngraph/test/onnx/onnx_import_org_openvino.in.cpp +++ b/ngraph/test/onnx/onnx_import_org_openvino.in.cpp @@ -22,7 +22,7 @@ #endif // clang-format on -#include "core/null_node.hpp" +#include "onnx_import/core/null_node.hpp" #include "gtest/gtest.h" #include "onnx_import/onnx.hpp" #include "onnx_import/onnx_utils.hpp" diff --git a/ngraph/test/onnx/onnx_import_with_editor.in.cpp b/ngraph/test/onnx/onnx_import_with_editor.in.cpp index 1650aa2fc8a..260ec566d5e 100644 --- a/ngraph/test/onnx/onnx_import_with_editor.in.cpp +++ b/ngraph/test/onnx/onnx_import_with_editor.in.cpp @@ -12,7 +12,7 @@ // clang-format on #include "gtest/gtest.h" -#include "onnx_editor/editor.hpp" +#include "editor.hpp" #include "ngraph/ngraph.hpp" #include "util/test_case.hpp" #include "util/test_control.hpp" diff --git a/ngraph/test/onnx/onnx_test_utils.in.cpp b/ngraph/test/onnx/onnx_test_utils.in.cpp index 5e70e3e13c2..6862452e389 100644 --- a/ngraph/test/onnx/onnx_test_utils.in.cpp +++ b/ngraph/test/onnx/onnx_test_utils.in.cpp @@ -9,7 +9,7 @@ #include "default_opset.hpp" #include "ngraph/file_util.hpp" #include "ngraph/op/util/op_types.hpp" -#include "onnx_editor/editor.hpp" +#include "editor.hpp" #include "onnx_import/onnx.hpp" #include "util/test_control.hpp" diff --git a/ngraph/test/util/CMakeLists.txt b/ngraph/test/util/CMakeLists.txt index 4ed628b8dd2..7c73315e667 100644 --- a/ngraph/test/util/CMakeLists.txt +++ b/ngraph/test/util/CMakeLists.txt @@ -14,7 +14,7 @@ set (SRC visitor.hpp provenance_enabler.hpp ) -if (NGRAPH_ONNX_IMPORT_ENABLE) +if (NGRAPH_ONNX_FRONTEND_ENABLE) list(APPEND SRC onnx_test_util.cpp) endif() @@ -29,6 +29,6 @@ endif() target_include_directories(ngraph_test_util PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/.. 
$) target_link_libraries(ngraph_test_util PUBLIC ngraph ngraph_backend gtest gmock) -if (NGRAPH_ONNX_IMPORT_ENABLE) +if (NGRAPH_ONNX_FRONTEND_ENABLE) target_link_libraries(ngraph_test_util PRIVATE onnx_common) endif() diff --git a/thirdparty/CMakeLists.txt b/thirdparty/CMakeLists.txt index 93a1c3684de..ef0acd6559b 100644 --- a/thirdparty/CMakeLists.txt +++ b/thirdparty/CMakeLists.txt @@ -72,7 +72,7 @@ endif() # Protobuf # -if(NGRAPH_PDPD_FRONTEND_ENABLE OR NGRAPH_ONNX_IMPORT_ENABLE) +if(NGRAPH_PDPD_FRONTEND_ENABLE OR NGRAPH_ONNX_FRONTEND_ENABLE) if(NGRAPH_USE_SYSTEM_PROTOBUF) set(Protobuf_USE_STATIC_LIBS ON) if(VERBOSE_BUILD) @@ -112,7 +112,7 @@ endif() # ONNX # -if(NGRAPH_ONNX_IMPORT_ENABLE) +if(NGRAPH_ONNX_FRONTEND_ENABLE) add_subdirectory(onnx) endif() From 26936d1fbb025c503ee43fe74593ee9d7862ab15 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Wed, 11 Aug 2021 22:07:55 +0300 Subject: [PATCH 14/19] Feature/azaytsev/docs fix layout (#7034) * Added info on DockerHub CI Framework * Feature/azaytsev/change layout (#3295) * Changes according to feedback comments * Replaced @ref's with html links * Fixed links, added a title page for installing from repos and images, fixed formatting issues * Added links * minor fix * Added DL Streamer to the list of components installed by default * Link fixes * Link fixes * ovms doc fix (#2988) * added OpenVINO Model Server * ovms doc fixes Co-authored-by: Trawinski, Dariusz * Updated openvino_docs.xml * Updated the link to software license agreements * Revert "Updated the link to software license agreements" This reverts commit 706dac500e764bd7534f7005ac6197f827d68cb5. * Fixed layout Co-authored-by: Trawinski, Dariusz --- docs/doxygen/ie_docs.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index d1fbbe89b2d..f5ef147751f 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -61,7 +61,7 @@ limitations under the License. - + From 559ad4733f573cbed903dc90d225c5a3f65e6cd8 Mon Sep 17 00:00:00 2001 From: Xiping Yan Date: Thu, 12 Aug 2021 10:25:35 +0800 Subject: [PATCH 15/19] Xp/revise ngraph atanh (#6596) * revise ../docs/ops/arithmetic/Atanh_3.md * revise atanh * recall atanh input data type description * Revert "recall atanh input data type description" This reverts commit 08e38f8c7688553a711a5dd514e7b8702053e6b3. * Migrate atanh to template plugin reference tests Signed-off-by: Yan, Xiping * fix build fail. Signed-off-by: Yan, Xiping * Remove atanh.in.cpp Signed-off-by: Yan, Xiping * Atanh only accept [-1,1]. If input value outside of this range, it will return nan. But nan is not value, can't compare. So I update the reference implement. Signed-off-by: Yan, Xiping * clang format Signed-off-by: Yan, Xiping * Revert submodule mkl-dnn Signed-off-by: Yan, Xiping * op::Atanh->op::v3::Atanh Signed-off-by: Yan, Xiping * Keep algin with "Tensorflow", revert range input to [-1.0, 1.0]; Signed-off-by: Yan, Xiping * fix template test issue: if expect and actual both are nan, return true. Signed-off-by: Yan, Xiping * 1: revert add new code to process condition: res and ref both are nan. 2: change || to ^, it looks like better. 
Signed-off-by: Yan, Xiping --- docs/ops/arithmetic/Atanh_3.md | 20 ++-- .../tests/functional/op_reference/atanh.cpp | 94 +++++++++++++++++++ .../base/layer_test_utils.hpp | 2 +- ngraph/core/include/ngraph/op/atanh.hpp | 4 +- .../ngraph/runtime/reference/atanh.hpp | 14 ++- ngraph/core/src/op/atanh.cpp | 2 +- ngraph/test/CMakeLists.txt | 3 +- ngraph/test/backend/atanh.in.cpp | 51 ---------- ngraph/test/type_prop/atanh.cpp | 9 ++ ngraph/test/visitors/op/atanh.cpp | 12 +++ 10 files changed, 141 insertions(+), 70 deletions(-) create mode 100644 docs/template_plugin/tests/functional/op_reference/atanh.cpp delete mode 100644 ngraph/test/backend/atanh.in.cpp create mode 100644 ngraph/test/type_prop/atanh.cpp create mode 100644 ngraph/test/visitors/op/atanh.cpp diff --git a/docs/ops/arithmetic/Atanh_3.md b/docs/ops/arithmetic/Atanh_3.md index c6dc4a5a89c..d08486c4205 100644 --- a/docs/ops/arithmetic/Atanh_3.md +++ b/docs/ops/arithmetic/Atanh_3.md @@ -4,11 +4,13 @@ **Category**: Arithmetic unary operation -**Short description**: *Atanh* performs element-wise hyperbolic inverse tangent (arctangenth) operation with given tensor. +**Short description**: *Atanh* performs element-wise hyperbolic inverse tangent (arctangenth) operation with a given tensor. -**Attributes**: +**Detailed description**: *Atanh* performs element-wise hyperbolic inverse tangent (arctangenth) operation on a given input tensor, based on the following mathematical formula: - No attributes available. +\f[ a_{i} = atanh(a_{i}) \f] + +**Attributes**: Atanh operation has no attributes. **Inputs** @@ -16,22 +18,14 @@ **Outputs** -* **1**: The result of element-wise atanh operation. A tensor of type *T*. +* **1**: The result of element-wise atanh operation applied to the input tensor. A tensor of type *T* and the same shape as input tensor. **Types** -* *T*: any floating-point type. - -*Atanh* does the following with the input tensor *a*: - -\f[ -a_{i} = atanh(a_{i}) -\f] +* *T*: any supported numeric type. 
**Examples** -*Example 1* - ```xml diff --git a/docs/template_plugin/tests/functional/op_reference/atanh.cpp b/docs/template_plugin/tests/functional/op_reference/atanh.cpp new file mode 100644 index 00000000000..e80c6b6734f --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/atanh.cpp @@ -0,0 +1,94 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include +#include + +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ngraph; +using namespace InferenceEngine; + +struct AtanhParams { + template + AtanhParams(const ngraph::PartialShape& shape, const ngraph::element::Type& iType, const std::vector& iValues) + : pshape(shape), inType(iType), outType(iType), inputData(CreateBlob(iType, iValues)) { + std::vector oValues; + std::vector output; + for (auto element : iValues) + output.push_back(static_cast(element)); + + std::transform(output.begin(), output.end(), output.begin(), [](double input) -> double { + return std::atanh(input); + }); + + if (std::is_integral()) { + std::transform(output.begin(), output.end(), output.begin(), [](double input) -> double { + return std::round(input); + }); + } + + for (auto element : output) + oValues.push_back(static_cast(element)); + refData = CreateBlob(outType, oValues); + } + ngraph::PartialShape pshape; + ngraph::element::Type inType; + ngraph::element::Type outType; + InferenceEngine::Blob::Ptr inputData; + InferenceEngine::Blob::Ptr refData; +}; + +class ReferenceAtanhLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& expected_output_type) { + const auto in = std::make_shared(input_type, input_shape); + const auto atanh = std::make_shared(in); + return std::make_shared(NodeVector {atanh}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceAtanhLayerTest, CompareWithRefs) { + Exec(); +} + +INSTANTIATE_TEST_SUITE_P( + smoke_Atanh_With_Hardcoded_Refs, ReferenceAtanhLayerTest, + ::testing::Values(AtanhParams(ngraph::PartialShape {2, 4}, ngraph::element::f32, + std::vector {-INFINITY, -2.0f, -1.0f, -0.5f, 0.0f, 0.8f, 1.0f, INFINITY}), + AtanhParams(ngraph::PartialShape {2, 4}, ngraph::element::f16, + std::vector {-INFINITY, -2.0f, -1.0f, -0.5f, -0.0f, 0.8f, 1.0f, INFINITY}), + AtanhParams(ngraph::PartialShape {2, 3}, ngraph::element::i32, + std::vector {std::numeric_limits::min(), -2, -1, 1, 2, std::numeric_limits::max()}), + AtanhParams(ngraph::PartialShape {2, 3}, ngraph::element::u32, + std::vector {std::numeric_limits::min(), 0, 1, 2, 3, std::numeric_limits::max()}), + AtanhParams(ngraph::PartialShape {2, 3}, ngraph::element::i64, + std::vector {std::numeric_limits::min(), -2, -1, 1, 2, std::numeric_limits::max()}), + AtanhParams(ngraph::PartialShape {2, 3}, ngraph::element::u64, + std::vector {std::numeric_limits::min(), 0, 1, 2, 3, 
std::numeric_limits::max()})), + ReferenceAtanhLayerTest::getTestCaseName); diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp index bde6ba57578..9140f970cff 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp @@ -104,7 +104,7 @@ public: } double diff = static_cast(absoluteDifference) / max; if (max == 0 || (diff > static_cast(threshold)) || - std::isnan(static_cast(res)) || std::isnan(static_cast(ref))) { + (std::isnan(static_cast(res)) ^ std::isnan(static_cast(ref)))) { IE_THROW() << "Relative comparison of values expected: " << std::to_string(ref) << " and actual: " << std::to_string(res) << " at index " << i << " with threshold " << threshold << " failed"; diff --git a/ngraph/core/include/ngraph/op/atanh.hpp b/ngraph/core/include/ngraph/op/atanh.hpp index ec99b45d107..8b9ebf8e99f 100644 --- a/ngraph/core/include/ngraph/op/atanh.hpp +++ b/ngraph/core/include/ngraph/op/atanh.hpp @@ -19,8 +19,8 @@ namespace ngraph class NGRAPH_API Atanh : public util::UnaryElementwiseArithmetic { public: - static constexpr NodeTypeInfo type_info{"Atanh", 3}; - const NodeTypeInfo& get_type_info() const override { return type_info; } + NGRAPH_RTTI_DECLARATION; + /// \brief Constructs an Atanh operation. Atanh() = default; /// \brief Constructs an Atanh operation. diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/atanh.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/atanh.hpp index 7e2cb95df0e..4defb372a80 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/atanh.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/atanh.hpp @@ -4,6 +4,7 @@ #pragma once +#include #include #include @@ -13,7 +14,8 @@ namespace ngraph { namespace reference { - template + template ::value, bool>::type = true> void atanh(const T* arg, T* out, size_t count) { for (size_t i = 0; i < count; i++) @@ -21,6 +23,16 @@ namespace ngraph out[i] = std::atanh(arg[i]); } } + + template ::value, bool>::type = true> + void atanh(const T* arg, T* out, size_t count) + { + for (size_t i = 0; i < count; i++) + { + out[i] = std::roundl(std::atanh(arg[i])); + } + } } // namespace reference } // namespace runtime } // namespace ngraph diff --git a/ngraph/core/src/op/atanh.cpp b/ngraph/core/src/op/atanh.cpp index a14e6aaebe1..3f865745a6c 100644 --- a/ngraph/core/src/op/atanh.cpp +++ b/ngraph/core/src/op/atanh.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::Atanh::type_info; +NGRAPH_RTTI_DEFINITION(op::v3::Atanh, "Atanh", 0, util::UnaryElementwiseArithmetic); op::v3::Atanh::Atanh(const Output& arg) : UnaryElementwiseArithmetic(arg) diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 5cb76c46257..1d563c53ab3 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -95,6 +95,7 @@ set(SRC type_prop/asinh.cpp type_prop/assign.cpp type_prop/atan.cpp + type_prop/atanh.cpp type_prop/avg_pool.cpp type_prop/batch_norm.cpp type_prop/batch_to_space.cpp @@ -241,6 +242,7 @@ set(SRC visitors/op/add.cpp visitors/op/asinh.cpp visitors/op/atan.cpp + visitors/op/atanh.cpp visitors/op/batch_norm.cpp visitors/op/batch_to_space.cpp visitors/op/broadcast.cpp @@ -403,7 
+405,6 @@ set(MULTI_TEST_SRC backend/asin.in.cpp backend/asinh.in.cpp backend/atan.in.cpp - backend/atanh.in.cpp backend/auto_broadcast.in.cpp backend/avg_pool.in.cpp backend/batch_norm.in.cpp diff --git a/ngraph/test/backend/atanh.in.cpp b/ngraph/test/backend/atanh.in.cpp deleted file mode 100644 index fce628ed8ef..00000000000 --- a/ngraph/test/backend/atanh.in.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include -#include - -// clang-format off -#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS -#endif - -#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS -#endif -// clang-format on - -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/engine/test_engines.hpp" -#include "util/test_case.hpp" -#include "util/test_control.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; -using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); - -NGRAPH_TEST(${BACKEND_NAME}, atanh) -{ - Shape shape{11}; - auto A = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A), ParameterVector{A}); - - vector input{0.f, 1.f, -1.f, 2.f, -2.f, 3.f, -3.f, 4.f, 5.f, 10.f, 100.f}; - vector expected; - for (float f : input) - { - expected.push_back(std::atanh(f)); - } - - auto test_case = test::TestCase(f); - test_case.add_input(input); - test_case.add_expected_output(shape, expected); - test_case.run(); -} diff --git a/ngraph/test/type_prop/atanh.cpp b/ngraph/test/type_prop/atanh.cpp new file mode 100644 index 00000000000..96d21a421a5 --- /dev/null +++ b/ngraph/test/type_prop/atanh.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "unary_ops.hpp" + +using Type = ::testing::Types; + +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_atanh, UnaryOperator, Type); diff --git a/ngraph/test/visitors/op/atanh.cpp b/ngraph/test/visitors/op/atanh.cpp new file mode 100644 index 00000000000..79122efaba2 --- /dev/null +++ b/ngraph/test/visitors/op/atanh.cpp @@ -0,0 +1,12 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "unary_ops.hpp" + +using Types = ::testing::Types>; + +INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute, + UnaryOperatorVisitor, + Types, + UnaryOperatorTypeName); \ No newline at end of file From c771b1e2e0cbee474a77b75b81e84a00c1741e10 Mon Sep 17 00:00:00 2001 From: Sergey Lyubimtsev Date: Thu, 12 Aug 2021 11:38:03 +0300 Subject: [PATCH 16/19] Merge benchmark tools dirs (#6960) * add missed __init__.py files * Update __init__.py empty line * Merge infirence_engine/tools/benchmark_tool with tools/benchmark_tool * Update MD links * remove benchmark_tool from package_BOM.txt * add tools folder to the list of Doxygen files * fix relative paths * Update index.md remove extra line --- docs/CMakeLists.txt | 6 +++++- docs/IE_DG/Samples_Overview.md | 2 +- docs/benchmarks/performance_benchmarks_faq.md | 2 +- inference-engine/samples/benchmark_app/README.md | 2 +- inference-engine/tools/package_BOM.txt | 3 --- tools/CMakeLists.txt | 6 ++++-- .../tools => tools}/benchmark_tool/README.md | 12 ++++++------ .../tools => tools}/benchmark_tool/benchmark_app.py | 0 .../openvino/tools/benchmark/README.md | 10 +++++----- .../openvino/tools/benchmark/requirements.txt | 4 
---- .../tools => tools}/benchmark_tool/requirements.txt | 0 tools/benchmark_tool/setup.py | 4 ++-- 12 files changed, 25 insertions(+), 26 deletions(-) rename {inference-engine/tools => tools}/benchmark_tool/README.md (96%) rename {inference-engine/tools => tools}/benchmark_tool/benchmark_app.py (100%) delete mode 100644 tools/benchmark_tool/openvino/tools/benchmark/requirements.txt rename {inference-engine/tools => tools}/benchmark_tool/requirements.txt (100%) diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 15eabf321ab..4d3135903de 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -223,7 +223,11 @@ function(build_docs) "${OpenVINO_SOURCE_DIR}/inference-engine/*.md" "${OpenVINO_SOURCE_DIR}/inference-engine/*.png" "${OpenVINO_SOURCE_DIR}/inference-engine/*.gif" - "${OpenVINO_SOURCE_DIR}/inference-engine/*.jpg") + "${OpenVINO_SOURCE_DIR}/inference-engine/*.jpg" + "${OpenVINO_SOURCE_DIR}/tools/*.md" + "${OpenVINO_SOURCE_DIR}/tools/*.png" + "${OpenVINO_SOURCE_DIR}/tools/*.gif" + "${OpenVINO_SOURCE_DIR}/tools/*.jpg") foreach(source_file ${ovino_doc_files}) list(APPEND commands COMMAND ${CMAKE_COMMAND} -E copy diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md index db39cbfc5b4..f9e21cf5e4d 100644 --- a/docs/IE_DG/Samples_Overview.md +++ b/docs/IE_DG/Samples_Overview.md @@ -14,7 +14,7 @@ Inference Engine sample applications include the following: - [Automatic Speech Recognition Python Sample](../../inference-engine/ie_bridges/python/sample/speech_sample/README.md) - **Benchmark Application** – Estimates deep learning inference performance on supported devices for synchronous and asynchronous modes. - [Benchmark C++ Tool](../../inference-engine/samples/benchmark_app/README.md) - - [Benchmark Python Tool](../../inference-engine/tools/benchmark_tool/README.md) + - [Benchmark Python Tool](../../tools/benchmark_tool/README.md) - **Hello Classification Sample** – Inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API. Input of any size and layout can be set to an infer request which will be pre-processed automatically during inference (the sample supports only images as inputs and supports Unicode paths). - [Hello Classification C++ Sample](../../inference-engine/samples/hello_classification/README.md) - [Hello Classification C Sample](../../inference-engine/ie_bridges/c/samples/hello_classification/README.md) diff --git a/docs/benchmarks/performance_benchmarks_faq.md b/docs/benchmarks/performance_benchmarks_faq.md index a89d0fc07c3..2ff33612097 100644 --- a/docs/benchmarks/performance_benchmarks_faq.md +++ b/docs/benchmarks/performance_benchmarks_faq.md @@ -15,7 +15,7 @@ The models used in the performance benchmarks were chosen based on general adopt CF means Caffe*, while TF means TensorFlow*. #### 5. How can I run the benchmark results on my own? -All of the performance benchmarks were generated using the open-sourced tool within the Intel® Distribution of OpenVINO™ toolkit called `benchmark_app`, which is available in both [C++](../../inference-engine/samples/benchmark_app/README.md) and [Python](../../inference-engine/tools/benchmark_tool/README.md). +All of the performance benchmarks were generated using the open-sourced tool within the Intel® Distribution of OpenVINO™ toolkit called `benchmark_app`, which is available in both [C++](../../inference-engine/samples/benchmark_app/README.md) and [Python](../../tools/benchmark_tool/README.md). #### 6. 
What image sizes are used for the classification network models? The image size used in the inference depends on the network being benchmarked. The following table shows the list of input sizes for each network model. diff --git a/inference-engine/samples/benchmark_app/README.md b/inference-engine/samples/benchmark_app/README.md index 2d5076a60c6..c705d592ce3 100644 --- a/inference-engine/samples/benchmark_app/README.md +++ b/inference-engine/samples/benchmark_app/README.md @@ -2,7 +2,7 @@ This topic demonstrates how to use the Benchmark C++ Tool to estimate deep learning inference performance on supported devices. Performance can be measured for two inference modes: synchronous (latency-oriented) and asynchronous (throughput-oriented). -> **NOTE:** This topic describes usage of C++ implementation of the Benchmark Tool. For the Python* implementation, refer to [Benchmark Python* Tool](../../tools/benchmark_tool/README.md). +> **NOTE:** This topic describes usage of C++ implementation of the Benchmark Tool. For the Python* implementation, refer to [Benchmark Python* Tool](../../../tools/benchmark_tool/README.md). > **TIP**: You also can work with the Benchmark Tool inside the OpenVINO™ [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) (DL Workbench). > [DL Workbench](@ref workbench_docs_Workbench_DG_Introduction) is a platform built upon OpenVINO™ and provides a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare diff --git a/inference-engine/tools/package_BOM.txt b/inference-engine/tools/package_BOM.txt index fcff3b75a0f..b1d58875c48 100644 --- a/inference-engine/tools/package_BOM.txt +++ b/inference-engine/tools/package_BOM.txt @@ -1,6 +1,3 @@ -benchmark_tool/benchmark_app.py -benchmark_tool/requirements.txt -benchmark_tool/README.md cross_check_tool/__init__.py cross_check_tool/utils.py cross_check_tool/requirements.txt diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 5469ac09336..fc468719d16 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -29,8 +29,10 @@ if(ENABLE_PYTHON) ie_cpack_add_component(python_tools_${PYTHON_VERSION}) ie_cpack_add_component(python_tools) - install(DIRECTORY ../inference-engine/tools/benchmark_tool - DESTINATION deployment_tools/tools + install(FILES benchmark_tool/benchmark_app.py + benchmark_tool/README.md + benchmark_tool/requirements.txt + DESTINATION deployment_tools/tools/benchmark_tool COMPONENT python_tools) install(DIRECTORY ../inference-engine/tools/cross_check_tool diff --git a/inference-engine/tools/benchmark_tool/README.md b/tools/benchmark_tool/README.md similarity index 96% rename from inference-engine/tools/benchmark_tool/README.md rename to tools/benchmark_tool/README.md index 1eacb8f56ad..4c7608b6cce 100644 --- a/inference-engine/tools/benchmark_tool/README.md +++ b/tools/benchmark_tool/README.md @@ -2,7 +2,7 @@ This topic demonstrates how to run the Benchmark Python* Tool, which performs inference using convolutional networks. Performance can be measured for two inference modes: synchronous (latency-oriented) and asynchronous (throughput-oriented). -> **NOTE:** This topic describes usage of Python implementation of the Benchmark Tool. For the C++ implementation, refer to [Benchmark C++ Tool](../../samples/benchmark_app/README.md). +> **NOTE:** This topic describes usage of Python implementation of the Benchmark Tool. For the C++ implementation, refer to [Benchmark C++ Tool](../../inference-engine/samples/benchmark_app/README.md). 
> **TIP**: You also can work with the Benchmark Tool inside the OpenVINO™ [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) (DL Workbench). > [DL Workbench](@ref workbench_docs_Workbench_DG_Introduction) is a platform built upon OpenVINO™ and provides a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare @@ -15,7 +15,7 @@ This topic demonstrates how to run the Benchmark Python* Tool, which performs in Upon start-up, the application reads command-line parameters and loads a network and images/binary files to the Inference Engine plugin, which is chosen depending on a specified device. The number of infer requests and execution approach depend on the mode defined with the `-api` command-line parameter. -> **NOTE**: By default, Inference Engine samples, tools and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](../../../docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md). +> **NOTE**: By default, Inference Engine samples, tools and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](../../docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md). ### Synchronous API @@ -54,7 +54,7 @@ Notice that the benchmark_app usually produces optimal performance for any devic python3 benchmark_app.py -m -i -d CPU ``` -But it is still may be non-optimal for some cases, especially for very small networks. More details can read in [Introduction to Performance Topics](../../../docs/IE_DG/Intro_to_Performance.md). +But it is still may be non-optimal for some cases, especially for very small networks. More details can read in [Introduction to Performance Topics](../../docs/IE_DG/Intro_to_Performance.md). Running the application with the `-h` or `--help`' option yields the following usage message: @@ -147,7 +147,7 @@ If a model has mixed input types, input folder should contain all required files To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's](@ref omz_models_group_intel) pre-trained models from the Open Model Zoo. The models can be downloaded using the [Model Downloader](@ref omz_tools_downloader). -> **NOTE**: Before running the tool with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). +> **NOTE**: Before running the tool with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). 
## Examples of Running the Tool @@ -211,6 +211,6 @@ Below are fragments of sample output for CPU and FPGA devices: ``` ## See Also -* [Using Inference Engine Samples](../../../docs/IE_DG/Samples_Overview.md) -* [Model Optimizer](../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) +* [Using Inference Engine Samples](../../docs/IE_DG/Samples_Overview.md) +* [Model Optimizer](../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) * [Model Downloader](@ref omz_tools_downloader) diff --git a/inference-engine/tools/benchmark_tool/benchmark_app.py b/tools/benchmark_tool/benchmark_app.py similarity index 100% rename from inference-engine/tools/benchmark_tool/benchmark_app.py rename to tools/benchmark_tool/benchmark_app.py diff --git a/tools/benchmark_tool/openvino/tools/benchmark/README.md b/tools/benchmark_tool/openvino/tools/benchmark/README.md index 16491d6c9b2..5bef2ffe498 100644 --- a/tools/benchmark_tool/openvino/tools/benchmark/README.md +++ b/tools/benchmark_tool/openvino/tools/benchmark/README.md @@ -8,7 +8,7 @@ Upon start-up, the application reads command-line parameters and loads a network plugin, which is chosen depending on a specified device. The number of infer requests and execution approach depend on the mode defined with the `-api` command-line parameter. -> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md). +> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](../../../../../docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md). ### Synchronous API @@ -47,7 +47,7 @@ Notice that the benchmark_app usually produces optimal performance for any devic $benchmark_app -m -i -d CPU ``` -But it is still may be non-optimal for some cases, especially for very small networks. More details can read in [Introduction to Performance Topics](./docs/IE_DG/Intro_to_Performance.md). +But it is still may be non-optimal for some cases, especially for very small networks. More details can read in [Introduction to Performance Topics](../../../../../docs/IE_DG/Intro_to_Performance.md). Running the application with the `-h` or `--help`' option yields the following usage message: @@ -158,7 +158,7 @@ If a model has mixed input types, input folder should contain all required files To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's](@ref omz_models_group_intel) pre-trained models from the Open Model Zoo. The models can be downloaded using the [Model Downloader](@ref omz_tools_downloader). 
-> **NOTE**: Before running the demo with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). +> **NOTE**: Before running the demo with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). For example, to do inference of an image using a trained network with multiple outputs on CPU, run the following command: @@ -187,6 +187,6 @@ Throughput: 73.28 FPS ``` ## See Also -* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md) -* [Model Optimizer](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) +* [Using Inference Engine Samples](../../../../../docs/IE_DG/Samples_Overview.md) +* [Model Optimizer](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) * [Model Downloader](https://github.com/openvinotoolkit/open_model_zoo/tree/2018/model_downloader) diff --git a/tools/benchmark_tool/openvino/tools/benchmark/requirements.txt b/tools/benchmark_tool/openvino/tools/benchmark/requirements.txt deleted file mode 100644 index 5fbd19cbc89..00000000000 --- a/tools/benchmark_tool/openvino/tools/benchmark/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -py-cpuinfo>=7.0.0 -numpy>=1.16.6,<1.20 -progress>=1.5 -opencv-python==4.5.* \ No newline at end of file diff --git a/inference-engine/tools/benchmark_tool/requirements.txt b/tools/benchmark_tool/requirements.txt similarity index 100% rename from inference-engine/tools/benchmark_tool/requirements.txt rename to tools/benchmark_tool/requirements.txt diff --git a/tools/benchmark_tool/setup.py b/tools/benchmark_tool/setup.py index 31ce11e4a36..c696a999459 100644 --- a/tools/benchmark_tool/setup.py +++ b/tools/benchmark_tool/setup.py @@ -10,10 +10,10 @@ $ python setup.py sdist bdist_wheel """ from setuptools import setup, find_packages -with open('openvino/tools/benchmark/README.md', 'r', encoding='utf-8') as f: +with open('README.md', 'r', encoding='utf-8') as f: long_description = f.read() -with open('openvino/tools/benchmark/requirements.txt') as f: +with open('requirements.txt') as f: required = f.read().splitlines() required.extend(['openvino']) From 7f0b8698c6105466f13708fdb369a23a0199ce65 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 12 Aug 2021 12:25:30 +0300 Subject: [PATCH 17/19] Enabled NCC check for frontends (#7033) * Try to enable NCC for onnx_ngraph_frontend * naming style for PDPD * Enabled PDPD ncc --- ngraph/frontend/onnx/frontend/CMakeLists.txt | 11 +++++----- ngraph/frontend/paddlepaddle/CMakeLists.txt | 9 ++++---- .../include/paddlepaddle_frontend/model.hpp | 6 +++--- .../include/paddlepaddle_frontend/utility.hpp | 17 +++++---------- ngraph/frontend/paddlepaddle/src/frontend.cpp | 4 ++-- ngraph/frontend/paddlepaddle/src/model.cpp | 21 ++++++++++--------- 6 files changed, 31 insertions(+), 37 deletions(-) diff --git a/ngraph/frontend/onnx/frontend/CMakeLists.txt b/ngraph/frontend/onnx/frontend/CMakeLists.txt index f348a690ae3..ad830d0ddbc 100644 --- a/ngraph/frontend/onnx/frontend/CMakeLists.txt +++ b/ngraph/frontend/onnx/frontend/CMakeLists.txt @@ -32,11 +32,12 @@ add_library(ngraph::onnx_ngraph_frontend ALIAS onnx_ngraph_frontend) add_clang_format_target(onnx_ngraph_frontend_clang FOR_TARGETS onnx_ngraph_frontend) -# TODO: fix empty class name -#ov_ncc_naming_style(FOR_TARGET onnx_ngraph_frontend -# INCLUDE_DIRECTORY 
"${ONNX_FRONTEND_INCLUDE_DIR}" -# ADDITIONAL_INCLUDE_DIRECTORIES -# $) +ov_ncc_naming_style(FOR_TARGET onnx_ngraph_frontend + INCLUDE_DIRECTORY "${ONNX_FRONTEND_INCLUDE_DIR}" + DEFINITIONS + $ + ADDITIONAL_INCLUDE_DIRECTORIES + $) if(COMMAND ie_add_vs_version_file) ie_add_vs_version_file(NAME onnx_ngraph_frontend diff --git a/ngraph/frontend/paddlepaddle/CMakeLists.txt b/ngraph/frontend/paddlepaddle/CMakeLists.txt index 5c8e9f6b39a..75a99e1e385 100644 --- a/ngraph/frontend/paddlepaddle/CMakeLists.txt +++ b/ngraph/frontend/paddlepaddle/CMakeLists.txt @@ -52,11 +52,10 @@ add_library(${TARGET_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_P add_dependencies(${TARGET_NAME} paddlepaddle_ngraph_frontend_proto) -# TODO enable: PDPD_ASSERT is in capital letters while it's a function -# ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} -# INCLUDE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include" -# ADDITIONAL_INCLUDE_DIRECTORIES -# $) +ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} + INCLUDE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include" + ADDITIONAL_INCLUDE_DIRECTORIES + $) target_include_directories(${TARGET_NAME} PUBLIC diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp index 1ab63ef6d10..c21b160c9ba 100644 --- a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp @@ -19,9 +19,9 @@ namespace ngraph class InputModelPDPDImpl; std::shared_ptr _impl; - std::vector> getOpPlaces() const; - std::map> getVarPlaces() const; - std::map> getTensorValues() const; + std::vector> get_op_places() const; + std::map> get_var_places() const; + std::map> get_tensor_values() const; public: explicit InputModelPDPD(const std::string& path); diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp index 19dcc61d24a..4532f8bd3d7 100644 --- a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp @@ -13,19 +13,12 @@ #define PDPD_API NGRAPH_HELPER_DLL_IMPORT #endif // paddlepaddle_ngraph_frontend_EXPORTS -namespace ngraph -{ - namespace frontend - { - inline void PDPD_ASSERT(bool ex, const std::string& msg = "Unspecified error.") - { - if (!ex) - throw std::runtime_error(msg); - } +#define PDPD_ASSERT(ex, msg) \ + { \ + if (!(ex)) \ + throw std::runtime_error(msg); \ + } #define PDPD_THROW(msg) throw std::runtime_error(std::string("ERROR: ") + msg) #define NOT_IMPLEMENTED(msg) throw std::runtime_error(std::string(msg) + " is not implemented") - - } // namespace frontend -} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/frontend.cpp b/ngraph/frontend/paddlepaddle/src/frontend.cpp index 7c9e90ec56f..1c122e411a1 100644 --- a/ngraph/frontend/paddlepaddle/src/frontend.cpp +++ b/ngraph/frontend/paddlepaddle/src/frontend.cpp @@ -166,7 +166,7 @@ namespace ngraph const std::map>&, const std::shared_ptr&)> func) { - auto nodes_dict(model->getTensorValues()); + auto nodes_dict(model->get_tensor_values()); ParameterVector parameter_nodes; ResultVector result_nodes; @@ -183,7 +183,7 @@ namespace ngraph parameter_nodes.push_back(param); } - const auto& op_places = model->getOpPlaces(); + const auto& op_places = model->get_op_places(); for (const auto& op_place : op_places) { const auto& op_desc = op_place->get_desc(); diff --git 
a/ngraph/frontend/paddlepaddle/src/model.cpp b/ngraph/frontend/paddlepaddle/src/model.cpp index 582fbf64cb8..cf4333fde06 100644 --- a/ngraph/frontend/paddlepaddle/src/model.cpp +++ b/ngraph/frontend/paddlepaddle/src/model.cpp @@ -47,12 +47,12 @@ namespace ngraph void setElementType(Place::Ptr place, const ngraph::element::Type&); void setTensorValue(Place::Ptr place, const void* value); - std::vector> getOpPlaces() const; - std::map> getVarPlaces() const + std::vector> get_op_places() const; + std::map> get_var_places() const { return m_var_places; } - std::map> getTensorValues() const + std::map> get_tensor_values() const { return m_tensor_values; }; @@ -236,7 +236,7 @@ namespace ngraph } // namespace pdpd std::vector> - InputModelPDPD::InputModelPDPDImpl::getOpPlaces() const + InputModelPDPD::InputModelPDPDImpl::get_op_places() const { if (m_graph_changed) { @@ -519,19 +519,20 @@ namespace ngraph { } - std::vector> InputModelPDPD::getOpPlaces() const + std::vector> InputModelPDPD::get_op_places() const { - return _impl->getOpPlaces(); + return _impl->get_op_places(); } - std::map> InputModelPDPD::getVarPlaces() const + std::map> + InputModelPDPD::get_var_places() const { - return _impl->getVarPlaces(); + return _impl->get_var_places(); } - std::map> InputModelPDPD::getTensorValues() const + std::map> InputModelPDPD::get_tensor_values() const { - return _impl->getTensorValues(); + return _impl->get_tensor_values(); } std::vector InputModelPDPD::get_inputs() const { return _impl->getInputs(); } From f26ecdd53f0cc872286c2e64643867d5551f1e4b Mon Sep 17 00:00:00 2001 From: Anastasia Popova Date: Thu, 12 Aug 2021 13:08:43 +0300 Subject: [PATCH 18/19] Corrected formulas rendering. (#6986) --- docs/ops/generation/RandomUniform_8.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/ops/generation/RandomUniform_8.md b/docs/ops/generation/RandomUniform_8.md index 4269c82bc6a..4fff2684d6c 100644 --- a/docs/ops/generation/RandomUniform_8.md +++ b/docs/ops/generation/RandomUniform_8.md @@ -8,7 +8,7 @@ **Detailed description**: -*RandomUniform* operation generates random numbers from a uniform distribution in the range `[*minval*, *maxval*)`. +*RandomUniform* operation generates random numbers from a uniform distribution in the range `[minval, maxval)`. The generation algorithm is based on underlying random integer generator that uses Philox algorithm. Philox algorithm is a counter-based pseudo-random generator, which produces uint32 values. Single invocation of Philox algorithm returns four result random values, depending on the given *key* and *counter* values. *Key* and *counter* are initialized @@ -42,7 +42,7 @@ R' = mulhi(R, M) {\oplus} k {\oplus} L \\ mulhi(a, b) = floor((a {\times} b) / 2^{32}) \\ mullo(a, b) = (a {\times} b) \mod 2^{32} \f] -where `{\oplus}` - bitwise xor, *k* = `R_{key}` for updating counter, *k* = `L_{key}` for updating *n*, +where \f${\oplus}\f$ - bitwise xor, *k* = \f$R_{key}\f$ for updating counter, *k* = \f$L_{key}\f$ for updating *n*, *M* = `0xD2511F53` for updating *n*, *M* = `0xCD9E8D57` for updating *counter*. After each round *key* is raised by summing with another pair of const values: @@ -50,7 +50,7 @@ After each round *key* is raised by summing with another pair of const values: L += 0x9E3779B9 \\ R += 0xBB67AE85 \f] -Values *L'_{n}*, *R'_{n}*, *L'_{counter}*, *R'_{counter}* are resulting four random numbers. +Values \f$L'_{n}, R'_{n}, L'_{counter}, R'_{counter}\f$ are resulting four random numbers. 
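As a non-normative illustration, the `mulhi`/`mullo` helpers defined by the formulas above map directly onto 32-bit integer arithmetic; the helper names below are hypothetical and only restate what the specification already says:

```cpp
#include <cstdint>

// mulhi(a, b) = floor((a * b) / 2^32), mullo(a, b) = (a * b) mod 2^32,
// exactly as defined in the formulas above.
inline uint32_t mulhi(uint32_t a, uint32_t b) {
    return static_cast<uint32_t>((static_cast<uint64_t>(a) * b) >> 32);
}
inline uint32_t mullo(uint32_t a, uint32_t b) {
    return static_cast<uint32_t>(static_cast<uint64_t>(a) * b);
}

// One value update of a Philox round as written in the specification:
// R' = mulhi(R, M) ^ k ^ L, with M and k chosen per the rules above.
inline uint32_t philox_update(uint32_t R, uint32_t L, uint32_t k, uint32_t M) {
    return mulhi(R, M) ^ k ^ L;
}
```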
Float values between [0..1) are obtained from 32-bit integers by the following rules. From 273c7188a48ecedc708764923683efe98f06d3b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dawid=20Ko=C5=BCykowski?= Date: Thu, 12 Aug 2021 12:11:30 +0200 Subject: [PATCH 19/19] Beautify operator specifications (#6958) * beautify operator specifications * further update ops specs * update FloorMod spec * update adaptive pool spec * update HSwish spec * bringg back old erf version --- docs/ops/activation/Clamp_1.md | 2 +- docs/ops/activation/GELU_2.md | 2 +- docs/ops/activation/GELU_7.md | 4 ++-- docs/ops/activation/HSigmoid_5.md | 2 +- docs/ops/activation/HSwish_4.md | 2 +- docs/ops/activation/HardSigmoid_1.md | 5 ++++- docs/ops/activation/LogSoftmax_5.md | 4 ++-- docs/ops/activation/ReLU_1.md | 2 +- docs/ops/arithmetic/Abs_1.md | 2 +- docs/ops/arithmetic/Ceiling_1.md | 2 +- docs/ops/arithmetic/Divide_1.md | 2 +- docs/ops/arithmetic/FloorMod_1.md | 2 +- docs/ops/arithmetic/Floor_1.md | 2 +- docs/ops/arithmetic/Maximum_1.md | 2 +- docs/ops/arithmetic/Minimum_1.md | 2 +- docs/ops/arithmetic/Mod_1.md | 2 +- docs/ops/arithmetic/Multiply_1.md | 2 +- docs/ops/comparison/GreaterEqual_1.md | 2 +- docs/ops/comparison/LessEqual_1.md | 2 +- docs/ops/comparison/NotEqual_1.md | 2 +- docs/ops/convolution/Convolution_1.md | 6 +++--- docs/ops/convolution/DeformableConvolution_1.md | 2 +- docs/ops/convolution/DeformableConvolution_8.md | 2 +- docs/ops/logical/LogicalNot_1.md | 2 +- docs/ops/logical/LogicalXor_1.md | 2 +- docs/ops/pooling/AdaptiveAvgPool_8.md | 14 +++++++------- docs/ops/pooling/AdaptiveMaxPool_8.md | 12 ++++++------ 27 files changed, 45 insertions(+), 42 deletions(-) diff --git a/docs/ops/activation/Clamp_1.md b/docs/ops/activation/Clamp_1.md index d168ae8ce57..bc6b7edd3c9 100644 --- a/docs/ops/activation/Clamp_1.md +++ b/docs/ops/activation/Clamp_1.md @@ -15,7 +15,7 @@ Let *min_value* and *max_value* be *min* and *max*, respectively. The mathematical formula of *Clamp* is as follows: \f[ -clamp( x_{i} )=\min\big( \max\left( x_{i}, min\_value \right), max\_value \big) +clamp( x_{i} )=\min\big( \max\left( x_{i},\ min\_value \right),\ max\_value \big) \f] **Attributes**: diff --git a/docs/ops/activation/GELU_2.md b/docs/ops/activation/GELU_2.md index c61905191a4..3d2adaa14de 100644 --- a/docs/ops/activation/GELU_2.md +++ b/docs/ops/activation/GELU_2.md @@ -12,7 +12,7 @@ It performs element-wise activation function on a given input tensor, based on the following mathematical formula: \f[ - Gelu(x) = x\cdot\Phi(x) = x\cdot\frac{1}{2}\cdot\left[1 + erf\left(x/\sqrt{2}\right)\right] + Gelu(x) = x\cdot\Phi(x) = x\cdot\frac{1}{2}\cdot\left[1 + erf\frac{x}{\sqrt{2}}\right] \f] where Φ(x) is the Cumulative Distribution Function for Gaussian Distribution. 
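As a non-normative sketch, the erf-based formula above corresponds directly to standard library calls; this is not the plugin implementation, just the formula spelled out in code:

```cpp
#include <cmath>

// Gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), as in the formula above.
inline float gelu_erf(float x) {
    return 0.5f * x * (1.0f + std::erf(x / std::sqrt(2.0f)));
}
```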
diff --git a/docs/ops/activation/GELU_7.md b/docs/ops/activation/GELU_7.md index 44f182a9ab3..f11a4813a07 100644 --- a/docs/ops/activation/GELU_7.md +++ b/docs/ops/activation/GELU_7.md @@ -22,13 +22,13 @@ The *Gelu* function may be approximated in two different ways based on *approxim For `erf` approximation mode, *Gelu* function is represented as: \f[ - Gelu(x) = x\cdot\Phi(x) = x\cdot\frac{1}{2}\cdot\left[1 + erf\left(x/\sqrt{2}\right)\right] + Gelu(x) = x\cdot\Phi(x) = x\cdot\frac{1}{2}\cdot\left[1 + erf\frac{x}{\sqrt{2}}\right] \f] For `tanh` approximation mode, *Gelu* function is represented as: \f[ - Gelu(x) \approx x\cdot\frac{1}{2}\cdot \left(1 + \tanh\left[\sqrt{2/\pi} \cdot (x + 0.044715 \cdot x^3)\right]\right) + Gelu(x) \approx x\cdot\frac{1}{2}\cdot \left(1 + \tanh\left[\sqrt{\frac{2}{\pi}} \cdot (x + 0.044715 \cdot x^3)\right]\right) \f] **Attributes** diff --git a/docs/ops/activation/HSigmoid_5.md b/docs/ops/activation/HSigmoid_5.md index 2470ccb00da..367327a4f85 100644 --- a/docs/ops/activation/HSigmoid_5.md +++ b/docs/ops/activation/HSigmoid_5.md @@ -10,7 +10,7 @@ element in the output tensor with the following formula: \f[ -HSigmoid(x) = \frac{min(max(x + 3, 0), 6)}{6} +HSigmoid(x) = \frac{min(max(x + 3,\ 0),\ 6)}{6} \f] The HSigmoid operation is introduced in the following [article](https://arxiv.org/pdf/1905.02244.pdf). diff --git a/docs/ops/activation/HSwish_4.md b/docs/ops/activation/HSwish_4.md index a9ae8168a1d..3f27517a44b 100644 --- a/docs/ops/activation/HSwish_4.md +++ b/docs/ops/activation/HSwish_4.md @@ -10,7 +10,7 @@ element in the output tensor with the following formula: \f[ -HSwish(x) = x \frac{min(max(x + 3, 0), 6)}{6} +HSwish(x) = x \cdot \frac{min(max(x + 3,\ 0),\ 6)}{6} \f] The HSwish operation is introduced in the following [article](https://arxiv.org/pdf/1905.02244.pdf). diff --git a/docs/ops/activation/HardSigmoid_1.md b/docs/ops/activation/HardSigmoid_1.md index 03c5c11606e..8403ca8a1ec 100644 --- a/docs/ops/activation/HardSigmoid_1.md +++ b/docs/ops/activation/HardSigmoid_1.md @@ -12,10 +12,13 @@ For each element from the input tensor calculates corresponding element in the output tensor with the following formula: + \f[ - y = max(0, min(1, alpha * x + beta)) + y = max(0,\ min(1,\ \alpha x + \beta)) \f] + where α corresponds to `alpha` scalar input and β corresponds to `beta` scalar input. + **Inputs** * **1**: An tensor of type *T*. 
**Required.** diff --git a/docs/ops/activation/LogSoftmax_5.md b/docs/ops/activation/LogSoftmax_5.md index 60035120417..d26488fa968 100644 --- a/docs/ops/activation/LogSoftmax_5.md +++ b/docs/ops/activation/LogSoftmax_5.md @@ -8,8 +8,8 @@ **Note**: This is recommended to not compute LogSoftmax directly as Log(Softmax(x, axis)), more numeric stable is to compute LogSoftmax as: \f[ -t = (x - ReduceMax(x, axis)) \\ -LogSoftmax(x, axis) = t - Log(ReduceSum(Exp(t), axis)) +t = (x - ReduceMax(x,\ axis)) \\ +LogSoftmax(x, axis) = t - Log(ReduceSum(Exp(t),\ axis)) \f] **Attributes** diff --git a/docs/ops/activation/ReLU_1.md b/docs/ops/activation/ReLU_1.md index b3edf994e01..5b401dbc908 100644 --- a/docs/ops/activation/ReLU_1.md +++ b/docs/ops/activation/ReLU_1.md @@ -15,7 +15,7 @@ For each element from the input tensor calculates corresponding element in the output tensor with the following formula: \f[ - Y_{i}^{( l )} = max(0, Y_{i}^{( l - 1 )}) + Y_{i}^{( l )} = max(0,\ Y_{i}^{( l - 1 )}) \f] **Inputs**: diff --git a/docs/ops/arithmetic/Abs_1.md b/docs/ops/arithmetic/Abs_1.md index 426daee3806..1dc73dee933 100644 --- a/docs/ops/arithmetic/Abs_1.md +++ b/docs/ops/arithmetic/Abs_1.md @@ -25,7 +25,7 @@ *Abs* does the following with the input tensor *a*: \f[ -a_{i} = abs(a_{i}) +a_{i} = \vert a_{i} \vert \f] **Examples** diff --git a/docs/ops/arithmetic/Ceiling_1.md b/docs/ops/arithmetic/Ceiling_1.md index 4d4cfeb9450..e091824c96d 100644 --- a/docs/ops/arithmetic/Ceiling_1.md +++ b/docs/ops/arithmetic/Ceiling_1.md @@ -10,7 +10,7 @@ element in the output tensor with the following formula: \f[ -a_{i} = ceiling(a_{i}) +a_{i} = \lceil a_{i} \rceil \f] **Attributes**: *Ceiling* operation has no attributes. diff --git a/docs/ops/arithmetic/Divide_1.md b/docs/ops/arithmetic/Divide_1.md index b16198a05ad..b69a07454a1 100644 --- a/docs/ops/arithmetic/Divide_1.md +++ b/docs/ops/arithmetic/Divide_1.md @@ -11,7 +11,7 @@ Before performing arithmetic operation, input tensors *a* and *b* are broadcaste After broadcasting *Divide* performs division operation for the input tensors *a* and *b* using the formula below: \f[ -o_{i} = a_{i} / b_{i} +o_{i} = \frac{a_{i}}{b_{i}} \f] The result of division by zero is undefined. diff --git a/docs/ops/arithmetic/FloorMod_1.md b/docs/ops/arithmetic/FloorMod_1.md index 27c77ade3fa..c573dee8304 100644 --- a/docs/ops/arithmetic/FloorMod_1.md +++ b/docs/ops/arithmetic/FloorMod_1.md @@ -10,7 +10,7 @@ As a first step input tensors *a* and *b* are broadcasted if their shapes differ. Broadcasting is performed according to `auto_broadcast` attribute specification. As a second step *FloorMod* operation is computed element-wise on the input tensors *a* and *b* according to the formula below: \f[ -o_{i} = a_{i} % b_{i} +o_{i} = a_{i} \mod b_{i} \f] *FloorMod* operation computes a reminder of a floored division. It is the same behaviour like in Python programming language: `floor(x / y) * y + floor_mod(x, y) = x`. The sign of the result is equal to a sign of a divisor. The result of division by zero is undefined. diff --git a/docs/ops/arithmetic/Floor_1.md b/docs/ops/arithmetic/Floor_1.md index 910ce43d590..06690f06df8 100644 --- a/docs/ops/arithmetic/Floor_1.md +++ b/docs/ops/arithmetic/Floor_1.md @@ -10,7 +10,7 @@ element in the output tensor with the following formula: \f[ -a_{i} = floor(a_{i}) +a_{i} = \lfloor a_{i} \rfloor \f] **Attributes**: *Floor* operation has no attributes. 
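Returning to the numerically stable LogSoftmax formulation quoted earlier in this patch, a minimal 1-D sketch of that two-step computation (illustrative only, not the actual kernel; input is assumed non-empty) is:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Numerically stable LogSoftmax over a 1-D vector:
// t = x - max(x); y = t - log(sum(exp(t))).
std::vector<float> log_softmax_1d(const std::vector<float>& x) {
    const float max_val = *std::max_element(x.begin(), x.end());
    float sum = 0.0f;
    for (float v : x)
        sum += std::exp(v - max_val);
    const float log_sum = std::log(sum);
    std::vector<float> y(x.size());
    for (std::size_t i = 0; i < x.size(); ++i)
        y[i] = (x[i] - max_val) - log_sum;
    return y;
}
```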
diff --git a/docs/ops/arithmetic/Maximum_1.md b/docs/ops/arithmetic/Maximum_1.md index d16db0e0d77..18eb0e757b9 100644 --- a/docs/ops/arithmetic/Maximum_1.md +++ b/docs/ops/arithmetic/Maximum_1.md @@ -12,7 +12,7 @@ As a first step input tensors *a* and *b* are broadcasted if their shapes differ After broadcasting *Maximum* does the following with the input tensors *a* and *b*: \f[ -o_{i} = max(a_{i}, b_{i}) +o_{i} = max(a_{i},\ b_{i}) \f] **Attributes**: diff --git a/docs/ops/arithmetic/Minimum_1.md b/docs/ops/arithmetic/Minimum_1.md index 69d5e8d85ef..30204e136dc 100644 --- a/docs/ops/arithmetic/Minimum_1.md +++ b/docs/ops/arithmetic/Minimum_1.md @@ -10,7 +10,7 @@ As a first step input tensors *a* and *b* are broadcasted if their shapes differ. Broadcasting is performed according to `auto_broadcast` attribute specification. As a second step *Minimum* operation is computed element-wise on the input tensors *a* and *b* according to the formula below: \f[ -o_{i} = min(a_{i}, b_{i}) +o_{i} = min(a_{i},\ b_{i}) \f] **Attributes**: diff --git a/docs/ops/arithmetic/Mod_1.md b/docs/ops/arithmetic/Mod_1.md index 7daf20d565c..df414c0f4fe 100644 --- a/docs/ops/arithmetic/Mod_1.md +++ b/docs/ops/arithmetic/Mod_1.md @@ -10,7 +10,7 @@ As a first step input tensors *a* and *b* are broadcasted if their shapes differ. Broadcasting is performed according to `auto_broadcast` attribute specification. As a second step *Mod* operation is computed element-wise on the input tensors *a* and *b* according to the formula below: \f[ -o_{i} = a_{i} % b_{i} +o_{i} = a_{i} \mod b_{i} \f] *Mod* operation computes a reminder of a truncated division. It is the same behaviour like in C programming language: `truncated(x / y) * y + truncated_mod(x, y) = x`. The sign of the result is equal to a sign of a dividend. The result of division by zero is undefined. 
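The FloorMod and Mod hunks above describe two different remainder semantics (floored vs. truncated division, sign of divisor vs. sign of dividend); a small illustrative sketch of both, with hypothetical helper names:

```cpp
#include <cmath>

// FloorMod: remainder of a floored division; the result takes the sign of the
// divisor, so floor(x / y) * y + floor_mod(x, y) == x.
inline float floor_mod(float x, float y) {
    return x - std::floor(x / y) * y;
}

// Mod: remainder of a truncated (C-style) division; the result takes the sign
// of the dividend, so trunc(x / y) * y + trunc_mod(x, y) == x.
inline float trunc_mod(float x, float y) {
    return x - std::trunc(x / y) * y;
}

// Example: floor_mod(-7, 3) == 2, while trunc_mod(-7, 3) == -1.
```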
diff --git a/docs/ops/arithmetic/Multiply_1.md b/docs/ops/arithmetic/Multiply_1.md index 6b8273922f5..a713c9c0eac 100644 --- a/docs/ops/arithmetic/Multiply_1.md +++ b/docs/ops/arithmetic/Multiply_1.md @@ -11,7 +11,7 @@ Before performing arithmetic operation, input tensors *a* and *b* are broadcaste After broadcasting *Multiply* performs multiplication operation for the input tensors *a* and *b* using the formula below: \f[ -o_{i} = a_{i} * b_{i} +o_{i} = a_{i} \cdot b_{i} \f] **Attributes**: diff --git a/docs/ops/comparison/GreaterEqual_1.md b/docs/ops/comparison/GreaterEqual_1.md index 5acf4cbe6d6..f4a29c667fe 100644 --- a/docs/ops/comparison/GreaterEqual_1.md +++ b/docs/ops/comparison/GreaterEqual_1.md @@ -37,7 +37,7 @@ Before performing arithmetic operation, input tensors *a* and *b* are broadcaste After broadcasting *GreaterEqual* does the following with the input tensors *a* and *b*: \f[ -o_{i} = a_{i} >= b_{i} +o_{i} = a_{i} \geq b_{i} \f] **Examples** diff --git a/docs/ops/comparison/LessEqual_1.md b/docs/ops/comparison/LessEqual_1.md index a8b7c810181..bb7eed13793 100644 --- a/docs/ops/comparison/LessEqual_1.md +++ b/docs/ops/comparison/LessEqual_1.md @@ -12,7 +12,7 @@ Before performing arithmetic operation, input tensors *a* and *b* are broadcaste After broadcasting *LessEqual* does the following with the input tensors *a* and *b*: \f[ -o_{i} = a_{i} <= b_{i} +o_{i} = a_{i} \leq b_{i} \f] **Attributes**: diff --git a/docs/ops/comparison/NotEqual_1.md b/docs/ops/comparison/NotEqual_1.md index 456aeb7a785..448f4bcb66a 100644 --- a/docs/ops/comparison/NotEqual_1.md +++ b/docs/ops/comparison/NotEqual_1.md @@ -37,7 +37,7 @@ Before performing arithmetic operation, input tensors *a* and *b* are broadcaste After broadcasting *NotEqual* does the following with the input tensors *a* and *b*: \f[ -o_{i} = a_{i} != b_{i} +o_{i} = a_{i} \neq b_{i} \f] **Examples** diff --git a/docs/ops/convolution/Convolution_1.md b/docs/ops/convolution/Convolution_1.md index e77967e4130..431575b99c3 100644 --- a/docs/ops/convolution/Convolution_1.md +++ b/docs/ops/convolution/Convolution_1.md @@ -16,15 +16,15 @@ n_{out} = \left ( \frac{n_{in} + 2p - k}{s} \right ) + 1 The receptive field in each layer is calculated using the formulas: * Jump in the output feature map: \f[ - j_{out} = j_{in} * s + j_{out} = j_{in} \cdot s \f] * Size of the receptive field of output feature: \f[ - r_{out} = r_{in} + ( k - 1 ) * j_{in} + r_{out} = r_{in} + ( k - 1 ) \cdot j_{in} \f] * Center position of the receptive field of the first output feature: \f[ - start_{out} = start_{in} + ( \frac{k - 1}{2} - p ) * j_{in} + start_{out} = start_{in} + ( \frac{k - 1}{2} - p ) \cdot j_{in} \f] * Output is calculated using the following formula: \f[ diff --git a/docs/ops/convolution/DeformableConvolution_1.md b/docs/ops/convolution/DeformableConvolution_1.md index 77140cb30c7..6c73e202be5 100644 --- a/docs/ops/convolution/DeformableConvolution_1.md +++ b/docs/ops/convolution/DeformableConvolution_1.md @@ -12,7 +12,7 @@ Output is calculated using the following formula: \f[ - y(p) = \sum_{k = 1}^{K}w_{k}x(p + p_{k} + {\Delta}p_{k}) + y(p) = \displaystyle{\sum_{k = 1}^{K}}w_{k}x(p + p_{k} + {\Delta}p_{k}) \f] diff --git a/docs/ops/convolution/DeformableConvolution_8.md b/docs/ops/convolution/DeformableConvolution_8.md index 0474a71193d..fc7c05a235c 100644 --- a/docs/ops/convolution/DeformableConvolution_8.md +++ b/docs/ops/convolution/DeformableConvolution_8.md @@ -14,7 +14,7 @@ Output is calculated using the following formula: \f[ - y(p) = 
\sum_{k = 1}^{K}w_{k}x(p + p_{k} + {\Delta}p_{k}) * {\Delta}m_{k} + y(p) = \displaystyle{\sum_{k = 1}^{K}}w_{k}x(p + p_{k} + {\Delta}p_{k}) \cdot {\Delta}m_{k} \f] Where diff --git a/docs/ops/logical/LogicalNot_1.md b/docs/ops/logical/LogicalNot_1.md index 9dd9132383f..97c41ddb14c 100644 --- a/docs/ops/logical/LogicalNot_1.md +++ b/docs/ops/logical/LogicalNot_1.md @@ -25,7 +25,7 @@ *LogicalNot* does the following with the input tensor *a*: \f[ -a_{i} = not(a_{i}) +a_{i} = \lnot a_{i} \f] **Examples** diff --git a/docs/ops/logical/LogicalXor_1.md b/docs/ops/logical/LogicalXor_1.md index 61bfa9bc25c..16072f01183 100644 --- a/docs/ops/logical/LogicalXor_1.md +++ b/docs/ops/logical/LogicalXor_1.md @@ -37,7 +37,7 @@ Before performing logical operation, input tensors *a* and *b* are broadcasted i After broadcasting *LogicalXor* does the following with the input tensors *a* and *b*: \f[ -o_{i} = a_{i} xor b_{i} +o_{i} = a_{i} \oplus b_{i} \f] **Examples** diff --git a/docs/ops/pooling/AdaptiveAvgPool_8.md b/docs/ops/pooling/AdaptiveAvgPool_8.md index cff1e91e92c..3c6193045ca 100644 --- a/docs/ops/pooling/AdaptiveAvgPool_8.md +++ b/docs/ops/pooling/AdaptiveAvgPool_8.md @@ -11,19 +11,19 @@ The kernel dimensions are calculated using the following formulae for the `NCDHW \f[ \begin{array}{lcl} -d_{start} &=& floor(i*D_{in}/D_{out})\\ -d_{end} &=& ceil((i+1)*D_{in}/D_{out})\\ -h_{start} &=& floor(j*H_{in}/H_{out})\\ -h_{end} &=& ceil((j+1)*H_{in}/H_{out})\\ -w_{start} &=& floor(k*W_{in}/W_{out})\\ -w_{end} &=& ceil((k+1)*W_{in}/W_{out}) +d_{start} &=& \lfloor i \cdot \frac{D_{in}}{D_{out}}\rfloor\\ +d_{end} &=& \lceil(i+1) \cdot \frac{D_{in}}{D_{out}}\rceil\\ +h_{start} &=& \lfloor j \cdot \frac{H_{in}}{H_{out}}\rfloor\\ +h_{end} &=& \lceil(j+1) \cdot \frac{H_{in}}{H_{out}}\rceil\\ +w_{start} &=& \lfloor k \cdot \frac{W_{in}}{W_{out}}\rfloor\\ +w_{end} &=& \lceil(k+1) \cdot \frac{W_{in}}{W_{out}}\rceil \end{array} \f] The output is calculated with the following formula: \f[ -Output(i,j,k) = \frac{Input[d_{start}:d_{end}, h_{start}:h_{end}, w_{start}:w_{end}]}{(d_{end}-d_{start})*(h_{end}-h_{start})*(w_{end}-w_{start})} +Output(i,j,k) = \frac{Input[d_{start}:d_{end}, h_{start}:h_{end}, w_{start}:w_{end}]}{(d_{end}-d_{start}) \cdot (h_{end}-h_{start}) \cdot (w_{end}-w_{start})} \f] **Inputs**: diff --git a/docs/ops/pooling/AdaptiveMaxPool_8.md b/docs/ops/pooling/AdaptiveMaxPool_8.md index a86c3f67ac0..c34629351b8 100644 --- a/docs/ops/pooling/AdaptiveMaxPool_8.md +++ b/docs/ops/pooling/AdaptiveMaxPool_8.md @@ -11,12 +11,12 @@ The kernel dimensions are calculated using the following formulae for the `NCDHW \f[ \begin{array}{lcl} -d_{start} &=& floor(i*D_{in}/D_{out})\\ -d_{end} &=& ceil((i+1)*D_{in}/D_{out})\\ -h_{start} &=& floor(j*H_{in}/H_{out})\\ -h_{end} &=& ceil((j+1)*H_{in}/H_{out})\\ -w_{start} &=& floor(k*W_{in}/W_{out})\\ -w_{end} &=& ceil((k+1)*W_{in}/W_{out}) +d_{start} &=& \lfloor i \cdot \frac{D_{in}}{D_{out}}\rfloor\\ +d_{end} &=& \lceil(i+1) \cdot \frac{D_{in}}{D_{out}}\rceil\\ +h_{start} &=& \lfloor j \cdot \frac{H_{in}}{H_{out}}\rfloor\\ +h_{end} &=& \lceil(j+1) \cdot \frac{H_{in}}{H_{out}}\rceil\\ +w_{start} &=& \lfloor k \cdot \frac{W_{in}}{W_{out}}\rfloor\\ +w_{end} &=& \lceil(k+1) \cdot \frac{W_{in}}{W_{out}}\rceil \end{array} \f]
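As a closing illustration of the adaptive pooling formulas above, the per-axis window bounds can be sketched as follows (names are hypothetical; `out_size` is assumed to be non-zero):

```cpp
#include <cstddef>

// Per-axis adaptive pooling window for output index i:
// start = floor(i * in_size / out_size), end = ceil((i + 1) * in_size / out_size).
struct Window {
    std::size_t start;
    std::size_t end;  // exclusive
};

inline Window adaptive_window(std::size_t i, std::size_t in_size, std::size_t out_size) {
    Window w;
    w.start = (i * in_size) / out_size;                     // floor
    w.end = ((i + 1) * in_size + out_size - 1) / out_size;  // ceil
    return w;
}
```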