Merge remote-tracking branch 'upstream/master' into debian-packages

Commit: 6d57fc75a3
Author: Ilya Lavrenov
Date: 2022-02-18 14:30:26 +03:00

117 changed files with 957 additions and 665 deletions

View File

@@ -99,7 +99,7 @@ CxxCatchStatement: '^.*$'
 CxxTryStatement: '^.*$'
 CxxForRangeStatement: '^.*$'
 MsAsmStatement: 'XXXX'
-NullStatement: 'XXXX'
+NullStatement: '^.*$'
 DeclarationStatement: '^.*$'
 TranslationUnit: 'XXXX'
 UnexposedAttribute: '^.*$'

View File

@@ -139,6 +139,16 @@ function(build_docs)
         )
     endif()
+    list(APPEND commands
+        COMMAND ${CMAKE_COMMAND} -E copy ${API_DOCS_IN}/api_reference.rst ${API_DOCS_OUT}/api_reference.rst
+    )
+    if(ENABLE_PYTHON)
+        list(APPEND commands
+            COMMAND ${CMAKE_COMMAND} -E copy_directory ${API_DOCS_IN}/ie_python_api ${API_DOCS_OUT}/ie_python_api
+        )
+    endif()
     # omz doc files
     if(EXISTS "${OMZ_DOCS_DIR}")
         get_filename_component(OMZ_DOCS_DIR "${OMZ_DOCS_DIR}" ABSOLUTE)
@@ -196,7 +206,6 @@ function(build_docs)
         COMMAND ${PYTHON_EXECUTABLE} ${COPY_IMAGES_SCRIPT} ${XML_OUTPUT} ${RST_OUTPUT}
         COMMAND ${PYTHON_EXECUTABLE} ${DOXYGEN_MAPPING_SCRIPT} ${XML_OUTPUT} ${DOCS_BUILD_DIR} ${OpenVINO_SOURCE_DIR}/../
         COMMAND ${CMAKE_COMMAND} -E copy ${SPHINX_INDEX_IN} ${SPHINX_INDEX_OUT}
-        COMMAND ${CMAKE_COMMAND} -E copy_directory ${API_DOCS_IN} ${API_DOCS_OUT}
         COMMAND ${CMAKE_COMMAND} -E copy_directory ${SPHINX_TEMPLATES_IN} ${SPHINX_TEMPLATES_OUT}
         COMMAND ${CMAKE_COMMAND} -E copy_directory ${DOXYREST_IN} ${DOXYREST_OUT}
         COMMAND ${CMAKE_COMMAND} -E copy_directory ${DOXYREST_SPHINX_IN} ${DOXYREST_SPHINX_OUT}

View File

@@ -24,7 +24,10 @@ static const char input_message[] =
     " of files for each input (except cases with single file for any input):"
     "\"input1:1.jpg input2:1.bin\", \"input1:1.bin,2.bin input2:3.bin input3:4.bin,5.bin \"."
     " Also you can pass specific keys for inputs: \"random\" - for fillling input with random data,"
-    " \"image_info\" - for filling input with image size.";
+    " \"image_info\" - for filling input with image size.\n"
+    " You should specify either one files set to be used for all inputs (without "
+    "providing "
+    "input names) or separate files sets for every input of model (providing inputs names).";
 /// @brief message for model argument
 static const char model_message[] =
@@ -329,7 +332,7 @@ DEFINE_string(data_shape, "", data_shape_message);
 DEFINE_string(layout, "", layout_message);
 /// @brief Define flag for inference precision
-DEFINE_string(infer_precision, "f32", inference_precision_message);
+DEFINE_string(infer_precision, "", inference_precision_message);
 /// @brief Specify precision for all input layers of the network
 DEFINE_string(ip, "", inputs_precision_message);

View File

@@ -329,7 +329,7 @@ int main(int argc, char* argv[]) {
             } else if (supported(ov::num_streams.name())) {
                 // Use API 2.0 key for streams
                 key = ov::num_streams.name();
-                device_config[key] = ov::NumStreams::AUTO;
+                device_config[key] = ov::streams::AUTO;
             }
         }
     }
@@ -550,7 +550,9 @@ int main(int argc, char* argv[]) {
                 info.at(name).type = type_to_set;
             }
         }
-        // Explicitly set inputs layout.
+        }
+        // Explicitly set inputs layout.
+        if (!name.empty() && !app_inputs_info[0].at(name).layout.empty()) {
             in.model().set_layout(app_inputs_info[0].at(name).layout);
         }
     }
@@ -1059,8 +1061,7 @@ int main(int argc, char* argv[]) {
             StatisticsVariant("Percentile boundary", "percentile_boundary", FLAGS_latency_percentile),
             StatisticsVariant("Average latency (ms)", "latency_avg", generalLatency.avg),
             StatisticsVariant("Min latency (ms)", "latency_min", generalLatency.min),
-            StatisticsVariant("Max latency (ms)", "latency_max", generalLatency.max),
-            StatisticsVariant("throughput", "throughput", fps)});
+            StatisticsVariant("Max latency (ms)", "latency_max", generalLatency.max)});
         if (FLAGS_pcseq && app_inputs_info.size() > 1) {
             for (size_t i = 0; i < groupLatencies.size(); ++i) {
@@ -1070,6 +1071,8 @@ int main(int argc, char* argv[]) {
                 }
            }
         }
+        statistics->add_parameters(StatisticsReport::Category::EXECUTION_RESULTS,
+                                   {StatisticsVariant("throughput", "throughput", fps)});
     }
     progressBar.finish();

View File

@@ -317,7 +317,11 @@ std::map<std::string, std::vector<std::string>> parse_input_arguments(const std:
         }
         for (auto& file : files.second) {
-            readInputFilesArguments(mapped_files[files.first], file);
+            if (file == "image_info" || file == "random") {
+                mapped_files[files.first].push_back(file);
+            } else {
+                readInputFilesArguments(mapped_files[files.first], file);
+            }
         }
     }
     args_it = files_end;

View File

@@ -53,7 +53,7 @@ int main(int argc, char* argv[]) {
     // try to find it.
     ov::NodeVector ops = model->get_ops();
     auto it = std::find_if(ops.begin(), ops.end(), [](std::shared_ptr<ov::Node> node) {
-        return node->get_type_info() == ngraph::op::DetectionOutput::get_type_info_static();
+        return node->get_type_info().name == ngraph::op::DetectionOutput::get_type_info_static().name;
     });
     if (it == ops.end()) {
         throw std::logic_error("model does not contain DetectionOutput layer");
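Note on the hunk above: the lookup now compares the `name` fields of the two `DiscreteTypeInfo` objects, which matches a DetectionOutput node regardless of the op's version. A minimal sketch of the same search, hedged: it assumes `model` holds a loaded `ov::Model`, and the string literal stands in for the static type info used in the sample.

    // Find any node whose type name is "DetectionOutput"; comparing the
    // name field (a C string, hence the std::string wrapper) tolerates
    // different opset versions of the same op.
    ov::NodeVector ops = model->get_ops();
    auto it = std::find_if(ops.begin(), ops.end(), [](const std::shared_ptr<ov::Node>& node) {
        return std::string(node->get_type_info().name) == "DetectionOutput";
    });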

View File

@@ -107,7 +107,8 @@ Options:
 -q "<mode>" Optional. Input quantization mode: static (default), dynamic, or user (use with -sf).
 -qb "<integer>" Optional. Weight bits for quantization: 8 or 16 (default)
 -sf "<double>" Optional. User-specified input scale factor for quantization (use with -q user). If the network contains multiple inputs, provide scale factors by separating them with commas.
--bs "<integer>" Optional. Batch size 1-8 (default 1)
+-bs "<integer>" Optional. Batch size 1-8
+-layout "<string>" Optional. Prompts how network layouts should be treated by application.For example, \"input1[NCHW],input2[NC]\" or \"[NCHW]\" in case of one input size.
 -r "<path>" Optional. Read reference score file and compare scores. Example of usage: <reference.ark> or <reference.npz>
 -rg "<path>" Read GNA model from file using path/filename provided (required if -m is missing).
 -wg "<path>" Optional. Write GNA model to file using path/filename provided.

View File

@@ -83,7 +83,7 @@ int main(int argc, char* argv[]) {
     // -------------------------------------
     ov::Core core;
     slog::info << "Loading model files:" << slog::endl << FLAGS_m << slog::endl;
-    uint32_t batchSize = (FLAGS_cw_r > 0 || FLAGS_cw_l > 0) ? 1 : (uint32_t)FLAGS_bs;
+    uint32_t batchSize = (FLAGS_cw_r > 0 || FLAGS_cw_l > 0 || !FLAGS_bs) ? 1 : (uint32_t)FLAGS_bs;
     std::shared_ptr<ov::Model> model;
     std::vector<std::string> outputs;
     std::vector<size_t> ports;
@@ -115,16 +115,38 @@ int main(int argc, char* argv[]) {
             }
         }
         check_number_of_inputs(model->inputs().size(), numInputFiles);
-        const ov::Layout tensor_layout{"NC"};
         ov::preprocess::PrePostProcessor proc(model);
-        for (int i = 0; i < model->inputs().size(); i++) {
-            proc.input(i).tensor().set_element_type(ov::element::f32).set_layout(tensor_layout);
+        const auto& inputs = model->inputs();
+        std::map<std::string, std::string> custom_layouts;
+        if (!FLAGS_layout.empty()) {
+            custom_layouts = parse_input_layouts(FLAGS_layout, inputs);
+        }
+        for (const auto& input : inputs) {
+            const auto& item_name = input.get_any_name();
+            auto& in = proc.input(item_name);
+            in.tensor().set_element_type(ov::element::f32);
+            // Explicitly set inputs layout
+            if (custom_layouts.count(item_name) > 0) {
+                in.model().set_layout(ov::Layout(custom_layouts.at(item_name)));
+            }
         }
         for (int i = 0; i < model->outputs().size(); i++) {
             proc.output(i).tensor().set_element_type(ov::element::f32);
         }
         model = proc.build();
-        ov::set_batch(model, batchSize);
+        if (FLAGS_bs) {
+            if (FLAGS_layout.empty() &&
+                std::any_of(inputs.begin(), inputs.end(), [](const ov::Output<ov::Node>& i) {
+                    return ov::layout::get_layout(i).empty();
+                })) {
+                throw std::logic_error(
+                    "-bs option is set to " + std::to_string(FLAGS_bs) +
+                    " but model does not contain layout information for any input. Please "
+                    "specify it explicitly using -layout option. For example, input1[NCHW], input2[NC] or [NC]");
+            } else {
+                ov::set_batch(model, batchSize);
+            }
+        }
     }
@@ -235,6 +257,22 @@ int main(int argc, char* argv[]) {
             throw std::runtime_error("Cannot open model file " + FLAGS_rg);
         }
         executableNet = core.import_model(streamrq, deviceStr, genericPluginConfig);
+        // loading batch from exported model
+        const auto& imported_inputs = executableNet.inputs();
+        if (std::any_of(imported_inputs.begin(), imported_inputs.end(), [](const ov::Output<const ov::Node>& i) {
+                return ov::layout::get_layout(i).empty();
+            })) {
+            slog::warn << "No batch dimension was found at any input, assuming batch to be 1." << slog::endl;
+            batchSize = 1;
+        } else {
+            for (auto& info : imported_inputs) {
+                auto imported_layout = ov::layout::get_layout(info);
+                if (ov::layout::has_batch(imported_layout)) {
+                    batchSize = (uint32_t)info.get_shape()[ov::layout::batch_idx(imported_layout)];
+                    break;
+                }
+            }
+        }
     }
     // --------------------------- Exporting gna model using InferenceEngine AOT API---------------------
     if (!FLAGS_wg.empty()) {
@@ -251,7 +289,8 @@ int main(int argc, char* argv[]) {
         return 0;
     }
     // ---------------------------------------------------------------------------------------------------------
-    // --------------------------- Step 3. Create infer request --------------------------------------------------
+    // --------------------------- Step 3. Create infer request
+    // --------------------------------------------------
     std::vector<InferRequestStruct> inferRequests(1);
     for (auto& inferRequest : inferRequests) {
@@ -433,7 +472,8 @@ int main(int argc, char* argv[]) {
                 outputBlob =
                     inferRequest.inferRequest.get_tensor(executableNet.output(FLAGS_oname));
             }
-            // locked memory holder should be alive all time while access to its buffer happens
+            // locked memory holder should be alive all time while access to its buffer
+            // happens
             auto byteSize = numScoresPerFrame * sizeof(float);
             std::memcpy(outputFrame, outputBlob.data<float>(), byteSize);
         }
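The import path above now derives the batch size from layout metadata instead of trusting -bs. A condensed sketch of that deduction, using only the OpenVINO calls that appear in the hunk; `input` is assumed to be one `ov::Output<const ov::Node>` of the imported model:

    // Read the batch dimension from an input's layout, defaulting to 1 when
    // the layout carries no batch ("N") dimension.
    uint32_t batch = 1;
    const ov::Layout layout = ov::layout::get_layout(input);  // e.g. "NC"
    if (!layout.empty() && ov::layout::has_batch(layout)) {
        batch = static_cast<uint32_t>(input.get_shape()[ov::layout::batch_idx(layout)]);
    }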

View File

@@ -121,6 +121,11 @@ static const char output_layer_names_message[] = "Optional. Layer names for outp
 static const char input_layer_names_message[] = "Optional. Layer names for input blobs. "
                                                 "The names are separated with \",\" "
                                                 "Example: Input1,Input2 ";
+/// @brief message for inputs layer names
+static const char layout_message[] =
+    "Optional. Prompts how network layouts should be treated by application. "
+    "For example, \"input1[NCHW],input2[NC]\" or \"[NCHW]\" in case of one input size.";
+;
 /// @brief message for PWL max error percent
 static const char pwl_max_error_percent_message[] = "Optional. The maximum percent of error for PWL function."
@@ -176,8 +181,8 @@ DEFINE_int32(qb, 16, quantization_bits_message);
 /// @brief Scale factor for quantization
 DEFINE_string(sf, "", scale_factor_message);
-/// @brief Batch size (default 1)
-DEFINE_int32(bs, 1, batch_size_message);
+/// @brief Batch size (default 0)
+DEFINE_int32(bs, 0, batch_size_message);
 /// @brief Number of threads to use for inference on the CPU (also affects Hetero cases)
 DEFINE_int32(nthreads, 1, infer_num_threads_message);
@@ -194,6 +199,9 @@ DEFINE_string(oname, "", output_layer_names_message);
 /// @brief Input layer name
 DEFINE_string(iname, "", input_layer_names_message);
+/// @brief Input layer name
+DEFINE_string(layout, "", layout_message);
 /// @brief PWL max error percent
 DEFINE_double(pwl_me, 1.0, pwl_max_error_percent_message);
@@ -223,6 +231,7 @@ static void show_usage() {
     std::cout << " -cw_r \"<integer>\" " << context_window_message_r << std::endl;
     std::cout << " -oname \"<string>\" " << output_layer_names_message << std::endl;
     std::cout << " -iname \"<string>\" " << input_layer_names_message << std::endl;
+    std::cout << " -layout \"<string>\" " << layout_message << std::endl;
     std::cout << " -pwl_me \"<double>\" " << pwl_max_error_percent_message << std::endl;
     std::cout << " -exec_target \"<string>\" " << execution_target_message << std::endl;
     std::cout << " -compile_target \"<string>\" " << compile_target_message << std::endl;
@@ -282,7 +291,7 @@ bool parse_and_check_command_line(int argc, char* argv[]) {
     }
     uint32_t batchSize = (uint32_t)FLAGS_bs;
-    if ((batchSize < 1) || (batchSize > 8)) {
+    if (batchSize && ((batchSize < 1) || (batchSize > 8))) {
         throw std::logic_error("Batch size out of range (1..8).");
     }

View File

@@ -488,3 +488,43 @@ std::vector<std::string> convert_str_to_vector(std::string str) {
     }
     return blobName;
 }
+
+/**
+ * @brief Parse layout string like "input0[value0],input1[value1]" or "[value]" (applied to all inputs)
+ * @param layout_string input names with layout values
+ * @param input_info reference to vector of inputs
+ * @return map of inputs with layout values
+ */
+std::map<std::string, std::string> parse_input_layouts(const std::string& layout_string,
+                                                       const std::vector<ov::Output<ov::Node>>& input_info) {
+    // Parse parameter string like "input0[value0],input1[value1]" or "[value]" (applied to all
+    // inputs)
+    std::map<std::string, std::string> return_value;
+    std::string search_string = layout_string;
+    auto start_pos = search_string.find_first_of('[');
+    auto input_name = search_string.substr(0, start_pos);
+    while (start_pos != std::string::npos) {
+        auto end_pos = search_string.find_first_of(']');
+        if (end_pos == std::string::npos)
+            break;
+        if (start_pos)
+            input_name = search_string.substr(0, start_pos);
+        auto input_value = search_string.substr(start_pos + 1, end_pos - start_pos - 1);
+        if (!input_name.empty()) {
+            return_value[input_name] = input_value;
+        } else {
+            for (auto& item : input_info) {
+                return_value[item.get_any_name()] = input_value;
+            }
+        }
+        search_string = search_string.substr(end_pos + 1);
+        if (search_string.empty() || (search_string.front() != ',' && search_string.front() != '['))
+            break;
+        if (search_string.front() == ',')
+            search_string = search_string.substr(1);
+        start_pos = search_string.find_first_of('[');
+    }
+    if (!search_string.empty())
+        throw std::logic_error("Can't parse input parameter string: " + layout_string);
+    return return_value;
+}
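The accepted grammar can be exercised standalone. The sketch below reimplements just the string walk from the function above with plain std::string, so it compiles without OpenVINO; the input names are illustrative and the broadcast-to-all-inputs case is reduced to an empty-name key:

    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>

    // Standalone sketch of the parse_input_layouts() string walk above.
    std::map<std::string, std::string> parse_layouts(std::string s) {
        std::map<std::string, std::string> out;
        auto open = s.find_first_of('[');
        std::string name = s.substr(0, open);
        while (open != std::string::npos) {
            auto close = s.find_first_of(']');
            if (close == std::string::npos)
                break;
            if (open)
                name = s.substr(0, open);
            out[name] = s.substr(open + 1, close - open - 1);  // empty name = "all inputs"
            s = s.substr(close + 1);
            if (s.empty() || (s.front() != ',' && s.front() != '['))
                break;
            if (s.front() == ',')
                s = s.substr(1);
            open = s.find_first_of('[');
        }
        if (!s.empty())
            throw std::logic_error("Can't parse input parameter string");
        return out;
    }

    int main() {
        for (const auto& kv : parse_layouts("input1[NCHW],input2[NC]"))
            std::cout << kv.first << " -> " << kv.second << '\n';
        // prints: input1 -> NCHW
        //         input2 -> NC
    }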

View File

@@ -183,7 +183,10 @@ void regclass_AsyncInferQueue(py::module m) {
     // getIdleRequestId function has an intention to block InferQueue
     // until there is at least one idle (free to use) InferRequest
     auto handle = self.get_idle_request_id();
-    self._idle_handles.pop();
+    {
+        std::lock_guard<std::mutex> lock(self._mutex);
+        self._idle_handles.pop();
+    }
     // Set new inputs label/id from user
     self._user_ids[handle] = userdata;
     // Update inputs if there are any
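The extra brace pair above is deliberate: it ends the lock_guard's lifetime right after the pop, so the mutex is not held while user inputs are copied. A self-contained sketch of the same scoping pattern; the queue and mutex names are illustrative:

    #include <mutex>
    #include <queue>

    std::mutex queue_mutex;
    std::queue<size_t> idle_handles;

    // Caller is assumed to have ensured the queue is non-empty (the real code
    // blocks in get_idle_request_id() until a handle is available).
    size_t take_handle() {
        size_t handle = 0;
        {
            // Lock only around the shared-queue mutation; the guard unlocks at
            // the closing brace, before any slower follow-up work runs.
            std::lock_guard<std::mutex> lock(queue_mutex);
            handle = idle_handles.front();
            idle_handles.pop();
        }
        return handle;  // post-processing here happens without the lock held
    }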

View File

@@ -127,5 +127,4 @@ xfail_issue_63137 = xfail_test(reason="Unsupported operations: OptionalHasElemen
 xfail_issue_63138 = xfail_test(reason="Missing ONNX Shape-15 support")
 xfail_issue_68212 = xfail_test(reason="Unsupported reading model with bytes streams")
-xfail_issue_77668 = xfail_test(reason="Accuracy issue related to Gather-8.")
 xfail_issue_78843 = xfail_test(reason="Missing reference output files for ssd mobilenet models")

View File

@@ -22,8 +22,7 @@ from tests import (
     xfail_issue_48190,
     xfail_issue_58676,
     xfail_issue_78843,
-    xfail_issue_onnx_models_140,
-    xfail_issue_77668)
+    xfail_issue_onnx_models_140)
 MODELS_ROOT_DIR = tests.MODEL_ZOO_DIR
@@ -180,8 +179,6 @@ if len(zoo_models) > 0:
     (xfail_issue_48190, "test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_base_11_roberta_base_11_roberta_base_11_cpu"),
     (xfail_issue_onnx_models_140, "test_onnx_model_zoo_vision_object_detection_segmentation_duc_model_ResNet101_DUC_7_ResNet101_DUC_HDC_ResNet101_DUC_HDC_cpu"),
     (xfail_issue_78843, "test_onnx_model_zoo_vision_object_detection_segmentation_ssd_mobilenetv1_model_ssd_mobilenet_v1_10_ssd_mobilenet_v1_ssd_mobilenet_v1_cpu"),
-    (xfail_issue_77668, "test_onnx_model_zoo_vision_object_detection_segmentation_faster_rcnn_model_FasterRCNN_10_faster_rcnn_R_50_FPN_1x_cpu"),
-    (xfail_issue_77668, "test_onnx_model_zoo_vision_object_detection_segmentation_mask_rcnn_model_MaskRCNN_10_mask_rcnn_R_50_FPN_1x_cpu"),
     # Model MSFT
     (xfail_issue_37973, "test_MSFT_opset7_tf_inception_v2_model_cpu"),
@@ -197,8 +194,6 @@ if len(zoo_models) > 0:
     (xfail_issue_47495, "test_MSFT_opset10_BERT_Squad_bertsquad10_cpu"),
     (xfail_issue_78843, "test_MSFT_opset10_mlperf_ssd_mobilenet_300_ssd_mobilenet_v1_coco_2018_01_28_cpu"),
-    (xfail_issue_77668, "test_MSFT_opset10_faster_rcnn_faster_rcnn_R_50_FPN_1x_cpu"),
-    (xfail_issue_77668, "test_MSFT_opset10_mask_rcnn_mask_rcnn_R_50_FPN_1x_cpu"),
 ]
 for test_case in import_xfail_list + execution_xfail_list:
     xfail, test_name = test_case

View File

@@ -136,6 +136,5 @@ xfail_issue_63136 = xfail_test(reason="Unsupported operation: CastLike")
 xfail_issue_63137 = xfail_test(reason="Unsupported operations: OptionalHasElement, OptionalGetElement")
 xfail_issue_63138 = xfail_test(reason="Missing ONNX Shape-15 support")
-xfail_issue_77668 = xfail_test(reason="Accuracy issue related to Gather-8.")
 xfail_issue_78843 = xfail_test(reason="Missing reference output files for ssd mobilenet models")
 xfail_issue_78741 = xfail_test(reason="Cannot get dims for non static shape")

View File

@@ -23,8 +23,7 @@ from tests_compatibility import (
     xfail_issue_48190,
     xfail_issue_58676,
     xfail_issue_78843,
-    xfail_issue_onnx_models_140,
-    xfail_issue_77668)
+    xfail_issue_onnx_models_140)
 MODELS_ROOT_DIR = tests_compatibility.MODEL_ZOO_DIR
@@ -168,7 +167,6 @@ if len(zoo_models) > 0:
     (xfail_issue_48190, "test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_base_11_roberta_base_11_roberta_base_11_cpu"),
     (xfail_issue_onnx_models_140, "test_onnx_model_zoo_vision_object_detection_segmentation_duc_model_ResNet101_DUC_7_ResNet101_DUC_HDC_ResNet101_DUC_HDC_cpu"),
     (xfail_issue_78843, "test_onnx_model_zoo_vision_object_detection_segmentation_ssd_mobilenetv1_model_ssd_mobilenet_v1_10_ssd_mobilenet_v1_ssd_mobilenet_v1_cpu"),
-    (xfail_issue_77668, "test_onnx_model_zoo_vision_object_detection_segmentation_faster_rcnn_model_FasterRCNN_10_faster_rcnn_R_50_FPN_1x_cpu"),
     # Model MSFT
     (xfail_issue_37973, "test_MSFT_opset7_tf_inception_v2_model_cpu"),
@@ -185,8 +183,6 @@ if len(zoo_models) > 0:
     (xfail_issue_39669, "test_MSFT_opset9_cgan_cgan_cpu"),
     (xfail_issue_47495, "test_MSFT_opset10_BERT_Squad_bertsquad10_cpu"),
     (xfail_issue_78843, "test_MSFT_opset10_mlperf_ssd_mobilenet_300_ssd_mobilenet_v1_coco_2018_01_28_cpu"),
-    (xfail_issue_77668, "test_MSFT_opset10_faster_rcnn_faster_rcnn_R_50_FPN_1x_cpu"),
 ]
 for test_case in import_xfail_list + execution_xfail_list:
     xfail, test_name = test_case

View File

@@ -586,7 +586,7 @@ FakeQuantizeDequantization NetworkHelper::foldDequantization(const std::shared_p
 std::shared_ptr<ngraph::Node> NetworkHelper::separateInStandaloneBranch(std::shared_ptr<ngraph::Node> node,
                                                                         const std::vector<ngraph::element::Type>& defaultPrecisions) {
     FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(node, defaultPrecisions);
-    if (dequantization.isShared()) {
+    if (dequantization.isShared() && !dequantization.empty()) {
         Output<Node> parent = dequantization.data;
         if (dequantization.convert != nullptr) {
             auto convert = dequantization.convert->clone_with_new_inputs({ parent });

View File

@@ -64,17 +64,18 @@ public:
     InitMatMulMask() {
         auto a = pattern::any_input();
         auto b = pattern::any_input();
-        auto matmul = pattern::wrap_type<opset6::MatMul>({a, b});
+        auto matmul_pattern = pattern::wrap_type<opset6::MatMul>({a, b});
         ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
             const auto & pattern_map = m.get_pattern_value_map();
-            const auto & m_output = pattern_map.at(matmul);
+            const auto & matmul = std::dynamic_pointer_cast<opset6::MatMul>(pattern_map.at(matmul_pattern).get_node_shared_ptr());
+            if (!matmul) return false;
             // Assume constant always in the first input port.
             // Initializing weights mask:
             // 1. Looking for Const node with weights
             NodeVector weights_calculation_nodes;
-            auto cur_node = m_output.get_node()->get_input_node_shared_ptr(1);
+            auto cur_node = matmul->get_input_node_shared_ptr(1);
             while (!ngraph::is_type<opset6::Constant>(cur_node) && cur_node->inputs().size()) {
                 weights_calculation_nodes.push_back(cur_node);
@@ -82,17 +83,16 @@ public:
             }
             if (!ngraph::is_type<opset6::Constant>(cur_node)) {
                 NGRAPH_DEBUG << "Can't find Constant weights for MatMul: " <<
-                    m_output.get_node()->get_friendly_name() << std::endl;
+                    matmul->get_friendly_name() << std::endl;
                 return false;
             }
             // 2. Get constant rank to set mask on last dimension
             const auto const_op = std::dynamic_pointer_cast<opset6::Constant>(cur_node);
             const auto shape_rank = const_op->get_shape().size();
-            const auto matmul = std::dynamic_pointer_cast<opset6::MatMul>(m_output.get_node_shared_ptr());
             const auto shift = (matmul->get_transpose_b())? 2 : 1;
             if (shape_rank < shift) {
                 NGRAPH_DEBUG << "Can't init mask for MatMul: " <<
-                    m_output.get_node()->get_friendly_name() << std::endl;
+                    matmul->get_friendly_name() << std::endl;
                 return false;
             }
             const size_t outer_dim = shape_rank - shift;
@@ -101,7 +101,7 @@ public:
             return true;
         };
-        auto m = std::make_shared<ngraph::pattern::Matcher>(matmul, "MatMulInitMask");
+        auto m = std::make_shared<ngraph::pattern::Matcher>(matmul_pattern, "MatMulInitMask");
         register_matcher(m, callback);
     }
 };

View File

@@ -511,6 +511,7 @@ public:
         m_output_low.get_node_shared_ptr(),
         m_output_high.get_node_shared_ptr()};
     auto fq_node = std::dynamic_pointer_cast<op::FakeQuantize>(m_output.get_node_shared_ptr());
+    if (!fq_node) return false;
     size_t idx = 0;
     if (fq_node->get_auto_broadcast() != ngraph::op::AutoBroadcastType::NONE) {
         for (auto node : fq_params_nodes) {
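The added `if (!fq_node) return false;` guards the dynamic_pointer_cast result before the first dereference. A minimal self-contained illustration of why matcher callbacks need this guard; the types here are stand-ins, not the ngraph classes:

    #include <memory>

    struct Node { virtual ~Node() = default; };
    struct FakeQuantize : Node { int levels = 256; };

    // std::dynamic_pointer_cast yields nullptr when the matched node is not
    // actually a FakeQuantize, so the callback must reject the match instead
    // of dereferencing a null pointer.
    bool callback(const std::shared_ptr<Node>& matched) {
        auto fq = std::dynamic_pointer_cast<FakeQuantize>(matched);
        if (!fq)
            return false;           // no transformation applied
        return fq->levels == 256;   // safe to dereference past this point
    }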

View File

@@ -49,6 +49,7 @@ class PropagateNMSPath: public pass::MatcherPass {
     opset8::Reshape,
     op::util::BroadcastBase,
     opset8::StridedSlice,
+    opset8::Slice,
     opset8::VariadicSplit,
     op::util::GatherBase,
     opset8::Concat,

View File

@@ -16,8 +16,7 @@ NGRAPH_RTTI_DEFINITION(pass::MatMulMultiplyFusion, "MatMulMultiplyFusion", 0);
 static std::shared_ptr<Node> fuse_const_to_weights(const std::shared_ptr<Node>& matmul,
                                                    const Output<Node>& weights,
-                                                   std::shared_ptr<opset8::Constant> mul_const,
-                                                   const op::AutoBroadcastSpec& autob) {
+                                                   std::shared_ptr<opset8::Constant> mul_const) {
     auto const_shape = mul_const->get_shape();
     auto const_rank = static_cast<int64_t>(const_shape.size());
     const auto& weights_shape = weights.get_partial_shape();
@@ -149,15 +148,13 @@ pass::MatMulMultiplyFusion::MatMulMultiplyFusion() {
     matcher_pass_callback callback = [=](pattern::Matcher& m) {
         const auto& pattern_map = m.get_pattern_value_map();
         const auto& weights = pattern_map.at(weights_pattern);
-        auto mul = std::dynamic_pointer_cast<opset8::Multiply>(pattern_map.at(mul_pattern).get_node_shared_ptr());
-        if (!mul)
-            return false;
+        auto mul = pattern_map.at(mul_pattern).get_node_shared_ptr();
         auto mul_const = std::dynamic_pointer_cast<opset8::Constant>(pattern_map.at(mul_const_pattern).get_node_shared_ptr());
         if (!mul_const)
             return false;
         auto matmul = pattern_map.at(matmul_pattern).get_node_shared_ptr();
-        auto new_weights = fuse_const_to_weights(matmul, weights, mul_const, mul->get_autob());
+        auto new_weights = fuse_const_to_weights(matmul, weights, mul_const);
         if (!new_weights)
             return false;

View File

@@ -13,40 +13,6 @@
 namespace ov {
 namespace util {
-template <class T>
-struct ValueTyped {
-    template <class U>
-    static auto test(U*) -> decltype(std::declval<typename U::value_type&>(), std::true_type()) {
-        return {};
-    }
-    template <typename>
-    static auto test(...) -> std::false_type {
-        return {};
-    }
-    constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
-};
-
-template <typename, typename>
-struct Read;
-
-template <typename T, typename std::enable_if<ValueTyped<T>::value, bool>::type = true>
-inline typename T::value_type from_string(const std::string& val, const T&) {
-    std::stringstream ss(val);
-    typename T::value_type value;
-    Read<typename T::value_type, void>{}(ss, value);
-    return value;
-}
-
-template <typename>
-struct Write;
-
-template <typename T>
-inline std::string to_string(const T& value) {
-    std::stringstream ss;
-    Write<T>{}(ss, value);
-    return ss.str();
-}
 template <typename T>
 std::string join(const T& v, const std::string& sep = ", ") {
     std::ostringstream ss;

View File

@@ -32,6 +32,19 @@ namespace util {
 template <typename T, typename = void>
 struct Read;
+
+template <class T>
+struct Readable {
+    template <class U>
+    static auto test(U*)
+        -> decltype(std::declval<Read<U>>()(std::declval<std::istream&>(), std::declval<U&>()), std::true_type()) {
+        return {};
+    }
+    template <typename>
+    static auto test(...) -> std::false_type {
+        return {};
+    }
+    constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
+};
 template <class T>
 struct Istreamable {
     template <class U>
@@ -45,35 +58,6 @@ struct Istreamable {
     constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
 };
-template <class T>
-struct Readable {
-    template <class U>
-    static auto test(U*) -> decltype(read(std::declval<std::istream&>(), std::declval<U&>()), std::true_type()) {
-        return {};
-    }
-    template <typename>
-    static auto test(...) -> std::false_type {
-        return {};
-    }
-    constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
-};
-
-template <typename T, typename>
-struct Read {
-    template <typename U>
-    auto operator()(std::istream&, U&) const ->
-        typename std::enable_if<std::is_same<T, U>::value && !Istreamable<U>::value && !Readable<U>::value>::type {
-        OPENVINO_UNREACHABLE("Could read type without std::istream& operator>>(std::istream&, T)",
-                             " defined or ov::util::Read<T> class specialization, T: ",
-                             typeid(T).name());
-    }
-    template <typename U>
-    auto operator()(std::istream& is, U& value) const ->
-        typename std::enable_if<std::is_same<T, U>::value && Istreamable<U>::value && !Readable<U>::value>::type {
-        is >> value;
-    }
-};
 template <>
 struct OPENVINO_API Read<bool> {
     void operator()(std::istream& is, bool& value) const;
@@ -139,12 +123,80 @@
     void operator()(std::istream& is, std::tuple<unsigned int, unsigned int>& tuple) const;
 };
+
+template <typename T>
+auto from_string(const std::string& str) -> const
+    typename std::enable_if<std::is_same<T, std::string>::value, T>::type& {
+    return str;
+}
+
+template <typename T>
+auto from_string(const std::string& val) ->
+    typename std::enable_if<Readable<T>::value && !std::is_same<T, std::string>::value, T>::type {
+    std::stringstream ss(val);
+    T value;
+    Read<T>{}(ss, value);
+    return value;
+}
+
+template <typename T>
+auto from_string(const std::string& val) ->
+    typename std::enable_if<!Readable<T>::value && Istreamable<T>::value && !std::is_same<T, std::string>::value,
+                            T>::type {
+    std::stringstream ss(val);
+    T value;
+    ss >> value;
+    return value;
+}
+
+template <class T>
+struct ValueTyped {
+    template <class U>
+    static auto test(U*) -> decltype(std::declval<typename U::value_type&>(), std::true_type()) {
+        return {};
+    }
+    template <typename>
+    static auto test(...) -> std::false_type {
+        return {};
+    }
+    constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
+};
+
+template <typename T,
+          typename std::enable_if<ValueTyped<T>::value && Readable<typename T::value_type>::value, bool>::type = true>
+typename T::value_type from_string(const std::string& val, const T&) {
+    std::stringstream ss(val);
+    typename T::value_type value;
+    Read<typename T::value_type, void>{}(ss, value);
+    return value;
+}
+
+template <typename T,
+          typename std::enable_if<ValueTyped<T>::value && !Readable<typename T::value_type>::value &&
+                                      Istreamable<typename T::value_type>::value,
+                                  bool>::type = true>
+typename T::value_type from_string(const std::string& val, const T&) {
+    std::stringstream ss(val);
+    typename T::value_type value;
+    ss >> value;
+    return value;
+}
+
+template <typename T>
+auto from_string(const std::string& val) ->
+    typename std::enable_if<!Readable<T>::value && !Istreamable<T>::value && !std::is_same<T, std::string>::value,
+                            T>::type {
+    OPENVINO_UNREACHABLE("Could read type without std::istream& operator>>(std::istream&, T)",
+                         " defined or ov::util::Read<T> class specialization, T: ",
+                         typeid(T).name());
+}
+
 template <typename T, typename A>
 struct Read<std::vector<T, A>, typename std::enable_if<std::is_default_constructible<T>::value>::type> {
     void operator()(std::istream& is, std::vector<T, A>& vec) const {
         while (is.good()) {
-            T v;
-            Read<T>{}(is, v);
+            std::string str;
+            is >> str;
+            auto v = from_string<T>(str);
             vec.push_back(std::move(v));
         }
     }
@@ -156,10 +208,11 @@ struct Read<
     typename std::enable_if<std::is_default_constructible<K>::value && std::is_default_constructible<T>::value>::type> {
     void operator()(std::istream& is, std::map<K, T, C, A>& map) const {
         while (is.good()) {
-            K k;
-            T v;
-            Read<K>{}(is, k);
-            Read<T>{}(is, v);
+            std::string str;
+            is >> str;
+            auto k = from_string<K>(str);
+            is >> str;
+            auto v = from_string<T>(str);
             map.emplace(std::move(k), std::move(v));
         }
     }
@@ -184,7 +237,8 @@ struct Ostreamable {
 template <class T>
 struct Writable {
     template <class U>
-    static auto test(U*) -> decltype(write(std::declval<std::ostream&>(), std::declval<const U&>()), std::true_type()) {
+    static auto test(U*) -> decltype(std::declval<Write<U>>()(std::declval<std::ostream&>(), std::declval<const U&>()),
+                                     std::true_type()) {
         return {};
     }
     template <typename>
@@ -194,18 +248,6 @@ struct Writable {
     constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
 };
-template <typename T>
-struct Write {
-    template <typename U>
-    auto operator()(std::ostream& os, const U&) const ->
-        typename std::enable_if<std::is_same<T, U>::value && !Ostreamable<U>::value && !Writable<U>::value>::type {}
-    template <typename U>
-    auto operator()(std::ostream& os, const U& value) const ->
-        typename std::enable_if<std::is_same<T, U>::value && Ostreamable<U>::value && !Writable<U>::value>::type {
-        os << value;
-    }
-};
 template <>
 struct OPENVINO_API Write<bool> {
     void operator()(std::ostream& is, const bool& b) const;
@@ -226,13 +268,44 @@
     void operator()(std::ostream& os, const std::tuple<unsigned int, unsigned int>& tuple) const;
 };
+
+template <typename T>
+auto to_string(const T& str) -> const typename std::enable_if<std::is_same<T, std::string>::value, T>::type& {
+    return str;
+}
+
+template <typename T>
+auto to_string(const T& value) ->
+    typename std::enable_if<Writable<T>::value && !std::is_same<T, std::string>::value, std::string>::type {
+    std::stringstream ss;
+    Write<T>{}(ss, value);
+    return ss.str();
+}
+
+template <typename T>
+auto to_string(const T& value) ->
+    typename std::enable_if<!Writable<T>::value && Ostreamable<T>::value && !std::is_same<T, std::string>::value,
+                            std::string>::type {
+    std::stringstream ss;
+    ss << value;
+    return ss.str();
+}
+
+template <typename T>
+auto to_string(const T&) ->
+    typename std::enable_if<!Writable<T>::value && !Ostreamable<T>::value && !std::is_same<T, std::string>::value,
+                            std::string>::type {
+    OPENVINO_UNREACHABLE("Could convert to string from type without std::ostream& operator>>(std::ostream&, const T&)",
+                         " defined or ov::util::Write<T> class specialization, T: ",
+                         typeid(T).name());
+}
+
 template <typename T, typename A>
 struct Write<std::vector<T, A>> {
     void operator()(std::ostream& os, const std::vector<T, A>& vec) const {
         if (!vec.empty()) {
             std::size_t i = 0;
             for (auto&& v : vec) {
-                Write<T>{}(os, v);
+                os << to_string(v);
                 if (i < (vec.size() - 1))
                     os << ' ';
                 ++i;
@@ -247,9 +320,9 @@ struct Write<std::map<K, T, C, A>> {
         if (!map.empty()) {
             std::size_t i = 0;
             for (auto&& v : map) {
-                Write<K>{}(os, v.first);
+                os << to_string(v.first);
                 os << ' ';
-                Write<T>{}(os, v.second);
+                os << to_string(v.second);
                 if (i < (map.size() - 1))
                     os << ' ';
                 ++i;
@@ -346,9 +419,6 @@ class OPENVINO_API Any {
     static bool equal(std::type_index lhs, std::type_index rhs);
-    /**
-     * @brief Base API of erased type
-     */
     class OPENVINO_API Base : public std::enable_shared_from_this<Base> {
     public:
         void type_check(const std::type_info&) const;
@@ -364,6 +434,7 @@ class OPENVINO_API Any {
         virtual bool equal(const Base& rhs) const = 0;
         virtual void print(std::ostream& os) const = 0;
         virtual void read(std::istream& os) = 0;
+        void read_to(Base& other) const;
         virtual const DiscreteTypeInfo& get_type_info() const = 0;
         virtual std::shared_ptr<RuntimeAttribute> as_runtime_attribute() const;
@@ -504,12 +575,50 @@ class OPENVINO_API Any {
             return false;
         }
+
+        template <typename U>
+        static typename std::enable_if<util::Writable<U>::value>::type print_impl(std::ostream& os, const U& value) {
+            util::Write<U>{}(os, value);
+        }
+
+        template <typename U>
+        static typename std::enable_if<!util::Writable<U>::value && util::Ostreamable<U>::value>::type print_impl(
+            std::ostream& os,
+            const U& value) {
+            os << value;
+        }
+
+        template <typename U>
+        static typename std::enable_if<!util::Writable<U>::value && !util::Ostreamable<U>::value>::type print_impl(
+            std::ostream&,
+            const U&) {}
+
         void print(std::ostream& os) const override {
-            util::Write<T>{}(os, value);
+            print_impl(os, value);
         }
+
+        template <typename U>
+        static typename std::enable_if<util::Readable<U>::value>::type read_impl(std::istream& is, U& value) {
+            util::Read<U>{}(is, value);
+        }
+
+        template <typename U>
+        static typename std::enable_if<!util::Readable<U>::value && util::Istreamable<U>::value>::type read_impl(
+            std::istream& is,
+            U& value) {
+            is >> value;
+        }
+
+        template <typename U>
+        static typename std::enable_if<!util::Readable<U>::value && !util::Istreamable<U>::value>::type read_impl(
+            std::istream&,
+            U&) {
+            OPENVINO_UNREACHABLE("Could read type without std::istream& operator>>(std::istream&, T)",
+                                 " defined or ov::util::Read<T> class specialization, T: ",
+                                 typeid(T).name());
+        }
+
         void read(std::istream& is) override {
-            util::Read<T>{}(is, value);
+            read_impl(is, value);
         }
         T value;
@@ -527,9 +636,7 @@ class OPENVINO_API Any {
     void impl_check() const;
-    mutable Base::Ptr _temp_impl;
-    mutable std::string _str;
+    mutable Base::Ptr _temp;
     Base::Ptr _impl;
@@ -612,9 +719,10 @@ public:
     bool empty() const;
     /**
-     * @brief check the type of value in any
+     * @brief Check that stored type can be casted to specified type.
+     * If internal type supports Base
      * @tparam T Type of value
-     * @return true if type of value is correct
+     * @return true if type of value is correct. Return false if any is empty
      */
     template <class T>
     bool is() const {
@@ -637,10 +745,10 @@ public:
      * @return casted object
      */
     template <class T>
-    typename std::enable_if<std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value, T>::type& as() & {
+    typename std::enable_if<std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value, T>::type& as() {
         if (_impl == nullptr) {
-            _temp_impl = std::make_shared<Impl<decay_t<T>>>(T{});
-            return _temp_impl->as<T>();
+            _temp = std::make_shared<Impl<decay_t<T>>>(T{});
+            return *static_cast<decay_t<T>*>(_temp->addressof());
         } else {
             if (_impl->is(typeid(decay_t<T>))) {
                 return *static_cast<decay_t<T>*>(_impl->addressof());
@@ -662,48 +770,9 @@ public:
                                      " to ",
                                      static_cast<std::string>(T::element_type::get_type_info_static()));
             }
-            vptr = std::static_pointer_cast<typename T::element_type>(runtime_attribute);
-            _temp_impl = std::make_shared<Impl<decay_t<T>>>(vptr);
-            return _temp_impl->as<T>();
-        }
-    }
-}
-
-/**
- * Dynamic cast to specified type
- * @tparam T type
- * @return casted object
- */
-template <class T>
-const typename std::enable_if<std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value, T>::type& as()
-    const& {
-    if (_impl == nullptr) {
-        _temp_impl = std::make_shared<Impl<decay_t<T>>>(T{});
-        return _temp_impl->as<T>();
-    } else {
-        if (_impl->is(typeid(decay_t<T>))) {
-            return *static_cast<const decay_t<T>*>(_impl->addressof());
-        } else {
-            auto runtime_attribute = _impl->as_runtime_attribute();
-            if (runtime_attribute == nullptr) {
-                OPENVINO_UNREACHABLE("Any does not contains pointer to runtime_attribute. It contains ",
-                                     _impl->type_info().name());
-            }
-            auto vptr = std::dynamic_pointer_cast<typename T::element_type>(runtime_attribute);
-            if (vptr == nullptr && T::element_type::get_type_info_static() != runtime_attribute->get_type_info() &&
-                T::element_type::get_type_info_static() != RuntimeAttribute::get_type_info_static()) {
-                OPENVINO_UNREACHABLE("Could not cast Any runtime_attribute to ",
-                                     typeid(T).name(),
-                                     " from ",
-                                     _impl->type_info().name(),
-                                     "; from ",
-                                     static_cast<std::string>(runtime_attribute->get_type_info()),
-                                     " to ",
-                                     static_cast<std::string>(T::element_type::get_type_info_static()));
-            }
-            vptr = std::static_pointer_cast<typename T::element_type>(runtime_attribute);
-            _temp_impl = std::make_shared<Impl<decay_t<T>>>(vptr);
-            return _temp_impl->as<T>();
+            _temp = std::make_shared<Impl<decay_t<T>>>(
+                std::static_pointer_cast<typename T::element_type>(runtime_attribute));
+            return *static_cast<decay_t<T>*>(_temp->addressof());
         }
     }
 }
@@ -715,17 +784,17 @@ public:
      */
     template <class T>
     typename std::enable_if<!std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value &&
-                                !std::is_same<T, std::string>::value && std::is_default_constructible<T>::value,
+                                !std::is_same<T, std::string>::value && std::is_default_constructible<T>::value &&
+                                (util::Istreamable<T>::value || util::Readable<T>::value),
                             T>::type&
-    as() & {
+    as() {
         impl_check();
         if (_impl->is(typeid(decay_t<T>))) {
             return *static_cast<decay_t<T>*>(_impl->addressof());
         } else if (_impl->is(typeid(std::string))) {
-            _temp_impl = std::make_shared<Impl<decay_t<T>>>();
-            std::stringstream strm{as<std::string>()};
-            _temp_impl->read(strm);
-            return *static_cast<decay_t<T>*>(_temp_impl->addressof());
+            _temp = std::make_shared<Impl<decay_t<T>>>();
+            _impl->read_to(*_temp);
+            return *static_cast<decay_t<T>*>(_temp->addressof());
         }
         for (const auto& type_index : _impl->base_type_info()) {
             if (equal(type_index, typeid(decay_t<T>))) {
@@ -741,37 +810,11 @@ public:
      * @return casted object
      */
     template <class T>
-    const typename std::enable_if<!std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value &&
-                                      !std::is_same<T, std::string>::value && std::is_default_constructible<T>::value,
-                                  T>::type&
-    as() const& {
-        impl_check();
-        if (_impl->is(typeid(decay_t<T>))) {
-            return *static_cast<const decay_t<T>*>(_impl->addressof());
-        } else if (_impl->is(typeid(std::string))) {
-            _temp_impl = std::make_shared<Impl<decay_t<T>>>();
-            std::stringstream strm{as<std::string>()};
-            _temp_impl->read(strm);
-            return *static_cast<const decay_t<T>*>(_temp_impl->addressof());
-        }
-        for (const auto& type_index : _impl->base_type_info()) {
-            if (equal(type_index, typeid(decay_t<T>))) {
-                return *static_cast<const decay_t<T>*>(_impl->addressof());
-            }
-        }
-        OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name());
-    }
-
-    /**
-     * Dynamic cast to specified type
-     * @tparam T type
-     * @return casted object
-     */
-    template <class T>
-    typename std::enable_if<!std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value &&
-                                !std::is_same<T, std::string>::value && !std::is_default_constructible<T>::value,
-                            T>::type&
-    as() & {
+    typename std::enable_if<
+        !std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value && !std::is_same<T, std::string>::value &&
+            (!std::is_default_constructible<T>::value || (!util::Istreamable<T>::value && !util::Readable<T>::value)),
+        T>::type&
+    as() {
         impl_check();
         if (_impl->is(typeid(decay_t<T>))) {
             return *static_cast<decay_t<T>*>(_impl->addressof());
@@ -790,64 +833,29 @@ public:
      * @return casted object
      */
     template <class T>
-    const typename std::enable_if<!std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value &&
-                                      !std::is_same<T, std::string>::value && !std::is_default_constructible<T>::value,
-                                  T>::type&
-    as() const& {
-        impl_check();
-        if (_impl->is(typeid(decay_t<T>))) {
-            return *static_cast<const decay_t<T>*>(_impl->addressof());
-        }
-        for (const auto& type_index : _impl->base_type_info()) {
-            if (equal(type_index, typeid(decay_t<T>))) {
-                return *static_cast<const decay_t<T>*>(_impl->addressof());
-            }
-        }
-        OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name());
-    }
-
-    /**
-     * Dynamic cast to specified type
-     * @tparam T type
-     * @return casted object
-     */
-    template <class T>
-    typename std::enable_if<std::is_same<T, std::string>::value, T>::type& as() & {
+    typename std::enable_if<std::is_same<T, std::string>::value, T>::type& as() {
         if (_impl != nullptr) {
             if (_impl->is(typeid(decay_t<T>))) {
                 return *static_cast<decay_t<T>*>(_impl->addressof());
             } else {
-                std::stringstream strm;
-                print(strm);
-                _str = strm.str();
-                return _str;
+                _temp = std::make_shared<Impl<std::string>>();
+                _impl->read_to(*_temp);
+                return *static_cast<std::string*>(_temp->addressof());
             }
         } else {
-            _str = {};
-            return _str;
+            _temp = std::make_shared<Impl<std::string>>();
+            return *static_cast<std::string*>(_temp->addressof());
         }
     }

     /**
      * Dynamic cast to specified type
      * @tparam T type
-     * @return casted object
+     * @return const reference to caster object
      */
     template <class T>
-    const typename std::enable_if<std::is_same<T, std::string>::value, T>::type& as() const& {
-        if (_impl != nullptr) {
-            if (_impl->is(typeid(decay_t<T>))) {
-                return *static_cast<const decay_t<T>*>(_impl->addressof());
-            } else {
-                std::stringstream strm;
-                print(strm);
-                _str = strm.str();
-                return _str;
-            }
-        } else {
-            _str = {};
-            return _str;
-        }
+    const T& as() const {
+        return const_cast<Any*>(this)->as<T>();
     }
     /**
@@ -958,6 +966,18 @@ public:
         impl_check();
         return _impl.get();
     }
+
+    /**
+     * @brief Returns address to internal value if any is not empty and `nullptr` instead
+     * @return address to internal stored value
+     */
+    void* addressof();
+
+    /**
+     * @brief Returns address to internal value if any is not empty and `nullptr` instead
+     * @return address to internal stored value
+     */
+    const void* addressof() const;
 };
 /** @cond INTERNAL */
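The new Readable/Writable traits and the from_string/to_string overload sets above all rest on one expression-SFINAE detection idiom. A self-contained sketch of that idiom, reduced to the Istreamable case; the names are simplified stand-ins, not the OpenVINO types themselves:

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <type_traits>
    #include <utility>

    // Detection idiom as used by ov::util::Istreamable/Readable: the first
    // test() overload participates only if `is >> value` is a valid
    // expression for U; otherwise the variadic fallback is chosen.
    template <class T>
    struct Istreamable {
        template <class U>
        static auto test(U*) -> decltype(std::declval<std::istream&>() >> std::declval<U&>(), std::true_type());
        template <typename>
        static auto test(...) -> std::false_type;
        constexpr static const bool value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
    };

    // from_string dispatches on the trait, mirroring the overload set above.
    template <typename T>
    auto from_string(const std::string& s) -> typename std::enable_if<Istreamable<T>::value, T>::type {
        std::stringstream ss(s);
        T value{};
        ss >> value;
        return value;
    }

    int main() {
        static_assert(Istreamable<int>::value, "int is readable from a stream");
        std::cout << from_string<int>("42") + from_string<double>("0.5") << '\n';  // prints 42.5
    }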

View File

@@ -35,9 +35,6 @@ public:
     /// as_type.
     virtual const DiscreteTypeInfo& get_type_info() const = 0;
     virtual ~ValueAccessor() = default;
-    virtual ov::Any get_as_any() {
-        throw ov::Exception("get_as_any is not implemented");
-    }
     virtual void set_as_any(const ov::Any& x) {
         throw ov::Exception("set_as_any is not implemented");
     }
@@ -59,11 +56,14 @@ public:
     virtual const VAT& get() = 0;
     /// Sets the value
     virtual void set(const VAT& value) = 0;
-    ov::Any get_as_any() override {
-        return get();
-    }
     void set_as_any(const ov::Any& x) override {
-        set(x.as<VAT>());
+        const auto* data = x.addressof();
+        OPENVINO_ASSERT(data != nullptr, "Data conversion is not possible. Empty data is provided.");
+        if (x.is<VAT>()) {
+            set(*static_cast<const VAT*>(data));
+        } else {
+            OPENVINO_UNREACHABLE("Bad cast from: ", x.type_info().name(), " to: ", typeid(VAT).name());
+        }
     }
 };
@@ -108,14 +108,18 @@ public:
     }
     void set_as_any(const ov::Any& x) override {
+        const auto* data = x.addressof();
+        OPENVINO_ASSERT(data != nullptr, "Data conversion is not possible. Empty data is provided.");
         // Try to represent x as VAT or AT
         if (x.is<VAT>()) {
-            set(x.as<VAT>());
-        } else {
+            set(*static_cast<const VAT*>(data));
+        } else if (x.is<AT>()) {
             // Don't call set here avoiding unnecessary casts AT -> VAT -> AT,
             // instead reimplement logic from set.
-            m_ref = x.as<AT>();
+            m_ref = *static_cast<const AT*>(data);
             m_buffer_valid = false;
+        } else {
+            OPENVINO_UNREACHABLE("Bad cast from: ", x.type_info().name(), " to: ", typeid(AT).name());
         }
     }
@@ -153,14 +157,18 @@ public:
} }
void set_as_any(const ov::Any& x) override { void set_as_any(const ov::Any& x) override {
const auto* data = x.addressof();
OPENVINO_ASSERT(data != nullptr, "Data conversion is not possible. Empty data is provided.");
// Try to represent x as VAT or AT // Try to represent x as VAT or AT
if (x.is<VAT>()) { if (x.is<VAT>()) {
set(x.as<VAT>()); set(*static_cast<const VAT*>(data));
} else { } else if (x.is<AT>()) {
// Don't call set here avoiding unnecessary casts AT -> VAT -> AT, // Don't call set here avoiding unnecessary casts AT -> VAT -> AT,
// instead reimplement logic from set. // instead reimplement logic from set.
m_ref = x.as<AT>(); m_ref = *static_cast<const AT*>(data);
m_buffer_valid = false; m_buffer_valid = false;
} else {
OPENVINO_UNREACHABLE("Bad cast from: ", x.type_info().name(), " to: ", typeid(AT).name());
} }
} }
operator AT&() { operator AT&() {
@@ -196,13 +204,17 @@ public:
} }
void set_as_any(const ov::Any& x) override { void set_as_any(const ov::Any& x) override {
const auto* data = x.addressof();
OPENVINO_ASSERT(data != nullptr, "Data conversion is not possible. Empty data is provided.");
// Try to represent x as std::string or AT // Try to represent x as std::string or AT
if (x.is<std::string>()) { if (x.is<std::string>()) {
set(x.as<std::string>()); set(x.as<std::string>());
} else { } else if (x.is<AT>()) {
// Don't call set here avoiding unnecessary casts AT -> std::string -> AT, // Don't call set here avoiding unnecessary casts AT -> std::string -> AT,
// instead reimplement logic from set. // instead reimplement logic from set.
m_ref = x.as<AT>(); m_ref = *static_cast<const AT*>(data);
} else {
OPENVINO_UNREACHABLE("Bad cast from: ", x.type_info().name(), " to: ", typeid(AT).name());
} }
} }
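The guard repeated across these adapters can be distilled into one pattern (a hypothetical helper for illustration, built only from calls shown in this diff):

    template <typename T>
    void checked_set(T& ref, const ov::Any& x) {
        const auto* data = x.addressof();
        OPENVINO_ASSERT(data != nullptr, "Data conversion is not possible. Empty data is provided.");
        if (x.is<T>()) {
            ref = *static_cast<const T*>(data);  // reuse the stored value, no string round-trip
        } else {
            OPENVINO_UNREACHABLE("Bad cast from: ", x.type_info().name(), " to: ", typeid(T).name());
        }
    }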


@@ -57,8 +57,18 @@ bool Any::Base::visit_attributes(AttributeVisitor& visitor) const {
return const_cast<Any::Base*>(this)->visit_attributes(visitor); return const_cast<Any::Base*>(this)->visit_attributes(visitor);
} }
void Any::Base::read_to(Base& other) const {
std::stringstream strm;
print(strm);
if (other.is<std::string>()) {
*static_cast<std::string*>(other.addressof()) = strm.str();
} else {
other.read(strm);
}
}
Any::~Any() { Any::~Any() {
_temp_impl = {}; _temp = {};
_impl = {}; _impl = {};
} }
@@ -121,6 +131,13 @@ const Any::Base* Any::operator->() const {
return _impl.get(); return _impl.get();
} }
void* Any::addressof() {
return _impl != nullptr ? _impl->addressof() : nullptr;
}
const void* Any::addressof() const {
return _impl != nullptr ? _impl->addressof() : nullptr;
}
namespace util { namespace util {
void Read<bool>::operator()(std::istream& is, bool& value) const { void Read<bool>::operator()(std::istream& is, bool& value) const {
@@ -138,7 +155,7 @@ void Read<bool>::operator()(std::istream& is, bool& value) const {
template <typename F> template <typename F>
static auto stream_to(std::istream& is, F&& f) -> decltype(f(std::declval<const std::string&>())) { static auto stream_to(std::istream& is, F&& f) -> decltype(f(std::declval<const std::string&>())) {
std::string str; std::string str;
Read<std::string>{}(is, str); is >> str;
try { try {
return f(str); return f(str);
} catch (std::exception& e) { } catch (std::exception& e) {


@@ -129,7 +129,6 @@ TEST_F(AnyTests, AnyAsStringInLine) {
Any p = "test"; Any p = "test";
ASSERT_TRUE(p.is<std::string>()); ASSERT_TRUE(p.is<std::string>());
std::string test = p.as<std::string>(); std::string test = p.as<std::string>();
;
ASSERT_EQ("test", test); ASSERT_EQ("test", test);
} }
@@ -370,14 +369,13 @@ TEST_F(AnyTests, PrintToVectorOfUInts) {
TEST_F(AnyTests, PrintToVectorOfFloats) { TEST_F(AnyTests, PrintToVectorOfFloats) {
auto ref_vec = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f}; auto ref_vec = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
std::stringstream stream;
{ {
Any p = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f}; Any p = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(p.as<std::string>(), std::string{"0 1.1 2.2 3.3 4.4 5.5"});
ASSERT_EQ(stream.str(), std::string{"0 1.1 2.2 3.3 4.4 5.5"});
} }
{ {
Any p = stream.str(); Any p = "0 1.1 2.2 3.3 4.4 5.5";
ASSERT_EQ((p.as<std::vector<float>>()), ref_vec); ASSERT_EQ((p.as<std::vector<float>>()), ref_vec);
} }
} }
@@ -535,3 +533,10 @@ TEST_F(AnyTests, NotIntFromStringThrow) {
ASSERT_TRUE(p.is<std::string>()); ASSERT_TRUE(p.is<std::string>());
ASSERT_THROW(p.as<int>(), ov::Exception); ASSERT_THROW(p.as<int>(), ov::Exception);
} }
TEST_F(AnyTests, AddressofNoThrow) {
Any p;
ASSERT_EQ(nullptr, p.addressof());
p = 42;
ASSERT_NE(nullptr, p.addressof());
}

View File

@@ -26,6 +26,7 @@ from google.protobuf import text_format
import onnx import onnx
from onnx.external_data_helper import convert_model_to_external_data from onnx.external_data_helper import convert_model_to_external_data
import os import os
import sys
ONNX_SUFFX = '.onnx' ONNX_SUFFX = '.onnx'
PROTOTXT_SUFFX = '.prototxt' PROTOTXT_SUFFX = '.prototxt'


@@ -2,10 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "ngraph/opsets/opset7.hpp" #include "ngraph/opsets/opset8.hpp"
namespace ngraph { namespace ngraph {
namespace onnx_import { namespace onnx_import {
namespace default_opset = ngraph::opset7; namespace default_opset = ngraph::opset8;
} }
} // namespace ngraph } // namespace ngraph


@@ -59,13 +59,13 @@ OutputVector PoolingFactory::make_avg_pool() const {
} }
OutputVector PoolingFactory::make_max_pool() const { OutputVector PoolingFactory::make_max_pool() const {
return {std::make_shared<default_opset::MaxPool>(m_inputs.at(0), return {std::make_shared<op::v1::MaxPool>(m_inputs.at(0),
m_strides, m_strides,
m_padding_below, m_padding_below,
m_padding_above, m_padding_above,
m_kernel_shape, m_kernel_shape,
m_rounding_type, m_rounding_type,
m_auto_pad)}; m_auto_pad)};
} }
OutputVector PoolingFactory::make_max_pool_with_indices() const { OutputVector PoolingFactory::make_max_pool_with_indices() const {


@@ -8,6 +8,7 @@
#include <array> #include <array>
#include <exception> #include <exception>
#include <map> #include <map>
#include <unordered_set>
#include <vector> #include <vector>
namespace { namespace {
@@ -128,7 +129,7 @@ ONNXField decode_next_field(std::istream& model) {
switch (decoded_key.second) { switch (decoded_key.second) {
case VARINT: { case VARINT: {
// the decoded varint is the payload in this case but its value does not matter // the decoded varint is the payload in this case but its value does not matter
// in the fast check process so you can discard it // in the fast check process so it can be discarded
decode_varint(model); decode_varint(model);
return {onnx_field, 0}; return {onnx_field, 0};
} }
@@ -198,21 +199,23 @@ namespace ngraph {
namespace onnx_common { namespace onnx_common {
bool is_valid_model(std::istream& model) { bool is_valid_model(std::istream& model) {
// the model usually starts with a 0x08 byte indicating the ir_version value // the model usually starts with a 0x08 byte indicating the ir_version value
// so this checker expects at least 2 valid ONNX keys to be found in the validated model // so this checker expects at least 3 valid ONNX keys to be found in the validated model
const unsigned int EXPECTED_FIELDS_FOUND = 2u; const size_t EXPECTED_FIELDS_FOUND = 3u;
unsigned int valid_fields_found = 0u; std::unordered_set<onnx::Field, std::hash<int>> onnx_fields_found = {};
try { try {
while (!model.eof() && valid_fields_found < EXPECTED_FIELDS_FOUND) { while (!model.eof() && onnx_fields_found.size() < EXPECTED_FIELDS_FOUND) {
const auto field = ::onnx::decode_next_field(model); const auto field = ::onnx::decode_next_field(model);
++valid_fields_found; if (onnx_fields_found.count(field.first) > 0) {
// if the same field is found twice, this is not a valid ONNX model
if (field.second > 0) { return false;
::onnx::skip_payload(model, field.second); } else {
onnx_fields_found.insert(field.first);
onnx::skip_payload(model, field.second);
} }
} }
return valid_fields_found == EXPECTED_FIELDS_FOUND; return onnx_fields_found.size() == EXPECTED_FIELDS_FOUND;
} catch (...) { } catch (...) {
return false; return false;
} }
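A sketch of how this checker is typically driven (the caller and file name are illustrative, not from the diff):

    #include <fstream>

    std::ifstream model_stream{"model.onnx", std::ios::binary};
    const bool looks_like_onnx = ngraph::onnx_common::is_valid_model(model_stream);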


@@ -5,24 +5,61 @@
ov_add_frontend(NAME tensorflow ov_add_frontend(NAME tensorflow
LINKABLE_FRONTEND LINKABLE_FRONTEND
SHUTDOWN_PROTOBUF SHUTDOWN_PROTOBUF
SKIP_INSTALL
FILEDESCRIPTION "FrontEnd to load and convert TensorFlow file format" FILEDESCRIPTION "FrontEnd to load and convert TensorFlow file format"
LINK_LIBRARIES openvino::util openvino::runtime::dev) LINK_LIBRARIES openvino::util openvino::runtime::dev)
# give a different name during installation to OpenVINO package #
set_target_properties(openvino_tensorflow_frontend PROPERTIES OUTPUT_NAME openvino_tensorflow_fe) # Temporary install steps
#
function(ov_frontend_get_file_name target_name library_name) set(TARGET_NAME openvino_tensorflow_frontend)
set(LIB_PREFIX "${CMAKE_SHARED_LIBRARY_PREFIX}") set(TARGET_NAME_IRC openvino_tensorflow_fe)
set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}")
set("${library_name}" "${LIB_PREFIX}${target_name}${LIB_SUFFIX}" PARENT_SCOPE) set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME frontend::tensorflow)
endfunction() export(TARGETS ${TARGET_NAME} NAMESPACE openvino::
APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake")
ov_frontend_get_file_name(openvino_tensorflow_frontend output_name)
# install with original name for tests component # install with original name for tests component
install(FILES $<TARGET_FILE:openvino_tensorflow_frontend> install(TARGETS ${TARGET_NAME}
DESTINATION ${OV_CPACK_RUNTIMEDIR} RUNTIME DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT tests EXCLUDE_FROM_ALL
COMPONENT tests LIBRARY DESTINATION ${OV_CPACK_LIBRARYDIR} COMPONENT tests EXCLUDE_FROM_ALL)
RENAME ${output_name}
EXCLUDE_FROM_ALL) if(BUILD_SHARED_LIBS)
function(ov_shared_library_name target_name library_name)
set(LIB_PREFIX "${CMAKE_SHARED_LIBRARY_PREFIX}")
set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}")
set("${library_name}" "${LIB_PREFIX}${target_name}${LIB_SUFFIX}" PARENT_SCOPE)
endfunction()
function(ov_lib_file_name target_name library_name)
set(LIB_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}")
set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_STATIC_LIBRARY_SUFFIX}")
set("${library_name}" "${LIB_PREFIX}${target_name}${LIB_SUFFIX}" PARENT_SCOPE)
endfunction()
ov_shared_library_name(${TARGET_NAME_IRC} shared_library_name)
# rename targets files to avoid auto-loading by FEM
install(FILES $<TARGET_FILE:${TARGET_NAME}>
DESTINATION ${OV_CPACK_RUNTIMEDIR}
COMPONENT ${OV_CPACK_COMP_CORE}
RENAME ${shared_library_name})
if(WIN32)
ov_lib_file_name(${TARGET_NAME_IRC} lib_file_name)
# need to install renamed .lib file as well
install(FILES $<TARGET_LINKER_FILE:${TARGET_NAME}>
DESTINATION ${OV_CPACK_LIBRARYDIR}
COMPONENT ${OV_CPACK_COMP_CORE}
RENAME ${lib_file_name})
endif()
endif()
# install -dev part
install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/openvino
DESTINATION ${FRONTEND_INSTALL_INCLUDE}/
COMPONENT ${OV_CPACK_COMP_CORE_DEV}
FILES_MATCHING PATTERN "*.hpp")


@@ -108,6 +108,10 @@ void FrontEnd::translate_graph(const ov::frontend::InputModel::Ptr& model,
// prepare a list of OV node inputs for each node // prepare a list of OV node inputs for each node
ov::OutputVector ng_inputs; ov::OutputVector ng_inputs;
for (size_t input_port_idx = 0; input_port_idx < operation_decoder->get_input_size(); ++input_port_idx) { for (size_t input_port_idx = 0; input_port_idx < operation_decoder->get_input_size(); ++input_port_idx) {
// TODO: Implement more general approach. Skipping Constants that have input edges
if (operation_decoder->get_op_type() == "Const") {
break;
}
std::string producer_name; std::string producer_name;
size_t producer_port_idx; size_t producer_port_idx;
try { try {
@@ -143,7 +147,7 @@ void FrontEnd::translate_graph(const ov::frontend::InputModel::Ptr& model,
ng_inputs.push_back(input_outputs_vector.at(producer_port_idx)); ng_inputs.push_back(input_outputs_vector.at(producer_port_idx));
} else { } else {
FRONT_END_GENERAL_CHECK(false, FRONT_END_GENERAL_CHECK(false,
"No input is found for node \"" + operation_name + "\" by port" + "No input is found for node \"" + operation_name + "\" by port " +
std::to_string(producer_port_idx)); std::to_string(producer_port_idx));
} }
} }


@@ -628,10 +628,11 @@ constexpr static const auto EXPORT_IMPORT = "EXPORT_IMPORT"; //!< Device suppor
} // namespace capability } // namespace capability
} // namespace device } // namespace device
namespace streams {
/** /**
* @brief Class to represent number of streams in streams executor * @brief Class to represent number of streams in streams executor
*/ */
struct NumStreams { struct Num {
using Base = std::tuple<int32_t>; //!< NumStreams is representable as int32_t using Base = std::tuple<int32_t>; //!< NumStreams is representable as int32_t
/** /**
@@ -642,9 +643,9 @@ struct NumStreams {
NUMA = -2, //!< Creates as many streams as needed to accommodate NUMA and avoid associated penalties NUMA = -2, //!< Creates as many streams as needed to accommodate NUMA and avoid associated penalties
}; };
NumStreams() : num{AUTO} {}; constexpr Num() : num{AUTO} {};
NumStreams(const int32_t num_) : num{num_} {} constexpr Num(const int32_t num_) : num{num_} {}
operator int32_t() { operator int32_t() {
return num; return num;
@@ -657,28 +658,37 @@ struct NumStreams {
int32_t num = 0; int32_t num = 0;
}; };
/**
* @brief The number of executor logical partitions
*/
static constexpr Property<Num, PropertyMutability::RW> num{"NUM_STREAMS"};
static constexpr Num AUTO{Num::AUTO}; //!< Creates bare minimum of streams to improve the performance
static constexpr Num NUMA{
Num::NUMA}; //!< Creates as many streams as needed to accommodate NUMA and avoid associated penalties
/** @cond INTERNAL */ /** @cond INTERNAL */
inline std::ostream& operator<<(std::ostream& os, const NumStreams& num_streams) { inline std::ostream& operator<<(std::ostream& os, const Num& num) {
switch (num_streams.num) { switch (num.num) {
case NumStreams::AUTO: case Num::AUTO:
return os << "AUTO"; return os << "AUTO";
case NumStreams::NUMA: case Num::NUMA:
return os << "NUMA"; return os << "NUMA";
default: default:
return os << num_streams.num; return os << num.num;
} }
} }
inline std::istream& operator>>(std::istream& is, NumStreams& num_streams) { inline std::istream& operator>>(std::istream& is, Num& num) {
std::string str; std::string str;
is >> str; is >> str;
if (str == "AUTO") { if (str == "AUTO") {
num_streams = {NumStreams::AUTO}; num = AUTO;
} else if (str == "NUMA") { } else if (str == "NUMA") {
num_streams = {NumStreams::NUMA}; num = NUMA;
} else { } else {
try { try {
num_streams = {std::stoi(str)}; num = {std::stoi(str)};
} catch (const std::exception& e) { } catch (const std::exception& e) {
throw ov::Exception{std::string{"Could not read number of streams from str: "} + str + "; " + e.what()}; throw ov::Exception{std::string{"Could not read number of streams from str: "} + str + "; " + e.what()};
} }
@@ -686,11 +696,17 @@ inline std::istream& operator>>(std::istream& is, NumStreams& num_streams) {
return is; return is;
} }
/** @endcond */ /** @endcond */
} // namespace streams
/**
* @brief Class to represent number of streams in streams executor
*/
using NumStreams = streams::Num;
/** /**
* @brief The number of executor logical partitions * @brief The number of executor logical partitions
*/ */
static constexpr Property<NumStreams, PropertyMutability::RW> num_streams{"NUM_STREAMS"}; static constexpr Property<streams::Num, PropertyMutability::RW> num_streams{"NUM_STREAMS"};
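Possible call sites for the relocated property (a sketch; assumes an ov::Core instance and its set_property overloads from the runtime API):

    ov::Core core;
    core.set_property("CPU", ov::num_streams(ov::streams::AUTO));  // bare minimum of streams
    core.set_property("CPU", ov::num_streams(4));                  // or an explicit count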
/** /**
* @brief Maximum number of threads that can be used for inference tasks * @brief Maximum number of threads that can be used for inference tasks


@@ -1143,12 +1143,19 @@ public:
std::map<std::string, std::string> GetSupportedConfig(const std::string& deviceName, std::map<std::string, std::string> GetSupportedConfig(const std::string& deviceName,
const std::map<std::string, std::string>& configs) override { const std::map<std::string, std::string>& configs) override {
auto supportedConfigKeys = std::vector<std::string> supportedConfigKeys;
GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as<std::vector<std::string>>(); try {
for (auto&& property : ICore::get_property(deviceName, ov::supported_properties)) { supportedConfigKeys =
if (property.is_mutable()) { GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as<std::vector<std::string>>();
supportedConfigKeys.emplace_back(std::move(property)); } catch (ov::Exception&) {
}
try {
for (auto&& property : ICore::get_property(deviceName, ov::supported_properties)) {
if (property.is_mutable()) {
supportedConfigKeys.emplace_back(std::move(property));
}
} }
} catch (ov::Exception&) {
} }
std::map<std::string, std::string> supportedConfig; std::map<std::string, std::string> supportedConfig;
for (auto&& key : supportedConfigKeys) { for (auto&& key : supportedConfigKeys) {


@@ -16,6 +16,7 @@
#include "ie_plugin_config.hpp" #include "ie_plugin_config.hpp"
#include "ie_system_conf.h" #include "ie_system_conf.h"
#include "openvino/runtime/properties.hpp" #include "openvino/runtime/properties.hpp"
#include "openvino/util/common_util.hpp"
namespace InferenceEngine { namespace InferenceEngine {
IStreamsExecutor::~IStreamsExecutor() {} IStreamsExecutor::~IStreamsExecutor() {}
@@ -108,12 +109,10 @@ void IStreamsExecutor::Config::SetConfig(const std::string& key, const std::stri
_streams = val_i; _streams = val_i;
} }
} else if (key == ov::num_streams) { } else if (key == ov::num_streams) {
ov::NumStreams streams; auto streams = ov::util::from_string(value, ov::streams::num);
std::stringstream strm{value}; if (streams == ov::streams::NUMA) {
strm >> streams;
if (streams.num == ov::NumStreams::NUMA) {
_streams = static_cast<int32_t>(getAvailableNUMANodes().size()); _streams = static_cast<int32_t>(getAvailableNUMANodes().size());
} else if (streams.num == ov::NumStreams::AUTO) { } else if (streams == ov::streams::AUTO) {
// bare minimum of streams (that evenly divides available number of cores) // bare minimum of streams (that evenly divides available number of cores)
_streams = GetDefaultNumStreams(); _streams = GetDefaultNumStreams();
} else if (streams.num >= 0) { } else if (streams.num >= 0) {
@@ -122,7 +121,7 @@ void IStreamsExecutor::Config::SetConfig(const std::string& key, const std::stri
OPENVINO_UNREACHABLE("Wrong value for property key ", OPENVINO_UNREACHABLE("Wrong value for property key ",
ov::num_streams.name(), ov::num_streams.name(),
". Expected non negative numbers (#streams) or ", ". Expected non negative numbers (#streams) or ",
"ov::NumStreams(ov::NumStreams::NUMA|ov::NumStreams::AUTO), Got: ", "ov::streams::NUMA|ov::streams::AUTO, Got: ",
streams); streams);
} }
} else if (key == CONFIG_KEY(CPU_THREADS_NUM) || key == ov::inference_num_threads) { } else if (key == CONFIG_KEY(CPU_THREADS_NUM) || key == ov::inference_num_threads) {


@@ -908,10 +908,10 @@ void collectPluginMetrics(std::vector<std::string>& baseMetrics, const std::vect
InferenceEngine::Parameter HeteroExecutableNetwork::GetMetric(const std::string& name) const { InferenceEngine::Parameter HeteroExecutableNetwork::GetMetric(const std::string& name) const {
if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) { if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) {
std::vector<std::string> heteroMetrics = {METRIC_KEY(NETWORK_NAME), std::vector<std::string> heteroMetrics = {ov::model_name.name(),
METRIC_KEY(SUPPORTED_METRICS), METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)}; ov::optimal_number_of_infer_requests.name()};
{ {
std::vector<::Metrics> pluginMetrics; std::vector<::Metrics> pluginMetrics;
@@ -951,15 +951,15 @@ InferenceEngine::Parameter HeteroExecutableNetwork::GetMetric(const std::string&
} }
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, heteroConfigKeys); IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, heteroConfigKeys);
} else if (EXEC_NETWORK_METRIC_KEY(NETWORK_NAME) == name) { } else if (ov::model_name == name) {
IE_SET_METRIC_RETURN(NETWORK_NAME, _name); return decltype(ov::model_name)::value_type{_name};
} else if (EXEC_NETWORK_METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) == name) { } else if (ov::optimal_number_of_infer_requests == name) {
unsigned int value = 0u; unsigned int value = 0u;
for (auto&& desc : _networks) { for (auto&& desc : _networks) {
value = std::max(value, value = std::max(value,
desc._network->GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>()); desc._network->GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>());
} }
IE_SET_METRIC_RETURN(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value); return decltype(ov::optimal_number_of_infer_requests)::value_type{value};
} else { } else {
// find metric key among plugin metrics // find metric key among plugin metrics
for (auto&& desc : _networks) { for (auto&& desc : _networks) {


@@ -165,17 +165,20 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
if (METRIC_KEY(SUPPORTED_METRICS) == name) { if (METRIC_KEY(SUPPORTED_METRICS) == name) {
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, IE_SET_METRIC_RETURN(SUPPORTED_METRICS,
std::vector<std::string>{METRIC_KEY(SUPPORTED_METRICS), std::vector<std::string>{METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(FULL_DEVICE_NAME), ov::device::full_name.name(),
METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(DEVICE_ARCHITECTURE), ov::device::architecture.name(),
METRIC_KEY(IMPORT_EXPORT_SUPPORT)}); METRIC_KEY(IMPORT_EXPORT_SUPPORT),
ov::device::capabilities.name()});
} else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, getSupportedConfigKeys()); IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, getSupportedConfigKeys());
} else if (METRIC_KEY(FULL_DEVICE_NAME) == name) { } else if (ov::device::full_name == name) {
IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, std::string{"HETERO"}); return decltype(ov::device::full_name)::value_type{"HETERO"};
} else if (METRIC_KEY(IMPORT_EXPORT_SUPPORT) == name) { } else if (METRIC_KEY(IMPORT_EXPORT_SUPPORT) == name) {
IE_SET_METRIC_RETURN(IMPORT_EXPORT_SUPPORT, true); IE_SET_METRIC_RETURN(IMPORT_EXPORT_SUPPORT, true);
} else if (METRIC_KEY(DEVICE_ARCHITECTURE) == name) { } else if (ov::device::capabilities == name) {
return decltype(ov::device::capabilities)::value_type{{ov::device::capability::EXPORT_IMPORT}};
} else if (ov::device::architecture == name) {
auto deviceIt = options.find("TARGET_FALLBACK"); auto deviceIt = options.find("TARGET_FALLBACK");
std::string targetFallback; std::string targetFallback;
if (deviceIt != options.end()) { if (deviceIt != options.end()) {
@@ -188,7 +191,7 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
targetFallback = GetConfig(ov::device::priorities.name(), {}).as<std::string>(); targetFallback = GetConfig(ov::device::priorities.name(), {}).as<std::string>();
} }
} }
IE_SET_METRIC_RETURN(DEVICE_ARCHITECTURE, DeviceArchitecture(targetFallback)); return decltype(ov::device::architecture)::value_type{DeviceArchitecture(targetFallback)};
} else { } else {
IE_THROW() << "Unsupported metric key: " << name; IE_THROW() << "Unsupported metric key: " << name;
} }
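Reading the migrated metrics through properties might look like this (a sketch; assumes an ov::Core instance with the HETERO plugin registered):

    ov::Core core;
    const auto full_name = core.get_property("HETERO", ov::device::full_name);
    const auto capabilities = core.get_property("HETERO", ov::device::capabilities);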


@@ -16,6 +16,7 @@
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp> #include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include "openvino/core/type/element_type_traits.hpp" #include "openvino/core/type/element_type_traits.hpp"
#include "openvino/runtime/properties.hpp" #include "openvino/runtime/properties.hpp"
#include <cpu/x64/cpu_isa_traits.hpp>
namespace MKLDNNPlugin { namespace MKLDNNPlugin {
@@ -42,7 +43,7 @@ Config::Config() {
} }
#endif #endif
if (!with_cpu_x86_bfloat16()) if (!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16))
enforceBF16 = false; enforceBF16 = false;
CPU_DEBUG_CAP_ENABLE(readDebugCapsProperties()); CPU_DEBUG_CAP_ENABLE(readDebugCapsProperties());
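The ISA probe that replaces with_cpu_x86_bfloat16() above, in isolation (uses only the oneDNN traits header added by this change):

    #include <cpu/x64/cpu_isa_traits.hpp>

    const bool cpu_supports_bf16 =
        dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16);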
@@ -105,7 +106,7 @@ void Config::readProperties(const std::map<std::string, std::string> &prop) {
IE_THROW() << "Wrong value for property key " << PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE; IE_THROW() << "Wrong value for property key " << PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE;
} else if (key == PluginConfigParams::KEY_ENFORCE_BF16) { } else if (key == PluginConfigParams::KEY_ENFORCE_BF16) {
if (val == PluginConfigParams::YES) { if (val == PluginConfigParams::YES) {
if (with_cpu_x86_avx512_core()) { if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)) {
enforceBF16 = true; enforceBF16 = true;
manualEnforceBF16 = true; manualEnforceBF16 = true;
} else { } else {
@@ -120,7 +121,7 @@ void Config::readProperties(const std::map<std::string, std::string> &prop) {
} }
} else if (key == ov::hint::inference_precision.name()) { } else if (key == ov::hint::inference_precision.name()) {
if (val == "bf16") { if (val == "bf16") {
if (with_cpu_x86_avx512_core()) { if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)) {
enforceBF16 = true; enforceBF16 = true;
manualEnforceBF16 = true; manualEnforceBF16 = true;
} else { } else {


@@ -422,9 +422,8 @@ bool MKLDNNExecNetwork::canBeExecViaLegacyDynBatch(std::shared_ptr<const ov::Mod
} }
if (type == Tile) { if (type == Tile) {
const auto repeatsNode = std::dynamic_pointer_cast<const ngraph::opset1::Constant>(op->get_input_node_shared_ptr(1));
const auto tile = std::dynamic_pointer_cast<const ngraph::opset1::Tile>(op); const auto tile = std::dynamic_pointer_cast<const ngraph::opset1::Tile>(op);
const auto repeatsNode = std::dynamic_pointer_cast<const ngraph::opset1::Constant>(tile->get_input_node_shared_ptr(1));
if (!(tile && repeatsNode && repeatsNode->cast_vector<int64_t>()[0] == 1)) { if (!(tile && repeatsNode && repeatsNode->cast_vector<int64_t>()[0] == 1)) {
return false; return false;
} }
@@ -502,10 +501,10 @@ bool MKLDNNExecNetwork::CanProcessDynBatch(const InferenceEngine::CNNNetwork &ne
for (const auto& op : ops) { for (const auto& op : ops) {
auto type = TypeFromName(op->get_type_name()); auto type = TypeFromName(op->get_type_name());
if (type == Tile) { if (type == Tile) {
const auto tile = std::dynamic_pointer_cast<const ngraph::opset1::Tile>(op); const auto repeatsNode = std::dynamic_pointer_cast<const ngraph::opset1::Constant>(op->get_input_node_shared_ptr(1));
const auto repeatsNode = std::dynamic_pointer_cast<const ngraph::opset1::Constant>(tile->get_input_node_shared_ptr(1));
if (!repeatsNode) if (!repeatsNode)
return false; return false;
const auto tile = std::dynamic_pointer_cast<const ngraph::opset1::Tile>(op);
if (tile && repeatsNode->cast_vector<int64_t>()[0] == 1) if (tile && repeatsNode->cast_vector<int64_t>()[0] == 1)
continue; continue;
} }


@@ -1082,6 +1082,8 @@ void MKLDNNGraph::GetPerfData(std::map<std::string, InferenceEngine::InferenceEn
}; };
for (int i = 0; i < graphNodes.size(); i++) { for (int i = 0; i < graphNodes.size(); i++) {
if (graphNodes[i]->isConstant())
continue;
getPerfMapFor(perfMap, graphNodes[i]); getPerfMapFor(perfMap, graphNodes[i]);
} }
} }
@@ -1201,10 +1203,11 @@ void MKLDNNGraph::DropDWConvNode(const MKLDNNNodePtr &node) {
if (!parent) continue; if (!parent) continue;
MKLDNNEdgePtr &remEdge = p_edge; MKLDNNEdgePtr &remEdge = p_edge;
const auto portCandidate = remEdge->getOutputNum();
int inNum = 0; int inNum = 0;
int portCandidate = 0;
if (remEdge) { if (remEdge) {
inNum = remEdge->getInputNum(); inNum = remEdge->getInputNum();
portCandidate = remEdge->getOutputNum();
remEdge->drop(); remEdge->drop();
RemoveEdge(remEdge); RemoveEdge(remEdge);
} }


@@ -1071,8 +1071,9 @@ void MKLDNNGraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(MKLDNNG
}; };
for (auto &graphNode : graphNodes) { for (auto &graphNode : graphNodes) {
const auto eltwiseNode = std::dynamic_pointer_cast<MKLDNNEltwiseNode>(graphNode);
if (graphNode->getType() != Eltwise || graphNode->getAlgorithm() != EltwiseAdd || if (graphNode->getType() != Eltwise || graphNode->getAlgorithm() != EltwiseAdd ||
std::dynamic_pointer_cast<MKLDNNEltwiseNode>(graphNode)->isWithBroadcast()) !eltwiseNode || eltwiseNode->isWithBroadcast())
continue; continue;
// TODO: Enlarge to several inputs // TODO: Enlarge to several inputs


@@ -116,7 +116,7 @@ void MKLDNNMemory::FillZero() {
void *MKLDNNMemory::GetPtr() const { void *MKLDNNMemory::GetPtr() const {
auto ptr = static_cast<uint8_t*>(GetData()); auto ptr = static_cast<uint8_t*>(GetData());
const auto& md = prim->get_desc().data; const mkldnn_memory_desc_t md = prim->get_desc().data;
mkldnn::impl::memory_desc_wrapper wrapper(md); mkldnn::impl::memory_desc_wrapper wrapper(md);
ptr += wrapper.offset0() * wrapper.data_type_size(); ptr += wrapper.offset0() * wrapper.data_type_size();
return ptr; return ptr;


@@ -138,27 +138,25 @@ MKLDNNNode::MKLDNNNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::en
} }
} }
if (op != nullptr) { std::string inputMemoryFormats = ngraph::getMKLDNNInputMemoryFormats(op);
std::string inputMemoryFormats = ngraph::getMKLDNNInputMemoryFormats(op); if (!inputMemoryFormats.empty()) {
if (!inputMemoryFormats.empty()) { std::istringstream stream(inputMemoryFormats);
std::istringstream stream(inputMemoryFormats); std::string str;
std::string str; while (getline(stream, str, ',')) {
while (getline(stream, str, ',')) { if (str.substr(0, 4) != "cpu:")
if (str.substr(0, 4) != "cpu:") continue;
continue; inputMemoryFormatsFilter.push_back(mkldnn::utils::str2fmt(str.substr(4, str.size()).c_str()));
inputMemoryFormatsFilter.push_back(mkldnn::utils::str2fmt(str.substr(4, str.size()).c_str()));
}
} }
}
std::string outputMemoryFormats = ngraph::getMKLDNNOutputMemoryFormats(op); std::string outputMemoryFormats = ngraph::getMKLDNNOutputMemoryFormats(op);
if (!outputMemoryFormats.empty()) { if (!outputMemoryFormats.empty()) {
std::istringstream stream(outputMemoryFormats); std::istringstream stream(outputMemoryFormats);
std::string str; std::string str;
while (getline(stream, str, ',')) { while (getline(stream, str, ',')) {
if (str.substr(0, 4) != "cpu:") if (str.substr(0, 4) != "cpu:")
continue; continue;
outputMemoryFormatsFilter.push_back(mkldnn::utils::str2fmt(str.substr(4, str.size()).c_str())); outputMemoryFormatsFilter.push_back(mkldnn::utils::str2fmt(str.substr(4, str.size()).c_str()));
}
} }
} }


@@ -121,11 +121,13 @@
# ifdef _WIN32 # ifdef _WIN32
# include <intrin.h> # include <intrin.h>
# include <windows.h> # include <windows.h>
# else # elif defined(__APPLE__)
# include <cpuid.h> # include <cpuid.h>
# endif # endif
#endif #endif
#include <cpu/x64/cpu_isa_traits.hpp>
using namespace MKLDNNPlugin; using namespace MKLDNNPlugin;
using namespace InferenceEngine; using namespace InferenceEngine;
@@ -200,7 +202,7 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr<ngraph::Function>
{ngraph::element::u4, ngraph::element::u8} {ngraph::element::u4, ngraph::element::u8}
}; };
if (!with_cpu_x86_avx512_core()) if (!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core))
array.push_back({ngraph::element::bf16, ngraph::element::f32}); array.push_back({ngraph::element::bf16, ngraph::element::f32});
return array; return array;
@@ -504,7 +506,7 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr<ngraph::Function>
postLPTPassManager.register_pass<ngraph::pass::ConstantFolding>(); postLPTPassManager.register_pass<ngraph::pass::ConstantFolding>();
postLPTPassManager.run_passes(nGraphFunc); postLPTPassManager.run_passes(nGraphFunc);
if (!useLpt && _enableSnippets && with_cpu_x86_avx2()) { if (!useLpt && _enableSnippets && dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2)) {
ngraph::pass::Manager tokenization_manager; ngraph::pass::Manager tokenization_manager;
tokenization_manager.register_pass<SnippetsMarkSkipped>(); tokenization_manager.register_pass<SnippetsMarkSkipped>();
tokenization_manager.register_pass<ngraph::snippets::pass::EnumerateNodes>(); tokenization_manager.register_pass<ngraph::snippets::pass::EnumerateNodes>();
@@ -656,7 +658,7 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std
|| Config::LPTransformsMode::On == engConfig.lpTransformsMode /* or already enabled for the plugin */; || Config::LPTransformsMode::On == engConfig.lpTransformsMode /* or already enabled for the plugin */;
const auto& BF16Prop = config.find(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16); const auto& BF16Prop = config.find(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16);
const bool enableBF16 = ((BF16Prop != config.end() && BF16Prop->second == PluginConfigParams::YES) const bool enableBF16 = ((BF16Prop != config.end() && BF16Prop->second == PluginConfigParams::YES)
|| engConfig.enforceBF16) && with_cpu_x86_avx512_core(); || engConfig.enforceBF16) && dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core);
const auto& modelCacheProp = config.find(InferenceEngine::PluginConfigParams::KEY_CACHE_DIR); const auto& modelCacheProp = config.find(InferenceEngine::PluginConfigParams::KEY_CACHE_DIR);
const bool enableModelCache = (modelCacheProp != config.end() && !modelCacheProp->second.empty()) const bool enableModelCache = (modelCacheProp != config.end() && !modelCacheProp->second.empty())
|| !engConfig.cache_dir.empty(); || !engConfig.cache_dir.empty();
@@ -753,20 +755,6 @@ Parameter Engine::GetConfig(const std::string& name, const std::map<std::string,
return GetConfigLegacy(name, options); return GetConfigLegacy(name, options);
} }
static bool hasAVX512() {
#if !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) && !defined(_M_ARM64)
unsigned int regs[4] = {7, 0, 0, 0};
#ifdef _WIN32
__cpuid(reinterpret_cast<int*>(regs), regs[0]);
#else
__cpuid_count(regs[0], regs[1], regs[0], regs[1], regs[2], regs[3]);
#endif
if (regs[1] & (1U << 16))
return true;
#endif
return false;
}
Parameter Engine::GetMetricLegacy(const std::string& name, const std::map<std::string, Parameter>& options) const { Parameter Engine::GetMetricLegacy(const std::string& name, const std::map<std::string, Parameter>& options) const {
if (name == METRIC_KEY(SUPPORTED_METRICS)) { if (name == METRIC_KEY(SUPPORTED_METRICS)) {
std::vector<std::string> metrics = { std::vector<std::string> metrics = {
@@ -787,9 +775,9 @@ Parameter Engine::GetMetricLegacy(const std::string& name, const std::map<std::s
IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices); IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
} else if (name == METRIC_KEY(OPTIMIZATION_CAPABILITIES)) { } else if (name == METRIC_KEY(OPTIMIZATION_CAPABILITIES)) {
std::vector<std::string> capabilities; std::vector<std::string> capabilities;
if (with_cpu_x86_bfloat16()) if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16))
capabilities.push_back(METRIC_VALUE(BF16)); capabilities.push_back(METRIC_VALUE(BF16));
if (hasAVX512()) if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_common))
capabilities.push_back(METRIC_VALUE(WINOGRAD)); capabilities.push_back(METRIC_VALUE(WINOGRAD));
capabilities.push_back(METRIC_VALUE(FP32)); capabilities.push_back(METRIC_VALUE(FP32));
capabilities.push_back(METRIC_VALUE(FP16)); capabilities.push_back(METRIC_VALUE(FP16));
@@ -855,9 +843,9 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
return availableDevices; return availableDevices;
} else if (name == ov::device::capabilities) { } else if (name == ov::device::capabilities) {
std::vector<std::string> capabilities; std::vector<std::string> capabilities;
if (with_cpu_x86_bfloat16()) if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16))
capabilities.push_back(METRIC_VALUE(BF16)); capabilities.push_back(METRIC_VALUE(BF16));
if (hasAVX512()) if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_common))
capabilities.push_back(METRIC_VALUE(WINOGRAD)); capabilities.push_back(METRIC_VALUE(WINOGRAD));
capabilities.push_back(METRIC_VALUE(FP32)); capabilities.push_back(METRIC_VALUE(FP32));
capabilities.push_back(METRIC_VALUE(FP16)); capabilities.push_back(METRIC_VALUE(FP16));
@@ -904,7 +892,8 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma
const auto& lptProp = config.find(InferenceEngine::PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE); const auto& lptProp = config.find(InferenceEngine::PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE);
const bool enableLPT = (lptProp != config.end() && lptProp->second == PluginConfigParams::YES) /* enabled in the orig_config*/ const bool enableLPT = (lptProp != config.end() && lptProp->second == PluginConfigParams::YES) /* enabled in the orig_config*/
|| Config::LPTransformsMode::On == engConfig.lpTransformsMode /* or already enabled */; || Config::LPTransformsMode::On == engConfig.lpTransformsMode /* or already enabled */;
const bool enableSnippets = !(conf.cache_dir.empty() || conf.enableDynamicBatch || (conf.enforceBF16 && with_cpu_x86_avx512_core())); const bool enableSnippets = !(conf.cache_dir.empty() || conf.enableDynamicBatch || (conf.enforceBF16
&& dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)));
Transformation(clonedNetwork, enableLPT, enableSnippets, isLegacyAPI()); Transformation(clonedNetwork, enableLPT, enableSnippets, isLegacyAPI());
auto ops = clonedNetwork.getFunction()->get_ordered_ops(); auto ops = clonedNetwork.getFunction()->get_ordered_ops();
std::unordered_set<std::string> supported; std::unordered_set<std::string> supported;


@@ -127,6 +127,10 @@ public:
} }
return false; return false;
}); });
if (itr == opList.end())
return;
auto sumNode = *itr; auto sumNode = *itr;
addEdge(inp0, sumNode, 0, 0); addEdge(inp0, sumNode, 0, 0);
addEdge(inp1, sumNode, 0, 1); addEdge(inp1, sumNode, 0, 1);


@@ -183,7 +183,7 @@ private:
std::vector<float> outputShift; std::vector<float> outputShift;
std::vector<float> quantizationData; std::vector<float> quantizationData;
size_t quantizationDataSize; size_t quantizationDataSize = 0lu;
MKLDNNMemoryPtr quantizationMemory; MKLDNNMemoryPtr quantizationMemory;
size_t cropLowSize; size_t cropLowSize;


@@ -2076,6 +2076,8 @@ void MKLDNNTopKNode::createPrimitive() {
jcp.bubble_inplace = bubble_inplace; jcp.bubble_inplace = bubble_inplace;
jcp.sort_stride = static_cast<int>(I); jcp.sort_stride = static_cast<int>(I);
jcp.work_amount = static_cast<int>(I); jcp.work_amount = static_cast<int>(I);
jcp.bitonic_idx_cnt = 0;
jcp.bitonic_k_idx_cnt = 0;
if (algorithm == TopKAlgorithm::topk_bitonic_sort) { if (algorithm == TopKAlgorithm::topk_bitonic_sort) {
auto &srcMemPtr = getParentEdgeAt(TOPK_DATA)->getMemoryPtr(); auto &srcMemPtr = getParentEdgeAt(TOPK_DATA)->getMemoryPtr();


@@ -787,7 +787,17 @@ static bool is_node_for_onednn(deconvolution_node const& node) {
get_post_ops_count(node) <= 32 && get_post_ops_count(node) <= 32 &&
input_layout.data_type == output_layout.data_type; input_layout.data_type == output_layout.data_type;
auto spatial_dims_num = input_layout.format.spatial_num(); auto spatial_dims_num = input_layout.get_spatial_rank();
// oneDNN doesn't support sum post ops for deconvolutions
for (auto& fused_op : node.get_fused_primitives()) {
if (fused_op.node->is_type<eltwise>() && fused_op.deps.size() == 1) {
auto eltw_in_layout = node.get_dependency(fused_op.dep_start_idx).get_output_layout();
if (program_helpers::needs_onednn_sum_post_op(fused_op.node->as<eltwise>(), eltw_in_layout)) {
return false;
}
}
}
return onednn_valid_dt && onednn_valid_params && spatial_dims_num <= 3; return onednn_valid_dt && onednn_valid_params && spatial_dims_num <= 3;
} }
@@ -1477,19 +1487,6 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node, format
if (node.is_type<convolution>() && node.as<convolution>().weights_zero_points_term()) if (node.is_type<convolution>() && node.as<convolution>().weights_zero_points_term())
impl_candidate = impl_types::ocl; impl_candidate = impl_types::ocl;
// oneDNN doesn't support sum post ops for deconvolutions
if (node.is_type<deconvolution>() && impl_candidate == impl_types::onednn) {
for (auto& fused_op : node.get_fused_primitives()) {
if (fused_op.node->is_type<eltwise>() && fused_op.deps.size() == 1) {
auto eltw_in_layout = node.get_dependency(fused_op.dep_start_idx).get_output_layout();
if (program_helpers::needs_onednn_sum_post_op(fused_op.node->as<eltwise>(), eltw_in_layout)) {
impl_candidate = impl_types::ocl;
break;
}
}
}
}
preferred_impl = impl_candidate; preferred_impl = impl_candidate;
} else if (node.is_type<concatenation>()) { } else if (node.is_type<concatenation>()) {
if (!_optimization_attributes.use_onednn_impls) if (!_optimization_attributes.use_onednn_impls)
@@ -1652,8 +1649,6 @@ format layout_optimizer::get_preferred_format(program_node& node) {
format expected_conv_fmt = get_expected_layout(conv_output_layout, conv, weights_layout).format; format expected_conv_fmt = get_expected_layout(conv_output_layout, conv, weights_layout).format;
if (expected == format::bfyx && expected_conv_fmt == format::bs_fs_yx_bsv32_fsv32 && layout.size.feature[0] % 32 == 0) if (expected == format::bfyx && expected_conv_fmt == format::bs_fs_yx_bsv32_fsv32 && layout.size.feature[0] % 32 == 0)
expected = expected_conv_fmt; expected = expected_conv_fmt;
} else {
expected = format::bfyx;
} }
} else if (layout.size.feature[0] > 8) { } else if (layout.size.feature[0] > 8) {
expected = format::b_fs_yx_fsv16; expected = format::b_fs_yx_fsv16;
@@ -1680,10 +1675,6 @@ format layout_optimizer::get_preferred_format(program_node& node) {
format expected_conv_fmt = get_expected_layout(conv_output_layout, conv, weights_layout).format; format expected_conv_fmt = get_expected_layout(conv_output_layout, conv, weights_layout).format;
if (layout.format.spatial_num() == 2 && layout.size.feature[0] % 32 == 0 && expected_conv_fmt == format::b_fs_yx_fsv32) if (layout.format.spatial_num() == 2 && layout.size.feature[0] % 32 == 0 && expected_conv_fmt == format::b_fs_yx_fsv32)
expected = expected_conv_fmt; expected = expected_conv_fmt;
else
expected = format::bfyx;
} else {
expected = format::bfyx;
} }
} }
} else if (node.is_type<reorder>() || node.is_type<input_layout>()) { } else if (node.is_type<reorder>() || node.is_type<input_layout>()) {


@@ -243,7 +243,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
} }
} else if (key.compare(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) == 0 || key == ov::num_streams) { } else if (key.compare(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) == 0 || key == ov::num_streams) {
if (val.compare(PluginConfigParams::GPU_THROUGHPUT_AUTO) == 0 || if (val.compare(PluginConfigParams::GPU_THROUGHPUT_AUTO) == 0 ||
val.compare(ov::num_streams(ov::NumStreams::AUTO).second.as<std::string>()) == 0) { val.compare(ov::util::to_string(ov::streams::AUTO)) == 0) {
throughput_streams = GetDefaultNStreamsForThroughputMode(); throughput_streams = GetDefaultNStreamsForThroughputMode();
} else { } else {
int val_i; int val_i;


@@ -225,7 +225,7 @@ std::map<std::string, std::string> Plugin::ConvertPerfHintsToConfig(
config[ov::num_streams.name()] = std::to_string(1); config[ov::num_streams.name()] = std::to_string(1);
} else if (mode_name == CONFIG_VALUE(THROUGHPUT)) { } else if (mode_name == CONFIG_VALUE(THROUGHPUT)) {
config[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = CONFIG_VALUE(GPU_THROUGHPUT_AUTO); config[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = CONFIG_VALUE(GPU_THROUGHPUT_AUTO);
config[ov::num_streams.name()] = ov::util::to_string(ov::NumStreams(ov::NumStreams::AUTO)); config[ov::num_streams.name()] = ov::util::to_string(ov::streams::AUTO);
//disabling the throttling temporarily to set the validation (that is switching to the hints) perf baseline //disabling the throttling temporarily to set the validation (that is switching to the hints) perf baseline
//checking throttling (to avoid overriding what user might explicitly set in the incoming config or previously via SetConfig) //checking throttling (to avoid overriding what user might explicitly set in the incoming config or previously via SetConfig)
// const auto bInConfig = config.find(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) != config.end() || // const auto bInConfig = config.find(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) != config.end() ||
@@ -913,7 +913,7 @@ Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string,
} else if (it_streams->second.is<std::string>()) { } else if (it_streams->second.is<std::string>()) {
std::string n_streams_str = it_streams->second.as<std::string>(); std::string n_streams_str = it_streams->second.as<std::string>();
if (n_streams_str != CONFIG_VALUE(GPU_THROUGHPUT_AUTO) && if (n_streams_str != CONFIG_VALUE(GPU_THROUGHPUT_AUTO) &&
n_streams_str != util::to_string(ov::NumStreams(ov::NumStreams::AUTO))) { n_streams_str != util::to_string(ov::streams::AUTO)) {
IE_THROW() << "[GPU_MAX_BATCH_SIZE] bad casting: GPU_THROUGHPUT_STREAMS should be either of uint32_t type or \"GPU_THROUGHPUT_AUTO\""; IE_THROW() << "[GPU_MAX_BATCH_SIZE] bad casting: GPU_THROUGHPUT_STREAMS should be either of uint32_t type or \"GPU_THROUGHPUT_AUTO\"";
} }
n_streams = config.GetDefaultNStreamsForThroughputMode(); n_streams = config.GetDefaultNStreamsForThroughputMode();


@@ -148,7 +148,7 @@ bool kernels_cache::is_cache_enabled() const {
} }
size_t kernels_cache::get_max_kernels_per_batch() const { size_t kernels_cache::get_max_kernels_per_batch() const {
return 10; return 9;
} }


@@ -78,38 +78,38 @@ public:
} }
template <typename... Args> template <typename... Args>
inline void fatal(const char* format, const Args&... args) const noexcept { inline void fatal(const char* format, const Args&... args) const {
addEntry(LogLevel::Fatal, format, args...); addEntry(LogLevel::Fatal, format, args...);
} }
template <typename... Args> template <typename... Args>
inline void error(const char* format, const Args&... args) const noexcept { inline void error(const char* format, const Args&... args) const {
addEntry(LogLevel::Error, format, args...); addEntry(LogLevel::Error, format, args...);
} }
template <typename... Args> template <typename... Args>
inline void warning(const char* format, const Args&... args) const noexcept { inline void warning(const char* format, const Args&... args) const {
addEntry(LogLevel::Warning, format, args...); addEntry(LogLevel::Warning, format, args...);
} }
template <typename... Args> template <typename... Args>
inline void info(const char* format, const Args&... args) const noexcept { inline void info(const char* format, const Args&... args) const {
addEntry(LogLevel::Info, format, args...); addEntry(LogLevel::Info, format, args...);
} }
template <typename... Args> template <typename... Args>
inline void debug(const char* format, const Args&... args) const noexcept { inline void debug(const char* format, const Args&... args) const {
addEntry(LogLevel::Debug, format, args...); addEntry(LogLevel::Debug, format, args...);
} }
template <typename... Args> template <typename... Args>
inline void trace(const char* format, const Args&... args) const noexcept { inline void trace(const char* format, const Args&... args) const {
addEntry(LogLevel::Trace, format, args...); addEntry(LogLevel::Trace, format, args...);
} }
private: private:
template <typename... Args> template <typename... Args>
void addEntry(LogLevel msgLevel, const char* format, const Args&... args) const noexcept { void addEntry(LogLevel msgLevel, const char* format, const Args&... args) const {
if (!isActive(msgLevel)) { if (!isActive(msgLevel)) {
return; return;
} }
@@ -124,8 +124,8 @@ private:
_out->get().flush(); _out->get().flush();
} }
void printHeader(LogLevel msgLevel) const noexcept; void printHeader(LogLevel msgLevel) const;
void printFooter() const noexcept; void printFooter() const;
private: private:
std::string _name; std::string _name;


@@ -9,6 +9,7 @@
#include <openvino/runtime/properties.hpp> #include <openvino/runtime/properties.hpp>
#include <sstream> #include <sstream>
#include <vpu/myriad_config.hpp> #include <vpu/myriad_config.hpp>
#include <openvino/util/common_util.hpp>
namespace vpu { namespace vpu {
@@ -45,9 +46,7 @@ details::Category OvThroughputStreamsOption::category() {
} }
std::string OvThroughputStreamsOption::defaultValue() { std::string OvThroughputStreamsOption::defaultValue() {
std::stringstream ss; return ov::util::to_string(ov::streams::AUTO);
ss << ov::NumStreams(ov::NumStreams::AUTO);
return ss.str();
} }
OvThroughputStreamsOption::value_type OvThroughputStreamsOption::parse(const std::string& value) { OvThroughputStreamsOption::value_type OvThroughputStreamsOption::parse(const std::string& value) {
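Round-trip sketch for the string form used above (assumes the ov::util to_string/from_string helpers included by this change):

    const std::string text = ov::util::to_string(ov::streams::AUTO);    // "AUTO"
    const auto parsed = ov::util::from_string(text, ov::streams::num);  // back to streams::Num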


@@ -118,7 +118,7 @@ const auto COLOR_RESET = "\033[0m";
} // namespace } // namespace
void Logger::printHeader(LogLevel msgLevel) const noexcept { void Logger::printHeader(LogLevel msgLevel) const {
try { try {
if (_out->supportColors()) { if (_out->supportColors()) {
static const EnumMap<LogLevel, const char*> levelColors{ static const EnumMap<LogLevel, const char*> levelColors{
@@ -148,7 +148,7 @@ void Logger::printHeader(LogLevel msgLevel) const noexcept {
} }
} }
void Logger::printFooter() const noexcept { void Logger::printFooter() const {
try { try {
if (_out->supportColors()) { if (_out->supportColors()) {
_out->get() << COLOR_RESET; _out->get() << COLOR_RESET;


@@ -235,7 +235,7 @@ CompiledGraph::Ptr compileSubNetwork(const ie::CNNNetwork& network, const Plugin
const auto& env = CompileEnv::get(); const auto& env = CompileEnv::get();
auto prevConfig = env.config; auto prevConfig = env.config;
AutoScope autoRecover([prevConfig]() { AutoScope autoRecover([&prevConfig]() {
CompileEnv::updateConfig(prevConfig); CompileEnv::updateConfig(prevConfig);
}); });


@@ -177,7 +177,7 @@ void FrontEnd::parseRNN(const Model& model, const ie::CNNLayerPtr& _layer, const
IE_ASSERT(stateSize * ngates == biasesSize); IE_ASSERT(stateSize * ngates == biasesSize);
/* weights repacking */ /* weights repacking */
const auto generator = [&weights, stateSize, inputSize, ngates, outputs](const ie::Blob::Ptr& blob) { const auto generator = [&weights, stateSize, inputSize, ngates](const ie::Blob::Ptr& blob) {
auto newWeightsPtr = blob->buffer().as<fp16_t*>(); auto newWeightsPtr = blob->buffer().as<fp16_t*>();
auto content = weights->content(); auto content = weights->content();


@@ -314,6 +314,9 @@ InferenceEngine::Parameter Engine::GetMetric(const std::string& name,
ov::range_for_async_infer_requests.name(), ov::range_for_async_infer_requests.name(),
ov::device::thermal.name(), ov::device::thermal.name(),
ov::device::architecture.name(), ov::device::architecture.name(),
ov::num_streams.name(),
ov::hint::performance_mode.name(),
ov::hint::num_requests.name(),
}; };
} else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) { } else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
// TODO: remove once all options are migrated // TODO: remove once all options are migrated


@@ -63,3 +63,9 @@ TEST(ONNXReader_ModelUnsupported, unknown_wire_type) {
EXPECT_THROW(InferenceEngine::Core{}.ReadNetwork(model_path("unsupported/unknown_wire_type.onnx")), EXPECT_THROW(InferenceEngine::Core{}.ReadNetwork(model_path("unsupported/unknown_wire_type.onnx")),
InferenceEngine::NetworkNotRead); InferenceEngine::NetworkNotRead);
} }
TEST(ONNXReader_ModelUnsupported, duplicate_fields) {
// the model contains the IR_VERSION field twice - this is not correct
EXPECT_THROW(InferenceEngine::Core{}.ReadNetwork(model_path("unsupported/duplicate_onnx_fields.onnx")),
std::exception);
}


@@ -0,0 +1,12 @@
[non-text content: a serialized ONNX protobuf with the doc string "ONNX Reader test" and a graph "cosh_graph" containing a single Cosh node from input "x" to output "y"; binary bytes omitted]


@@ -102,6 +102,50 @@ TEST_F(TransformationTestsF, test_convert_to_unsigned_nms_gather_2) {
} }
} }
TEST_F(TransformationTestsF, test_convert_to_unsigned_nms_gather_with_onnx_slice) {
// if Convert already exists and Slice is present instead of StridedSlice
{
auto boxes = make_shared<opset8::Parameter>(element::f32, Shape{1, 1000, 4});
auto scores = make_shared<opset8::Parameter>(element::f32, Shape{1, 1, 1000});
auto nms = make_shared<opset8::NonMaxSuppression>(boxes, scores);
auto start = opset8::Constant::create(element::i32, Shape{1}, {3});
auto stop = opset8::Constant::create(element::i32, Shape{1}, {4});
auto step = opset8::Constant::create(element::i32, Shape{1}, {1});
auto slice_node = make_shared<opset8::Slice>(nms->output(0), start, stop, step);
// squeeze can be represented as reshape
auto squeeze_node = make_shared<opset8::Reshape>(slice_node, opset8::Constant::create(element::i32, Shape{1}, {-1}), true);
auto convert = make_shared<opset8::Convert>(squeeze_node, element::Type_t::i32);
// usually input to gather data goes after reshape NMS scores
auto reshape_node = make_shared<opset8::Reshape>(scores, opset8::Constant::create(element::i32, Shape{1}, {-1}), true);
auto gather = make_shared<opset8::Gather>(reshape_node, convert, opset8::Constant::create(element::i32, Shape{1}, {0}));
function = make_shared<Function>(NodeVector{gather}, ParameterVector{boxes, scores});
manager.register_pass<pass::ConvertNmsGatherPathToUnsigned>();
}
{
auto boxes = make_shared<opset8::Parameter>(element::f32, Shape{1, 1000, 4});
auto scores = make_shared<opset8::Parameter>(element::f32, Shape{1, 1, 1000});
auto nms = make_shared<opset8::NonMaxSuppression>(boxes, scores);
auto start = opset8::Constant::create(element::i32, Shape{1}, {3});
auto stop = opset8::Constant::create(element::i32, Shape{1}, {4});
auto step = opset8::Constant::create(element::i32, Shape{1}, {1});
auto slice_node = make_shared<opset8::Slice>(nms->output(0), start, stop, step);
// squeeze can be represented as reshape
auto squeeze_node = make_shared<opset8::Reshape>(slice_node, opset8::Constant::create(element::i32, Shape{1}, {-1}), true);
auto convert = make_shared<opset8::Convert>(squeeze_node, element::Type_t::u32);
auto reshape_node = make_shared<opset8::Reshape>(scores, opset8::Constant::create(element::i32, Shape{1}, {-1}), true);
auto gather = make_shared<opset8::Gather>(reshape_node, convert, opset8::Constant::create(element::i32, Shape{1}, {0}));
function_ref = make_shared<Function>(NodeVector{gather}, ParameterVector{boxes, scores});
}
}
TEST(TransformationTests, test_convert_to_unsigned_nms_gather_3) { TEST(TransformationTests, test_convert_to_unsigned_nms_gather_3) {
// if NMS output goes not into Gather indices no converts should be inserted // if NMS output goes not into Gather indices no converts should be inserted
auto boxes = make_shared<opset8::Parameter>(element::f32, Shape{1, 1000, 4}); auto boxes = make_shared<opset8::Parameter>(element::f32, Shape{1, 1000, 4});


@@ -23,8 +23,6 @@ The tool takes two command line parameters:
* `--constants_size_threshold` - Optional. Maximum size of constant in megabytes to be serialized. * `--constants_size_threshold` - Optional. Maximum size of constant in megabytes to be serialized.
If constant size exceeds specified number it will be replaced If constant size exceeds specified number it will be replaced
with parameter and meta information about original data range will be saved with parameter and meta information about original data range will be saved
* `--eliminate_dynamism` - Optional. If specified dynamic shapes will be eliminated from model
and replaced by propagated upper bound values (if possible)
* '--extract_body' - Optional. Allow to extract operation bodies to operation cache. * '--extract_body' - Optional. Allow to extract operation bodies to operation cache.
E.g. E.g.


@@ -18,17 +18,14 @@ static const char constants_size_threshold_message[] = "Optional. Maximum size o
"If constant size exceeds specified number it will be replaced" "If constant size exceeds specified number it will be replaced"
"with parameter and meta information about original data range " "with parameter and meta information about original data range "
"will be saved"; "will be saved";
static const char eliminate_dynamism_message[] = "Optional. If specified dynamic shapes will be eliminated from model"
"and replaced by propagated upper bound values (if possible)";
static const char extract_body_message[] = "Optional. Allow to extract operation bodies to operation cache."; static const char extract_body_message[] = "Optional. Allow to extract operation bodies to operation cache.";
DEFINE_bool(h, false, help_message); DEFINE_bool(h, false, help_message);
DEFINE_string(input_folders, "", input_folders_message); DEFINE_string(input_folders, "", input_folders_message);
DEFINE_string(local_cache, ".", local_cache_message); DEFINE_string(local_cache, "", local_cache_message);
DEFINE_string(output_folder, "output", output_folder_message); DEFINE_string(output_folder, "output", output_folder_message);
DEFINE_string(path_regex, ".*", path_regex_message); DEFINE_string(path_regex, ".*", path_regex_message);
DEFINE_double(constants_size_threshold, 1., constants_size_threshold_message); DEFINE_double(constants_size_threshold, 1., constants_size_threshold_message);
DEFINE_bool(eliminate_dynamism, true, eliminate_dynamism_message);
DEFINE_bool(extract_body, true, extract_body_message); DEFINE_bool(extract_body, true, extract_body_message);
/** /**
@ -45,7 +42,6 @@ static void showUsage() {
std::cout << " --output_folder \"<path>\" " << output_folder_message << "\n"; std::cout << " --output_folder \"<path>\" " << output_folder_message << "\n";
std::cout << " --path_regex \"<path>\" " << path_regex_message << "\n"; std::cout << " --path_regex \"<path>\" " << path_regex_message << "\n";
std::cout << " --constants_size_threshold \"<value>\" " << constants_size_threshold_message << "\n"; std::cout << " --constants_size_threshold \"<value>\" " << constants_size_threshold_message << "\n";
std::cout << " --eliminate_dynamism \"<value>\" " << eliminate_dynamism_message << "\n";
std::cout << " --extract_body \"<value>\" " << extract_body_message << "\n"; std::cout << " --extract_body \"<value>\" " << extract_body_message << "\n";
std::cout << std::flush; std::cout << std::flush;
} }

View File

@ -17,7 +17,7 @@ namespace SubgraphsDumper {
class OPCache { class OPCache {
public: public:
OPCache() : num_neighbours_to_cache(0), manager(MatchersManager()), OPCache() : num_neighbours_to_cache(0), manager(MatchersManager()),
m_ops_cache(std::vector<std::pair<std::shared_ptr<ngraph::Node>, LayerTestsUtils::OPInfo>>()) {} m_ops_cache(std::map<std::shared_ptr<ngraph::Node>, LayerTestsUtils::OPInfo>()) {}
static std::unique_ptr<OPCache> make_cache() { static std::unique_ptr<OPCache> make_cache() {
return std::unique_ptr<OPCache>(new OPCache()); return std::unique_ptr<OPCache>(new OPCache());
@ -36,7 +36,7 @@ public:
float get_size_of_cached_ops(); float get_size_of_cached_ops();
protected: protected:
std::vector<std::pair<std::shared_ptr<ngraph::Node>, LayerTestsUtils::OPInfo>> m_ops_cache; std::map<std::shared_ptr<ngraph::Node>, LayerTestsUtils::OPInfo> m_ops_cache;
MatchersManager manager; MatchersManager manager;
size_t num_neighbours_to_cache = 0; size_t num_neighbours_to_cache = 0;
enum SerializationStatus { enum SerializationStatus {

View File

@ -1,13 +0,0 @@
// Copyright (C) 2019-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ngraph/ngraph.hpp>
namespace SubgraphsDumper {
// Copy from serialization transformation pass
void resolve_dynamic_shapes(const std::shared_ptr<ngraph::Function>& f);
} // namespace SubgraphsDumper

View File

@ -13,7 +13,6 @@
#include "ops_cache.hpp" #include "ops_cache.hpp"
#include "op_cloner.hpp" #include "op_cloner.hpp"
#include "utils/dynamism_resolver.hpp"
#include "utils/model_wrap_struct.hpp" #include "utils/model_wrap_struct.hpp"
#include "gflag_config.hpp" #include "gflag_config.hpp"
#include <stdlib.h> #include <stdlib.h>
@ -71,15 +70,6 @@ void cacheModels(std::unique_ptr<SubgraphsDumper::OPCache> &cache,
InferenceEngine::CNNNetwork net = ie.ReadNetwork(model.xml, model.bin); InferenceEngine::CNNNetwork net = ie.ReadNetwork(model.xml, model.bin);
auto function = net.getFunction(); auto function = net.getFunction();
if (FLAGS_eliminate_dynamism) {
try {
SubgraphsDumper::resolve_dynamic_shapes(function);
} catch (std::exception &e) {
std::cout << "Failed to eliminate dynamism from model " << model.xml
<< "\n Exception occurred:\n" << e.what() << "\nModel will be processed as is."
<< std::endl;
}
}
cache->update_ops_cache(function, extract_body, model.xml); cache->update_ops_cache(function, extract_body, model.xml);
} catch (std::exception &e) { } catch (std::exception &e) {
std::cout << "Model processing failed with exception:" << std::endl << e.what() << std::endl; std::cout << "Model processing failed with exception:" << std::endl << e.what() << std::endl;
@ -107,7 +97,9 @@ int main(int argc, char *argv[]) {
auto models = findModelsInDirs(dirs); auto models = findModelsInDirs(dirs);
auto cache = SubgraphsDumper::OPCache::make_cache(); auto cache = SubgraphsDumper::OPCache::make_cache();
cacheModels(cache, ret_code, cachedOps, FLAGS_extract_body); if (!FLAGS_local_cache.empty()) {
cacheModels(cache, ret_code, cachedOps, FLAGS_extract_body);
}
cacheModels(cache, ret_code, models, FLAGS_extract_body); cacheModels(cache, ret_code, models, FLAGS_extract_body);
cache->serialize_cached_ops(FLAGS_output_folder); cache->serialize_cached_ops(FLAGS_output_folder);

View File

@ -85,9 +85,9 @@ bool SingleOpMatcher::match_inputs(const std::shared_ptr<ngraph::Node> &node,
ref->get_input_tensor(i).get_partial_shape().rank(); ref->get_input_tensor(i).get_partial_shape().rank();
bool elemTypeIsEqual = node->get_input_tensor(i).get_element_type() == bool elemTypeIsEqual = node->get_input_tensor(i).get_element_type() ==
ref->get_input_tensor(i).get_element_type(); ref->get_input_tensor(i).get_element_type();
bool is_dynamic = node->get_input_node_ptr(i)->is_dynamic() == bool dynamismIsEqual = node->get_input_partial_shape(i).is_dynamic() ==
ref->get_input_node_ptr(i)->is_dynamic(); ref->get_input_partial_shape(i).is_dynamic();
if (!(rankIsEqual && elemTypeIsEqual && is_dynamic)) { if (!rankIsEqual || !elemTypeIsEqual || !dynamismIsEqual) {
return false; return false;
} }
} }
@ -102,12 +102,20 @@ SingleOpMatcher::match_outputs(const std::shared_ptr<ngraph::Node> &node,
if (node->get_output_size() != ref->get_output_size()) { if (node->get_output_size() != ref->get_output_size()) {
return false; return false;
} }
// Match output element type // Match output element type, shape rank & dynamism
for (size_t i = 0; i < node->get_output_size(); ++i) { for (size_t i = 0; i < node->get_output_size(); ++i) {
if (node->get_output_tensor(i).get_element_type() != if (node->get_output_tensor(i).get_element_type() !=
ref->get_output_tensor(i).get_element_type()) { ref->get_output_tensor(i).get_element_type()) {
return false; return false;
} }
if (node->get_output_tensor(i).get_partial_shape().is_dynamic() !=
ref->get_output_tensor(i).get_partial_shape().is_dynamic()) {
return false;
}
if (node->get_output_tensor(i).get_partial_shape().rank() !=
ref->get_output_tensor(i).get_partial_shape().rank()) {
return false;
}
} }
return true; return true;
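Taken together, the hunks above tighten matching: every input and output port must now agree on element type, shape rank, and static/dynamic status. A condensed sketch of the per-port predicate; the helper name is illustrative and not part of SingleOpMatcher's API:

// Two ports are treated as equivalent when element type, rank,
// and dynamism all agree, mirroring the checks above.
bool ports_equivalent(const ngraph::Output<ngraph::Node>& lhs,
                      const ngraph::Output<ngraph::Node>& rhs) {
    const auto& l = lhs.get_partial_shape();
    const auto& r = rhs.get_partial_shape();
    return lhs.get_element_type() == rhs.get_element_type() &&
           l.rank() == r.rank() &&
           l.is_dynamic() == r.is_dynamic();
}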
@ -150,6 +158,12 @@ bool SingleOpMatcher::match_ports(const std::shared_ptr<ngraph::Node> &node,
bool SingleOpMatcher::match(const std::shared_ptr<ngraph::Node> &node, bool SingleOpMatcher::match(const std::shared_ptr<ngraph::Node> &node,
const std::shared_ptr<ngraph::Node> &ref, const std::shared_ptr<ngraph::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const { const LayerTestsUtils::OPInfo &op_info) const {
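// Diagnostic logging: report each node that still carries a dynamic input (at most once per node).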
for (const auto& input_node : node->inputs()) {
if (input_node.get_partial_shape().is_dynamic()) {
std::cout << "[ DEBUG ] " << node->get_friendly_name() << std::endl;
break;
}
}
const auto &cfg = get_config(node); const auto &cfg = get_config(node);
if (match_only_configured_ops() && cfg->is_fallback_config) { if (match_only_configured_ops() && cfg->is_fallback_config) {
return false; return false;

View File

@ -30,7 +30,7 @@ void OPCache::update_ops_cache(const std::shared_ptr<ngraph::Node> &op,
try { try {
const std::shared_ptr<ngraph::Node> op_clone = clone_fn(op, meta); const std::shared_ptr<ngraph::Node> op_clone = clone_fn(op, meta);
op_clone->set_friendly_name(op_clone->get_friendly_name() + "_cached"); op_clone->set_friendly_name(op_clone->get_friendly_name() + "_cached");
m_ops_cache.emplace_back(std::make_pair(op_clone, meta)); m_ops_cache.insert({op_clone, meta});
} catch (std::exception &e) { } catch (std::exception &e) {
std::cout << e.what() << std::endl; std::cout << e.what() << std::endl;
} }
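The container change above (a vector of pairs replaced by a std::map keyed by the cloned node) turns caching into a plain map insert; note that std::map over a shared_ptr key orders by pointer address, so entries are unique per cloned node object rather than per operation type. A minimal sketch of the updated pattern, with a stand-in for LayerTestsUtils::OPInfo:

#include <map>
#include <memory>
#include <ngraph/node.hpp>

struct OPInfo {};  // stand-in for LayerTestsUtils::OPInfo

using OpsCache = std::map<std::shared_ptr<ngraph::Node>, OPInfo>;

void cache_op(OpsCache& cache, const std::shared_ptr<ngraph::Node>& op_clone, const OPInfo& meta) {
    cache.insert({op_clone, meta});  // replaces the old emplace_back of a pair
}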

View File

@ -1,82 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "utils/dynamism_resolver.hpp"
namespace SubgraphsDumper {
bool has_dynamic_output(std::shared_ptr<ngraph::Node> n) {
for (size_t i = 0; i < n->get_output_size(); i++) {
if (n->get_output_partial_shape(i).is_dynamic()) {
return true;
}
}
return false;
}
void resolve_dynamic_shapes(const std::shared_ptr<ngraph::Function>& f) {
const auto & f_ops = f->get_ordered_ops();
if (std::all_of(f_ops.begin(), f_ops.end(),
[](std::shared_ptr<ngraph::Node> results) {
return !results->is_dynamic() && !has_dynamic_output(results); })) {
return;
}
auto f_clone = ngraph::clone_function(*f);
const auto & f_clone_ops = f_clone->get_ordered_ops();
NGRAPH_CHECK(f_ops.size() == f_clone_ops.size(), "Unexpected get_ordered_ops method behaviour");
for (size_t id = 0; id < f_ops.size(); ++id) {
auto & op = f_ops[id];
auto & clone_op = f_clone_ops[id];
if (auto op_subgraph = std::dynamic_pointer_cast<ngraph::op::util::SubGraphOp>(op)) {
resolve_dynamic_shapes(op_subgraph->get_function());
}
op->validate_and_infer_types();
clone_op->validate_and_infer_types();
// dynamic_to_static function converts dynamic dimensions to static using
// upperbound (get_max_length) dimension value.
auto dynamic_to_static = [&op](const ngraph::PartialShape & shape) -> ngraph::PartialShape {
if (shape.is_static() || shape.rank().is_dynamic()) {
return shape;
}
std::vector<ngraph::Dimension> out_shape;
std::transform(std::begin(shape), std::end(shape),
std::back_inserter(out_shape),
[](const ngraph::Dimension& d) -> ngraph::Dimension {
return d.get_max_length();
});
NGRAPH_CHECK(ngraph::PartialShape(out_shape).is_static(),
"Dynamic dimension cannot be resolved in ", op);
return out_shape;
};
ngraph::OutputVector replacements(clone_op->get_output_size());
if (!clone_op->constant_fold(replacements, clone_op->input_values())) {
for (size_t output_id = 0; output_id < clone_op->get_output_size(); ++output_id) {
clone_op->set_output_type(output_id, clone_op->output(output_id).get_element_type(),
dynamic_to_static(clone_op->output(output_id).get_partial_shape()));
op->set_output_type(output_id, clone_op->output(output_id).get_element_type(),
clone_op->output(output_id).get_partial_shape());
}
} else {
for (size_t output_id = 0; output_id < clone_op->get_output_size(); ++output_id) {
op->set_output_type(output_id, replacements[output_id].get_element_type(),
replacements[output_id].get_partial_shape());
}
for (size_t i = 0; i < replacements.size(); ++i) {
auto node_output = clone_op->output(i);
auto replacement = replacements.at(i);
if (replacement.get_node_shared_ptr() && (node_output != replacement)) {
node_output.replace(replacement);
}
}
}
}
}
} // namespace SubgraphsDumper
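The file removed above collapsed dynamic dimensions to their upper bounds before caching. For reference, its core resolution step distilled into a single-shape helper (a sketch derived from the deleted code, not a drop-in replacement):

#include <vector>
#include <ngraph/partial_shape.hpp>

// Replace each dynamic dimension with its upper bound (get_max_length),
// leaving fully static shapes and dynamic ranks untouched.
ngraph::PartialShape to_upper_bound(const ngraph::PartialShape& shape) {
    if (shape.is_static() || shape.rank().is_dynamic())
        return shape;
    std::vector<ngraph::Dimension> out_shape;
    for (const auto& d : shape)
        out_shape.emplace_back(d.get_max_length());
    return ngraph::PartialShape(out_shape);
}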

View File

@ -11,6 +11,7 @@
#include "ngraph_functions/builders.hpp" #include "ngraph_functions/builders.hpp"
#include "common_test_utils/file_utils.hpp" #include "common_test_utils/file_utils.hpp"
#include "common_test_utils/data_utils.hpp"
#include "common_test_utils/common_utils.hpp" #include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/layer_test_utils/op_info.hpp" #include "functional_test_utils/layer_test_utils/op_info.hpp"
#include "functional_test_utils/skip_tests_config.hpp" #include "functional_test_utils/skip_tests_config.hpp"
@ -140,18 +141,25 @@ void ReadIRTest::SetUp() {
} }
} }
} }
std::vector<ov::Shape> staticShapes; std::vector<InputShape> inputShapes;
for (const auto param : function->get_parameters()) { for (const auto& param : function->get_parameters()) {
if (param->get_partial_shape().is_static()) { if (param->get_partial_shape().is_static()) {
staticShapes.push_back(param->get_shape()); inputShapes.push_back(InputShape{{}, {param->get_shape()}});
} else { } else {
staticShapes.push_back(param->get_partial_shape().get_max_shape()); ov::Shape midShape;
for (const auto& s : param->get_partial_shape()) {
// get_length() is only valid for static dimensions, so sample dynamic
// dimensions from [min, max] instead of reading the length unconditionally
int dimValue = 0;
if (s.is_dynamic()) {
CommonTestUtils::fill_data_random(&dimValue, 1, s.get_max_length() - s.get_min_length(), s.get_min_length(), 1);
} else {
dimValue = s.get_length();
}
midShape.push_back(dimValue);
}
inputShapes.push_back(InputShape{param->get_partial_shape(), { param->get_partial_shape().get_min_shape(),
param->get_partial_shape().get_max_shape(),
midShape }});
} }
} }
for (const auto& param : function->get_parameters()) { init_input_shapes(inputShapes);
inputDynamicShapes.push_back(param->get_partial_shape());
}
targetStaticShapes.push_back(staticShapes);
} }
} // namespace subgraph } // namespace subgraph
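The reworked SetUp() above hands init_input_shapes() one InputShape per parameter: the dynamic signature plus min, max, and one randomly sampled intermediate static shape. A hand-built example of the same structure for a hypothetical parameter of shape {1, d} with d dynamic in [1, 10], assuming InputShape is the usual pair of ov::PartialShape and a vector of ov::Shape:

// Hypothetical parameter: first dimension static, second dynamic in [1, 10].
ov::PartialShape dynamic_signature{1, ov::Dimension(1, 10)};
// Dynamic signature plus {min, max, intermediate} static shapes,
// mirroring what ReadIRTest::SetUp() builds above.
InputShape entry{dynamic_signature, {ov::Shape{1, 1}, ov::Shape{1, 10}, ov::Shape{1, 7}}};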

View File

@ -16,6 +16,8 @@ TEST_P(InferRequestPerfCountersTest, CheckOperationInPerfMap) {
std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap; std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
ASSERT_NO_THROW(perfMap = req.GetPerformanceCounts()); ASSERT_NO_THROW(perfMap = req.GetPerformanceCounts());
for (const auto& op : function->get_ops()) { for (const auto& op : function->get_ops()) {
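// Skip Constant ops below: they are typically not executed as separate primitives, so no entry is expected for them in the performance map.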
if (!strcmp(op->get_type_info().name, "Constant"))
continue;
auto it = perfMap.begin(); auto it = perfMap.begin();
while (true) { while (true) {
if (it->first.find(op->get_friendly_name() + "_") != std::string::npos || it->first == op->get_friendly_name()) { if (it->first.find(op->get_friendly_name() + "_") != std::string::npos || it->first == op->get_friendly_name()) {

View File

@ -0,0 +1,140 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_executable_network/properties.hpp"
#include "openvino/runtime/properties.hpp"
#include "ie_system_conf.h"
using namespace ov::test::behavior;
namespace {
const std::vector<ov::AnyMap> inproperties = {
{ov::num_streams(-100)},
};
const std::vector<ov::AnyMap> hetero_inproperties = {
{ov::num_streams(-100)},
};
const std::vector<ov::AnyMap> multi_inproperties = {
{ov::num_streams(-100)},
};
const std::vector<ov::AnyMap> auto_inproperties = {
{ov::num_streams(-100)},
};
const std::vector<ov::AnyMap> auto_batch_inproperties = {
{ov::num_streams(-100)},
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelPropertiesIncorrectTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(inproperties)),
OVCompiledModelPropertiesIncorrectTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelPropertiesIncorrectTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(hetero_inproperties)),
OVCompiledModelPropertiesIncorrectTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVCompiledModelPropertiesIncorrectTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multi_inproperties)),
OVCompiledModelPropertiesIncorrectTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVCompiledModelPropertiesIncorrectTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_AUTO),
::testing::ValuesIn(auto_inproperties)),
OVCompiledModelPropertiesIncorrectTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVCompiledModelPropertiesIncorrectTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_BATCH),
::testing::ValuesIn(auto_batch_inproperties)),
OVCompiledModelPropertiesIncorrectTests::getTestCaseName);
#if (defined(__APPLE__) || defined(_WIN32))
auto default_affinity = [] {
auto numaNodes = InferenceEngine::getAvailableNUMANodes();
if (numaNodes.size() > 1) {
return ov::Affinity::NUMA;
} else {
return ov::Affinity::NONE;
}
}();
#else
auto default_affinity = ov::Affinity::CORE;
#endif
const std::vector<ov::AnyMap> default_properties = {
{ov::affinity(default_affinity)},
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelPropertiesDefaultTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(default_properties)),
OVCompiledModelPropertiesDefaultTests::getTestCaseName);
const std::vector<ov::AnyMap> properties = {
{ov::num_streams(ov::NumStreams::AUTO)},
{ov::num_streams(ov::NumStreams::NUMA)},
{ov::num_streams(0), ov::inference_num_threads(1)},
{ov::num_streams(1), ov::inference_num_threads(1)},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};
const std::vector<ov::AnyMap> hetero_properties = {
{ov::device::priorities(CommonTestUtils::DEVICE_CPU), ov::num_streams(ov::NumStreams::AUTO)},
{ov::device::priorities(CommonTestUtils::DEVICE_CPU),
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
};
const std::vector<ov::AnyMap> multi_properties = {
{ov::device::priorities(CommonTestUtils::DEVICE_CPU), ov::num_streams(ov::NumStreams::AUTO)},
{ov::device::priorities(CommonTestUtils::DEVICE_CPU),
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
};
const std::vector<ov::AnyMap> auto_batch_properties = {
{{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_CPU}, ov::num_streams(ov::NumStreams::AUTO)},
{{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
{{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_CPU},
{CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "1"}, ov::num_streams(ov::NumStreams::AUTO)},
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelPropertiesTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(properties)),
OVCompiledModelPropertiesTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelPropertiesTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(hetero_properties)),
OVCompiledModelPropertiesTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVCompiledModelPropertiesTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multi_properties)),
OVCompiledModelPropertiesTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVCompiledModelPropertiesTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_BATCH),
::testing::ValuesIn(auto_batch_properties)),
OVCompiledModelPropertiesTests::getTestCaseName);
} // namespace
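For orientation, the property sets above form the trailing arguments to compile_model(). A minimal sketch of compiling with the same CPU streams/threads properties outside the test harness, using the ov::streams aliases introduced in a later hunk (the model path is a placeholder):

#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // placeholder path
    // the same properties the CPU cases above exercise
    auto compiled = core.compile_model(model, "CPU",
                                       ov::num_streams(ov::streams::AUTO),
                                       ov::inference_num_threads(1));
    return 0;
}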

View File

@ -15,6 +15,8 @@ TEST_P(OVInferRequestPerfCountersTest, CheckOperationInProfilingInfo) {
ASSERT_NO_THROW(profiling_info = req.get_profiling_info()); ASSERT_NO_THROW(profiling_info = req.get_profiling_info());
for (const auto& op : function->get_ops()) { for (const auto& op : function->get_ops()) {
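// Skip Constant ops: they are not expected to appear in profiling info.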
if (!strcmp(op->get_type_info().name, "Constant"))
continue;
auto op_is_in_profiling_info = std::any_of(std::begin(profiling_info), std::end(profiling_info), auto op_is_in_profiling_info = std::any_of(std::begin(profiling_info), std::end(profiling_info),
[&] (const ov::ProfilingInfo& info) { [&] (const ov::ProfilingInfo& info) {
if (info.node_name.find(op->get_friendly_name() + "_") != std::string::npos || info.node_name == op->get_friendly_name()) { if (info.node_name.find(op->get_friendly_name() + "_") != std::string::npos || info.node_name == op->get_friendly_name()) {

View File

@ -126,12 +126,12 @@ TEST(OVClassBasicTest, smoke_SetConfigStreamsNum) {
setGetProperty(value, num_streams); setGetProperty(value, num_streams);
ASSERT_EQ(num_streams, value); ASSERT_EQ(num_streams, value);
num_streams = ov::NumStreams::NUMA; num_streams = ov::streams::NUMA;
setGetProperty(value, num_streams); setGetProperty(value, num_streams);
ASSERT_GT(value, 0); // value has been configured automatically ASSERT_GT(value, 0); // value has been configured automatically
num_streams = ov::NumStreams::AUTO; num_streams = ov::streams::AUTO;
setGetProperty(value, num_streams); setGetProperty(value, num_streams);
ASSERT_GT(value, 0); // value has been configured automatically ASSERT_GT(value, 0); // value has been configured automatically

View File

@ -68,7 +68,6 @@ TEST_P(OVCompiledModelPropertiesTests, canCompileModelWithPropertiesAndCheckGetP
OV_ASSERT_NO_THROW(property = compiled_model.get_property(property_item.first)); OV_ASSERT_NO_THROW(property = compiled_model.get_property(property_item.first));
ASSERT_FALSE(property.empty()); ASSERT_FALSE(property.empty());
std::cout << property_item.first << ":" << property.as<std::string>() << std::endl; std::cout << property_item.first << ":" << property.as<std::string>() << std::endl;
ASSERT_EQ(property, property_item.second);
} }
} }
} }

View File

@ -28,12 +28,12 @@ public:
bool inPlace; bool inPlace;
std::tie(srcDims, inPlace) = obj.param; std::tie(srcDims, inPlace) = obj.param;
std::ostringstream result; std::ostringstream result;
result << "IS:("; result << "IS=(";
for (const auto s : srcDims) for (const auto s : srcDims)
result << s << "."; result << s << ".";
result.seekp(-1, result.cur); result.seekp(-1, result.cur);
result << ")"; result << ")";
result << "_InPlace:" << inPlace; result << "_InPlace=" << inPlace;
return result.str(); return result.str();
} }

View File

@ -22,6 +22,7 @@ log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=s
test_data = get_tests(cmd_params={'i': [os.path.join('ark', 'dev93_10.ark')], test_data = get_tests(cmd_params={'i': [os.path.join('ark', 'dev93_10.ark')],
'm': [os.path.join('wsj', 'FP32', 'wsj_dnn5b.xml')], 'm': [os.path.join('wsj', 'FP32', 'wsj_dnn5b.xml')],
'layout': ["[NC]"],
'bs': [1, 2], 'bs': [1, 2],
'o': ['res_output.ark'], 'o': ['res_output.ark'],
'r': [os.path.join('ark', 'dev93_scores_10.ark')], 'r': [os.path.join('ark', 'dev93_scores_10.ark')],

@ -1 +1 @@
Subproject commit 7032b09d4b8bbd4ae0e616b689a489c04e8954e3 Subproject commit 5903bf21ac77f419c68e6defc851a0608bb44e2a

View File

@ -67,12 +67,19 @@ class IREngine(object):
self.graph.name = xml_root.attrib['name'] if xml_root.attrib.get('name') is not None else None self.graph.name = xml_root.attrib['name'] if xml_root.attrib.get('name') is not None else None
self.graph.inputs_order = []
self.graph.outputs_order = []
# Parse XML # Parse XML
for child in xml_root: for child in xml_root:
if child.tag == 'layers': if child.tag == 'layers':
for layer in child: for layer in child:
layer_id, layer_attrs = self.__load_layer(layer) layer_id, layer_attrs = self.__load_layer(layer)
xml_layers.update({layer_id: layer_attrs}) xml_layers.update({layer_id: layer_attrs})
if layer_attrs['type'] == 'Parameter':
self.graph.inputs_order.append(layer_attrs['name'])
if layer_attrs['type'] == 'Result':
self.graph.outputs_order.append(layer_attrs['name'])
elif child.tag == 'edges': elif child.tag == 'edges':
for edge in child: for edge in child:
xml_edges.append(Edge(edge.attrib['from-layer'], int(edge.attrib['from-port']), xml_edges.append(Edge(edge.attrib['from-layer'], int(edge.attrib['from-port']),

View File

@ -317,6 +317,8 @@ def copy_graph_with_ops(graph: Graph) -> Graph:
new_graph = Graph() new_graph = Graph()
new_graph.stage = 'back' new_graph.stage = 'back'
new_graph.graph = graph.graph new_graph.graph = graph.graph
new_graph.inputs_order = graph.inputs_order
new_graph.outputs_order = graph.outputs_order
node_connections = dict() node_connections = dict()
mapping_of_old_idx_into_new = dict() mapping_of_old_idx_into_new = dict()

View File

@ -140,7 +140,6 @@ def build_graph_for_node(model, input_name, input_shape, node, remove_bias=False
nodes.append((result_name, 'Result', {})) nodes.append((result_name, 'Result', {}))
edges.append((node.name, result_name, {'out': 0, 'in': 0})) edges.append((node.name, result_name, {'out': 0, 'in': 0}))
graph = build_graph(*make_copy_graph_attrs(model, input_name, input_shape), nodes, edges) graph = build_graph(*make_copy_graph_attrs(model, input_name, input_shape), nodes, edges)
graph.ir_v10 = True
# Add the necessary attribute to the new graph # Add the necessary attribute to the new graph
src_node = get_node_by_name(graph, node.name) src_node = get_node_by_name(graph, node.name)

View File

@ -47,11 +47,17 @@ def load_graph(model_config, target_device='ANY'):
raise RuntimeError('Input model bin should link to an existing file. Please, provide a correct path.') raise RuntimeError('Input model bin should link to an existing file. Please, provide a correct path.')
graph_from_ir, meta_data = stdout_redirect(restore_graph_from_ir, xml_path, bin_path) graph_from_ir, meta_data = stdout_redirect(restore_graph_from_ir, xml_path, bin_path)
if graph_from_ir.graph['ir_version'] == 10:
raise AssertionError(
'POT does not support version 10 of IR. '
'Please convert the model with a newer version of OpenVINO '
'or use the POT from OpenVINO 2021.4.2 to work with version 10 of IR.')
orig_graph_from_ir, meta_data = stdout_redirect(restore_graph_from_ir, model_config.model, model_config.weights) orig_graph_from_ir, meta_data = stdout_redirect(restore_graph_from_ir, model_config.model, model_config.weights)
meta_data['quantization_parameters'] = model_config.quantization_info meta_data['quantization_parameters'] = model_config.quantization_info
graph_from_ir.meta_data = meta_data graph_from_ir.meta_data = meta_data
graph_from_ir.ir_v10 = True
graph_from_ir.graph['cmd_params'] = orig_graph_from_ir.graph['cmd_params'] graph_from_ir.graph['cmd_params'] = orig_graph_from_ir.graph['cmd_params']
remove_converts(graph_from_ir) remove_converts(graph_from_ir)
model_preprocessing(graph_from_ir) model_preprocessing(graph_from_ir)

View File

@ -67,6 +67,12 @@ class CompressedModel:
ge.add_fullname_for_nodes(model['model']) ge.add_fullname_for_nodes(model['model'])
def _from_graph(self, graph): def _from_graph(self, graph):
if graph.graph['ir_version'] == 10:
raise AssertionError(
'POT does not support version 10 of IR. '
'Please convert the model with a newer version of OpenVINO '
'or use the POT from OpenVINO 2021.4.2 to work with version 10 of IR.')
ge.add_fullname_for_nodes(graph) ge.add_fullname_for_nodes(graph)
self._models.append({'model': graph}) self._models.append({'model': graph})
self._is_cascade = False self._is_cascade = False

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8e8c8e6ba81cc8fc66217e1408677175b0a094c34c071835c06412495d536256
size 2130036

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1 version https://git-lfs.github.com/spec/v1
oid sha256:7ed7eef3415ccb1cd1b8606bdb8348e124979bb78b02e50badda92aa2cd3dc4d oid sha256:aa2be3155a781cdd99871f8c677f429d37df4921385a2bd7568116cec0634291
size 222 size 152

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:43d82f63bfea7d3ae9d641169b9693596199d78171e94b918e09497a4f08a012
size 34

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:70bd9834efa16e7ffa4ab52594b57bb4f813e4cf6b5f01d62117ebe6d8f22711
size 2131972

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5d678641136d8356fca47d4db4f39cb98e59426a9e1113754811d9e12e47463a
size 17387

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0af1173b44db9f52b9878032b87679cc16521ec0c21fc413228e0854da4efed0
size 526864

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1 version https://git-lfs.github.com/spec/v1
oid sha256:de67711db3e1fb404a8b61be29954ab5b336d76b8cf7d3c5c98f7b9faab7c6e4 oid sha256:ffd87d843342dfcf87dc75ccf0d39acd929906e042a5960e155ff8208a3359c0
size 283 size 224

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a24413580157bb51a374150ff5597a362663794f394a925d906492a6cc61f4d
size 533495

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eafca25515ecb19d073a96fbaa234c5c9004bf9646419bb1d3f9c0faa25c19c2
size 12739

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:97d00430e1ea79257307daf825cfb01a68631a6cd4af2fdb7126ef526d40a660
size 22360

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1 version https://git-lfs.github.com/spec/v1
oid sha256:54e12e3df889dff95bfbe949a8e98facdfd5464bb746686ecc7b48d0da78e154 oid sha256:91b323d1ac69cbf8ee95f26aea9dcb06e6ea31144d62cdf317f50d8c9242d112
size 202 size 146

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:43d82f63bfea7d3ae9d641169b9693596199d78171e94b918e09497a4f08a012
size 34

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3ef0c63af492bd90c0ff94bacf8f3e0098a29fd10ae3692c1624e14d3c2af671
size 50014

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f3dcb45d8f69aa643c3e8038b92fdee885418d17d052470b9ef921039a8c3822
size 10919

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:97d00430e1ea79257307daf825cfb01a68631a6cd4af2fdb7126ef526d40a660
size 22360

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1 version https://git-lfs.github.com/spec/v1
oid sha256:58d6366c3dbb3c1a0f4f9c5ce8c4569260a0d65e355dc0d16b8aec3432a72d7a oid sha256:8ca2fc29a319b713835d770b26946193070f1ab9184a16c6305a924fd683c3d2
size 217 size 157

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:43d82f63bfea7d3ae9d641169b9693596199d78171e94b918e09497a4f08a012
size 34

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d1baa52871e25d66ef96d7f8c67160d8816e682c36158f6f2215ef1a8b473351
size 50044

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b816b8316b12511fa5a1b46e637179c86677b9b2b8608b671e6fa7c10c68484e
size 11207

Some files were not shown because too many files have changed in this diff.