Update benchmark_app to pass precision via command line (#4318)

* Update benchmark_app to pass precision via command line

* Update vpu_perfcheck

* Update python benchmark_app to support setting precision from cmd

* Review comments

* Address more review comments

* Fixes after rebase
This commit is contained in:
Artemy Skrebkov 2021-03-26 15:07:59 +03:00 committed by GitHub
parent 63d7516f1f
commit 76cf1b2b65
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
44 changed files with 559 additions and 355 deletions

View File

@ -136,7 +136,7 @@ endif()
# Developer package
#
openvino_developer_export_targets(COMPONENT openvino_common TARGETS format_reader)
openvino_developer_export_targets(COMPONENT openvino_common TARGETS format_reader gflags ie_samples_utils)
openvino_developer_export_targets(COMPONENT ngraph TARGETS ${NGRAPH_LIBRARIES})
# for Template plugin

View File

@ -143,6 +143,10 @@ else()
find_package(InferenceEngine 2.1 REQUIRED)
endif()
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/common/utils")
add_subdirectory(common/utils)
endif()
# format reader must be added after find_package(InferenceEngine) to get
# exactly the same OpenCV_DIR path which was used for the InferenceEngine build
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/common/format_reader")

View File

@ -8,5 +8,6 @@ file (GLOB HDR ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
ie_add_sample(NAME benchmark_app
SOURCES ${SRC}
HEADERS ${HDR}
DEPENDENCIES format_reader
DEPENDENCIES format_reader ie_samples_utils
OPENCV_DEPENDENCIES core)

View File

@ -105,6 +105,9 @@ Options:
-nthreads "<integer>" Optional. Number of threads to use for inference on the CPU (including HETERO and MULTI cases).
-enforcebf16 Optional. Enforcing of floating point operations execution in bfloat16 precision on platforms with native bfloat16 support. By default, this key sets "true" on platforms with native bfloat16 support and "false" for other platforms. Use "-enforcebf16=false" to disable this feature.
-pin "YES"/"NO"/"NUMA" Optional. Enable threads->cores ("YES", default), threads->(NUMA)nodes ("NUMA") or completely disable ("NO") CPU threads pinning for CPU-involved inference.
-ip "U8"/"FP16"/"FP32" Optional. Specifies precision for all input layers of the network.
-op "U8"/"FP16"/"FP32" Optional. Specifies precision for all output layers of the network.
-iop Optional. Specifies precision for input and output layers by name. Example: -iop "input:FP16, output:FP16". Notice that quotes are required. Overwrites precision from ip and op options for specified layers.
Statistics dumping options:

View File

@ -108,6 +108,18 @@ static const char layout_message[] = "Optional. Prompts how network layouts shou
// @brief message for quantization bits
static const char gna_qb_message[] = "Optional. Weight bits for quantization: 8 or 16 (default)";
static constexpr char inputs_precision_message[] =
"Optional. Specifies precision for all input layers of the network.";
static constexpr char outputs_precision_message[] =
"Optional. Specifies precision for all output layers of the network.";
static constexpr char iop_message[] =
"Optional. Specifies precision for input and output layers by name.\n"
" Example: -iop \"input:FP16, output:FP16\".\n"
" Notice that quotes are required.\n"
" Overwrites precision from ip and op options for specified layers.";
/// @brief Define flag for showing help message <br>
DEFINE_bool(h, false, help_message);
@ -198,6 +210,18 @@ DEFINE_string(layout, "", layout_message);
/// @brief Define flag for quantization bits (default 16)
DEFINE_int32(qb, 16, gna_qb_message);
/// @brief Specify precision for all input layers of the network
DEFINE_string(ip, "", inputs_precision_message);
/// @brief Specify precision for all output layers of the network
DEFINE_string(op, "", outputs_precision_message);
/// @brief Specify precision for input and output layers by name.\n"
/// Example: -iop \"input:FP16, output:FP16\".\n"
/// Notice that quotes are required.\n"
/// Overwrites precision from ip and op options for specified layers.";
DEFINE_string(iop, "", iop_message);
/**
* @brief This function show a help message
*/
@ -237,4 +261,7 @@ static void showUsage() {
std::cout << " -load_config " << load_config_message << std::endl;
#endif
std::cout << " -qb " << gna_qb_message << std::endl;
std::cout << " -ip <value> " << inputs_precision_message << std::endl;
std::cout << " -op <value> " << outputs_precision_message << std::endl;
std::cout << " -iop \"<value>\" " << iop_message << std::endl;
}

View File

@ -67,6 +67,14 @@ bool ParseAndCheckCommandLine(int argc, char *argv[]) {
throw std::logic_error("only " + std::string(detailedCntReport) + " report type is supported for MULTI device");
}
bool isNetworkCompiled = fileExt(FLAGS_m) == "blob";
bool isPrecisionSet = !(FLAGS_ip.empty() && FLAGS_op.empty() && FLAGS_iop.empty());
if (isNetworkCompiled && isPrecisionSet) {
std::string err = std::string("Cannot set precision for a compiled network. ") +
std::string("Please re-compile your network with required precision using compile_tool");
throw std::logic_error(err);
}
return true;
}
@ -380,6 +388,10 @@ int main(int argc, char *argv[]) {
item.second->setPrecision(app_inputs_info.at(item.first).precision);
}
}
processPrecision(cnnNetwork, FLAGS_ip, FLAGS_op, FLAGS_iop);
printInputAndOutputsInfo(cnnNetwork);
// ----------------- 7. Loading the model to the device --------------------------------------------------------
next_step();
startTime = Time::now();

View File

@ -5,4 +5,4 @@
ie_add_sample(NAME classification_sample_async
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
HEADERS classification_sample_async.h
DEPENDENCIES format_reader)
DEPENDENCIES format_reader ie_samples_utils)

View File

@ -25,7 +25,7 @@ find_package(OpenCV COMPONENTS core imgproc imgcodecs QUIET)
if(NOT OpenCV_FOUND)
message(WARNING "OPENCV is disabled or not found, ${TARGET_NAME} will be built without OPENCV support")
else()
target_link_libraries(${TARGET_NAME} PRIVATE ${OpenCV_LIBRARIES})
target_link_libraries(${TARGET_NAME} PRIVATE ${OpenCV_LIBRARIES} ie_samples_utils)
if(UNIX AND NOT APPLE)
# Workaround issue that rpath-link is missing for PRIVATE dependencies
# Fixed in cmake 3.16.0 https://gitlab.kitware.com/cmake/cmake/issues/19556

View File

@ -1,87 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief a header file with common samples functionality
* @file args_helper.hpp
*/
#pragma once
#include <string>
#include <vector>
#include <gflags/gflags.h>
#include <iostream>
#include <sys/stat.h>
#include <samples/slog.hpp>
#ifdef _WIN32
#include <os/windows/w_dirent.h>
#else
#include <dirent.h>
#endif
/**
* @brief This function checks input args and existence of specified files in a given folder
* @param arg path to a file to be checked for existence
* @return files updated vector of verified input files
*/
void readInputFilesArguments(std::vector<std::string> &files, const std::string& arg) {
struct stat sb;
if (stat(arg.c_str(), &sb) != 0) {
slog::warn << "File " << arg << " cannot be opened!" << slog::endl;
return;
}
if (S_ISDIR(sb.st_mode)) {
DIR *dp;
dp = opendir(arg.c_str());
if (dp == nullptr) {
slog::warn << "Directory " << arg << " cannot be opened!" << slog::endl;
return;
}
struct dirent *ep;
while (nullptr != (ep = readdir(dp))) {
std::string fileName = ep->d_name;
if (fileName == "." || fileName == "..") continue;
files.push_back(arg + "/" + ep->d_name);
}
closedir(dp);
} else {
files.push_back(arg);
}
if (files.size() < 20) {
slog::info << "Files were added: " << files.size() << slog::endl;
for (std::string filePath : files) {
slog::info << " " << filePath << slog::endl;
}
} else {
slog::info << "Files were added: " << files.size() << ". Too many to display each of them." << slog::endl;
}
}
/**
* @brief This function find -i/--images key in input args
* It's necessary to process multiple values for single key
* @return files updated vector of verified input files
*/
void parseInputFilesArguments(std::vector<std::string> &files) {
std::vector<std::string> args = gflags::GetArgvs();
bool readArguments = false;
for (size_t i = 0; i < args.size(); i++) {
if (args.at(i) == "-i" || args.at(i) == "--images") {
readArguments = true;
continue;
}
if (!readArguments) {
continue;
}
if (args.at(i).c_str()[0] == '-') {
break;
}
readInputFilesArguments(files, args.at(i));
}
}

View File

@ -0,0 +1,19 @@
# Copyright (C) Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME "ie_samples_utils")

# NOTE(review): file(GLOB_RECURSE) will not notice newly added sources until
# CMake is re-run; an explicit source list (or CONFIGURE_DEPENDS on 3.12+) is
# the usual recommendation.
file(GLOB_RECURSE SOURCES "*.cpp" "*.hpp")
# Mirror the on-disk layout in IDE source groups.
source_group(TREE ${CMAKE_CURRENT_SOURCE_DIR} FILES ${SOURCES})

# Static helper library shared by the Inference Engine samples.
add_library(${TARGET_NAME} STATIC ${SOURCES})
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "src")

# Headers are laid out as include/samples/..., so consumers write
# #include <samples/...>.
target_include_directories(${TARGET_NAME}
    PUBLIC
        "${CMAKE_CURRENT_SOURCE_DIR}/include")

# PUBLIC: linking samples also need the IE and gflags usage requirements.
target_link_libraries(${TARGET_NAME}
    PUBLIC
        IE::inference_engine
        gflags)

View File

@ -0,0 +1,36 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief a header file with common samples functionality
* @file args_helper.hpp
*/
#pragma once
#include <string>
#include <vector>
#include <inference_engine.hpp>
/**
* @brief This function checks input args and existence of specified files in a given folder
* @param arg path to a file to be checked for existence
* @return files updated vector of verified input files
*/
void readInputFilesArguments(std::vector<std::string> &files, const std::string& arg);
/**
* @brief This function find -i/--images key in input args
* It's necessary to process multiple values for single key
* @return files updated vector of verified input files
*/
void parseInputFilesArguments(std::vector<std::string> &files);
void processPrecision(InferenceEngine::CNNNetwork& network, const std::string &ip, const std::string &op, const std::string &iop);
void processLayout(InferenceEngine::CNNNetwork& network, const std::string& il, const std::string& ol, const std::string& iol);
void printInputAndOutputsInfo(const InferenceEngine::CNNNetwork& network);

View File

@ -6,6 +6,8 @@
* @brief a header file with output classification results
* @file classification_results.hpp
*/
#pragma once
#include <string>
#include <vector>
#include <iostream>

View File

@ -1129,3 +1129,13 @@ inline void showAvailableDevices() {
}
std::cout << std::endl;
}
/**
* @brief Parse a text config file. The file must have the following format (with a space as the delimiter):
* CONFIG_NAME1 CONFIG_VALUE1
* CONFIG_NAME2 CONFIG_VALUE2
*
* @param configName - filename for a file with config options
* @param comment - lines starting with symbol `comment` are skipped
*/
std::map<std::string, std::string> parseConfig(const std::string &configName, char comment = '#');

View File

@ -0,0 +1,310 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "samples/args_helper.hpp"
#include <gflags/gflags.h>
#include <iostream>
#include <sys/stat.h>
#include <samples/slog.hpp>
#ifdef _WIN32
#include <samples/os/windows/w_dirent.h>
#else
#include <dirent.h>
#endif
/**
 * @brief Collects input file paths from @p arg into @p files.
 *        A plain file is appended as-is; a directory contributes each direct
 *        entry (no recursion). Problems are logged as warnings, not thrown.
 * @param files accumulated list of input paths (appended to, never cleared)
 * @param arg   path to a file or to a directory of files
 */
void readInputFilesArguments(std::vector<std::string> &files, const std::string& arg) {
    struct stat sb;
    if (stat(arg.c_str(), &sb) != 0) {
        slog::warn << "File " << arg << " cannot be opened!" << slog::endl;
        return;
    }
    if (S_ISDIR(sb.st_mode)) {
        // RAII closer so every exit path runs closedir().
        struct CloseDir {
            void operator()(DIR* d) const noexcept {
                if (d) {
                    closedir(d);
                }
            }
        };
        // NOTE(review): std::unique_ptr needs <memory>, which is not among the
        // includes visible here -- presumably pulled in transitively; confirm.
        using Dir = std::unique_ptr<DIR, CloseDir>;
        Dir dp(opendir(arg.c_str()));
        if (dp == nullptr) {
            slog::warn << "Directory " << arg << " cannot be opened!" << slog::endl;
            return;
        }
        struct dirent *ep;
        // readdir() order is filesystem-dependent; entries are not sorted.
        while (nullptr != (ep = readdir(dp.get()))) {
            std::string fileName = ep->d_name;
            if (fileName == "." || fileName == "..") continue;  // skip self/parent links
            files.push_back(arg + "/" + ep->d_name);
        }
    } else {
        files.push_back(arg);
    }
    // The listing cap applies to the *total* accumulated vector, not only to
    // the files added by this call.
    if (files.size() < 20) {
        slog::info << "Files were added: " << files.size() << slog::endl;
        for (const auto& filePath : files) {
            slog::info << " " << filePath << slog::endl;
        }
    } else {
        slog::info << "Files were added: " << files.size() << ". Too many to display each of them." << slog::endl;
    }
}
/**
 * @brief Finds the -i/--images key in the process arguments and feeds every
 *        following value (until the next "-"-prefixed argument) to
 *        readInputFilesArguments().
 * @param files updated vector of verified input files
 */
void parseInputFilesArguments(std::vector<std::string> &files) {
    std::vector<std::string> args = gflags::GetArgvs();
    const auto is_image_arg = [](const std::string& s) { return s == "-i" || s == "--images"; };
    // Guard against empty strings: calling front() on an empty std::string is
    // undefined behavior (an empty argv entry is unusual but possible).
    const auto is_arg = [](const std::string& s) { return !s.empty() && s.front() == '-'; };
    const auto img_start = std::find_if(begin(args), end(args), is_image_arg);
    if (img_start == end(args)) {
        return;
    }
    const auto img_begin = std::next(img_start);
    const auto img_end = std::find_if(img_begin, end(args), is_arg);
    for (auto img = img_begin; img != img_end; ++img) {
        readInputFilesArguments(files, *img);
    }
}
namespace {
/**
 * @brief Splits @p str on @p delim, dropping empty pieces.
 * @return the non-empty tokens in order; empty input yields an empty vector.
 */
std::vector<std::string> splitStringList(const std::string& str, char delim) {
    std::vector<std::string> tokens;
    if (str.empty()) {
        return tokens;
    }
    std::istringstream stream(str);
    for (std::string token; std::getline(stream, token, delim);) {
        // Consecutive delimiters and trailing delimiters produce empty
        // tokens, which are intentionally discarded.
        if (!token.empty()) {
            tokens.push_back(std::move(token));
        }
    }
    return tokens;
}
/**
 * @brief Parses a "key1:value1,key2:value2" string into a map.
 *        All whitespace is stripped first, so "a : b, c : d" parses the same
 *        as "a:b,c:d". Later duplicates of a key overwrite earlier ones.
 * @throws std::invalid_argument for a piece that is not exactly key:value.
 */
std::map<std::string, std::string> parseArgMap(std::string argMap) {
    argMap.erase(std::remove_if(argMap.begin(), argMap.end(), ::isspace), argMap.end());
    std::map<std::string, std::string> parsed;
    for (const auto& pair : splitStringList(argMap, ',')) {
        const auto keyValue = splitStringList(pair, ':');
        if (keyValue.size() != 2) {
            throw std::invalid_argument("Invalid key/value pair " + pair + ". Expected <layer_name>:<value>");
        }
        parsed[keyValue[0]] = keyValue[1];
    }
    return parsed;
}
using supported_precisions_t = std::unordered_map<std::string, InferenceEngine::Precision>;
/**
 * @brief Case-insensitive lookup of a precision name in @p supported_precisions.
 * @throws std::logic_error if the name is not in the table.
 */
InferenceEngine::Precision getPrecision(std::string value,
                                        const supported_precisions_t& supported_precisions) {
    // Precision names are matched case-insensitively: normalize first.
    for (auto& ch : value) {
        ch = static_cast<char>(::toupper(static_cast<unsigned char>(ch)));
    }
    const auto it = supported_precisions.find(value);
    if (it == supported_precisions.end()) {
        throw std::logic_error("\"" + value + "\"" + " is not a valid precision");
    }
    return it->second;
}
/**
 * @brief Resolves a user-supplied precision name (e.g. "FP16") to the
 *        corresponding InferenceEngine::Precision value. Matching is
 *        case-insensitive (the two-argument overload upper-cases the name).
 * @throws std::logic_error for names outside this table.
 */
InferenceEngine::Precision getPrecision(const std::string& value) {
    static const supported_precisions_t supported_precisions = {
        { "FP32", InferenceEngine::Precision::FP32 },
        { "FP16", InferenceEngine::Precision::FP16 },
        { "BF16", InferenceEngine::Precision::BF16 },
        { "U64", InferenceEngine::Precision::U64 },
        { "I64", InferenceEngine::Precision::I64 },
        { "U32", InferenceEngine::Precision::U32 },
        { "I32", InferenceEngine::Precision::I32 },
        { "U16", InferenceEngine::Precision::U16 },
        { "I16", InferenceEngine::Precision::I16 },
        { "U8", InferenceEngine::Precision::U8 },
        { "I8", InferenceEngine::Precision::I8 },
        { "BOOL", InferenceEngine::Precision::BOOL },
    };
    return getPrecision(value, supported_precisions);
}
/**
 * @brief Applies per-layer precisions given as "layer:precision" pairs (the
 *        -iop option) to the network's input/output info.
 * @throws std::logic_error if a named layer is neither an input nor an output.
 */
void setPrecisions(const InferenceEngine::CNNNetwork& network, const std::string &iop) {
    const auto user_precisions_map = parseArgMap(iop);
    // The info maps appear to hold handles, so mutating these local copies
    // presumably updates the network itself -- TODO confirm against the
    // CNNNetwork API.
    auto inputs = network.getInputsInfo();
    auto outputs = network.getOutputsInfo();
    for (auto&& item : user_precisions_map) {
        const auto& layer_name = item.first;
        const auto& user_precision = item.second;
        // A name may refer to an input or an output; inputs take priority if
        // (unexpectedly) both maps contain the same name.
        const auto input = inputs.find(layer_name);
        const auto output = outputs.find(layer_name);
        if (input != inputs.end()) {
            input->second->setPrecision(getPrecision(user_precision));
        } else if (output != outputs.end()) {
            output->second->setPrecision(getPrecision(user_precision));
        } else {
            throw std::logic_error(layer_name + " is not an input neither output");
        }
    }
}
} // namespace
/**
 * @brief Applies the command-line precision options to the network.
 * @param ip  precision for every input layer (empty = leave unchanged)
 * @param op  precision for every output layer (empty = leave unchanged)
 * @param iop per-layer "name:precision" pairs; applied last so they override
 *            ip/op for the named layers
 */
void processPrecision(InferenceEngine::CNNNetwork& network, const std::string &ip, const std::string &op,
                      const std::string &iop) {
    if (!ip.empty()) {
        const auto precision = getPrecision(ip);
        auto inputsInfo = network.getInputsInfo();
        for (auto&& input : inputsInfo) {
            input.second->setPrecision(precision);
        }
    }
    if (!op.empty()) {
        const auto precision = getPrecision(op);
        auto outputsInfo = network.getOutputsInfo();
        for (auto&& output : outputsInfo) {
            output.second->setPrecision(precision);
        }
    }
    if (!iop.empty()) {
        setPrecisions(network, iop);
    }
}
namespace {
using supported_layouts_t = std::unordered_map<std::string, InferenceEngine::Layout>;
using matchLayoutToDims_t = std::unordered_map<size_t, size_t>;
/**
 * @brief Case-insensitive lookup of a layout name in @p supported_layouts.
 * @throws std::logic_error if the name is not in the table.
 */
InferenceEngine::Layout getLayout(std::string value,
                                  const supported_layouts_t& supported_layouts) {
    // Layout names are matched case-insensitively: normalize first.
    std::transform(value.begin(), value.end(), value.begin(), ::toupper);
    const auto layout = supported_layouts.find(value);
    if (layout == supported_layouts.end()) {
        throw std::logic_error("\"" + value + "\"" + " is not a valid layout");
    }
    return layout->second;
}
/**
 * @brief Resolves a user-supplied layout name (e.g. "NHWC") to the
 *        corresponding InferenceEngine::Layout value; matching is
 *        case-insensitive.
 * @throws std::logic_error for names outside this table.
 */
InferenceEngine::Layout getLayout(const std::string& value) {
    static const supported_layouts_t supported_layouts = {
        { "NCDHW", InferenceEngine::Layout::NCDHW },
        { "NDHWC", InferenceEngine::Layout::NDHWC },
        { "NCHW", InferenceEngine::Layout::NCHW },
        { "NHWC", InferenceEngine::Layout::NHWC },
        { "CHW", InferenceEngine::Layout::CHW },
        { "HWC", InferenceEngine::Layout::HWC},
        { "NC", InferenceEngine::Layout::NC },
        { "C", InferenceEngine::Layout::C },
    };
    return getLayout(value, supported_layouts);
}
/**
 * @brief Checks whether @p layout describes a tensor of rank @p dimension.
 * @throws std::logic_error for a layout with no known rank.
 */
bool isMatchLayoutToDims(InferenceEngine::Layout layout, size_t dimension) {
    // Rank implied by each supported layout.
    static const matchLayoutToDims_t matchLayoutToDims = {
        {static_cast<size_t>(InferenceEngine::Layout::NCDHW), 5 },
        {static_cast<size_t>(InferenceEngine::Layout::NDHWC), 5 },
        {static_cast<size_t>(InferenceEngine::Layout::NCHW), 4 },
        {static_cast<size_t>(InferenceEngine::Layout::NHWC), 4 },
        {static_cast<size_t>(InferenceEngine::Layout::CHW), 3 },
        // HWC is accepted by getLayout() but was missing from this table, so a
        // user-requested HWC layout always threw "Layout is not valid.".
        {static_cast<size_t>(InferenceEngine::Layout::HWC), 3 },
        {static_cast<size_t>(InferenceEngine::Layout::NC), 2 },
        {static_cast<size_t>(InferenceEngine::Layout::C), 1 }
    };
    const auto dims = matchLayoutToDims.find(static_cast<size_t>(layout));
    if (dims == matchLayoutToDims.end()) {
        throw std::logic_error("Layout is not valid.");
    }
    return dimension == dims->second;
}
/**
 * @brief Applies per-layer layouts given as "layer:layout" pairs (the -iol
 *        option). A rank mismatch here is an error, not a silent skip.
 * @throws std::logic_error if a layout does not fit the layer's rank or a
 *         named layer is neither an input nor an output.
 */
void setLayouts(const InferenceEngine::CNNNetwork& network, const std::string iol) {
    // NOTE(review): iol is taken by value; const std::string& would avoid a copy.
    const auto user_layouts_map = parseArgMap(iol);
    // The info maps appear to hold handles, so mutating these local copies
    // presumably updates the network itself -- TODO confirm.
    auto inputs = network.getInputsInfo();
    auto outputs = network.getOutputsInfo();
    for (auto&& item : user_layouts_map) {
        const auto& layer_name = item.first;
        const auto& user_layout = getLayout(item.second);
        const auto input = inputs.find(layer_name);
        const auto output = outputs.find(layer_name);
        if (input != inputs.end()) {
            // Reject layouts whose implied rank differs from the tensor's rank.
            if (!isMatchLayoutToDims(user_layout, input->second->getTensorDesc().getDims().size())) {
                throw std::logic_error(item.second + " layout is not applicable to " + layer_name);
            }
            input->second->setLayout(user_layout);
        } else if (output != outputs.end()) {
            if (!isMatchLayoutToDims(user_layout, output->second->getTensorDesc().getDims().size())) {
                throw std::logic_error(item.second + " layout is not applicable to " + layer_name);
            }
            output->second->setLayout(user_layout);
        } else {
            throw std::logic_error(layer_name + " is not an input neither output");
        }
    }
}
} // namespace
/**
 * @brief Applies the command-line layout options to the network.
 * @param il  layout for all inputs; layers whose rank does not match the
 *            layout are silently left unchanged (so e.g. NCHW touches only
 *            the 4D inputs)
 * @param ol  layout for all outputs, with the same skip-on-mismatch rule
 * @param iol per-layer "name:layout" pairs; applied last so they override
 *            il/ol -- note a rank mismatch there throws instead of skipping
 */
void processLayout(InferenceEngine::CNNNetwork& network, const std::string& il, const std::string& ol, const std::string& iol) {
    if (!il.empty()) {
        const auto layout = getLayout(il);
        for (auto&& layer : network.getInputsInfo()) {
            if (isMatchLayoutToDims(layout, layer.second->getTensorDesc().getDims().size())) {
                layer.second->setLayout(layout);
            }
        }
    }
    if (!ol.empty()) {
        const auto layout = getLayout(ol);
        for (auto&& layer : network.getOutputsInfo()) {
            if (isMatchLayoutToDims(layout, layer.second->getTensorDesc().getDims().size())) {
                layer.second->setLayout(layout);
            }
        }
    }
    if (!iol.empty()) {
        setLayouts(network, iol);
    }
}
/**
 * @brief Prints name, precision and layout of every network input and output
 *        to stdout.
 */
void printInputAndOutputsInfo(const InferenceEngine::CNNNetwork& network) {
    std::cout << "Network inputs:" << std::endl;
    const auto inputsInfo = network.getInputsInfo();
    for (const auto& input : inputsInfo) {
        std::cout << " " << input.first << " : " << input.second->getPrecision() << " / " << input.second->getLayout() << std::endl;
    }
    std::cout << "Network outputs:" << std::endl;
    const auto outputsInfo = network.getOutputsInfo();
    for (const auto& output : outputsInfo) {
        std::cout << " " << output.first << " : " << output.second->getPrecision() << " / " << output.second->getLayout() << std::endl;
    }
}

View File

@ -0,0 +1,24 @@
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "samples/common.hpp"

#include <fstream>
#include <map>
#include <sstream>
#include <string>
/**
 * @brief Parses a text config file of "NAME VALUE" lines into a map.
 *        Lines whose first token starts with @p comment are skipped, lines
 *        without a value token are ignored, and a missing/unreadable file
 *        yields an empty map (best effort, no error reported).
 * @param configName path to the config file
 * @param comment    lines starting with this character are skipped
 * @return map of option name -> option value
 */
std::map<std::string, std::string> parseConfig(const std::string &configName, char comment) {
    std::map<std::string, std::string> config = {};
    std::ifstream file(configName);
    if (!file.is_open()) {
        return config;
    }
    // Parse line by line. The previous token-based loop (file >> key >> value)
    // paired tokens across line boundaries, so a comment line containing more
    // than two words leaked its extra words into the map as bogus pairs.
    std::string line;
    while (std::getline(file, line)) {
        std::istringstream tokens(line);
        std::string key, value;
        if (!(tokens >> key) || key.empty() || key[0] == comment) {
            continue;  // blank or comment line
        }
        if (!(tokens >> value)) {
            continue;  // malformed line without a value: skip instead of silently aborting the parse
        }
        config[key] = value;
    }
    return config;
}

View File

@ -4,4 +4,6 @@
ie_add_sample(NAME hello_classification
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
DEPENDENCIES ie_samples_utils
OPENCV_DEPENDENCIES core imgcodecs)

View File

@ -3,4 +3,5 @@
#
ie_add_sample(NAME hello_nv12_input_classification
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp")
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
DEPENDENCIES ie_samples_utils)

View File

@ -18,7 +18,7 @@
#include <sys/stat.h>
#ifdef _WIN32
#include <os/windows/w_dirent.h>
#include <samples/os/windows/w_dirent.h>
#else
#include <dirent.h>
#endif

View File

@ -3,4 +3,5 @@
#
ie_add_sample(NAME hello_query_device
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp")
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
DEPENDENCIES ie_samples_utils)

View File

@ -5,6 +5,7 @@
ie_add_sample(NAME hello_reshape_ssd
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/reshape_ssd_extension.hpp"
DEPENDENCIES ie_samples_utils
OPENCV_DEPENDENCIES core imgproc imgcodecs)
find_package(ngraph REQUIRED)

View File

@ -10,7 +10,7 @@ file (GLOB MAIN_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.h)
ie_add_sample(NAME ngraph_function_creation_sample
SOURCES ${MAIN_SRC}
HEADERS ${MAIN_HEADERS}
DEPENDENCIES format_reader)
DEPENDENCIES format_reader ie_samples_utils)
find_package(ngraph REQUIRED)
target_link_libraries(${TARGET_NAME} PRIVATE ${NGRAPH_LIBRARIES})

View File

@ -5,7 +5,7 @@
ie_add_sample(NAME object_detection_sample_ssd
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/object_detection_sample_ssd.h"
DEPENDENCIES format_reader)
DEPENDENCIES format_reader ie_samples_utils)
find_package(ngraph REQUIRED)
target_link_libraries(object_detection_sample_ssd PRIVATE ${NGRAPH_LIBRARIES})

View File

@ -18,7 +18,6 @@
#include <samples/slog.hpp>
#include <samples/args_helper.hpp>
#include <vpu/vpu_tools_common.hpp>
#include <vpu/vpu_plugin_config.hpp>
#include "object_detection_sample_ssd.h"

View File

@ -4,4 +4,5 @@
ie_add_sample(NAME speech_sample
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/speech_sample.hpp")
HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/speech_sample.hpp"
DEPENDENCIES ie_samples_utils)

View File

@ -5,4 +5,4 @@
ie_add_sample(NAME style_transfer_sample
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/style_transfer_sample.h"
DEPENDENCIES format_reader)
DEPENDENCIES format_reader ie_samples_utils)

View File

@ -29,7 +29,7 @@ function(add_helpers target_name)
# TODO: eliminate dependency on samples
target_include_directories(${target_name} PUBLIC
"${IE_MAIN_SOURCE_DIR}/samples/common/os/windows")
"${IE_MAIN_SOURCE_DIR}/samples/common/utils/include/samples/os/windows")
set_property(TARGET ${target_name} PROPERTY COMPILE_PDB_NAME ${target_name})

View File

@ -133,7 +133,9 @@ Options:
graph information serialized.
-pc [PERF_COUNTS], --perf_counts [PERF_COUNTS]
Optional. Report performance counters.
-ip "U8"/"FP16"/"FP32" Optional. Specifies precision for all input layers of the network.
-op "U8"/"FP16"/"FP32" Optional. Specifies precision for all output layers of the network.
-iop Optional. Specifies precision for input and output layers by name. Example: -iop "input:FP16, output:FP16". Notice that quotes are required. Overwrites precision from ip and op options for specified layers.
```
Running the application with the empty list of options yields the usage message given above and an error message.

View File

@ -11,7 +11,6 @@ file(GLOB SRCS
add_executable(${TARGET_NAME} ${SRCS})
target_include_directories(${TARGET_NAME} SYSTEM PRIVATE
${IE_MAIN_SOURCE_DIR}/samples/common
${IE_MAIN_SOURCE_DIR}/include
${IE_MAIN_SOURCE_DIR}/src/vpu/graph_transformer/include
${IE_MAIN_SOURCE_DIR}/src/vpu/common/include
@ -27,6 +26,7 @@ endif()
target_link_libraries(${TARGET_NAME} PRIVATE
inference_engine
gflags
ie_samples_utils
)
set_target_properties(${TARGET_NAME} PROPERTIES

View File

@ -18,7 +18,9 @@
#include <vpu/vpu_plugin_config.hpp>
#include <vpu/private_plugin_config.hpp>
#include <vpu/utils/string.hpp>
#include "samples/common.hpp"
#include "samples/args_helper.hpp"
static constexpr char help_message[] =
"Optional. Print the usage message.";
@ -208,106 +210,6 @@ IE_SUPPRESS_DEPRECATED_END
return config;
}
static std::map<std::string, std::string> parseArgMap(std::string argMap) {
argMap.erase(std::remove_if(argMap.begin(), argMap.end(), ::isspace), argMap.end());
std::vector<std::string> pairs;
vpu::splitStringList(argMap, pairs, ',');
std::map<std::string, std::string> parsedMap;
for (auto&& pair : pairs) {
std::vector<std::string> keyValue;
vpu::splitStringList(pair, keyValue, ':');
if (keyValue.size() != 2) {
throw std::invalid_argument("Invalid key/value pair " + pair + ". Expected <layer_name>:<value>");
}
parsedMap[keyValue[0]] = keyValue[1];
}
return parsedMap;
}
using supported_precisions_t = std::unordered_map<std::string, InferenceEngine::Precision>;
using supported_layouts_t = std::unordered_map<std::string, InferenceEngine::Layout>;
using matchLayoutToDims_t = std::unordered_map<size_t, size_t>;
static InferenceEngine::Layout getLayout(std::string value,
const supported_layouts_t& supported_layouts) {
std::transform(value.begin(), value.end(), value.begin(), ::toupper);
const auto layout = supported_layouts.find(value);
if (layout == supported_layouts.end()) {
throw std::logic_error("\"" + value + "\"" + " is not a valid layout");
}
return layout->second;
}
static InferenceEngine::Layout getLayout(const std::string& value) {
static const supported_layouts_t supported_layouts = {
{ "NCDHW", InferenceEngine::Layout::NCDHW },
{ "NDHWC", InferenceEngine::Layout::NDHWC },
{ "NCHW", InferenceEngine::Layout::NCHW },
{ "NHWC", InferenceEngine::Layout::NHWC },
{ "CHW", InferenceEngine::Layout::CHW },
{ "NC", InferenceEngine::Layout::NC },
{ "C", InferenceEngine::Layout::C },
};
return getLayout(value, supported_layouts);
}
static bool isMatchLayoutToDims(InferenceEngine::Layout layout, size_t dimension) {
static const matchLayoutToDims_t matchLayoutToDims = {
{static_cast<size_t>(InferenceEngine::Layout::NCDHW), 5 },
{static_cast<size_t>(InferenceEngine::Layout::NDHWC), 5 },
{static_cast<size_t>(InferenceEngine::Layout::NCHW), 4 },
{static_cast<size_t>(InferenceEngine::Layout::NHWC), 4 },
{static_cast<size_t>(InferenceEngine::Layout::CHW), 3 },
{static_cast<size_t>(InferenceEngine::Layout::NC), 2 },
{static_cast<size_t>(InferenceEngine::Layout::C), 1 }
};
const auto dims = matchLayoutToDims.find(static_cast<size_t>(layout));
if (dims == matchLayoutToDims.end()) {
throw std::logic_error("Layout is not valid.");
}
return dimension == dims->second;
}
static InferenceEngine::Precision getPrecision(std::string value,
const supported_precisions_t& supported_precisions) {
std::transform(value.begin(), value.end(), value.begin(), ::toupper);
const auto precision = supported_precisions.find(value);
if (precision == supported_precisions.end()) {
throw std::logic_error("\"" + value + "\"" + " is not a valid precision");
}
return precision->second;
}
static InferenceEngine::Precision getPrecision(const std::string& value) {
static const supported_precisions_t supported_precisions = {
{ "FP32", InferenceEngine::Precision::FP32 },
{ "FP16", InferenceEngine::Precision::FP16 },
{ "BF16", InferenceEngine::Precision::BF16 },
{ "U64", InferenceEngine::Precision::U64 },
{ "I64", InferenceEngine::Precision::I64 },
{ "U32", InferenceEngine::Precision::U32 },
{ "I32", InferenceEngine::Precision::I32 },
{ "U16", InferenceEngine::Precision::U16 },
{ "I16", InferenceEngine::Precision::I16 },
{ "U8", InferenceEngine::Precision::U8 },
{ "I8", InferenceEngine::Precision::I8 },
{ "BOOL", InferenceEngine::Precision::BOOL },
};
return getPrecision(value, supported_precisions);
}
bool isFP16(InferenceEngine::Precision precision) {
return precision == InferenceEngine::Precision::FP16;
}
@ -320,29 +222,6 @@ bool isFloat(InferenceEngine::Precision precision) {
return isFP16(precision) || isFP32(precision);
}
static void setPrecisions(const InferenceEngine::CNNNetwork& network) {
const auto user_precisions_map = parseArgMap(FLAGS_iop);
auto inputs = network.getInputsInfo();
auto outputs = network.getOutputsInfo();
for (auto&& item : user_precisions_map) {
const auto& layer_name = item.first;
const auto& user_precision = item.second;
const auto input = inputs.find(layer_name);
const auto output = outputs.find(layer_name);
if (input != inputs.end()) {
input->second->setPrecision(getPrecision(user_precision));
} else if (output != outputs.end()) {
output->second->setPrecision(getPrecision(user_precision));
} else {
throw std::logic_error(layer_name + " is not an input neither output");
}
}
}
static void setDefaultIO(InferenceEngine::CNNNetwork& network) {
const bool isMYRIAD = FLAGS_d.find("MYRIAD") != std::string::npos;
const bool isVPUX = FLAGS_d.find("VPUX") != std::string::npos;
@ -377,81 +256,6 @@ static void setDefaultIO(InferenceEngine::CNNNetwork& network) {
}
}
static void processPrecisions(InferenceEngine::CNNNetwork& network) {
if (!FLAGS_ip.empty()) {
const auto user_precision = getPrecision(FLAGS_ip);
for (auto&& layer : network.getInputsInfo()) {
layer.second->setPrecision(user_precision);
}
}
if (!FLAGS_op.empty()) {
auto user_precision = getPrecision(FLAGS_op);
for (auto&& layer : network.getOutputsInfo()) {
layer.second->setPrecision(user_precision);
}
}
if (!FLAGS_iop.empty()) {
setPrecisions(network);
}
}
static void setLayouts(const InferenceEngine::CNNNetwork& network) {
const auto user_layouts_map = parseArgMap(FLAGS_iol);
auto inputs = network.getInputsInfo();
auto outputs = network.getOutputsInfo();
for (auto&& item : user_layouts_map) {
const auto& layer_name = item.first;
const auto& user_layout = getLayout(item.second);
const auto input = inputs.find(layer_name);
const auto output = outputs.find(layer_name);
if (input != inputs.end()) {
if (!isMatchLayoutToDims(user_layout, input->second->getTensorDesc().getDims().size())) {
throw std::logic_error(item.second + " layout is not applicable to " + layer_name);
}
input->second->setLayout(user_layout);
} else if (output != outputs.end()) {
if (!isMatchLayoutToDims(user_layout, output->second->getTensorDesc().getDims().size())) {
throw std::logic_error(item.second + " layout is not applicable to " + layer_name);
}
output->second->setLayout(user_layout);
} else {
throw std::logic_error(layer_name + " is not an input neither output");
}
}
}
static void processLayout(InferenceEngine::CNNNetwork& network) {
if (!FLAGS_il.empty()) {
const auto layout = getLayout(FLAGS_il);
for (auto&& layer : network.getInputsInfo()) {
if (isMatchLayoutToDims(layout, layer.second->getTensorDesc().getDims().size())) {
layer.second->setLayout(layout);
}
}
}
if (!FLAGS_ol.empty()) {
const auto layout = getLayout(FLAGS_ol);
for (auto&& layer : network.getOutputsInfo()) {
if (isMatchLayoutToDims(layout, layer.second->getTensorDesc().getDims().size())) {
layer.second->setLayout(layout);
}
}
}
if (!FLAGS_iol.empty()) {
setLayouts(network);
}
}
std::string getFileNameFromPath(const std::string& path,
#if defined(_WIN32)
const std::string& sep = "\\") {
@ -487,18 +291,10 @@ int main(int argc, char* argv[]) {
auto network = ie.ReadNetwork(FLAGS_m);
setDefaultIO(network);
processPrecisions(network);
processLayout(network);
processPrecision(network, FLAGS_ip, FLAGS_op, FLAGS_iop);
processLayout(network, FLAGS_il, FLAGS_ol, FLAGS_iol);
std::cout << "Network inputs:" << std::endl;
for (auto&& layer : network.getInputsInfo()) {
std::cout << " " << layer.first << " : " << layer.second->getPrecision() << " / " << layer.second->getLayout() << std::endl;
}
std::cout << "Network outputs:" << std::endl;
for (auto&& layer : network.getOutputsInfo()) {
std::cout << " " << layer.first << " : " << layer.second->getPrecision() << " / " << layer.second->getLayout() << std::endl;
}
std::cout << std::endl;
printInputAndOutputsInfo(network);
auto timeBeforeLoadNetwork = std::chrono::steady_clock::now();
auto executableNetwork = ie.LoadNetwork(network, FLAGS_d, configure());

View File

@ -8,7 +8,6 @@
#endif
#include <sys/stat.h>
#include <os/windows/w_dirent.h>
#include <algorithm>
#include <map>
@ -20,7 +19,9 @@
#include "vpu_tools_common.hpp"
#include <vpu/utils/string.hpp>
#include "samples/common.hpp"
#include <samples/os/windows/w_dirent.h>
#include <samples/common.hpp>
#include "precision_utils.h"
@ -58,25 +59,6 @@ void setPrecisions(const InferenceEngine::CNNNetwork &network) {
}
}
// Parse a plugin config file consisting of whitespace-separated "KEY VALUE"
// lines into a map. Lines whose first token starts with the comment character
// are skipped entirely. A missing/unopenable file yields an empty map (this
// is deliberate best-effort behavior, not an error).
//
// Bug fix: the previous implementation read tokens pairwise (file >> key >>
// value), so a comment line with any number of tokens other than two shifted
// every subsequent key/value pair. We now consume the remainder of a comment
// line explicitly before reading the next key.
std::map<std::string, std::string> parseConfig(const std::string &configName, char comment) {
    std::map<std::string, std::string> config = {};

    std::ifstream file(configName);
    if (!file.is_open()) {
        return config;
    }

    std::string key, value;
    while (file >> key) {
        if (key.empty() || key[0] == comment) {
            // Discard the rest of the comment line so its tokens cannot be
            // misinterpreted as keys or values.
            std::getline(file, value);
            continue;
        }
        if (!(file >> value)) {
            // Key without a value at end of file: ignore the dangling key.
            break;
        }
        config[key] = value;
    }

    return config;
}
BitMap::BitMap(const std::string &filename) {
BmpHeader header;
BmpInfoHeader infoHeader;

View File

@ -20,8 +20,6 @@ bool isFloat(InferenceEngine::Precision precision);
/* Set FP32 to FP16, all others without changes */
void setPrecisions(const InferenceEngine::CNNNetwork &network);
std::map<std::string, std::string> parseConfig(const std::string &configName, char comment = '#');
class BitMap {
private:
typedef struct {

View File

@ -18,13 +18,14 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
endif()
target_include_directories(${TARGET_NAME} SYSTEM PRIVATE
${IE_MAIN_SOURCE_DIR}/samples/common
${IE_MAIN_SOURCE_DIR}/tools/vpu/common
${IE_MAIN_SOURCE_DIR}/samples/common
)
target_link_libraries(${TARGET_NAME} PRIVATE
inference_engine vpu_graph_transformer
gflags
ie_samples_utils
)
add_dependencies(${TARGET_NAME} myriadPlugin)
@ -40,4 +41,4 @@ add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
install(TARGETS ${TARGET_NAME}
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT myriad)
COMPONENT myriad)

View File

@ -20,13 +20,13 @@ function(add_perfcheck_target TARGET_NAME PLUGIN_NAME)
SYSTEM PRIVATE
"${IE_MAIN_SOURCE_DIR}/src/vpu/graph_transformer/include"
"${IE_MAIN_SOURCE_DIR}/src/plugin_api"
"${IE_MAIN_SOURCE_DIR}/samples/common/samples"
"${IE_MAIN_SOURCE_DIR}/samples/common/format_reader")
target_link_libraries(${TARGET_NAME}
PRIVATE
inference_engine format_reader
Threads::Threads)
Threads::Threads
ie_samples_utils)
add_dependencies(${TARGET_NAME}
${PLUGIN_NAME} ${ARGN})

View File

@ -29,11 +29,13 @@
#include <stdio.h>
#include <ios>
#include <sys/stat.h>
#include <os/windows/w_dirent.h>
#include <samples/os/windows/w_dirent.h>
#include <inference_engine.hpp>
#include <precision_utils.h>
#include <common.hpp>
#include <samples/common.hpp>
#include <vpu/vpu_config.hpp>
static char* m_exename = nullptr;

View File

@ -24,10 +24,10 @@ from openvino.tools.benchmark.utils.constants import MULTI_DEVICE_NAME, HETERO_D
from openvino.tools.benchmark.utils.inputs_filling import set_inputs
from openvino.tools.benchmark.utils.logging import logger
from openvino.tools.benchmark.utils.progress_bar import ProgressBar
from openvino.tools.benchmark.utils.utils import next_step, config_network_inputs, get_number_iterations, \
from openvino.tools.benchmark.utils.utils import next_step, get_number_iterations, process_precision, \
process_help_inference_string, print_perf_counters, dump_exec_graph, get_duration_in_milliseconds, \
get_command_line_arguments, parse_nstreams_value_per_device, parse_devices, get_inputs_info, \
get_batch_size, load_config, dump_config
print_inputs_and_outputs_info, get_batch_size, load_config, dump_config
from openvino.tools.benchmark.utils.statistics_report import StatisticsReport, averageCntReport, detailedCntReport
@ -228,10 +228,11 @@ def run(args):
logger.info('Network batch size: {}'.format(batch_size))
# --------------------- 6. Configuring input of the model --------------------------------------------------
# --------------------- 6. Configuring inputs and outputs of the model --------------------------------------------------
next_step()
config_network_inputs(ie_network, app_inputs_info)
process_precision(ie_network, app_inputs_info, args.input_precision, args.output_precision, args.input_output_precision)
print_inputs_and_outputs_info(ie_network)
# --------------------- 7. Loading the model to the device -------------------------------------------------
next_step()

View File

@ -125,6 +125,12 @@ def parse_args():
" Please note, command line parameters have higher priority then parameters from configuration file.")
args.add_argument('-qb', '--quantization_bits', type=int, required=False, default=None, choices=[8, 16],
help="Optional. Weight bits for quantization: 8 (I8) or 16 (I16) ")
args.add_argument('-ip', '--input_precision', type=str, required=False, default='U8', choices=['U8', 'FP16', 'FP32'],
help='Optional. Specifies precision for all input layers of the network.')
args.add_argument('-op', '--output_precision', type=str, required=False, default='FP32', choices=['U8', 'FP16', 'FP32'],
help='Optional. Specifies precision for all output layers of the network.')
args.add_argument('-iop', '--input_output_precision', type=str, required=False,
help='Optional. Specifies precision for input and output layers by name. Example: -iop "input:FP16, output:FP16". Notice that quotes are required. Overwrites precision from ip and op options for specified layers.')
parsed_args = parser.parse_args()
return parsed_args

View File

@ -37,11 +37,6 @@ def get_inputs(paths_to_input, batch_size, app_input_info, requests):
info = app_input_info[key]
if info.is_image:
input_image_sizes[key] = (info.width, info.height)
logger.info("Network input '{}' precision {}, dimensions ({}): {}".format(key,
info.precision,
info.layout,
" ".join(str(x) for x in
info.shape)))
images_count = len(input_image_sizes.keys())
binaries_count = len(app_input_info) - images_count

View File

@ -59,16 +59,71 @@ def next_step(additional_info='', step_id=0):
step_info_template = step_info_template.format(next_step.step_id, len(step_names), step_name)
print(step_info_template)
def process_precision(ie_network: IENetwork, app_inputs_info, input_precision: str, output_precision: str, input_output_precision: str):
    """Apply command-line precision settings (-ip/-op/-iop) to the network.

    The blanket input precision is applied to every input (and mirrored into
    app_inputs_info), the blanket output precision to every output; the
    optional per-layer "name:precision" overrides are applied last, so they
    take priority over the blanket values.
    """
    _configure_network_inputs(ie_network, app_inputs_info, input_precision)
    _configure_network_outputs(ie_network, output_precision)
    # -iop is optional; skip the per-layer pass when it was not supplied.
    if input_output_precision:
        _configure_network_inputs_and_outputs(ie_network, input_output_precision)
def config_network_inputs(ie_network: IENetwork, app_inputs_info):
def _configure_network_inputs(ie_network: IENetwork, app_inputs_info, input_precision: str):
input_info = ie_network.input_info
for key in input_info.keys():
if app_inputs_info[key].is_image:
# Set the precision of input data provided by the user
# Should be called before load of the network to the plugin
app_inputs_info[key].precision = 'U8'
input_info[key].precision = 'U8'
app_inputs_info[key].precision = input_precision
input_info[key].precision = input_precision
def _configure_network_outputs(ie_network: IENetwork, output_precision: str):
    """Set every output layer of the network to the requested precision."""
    for info in ie_network.outputs.values():
        info.precision = output_precision
def _configure_network_inputs_and_outputs(ie_network: IENetwork, input_output_precision: str):
    """Apply per-layer precision overrides given as "name:PREC[,name:PREC...]".

    Each named layer may be either an input or an output; an unknown name is
    an error.
    """
    if not input_output_precision:
        raise Exception("Input/output precision is empty")

    inputs = ie_network.input_info
    outputs = ie_network.outputs
    for name, precision in _parse_arg_map(input_output_precision).items():
        if name in inputs:
            inputs[name].precision = precision
        elif name in outputs:
            outputs[name].precision = precision
        else:
            raise Exception("Element '{}' does not exist in network".format(name))
def _parse_arg_map(arg_map: str):
arg_map = arg_map.replace(" ", "")
pairs = [x.strip() for x in arg_map.split(',')]
parsed_map = {}
for pair in pairs:
key_value = [x.strip() for x in pair.split(':')]
parsed_map.update({key_value[0]:key_value[1]})
return parsed_map
def print_inputs_and_outputs_info(ie_network: IENetwork):
    """Log precision, layout and dimensions of every network input and output."""
    for name, info in ie_network.input_info.items():
        desc = info.tensor_desc
        logger.info("Network input '{}' precision {}, dimensions ({}): {}".format(
            name,
            desc.precision,
            desc.layout,
            " ".join(str(d) for d in desc.dims)))
    for name, info in ie_network.outputs.items():
        logger.info("Network output '{}' precision {}, dimensions ({}): {}".format(
            name,
            info.precision,
            info.layout,
            " ".join(str(d) for d in info.shape)))
def get_number_iterations(number_iterations: int, nireq: int, api_type: str):
niter = number_iterations
@ -336,4 +391,4 @@ def dump_config(filename, config):
def load_config(filename, config):
with open(filename) as f:
config.update(json.load(f))
config.update(json.load(f))