diff --git a/samples/cpp/speech_sample/README.md b/samples/cpp/speech_sample/README.md index 0f440ba767d..7de98c36c66 100644 --- a/samples/cpp/speech_sample/README.md +++ b/samples/cpp/speech_sample/README.md @@ -99,24 +99,22 @@ speech_sample [OPTION] Options: -h Print a usage message. - -i "" Required. Paths to input files. Example of usage: or or . + -i "" Required. Paths to input file or Layers names with corresponding paths to the input files. Example of usage for single file: or . Example of usage for named layers: =,=. -m "" Required. Path to an .xml file with a trained model (required if -rg is missing). - -o "" Optional. Output file name to save scores. Example of usage: or + -o "" Optional. Output file name to save scores or Layer names with corresponding files names to save scores. Example of usage for single file: or . Example of usage for named layers: =,=. -d "" Optional. Specify a target device to infer on. CPU, GPU, MYRIAD, GNA_AUTO, GNA_HW, GNA_HW_WITH_SW_FBACK, GNA_SW_FP32, GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU as a secondary (e.g. HETERO:GNA,CPU) are supported. The sample will look for a suitable plugin for device specified. -pc Optional. Enables per-layer performance report. - -q "" Optional. Input quantization mode: static (default), dynamic, or user (use with -sf). + -q "" Optional. Input quantization mode: static (default), dynamic, or user (use with -sf). -qb "" Optional. Weight bits for quantization: 8 or 16 (default) -sf "" Optional. User-specified input scale factor for quantization (use with -q user). If the network contains multiple inputs, provide scale factors by separating them with commas. -bs "" Optional. Batch size 1-8 -layout "" Optional. Prompts how network layouts should be treated by application.For example, \"input1[NCHW],input2[NC]\" or \"[NCHW]\" in case of one input size. - -r "" Optional. Read reference score file and compare scores. 
Example of usage: or + -r "" Optional. Read reference score file or named layers with corresponding score files and compare scores. Example of usage for single file: or . Example of usage for named layers: =,=. -rg "" Read GNA model from file using path/filename provided (required if -m is missing). -wg "" Optional. Write GNA model to file using path/filename provided. -we "" Optional. Write GNA embedded model to file using path/filename provided. -cw_l "" Optional. Number of frames for left context windows (default is 0). Works only with context window networks. If you use the cw_l or cw_r flag, then batch size argument is ignored. -cw_r "" Optional. Number of frames for right context windows (default is 0). Works only with context window networks. If you use the cw_r or cw_l flag, then batch size argument is ignored. - -oname "" Optional. Layer names for output blobs. The names are separated with "," Example: Output1:port,Output2:port - -iname "" Optional. Layer names for input blobs. The names are separated with "," Example: Input1,Input2 -pwl_me "" Optional. The maximum percent of error for PWL function.The value must be in <0, 100> range. The default value is 1.0. -exec_target "" Optional. Specify GNA execution target generation. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. By default, generation corresponds to the GNA HW available in the system or the latest fully supported generation by the software. See the GNA Plugin's GNA_EXEC_TARGET config option description. -compile_target "" Optional. Specify GNA compile target generation. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. By default, generation corresponds to the GNA HW available in the system or the latest fully supported generation by the software. See the GNA Plugin's GNA_COMPILE_TARGET config option description. 
diff --git a/samples/cpp/speech_sample/main.cpp b/samples/cpp/speech_sample/main.cpp index d42c81793b4..05b641261ed 100644 --- a/samples/cpp/speech_sample/main.cpp +++ b/samples/cpp/speech_sample/main.cpp @@ -322,7 +322,7 @@ int main(int argc, char* argv[]) { std::vector inputNameBlobs = input_data.second; if (inputNameBlobs.size() != cInputInfo.size()) { std::string errMessage(std::string("Number of network inputs ( ") + std::to_string(cInputInfo.size()) + - " ) is not equal to the number of inputs entered in the -iname argument ( " + + " ) is not equal to the number of inputs entered in the -i argument ( " + std::to_string(inputNameBlobs.size()) + " )."); throw std::logic_error(errMessage); } diff --git a/samples/cpp/speech_sample/speech_sample.hpp b/samples/cpp/speech_sample/speech_sample.hpp index 7582c40d325..8ca4fc806ff 100644 --- a/samples/cpp/speech_sample/speech_sample.hpp +++ b/samples/cpp/speech_sample/speech_sample.hpp @@ -14,8 +14,9 @@ static const char help_message[] = "Print a usage message."; /// @brief message for input data argument -static const char input_message[] = - "Required. Paths to input files. Example of usage: or or ."; +static const char input_message[] = "Required. Paths to input file or Layers names with corresponding paths to the " + "input files. Example of usage for single file: or . Example " + "of usage for named layers: =,=."; /// @brief message for model argument static const char model_message[] = "Required. Path to an .xml file with a trained model (required if -rg is missing)."; @@ -60,11 +61,15 @@ static const char custom_cpu_library_message[] = "Required for CPU plugin custom /// @brief message for score output argument static const char output_message[] = - "Optional. Output file name to save scores. Example of usage: or "; + "Optional. Output file name to save scores or Layer names with corresponding files names to save scores. Example " + "of usage for single file: or . 
Example of usage for " "named layers: =,=."; /// @brief message for reference score file argument static const char reference_score_message[] = - "Optional. Read reference score file and compare scores. Example of usage: or "; + "Optional. Read reference score file or named layers with corresponding score files and compare scores. Example of " "usage for single file: or . Example of usage " "for named layers: =,=."; /// @brief message for read GNA model argument static const char read_gna_model_message[] = @@ -111,15 +116,6 @@ static const char context_window_message_r[] = "Works only with context window networks." " If you use the cw_r or cw_l flag, then batch size argument is ignored."; -/// @brief message for output layer names -static const char output_layer_names_message[] = "Optional. Layer names for output blobs. " - "The names are separated with \",\" " - "Example: Output1:port,Output2:port "; -/// @brief message for inputs layer names -static const char input_layer_names_message[] = "Optional. Layer names for input blobs. " - "The names are separated with \",\" " - "Example: Input1,Input2 "; /// @brief message for inputs layer names static const char layout_message[] = "Optional. Prompts how network layouts should be treated by application. 
" @@ -192,12 +188,6 @@ DEFINE_int32(cw_r, 0, context_window_message_r); /// @brief Left context window size (default 0) DEFINE_int32(cw_l, 0, context_window_message_l); -/// @brief Output layer name -DEFINE_string(oname, "", output_layer_names_message); - -/// @brief Input layer name -DEFINE_string(iname, "", input_layer_names_message); - /// @brief Input layer name DEFINE_string(layout, "", layout_message); @@ -228,8 +218,6 @@ static void show_usage() { std::cout << " -we \"\" " << write_embedded_model_message << std::endl; std::cout << " -cw_l \"\" " << context_window_message_l << std::endl; std::cout << " -cw_r \"\" " << context_window_message_r << std::endl; - std::cout << " -oname \"\" " << output_layer_names_message << std::endl; - std::cout << " -iname \"\" " << input_layer_names_message << std::endl; std::cout << " -layout \"\" " << layout_message << std::endl; std::cout << " -pwl_me \"\" " << pwl_max_error_percent_message << std::endl; std::cout << " -exec_target \"\" " << execution_target_message << std::endl; diff --git a/samples/cpp/speech_sample/utils.hpp b/samples/cpp/speech_sample/utils.hpp index f194c4e66ab..7aa3dc975e6 100644 --- a/samples/cpp/speech_sample/utils.hpp +++ b/samples/cpp/speech_sample/utils.hpp @@ -536,29 +536,30 @@ std::map parse_input_layouts(const std::string& layout } /** - * @brief Parse parameters for inputs/outputs like as "=,=" or - * "" in case of one input/output + * @brief Parse parameters for inputs/outputs/reference like as "=,=" or + * "" in case of one input/output/reference. 
+ * @note Exemplary result for given data: {",",{"",""}} * @param file_paths_string input/output path - * @return pair of filename and vector of tensor_names + * @return pair of filename and vector of layers names */ -std::pair> parse_parameters(const std::string file_paths_string) { +std::pair> parse_parameters(const std::string& file_paths_string) { auto search_string = file_paths_string; char comma_delim = ','; char equal_delim = '='; std::string filename = ""; - std::vector tensor_names; + std::vector layers_names; std::vector filenames; if (!std::count(search_string.begin(), search_string.end(), comma_delim) && !std::count(search_string.begin(), search_string.end(), equal_delim)) { - return {search_string, tensor_names}; + return {search_string, layers_names}; } search_string += comma_delim; std::vector splitted = split(search_string, comma_delim); for (size_t j = 0; j < splitted.size(); j++) { - auto semicolon_pos = splitted[j].find_first_of(equal_delim); - if (semicolon_pos != std::string::npos) { - tensor_names.push_back(splitted[j].substr(0, semicolon_pos)); - filenames.push_back(splitted[j].substr(semicolon_pos + 1, std::string::npos)); + auto equal_delim_pos = splitted[j].find_first_of(equal_delim); + if (equal_delim_pos != std::string::npos) { + layers_names.push_back(splitted[j].substr(0, equal_delim_pos)); + filenames.push_back(splitted[j].substr(equal_delim_pos + 1, std::string::npos)); } } for (std::vector::const_iterator name = filenames.begin(); name != filenames.end(); ++name) { @@ -566,5 +567,5 @@ std::pair> parse_parameters(const std::str if (name != filenames.end() - 1) filename += comma_delim; } - return {filename, tensor_names}; + return {filename, layers_names}; } \ No newline at end of file