[SAMPLES] Remove unused commandline arguments for speech_sample (#11892)
parent bb82f43e22
commit 3efda5067b
@@ -99,24 +99,22 @@ speech_sample [OPTION]
Options:

-h Print a usage message.
-i "<path>" Required. Paths to input files. Example of usage: <file1.ark,file2.ark> or <file.ark> or <file.npz>.
-i "<path>" Required. Path to an input file, or layer names with corresponding paths to the input files. Example of usage for a single file: <file.ark> or <file.npz>. Example of usage for named layers: <layer1>=<file1.ark>,<layer2>=<file2.ark>.
-m "<path>" Required. Path to an .xml file with a trained model (required if -rg is missing).
-o "<path>" Optional. Output file name to save scores. Example of usage: <output.ark> or <output.npz>
-o "<path>" Optional. Output file name to save scores, or layer names with corresponding file names to save scores. Example of usage for a single file: <output.ark> or <output.npz>. Example of usage for named layers: <layer1:port_num>=<output_file1.ark>,<layer2:port_num>=<output_file2.ark>.
-d "<device>" Optional. Specify a target device to infer on. CPU, GPU, MYRIAD, GNA_AUTO, GNA_HW, GNA_HW_WITH_SW_FBACK, GNA_SW_FP32, GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU as a secondary (e.g. HETERO:GNA,CPU) are supported. The sample will look for a suitable plugin for the device specified.
-pc Optional. Enables per-layer performance report.
-q "<mode>" Optional. Input quantization mode: static (default), dynamic, or user (use with -sf).
-qb "<integer>" Optional. Weight bits for quantization: 8 or 16 (default)
-sf "<double>" Optional. User-specified input scale factor for quantization (use with -q user). If the network contains multiple inputs, provide scale factors by separating them with commas.
-bs "<integer>" Optional. Batch size 1-8
-layout "<string>" Optional. Prompts how network layouts should be treated by application. For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size.
-r "<path>" Optional. Read reference score file and compare scores. Example of usage: <reference.ark> or <reference.npz>
-r "<path>" Optional. Read a reference score file, or named layers with corresponding score files, and compare scores. Example of usage for a single file: <reference.ark> or <reference.npz>. Example of usage for named layers: <layer1:port_num>=<reference_file1.ark>,<layer2:port_num>=<reference_file2.ark>.
-rg "<path>" Read GNA model from file using path/filename provided (required if -m is missing).
-wg "<path>" Optional. Write GNA model to file using path/filename provided.
-we "<path>" Optional. Write GNA embedded model to file using path/filename provided.
-cw_l "<integer>" Optional. Number of frames for left context windows (default is 0). Works only with context window networks. If you use the cw_l or cw_r flag, then batch size argument is ignored.
-cw_r "<integer>" Optional. Number of frames for right context windows (default is 0). Works only with context window networks. If you use the cw_r or cw_l flag, then batch size argument is ignored.
-oname "<string>" Optional. Layer names for output blobs. The names are separated with "," Example: Output1:port,Output2:port
-iname "<string>" Optional. Layer names for input blobs. The names are separated with "," Example: Input1,Input2
-pwl_me "<double>" Optional. The maximum percent of error for PWL function. The value must be in <0, 100> range. The default value is 1.0.
-exec_target "<string>" Optional. Specify GNA execution target generation. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. By default, generation corresponds to the GNA HW available in the system or the latest fully supported generation by the software. See the GNA Plugin's GNA_EXEC_TARGET config option description.
-compile_target "<string>" Optional. Specify GNA compile target generation. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. By default, generation corresponds to the GNA HW available in the system or the latest fully supported generation by the software. See the GNA Plugin's GNA_COMPILE_TARGET config option description.
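To make the updated -i, -o and -r syntax above concrete, here is a hedged invocation sketch; the model, layer, port and file names are illustrative placeholders, not values taken from this commit.

    speech_sample -m wsj_dnn5b.xml -d GNA_AUTO \
                  -i "Input=dev93_10.ark" \
                  -o "affinetransform14:0=scores.ark" \
                  -r "affinetransform14:0=ref_scores.ark"

In the named-layer form, each comma-separated entry pairs a layer name (with an output port number for -o and -r) with the file used for that layer; the plain single-file form remains available when the network has only one input or output.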
@@ -322,7 +322,7 @@ int main(int argc, char* argv[]) {
std::vector<std::string> inputNameBlobs = input_data.second;
if (inputNameBlobs.size() != cInputInfo.size()) {
    std::string errMessage(std::string("Number of network inputs ( ") + std::to_string(cInputInfo.size()) +
                           " ) is not equal to the number of inputs entered in the -iname argument ( " +
                           " ) is not equal to the number of inputs entered in the -i argument ( " +
                           std::to_string(inputNameBlobs.size()) + " ).");
    throw std::logic_error(errMessage);
}
@@ -14,8 +14,9 @@
static const char help_message[] = "Print a usage message.";

/// @brief message for input data argument
static const char input_message[] =
    "Required. Paths to input files. Example of usage: <file1.ark,file2.ark> or <file.ark> or <file.npz>.";
static const char input_message[] = "Required. Path to an input file, or layer names with corresponding paths to the "
                                    "input files. Example of usage for a single file: <file.ark> or <file.npz>. "
                                    "Example of usage for named layers: <layer1>=<file1.ark>,<layer2>=<file2.ark>.";

/// @brief message for model argument
static const char model_message[] = "Required. Path to an .xml file with a trained model (required if -rg is missing).";
@@ -60,11 +61,15 @@ static const char custom_cpu_library_message[] = "Required for CPU plugin custom

/// @brief message for score output argument
static const char output_message[] =
    "Optional. Output file name to save scores. Example of usage: <output.ark> or <output.npz>";
    "Optional. Output file name to save scores, or layer names with corresponding file names to save scores. "
    "Example of usage for a single file: <output.ark> or <output.npz>. Example of usage for named layers: "
    "<layer1:port_num>=<output_file1.ark>,<layer2:port_num>=<output_file2.ark>.";

/// @brief message for reference score file argument
static const char reference_score_message[] =
    "Optional. Read reference score file and compare scores. Example of usage: <reference.ark> or <reference.npz>";
    "Optional. Read a reference score file, or named layers with corresponding score files, and compare scores. "
    "Example of usage for a single file: <reference.ark> or <reference.npz>. Example of usage for named layers: "
    "<layer1:port_num>=<reference_file1.ark>,<layer2:port_num>=<reference_file2.ark>.";

/// @brief message for read GNA model argument
static const char read_gna_model_message[] =
@@ -111,15 +116,6 @@ static const char context_window_message_r[] =
    "Works only with context window networks."
    " If you use the cw_r or cw_l flag, then batch size argument is ignored.";

/// @brief message for output layer names
static const char output_layer_names_message[] = "Optional. Layer names for output blobs. "
                                                 "The names are separated with \",\" "
                                                 "Example: Output1:port,Output2:port ";

/// @brief message for inputs layer names
static const char input_layer_names_message[] = "Optional. Layer names for input blobs. "
                                                "The names are separated with \",\" "
                                                "Example: Input1,Input2 ";
/// @brief message for inputs layer names
static const char layout_message[] =
    "Optional. Prompts how network layouts should be treated by application. "

@@ -192,12 +188,6 @@ DEFINE_int32(cw_r, 0, context_window_message_r);
/// @brief Left context window size (default 0)
DEFINE_int32(cw_l, 0, context_window_message_l);

/// @brief Output layer name
DEFINE_string(oname, "", output_layer_names_message);

/// @brief Input layer name
DEFINE_string(iname, "", input_layer_names_message);

/// @brief Input layer name
DEFINE_string(layout, "", layout_message);

@@ -228,8 +218,6 @@ static void show_usage() {
    std::cout << " -we \"<path>\" " << write_embedded_model_message << std::endl;
    std::cout << " -cw_l \"<integer>\" " << context_window_message_l << std::endl;
    std::cout << " -cw_r \"<integer>\" " << context_window_message_r << std::endl;
    std::cout << " -oname \"<string>\" " << output_layer_names_message << std::endl;
    std::cout << " -iname \"<string>\" " << input_layer_names_message << std::endl;
    std::cout << " -layout \"<string>\" " << layout_message << std::endl;
    std::cout << " -pwl_me \"<double>\" " << pwl_max_error_percent_message << std::endl;
    std::cout << " -exec_target \"<string>\" " << execution_target_message << std::endl;
@@ -536,29 +536,30 @@ std::map<std::string, std::string> parse_input_layouts(const std::string& layout
}

/**
 * @brief Parse parameters for inputs/outputs like as "<name1>=<file1.ark/.npz>,<name2>=<file2.ark/.npz>" or
 * "<file.ark/.npz>" in case of one input/output
 * @brief Parse parameters for inputs/outputs/reference such as "<name1>=<file1.ark/.npz>,<name2>=<file2.ark/.npz>" or
 * "<file.ark/.npz>" in case of one input/output/reference.
 * @note Exemplary result for given data: {"<file1.ark/.npz>,<file2.ark/.npz>",{"<name1>","<name2>"}}
 * @param file_paths_string input/output path
 * @return pair of filename and vector of tensor_names
 * @return pair of filename and vector of layer names
 */
std::pair<std::string, std::vector<std::string>> parse_parameters(const std::string file_paths_string) {
std::pair<std::string, std::vector<std::string>> parse_parameters(const std::string& file_paths_string) {
    auto search_string = file_paths_string;
    char comma_delim = ',';
    char equal_delim = '=';
    std::string filename = "";
    std::vector<std::string> tensor_names;
    std::vector<std::string> layers_names;
    std::vector<std::string> filenames;
    if (!std::count(search_string.begin(), search_string.end(), comma_delim) &&
        !std::count(search_string.begin(), search_string.end(), equal_delim)) {
        return {search_string, tensor_names};
        return {search_string, layers_names};
    }
    search_string += comma_delim;
    std::vector<std::string> splitted = split(search_string, comma_delim);
    for (size_t j = 0; j < splitted.size(); j++) {
        auto semicolon_pos = splitted[j].find_first_of(equal_delim);
        if (semicolon_pos != std::string::npos) {
            tensor_names.push_back(splitted[j].substr(0, semicolon_pos));
            filenames.push_back(splitted[j].substr(semicolon_pos + 1, std::string::npos));
        auto equal_delim_pos = splitted[j].find_first_of(equal_delim);
        if (equal_delim_pos != std::string::npos) {
            layers_names.push_back(splitted[j].substr(0, equal_delim_pos));
            filenames.push_back(splitted[j].substr(equal_delim_pos + 1, std::string::npos));
        }
    }
    for (std::vector<std::string>::const_iterator name = filenames.begin(); name != filenames.end(); ++name) {

@@ -566,5 +567,5 @@ std::pair<std::string, std::vector<std::string>> parse_parameters(const std::str
        if (name != filenames.end() - 1)
            filename += comma_delim;
    }
    return {filename, tensor_names};
    return {filename, layers_names};
}
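As a quick illustration of the parsing shown above, the following sketch shows what parse_parameters would be expected to return for the two supported argument forms. It is a minimal, hedged usage example: the layer and file names are made up, and it assumes the snippet is linked against the sample sources that define parse_parameters and its split() helper.

    // Usage sketch only; parse_parameters is declared here and assumed to be
    // provided by the speech_sample utilities when linking.
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    std::pair<std::string, std::vector<std::string>> parse_parameters(const std::string& file_paths_string);

    int main() {
        // Plain form: no '=' or ',', so the whole string is returned as the
        // filename and the layer-name vector stays empty.
        auto plain = parse_parameters("scores.ark");
        std::cout << plain.first << " / " << plain.second.size() << " layer names\n";

        // Named-layer form: "<layer>=<file>" pairs separated by commas become a
        // comma-joined filename string plus a vector of layer names.
        auto named = parse_parameters("affine7:0=scores1.ark,affine8:0=scores2.ark");
        std::cout << named.first << "\n";    // expected: "scores1.ark,scores2.ark"
        for (const auto& layer : named.second)
            std::cout << layer << "\n";      // expected: "affine7:0", then "affine8:0"
        return 0;
    }

Returning the layer names alongside a single comma-joined file string keeps the existing single-file call sites working unchanged while letting the -i, -o and -r handlers associate each file with a specific layer.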