samples: hide -we_gen and remove -l options for C++ speech (#5840)
* samples: hide and remove options for C++ speech
* sample: revert -we option
This commit is contained in:
parent
3954c3f99d
commit
3e303f016b
@@ -10,7 +10,6 @@ Automatic Speech Recognition C++ sample application demonstrates how to use the
|:--- |:--- |:---
|Inference Engine Version| `InferenceEngine::GetInferenceEngineVersion` | Get Inference Engine API version
|Available Devices|`InferenceEngine::Core::GetAvailableDevices`| Get the list of devices available for inference
-|Custom Extension Kernels|`InferenceEngine::Core::AddExtension`| Load extension library to the device plugin
|Network Operations|`InferenceEngine::CNNNetwork::setBatchSize`, `InferenceEngine::CNNNetwork::getBatchSize`| Manage the network and operate with its batch size
|Network Operations|`InferenceEngine::CNNNetwork::addOutput`| Change names of output layers in the network
|Import Network|`InferenceEngine::ExecutableNetwork::Export`, `InferenceEngine::Core::ImportNetwork`| Create an executable network from a previously exported network
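A minimal sketch, assuming Inference Engine 2021.x headers, of how the version and device queries from this table fit together (the model path is a placeholder, not taken from the sample):

```cpp
#include <inference_engine.hpp>
#include <iostream>

int main() {
    // Inference Engine API version.
    const InferenceEngine::Version* version = InferenceEngine::GetInferenceEngineVersion();
    std::cout << "Build: " << version->buildNumber << std::endl;

    InferenceEngine::Core ie;
    // Devices the runtime can dispatch to (CPU, GNA, ...).
    for (const std::string& device : ie.GetAvailableDevices())
        std::cout << "Device: " << device << std::endl;

    // Network operations from the table: read an IR and adjust its batch size.
    InferenceEngine::CNNNetwork network = ie.ReadNetwork("model.xml");  // placeholder path
    network.setBatchSize(8);
    std::cout << "Batch: " << network.getBatchSize() << std::endl;
    return 0;
}
```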
@@ -72,8 +71,7 @@ Several execution modes are supported via the `-d` flag:

#### Loading and Saving Models

-The GNA plugin supports loading and saving of the GNA-optimized model (non-IR) via the `-rg` and `-wg` flags. Thereby, it is possible to avoid the cost of full model quantization at run time. The GNA plugin also
-supports export of firmware-compatible embedded model images for the Intel® Speech Enabling Developer Kit and Amazon Alexa* Premium Far-Field Voice Development Kit via the `-we` flag (save only).
+The GNA plugin supports loading and saving of the GNA-optimized model (non-IR) via the `-rg` and `-wg` flags. Thereby, it is possible to avoid the cost of full model quantization at run time. The GNA plugin also supports export of firmware-compatible embedded model images for the Intel® Speech Enabling Developer Kit and Amazon Alexa* Premium Far-Field Voice Development Kit via the `-we` flag (save only).

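A minimal sketch of how these flags map onto the Inference Engine API, assuming IE 2021.x; `rg`, `wg`, and `xml` stand in for the values of `-rg`, `-wg`, and `-m`:

```cpp
#include <inference_engine.hpp>
#include <string>

// Sketch only: load a previously saved GNA-optimized model if one is given,
// otherwise compile the IR for the GNA plugin; optionally save the result.
InferenceEngine::ExecutableNetwork getExecutableNetwork(InferenceEngine::Core& ie,
                                                        const std::string& rg,   // -rg path, may be empty
                                                        const std::string& xml,  // -m IR path
                                                        const std::string& wg) { // -wg path, may be empty
    InferenceEngine::ExecutableNetwork executableNet;
    if (!rg.empty()) {
        // Importing the non-IR model skips full quantization at run time.
        executableNet = ie.ImportNetwork(rg, "GNA");
    } else {
        InferenceEngine::CNNNetwork network = ie.ReadNetwork(xml);
        executableNet = ie.LoadNetwork(network, "GNA");
    }
    if (!wg.empty())
        executableNet.Export(wg);  // save the GNA-optimized model for later runs
    return executableNet;
}
```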
In addition to performing inference directly from a GNA model file, these options make it possible to:
@@ -104,7 +102,6 @@ Options:
    -i "<path>"             Required. Paths to .ark files. Example of usage: <file1.ark,file2.ark> or <file.ark>.
    -m "<path>"             Required. Path to an .xml file with a trained model (required if -rg is missing).
    -o "<path>"             Optional. Output file name to save ark scores.
-    -l "<absolute_path>"    Required for CPU custom layers. Absolute path to a shared library with the kernel implementations.
    -d "<device>"           Optional. Specify a target device to infer on. CPU, GPU, MYRIAD, GNA_AUTO, GNA_HW, GNA_SW_FP32, GNA_SW_EXACT and HETERO with combination of GNA
                            as the primary device and CPU as a secondary (e.g. HETERO:GNA,CPU) are supported. The list of available devices is shown below. The sample will look for a suitable plugin for the device specified.
    -pc                     Optional. Enables per-layer performance report.
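The `-d` string is handed to the Inference Engine as the device name; a hedged sketch of the HETERO case described above (the function name is illustrative, not from the sample):

```cpp
#include <inference_engine.hpp>
#include <string>

// Illustrative: compile for GNA with CPU as the secondary (fallback) device,
// matching -d HETERO:GNA,CPU from the options above.
InferenceEngine::ExecutableNetwork compileForDevice(InferenceEngine::Core& ie,
                                                    InferenceEngine::CNNNetwork& network,
                                                    const std::string& device = "HETERO:GNA,CPU") {
    return ie.LoadNetwork(network, device);
}
```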
@@ -663,13 +663,6 @@ int main(int argc, char* argv[]) {
        CNNNetwork network;
        ExecutableNetwork executableNet;

-        if (!FLAGS_l.empty()) {
-            // Custom CPU extension is loaded as a shared library and passed as a pointer to base extension
-            IExtensionPtr extension_ptr = std::make_shared<Extension>(FLAGS_l);
-            ie.AddExtension(extension_ptr);
-            slog::info << "Custom Extension loaded: " << FLAGS_l << slog::endl;
-        }
-
        // ------------------------------ Get Available Devices ------------------------------------------------------
        auto isFeature = [&](const std::string xFeature) {
            return FLAGS_d.find(xFeature) != std::string::npos;
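For reference, the custom-extension pattern deleted above, as a self-contained sketch; the library path is a placeholder for what `-l` used to supply:

```cpp
#include <ie_core.hpp>
#include <ie_extension.h>
#include <memory>

int main() {
    InferenceEngine::Core ie;
    // A custom CPU extension is a shared library with kernel implementations,
    // loaded and handed to the plugin as a pointer to the base extension type.
    InferenceEngine::IExtensionPtr extension_ptr =
        std::make_shared<InferenceEngine::Extension>(std::string("libcustom_ops.so"));  // placeholder
    ie.AddExtension(extension_ptr);
    return 0;
}
```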
@@ -132,10 +132,6 @@ DEFINE_string(compile_target, "", compile_target_message);
/// \brief Enable per-layer performance report
DEFINE_bool(pc, false, performance_counter_message);

-/// @brief Absolute path to CPU library with user layers <br>
-/// It is an optional parameter
-DEFINE_string(l, "", custom_cpu_library_message);
-
/// @brief Write output file to save ark scores
DEFINE_string(o, "", output_message);

@@ -151,7 +147,7 @@ DEFINE_string(wg, "", write_gna_model_message);
/// @brief Write GNA embedded model to file (model.bin)
DEFINE_string(we, "", write_embedded_model_message);

-/// @brief Optional GNA embedded device generation (default GNA1 aka Sue Creek)
+/// @brief Optional GNA embedded device generation (default GNA1 aka Sue Creek) - hide option
DEFINE_string(we_gen, "GNA1", write_embedded_model_generation_message);

/// @brief Input quantization mode (default static)
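These definitions follow the usual gflags pattern; a minimal self-contained sketch of it (flag messages shortened here, not the sample's exact strings):

```cpp
#include <gflags/gflags.h>
#include <iostream>

// Each DEFINE_* macro registers a flag and exposes it as FLAGS_<name>.
DEFINE_string(wg, "", "Optional. Path to write the GNA model to.");
DEFINE_bool(pc, false, "Optional. Enables per-layer performance report.");

int main(int argc, char* argv[]) {
    // The non-help variant leaves -h handling to the sample's own showUsage().
    gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
    if (!FLAGS_wg.empty())
        std::cout << "Writing GNA model to " << FLAGS_wg << std::endl;
    if (FLAGS_pc)
        std::cout << "Per-layer performance report enabled" << std::endl;
    return 0;
}
```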
@@ -196,7 +192,6 @@ static void showUsage() {
    std::cout << " -i \"<path>\" " << input_message << std::endl;
    std::cout << " -m \"<path>\" " << model_message << std::endl;
    std::cout << " -o \"<path>\" " << output_message << std::endl;
-    std::cout << " -l \"<absolute_path>\" " << custom_cpu_library_message << std::endl;
    std::cout << " -d \"<device>\" " << target_device_message << std::endl;
    std::cout << " -pc " << performance_counter_message << std::endl;
    std::cout << " -q \"<mode>\" " << quantization_message << std::endl;
@@ -207,7 +202,6 @@ static void showUsage() {
    std::cout << " -rg \"<path>\" " << read_gna_model_message << std::endl;
    std::cout << " -wg \"<path>\" " << write_gna_model_message << std::endl;
    std::cout << " -we \"<path>\" " << write_embedded_model_message << std::endl;
-    std::cout << " -we_gen \"<generation>\" " << write_embedded_model_generation_message << std::endl;
    std::cout << " -nthreads \"<integer>\" " << infer_num_threads_message << std::endl;
    std::cout << " -cw_l \"<integer>\" " << context_window_message_l << std::endl;
    std::cout << " -cw_r \"<integer>\" " << context_window_message_r << std::endl;