diff --git a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/main.c b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/main.c index a98c3f60a98..6916e93dcfa 100644 --- a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/main.c +++ b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/main.c @@ -68,12 +68,12 @@ int ParseAndCheckCommandLine(int argc, char *argv[]) { if (help) return -1; - if (img_msg == NULL) { - printf("Parameter -i is not set\n"); + if (input_model == NULL) { + printf("Model is required but not set. Please set -m option.\n"); return -1; } - if (input_model == NULL) { - printf("Parameter -m is not set \n"); + if (img_msg == NULL) { + printf("Input is required but not set. Please set -i option.\n"); return -1; } diff --git a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h index d2d1204d406..a3c0e40cde9 100644 --- a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h +++ b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h @@ -9,12 +9,12 @@ /// @brief message for help argument static const char *help_message = "Print a usage message."; +/// @brief message for model argument +static const char *model_message = "Required. Path to an .xml file with a trained model."; + /// @brief message for images argument static const char *image_message = "Required. Path to an .bmp image."; -/// @brief message for model argument -static const char *model_message = "Required. Path to an .xml file with a trained model."; - /// @brief message for plugin argument static const char *plugin_message = "Plugin name. For example MKLDNNPlugin. 
If this parameter is pointed, " \ "the sample will look for this plugin only"; @@ -41,8 +41,8 @@ static void showUsage() { printf("\nobject_detection_sample_ssd [OPTION]\n"); printf("Options:\n\n"); printf(" -h %s\n", help_message); - printf(" -i \"\" %s\n", image_message); printf(" -m \"\" %s\n", model_message); + printf(" -i \"\" %s\n", image_message); printf(" -l \"\" %s\n", custom_cpu_library_message); printf(" Or\n"); printf(" -c \"\" %s\n", custom_cldnn_message); @@ -77,6 +77,7 @@ static int getopt(int argc, char **argv, char *opts) { optopt = c = argv[optind][sp]; if(c == ':' || (cp = strchr(opts, c)) == 0) { ERR(": unrecognized option -- ", c); + showUsage(); if(argv[optind][++sp] == '\0') { optind++; sp = 1; @@ -102,4 +103,4 @@ static int getopt(int argc, char **argv, char *opts) { } } return(c); -} \ No newline at end of file +} diff --git a/inference-engine/samples/benchmark_app/README.md b/inference-engine/samples/benchmark_app/README.md index 6709c6758ed..d6aa9544e2c 100644 --- a/inference-engine/samples/benchmark_app/README.md +++ b/inference-engine/samples/benchmark_app/README.md @@ -68,8 +68,8 @@ benchmark_app [OPTION] Options: -h, --help Print a usage message + -m "" Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob files with a trained compiled model. -i "" Optional. Path to a folder with images and/or binaries or to specific image or binary file. - -m "" Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob files with a trained compiled model. -d "" Optional. Specify a target device to infer on (the list of available devices is shown below). Default value is CPU. Use "-d HETERO:" format to specify HETERO plugin. Use "-d MULTI:" format to specify MULTI plugin. 
@@ -143,11 +143,11 @@ This section provides step-by-step instructions on how to run the Benchmark Tool * On CPU: ```sh - ./benchmark_app -m /googlenet-v1.xml -d CPU -api async -i /deployment_tools/demo/car.png --progress true + ./benchmark_app -m /googlenet-v1.xml -i /deployment_tools/demo/car.png -d CPU -api async --progress true ``` * On FPGA: ```sh - ./benchmark_app -m /googlenet-v1.xml -d HETERO:FPGA,CPU -api async -i /deployment_tools/demo/car.png --progress true + ./benchmark_app -m /googlenet-v1.xml -i /deployment_tools/demo/car.png -d HETERO:FPGA,CPU -api async --progress true ``` The application outputs the number of executed iterations, total duration of execution, latency, and throughput. diff --git a/inference-engine/samples/benchmark_app/benchmark_app.hpp b/inference-engine/samples/benchmark_app/benchmark_app.hpp index 6f223f08b78..1a5a5299a02 100644 --- a/inference-engine/samples/benchmark_app/benchmark_app.hpp +++ b/inference-engine/samples/benchmark_app/benchmark_app.hpp @@ -199,8 +199,8 @@ static void showUsage() { std::cout << "Options:" << std::endl; std::cout << std::endl; std::cout << " -h, --help " << help_message << std::endl; - std::cout << " -i \"\" " << input_message << std::endl; std::cout << " -m \"\" " << model_message << std::endl; + std::cout << " -i \"\" " << input_message << std::endl; std::cout << " -d \"\" " << target_device_message << std::endl; std::cout << " -l \"\" " << custom_cpu_library_message << std::endl; std::cout << " Or" << std::endl; diff --git a/inference-engine/samples/classification_sample_async/README.md b/inference-engine/samples/classification_sample_async/README.md index 4d87920dc9a..5d9abb06335 100644 --- a/inference-engine/samples/classification_sample_async/README.md +++ b/inference-engine/samples/classification_sample_async/README.md @@ -38,8 +38,8 @@ classification_sample_async [OPTION] Options: -h Print a usage message. - -i "" Required. 
Path to a folder with images or path to an image files: a .ubyte file for LeNetand a .bmp file for the other networks. -m "" Required. Path to an .xml file with a trained model. + -i "" Required. Path to a folder with images or path to an image files: a .ubyte file for LeNetand a .bmp file for the other networks. -l "" Required for CPU custom layers.Absolute path to a shared library with the kernels implementation Or -c "" Required for GPU custom kernels.Absolute path to the .xml file with kernels description @@ -57,7 +57,7 @@ To run the sample, use AlexNet and GoogLeNet or other public or pre-trained imag You can do inference of an image using a trained AlexNet network on FPGA with fallback to CPU using the following command: ```sh -./classification_sample_async -i /cat.bmp -m /alexnet_fp32.xml -nt 5 -d HETERO:FPGA,CPU +./classification_sample_async -m /alexnet_fp32.xml -i /cat.bmp -d HETERO:FPGA,CPU -nt 5 ``` ## Sample Output diff --git a/inference-engine/samples/classification_sample_async/classification_sample_async.h b/inference-engine/samples/classification_sample_async/classification_sample_async.h index f01ee28dfef..09d2ebb8cb2 100644 --- a/inference-engine/samples/classification_sample_async/classification_sample_async.h +++ b/inference-engine/samples/classification_sample_async/classification_sample_async.h @@ -12,13 +12,13 @@ /// @brief message for help argument static const char help_message[] = "Print a usage message."; +/// @brief message for model argument +static const char model_message[] = "Required. Path to an .xml file with a trained model."; + /// @brief message for images argument static const char image_message[] = "Required. Path to a folder with images or path to an image files: a .ubyte file for LeNet"\ "and a .bmp file for the other networks."; -/// @brief message for model argument -static const char model_message[] = "Required. 
Path to an .xml file with a trained model."; - /// @brief message for assigning cnn calculation to device static const char target_device_message[] = "Optional. Specify the target device to infer on (the list of available devices is shown below). " \ "Default value is CPU. Sample will look for a suitable plugin for device specified."; @@ -72,8 +72,8 @@ static void showUsage() { std::cout << "Options:" << std::endl; std::cout << std::endl; std::cout << " -h " << help_message << std::endl; - std::cout << " -i \"\" " << image_message << std::endl; std::cout << " -m \"\" " << model_message << std::endl; + std::cout << " -i \"\" " << image_message << std::endl; std::cout << " -l \"\" " << custom_cpu_library_message << std::endl; std::cout << " Or" << std::endl; std::cout << " -c \"\" " << custom_cldnn_message << std::endl; diff --git a/inference-engine/samples/classification_sample_async/main.cpp b/inference-engine/samples/classification_sample_async/main.cpp index 2a20eb56e26..a4afc803ced 100644 --- a/inference-engine/samples/classification_sample_async/main.cpp +++ b/inference-engine/samples/classification_sample_async/main.cpp @@ -43,12 +43,12 @@ bool ParseAndCheckCommandLine(int argc, char *argv[]) { } slog::info << "Parsing input parameters" << slog::endl; - if (FLAGS_i.empty()) { - throw std::logic_error("Parameter -i is not set"); + if (FLAGS_m.empty()) { + throw std::logic_error("Model is required but not set. Please set -m option."); } - if (FLAGS_m.empty()) { - throw std::logic_error("Parameter -m is not set"); + if (FLAGS_i.empty()) { + throw std::logic_error("Input is required but not set. 
Please set -i option."); } return true; diff --git a/inference-engine/samples/common/samples/common.hpp b/inference-engine/samples/common/samples/common.hpp index f7867f820f4..cd6948e1e1c 100644 --- a/inference-engine/samples/common/samples/common.hpp +++ b/inference-engine/samples/common/samples/common.hpp @@ -1127,4 +1127,5 @@ inline void showAvailableDevices() { for (const auto& device : devices) { std::cout << " " << device; } + std::cout << std::endl; } diff --git a/inference-engine/samples/hello_classification/main.cpp b/inference-engine/samples/hello_classification/main.cpp index 048f3b0451b..35399401e9c 100644 --- a/inference-engine/samples/hello_classification/main.cpp +++ b/inference-engine/samples/hello_classification/main.cpp @@ -67,7 +67,7 @@ int main(int argc, char *argv[]) { try { // ------------------------------ Parsing and validation of input args --------------------------------- if (argc != 4) { - tcout << "Usage : ./hello_classification " << std::endl; + tcout << "Usage : " << argv[0] << " " << std::endl; return EXIT_FAILURE; } diff --git a/inference-engine/samples/hello_query_device/main.cpp b/inference-engine/samples/hello_query_device/main.cpp index e656dcc09a2..ab8c8b5f8bb 100644 --- a/inference-engine/samples/hello_query_device/main.cpp +++ b/inference-engine/samples/hello_query_device/main.cpp @@ -72,7 +72,7 @@ int main(int argc, char *argv[]) { try { // ------------------------------ Parsing and validation of input args --------------------------------- if (argc != 1) { - std::cout << "Usage : ./hello_query_device" << std::endl; + std::cout << "Usage : "<< argv[0] << std::endl; return EXIT_FAILURE; } diff --git a/inference-engine/samples/hello_reshape_ssd/main.cpp b/inference-engine/samples/hello_reshape_ssd/main.cpp index ef28ece9866..63a7cc8d6ee 100644 --- a/inference-engine/samples/hello_reshape_ssd/main.cpp +++ b/inference-engine/samples/hello_reshape_ssd/main.cpp @@ -19,7 +19,7 @@ int main(int argc, char* argv[]) { try { // 
------------------------------ Parsing and validation of input args --------------------------------- if (argc != 5) { - std::cout << "Usage : ./hello_reshape_ssd " + std::cout << "Usage : "<< argv[0] <<" " << std::endl; return EXIT_FAILURE; } diff --git a/inference-engine/samples/ngraph_function_creation_sample/README.md b/inference-engine/samples/ngraph_function_creation_sample/README.md index 6833441cf09..2eb03539e81 100644 --- a/inference-engine/samples/ngraph_function_creation_sample/README.md +++ b/inference-engine/samples/ngraph_function_creation_sample/README.md @@ -35,7 +35,7 @@ ngraph_function_creation_sample [OPTION] Options: -h Print a usage message. - -m "" Path to a .bin file with weights for the trained model + -m "" Required. Path to a .bin file with weights for the trained model -i "" Required. Path to an image or folder with images -d "" Specify the target device to infer on it. See the list of available devices below. The sample looks for a suitable plugin for the specified device. The default value is CPU. -nt "" Number of top results. The default value is 10. @@ -46,7 +46,7 @@ Available target devices: For example, to do inference of an UByte image on a GPU run the following command: ```sh -./ngraph_function_creation_sample -i -m -d GPU +./ngraph_function_creation_sample -m -i -d GPU ``` ## Sample Output diff --git a/inference-engine/samples/ngraph_function_creation_sample/main.cpp b/inference-engine/samples/ngraph_function_creation_sample/main.cpp index 10757bb1131..057737a6ded 100644 --- a/inference-engine/samples/ngraph_function_creation_sample/main.cpp +++ b/inference-engine/samples/ngraph_function_creation_sample/main.cpp @@ -35,6 +35,14 @@ bool ParseAndCheckCommandLine(int argc, char* argv[]) { throw std::logic_error("Incorrect value for nt argument. It should be greater than 0 and less than 10."); } + if (FLAGS_m.empty()) { + throw std::logic_error("Path to a .bin file with weights for the trained model is required but not set. 
Please set -m option."); + } + + if (FLAGS_i.empty()) { + throw std::logic_error("Path to an image is required but not set. Please set -i option."); + } + return true; } diff --git a/inference-engine/samples/ngraph_function_creation_sample/ngraph_function_creation_sample.hpp b/inference-engine/samples/ngraph_function_creation_sample/ngraph_function_creation_sample.hpp index e6e9d847291..da20c59c7e9 100644 --- a/inference-engine/samples/ngraph_function_creation_sample/ngraph_function_creation_sample.hpp +++ b/inference-engine/samples/ngraph_function_creation_sample/ngraph_function_creation_sample.hpp @@ -16,7 +16,7 @@ static const char help_message[] = "Print a usage message."; static const char input_message[] = "Required. Path to image or folder with images"; /// @brief message for model argument -static const char model_message[] = "Path to a .bin file with weights for the trained model."; +static const char model_message[] = "Required. Path to a .bin file with weights for the trained model."; /// @brief message for assigning cnn calculation to device static const char target_device_message[] = "Specify the target device to infer on it . See the list of available devices below. " \ diff --git a/inference-engine/samples/object_detection_sample_ssd/README.md b/inference-engine/samples/object_detection_sample_ssd/README.md index 9ce4460069f..b0a4f4e8465 100644 --- a/inference-engine/samples/object_detection_sample_ssd/README.md +++ b/inference-engine/samples/object_detection_sample_ssd/README.md @@ -26,8 +26,8 @@ object_detection_sample_ssd [OPTION] Options: -h Print a usage message. - -i "" Required. Path to an .bmp image. -m "" Required. Path to an .xml file with a trained model. + -i "" Required. Path to an .bmp image. -l "" Required for CPU custom layers. Absolute path to a shared library with the kernels implementations. Or -c "" Required for GPU custom kernels. Absolute path to the .xml file with the kernels descriptions. 
@@ -45,11 +45,11 @@ To run the sample, you can use public or pre-trained models. To download the pre For example, to do inference on a CPU with the OpenVINO™ toolkit person detection SSD models, run one of the following commands: ```sh -./object_detection_sample_ssd -i /inputImage.bmp -m person-detection-retail-0013.xml -d CPU +./object_detection_sample_ssd -m person-detection-retail-0013.xml -i /inputImage.bmp -d CPU ``` or ```sh -./object_detection_sample_ssd -i /inputImage.jpg -m person-detection-retail-0002.xml -d CPU +./object_detection_sample_ssd -m person-detection-retail-0002.xml -i /inputImage.jpg -d CPU ``` ## Sample Output diff --git a/inference-engine/samples/object_detection_sample_ssd/main.cpp b/inference-engine/samples/object_detection_sample_ssd/main.cpp index 382e00793f2..fbb23164a11 100644 --- a/inference-engine/samples/object_detection_sample_ssd/main.cpp +++ b/inference-engine/samples/object_detection_sample_ssd/main.cpp @@ -36,12 +36,12 @@ bool ParseAndCheckCommandLine(int argc, char *argv[]) { slog::info << "Parsing input parameters" << slog::endl; - if (FLAGS_i.empty()) { - throw std::logic_error("Parameter -i is not set"); + if (FLAGS_m.empty()) { + throw std::logic_error("Model is required but not set. Please set -m option."); } - if (FLAGS_m.empty()) { - throw std::logic_error("Parameter -m is not set"); + if (FLAGS_i.empty()) { + throw std::logic_error("Input is required but not set. 
Please set -i option."); } return true; diff --git a/inference-engine/samples/object_detection_sample_ssd/object_detection_sample_ssd.h b/inference-engine/samples/object_detection_sample_ssd/object_detection_sample_ssd.h index d62d0cf6d3d..e0f0200b50b 100644 --- a/inference-engine/samples/object_detection_sample_ssd/object_detection_sample_ssd.h +++ b/inference-engine/samples/object_detection_sample_ssd/object_detection_sample_ssd.h @@ -15,12 +15,12 @@ /// @brief message for help argument static const char help_message[] = "Print a usage message."; -/// @brief message for images argument -static const char image_message[] = "Required. Path to an .bmp image."; - /// @brief message for model argument static const char model_message[] = "Required. Path to an .xml file with a trained model."; +/// @brief message for images argument +static const char image_message[] = "Required. Path to an .bmp image."; + /// @brief message for plugin argument static const char plugin_message[] = "Plugin name. For example MKLDNNPlugin. 
If this parameter is pointed, " \ "the sample will look for this plugin only"; @@ -78,8 +78,8 @@ static void showUsage() { std::cout << "Options:" << std::endl; std::cout << std::endl; std::cout << " -h " << help_message << std::endl; - std::cout << " -i \"\" " << image_message << std::endl; std::cout << " -m \"\" " << model_message << std::endl; + std::cout << " -i \"\" " << image_message << std::endl; std::cout << " -l \"\" " << custom_cpu_library_message << std::endl; std::cout << " Or" << std::endl; std::cout << " -c \"\" " << custom_cldnn_message << std::endl; diff --git a/inference-engine/samples/style_transfer_sample/main.cpp b/inference-engine/samples/style_transfer_sample/main.cpp index cc370235c7a..b85ab429eef 100644 --- a/inference-engine/samples/style_transfer_sample/main.cpp +++ b/inference-engine/samples/style_transfer_sample/main.cpp @@ -33,12 +33,12 @@ bool ParseAndCheckCommandLine(int argc, char *argv[]) { return false; } - if (FLAGS_i.empty()) { - throw std::logic_error("Parameter -i is not set"); + if (FLAGS_m.empty()) { + throw std::logic_error("Model is required but not set. Please set -m option."); } - if (FLAGS_m.empty()) { - throw std::logic_error("Parameter -m is not set"); + if (FLAGS_i.empty()) { + throw std::logic_error("Input is required but not set. Please set -i option."); } return true;