diff --git a/tools/pot/openvino/tools/pot/api/samples/classification/README.md b/tools/pot/openvino/tools/pot/api/samples/classification/README.md
index ff330fa2d37..089b1af5bef 100644
--- a/tools/pot/openvino/tools/pot/api/samples/classification/README.md
+++ b/tools/pot/openvino/tools/pot/api/samples/classification/README.md
@@ -32,7 +32,7 @@ How to Run the Example
 
 .. code-block:: sh
 
-   python3 ./classification_example.py -m -a -d
+   python3 ./classification_sample.py -m -a -d
 
 Optional: you can specify .bin file of IR directly using the ``-w``, ``--weights`` options.
 
diff --git a/tools/pot/openvino/tools/pot/api/samples/classification/classification_sample.py b/tools/pot/openvino/tools/pot/api/samples/classification/classification_sample.py
index 33582386737..f37448f2c48 100644
--- a/tools/pot/openvino/tools/pot/api/samples/classification/classification_sample.py
+++ b/tools/pot/openvino/tools/pot/api/samples/classification/classification_sample.py
@@ -210,10 +210,9 @@ def optimize_model(args):
 
     # Step 6: Execute the pipeline.
     compressed_model = pipeline.run(model)
 
-    # Step 7 (Optional): Compress model weights quantized precision
-    # in order to reduce the size of final .bin file.
-    if not args.keep_uncompressed_weights:
-        compress_model_weights(compressed_model)
+    # Step 7: Compress model weights quantized precision
+    # in order to reduce the size of final .bin file.
+    compress_model_weights(compressed_model)
 
     return compressed_model, pipeline