Reduced usage of batch in python samples (#3104)

* Reduced usage of batch in python samples

Batch usage was excluded from the hello_classification and object_detection samples
This commit is contained in:
Mikhail Ryzhov
2020-11-17 10:12:33 +03:00
committed by GitHub
parent dc710d6090
commit 23e653858b
3 changed files with 3 additions and 8 deletions

View File

@@ -30,9 +30,8 @@ def build_argparser():
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.') args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.", required=True, args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.", required=True,
type=str) type=str)
args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files", args.add_argument("-i", "--input", help="Required. Path to image file.",
required=True, required=True, type=str)
type=str, nargs="+")
args.add_argument("-l", "--cpu_extension", args.add_argument("-l", "--cpu_extension",
help="Optional. Required for CPU custom layers. " help="Optional. Required for CPU custom layers. "
"MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the" "MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the"
@@ -69,7 +68,6 @@ def main():
log.info("Preparing input blobs") log.info("Preparing input blobs")
input_blob = next(iter(net.input_info)) input_blob = next(iter(net.input_info))
out_blob = next(iter(net.outputs)) out_blob = next(iter(net.outputs))
net.batch_size = len(args.input)
# Read and pre-process input images # Read and pre-process input images
n, c, h, w = net.input_info[input_blob].input_data.shape n, c, h, w = net.input_info[input_blob].input_data.shape
@@ -81,7 +79,6 @@ def main():
image = cv2.resize(image, (w, h)) image = cv2.resize(image, (w, h))
image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW
images[i] = image images[i] = image
log.info("Batch size is {}".format(n))
# Loading model to the plugin # Loading model to the plugin
log.info("Loading model to the plugin") log.info("Loading model to the plugin")

View File

@@ -33,8 +33,7 @@ def build_argparser() -> ArgumentParser:
args = parser.add_argument_group('Options') args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.') args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument('-i', '--input', help='Required. Path to a folder with images or path to an image files', args.add_argument('-i', '--input', help='Required. Path to a folder with images or path to an image files',
required=True, required=True, type=str, nargs="+")
type=str, nargs="+")
args.add_argument('-m', '--model', help='Required. Path to file where weights for the network are located') args.add_argument('-m', '--model', help='Required. Path to file where weights for the network are located')
args.add_argument('-d', '--device', args.add_argument('-d', '--device',
help='Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL, MYRIAD or HETERO: ' help='Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL, MYRIAD or HETERO: '

View File

@@ -112,7 +112,6 @@ def main():
for input_key in net.input_info: for input_key in net.input_info:
if len(net.input_info[input_key].layout) == 4: if len(net.input_info[input_key].layout) == 4:
input_name = input_key input_name = input_key
log.info("Batch size is {}".format(net.batch_size))
net.input_info[input_key].precision = 'U8' net.input_info[input_key].precision = 'U8'
elif len(net.input_info[input_key].layout) == 2: elif len(net.input_info[input_key].layout) == 2:
input_info_name = input_key input_info_name = input_key