feat: linters for IE Py API, wheel, samples (#5352)

* feat: linters for IE Py API, wheel, samples
* fix indent after auto-formatter
* ignore formatting for argument parsing

This commit is contained in:
parent 4daa74303a
commit 10f3d7e065

62  .github/workflows/py_checks.yml (vendored)
@@ -1,13 +1,13 @@
 name: IE Python Checks
 # TODO: add for IE API, wheels

 on:
   workflow_dispatch:
   push:
     paths:
-      - 'inference-engine/ie_bridges/python/sample/**'
+      - 'inference-engine/ie_bridges/python/**'
   pull_request:
     paths:
-      - 'inference-engine/ie_bridges/python/sample/**'
+      - 'inference-engine/ie_bridges/python/**'
 jobs:
   linters:
     runs-on: ubuntu-18.04
@@ -21,13 +21,55 @@ jobs:
       with:
         python-version: '3.6'
     - name: Install dependencies
-      run: python -m pip install -r inference-engine/ie_bridges/python/sample/requirements_dev.txt
-    - name: Run Flake
-      run: python -m flake8 ./ --config=setup.cfg --show-source
-    - name: Run MyPy
-      run: python -m mypy ./ --config-file ./setup.cfg --show-error-context --show-column-numbers --pretty
-      working-directory: inference-engine/ie_bridges/python/sample
+      run: python -m pip install -r inference-engine/ie_bridges/python/requirements_dev.txt
+    - name: Run Flake on samples
+      run: python -m flake8 ./ --config=../setup.cfg
+      working-directory: inference-engine/ie_bridges/python/sample
+    - name: Create code style diff for samples
+      if: failure()
+      run: |
+        python -m black -l 160 -S ./
+        git diff > samples_diff.diff
+      working-directory: inference-engine/ie_bridges/python/sample
+    - uses: actions/upload-artifact@v2
+      if: failure()
+      with:
+        name: samples_diff
+        path: samples_diff.diff
+    - name: Run Flake on src
+      run: python -m flake8 ./ --config=../setup.cfg
+      working-directory: inference-engine/ie_bridges/python/src
+    - name: Create code style diff for Python src
+      if: failure()
+      run: |
+        python -m black -l 160 -S ./
+        git diff > src_diff.diff
+      working-directory: inference-engine/ie_bridges/python/src
+    - uses: actions/upload-artifact@v2
+      if: failure()
+      with:
+        name: src_diff
+        path: src_diff.diff
+    - name: Run Flake on wheel
+      run: python -m flake8 ./ --config=../setup.cfg
+      working-directory: inference-engine/ie_bridges/python/wheel
+    - name: Create code style diff for wheel
+      if: failure()
+      run: |
+        python -m black -l 160 -S ./
+        git diff > wheel_diff.diff
+      working-directory: inference-engine/ie_bridges/python/wheel
+    - uses: actions/upload-artifact@v2
+      if: failure()
+      with:
+        name: wheel_diff
+        path: wheel_diff.diff

+    - name: Run MyPy
+      run: python -m mypy ./ --config-file ./setup.cfg
+      working-directory: inference-engine/ie_bridges/python
     - name: Run Bandit
       run: python -m bandit -r ./ -f screen
-      working-directory: inference-engine/ie_bridges/python/sample
+      working-directory: inference-engine/ie_bridges/python
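For contributors, the same checks can be reproduced before pushing. A minimal sketch (not part of the commit), assuming the dev requirements above are installed and the working directory is inference-engine/ie_bridges/python/sample; the three commands are taken verbatim from the workflow steps:

    import subprocess

    # flake8 uses the shared config one level up; mypy reads the local setup.cfg
    # (the old CLI flags now live there); black -S keeps the single-quote style.
    for cmd in ('python -m flake8 ./ --config=../setup.cfg',
                'python -m mypy ./ --config-file ./setup.cfg',
                'python -m black -l 160 -S ./'):
        subprocess.run(cmd.split(), check=False)  # keep going so every tool gets to report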
2  inference-engine/ie_bridges/python/.bandit (new file)
@@ -0,0 +1,2 @@
+[bandit]
+skips: B101
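B101 is bandit's assert_used check; skipping it project-wide keeps bandit from flagging every assert in sample and test code. The kind of line it would otherwise report (a hypothetical snippet, not from this commit):

    def check_topology(net):
        # bandit reports bare asserts (B101) because `python -O` strips them;
        # for samples and tests that trade-off is acceptable, hence the skip above
        assert len(net.outputs) == 1, 'Sample supports only single output topologies'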
--- next file: likely the requirements_dev.txt referenced by the workflow above ---
@@ -1,7 +1,7 @@
bandit
black
flake8
flake8-annotations-complexity
flake8-bandit
flake8-broken-line
flake8-bugbear
flake8-class-attributes-order
--- next file: a classification sample using asynchronous inference (filename not captured) ---
@@ -15,6 +15,7 @@ def parse_args() -> argparse.Namespace:
     """Parse and return command line arguments"""
     parser = argparse.ArgumentParser(add_help=False)
     args = parser.add_argument_group('Options')
+    # fmt: off
     args.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
     args.add_argument('-m', '--model', required=True, type=str,
                       help='Required. Path to an .xml or .onnx file with a trained model.')
@@ -31,7 +32,7 @@ def parse_args() -> argparse.Namespace:
                       'Default value is CPU.')
     args.add_argument('--labels', default=None, type=str, help='Optional. Path to a labels mapping file.')
     args.add_argument('-nt', '--number_top', default=10, type=int, help='Optional. Number of top results.')

+    # fmt: on
     return parser.parse_args()
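The `# fmt: off` / `# fmt: on` pair is a black directive: everything between the two comments is left exactly as written, which is what the commit message means by "ignore formatting for argument parsing". In miniature (a sketch):

    # fmt: off
    args.add_argument('-m', '--model', required=True, type=str,
                      help='Required. Path to an .xml or .onnx file with a trained model.')
    # fmt: on
    # without the guard, black -l 160 would rewrap the hand-aligned help= continuations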
@@ -39,7 +40,7 @@ def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
     args = parse_args()

-    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
+    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
     log.info('Creating Inference Engine')
     ie = IECore()

@@ -51,7 +52,7 @@ def main():
         log.info(f'Loading the {args.device} configuration: {args.config}')
         ie.set_config({'CONFIG_FILE': args.config}, args.device)

-    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
+    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
     log.info(f'Reading the network: {args.model}')
     # (.xml and .bin files) or (.onnx file)
     net = ie.read_network(model=args.model)
@@ -63,7 +64,7 @@ def main():
         log.error('Sample supports only single output topologies')
         return -1

-    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
+    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
     log.info('Configuring input and output blobs')
     # Get names of input and output blobs
     input_blob = next(iter(net.input_info))
@@ -78,15 +79,15 @@ def main():
     # Get a number of classes recognized by a model
     num_of_classes = max(net.outputs[out_blob].shape)

-    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
+    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
     log.info('Loading the model to the plugin')
     exec_net = ie.load_network(network=net, device_name=args.device, num_requests=num_of_input)

-    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
-    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
-    # instance which stores infer requests. So you already created Infer requests in the previous step.
+    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
+    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
+    # instance which stores infer requests. So you already created Infer requests in the previous step.

-    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
+    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
     input_data = []
     _, _, h, w = net.input_info[input_blob].input_data.shape

@@ -104,12 +105,12 @@ def main():

         input_data.append(image)

-    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
+    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
     log.info('Starting inference in asynchronous mode')
     for i in range(num_of_input):
         exec_net.requests[i].async_infer({input_blob: input_data[i]})

-    # ---------------------------Step 8. Process output--------------------------------------------------------------------
+    # ---------------------------Step 8. Process output--------------------------------------------------------------------
     # Generate a label list
     if args.labels:
         with open(args.labels, 'r') as f:
@@ -136,7 +137,7 @@ def main():
         # Change a shape of a numpy.ndarray with results to get another one with one dimension
         probs = res.reshape(num_of_classes)
         # Get an array of args.number_top class IDs in descending order of probability
-        top_n_idexes = np.argsort(probs)[-args.number_top:][::-1]
+        top_n_idexes = np.argsort(probs)[-args.number_top :][::-1]

         header = 'classid probability'
         header = header + ' label' if args.labels else header
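The added space in `[-args.number_top :]` is black's slice style: when a slice bound is an expression rather than a plain name or number, black pads the colon, and flake8's E203 ("whitespace before ':'") fires on exactly that, which is why the new shared setup.cfg later in this commit carries `ignore = E203`. Side by side (a sketch):

    probs[:10]                 # simple bound: black leaves it tight
    probs[-args.number_top :]  # expression bound: black pads the colon; E203 must be ignored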
@@ -158,9 +159,8 @@ def main():
        if len(output_queue) == 0:
            break

-    # ----------------------------------------------------------------------------------------------------------------------
-    log.info('This sample is an API example, '
-             'for any performance measurements please use the dedicated benchmark_app tool\n')
+    # ----------------------------------------------------------------------------------------------------------------------
+    log.info('This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n')
     return 0
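The asynchronous pattern this sample demonstrates, reduced to its two moves (a sketch built from calls visible in the hunks above; the drain-loop details are elided here as they are in the diff):

    # fire one request per input without blocking...
    for i in range(num_of_input):
        exec_net.requests[i].async_infer({input_blob: input_data[i]})
    # ...then poll the requests (the sample's output_queue loop) and read each
    # result buffer once its request reports completion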
--- next file: a classification sample using synchronous inference (filename not captured) ---
@@ -15,6 +15,7 @@ def parse_args() -> argparse.Namespace:
     """Parse and return command line arguments"""
     parser = argparse.ArgumentParser(add_help=False)
     args = parser.add_argument_group('Options')
+    # fmt: off
     args.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
     args.add_argument('-m', '--model', required=True, type=str,
                       help='Required. Path to an .xml or .onnx file with a trained model.')
@@ -25,7 +26,7 @@ def parse_args() -> argparse.Namespace:
                       'Default value is CPU.')
     args.add_argument('--labels', default=None, type=str, help='Optional. Path to a labels mapping file.')
     args.add_argument('-nt', '--number_top', default=10, type=int, help='Optional. Number of top results.')

+    # fmt: on
     return parser.parse_args()

@@ -33,11 +34,11 @@ def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
     args = parse_args()

-    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
+    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
     log.info('Creating Inference Engine')
     ie = IECore()

-    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
+    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
     log.info(f'Reading the network: {args.model}')
     # (.xml and .bin files) or (.onnx file)
     net = ie.read_network(model=args.model)
@@ -49,7 +50,7 @@ def main():
         log.error('Sample supports only single output topologies')
         return -1

-    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
+    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
     log.info('Configuring input and output blobs')
     # Get names of input and output blobs
     input_blob = next(iter(net.input_info))
@@ -62,15 +63,15 @@ def main():
     # Get a number of classes recognized by a model
     num_of_classes = max(net.outputs[out_blob].shape)

-    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
+    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
     log.info('Loading the model to the plugin')
     exec_net = ie.load_network(network=net, device_name=args.device)

-    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
-    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
-    # instance which stores infer requests. So you already created Infer requests in the previous step.
+    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
+    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
+    # instance which stores infer requests. So you already created Infer requests in the previous step.

-    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
+    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
     original_image = cv2.imread(args.input)
     image = original_image.copy()
     _, _, h, w = net.input_info[input_blob].input_data.shape
@@ -84,11 +85,11 @@ def main():
     # Add N dimension to transform to NCHW
     image = np.expand_dims(image, axis=0)

-    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
+    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
     log.info('Starting inference in synchronous mode')
     res = exec_net.infer(inputs={input_blob: image})

-    # ---------------------------Step 8. Process output--------------------------------------------------------------------
+    # ---------------------------Step 8. Process output--------------------------------------------------------------------
     # Generate a label list
     if args.labels:
         with open(args.labels, 'r') as f:
@@ -98,7 +99,7 @@ def main():
     # Change a shape of a numpy.ndarray with results to get another one with one dimension
     probs = res.reshape(num_of_classes)
     # Get an array of args.number_top class IDs in descending order of probability
-    top_n_idexes = np.argsort(probs)[-args.number_top:][::-1]
+    top_n_idexes = np.argsort(probs)[-args.number_top :][::-1]

     header = 'classid probability'
     header = header + ' label' if args.labels else header
@@ -115,9 +116,8 @@ def main():
         log.info(f'{class_id}{probability_indent}{probs[class_id]:.7f}{label_indent}{label}')
     log.info('')

-    # ----------------------------------------------------------------------------------------------------------------------
-    log.info('This sample is an API example, '
-             'for any performance measurements please use the dedicated benchmark_app tool\n')
+    # ----------------------------------------------------------------------------------------------------------------------
+    log.info('This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n')
     return 0
--- next file: a device query sample (filename not captured) ---
@@ -19,11 +19,11 @@ def param_to_string(metric) -> str:
 def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)

-    # ---------------------------Initialize inference engine core----------------------------------------------------------
+    # ---------------------------Initialize inference engine core----------------------------------------------------------
     log.info('Creating Inference Engine')
     ie = IECore()

-    # ---------------------------Get metrics of available devices----------------------------------------------------------
+    # ---------------------------Get metrics of available devices----------------------------------------------------------
     log.info('Available devices:')
     for device in ie.available_devices:
         log.info(f'{device} :')
@@ -46,7 +46,7 @@ def main():
             log.info(f'\t\t{config_key}: {param_to_string(config_val)}')
         log.info('')

-    # ----------------------------------------------------------------------------------------------------------------------
+    # ----------------------------------------------------------------------------------------------------------------------
     return 0
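What this sample boils down to (a sketch using only calls that appear in the hunks above):

    ie = IECore()
    for device in ie.available_devices:
        log.info(f'{device} :')
        # per-device metrics and config values are then formatted via param_to_string()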
--- next file: an SSD sample that reshapes the network input (filename not captured) ---
@@ -15,6 +15,7 @@ def parse_args() -> argparse.Namespace:
     """Parse and return command line arguments"""
     parser = argparse.ArgumentParser(add_help=False)
     args = parser.add_argument_group('Options')
+    # fmt: off
     args.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
     args.add_argument('-m', '--model', required=True, type=str,
                       help='Required. Path to an .xml or .onnx file with a trained model.')
@@ -30,7 +31,7 @@ def parse_args() -> argparse.Namespace:
                       'is acceptable. The sample will look for a suitable plugin for device specified. '
                       'Default value is CPU.')
     args.add_argument('--labels', default=None, type=str, help='Optional. Path to a labels mapping file.')

+    # fmt: on
     return parser.parse_args()

@@ -38,7 +39,7 @@ def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
     args = parse_args()

-    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
+    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
     log.info('Creating Inference Engine')
     ie = IECore()

@@ -50,7 +51,7 @@ def main():
         log.info(f'Loading the {args.device} configuration: {args.config}')
         ie.set_config({'CONFIG_FILE': args.config}, args.device)

-    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
+    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
     log.info(f'Reading the network: {args.model}')
     # (.xml and .bin files) or (.onnx file)
     net = ie.read_network(model=args.model)
@@ -62,7 +63,7 @@ def main():
         log.error('Sample supports only single output topologies')
         return -1

-    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
+    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
     log.info('Configuring input and output blobs')
     # Get names of input and output blobs
     input_blob = next(iter(net.input_info))
@@ -85,22 +86,22 @@ def main():
     net.reshape({input_blob: image.shape})
     log.info(f'Input shape after reshape: {net.input_info[input_blob].input_data.shape}')

-    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
+    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
     log.info('Loading the model to the plugin')
     exec_net = ie.load_network(network=net, device_name=args.device)

-    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
-    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
-    # instance which stores infer requests. So you already created Infer requests in the previous step.
+    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
+    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
+    # instance which stores infer requests. So you already created Infer requests in the previous step.

-    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
-    # This sample changes a network input layer shape instead of a image shape. See Step 4.
+    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
+    # This sample changes a network input layer shape instead of a image shape. See Step 4.

-    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
+    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
     log.info('Starting inference in synchronous mode')
     res = exec_net.infer(inputs={input_blob: image})

-    # ---------------------------Step 8. Process output--------------------------------------------------------------------
+    # ---------------------------Step 8. Process output--------------------------------------------------------------------
     # Generate a label list
     if args.labels:
         with open(args.labels, 'r') as f:
@@ -126,8 +127,7 @@ def main():
             xmax = int(detection[5] * w)
             ymax = int(detection[6] * h)

-            log.info(f'Found: label = {label}, confidence = {confidence:.2f}, '
-                     f'coords = ({xmin}, {ymin}), ({xmax}, {ymax})')
+            log.info(f'Found: label = {label}, confidence = {confidence:.2f}, ' f'coords = ({xmin}, {ymin}), ({xmax}, {ymax})')

             # Draw a bounding box on a output image
             cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
@@ -135,9 +135,8 @@ def main():
     cv2.imwrite('out.bmp', output_image)
     log.info('Image out.bmp was created!')

-    # ----------------------------------------------------------------------------------------------------------------------
-    log.info('This sample is an API example, '
-             'for any performance measurements please use the dedicated benchmark_app tool\n')
+    # ----------------------------------------------------------------------------------------------------------------------
+    log.info('This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n')
     return 0
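The reshape idea this sample demonstrates, isolated (a sketch; both statements appear verbatim in the hunks above):

    # fit the network input layer to the image instead of resizing the image:
    net.reshape({input_blob: image.shape})
    exec_net = ie.load_network(network=net, device_name=args.device)  # load after reshaping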
--- next file: a sample that builds the network with ngraph (filename not captured) ---
@@ -19,6 +19,7 @@ def parse_args() -> argparse.Namespace:
     """Parse and return command line arguments"""
     parser = argparse.ArgumentParser(add_help=False)
     args = parser.add_argument_group('Options')
+    # fmt: off
     args.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
     args.add_argument('-m', '--model', required=True, type=str,
                       help='Required. Path to a file with network weights.')
@@ -29,7 +30,7 @@ def parse_args() -> argparse.Namespace:
                       'Default value is CPU.')
     args.add_argument('--labels', default=None, type=str, help='Optional. Path to a labels mapping file.')
     args.add_argument('-nt', '--number_top', default=10, type=int, help='Optional. Number of top results.')

+    # fmt: on
     return parser.parse_args()

@@ -40,7 +41,7 @@ def read_image(image_path: str) -> np.ndarray:
     # Try to open image as ubyte
     if image is None:
         with open(image_path, 'rb') as f:
-            st.unpack('>4B', f.read(4))  # need to skip 4 bytes
+            st.unpack('>4B', f.read(4))  # need to skip 4 bytes
             nimg = st.unpack('>I', f.read(4))[0]  # number of images
             nrow = st.unpack('>I', f.read(4))[0]  # number of rows
             ncolumn = st.unpack('>I', f.read(4))[0]  # number of column
@@ -78,7 +79,7 @@ def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
     # add 1
     add_1_kernel_shape, add_1_kernel_length = shape_and_length([1, 20, 1, 1])
     add_1_kernel = ngraph.constant(
-        weights[weights_offset:weights_offset + add_1_kernel_length].reshape(add_1_kernel_shape),
+        weights[weights_offset : weights_offset + add_1_kernel_length].reshape(add_1_kernel_shape),
     )
     weights_offset += add_1_kernel_length
     add_1_node = ngraph.add(conv_1_node, add_1_kernel)
@@ -89,7 +90,7 @@ def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
     # convolution 2
     conv_2_kernel_shape, conv_2_kernel_length = shape_and_length([50, 20, 5, 5])
     conv_2_kernel = ngraph.constant(
-        weights[weights_offset:weights_offset + conv_2_kernel_length].reshape(conv_2_kernel_shape),
+        weights[weights_offset : weights_offset + conv_2_kernel_length].reshape(conv_2_kernel_shape),
     )
     weights_offset += conv_2_kernel_length
     conv_2_node = ngraph.convolution(maxpool_1_node, conv_2_kernel, [1, 1], padding_begin, padding_end, [1, 1])
@@ -97,7 +98,7 @@ def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
     # add 2
     add_2_kernel_shape, add_2_kernel_length = shape_and_length([1, 50, 1, 1])
     add_2_kernel = ngraph.constant(
-        weights[weights_offset:weights_offset + add_2_kernel_length].reshape(add_2_kernel_shape),
+        weights[weights_offset : weights_offset + add_2_kernel_length].reshape(add_2_kernel_shape),
     )
     weights_offset += add_2_kernel_length
     add_2_node = ngraph.add(conv_2_node, add_2_kernel)
@@ -109,7 +110,8 @@ def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
     reshape_1_dims, reshape_1_length = shape_and_length([2])
     # workaround to get int64 weights from float32 ndarray w/o unnecessary copying
     dtype_weights = np.frombuffer(
-        weights[weights_offset:weights_offset + 2 * reshape_1_length], dtype=np.int64,
+        weights[weights_offset : weights_offset + 2 * reshape_1_length],
+        dtype=np.int64,
     )
     reshape_1_kernel = ngraph.constant(dtype_weights)
     weights_offset += 2 * reshape_1_length
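The np.frombuffer call above reinterprets a slice of the float32 weight buffer as int64 without copying: 2 * reshape_1_length float32 elements (4 bytes each) supply exactly reshape_1_length int64 values (8 bytes each). Stand-alone, with made-up sizes (a sketch):

    import numpy as np

    weights = np.zeros(8, dtype=np.float32)             # hypothetical weight buffer
    ints = np.frombuffer(weights[0:4], dtype=np.int64)  # 4 x float32 = 16 bytes -> 2 x int64, zero copy
    assert ints.shape == (2,)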
@@ -118,7 +120,7 @@ def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
     # matmul 1
     matmul_1_kernel_shape, matmul_1_kernel_length = shape_and_length([500, 800])
     matmul_1_kernel = ngraph.constant(
-        weights[weights_offset:weights_offset + matmul_1_kernel_length].reshape(matmul_1_kernel_shape),
+        weights[weights_offset : weights_offset + matmul_1_kernel_length].reshape(matmul_1_kernel_shape),
     )
     weights_offset += matmul_1_kernel_length
     matmul_1_node = ngraph.matmul(reshape_1_node, matmul_1_kernel, False, True)
@@ -126,7 +128,7 @@ def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
     # add 3
     add_3_kernel_shape, add_3_kernel_length = shape_and_length([1, 500])
     add_3_kernel = ngraph.constant(
-        weights[weights_offset:weights_offset + add_3_kernel_length].reshape(add_3_kernel_shape),
+        weights[weights_offset : weights_offset + add_3_kernel_length].reshape(add_3_kernel_shape),
     )
     weights_offset += add_3_kernel_length
     add_3_node = ngraph.add(matmul_1_node, add_3_kernel)
@@ -141,7 +143,7 @@ def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
     # matmul 2
     matmul_2_kernel_shape, matmul_2_kernel_length = shape_and_length([10, 500])
     matmul_2_kernel = ngraph.constant(
-        weights[weights_offset:weights_offset + matmul_2_kernel_length].reshape(matmul_2_kernel_shape),
+        weights[weights_offset : weights_offset + matmul_2_kernel_length].reshape(matmul_2_kernel_shape),
     )
     weights_offset += matmul_2_kernel_length
     matmul_2_node = ngraph.matmul(reshape_2_node, matmul_2_kernel, False, True)
@@ -149,7 +151,7 @@ def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
     # add 4
     add_4_kernel_shape, add_4_kernel_length = shape_and_length([1, 10])
     add_4_kernel = ngraph.constant(
-        weights[weights_offset:weights_offset + add_4_kernel_length].reshape(add_4_kernel_shape),
+        weights[weights_offset : weights_offset + add_4_kernel_length].reshape(add_4_kernel_shape),
     )
     weights_offset += add_4_kernel_length
     add_4_node = ngraph.add(matmul_2_node, add_4_kernel)
@@ -167,16 +169,16 @@ def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
     args = parse_args()

-    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
+    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
     log.info('Creating Inference Engine')
     ie = IECore()

-    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation------------------------------
+    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation------------------------------
     log.info(f'Loading the network using ngraph function with weights from {args.model}')
     ngraph_function = create_ngraph_function(args)
     net = IENetwork(ngraph.impl.Function.to_capsule(ngraph_function))

-    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
+    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
     log.info('Configuring input and output blobs')
     # Get names of input and output blobs
     input_blob = next(iter(net.input_info))
@@ -189,15 +191,15 @@ def main():
     # Set a batch size to a equal number of input images
     net.batch_size = len(args.input)

-    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
+    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
     log.info('Loading the model to the plugin')
     exec_net = ie.load_network(network=net, device_name=args.device)

-    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
-    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
-    # instance which stores infer requests. So you already created Infer requests in the previous step.
+    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
+    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
+    # instance which stores infer requests. So you already created Infer requests in the previous step.

-    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
+    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
     n, c, h, w = net.input_info[input_blob].input_data.shape
     input_data = np.ndarray(shape=(n, c, h, w))

@@ -218,11 +220,11 @@ def main():

         input_data[i] = image

-    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
+    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
     log.info('Starting inference in synchronous mode')
     res = exec_net.infer(inputs={input_blob: input_data})

-    # ---------------------------Step 8. Process output--------------------------------------------------------------------
+    # ---------------------------Step 8. Process output--------------------------------------------------------------------
     # Generate a label list
     if args.labels:
         with open(args.labels, 'r') as f:
@@ -233,7 +235,7 @@ def main():
     for i in range(n):
         probs = res[i]
         # Get an array of args.number_top class IDs in descending order of probability
-        top_n_idexes = np.argsort(probs)[-args.number_top:][::-1]
+        top_n_idexes = np.argsort(probs)[-args.number_top :][::-1]

         header = 'classid probability'
         header = header + ' label' if args.labels else header
@@ -250,9 +252,8 @@ def main():
         log.info(f'{class_id}{probability_indent}{probs[class_id]:.7f}{label_indent}{label}')
     log.info('')

-    # ----------------------------------------------------------------------------------------------------------------------
-    log.info('This sample is an API example, '
-             'for any performance measurements please use the dedicated benchmark_app tool\n')
+    # ----------------------------------------------------------------------------------------------------------------------
+    log.info('This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n')
     return 0
--- next file: an object detection (SSD) sample (filename not captured) ---
@@ -15,6 +15,7 @@ def parse_args() -> argparse.Namespace:
     """Parse and return command line arguments"""
     parser = argparse.ArgumentParser(add_help=False)
     args = parser.add_argument_group('Options')
+    # fmt: off
     args.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
     args.add_argument('-m', '--model', required=True, type=str,
                       help='Required. Path to an .xml or .onnx file with a trained model.')
@@ -30,7 +31,7 @@ def parse_args() -> argparse.Namespace:
                       'is acceptable. The sample will look for a suitable plugin for device specified. '
                       'Default value is CPU.')
     args.add_argument('--labels', default=None, type=str, help='Optional. Path to a labels mapping file.')

+    # fmt: on
     return parser.parse_args()

@@ -38,7 +39,7 @@ def main():  # noqa
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
     args = parse_args()

-    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
+    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
     log.info('Creating Inference Engine')
     ie = IECore()

@@ -50,20 +51,20 @@ def main():  # noqa
         log.info(f'Loading the {args.device} configuration: {args.config}')
         ie.set_config({'CONFIG_FILE': args.config}, args.device)

-    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
+    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
     log.info(f'Reading the network: {args.model}')
     # (.xml and .bin files) or (.onnx file)
     net = ie.read_network(model=args.model)

     if len(net.input_info) != 1:
         log.error('The sample supports only single input topologies')
-        return - 1
+        return -1

     if len(net.outputs) != 1 and not ('boxes' in net.outputs or 'labels' in net.outputs):
         log.error('The sample supports models with 1 output or with 2 with the names "boxes" and "labels"')
         return -1

-    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
+    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
     log.info('Configuring input and output blobs')
     # Get name of input blob
     input_blob = next(iter(net.input_info))
@@ -78,15 +79,15 @@ def main():  # noqa
         net.outputs['boxes'].precision = 'FP32'
         net.outputs['labels'].precision = 'U16'

-    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
+    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
     log.info('Loading the model to the plugin')
     exec_net = ie.load_network(network=net, device_name=args.device)

-    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
-    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
-    # instance which stores infer requests. So you already created Infer requests in the previous step.
+    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
+    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
+    # instance which stores infer requests. So you already created Infer requests in the previous step.

-    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
+    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
     original_image = cv2.imread(args.input)
     image = original_image.copy()
     _, _, net_h, net_w = net.input_info[input_blob].input_data.shape
@@ -100,11 +101,11 @@ def main():  # noqa
     # Add N dimension to transform to NCHW
     image = np.expand_dims(image, axis=0)

-    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
+    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
     log.info('Starting inference in synchronous mode')
     res = exec_net.infer(inputs={input_blob: image})

-    # ---------------------------Step 8. Process output--------------------------------------------------------------------
+    # ---------------------------Step 8. Process output--------------------------------------------------------------------
     # Generate a label list
     if args.labels:
         with open(args.labels, 'r') as f:
@@ -139,8 +140,7 @@ def main():  # noqa
         xmax = int(xmax * w)
         ymax = int(ymax * h)

-        log.info(f'Found: label = {label}, confidence = {confidence:.2f}, '
-                 f'coords = ({xmin}, {ymin}), ({xmax}, {ymax})')
+        log.info(f'Found: label = {label}, confidence = {confidence:.2f}, ' f'coords = ({xmin}, {ymin}), ({xmax}, {ymax})')

         # Draw a bounding box on a output image
         cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
@@ -148,9 +148,8 @@ def main():  # noqa
     cv2.imwrite('out.bmp', output_image)
     log.info('Image out.bmp created!')

-    # ----------------------------------------------------------------------------------------------------------------------
-    log.info('This sample is an API example, '
-             'for any performance measurements please use the dedicated benchmark_app tool\n')
+    # ----------------------------------------------------------------------------------------------------------------------
+    log.info('This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n')
     return 0
--- deleted file: the old per-sample lint config (a setup.cfg; path not captured) ---
@@ -1,12 +0,0 @@
-[flake8]
-max-line-length = 120
-max-parameters-amount = 8
-show_source = True
-docstring-convention = google
-enable-extensions=G
-
-[pydocstyle]
-convention = google
-
-[mypy]
-ignore_missing_imports = True
--- next file: a style transfer sample (filename not captured) ---
@@ -15,6 +15,7 @@ def parse_args() -> argparse.Namespace:
     """Parse and return command line arguments"""
     parser = argparse.ArgumentParser(add_help=False)
     args = parser.add_argument_group('Options')
+    # fmt: off
     args.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
     args.add_argument('-m', '--model', required=True, type=str,
                       help='Required. Path to an .xml or .onnx file with a trained model.')
@@ -37,7 +38,7 @@ def parse_args() -> argparse.Namespace:
                       help='Optional. Mean value of green channel for mean value subtraction in postprocessing.')
     args.add_argument('--mean_val_b', default=0, type=float,
                       help='Optional. Mean value of blue channel for mean value subtraction in postprocessing.')

+    # fmt: on
     return parser.parse_args()

@@ -45,7 +46,7 @@ def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
     args = parse_args()

-    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
+    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
     log.info('Creating Inference Engine')
     ie = IECore()

@@ -57,7 +58,7 @@ def main():
         log.info(f'Loading the {args.device} configuration: {args.config}')
         ie.set_config({'CONFIG_FILE': args.config}, args.device)

-    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
+    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
     log.info(f'Reading the network: {args.model}')
     # (.xml and .bin files) or (.onnx file)
     net = ie.read_network(model=args.model)
@@ -69,7 +70,7 @@ def main():
         log.error('Sample supports only single output topologies')
         return -1

-    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
+    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
     log.info('Configuring input and output blobs')
     # Get names of input and output blobs
     input_blob = next(iter(net.input_info))
@@ -82,15 +83,15 @@ def main():
     # Set a batch size to a equal number of input images
     net.batch_size = len(args.input)

-    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
+    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
     log.info('Loading the model to the plugin')
     exec_net = ie.load_network(network=net, device_name=args.device)

-    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
-    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
-    # instance which stores infer requests. So you already created Infer requests in the previous step.
+    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
+    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
+    # instance which stores infer requests. So you already created Infer requests in the previous step.

-    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
+    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
     original_images = []

     n, c, h, w = net.input_info[input_blob].input_data.shape
@@ -109,11 +110,11 @@ def main():

         input_data[i] = image

-    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
+    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
     log.info('Starting inference in synchronous mode')
     res = exec_net.infer(inputs={input_blob: input_data})

-    # ---------------------------Step 8. Process output--------------------------------------------------------------------
+    # ---------------------------Step 8. Process output--------------------------------------------------------------------
     res = res[out_blob]

     for i in range(n):
@@ -136,9 +137,8 @@ def main():
         cv2.imwrite(f'out_{i}.bmp', output_image)
         log.info(f'Image out_{i}.bmp created!')

-    # ----------------------------------------------------------------------------------------------------------------------
-    log.info('This sample is an API example, '
-             'for any performance measurements please use the dedicated benchmark_app tool\n')
+    # ----------------------------------------------------------------------------------------------------------------------
+    log.info('This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n')
     return 0
23  inference-engine/ie_bridges/python/setup.cfg (new file)
@@ -0,0 +1,23 @@
+[flake8]
+filename = *.py, *.pyx
+max-line-length = 160
+ignore = E203
+max-parameters-amount = 8
+show_source = True
+docstring-convention = google
+enable-extensions = G
+per-file-ignores =
+    *.pyx: E225, E226, E251, E999, E800, E265, E203, E266, E227, E211
+    tests/*: S101, T001
+    *__init__.py: F403, F405, F405
+
+[pydocstyle]
+convention = google
+
+[mypy]
+ignore_missing_imports = True
+disable_error_code = attr-defined
+show_column_numbers = True
+show_error_context = True
+show_absolute_path = True
+pretty = True
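Two of these entries matter immediately in the hunks that follow: `*__init__.py: F403, F405` legitimizes the package's star import, and the `[mypy]` options replace the CLI flags the workflow used to pass (--show-error-context, --show-column-numbers, --pretty). In miniature, what the __init__ exemption permits (a sketch mirroring the package __init__ shown below):

    from .ie_api import *                        # F403 (star import) ignored for __init__.py
    __version__ = get_version()  # type: ignore  # the name only exists via the star import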
--- next file: a module header (filename not captured) ---
@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2018-2021 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

--- next file: the inference_engine package __init__ (path not captured) ---
@@ -1,10 +1,11 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2018-2021 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

 import os
 import sys

-if sys.platform == "win32":
+if sys.platform == 'win32':
     # Installer, yum, pip installs openvino dlls to the different directories
     # and those paths need to be visible to the openvino modules
     #
@@ -24,9 +25,9 @@ if sys.platform == "win32":
     if (3, 8) <= sys.version_info:
         os.add_dll_directory(os.path.abspath(lib_path))
     else:
-        os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"]
+        os.environ['PATH'] = os.path.abspath(lib_path) + ';' + os.environ['PATH']

 from .ie_api import *
-__all__ = ['IENetwork', "TensorDesc", "IECore", "Blob", "PreProcessInfo", "get_version"]
-__version__ = get_version()
+__all__ = ['IENetwork', 'TensorDesc', 'IECore', 'Blob', 'PreProcessInfo', 'get_version']
+__version__ = get_version()  # type: ignore
--- next file: the Cython API module (TensorDesc, Blob, IECore, ...; filename not captured) ---
@@ -6,49 +6,48 @@ from .cimport ie_api_impl_defs as C
 import numpy as np
 from enum import Enum

-supported_precisions = ["FP32", "FP64", "FP16", "I64", "U64", "I32", "U32",
-                        "I16", "I4", "I8", "U16", "U4", "U8", "BOOL", "BIN", "BF16"]
+supported_precisions = ['FP32', 'FP64', 'FP16', 'I64', 'U64', 'I32', 'U32',
+                        'I16', 'I4', 'I8', 'U16', 'U4', 'U8', 'BOOL', 'BIN', 'BF16']

 known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL', 'MULTI']

-layout_int_to_str_map = {0: "ANY", 1: "NCHW", 2: "NHWC", 3: "NCDHW", 4: "NDHWC", 64: "OIHW", 95: "SCALAR", 96: "C",
-                         128: "CHW", 192: "HW", 193: "NC", 194: "CN", 200: "BLOCKED"}
+layout_int_to_str_map = {0: 'ANY', 1: 'NCHW', 2: 'NHWC', 3: 'NCDHW', 4: 'NDHWC', 64: 'OIHW', 95: 'SCALAR', 96: 'C',
+                         128: 'CHW', 192: 'HW', 193: 'NC', 194: 'CN', 200: 'BLOCKED'}

-format_map = {
-    'FP32' : np.float32,
-    'FP64' : np.float64,
-    'FP16' : np.float16,
-    'I64' : np.int64,
-    'U64' : np.uint64,
-    'I32' : np.int32,
-    'U32' : np.uint32,
-    'I16' : np.int16,
-    'U16' : np.uint16,
-    'I4' : np.int8,
-    'I8' : np.int8,
-    'U4' : np.int8,
-    'U8' : np.uint8,
-    'BOOL' : np.uint8,
-    'BIN' : np.int8,
-    'BF16' : np.float16,
-}
+format_map = {'FP32' : np.float32,
+              'FP64' : np.float64,
+              'FP16' : np.float16,
+              'I64' : np.int64,
+              'U64' : np.uint64,
+              'I32' : np.int32,
+              'U32' : np.uint32,
+              'I16' : np.int16,
+              'U16' : np.uint16,
+              'I4' : np.int8,
+              'I8' : np.int8,
+              'U4' : np.int8,
+              'U8' : np.uint8,
+              'BOOL' : np.uint8,
+              'BIN' : np.int8,
+              'BF16' : np.float16,
+              }

 layout_str_to_enum = {'ANY': C.Layout.ANY,
-                      "NHWC": C.Layout.NHWC,
-                      "NCHW": C.Layout.NCHW,
-                      "NCDHW": C.Layout.NCDHW,
-                      "NDHWC": C.Layout.NDHWC,
-                      "OIHW": C.Layout.OIHW,
-                      "GOIHW": C.Layout.GOIHW,
-                      "OIDHW": C.Layout.OIDHW,
-                      "GOIDHW": C.Layout.GOIDHW,
-                      "SCALAR": C.Layout.SCALAR,
-                      "C": C.Layout.C,
-                      "CHW": C.Layout.CHW,
-                      "HW": C.Layout.HW,
-                      "NC": C.Layout.NC,
-                      "CN": C.Layout.CN,
-                      "BLOCKED": C.Layout.BLOCKED
+                      'NHWC': C.Layout.NHWC,
+                      'NCHW': C.Layout.NCHW,
+                      'NCDHW': C.Layout.NCDHW,
+                      'NDHWC': C.Layout.NDHWC,
+                      'OIHW': C.Layout.OIHW,
+                      'GOIHW': C.Layout.GOIHW,
+                      'OIDHW': C.Layout.OIDHW,
+                      'GOIDHW': C.Layout.GOIDHW,
+                      'SCALAR': C.Layout.SCALAR,
+                      'C': C.Layout.C,
+                      'CHW': C.Layout.CHW,
+                      'HW': C.Layout.HW,
+                      'NC': C.Layout.NC,
+                      'CN': C.Layout.CN,
+                      'BLOCKED': C.Layout.BLOCKED
                       }
@@ -60,14 +60,19 @@ cdef c_map_to_dict(map[string, string] c_map):
 def get_version():
     return C.get_version().decode()


 ## This class defines Tensor description
 cdef class TensorDesc:

+    def __eq__(self, other : TensorDesc):
+        return self.layout == other.layout and self.precision == other.precision and self.dims == other.dims
+
+    def __ne__(self, other : TensorDesc):
+        return self.layout != other.layout or self.precision != other.precision or self.dims != other.dims
+
+    def __deepcopy__(self, memodict={}):
+        return TensorDesc(deepcopy(self.precision, memodict), deepcopy(self.dims, memodict), deepcopy(self.layout, memodict))
+
     ## Class constructor
     # @param precision: target memory precision
     # @param dims: target memory dimensions
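What the new dunder methods buy (a sketch; the constructor argument order matches the @param comments just below):

    from copy import deepcopy

    a = TensorDesc('FP32', [1, 3, 224, 224], 'NCHW')
    b = deepcopy(a)        # __deepcopy__ clones precision, dims and layout
    assert a == b          # __eq__ compares all three fields
    assert not (a != b)    # __ne__ is its exact complement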
@ -77,26 +82,32 @@ cdef class TensorDesc:
|
||||
if precision not in supported_precisions:
|
||||
raise ValueError(f"Unsupported precision {precision}! List of supported precisions: {supported_precisions}")
|
||||
self.impl = C.CTensorDesc(C.Precision.FromStr(precision.encode()), dims, layout_str_to_enum[layout])
|
||||
|
||||
## Shape (dimensions) of the TensorDesc object
|
||||
@property
|
||||
def dims(self):
|
||||
return self.impl.getDims()
|
||||
|
||||
@dims.setter
|
||||
def dims(self, dims_array : [list, tuple]):
|
||||
self.impl.setDims(dims_array)
|
||||
|
||||
## Precision of the TensorDesc object
|
||||
@property
|
||||
def precision(self):
|
||||
return self.impl.getPrecision().name().decode()
|
||||
|
||||
@precision.setter
|
||||
def precision(self, precision : str):
|
||||
if precision not in supported_precisions:
|
||||
raise ValueError(f"Unsupported precision {precision}! List of supported precisions: {supported_precisions}")
|
||||
self.impl.setPrecision(C.Precision.FromStr(precision.encode()))
|
||||
|
||||
## Layout of the TensorDesc object
|
||||
@property
|
||||
def layout(self):
|
||||
return layout_int_to_str_map[self.impl.getLayout()]
|
||||
|
||||
@layout.setter
|
||||
def layout(self, layout : str):
|
||||
if layout not in layout_str_to_enum.keys():
|
||||
@ -148,17 +159,17 @@ cdef class Blob:
|
||||
self._ptr = C.make_shared_blob[int16_t](c_tensor_desc)
|
||||
elif precision == "Q78" or precision == "U16":
|
||||
self._ptr = C.make_shared_blob[uint16_t](c_tensor_desc)
|
||||
elif precision == "U8" or precision == "BOOL":
|
||||
elif precision == "U8" or precision == "BOOL":
|
||||
self._ptr = C.make_shared_blob[uint8_t](c_tensor_desc)
|
||||
elif precision == "I8" or precision == "BIN" or precision == "I4" or precision == "U4":
|
||||
elif precision == "I8" or precision == "BIN" or precision == "I4" or precision == "U4":
|
||||
self._ptr = C.make_shared_blob[int8_t](c_tensor_desc)
|
||||
elif precision == "I32":
|
||||
elif precision == "I32":
|
||||
self._ptr = C.make_shared_blob[int32_t](c_tensor_desc)
|
||||
elif precision == "U32":
|
||||
elif precision == "U32":
|
||||
self._ptr = C.make_shared_blob[uint32_t](c_tensor_desc)
|
||||
elif precision == "I64":
|
||||
elif precision == "I64":
|
||||
self._ptr = C.make_shared_blob[int64_t](c_tensor_desc)
|
||||
elif precision == "U64":
|
||||
elif precision == "U64":
|
||||
self._ptr = C.make_shared_blob[uint64_t](c_tensor_desc)
|
||||
else:
|
||||
raise AttributeError(f"Unsupported precision {precision} for blob")
|
||||
@ -191,22 +202,22 @@ cdef class Blob:
|
||||
elif precision == "Q78" or precision == "U16":
|
||||
U16_array_memview = self._array_data
|
||||
self._ptr = C.make_shared_blob[uint16_t](c_tensor_desc, &U16_array_memview[0], U16_array_memview.shape[0])
|
||||
elif precision == "U8" or precision == "BOOL":
|
||||
elif precision == "U8" or precision == "BOOL":
|
||||
U8_array_memview = self._array_data
|
||||
self._ptr = C.make_shared_blob[uint8_t](c_tensor_desc, &U8_array_memview[0], U8_array_memview.shape[0])
|
||||
elif precision == "I8" or precision == "BIN" or precision == "I4" or precision == "U4":
|
||||
elif precision == "I8" or precision == "BIN" or precision == "I4" or precision == "U4":
|
||||
I8_array_memview = self._array_data
|
||||
self._ptr = C.make_shared_blob[int8_t](c_tensor_desc, &I8_array_memview[0], I8_array_memview.shape[0])
|
||||
elif precision == "I32":
|
||||
elif precision == "I32":
|
||||
I32_array_memview = self._array_data
|
||||
self._ptr = C.make_shared_blob[int32_t](c_tensor_desc, &I32_array_memview[0], I32_array_memview.shape[0])
|
||||
elif precision == "U32":
|
||||
elif precision == "U32":
|
||||
U32_array_memview = self._array_data
|
||||
self._ptr = C.make_shared_blob[uint32_t](c_tensor_desc, &U32_array_memview[0], U32_array_memview.shape[0])
|
||||
elif precision == "I64":
|
||||
elif precision == "I64":
|
||||
I64_array_memview = self._array_data
|
||||
self._ptr = C.make_shared_blob[int64_t](c_tensor_desc, &I64_array_memview[0], I64_array_memview.shape[0])
|
||||
elif precision == "U64":
|
||||
elif precision == "U64":
|
||||
U64_array_memview = self._array_data
|
||||
self._ptr = C.make_shared_blob[uint64_t](c_tensor_desc, &U64_array_memview[0], U64_array_memview.shape[0])
|
||||
else:
|
||||
@ -301,7 +312,7 @@ cdef class IECore:
|
||||
raise Exception(f"Path to the weights {weights} doesn't exist or it's a directory")
|
||||
weights_ = weights.encode()
|
||||
|
||||
net.impl = self.impl.readNetwork(model_, weights_)
|
||||
net.impl = self.impl.readNetwork(model_, weights_)
|
||||
return net
|
||||
|
||||
## Loads a network that was read from the Intermediate Representation (IR) to the plugin with specified device name
|
||||
@ -500,11 +511,13 @@ cdef class PreProcessChannel:
    property mean_value:
        def __get__(self):
            return deref(self._ptr).meanValue

        def __set__(self, float mean_value):
            deref(self._ptr).meanValue = mean_value

    property std_scale:
        def __get__(self):
            return deref(self._ptr).stdScale

        def __set__(self, float std_scale):
            deref(self._ptr).stdScale = std_scale

    property mean_data:
@ -512,6 +525,7 @@ cdef class PreProcessChannel:
            blob = Blob()
            blob._ptr = deref(self._ptr).meanData
            return blob

        def __set__(self, Blob mean_data):
            deref(self._ptr).meanData = mean_data._ptr
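These per-channel properties feed mean/scale preprocessing. A hedged sketch, reusing net from the read_network sketch above (input name and values are illustrative; depending on the release, mean_variant may also need to be set):

    pp = net.input_info["data"].preprocess_info
    pp.init(3)                # three channels
    pp[0].mean_value = 104.0  # per-channel mean subtraction
    pp[0].std_scale = 57.0    # per-channel scaling
    # assumption: some releases also require pp.mean_variant to be set explicitly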
@ -519,7 +533,7 @@ cdef class PreProcessChannel:

cdef class PreProcessInfo:
    def __cinit__(self):
        self._ptr = new CPreProcessInfo()
        self._user_data = True

    def __dealloc__(self):
        if self._user_data:
@ -581,7 +595,7 @@ cdef class PreProcessInfo:
    # ```
    @property
    def resize_algorithm(self):
        return ResizeAlgorithm(deref(self._ptr).getResizeAlgorithm())

    @resize_algorithm.setter
    def resize_algorithm(self, alg : ResizeAlgorithm):
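A short sketch of the setter, assuming ResizeAlgorithm is exported from openvino.inference_engine as in the 2021 releases (input name is illustrative):

    from openvino.inference_engine import ResizeAlgorithm

    net.input_info["data"].preprocess_info.resize_algorithm = ResizeAlgorithm.RESIZE_BILINEAR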
@ -757,18 +771,22 @@ cdef class CDataPtr:
    @property
    def name(self):
        return deref(self._ptr).getName().decode()

    ## Precision of the data object
    @property
    def precision(self):
        return deref(self._ptr).getPrecision().name().decode()

    ## Shape (dimensions) of the data object
    @property
    def shape(self):
        return deref(self._ptr).getDims()

    ## Layout of the data object
    @property
    def layout(self):
        return layout_int_to_str_map[deref(self._ptr).getLayout()]

    ## Checks if the current data object is resolved
    @property
    def initialized(self):
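These read-only properties describe a network tensor. A hedged sketch of inspecting outputs through them, continuing from the read_network sketch (device name is illustrative):

    exec_net = ie.load_network(network=net, device_name="CPU")
    for name, out in exec_net.outputs.items():  # each value is a CDataPtr
        print(name, out.precision, out.shape, out.layout)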
@ -810,7 +828,6 @@ cdef class ExecutableNetwork:
            res[name] = deepcopy(value.buffer)
        return res

    ## Starts asynchronous inference for specified infer request.
    # Wraps `async_infer()` method of the `InferRequest` class.
    # @param request_id: Index of infer request to start inference
@ -877,6 +894,7 @@ cdef class ExecutableNetwork:
            data_ptr._ptr = in_.second
            inputs[in_.first.decode()] = data_ptr
        return inputs

    ## A dictionary that maps output layer names to CDataPtr objects
    @property
    def outputs(self):
@ -888,6 +906,7 @@ cdef class ExecutableNetwork:
            data_ptr._ptr = in_.second
            outputs[in_.first.decode()] = data_ptr
        return outputs

    ## Gets executable graph information from a device
    # @return An instance of `IENetwork`
    #
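The start_async wrapper documented above works against the same request pool as these dictionaries; a hedged sketch of one async round trip, with exec_net from the previous sketch (shape and dtype are illustrative):

    import numpy as np

    input_name = next(iter(exec_net.input_info))
    request = exec_net.requests[0]
    exec_net.start_async(request_id=0, inputs={input_name: np.zeros((1, 3, 224, 224), dtype=np.float32)})
    request.wait()  # blocks until the request completes
    results = {name: request.output_blobs[name].buffer for name in exec_net.outputs}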
@ -1180,12 +1199,12 @@ cdef class InferRequest:
    cpdef get_perf_counts(self):
        cdef map[string, C.ProfileInfo] c_profile = deref(self.impl).getPerformanceCounts()
        profile = {}
        for l in c_profile:
            info = l.second
        for line in c_profile:
            info = line.second
            # TODO: add execution index. Check if unsigned int is properly converted to int in python.
            profile[l.first.decode()] = {"status": info.status.decode(), "exec_type": info.exec_type.decode(),
                                         "layer_type": info.layer_type.decode(), "real_time": info.real_time,
                                         "cpu_time": info.cpu_time, "execution_index": info.execution_index}
            profile[line.first.decode()] = {"status": info.status.decode(), "exec_type": info.exec_type.decode(),
                                            "layer_type": info.layer_type.decode(), "real_time": info.real_time,
                                            "cpu_time": info.cpu_time, "execution_index": info.execution_index}
        return profile
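A hedged sketch of reading the counters; meaningful numbers normally require loading the network with the PERF_COUNT config key enabled (an assumption about the device config, not part of this commit):

    # assumption: exec_net was created with config={"PERF_COUNT": "YES"}
    perf = exec_net.requests[0].get_perf_counts()
    for layer, stats in perf.items():
        print(layer, stats["status"], stats["layer_type"], stats["real_time"])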

    ## A dictionary that maps input layer names to `numpy.ndarray`
@ -1294,8 +1313,7 @@ cdef class IENetwork:
        cdef string weights_
        if init_from_buffer:
            warnings.warn("Reading network using constructor is deprecated. "
                          "Please, use IECore.read_network() method instead",
                          DeprecationWarning)
                          "Please, use IECore.read_network() method instead", DeprecationWarning)
            memcpy(xml_buffer, <char*> model, len(model))
            memcpy(bin_buffer, <uint8_t *> weights, len(weights))
            xml_buffer[len(model)] = b'\0'
@ -1304,8 +1322,7 @@ cdef class IENetwork:
        else:
            if model and weights:
                warnings.warn("Reading network using constructor is deprecated. "
                              "Please, use IECore.read_network() method instead",
                              DeprecationWarning)
                              "Please, use IECore.read_network() method instead", DeprecationWarning)
                if not os.path.isfile(model):
                    raise Exception(f"Path to the model {model} doesn't exist or it's a directory")
                if not os.path.isfile(weights):
@ -1389,7 +1406,6 @@ cdef class IENetwork:
            raise AttributeError(f"Invalid batch size {batch}! Batch size should be positive integer value")
        self.impl.setBatch(batch)

    ## Marks any intermediate layer as output layer to retrieve the inference results from the specified layers.
    # @param outputs: List of layers to be set as model outputs. The list can contain strings with layer names to be set
    # as outputs or tuples with layer name as first element and output port id as second element.
@ -1405,13 +1421,13 @@ cdef class IENetwork:
    def add_outputs(self, outputs):
        if not isinstance(outputs, list):
            outputs = [outputs]
        for i, l in enumerate(outputs):
            if isinstance(l, str):
                self.impl.addOutput(l.encode(), 0)
            elif isinstance(l, tuple) and len(l) == 2:
                self.impl.addOutput(l[0].encode(), l[1])
        for i, line in enumerate(outputs):
            if isinstance(line, str):
                self.impl.addOutput(line.encode(), 0)
            elif isinstance(line, tuple) and len(line) == 2:
                self.impl.addOutput(line[0].encode(), line[1])
            else:
                raise TypeError(f"Incorrect type {type(l)} for layer to add at index {i}. "
                raise TypeError(f"Incorrect type {type(line)} for layer to add at index {i}. "
                                "Expected string with layer name or tuple with two elements: layer name as "
                                "first element and port id as second")
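A hedged sketch of the two accepted argument shapes (layer names and port id are illustrative):

    net.add_outputs("conv5")                # single layer name, port 0
    net.add_outputs([("conv5", 0), "fc6"])  # mixed names and (name, port) tuples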
@ -1447,7 +1463,7 @@ cdef class IENetwork:
    # net.reshape({input_layer: (n, c, h*2, w*2)})
    # ```
    def reshape(self, input_shapes: dict):
        cdef map[string, vector[size_t]] c_input_shapes;
        cdef map[string, vector[size_t]] c_input_shapes
        cdef vector[size_t] c_shape
        net_inputs = self.input_info
        for input, shape in input_shapes.items():
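Expanding the docstring example above into a runnable sketch (doubling the spatial dimensions is illustrative):

    input_layer = next(iter(net.input_info))
    n, c, h, w = net.input_info[input_layer].input_data.shape
    net.reshape({input_layer: (n, c, h * 2, w * 2)})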
@ -1533,13 +1549,10 @@ cdef class BlobBuffer:
        return precision_to_format[name].encode()

    def to_numpy(self):
        precision = deref(self.ptr).getTensorDesc().getPrecision()
        name = bytes(precision.name()).decode()
        if name == "FP16":
            return np.asarray(self).view(dtype=np.float16)
        else:
            return np.asarray(self)

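BlobBuffer backs the public buffer property, so FP16 blobs surface as float16 arrays. A hedged sketch, reusing blob from the earlier Blob sketches:

    arr = blob.buffer  # numpy view over the blob's memory, produced via to_numpy()
    arr[:] = 0.5       # writes through to the underlying blob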
@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os
import sys

if sys.platform == "win32":
if sys.platform == 'win32':
    # Installer, yum, pip installs openvino dlls to the different directories
    # and those paths need to be visible to the openvino modules
    #
@ -24,7 +25,8 @@ if sys.platform == "win32":
    if (3, 8) <= sys.version_info:
        os.add_dll_directory(os.path.abspath(lib_path))
    else:
        os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"]
        os.environ['PATH'] = os.path.abspath(lib_path) + ';' + os.environ['PATH']

from .offline_transformations_api import *

__all__ = ['ApplyMOCTransformations']
@ -7,17 +7,22 @@ from ..inference_engine.ie_api cimport IENetwork
from libcpp cimport bool
from libcpp.string cimport string


def ApplyMOCTransformations(IENetwork network, bool cf):
    C.ApplyMOCTransformations(network.impl, cf)


def ApplyPOTTransformations(IENetwork network, string device):
    C.ApplyPOTTransformations(network.impl, device)


def ApplyLowLatencyTransformation(IENetwork network):
    C.ApplyLowLatencyTransformation(network.impl)


def ApplyPruningTransformation(IENetwork network):
    C.ApplyPruningTransformation(network.impl)


def CheckAPI():
    C.CheckAPI()
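These wrappers mutate an IENetwork in place. A hedged usage sketch, assuming the package import path matches the __init__.py above (file names are illustrative, and the meaning of the cf flag is an assumption based on the C++ API):

    from openvino.inference_engine import IECore
    from openvino.offline_transformations import ApplyMOCTransformations

    ie = IECore()
    net = ie.read_network(model="model.xml", weights="model.bin")
    ApplyMOCTransformations(net, False)  # cf: constant-folding flag (assumption)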
@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from .test_utils_api import *

__all__ = ['CompareNetworks']
@ -8,6 +8,7 @@ from libcpp cimport bool
from libcpp.string cimport string
from libcpp.pair cimport pair


def CompareNetworks(IENetwork lhs, IENetwork rhs):
    cdef pair[bool, string] c_pair
    c_pair = C.CompareNetworks(lhs.impl, rhs.impl)
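The C++ call returns a (status, message) pair; a hedged sketch of how the wrapper is typically used, assuming it surfaces that pair as a Python tuple (the hunk is truncated before the return statement):

    from openvino.test_utils import CompareNetworks

    status, message = CompareNetworks(net_a, net_b)
    assert status, message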
@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os.path
import sys
import errno
import subprocess
import subprocess  # nosec
import typing
from pathlib import Path
from shutil import copyfile
from distutils.command.install import install
@ -18,94 +20,82 @@ from decouple import config

WHEEL_LIBS_INSTALL_DIR = os.path.join('openvino', 'libs')
WHEEL_LIBS_PACKAGE = 'openvino.libs'
PYTHON_VERSION = f"python{sys.version_info.major}.{sys.version_info.minor}"
PYTHON_VERSION = f'python{sys.version_info.major}.{sys.version_info.minor}'

# The following variables can be defined in environment or .env file
CMAKE_BUILD_DIR = config('CMAKE_BUILD_DIR', ".")
CMAKE_BUILD_DIR = config('CMAKE_BUILD_DIR', '.')
CORE_LIBS_DIR = config('CORE_LIBS_DIR', '')
PLUGINS_LIBS_DIR = config('PLUGINS_LIBS_DIR', '')
NGRAPH_LIBS_DIR = config('NGRAPH_LIBS_DIR', '')
TBB_LIBS_DIR = config('TBB_LIBS_DIR', '')
PY_PACKAGES_DIR = config('PY_PACKAGES_DIR', '')
LIBS_RPATH = "$ORIGIN" if sys.platform == "linux" else "@loader_path"
LIBS_RPATH = '$ORIGIN' if sys.platform == 'linux' else '@loader_path'

LIB_INSTALL_CFG = {
    "ie_libs": {
    'ie_libs': {
        'name': 'core',
        'prefix': 'libs.core',
        'install_dir': CORE_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    "hetero_plugin": {
    'hetero_plugin': {
        'name': 'hetero',
        'prefix': 'libs.plugins',
        'install_dir': PLUGINS_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    "gpu_plugin": {
    'gpu_plugin': {
        'name': 'gpu',
        'prefix': 'libs.plugins',
        'install_dir': PLUGINS_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    "cpu_plugin": {
    'cpu_plugin': {
        'name': 'cpu',
        'prefix': 'libs.plugins',
        'install_dir': PLUGINS_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    "multi_plugin": {
    'multi_plugin': {
        'name': 'multi',
        'prefix': 'libs.plugins',
        'install_dir': PLUGINS_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    "myriad_plugin": {
    'myriad_plugin': {
        'name': 'myriad',
        'prefix': 'libs.plugins',
        'install_dir': PLUGINS_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    "ngraph_libs": {
    'ngraph_libs': {
        'name': 'ngraph',
        'prefix': 'libs.ngraph',
        'install_dir': NGRAPH_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    "tbb_libs": {
        'name': 'tbb',
        'prefix': 'libs.tbb',
        'install_dir': TBB_LIBS_DIR
    },
    'tbb_libs': {'name': 'tbb', 'prefix': 'libs.tbb', 'install_dir': TBB_LIBS_DIR},
}

PY_INSTALL_CFG = {
    "ie_py": {
        'name': PYTHON_VERSION,
        'prefix': 'site-packages',
        'install_dir': PY_PACKAGES_DIR
    },
    "ngraph_py": {
        'name': f"pyngraph_{PYTHON_VERSION}",
        'prefix': 'site-packages',
        'install_dir': PY_PACKAGES_DIR
    },
    'ie_py': {'name': PYTHON_VERSION, 'prefix': 'site-packages', 'install_dir': PY_PACKAGES_DIR},
    'ngraph_py': {'name': f'pyngraph_{PYTHON_VERSION}', 'prefix': 'site-packages', 'install_dir': PY_PACKAGES_DIR},
}

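For context on the config() calls above: python-decouple reads each variable from the environment or a local .env file before falling back to the given default. A hedged sketch of a .env that would drive this setup.py (all paths and the version are illustrative):

    # .env, read by `from decouple import config`
    CMAKE_BUILD_DIR=/path/to/openvino/build
    CORE_LIBS_DIR=deployment_tools/inference_engine/lib/intel64
    PY_PACKAGES_DIR=python/python3.6
    WHEEL_VERSION=2021.3.0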
class PrebuiltExtension(Extension):
    """Initialize Extension"""

    def __init__(self, name, sources, *args, **kwargs):
        if len(sources) != 1:
            nln = '\n'
            raise DistutilsSetupError(
                f"PrebuiltExtension can accept only one source, but got: {nln}{nln.join(sources)}"
            )
            raise DistutilsSetupError(f'PrebuiltExtension can accept only one source, but got: {nln}{nln.join(sources)}')
        super().__init__(name, sources, *args, **kwargs)


class CustomBuild(build):
    """Custom implementation of build_clib"""

    def run(self):
        self.run_command('build_clib')
        build.run(self)
@ -113,6 +103,7 @@ class CustomBuild(build):

class CustomInstall(install):
    """Enable build_clib during the installation"""

    def run(self):
        self.run_command('build_clib')
        install.run(self)
@ -120,6 +111,7 @@ class CustomInstall(install):

class PrepareLibs(build_clib):
    """Prepare prebuilt libraries"""

    def run(self):
        self.configure(LIB_INSTALL_CFG)
        self.configure(PY_INSTALL_CFG)
@ -132,16 +124,13 @@ class PrepareLibs(build_clib):
            install_dir = comp_data.get('install_dir')
            if install_dir and not os.path.isabs(install_dir):
                install_dir = os.path.join(install_prefix, install_dir)
            self.announce(f"Installing {comp}", level=3)
            self.spawn(["cmake",
                        "--install", CMAKE_BUILD_DIR,
                        "--prefix", install_prefix,
                        "--component", comp_data.get('name')])
            self.announce(f'Installing {comp}', level=3)
            self.spawn(['cmake', '--install', CMAKE_BUILD_DIR, '--prefix', install_prefix, '--component', comp_data.get('name')])
            # set rpath if applicable
            if sys.platform != "win32" and comp_data.get('rpath'):
                file_types = ["*.so"] if sys.platform == "linux" else ["*.dylib", "*.so"]
                for file in file_types:
                    for path in Path(install_dir).glob(file):
            if sys.platform != 'win32' and comp_data.get('rpath'):
                file_types = ['*.so'] if sys.platform == 'linux' else ['*.dylib', '*.so']
                for file_type in file_types:
                    for path in Path(install_dir).glob(file_type):
                        set_rpath(comp_data['rpath'], path)

    def generate_package(self, src_dirs):
@ -162,28 +151,28 @@ class PrepareLibs(build_clib):
            copyfile(file_path, dst_file)

        if Path(package_dir).exists():
            self.announce(f"Adding {WHEEL_LIBS_PACKAGE} package", level=3)
            self.announce(f'Adding {WHEEL_LIBS_PACKAGE} package', level=3)
            packages.append(WHEEL_LIBS_PACKAGE)
            package_data.update({WHEEL_LIBS_PACKAGE: ['*']})

class CopyExt(build_ext):
    """Copy extension files to the build directory"""

    def run(self):
        for extension in self.extensions:
            if not isinstance(extension, PrebuiltExtension):
                raise DistutilsSetupError(
                    f"copy_ext can accept PrebuiltExtension only, but got {extension.name}")
                raise DistutilsSetupError(f'copy_ext can accept PrebuiltExtension only, but got {extension.name}')
            src = extension.sources[0]
            dst = self.get_ext_fullpath(extension.name)
            os.makedirs(os.path.dirname(dst), exist_ok=True)
            # setting relative path to find dlls
            if sys.platform != "win32":
            if sys.platform != 'win32':
                rpath = os.path.relpath(get_package_dir(PY_INSTALL_CFG), os.path.dirname(src))
                if sys.platform == "linux":
                    rpath = os.path.join("$ORIGIN", rpath, WHEEL_LIBS_INSTALL_DIR)
                elif sys.platform == "darwin":
                    rpath = os.path.join("@loader_path", rpath, WHEEL_LIBS_INSTALL_DIR)
                if sys.platform == 'linux':
                    rpath = os.path.join('$ORIGIN', rpath, WHEEL_LIBS_INSTALL_DIR)
                elif sys.platform == 'darwin':
                    rpath = os.path.join('@loader_path', rpath, WHEEL_LIBS_INSTALL_DIR)
                set_rpath(rpath, src)

            copy_file(src, dst, verbose=self.verbose, dry_run=self.dry_run)
@ -193,7 +182,7 @@ def is_tool(name):
    """Check if the command-line tool is available"""
    try:
        devnull = subprocess.DEVNULL
        subprocess.Popen([name], stdout=devnull, stderr=devnull).communicate()
        subprocess.Popen([name], stdout=devnull, stderr=devnull).communicate()  # nosec
    except OSError as error:
        if error.errno == errno.ENOENT:
            return False
@ -202,80 +191,81 @@ def is_tool(name):
def remove_rpath(file_path):
    """
    Remove rpath from binaries
    :param file_path: binary path
    :type file_path: pathlib.Path
    """
    if sys.platform == "darwin":
        cmd = f'otool -l {file_path} ' \
              f'| grep LC_RPATH -A3 ' \
              f'| grep -o "path.*" ' \
              f'| cut -d " " -f2 ' \
              f'| xargs -I{{}} install_name_tool -delete_rpath {{}} {file_path}'
        if os.WEXITSTATUS(os.system(cmd)) != 0:
            sys.exit(f"Could not remove rpath for {file_path}")
    if sys.platform == 'darwin':
        cmd = (
            f'otool -l {file_path} '  # noqa: P103
            f'| grep LC_RPATH -A3 '
            f'| grep -o "path.*" '
            f'| cut -d " " -f2 '
            f'| xargs -I{{}} install_name_tool -delete_rpath {{}} {file_path}'
        )
        if os.WEXITSTATUS(os.system(cmd)) != 0:  # nosec
            sys.exit(f'Could not remove rpath for {file_path}')
    else:
        sys.exit(f"Unsupported platform: {sys.platform}")
        sys.exit(f'Unsupported platform: {sys.platform}')


def set_rpath(rpath, executable):
    """Setting rpath for linux and macOS libraries"""
    print(f"Setting rpath {rpath} for {executable}")
    cmd = ""
    rpath_tool = ""
    if sys.platform == "linux":
        rpath_tool = "patchelf"
        cmd = [rpath_tool, "--set-rpath", rpath, executable]
    elif sys.platform == "darwin":
        rpath_tool = "install_name_tool"
        cmd = [rpath_tool, "-add_rpath", rpath, executable]
    print(f'Setting rpath {rpath} for {executable}')  # noqa: T001
    cmd = []
    rpath_tool = ''
    if sys.platform == 'linux':
        rpath_tool = 'patchelf'
        cmd = [rpath_tool, '--set-rpath', rpath, executable]
    elif sys.platform == 'darwin':
        rpath_tool = 'install_name_tool'
        cmd = [rpath_tool, '-add_rpath', rpath, executable]
    else:
        sys.exit(f"Unsupported platform: {sys.platform}")
        sys.exit(f'Unsupported platform: {sys.platform}')

    if is_tool(rpath_tool):
        if sys.platform == "darwin":
        if sys.platform == 'darwin':
            remove_rpath(executable)
        ret_info = subprocess.run(cmd, check=True)
        ret_info = subprocess.run(cmd, check=True, shell=False)  # nosec
        if ret_info.returncode != 0:
            sys.exit(f"Could not set rpath: {rpath} for {executable}")
            sys.exit(f'Could not set rpath: {rpath} for {executable}')
    else:
        sys.exit(f"Could not found {rpath_tool} on the system, "
                 f"please make sure that this tool is installed")
        sys.exit(f'Could not found {rpath_tool} on the system, ' f'please make sure that this tool is installed')

def find_prebuilt_extensions(search_dirs):
    """collect prebuilt python extensions"""
    extensions = []
    ext_pattern = ""
    if sys.platform == "linux":
        ext_pattern = "**/*.so"
    elif sys.platform == "win32":
        ext_pattern = "**/*.pyd"
    elif sys.platform == "darwin":
        ext_pattern = "**/*.so"
    ext_pattern = ''
    if sys.platform == 'linux':
        ext_pattern = '**/*.so'
    elif sys.platform == 'win32':
        ext_pattern = '**/*.pyd'
    elif sys.platform == 'darwin':
        ext_pattern = '**/*.so'
    for base_dir in search_dirs:
        for path in Path(base_dir).glob(ext_pattern):
            relpath = path.relative_to(base_dir)
            if relpath.parent != ".":
            if relpath.parent != '.':
                package_names = str(relpath.parent).split(os.path.sep)
            else:
                package_names = []
            package_names.append(path.name.split(".", 1)[0])
            name = ".".join(package_names)
            package_names.append(path.name.split('.', 1)[0])
            name = '.'.join(package_names)
            extensions.append(PrebuiltExtension(name, sources=[str(path)]))
    return extensions

def get_description(desc_file_path):
    """read description from README.md"""
    with open(desc_file_path, "r", encoding="utf-8") as fstream:
    with open(desc_file_path, 'r', encoding='utf-8') as fstream:
        description = fstream.read()
    return description


def get_dependencies(requirements_file_path):
    """read dependencies from requirements.txt"""
    with open(requirements_file_path, "r", encoding="utf-8") as fstream:
    with open(requirements_file_path, 'r', encoding='utf-8') as fstream:
        dependencies = fstream.read()
    return dependencies

@ -299,7 +289,7 @@ def get_package_dir(install_cfg):
    Get python package path based on config
    All the packages should be located in one directory
    """
    py_package_path = ""
    py_package_path = ''
    dirs = get_dir_list(install_cfg)
    if len(dirs) != 0:
        # setup.py support only one package directory, all modules should be located there
@ -307,17 +297,17 @@ def get_package_dir(install_cfg):
    return py_package_path

platforms = ["linux", "win32", "darwin"]
|
||||
platforms = ['linux', 'win32', 'darwin']
|
||||
if not any(pl in sys.platform for pl in platforms):
|
||||
sys.exit("Unsupported platform: {}, expected: {}".format(sys.platform, "linux, win32, darwin"))
|
||||
sys.exit(f'Unsupported platform: {sys.platform}, expected: linux, win32, darwin')
|
||||
|
||||
# copy license file into the build directory
|
||||
package_license = config('WHEEL_LICENSE', '')
|
||||
if os.path.exists(package_license):
|
||||
copyfile(package_license, "LICENSE")
|
||||
copyfile(package_license, 'LICENSE')
|
||||
|
||||
packages = find_namespace_packages(','.join(get_dir_list(PY_INSTALL_CFG)))
|
||||
package_data = {}
|
||||
package_data: typing.Dict[str, list] = {}
|
||||
|
||||
setup(
    version=config('WHEEL_VERSION', '0.0.0'),
@ -326,16 +316,16 @@ setup(
    license=config('WHEEL_LICENCE_TYPE', 'OSI Approved :: Apache Software License'),
    author=config('WHEEL_AUTHOR', 'Intel Corporation'),
    description=config('WHEEL_DESC', 'Inference Engine Python* API'),
    install_requires=get_dependencies(config('WHEEL_REQUIREMENTS', "requirements.txt")),
    install_requires=get_dependencies(config('WHEEL_REQUIREMENTS', 'requirements.txt')),
    long_description=get_description(config('WHEEL_OVERVIEW', 'pypi_overview.md')),
    long_description_content_type="text/markdown",
    long_description_content_type='text/markdown',
    download_url=config('WHEEL_DOWNLOAD_URL', 'https://github.com/openvinotoolkit/openvino/tags'),
    url=config('WHEEL_URL', 'https://docs.openvinotoolkit.org/latest/index.html'),
    cmdclass={
        "build": CustomBuild,
        "install": CustomInstall,
        "build_clib": PrepareLibs,
        "build_ext": CopyExt,
        'build': CustomBuild,
        'install': CustomInstall,
        'build_clib': PrepareLibs,
        'build_ext': CopyExt,
    },
    ext_modules=find_prebuilt_extensions(get_dir_list(PY_INSTALL_CFG)),
    packages=packages,