update (#18615)

parent 9334ad0790
commit b7e8338e29

File diff suppressed because it is too large.
@@ -91,7 +91,7 @@ Load the model
 # read the network and corresponding weights from file
 model = ie.read_model(model=model_path)

-# compile the model for the CPU (you can choose manually CPU, GPU, MYRIAD etc.)
+# compile the model for the CPU (you can choose manually CPU, GPU, etc.)
 # or let the engine choose the best available device (AUTO)
 compiled_model = ie.compile_model(model=model, device_name="CPU")

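The hunk above touches the canonical OpenVINO Python flow: create a Core, read the IR, compile for a device, then call the compiled model. A minimal runnable sketch of that flow, assuming a static-shape IR at the hypothetical path model.xml:

import numpy as np
from openvino.runtime import Core

ie = Core()
# read the network and corresponding weights from file ("model.xml" is a placeholder path)
model = ie.read_model(model="model.xml")
# compile for a specific device, or pass "AUTO" to let the engine pick one
compiled_model = ie.compile_model(model=model, device_name="CPU")
# run inference on a dummy input matching the model's (static) input shape
input_data = np.zeros(tuple(compiled_model.input(0).shape), dtype=np.float32)
result = compiled_model([input_data])[compiled_model.output(0)]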
@@ -393,7 +393,7 @@ Now, you can read and load the network.
 ie = Core()

 You may run the network on multiple devices. By default, it will load
-the model on CPU (you can choose manually CPU, GPU, MYRIAD, etc.) or let
+the model on CPU (you can choose manually CPU, GPU, etc.) or let
 the engine choose the best available device (AUTO).

 To list all available devices that can be used, run
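The last context line above trails into a snippet that this hunk does not show; a hedged sketch of the device listing it refers to, using Core.available_devices and the FULL_DEVICE_NAME property from the 2022-era Python API:

from openvino.runtime import Core

ie = Core()
# each entry is a device string accepted by compile_model, e.g. 'CPU' or 'GPU'
for device in ie.available_devices:
    print(device, "-", ie.get_property(device, "FULL_DEVICE_NAME"))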
@@ -144,7 +144,7 @@ specified device.

 # Read the network and corresponding weights from a file.
 model = ie_core.read_model(model=model_path)
-# Compile the model for CPU (you can use GPU or MYRIAD as well).
+# Compile the model for CPU (you can also use GPU).
 compiled_model = ie_core.compile_model(model=model, device_name="CPU")
 # Get input and output names of nodes.
 input_keys = compiled_model.input(0)
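For readers following the hunk, the handles returned by compiled_model.input(0) and compiled_model.output(0) carry the node's name and shape. A short hedged sketch, with a placeholder model path:

from openvino.runtime import Core

ie_core = Core()
model = ie_core.read_model(model="model.xml")  # placeholder path
compiled_model = ie_core.compile_model(model=model, device_name="CPU")

# get input and output nodes, then inspect their names and shapes
input_keys = compiled_model.input(0)
output_keys = compiled_model.output(0)
print(input_keys.any_name, input_keys.shape)
print(output_keys.any_name, output_keys.shape)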
@@ -900,9 +900,8 @@ OpenVINO Inference Engine Setup
 ie = Core()

 # Neural Compute Stick
-# compile the model for the CPU (you can choose manually CPU, GPU, MYRIAD etc.)
+# compile the model for the CPU (you can choose manually CPU, GPU, etc.)
 # or let the engine choose the best available device (AUTO)
-# compiled_model = ie.compile_model(model=model, device_name="MYRIAD")
 compiled_model = ie.compile_model(model=ir_model, device_name="CPU")

 del ir_model
@@ -97,7 +97,7 @@ desired device.
 ie_core = Core()
 # Read the network from a file.
 model = ie_core.read_model(model_path)
-# Let the AUTO device decide where to load the model (you can use CPU, GPU or MYRIAD as well).
+# Let the AUTO device decide where to load the model (you can use CPU or GPU).
 compiled_model = ie_core.compile_model(model=model, device_name="AUTO", config={"PERFORMANCE_HINT": "LATENCY"})

 # Get the input and output names of nodes.
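The config key in this hunk, PERFORMANCE_HINT, takes two standard values in this API generation: LATENCY for single-stream response time and THROUGHPUT for aggregate frames per second. A hedged sketch contrasting the two, with a placeholder model path:

from openvino.runtime import Core

ie_core = Core()
model = ie_core.read_model("model.xml")  # placeholder path

# minimize time-to-first-result for a single request
latency_model = ie_core.compile_model(
    model=model, device_name="AUTO", config={"PERFORMANCE_HINT": "LATENCY"}
)
# maximize total frames per second across parallel requests
throughput_model = ie_core.compile_model(
    model=model, device_name="AUTO", config={"PERFORMANCE_HINT": "THROUGHPUT"}
)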
@@ -183,7 +183,7 @@ Model Initialization function

 # Read the network and corresponding weights from a file.
 model = ie_core.read_model(model=model_path)
-# Compile the model for CPU (you can use GPU or MYRIAD as well).
+# Compile the model for CPU (you can also use GPU).
 compiled_model = ie_core.compile_model(model=model, device_name="CPU")
 # Get input and output names of nodes.
 input_keys = compiled_model.input(0)
@@ -164,7 +164,7 @@ results.
 # Read the network and corresponding weights from IR Model.
 model = ie_core.read_model(model=ir_path)

-# Compile the model for CPU (or change to GPU, MYRIAD etc. for other devices)
+# Compile the model for CPU (or change to GPU, etc. for other devices)
 # or let OpenVINO select the best available device with AUTO.
 compiled_model = ie_core.compile_model(model=model, device_name="AUTO")

@@ -209,7 +209,7 @@ created to infer the compiled model.
 ie_core = Core()
 # read the network and corresponding weights from file
 model = ie_core.read_model(model=ir_model_path, weights=model_weights_path)
-# load the model on the CPU (you can use GPU or MYRIAD as well)
+# load the model on the CPU (you can also use GPU)
 compiled_model = ie_core.compile_model(model=model, device_name="CPU")
 infer_request = compiled_model.create_infer_request()
 input_tensor_name = model.inputs[0].get_any_name()
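This hunk stops right after creating the request; a hedged sketch of the synchronous round trip it sets up (paths and the zero-filled input are placeholders):

import numpy as np
from openvino.runtime import Core, Tensor

ie_core = Core()
model = ie_core.read_model(model="model.xml", weights="model.bin")  # placeholder paths
compiled_model = ie_core.compile_model(model=model, device_name="CPU")

infer_request = compiled_model.create_infer_request()
input_tensor_name = model.inputs[0].get_any_name()

# fill the named input, run synchronously, then read the first output
data = np.zeros(tuple(model.inputs[0].shape), dtype=np.float32)
infer_request.set_tensor(input_tensor_name, Tensor(data))
infer_request.infer()
output = infer_request.get_output_tensor(0).data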
@@ -117,8 +117,6 @@ Tutorials that explain how to optimize and quantize models with OpenVINO tools.
 +----------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+
 | `107-speech-recognition-quantization <notebooks/107-speech-recognition-quantization-data2vec-with-output.html>`__ |br| |c107|                      | Optimize and quantize a pre-trained Data2Vec speech model.                                                                       |
 +----------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+
-| `108-gpu-device <notebooks/108-gpu-device-with-output.html>`__                                                                                     | Working with GPUs in OpenVINO™.                                                                                                  |
-+----------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+
 | `109-performance-tricks <notebooks/109-latency-tricks-with-output.html>`__                                                                         | Performance tricks in OpenVINO™.                                                                                                 |
 +----------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+
 | `110-ct-segmentation-quantize <notebooks/110-ct-scan-live-inference-with-output.html>`__ |br| |n110|                                               | Live inference of a kidney segmentation model and benchmark CT-scan data with OpenVINO.                                          |