Merge remote-tracking branch 'upstream/master'
Commit 89ba7c1bf1
@@ -1 +1 @@
- rel-1.7.1
+ rel-1.8.1
@@ -16,13 +16,12 @@ jobs:
  timeoutInMinutes: 90

  pool:
- name: LIN_VMSS_VENV_F16S_WU2
+ name: LIN_VMSS_VENV_F16S_U20_WU2

  variables:
  system.debug: true
  VSTS_HTTP_RETRY: 5
  VSTS_HTTP_TIMEOUT: 200
- WORKERS_NUMBER: 16
  BUILD_TYPE: Release
  REPO_DIR: $(Build.Repository.LocalPath)
  OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)/../openvino_contrib
@@ -43,6 +42,7 @@ jobs:
  echo Python info ; which python ; python --version
  echo Java info ; which java ; java -version
  echo gcc info ; which gcc ; gcc --version
+ echo cmake info ; which cmake ; cmake --version
  lsb_release
  env
  cat /proc/cpuinfo
@@ -74,27 +74,26 @@ jobs:
  submodules: recursive
  path: openvino_contrib

- - checkout: testdata
- clean: true
- lfs: true
- path: testdata

  - script: |
- sudo apt --assume-yes install libusb-1.0-0-dev
+ set -e
- # For opencv-python: setuptools and upgrade
+ $(REPO_DIR)/install_build_dependencies.sh
- sudo apt-get install python3-setuptools patchelf
+ # Move jdk into contrib
+ sudo apt --assume-yes install openjdk-11-jdk
+ # For opencv-python: python3-setuptools and pip upgrade
  python3 -m pip install --upgrade pip
  python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/requirements.txt
  python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/wheel/requirements-dev.txt
  # For running Python API tests
  python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/src/requirements-dev.txt
- # For running nGraph unit tests dependent on Python frameworks
+ # For running PaddlePaddle frontend unit tests
- python3 -m pip install -r $(REPO_DIR)/ngraph/test/requirements_test.txt
+ python3 -m pip install -r $(REPO_DIR)/ngraph/test/frontend/paddlepaddle/requirements_dev.txt
+ # For running ONNX frontend unit tests
+ python3 -m pip install -r $(REPO_DIR)/ngraph/test/requirements_test_onnx.txt
  # For MO unit tests
  python3 -m pip install -r $(REPO_DIR)/model-optimizer/requirements.txt
  python3 -m pip install -r $(REPO_DIR)/model-optimizer/requirements_dev.txt
  # Speed up build
- wget https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-linux.zip
+ wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
  unzip ninja-linux.zip
  sudo cp -v ninja /usr/local/bin/
  # Speed up tests
@@ -102,6 +101,11 @@ jobs:
  workingDirectory: $(WORK_DIR)
  displayName: 'Install dependencies'

+ - checkout: testdata
+ clean: true
+ lfs: true
+ path: testdata

  - task: CMake@1
  inputs:
  # CMake must get Python 3.x version by default
@@ -110,13 +114,14 @@ jobs:
  -DVERBOSE_BUILD=ON
  -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
  -DENABLE_PYTHON=ON
- -DPYTHON_EXECUTABLE=/usr/bin/python3.6
+ -DPYTHON_EXECUTABLE=/usr/bin/python3.8
  -DENABLE_WHEEL=ON
  -DENABLE_TESTS=ON
  -DNGRAPH_ONNX_IMPORT_ENABLE=ON
  -DNGRAPH_ONNX_FRONTEND_ENABLE=ON
  -DENABLE_FASTER_BUILD=ON
  -DENABLE_STRICT_DEPENDENCIES=OFF
+ -DENABLE_REQUIREMENTS_INSTALL=OFF
  -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules
  $(REPO_DIR)
  workingDirectory: $(BUILD_DIR)
@@ -139,8 +144,10 @@ jobs:
  displayName: 'List install files'

  - script: |
+ set -e
  mkdir $(INSTALL_DIR)/opencv/
- cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P cmake_install.cmake && cp -R $(REPO_DIR)/inference-engine/temp/opencv_4.5.2_ubuntu18/opencv/* $(INSTALL_DIR)/opencv/
+ cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P cmake_install.cmake
+ cp -R $(REPO_DIR)/inference-engine/temp/opencv_4.5.2_ubuntu20/opencv/* $(INSTALL_DIR)/opencv/
  workingDirectory: $(BUILD_DIR)
  displayName: 'Install tests'

@@ -155,17 +162,31 @@ jobs:
  workingDirectory: $(BUILD_SAMPLES_DIR)
  displayName: 'Build c samples'

+ - script: rm -fr $(BUILD_DIR)
+ displayName: 'Clean build dir'
+ continueOnError: false
+
+ # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
+ - script: . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph --junitxml=TEST-Pyngraph.xml --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py
+ displayName: 'nGraph Python Bindings Tests'
+ continueOnError: false

  - script: |
  export MO_ROOT=$(INSTALL_DIR)/deployment_tools/model_optimizer
- . $(SETUPVARS) -pyver 3.6 && python3 -m pytest -s $(INSTALL_DIR)/deployment_tools/model_optimizer/unit_tests --junitxml=TEST-ModelOptimizer.xml
+ . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/deployment_tools/model_optimizer/unit_tests --junitxml=TEST-ModelOptimizer.xml
  displayName: 'Model Optimizer UT'
  continueOnError: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
+ workingDirectory: $(INSTALL_TEST_DIR)
  displayName: 'nGraph UT'
  continueOnError: false

- # . $(SETUPVARS) && python3 $(WORK_DIR)/gtest-parallel/gtest_parallel.py $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=InferenceEngineUnitTests.json --gtest_filter=*smoke* -- --gtest_print_time=1
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/paddlepaddle_tests --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-PaddlePaddle.xml
+ displayName: 'PaddlePaddle Frontend UT'
+ continueOnError: false
+
+ # . $(SETUPVARS) && python3 $(WORK_DIR)/gtest-parallel/gtest_parallel.py $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --workers=16 --dump_json_test_results=InferenceEngineUnitTests.json --gtest_filter=*smoke* -- --gtest_print_time=1
  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
  displayName: 'IE UT old'
  continueOnError: false
@@ -213,10 +234,9 @@ jobs:
  export DATA_PATH=$(MODELS_PATH)
  export MODELS_PATH=$(MODELS_PATH)
  cd $(REPO_DIR)/inference-engine/ie_bridges/python/tests
- . $(SETUPVARS) -pyver 3.6 && pytest pytest --junitxml=TEST-PythonAPI.xml
+ . $(SETUPVARS) -pyver 3.8 && python3 -m pytest --junitxml=TEST-PythonAPI.xml
  displayName: 'Python API Tests'
  continueOnError: false
- enabled: false

  - task: PublishTestResults@2
  condition: always()
@@ -4,20 +4,18 @@ jobs:
  timeoutInMinutes: 90

  pool:
- name: LIN_VMSS_VENV_F16S_WU2
+ name: LIN_VMSS_VENV_F16S_U20_WU2

  variables:
  system.debug: true
  VSTS_HTTP_RETRY: 5
  VSTS_HTTP_TIMEOUT: 200
- WORKERS_NUMBER: 16
  BUILD_TYPE: Release
  REPO_DIR: $(Build.Repository.LocalPath)
  OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)/../openvino_contrib
  MODELS_PATH: $(REPO_DIR)/../testdata
  WORK_DIR: $(Pipeline.Workspace)/_w
  BUILD_DIR: $(WORK_DIR)/build
- BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE)
  INSTALL_DIR: $(WORK_DIR)/install_pkg
  SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh

@@ -30,6 +28,7 @@ jobs:
  echo Python info ; which python ; python --version
  echo Java info ; which java ; java -version
  echo gcc info ; which gcc ; gcc --version
+ echo cmake info ; which cmake ; cmake --version
  lsb_release
  env
  cat /proc/cpuinfo
@@ -53,10 +52,11 @@ jobs:
  path: openvino

  - script: |
- sudo apt --assume-yes install libusb-1.0-0-dev
+ set -e
+ $(REPO_DIR)/install_build_dependencies.sh
  python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/requirements.txt
  # Speed up build
- wget https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-linux.zip
+ wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
  unzip ninja-linux.zip
  sudo cp -v ninja /usr/local/bin/
  workingDirectory: $(WORK_DIR)
@@ -76,12 +76,14 @@ jobs:

  - script: ninja
  workingDirectory: $(BUILD_DIR)
- displayName: 'Build'
+ displayName: 'Build LinCC'

  - script: ls -alR $(REPO_DIR)/bin/
- displayName: 'List files'
+ displayName: 'List bin files'

  - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake
  workingDirectory: $(BUILD_DIR)
  displayName: 'Install'

+ - script: ls -alR $(INSTALL_DIR)
+ displayName: 'List install files'
@@ -20,13 +20,12 @@ jobs:
  timeoutInMinutes: 90

  pool:
- name: LIN_VMSS_VENV_ONNX_WU2
+ name: LIN_VMSS_VENV_ONNX_U20_WU2

  variables:
  system.debug: true
  VSTS_HTTP_RETRY: 5
  VSTS_HTTP_TIMEOUT: 200
- WORKERS_NUMBER: 8
  REPO_DIR: $(Build.Repository.LocalPath)
  WORK_DIR: $(Pipeline.Workspace)/_w
  MODELS_DIR: /mount/cinfsshare/onnxtestdata
@@ -43,6 +42,7 @@ jobs:
  echo Python info ; which python ; python --version
  echo Java info ; which java ; java -version
  echo gcc info ; which gcc ; gcc --version
+ echo cmake info ; which cmake ; cmake --version
  lsb_release
  env
  cat /proc/cpuinfo
@@ -68,16 +68,23 @@ jobs:
  submodules: recursive
  path: openvino

- - script: docker build --tag=openvino-onnx-ci-image --file=.ci/openvino-onnx/Dockerfile --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg PROTOBUF_LITE=$(PROTOBUF_LITE) .
+ - script: |
+ set -e
+ sudo apt --assume-yes install git-lfs uidmap
+ curl -fsSL https://get.docker.com -o get-docker.sh
+ sudo sh get-docker.sh
+ workingDirectory: $(WORK_DIR)
+ displayName: 'Install dependencies'
+
+ - script: sudo docker build --tag=openvino-onnx-ci-image --file=.ci/openvino-onnx/Dockerfile --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg PROTOBUF_LITE=$(PROTOBUF_LITE) .
  displayName: 'Docker build $(BUILD_TYPE) protobuf-lite: $(PROTOBUF_LITE)'

  - script: ngraph/python/tests/test_onnx/model_zoo_preprocess.sh -d $(TMP_DIR) -o -s "$(ONNX_MODEL_ZOO_SHA)"
  displayName: 'Get models'
  condition: ne(variables['BUILD_TYPE'], 'Debug')

- - script: sudo fallocate -l 48G /swapfile ; sudo mkswap /swapfile ; sudo swapon /swapfile ; df ; free -h
+ - script: sudo fallocate -l 64G /swapfile ; sudo mkswap /swapfile ; sudo swapon /swapfile ; df ; free -h
  displayName: 'Create swap'

- - script: |
- docker run --name openvino-onnx-ci-container --volume $(TMP_DIR)/model_zoo/onnx_model_zoo_$(ONNX_MODEL_ZOO_SHA):/root/.onnx/model_zoo/onnx_model_zoo --volume $(MODELS_DIR)/msft:/root/.onnx/model_zoo/MSFT openvino-onnx-ci-image /bin/bash -c "$(TOX_COMMAND)"
+ - script: sudo docker run --name openvino-onnx-ci-container --volume $(TMP_DIR)/model_zoo/onnx_model_zoo_$(ONNX_MODEL_ZOO_SHA):/root/.onnx/model_zoo/onnx_model_zoo --volume $(MODELS_DIR)/msft:/root/.onnx/model_zoo/MSFT openvino-onnx-ci-image /bin/bash -c "$(TOX_COMMAND)"
  displayName: 'Docker run $(BUILD_TYPE) protobuf-lite: $(PROTOBUF_LITE)'
@@ -3,23 +3,23 @@ jobs:
  timeoutInMinutes: 90

  pool:
- name: LIN_VMSS_VENV_ONNX_WU2
+ name: LIN_VMSS_VENV_ONNX_U20_WU2

  variables:
  system.debug: true
  VSTS_HTTP_RETRY: 5
  VSTS_HTTP_TIMEOUT: 200
- WORKERS_NUMBER: 8
  BUILD_TYPE: Release
  REPO_DIR: $(Build.Repository.LocalPath)
  ONNXRUNTIME_REPO_DIR: $(REPO_DIR)/../onnxruntime
  WORK_DIR: $(Pipeline.Workspace)/_w
  MODELS_DIR: /mount/cinfsshare/onnxtestdata
  TMP_DIR: /mnt/tmp
- INSTALL_DIR: $(WORK_DIR)/install_pkg
+ INSTALL_DIR: $(WORK_DIR)/install_pkg/openvino
  BUILD_DIR: $(WORK_DIR)/build
  ONNXRUNTIME_UTILS: $(REPO_DIR)/.ci/azure/ci_utils/onnxruntime
  ONNXRUNTIME_BUILD_DIR: $(ONNXRUNTIME_REPO_DIR)/build

  steps:
  - script: |
  curl -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2019-06-01"
@@ -29,6 +29,7 @@ jobs:
  echo Python info ; which python ; python --version
  echo Java info ; which java ; java -version
  echo gcc info ; which gcc ; gcc --version
+ echo cmake info ; which cmake ; cmake --version
  lsb_release
  env
  cat /proc/cpuinfo
@@ -60,15 +61,14 @@ jobs:
  displayName: 'Clone onnxruntime'

  - script: |
- sudo apt --assume-yes install libusb-1.0-0-dev
+ set -e
- # For opencv-python: setuptools and upgrade
+ $(REPO_DIR)/install_build_dependencies.sh
- sudo apt-get install python3-setuptools
  python3 -m pip install --upgrade pip
  python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/requirements.txt
  # For running Python API tests
  python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/src/requirements-dev.txt
  # Speed up build
- wget https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-linux.zip
+ wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
  unzip ninja-linux.zip
  sudo cp -v ninja /usr/local/bin/
  # Speed up tests
@@ -83,7 +83,7 @@ jobs:
  -GNinja
  -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
  -DENABLE_PYTHON=ON
- -DPYTHON_EXECUTABLE=/usr/bin/python3.6
+ -DPYTHON_EXECUTABLE=/usr/bin/python3.8
  -DENABLE_VPU=OFF
  -DENABLE_GNA=OFF
  -DENABLE_OPENCV=OFF
@@ -102,10 +102,10 @@ jobs:

  - script: ninja
  workingDirectory: $(BUILD_DIR)
- displayName: 'Build Lin'
+ displayName: 'Build Lin ONNX'

  - script: ls -alR $(REPO_DIR)/bin/
- displayName: 'List files'
+ displayName: 'List bin files'

  - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake
  workingDirectory: $(BUILD_DIR)
@@ -113,10 +113,9 @@ jobs:

  - script: |
  source $(INSTALL_DIR)/bin/setupvars.sh
- echo "2021.2" > $(INSTALL_DIR)/deployment_tools/inference_engine/version.txt
  CXXFLAGS="-Wno-error=deprecated-declarations" ./build.sh --config RelWithDebInfo --use_openvino CPU_FP32 --build_shared_lib --parallel --skip_tests --build_dir $(ONNXRUNTIME_BUILD_DIR)
  workingDirectory: $(ONNXRUNTIME_REPO_DIR)
- displayName: 'Build ONNX Runtime'
+ displayName: 'Build Lin ONNX Runtime'

  - script: |
  source $(INSTALL_DIR)/bin/setupvars.sh
@@ -22,7 +22,6 @@ jobs:
  system.debug: true
  VSTS_HTTP_RETRY: 5
  VSTS_HTTP_TIMEOUT: 200
- WORKERS_NUMBER: 3
  BUILD_TYPE: Release
  REPO_DIR: $(Build.Repository.LocalPath)
  OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)/../openvino_contrib
@@ -76,6 +75,7 @@ jobs:
  - script: |
  brew install cython
  brew install automake
+ python3 -m pip install -r $(REPO_DIR)/ngraph/test/requirements_test_onnx.txt
  # Speed up build
  brew install ninja
  # Speed up tests
@@ -87,7 +87,7 @@ jobs:
  export PATH="/usr/local/opt/cython/bin:$PATH"
  export CC=gcc
  export CXX=g++
- cmake -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules $(REPO_DIR)
+ cmake -GNinja -DVERBOSE_BUILD=ON -DENABLE_REQUIREMENTS_INSTALL=OFF -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules $(REPO_DIR)
  workingDirectory: $(BUILD_DIR)
  displayName: 'CMake'

@@ -103,6 +103,7 @@ jobs:
  displayName: 'Install'

  - script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*:IE_CPU.onnx_model_sigmoid:IE_CPU/GRUSequenceOp.onnx_model_gru* --gtest_output=xml:TEST-NGraphUT.xml
+ workingDirectory: $(BIN_DIR)
  displayName: 'nGraph UT'
  continueOnError: false
@@ -22,7 +22,6 @@ jobs:
  system.debug: true
  VSTS_HTTP_RETRY: 5
  VSTS_HTTP_TIMEOUT: 200
- WORKERS_NUMBER: 8
  BUILD_TYPE: Release
  REPO_DIR: $(Build.Repository.LocalPath)
  OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)\..\openvino_contrib
@@ -42,6 +41,7 @@ jobs:
  - script: |
  powershell -command "Invoke-RestMethod -Headers @{\"Metadata\"=\"true\"} -Method GET -Uri http://169.254.169.254/metadata/instance/compute?api-version=2019-06-01 | format-custom"
  where python3
+ python3 --version
  where python
  python --version
  where java
@@ -83,7 +83,18 @@ jobs:
  path: testdata

  - script: |
- certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-win.zip ninja-win.zip
+ python -m pip install --upgrade pip
+ rem For running Python API tests
+ python -m pip install -r $(REPO_DIR)\inference-engine\ie_bridges\python\src\requirements-dev.txt
+ rem For running PaddlePaddle frontend unit tests
+ python -m pip install -r $(REPO_DIR)\ngraph\test\frontend\paddlepaddle\requirements_dev.txt
+ rem For running ONNX frontend unit tests
+ python -m pip install -r $(REPO_DIR)\ngraph\test\requirements_test_onnx.txt
+ rem For MO unit tests
+ python -m pip install -r $(REPO_DIR)\model-optimizer\requirements.txt
+ python -m pip install -r $(REPO_DIR)\model-optimizer\requirements_dev.txt
+ rem Speed up build
+ certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-win.zip ninja-win.zip
  powershell -command "Expand-Archive -Force ninja-win.zip"
  git clone https://github.com/google/gtest-parallel.git
  workingDirectory: $(WORK_DIR)
@@ -91,7 +102,7 @@ jobs:

  - script: |
  set PATH=$(WORK_DIR)\ninja-win;%PATH%
- call "$(MSVS_VARS_PATH)" && cmake -GNinja -DENABLE_FASTER_BUILD=ON -DENABLE_TEMPLATE_PLUGIN=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
+ call "$(MSVS_VARS_PATH)" && cmake -GNinja -DENABLE_REQUIREMENTS_INSTALL=OFF -DENABLE_FASTER_BUILD=ON -DENABLE_TEMPLATE_PLUGIN=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE="C:\hostedtoolcache\windows\Python\3.7.6\x64\python.exe" -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
  workingDirectory: $(BUILD_DIR)
  displayName: 'CMake'

@@ -129,10 +140,19 @@ jobs:
  workingDirectory: $(BUILD_SAMPLES_DIR)
  displayName: 'Build c samples'

+ - script: rd /Q /S $(BUILD_DIR)
+ displayName: 'Clean build dir'
+ continueOnError: false
+
  - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
+ workingDirectory: $(INSTALL_TEST_DIR)
  displayName: 'nGraph UT'
  continueOnError: false

+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\paddlepaddle_tests --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-PaddlePaddle.xml
+ displayName: 'PaddlePaddle Frontend UT'
+ continueOnError: false
+
  - script: |
  set PATH=$(IB_DIR);%PATH%
  call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests-IB.xml
.github/workflows/code_style.yml (vendored, 28 changed lines)
@@ -28,7 +28,7 @@ jobs:
  cmake -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT ..

  - name: Check code style
- run: cmake --build build --target clang_format_check_all
+ run: cmake --build build --target clang_format_check_all -j8

  - name: Create code style diff
  if: failure()
@@ -64,5 +64,29 @@ jobs:
  cmake ..

  - name: ShellCheck
- run: make ie_shellcheck
+ run: cmake --build . --target ie_shellcheck -j8
+ working-directory: build
+
+ NamingConventionCheck:
+ runs-on: ubuntu-18.04
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ submodules: recursive
+
+ - name: Install Clang dependency
+ run: sudo apt --assume-yes install libusb-1.0-0-dev libclang-9-dev
+
+ - name: Install Python-based dependencies
+ run: |
+ python3 -m pip install pyyaml clang==9.0
+
+ - name: CMake
+ run: |
+ mkdir build
+ cd build
+ cmake ..
+
+ - name: Naming convention check
+ run: cmake --build . --target ncc_all -j8
  working-directory: build
.gitmodules (vendored, 5 changed lines)
@@ -40,7 +40,7 @@
  ignore = dirty
  [submodule "thirdparty/onnx"]
  path = thirdparty/onnx/onnx
- url = https://github.com/openvinotoolkit/onnx.git
+ url = https://github.com/onnx/onnx.git
  [submodule "thirdparty/protobuf"]
  path = thirdparty/protobuf/protobuf
  url = https://github.com/protocolbuffers/protobuf.git
@@ -50,3 +50,6 @@
  [submodule "thirdparty/ittapi/ittapi"]
  path = thirdparty/ittapi/ittapi
  url = https://github.com/intel/ittapi.git
+ [submodule "ncc"]
+ path = cmake/developer_package/ncc_naming_style/ncc
+ url = https://github.com/nithinn/ncc.git
@@ -77,6 +77,9 @@ endfunction()
  ie_cpack_add_component(ngraph REQUIRED)
  ie_cpack_add_component(ngraph_dev REQUIRED DEPENDS ngraph)

+ # add target with processed tests model zoo
+ include(cmake/test_model_zoo.cmake)
+
  add_subdirectory(thirdparty)
  add_subdirectory(openvino)
  add_subdirectory(ngraph)
CODEOWNERS (12 changed lines)
@@ -30,13 +30,13 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins

  # IE GPU:
  /inference-engine/src/cldnn_engine/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
- /inference-engine/include/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
+ /inference-engine/src/inference_engine/include/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
- /inference-engine/include/cldnn/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
+ /inference-engine/src/inference_engine/include/cldnn/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
  /inference-engine/thirdparty/clDNN/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers

  # IE VPU:
  /inference-engine/src/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
- /inference-engine/include/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
+ /inference-engine/src/inference_engine/include/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
  /inference-engine/thirdparty/movidius/ @openvinotoolkit/openvino-ie-vpu-maintainers
  /inference-engine/tests_deprecated/unit/engines/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
  /inference-engine/tests_deprecated/functional/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
@@ -49,11 +49,11 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins

  # IE GNA:
  /inference-engine/src/gna_plugin/ @openvinotoolkit/openvino-ie-gna-maintainers
- /inference-engine/include/gna/ @openvinotoolkit/openvino-ie-gna-maintainers
+ /inference-engine/src/inference_engine/include/gna/ @openvinotoolkit/openvino-ie-gna-maintainers

  # IE MULTI:
  /inference-engine/src/multi_device/ @openvinotoolkit/openvino-ie-multi-maintainers
- /inference-engine/include/multi-device/ @openvinotoolkit/openvino-ie-multi-maintainers
+ /inference-engine/src/inference_engine/include/multi-device/ @openvinotoolkit/openvino-ie-multi-maintainers

  # IE Tests:
  /inference-engine/tests/ @openvinotoolkit/openvino-ie-tests-maintainers
@@ -77,4 +77,4 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins
  # Control 3d party dependencies
  *requirements* @openvino-configuration-mgmt
  *setup.py @openvino-configuration-mgmt
  /scripts/install_dependencies/ @openvino-configuration-mgmt
@@ -7,10 +7,6 @@ cmake_policy(SET CMP0054 NEW)
  # TODO: fix it
  set_temp_directory(TEMP "${IE_MAIN_SOURCE_DIR}")

- if(CMAKE_CROSSCOMPILING)
- set(CMAKE_STAGING_PREFIX "${TEMP}")
- endif()
-
  if(ENABLE_SAME_BRANCH_FOR_MODELS)
  branchName(MODELS_BRANCH)
  else()
@@ -315,25 +311,25 @@ if(ENABLE_SPEECH_DEMO)
  if(DEFINED IE_PATH_TO_DEPS)
  if(WIN32 AND X86_64)
  RESOLVE_DEPENDENCY(SPEECH_LIBS_AND_DEMOS
- ARCHIVE_WIN "speech_demo_1.0.0.774_windows.zip"
+ ARCHIVE_WIN "speech_demo_1.0.0.780_windows.zip"
  VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+.[0-9]+).*"
- TARGET_PATH "${TEMP}/speech_demo_1.0.0.774"
+ TARGET_PATH "${TEMP}/speech_demo_1.0.0.780"
- SHA256 "67b25170be5e89a4f0e90e8b39623b60c9a15b965c30329385e295fcd2edc856")
+ SHA256 "957bd274a1f6dc1d83a46879c7ef3b3b06f17d11af85cc45c18919051d145abd")
  debug_message(STATUS "speech_libs_and_demos=" ${SPEECH_LIBS_AND_DEMOS})
  elseif(LINUX AND X86_64)
  if(LINUX_OS_NAME STREQUAL "CentOS 7" OR CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9")
  RESOLVE_DEPENDENCY(SPEECH_LIBS_AND_DEMOS
- ARCHIVE_LIN "speech_demo_1.0.0.774_centos.tgz"
+ ARCHIVE_LIN "speech_demo_1.0.0.780_centos.tgz"
  VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+.[0-9]+).*"
- TARGET_PATH "${TEMP}/speech_demo_1.0.0.774"
+ TARGET_PATH "${TEMP}/speech_demo_1.0.0.780"
- SHA256 "5ec3b7be9ae05376aefae5bd5fd4a39b12c274e82817fd3218120b8e8fc8ff5a")
+ SHA256 "6d8d1111d0e662fe71d71cd3debad2995f6fb6fe5df3b92196dae06ff7abdf44")
  debug_message(STATUS "speech_libs_and_demos=" ${SPEECH_LIBS_AND_DEMOS})
  else()
  RESOLVE_DEPENDENCY(SPEECH_LIBS_AND_DEMOS
- ARCHIVE_LIN "speech_demo_1.0.0.774_linux.tgz"
+ ARCHIVE_LIN "speech_demo_1.0.0.780_linux.tgz"
  VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+.[0-9]+).*"
- TARGET_PATH "${TEMP}/speech_demo_1.0.0.774"
+ TARGET_PATH "${TEMP}/speech_demo_1.0.0.780"
- SHA256 "f0bbd0a6218b0365e7cfb1f860b34e4ace7e0d47dd60b369cdea8a480329810f")
+ SHA256 "0ec6f1e47c00d781dc918af5d3055ab474ff47b9978dd6fe2add73e3339b0763")
  debug_message(STATUS "speech_libs_and_demos=" ${SPEECH_LIBS_AND_DEMOS})
  endif()
  else()
@@ -132,7 +132,7 @@ set(IE_DEBUG_POSTFIX_WIN "d")
  set(IE_RELEASE_POSTFIX_WIN "")
  set(IE_DEBUG_POSTFIX_LIN "")
  set(IE_RELEASE_POSTFIX_LIN "")
- set(IE_DEBUG_POSTFIX_MAC "d")
+ set(IE_DEBUG_POSTFIX_MAC "")
  set(IE_RELEASE_POSTFIX_MAC "")

  if(WIN32)
@@ -187,8 +187,8 @@ set_property(GLOBAL PROPERTY USE_FOLDERS ON)
  # Enable CMAKE_<LANG>_COMPILER_ID AppleClang
  set(CMAKE_POLICY_DEFAULT_CMP0025 NEW)

- set(CMAKE_WARN_DEPRECATED OFF)
+ set(CMAKE_WARN_DEPRECATED OFF CACHE BOOL "Don't warn about obsolete cmake versions in 3rdparty")
- set(CMAKE_WARN_ON_ABSOLUTE_INSTALL_DESTINATION ON)
+ set(CMAKE_WARN_ON_ABSOLUTE_INSTALL_DESTINATION ON CACHE BOOL "Warn about absolute paths in destination")

  # LTO
|
|||||||
|
|
||||||
# check python package
|
# check python package
|
||||||
|
|
||||||
function(ie_check_pip_package name message_type)
|
function(ie_check_pip_package full_name message_type)
|
||||||
find_package(PythonInterp 3 REQUIRED)
|
find_package(PythonInterp 3 REQUIRED)
|
||||||
|
|
||||||
|
get_filename_component(PYTHON_EXEC_DIR ${PYTHON_EXECUTABLE} DIRECTORY)
|
||||||
|
|
||||||
|
# extract version if any
|
||||||
|
if(full_name MATCHES "^([a-z_]+)[~=<>!]*(.*)$")
|
||||||
|
set(name ${CMAKE_MATCH_1})
|
||||||
|
set(req_version ${CMAKE_MATCH_2})
|
||||||
|
else()
|
||||||
|
set(name ${full_name})
|
||||||
|
endif()
|
||||||
|
|
||||||
execute_process(
|
execute_process(
|
||||||
COMMAND ${PYTHON_EXECUTABLE} -m pip show ${name}
|
COMMAND ${PYTHON_EXECUTABLE} -m pip show ${name}
|
||||||
|
WORKING_DIRECTORY ${PYTHON_EXEC_DIR}
|
||||||
RESULT_VARIABLE PIP_EXIT_CODE
|
RESULT_VARIABLE PIP_EXIT_CODE
|
||||||
OUTPUT_QUIET
|
OUTPUT_VARIABLE output)
|
||||||
)
|
|
||||||
|
|
||||||
if(NOT PIP_EXIT_CODE EQUAL 0)
|
if(NOT PIP_EXIT_CODE EQUAL 0)
|
||||||
set(${name}_FOUND OFF PARENT_SCOPE)
|
set(${name}_FOUND OFF PARENT_SCOPE)
|
||||||
message(${message_type} "${name} package is not installed. Please use \"${PYTHON_EXECUTABLE} -m pip install ${name}\".")
|
message(${message_type} "${name} package is not installed. Please use \"${PYTHON_EXECUTABLE} -m pip install ${full_name}\".")
|
||||||
else()
|
else()
|
||||||
set(${name}_FOUND ON PARENT_SCOPE)
|
if(req_version)
|
||||||
|
string(REGEX MATCH "Version: ([0-9]+\.?[0-9]*\.?[0-9]*)\n" installed_version "${output}")
|
||||||
|
if(installed_version)
|
||||||
|
set(installed_version "${CMAKE_MATCH_1}")
|
||||||
|
endif()
|
||||||
|
|
||||||
|
message(${message_type} "${name} package is installed, but may have different version (${installed_version}). "
|
||||||
|
"Please use \"${PYTHON_EXECUTABLE} -m pip install ${full_name}\".")
|
||||||
|
else()
|
||||||
|
set(${name}_FOUND ON PARENT_SCOPE)
|
||||||
|
endif()
|
||||||
endif()
|
endif()
|
||||||
endfunction()
|
endfunction()
|
||||||
|
|
||||||
@ -272,6 +292,7 @@ endfunction()
|
|||||||
|
|
||||||
include(cpplint/cpplint)
|
include(cpplint/cpplint)
|
||||||
include(clang_format/clang_format)
|
include(clang_format/clang_format)
|
||||||
|
include(ncc_naming_style/ncc_naming_style)
|
||||||
|
|
||||||
# Restore state
|
# Restore state
|
||||||
set(CMAKE_MODULE_PATH ${OLD_CMAKE_MODULE_PATH})
|
set(CMAKE_MODULE_PATH ${OLD_CMAKE_MODULE_PATH})
|
||||||
|
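For context on the hunk above: ie_check_pip_package now takes a pip-style requirement string plus a CMake message level, and emits the suggested pip install command when the package is missing or a version constraint is present. A minimal usage sketch, assuming the caller passes the same packages the new NamingConventionCheck job installs (pyyaml, clang==9.0); the severity choices here are illustrative, not part of this change:

  # plain presence check, reported at STATUS severity (this is how requirements_dev.txt entries are checked)
  ie_check_pip_package(pyyaml STATUS)
  # versioned requirement; an installed-but-possibly-mismatched package is reported at WARNING severity
  ie_check_pip_package(clang==9.0 WARNING)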
@@ -2,17 +2,17 @@
  # SPDX-License-Identifier: Apache-2.0
  #

- if (ENABLE_CLANG_FORMAT)
+ if(ENABLE_CLANG_FORMAT)
  set(CLANG_FORMAT_FILENAME clang-format-9 clang-format)
- find_program(CLANG_FORMAT NAMES ${CLANG_FORMAT_FILENAME} PATHS ENV PATH)
+ find_host_program(CLANG_FORMAT NAMES ${CLANG_FORMAT_FILENAME} PATHS ENV PATH)
- if (CLANG_FORMAT)
+ if(CLANG_FORMAT)
  execute_process(COMMAND ${CLANG_FORMAT} ${CMAKE_CURRENT_SOURCE_DIR} ARGS --version OUTPUT_VARIABLE CLANG_VERSION)
- if (NOT CLANG_VERSION OR CLANG_VERSION STREQUAL "")
+ if(NOT CLANG_VERSION OR CLANG_VERSION STREQUAL "")
  message(WARNING "Supported clang-format version is 9!")
  set(ENABLE_CLANG_FORMAT OFF)
  else()
  string(REGEX REPLACE "[^0-9]+([0-9]+)\\..*" "\\1" CLANG_FORMAT_MAJOR_VERSION ${CLANG_VERSION})
- if (NOT ${CLANG_FORMAT_MAJOR_VERSION} EQUAL "9")
+ if(NOT ${CLANG_FORMAT_MAJOR_VERSION} EQUAL "9")
  message(WARNING "Supported clang-format version is 9!")
  set(ENABLE_CLANG_FORMAT OFF)
  endif()
@@ -68,13 +68,13 @@ function(ie_sse42_optimization_flags flags)
  if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
  # No such option for MSVC 2019
  elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
- set(${flags} /arch:SSE4.2 /QxSSE4.2 PARENT_SCOPE)
+ set(${flags} /QxSSE4.2 PARENT_SCOPE)
  else()
  message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
  endif()
  else()
  if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
- set(${flags} -msse4.2 -xSSE4.2 PARENT_SCOPE)
+ set(${flags} -xSSE4.2 PARENT_SCOPE)
  else()
  set(${flags} -msse4.2 PARENT_SCOPE)
  endif()
@@ -95,7 +95,7 @@ function(ie_avx2_optimization_flags flags)
  endif()
  else()
  if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
- set(${flags} -march=core-avx2 -xCORE-AVX2 -mtune=core-avx2 PARENT_SCOPE)
+ set(${flags} -xCORE-AVX2 PARENT_SCOPE)
  else()
  set(${flags} -mavx2 -mfma PARENT_SCOPE)
  endif()
@@ -152,6 +152,24 @@ function(ie_arm_neon_optimization_flags flags)
  endif()
  endfunction()

+ #
+ # Disables all warnings for 3rd party targets
+ #
+ function(ov_disable_all_warnings)
+ foreach(target IN LISTS ARGN)
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ target_compile_options(${target} PRIVATE /WX-)
+ elseif(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG)
+ target_compile_options(${target} PRIVATE -w)
+ elseif(UNIX AND CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+ # 193: zero used for undefined preprocessing identifier "XXX"
+ # 1011: missing return statement at end of non-void function "XXX"
+ # 2415: variable "xxx" of static storage duration was declared but never referenced
+ target_compile_options(${target} PRIVATE -diag-disable=warn,193,1011,2415)
+ endif()
+ endforeach()
+ endfunction()
+
  #
  # Enables Link Time Optimization compilation
  #
|
|||||||
ie_add_compiler_flags(-Wreturn-type)
|
ie_add_compiler_flags(-Wreturn-type)
|
||||||
ie_add_compiler_flags(-Wunused-variable)
|
ie_add_compiler_flags(-Wunused-variable)
|
||||||
|
|
||||||
# Disable noisy warnings
|
|
||||||
|
|
||||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
|
if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
|
||||||
ie_add_compiler_flags(-Wswitch)
|
ie_add_compiler_flags(-Wswitch)
|
||||||
elseif(UNIX)
|
elseif(UNIX)
|
||||||
ie_add_compiler_flags(-Wuninitialized -Winit-self)
|
ie_add_compiler_flags(-Wuninitialized -Winit-self)
|
||||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
||||||
ie_add_compiler_flags(-Wno-error=switch
|
ie_add_compiler_flags(-Winconsistent-missing-override
|
||||||
-Winconsistent-missing-override)
|
-Wstring-plus-int)
|
||||||
else()
|
else()
|
||||||
ie_add_compiler_flags(-Wmaybe-uninitialized)
|
ie_add_compiler_flags(-Wmaybe-uninitialized)
|
||||||
check_cxx_compiler_flag("-Wsuggest-override" SUGGEST_OVERRIDE_SUPPORTED)
|
check_cxx_compiler_flag("-Wsuggest-override" SUGGEST_OVERRIDE_SUPPORTED)
|
||||||
@ -304,10 +320,11 @@ else()
|
|||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
# Disable noisy warnings
|
||||||
|
|
||||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
|
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
|
||||||
ie_add_compiler_flags(-diag-disable=remark)
|
# 177: function "XXX" was declared but never referenced
|
||||||
# noisy warnings from Intel Compiler 19.1.1.217 20200306
|
ie_add_compiler_flags(-diag-disable=remark,177,2196)
|
||||||
ie_add_compiler_flags(-diag-disable=2196)
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# Linker flags
|
# Linker flags
|
||||||
@ -315,7 +332,6 @@ else()
|
|||||||
if(APPLE)
|
if(APPLE)
|
||||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-dead_strip")
|
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-dead_strip")
|
||||||
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,-dead_strip")
|
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,-dead_strip")
|
||||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-dead_strip")
|
|
||||||
elseif(LINUX)
|
elseif(LINUX)
|
||||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
|
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
|
||||||
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
|
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
|
||||||
|
@ -18,6 +18,8 @@ if (ENABLE_UB_SANITIZER)
|
|||||||
# TODO: Remove -fno-sanitize=null as thirdparty/ocl/clhpp_headers UBSAN compatibility resolved:
|
# TODO: Remove -fno-sanitize=null as thirdparty/ocl/clhpp_headers UBSAN compatibility resolved:
|
||||||
# https://github.com/KhronosGroup/OpenCL-CLHPP/issues/17
|
# https://github.com/KhronosGroup/OpenCL-CLHPP/issues/17
|
||||||
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize=undefined -fno-sanitize=null")
|
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize=undefined -fno-sanitize=null")
|
||||||
|
# TODO: Remove -Wno-maybe-uninitialized after CVS-61143 fix
|
||||||
|
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -Wno-maybe-uninitialized")
|
||||||
check_cxx_compiler_flag("-fsanitize-recover=undefined" SANITIZE_RECOVER_UNDEFINED_SUPPORTED)
|
check_cxx_compiler_flag("-fsanitize-recover=undefined" SANITIZE_RECOVER_UNDEFINED_SUPPORTED)
|
||||||
if (SANITIZE_RECOVER_UNDEFINED_SUPPORTED)
|
if (SANITIZE_RECOVER_UNDEFINED_SUPPORTED)
|
||||||
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize-recover=undefined")
|
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize-recover=undefined")
|
||||||
@ -33,17 +35,18 @@ endif()
|
|||||||
|
|
||||||
# common sanitizer options
|
# common sanitizer options
|
||||||
if (DEFINED SANITIZER_COMPILER_FLAGS)
|
if (DEFINED SANITIZER_COMPILER_FLAGS)
|
||||||
# ensure sumbols are present
|
# ensure symbols are present
|
||||||
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -g -fno-omit-frame-pointer")
|
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -g -fno-omit-frame-pointer")
|
||||||
|
if(NOT OV_COMPILER_IS_CLANG)
|
||||||
|
# GPU plugin tests compilation is slow with -fvar-tracking-assignments on GCC.
|
||||||
|
# Clang has no var-tracking-assignments.
|
||||||
|
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fno-var-tracking-assignments")
|
||||||
|
endif()
|
||||||
# prevent unloading libraries at runtime, so sanitizer can resolve their symbols
|
# prevent unloading libraries at runtime, so sanitizer can resolve their symbols
|
||||||
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -Wl,-z,nodelete")
|
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -Wl,-z,nodelete")
|
||||||
|
|
||||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
if(OV_COMPILER_IS_CLANG AND NOT WIN32 AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 8.0)
|
||||||
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=gold")
|
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld")
|
||||||
elseif(OV_COMPILER_IS_CLANG AND NOT WIN32)
|
|
||||||
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 8.0)
|
|
||||||
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld")
|
|
||||||
endif()
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
|
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
|
||||||
|
@@ -44,16 +44,14 @@ ie_option (BUILD_SHARED_LIBS "Build as a shared library" ON)

  ie_dependent_option (ENABLE_FASTER_BUILD "Enable build features (PCH, UNITY) to speed up build time" OFF "CMAKE_VERSION VERSION_GREATER_EQUAL 3.16" OFF)

- if(NOT DEFINED ENABLE_CPPLINT)
- ie_dependent_option (ENABLE_CPPLINT "Enable cpplint checks during the build" ON "UNIX;NOT ANDROID" OFF)
- endif()
+ ie_dependent_option (ENABLE_CPPLINT "Enable cpplint checks during the build" ON "UNIX;NOT ANDROID" OFF)

- if(NOT DEFINED ENABLE_CPPLINT_REPORT)
- ie_dependent_option (ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF)
- endif()
+ ie_dependent_option (ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF)

  ie_dependent_option (ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" ON "UNIX;NOT ANDROID" OFF)

+ ie_dependent_option (ENABLE_NCC_STYLE "Enable ncc style check" ON "UNIX;NOT ANDROID" OFF)
+
  ie_option (VERBOSE_BUILD "shows extra information about build" OFF)

  ie_option (ENABLE_UNSAFE_LOCATIONS "skip check for MD5 for dependency" OFF)
cmake/developer_package/ncc_naming_style/ncc (new submodule)
@@ -0,0 +1 @@
+ Subproject commit d7d83049708eaa18ea6796adf0eeef85b28ebc1f
cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake (new file, 137 lines; shown truncated)
@@ -0,0 +1,137 @@
+ # Copyright (C) 2018-2021 Intel Corporation
+ # SPDX-License-Identifier: Apache-2.0
+ #
+
+ if(NOT COMMAND ie_check_pip_package)
+ message(FATAL_ERROR "ncc_naming_style.cmake must be included after ie_check_pip_package")
+ endif()
+
+ set(ncc_style_dir "${IEDevScripts_DIR}/ncc_naming_style")
+ set(ncc_style_bin_dir "${CMAKE_CURRENT_BINARY_DIR}/ncc_naming_style")
+
+ # try to find_package(Clang QUIET)
+ # ClangConfig.cmake contains bug that if libclang-XX-dev is not
+ # installed, then find_package fails with errors even in QUIET mode
+ configure_file("${ncc_style_dir}/try_find_clang.cmake"
+ "${ncc_style_bin_dir}/source/CMakeLists.txt" COPYONLY)
+ execute_process(
+ COMMAND
+ "${CMAKE_COMMAND}" -S "${ncc_style_bin_dir}/source"
+ -B "${ncc_style_bin_dir}/build"
+ RESULT_VARIABLE clang_find_result
+ OUTPUT_VARIABLE output
+ ERROR_VARIABLE output)
+
+ if(NOT clang_find_result EQUAL "0")
+ message(WARNING "Please, install libclang-[N]-dev package (required for ncc naming style check)")
+ set(ENABLE_NCC_STYLE OFF)
+ endif()
+
+ # Since we were able to find_package(Clang) in a separate process
+ # let's try to find in current process
+ if(ENABLE_NCC_STYLE)
+ find_host_package(Clang QUIET)
+ if(Clang_FOUND AND TARGET libclang)
+ get_target_property(libclang_location libclang LOCATION)
+ set(ncc_wrapper_py "${ncc_style_bin_dir}/ncc_wrapper.py")
+ configure_file("${ncc_style_dir}/ncc_wrapper.py.in" ${ncc_wrapper_py} @ONLY)
+ message(STATUS "Found libclang: ${libclang_location}")
+ else()
+ message(WARNING "libclang is not found (required for ncc naming style check)")
+ set(ENABLE_NCC_STYLE OFF)
+ endif()
+ endif()
+
+ # find python3
+ find_package(PythonInterp 3 QUIET)
+ if(NOT PYTHONINTERP_FOUND)
+ message(WARNING "Python3 interpreter was not found (required for ncc naming style check)")
+ set(ENABLE_NCC_STYLE OFF)
+ endif()
+
+ # check python requirements_dev.txt
+ set(req_file "${ncc_style_dir}/requirements_dev.txt")
+ file(STRINGS ${req_file} req_lines)
+
+ foreach(req IN LISTS req_lines)
+ ie_check_pip_package(${req} STATUS)
+ endforeach()
+
+ set(ncc_script_dir "${ncc_style_dir}/ncc/")
+ set(ncc_script_py "${ncc_style_dir}/ncc/ncc.py")
+
+ if(NOT EXISTS ${ncc_script_py})
+ message(WARNING "ncc.py is not downloaded via submodule")
+ set(ENABLE_NCC_STYLE OFF)
+ endif()
+
+ # create high-level target
+ if(ENABLE_NCC_STYLE AND NOT TARGET ncc_all)
+ add_custom_target(ncc_all ALL)
+ set_target_properties(ncc_all PROPERTIES FOLDER ncc_naming_style)
+ endif()
+
+ #
+ # ov_ncc_naming_style(FOR_TARGET target_name
+ #                     INCLUDE_DIRECTORY dir
+ #                     [ADDITIONAL_INCLUDE_DIRECTORIES dir1 dir2 ..])
+ #
+ # FOR_TARGET - name of the target
+ # INCLUDE_DIRECTORY - directory to check headers from
+ # ADDITIONAL_INCLUDE_DIRECTORIES - additional include directories used in checked headers
+ #
+ function(ov_ncc_naming_style)
+ if(NOT ENABLE_NCC_STYLE)
+ return()
+ endif()
+
+ cmake_parse_arguments(NCC_STYLE ""
+ "FOR_TARGET;INCLUDE_DIRECTORY" "ADDITIONAL_INCLUDE_DIRECTORIES" ${ARGN})
+
+ file(GLOB_RECURSE headers
+ RELATIVE "${NCC_STYLE_INCLUDE_DIRECTORY}"
+ "${NCC_STYLE_INCLUDE_DIRECTORY}/*.hpp")
+
+ set(new_pythonpath "${ncc_script_dir}:$ENV{PYTHOPATH}")
+ list(APPEND ADDITIONAL_INCLUDE_DIRECTORIES "${NCC_STYLE_INCLUDE_DIRECTORY}")
|
||||||
|
|
||||||
|
foreach(header IN LISTS headers)
|
||||||
|
set(output_file "${ncc_style_bin_dir}/${header}.ncc_style")
|
||||||
|
set(full_header_path "${NCC_STYLE_INCLUDE_DIRECTORY}/${header}")
|
||||||
|
|
||||||
|
add_custom_command(
|
||||||
|
OUTPUT
|
||||||
|
${output_file}
|
||||||
|
COMMAND
|
||||||
|
"${CMAKE_COMMAND}" -E env PYTHONPATH=${new_pythonpath}
|
||||||
|
"${CMAKE_COMMAND}"
|
||||||
|
-D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
|
||||||
|
-D "NCC_PY_SCRIPT=${ncc_wrapper_py}"
|
||||||
|
-D "INPUT_FILE=${full_header_path}"
|
||||||
|
-D "OUTPUT_FILE=${output_file}"
|
||||||
|
-D "STYLE_FILE=${ncc_style_dir}/openvino.style"
|
||||||
|
-D "ADDITIONAL_INCLUDE_DIRECTORIES=${ADDITIONAL_INCLUDE_DIRECTORIES}"
|
||||||
|
-P "${ncc_style_dir}/ncc_run.cmake"
|
||||||
|
DEPENDS
|
||||||
|
"${full_header_path}"
|
||||||
|
"${ncc_style_dir}/openvino.style"
|
||||||
|
"${ncc_script_py}"
|
||||||
|
"${ncc_wrapper_py}"
|
||||||
|
"${ncc_style_dir}/ncc_run.cmake"
|
||||||
|
COMMENT
|
||||||
|
"[ncc naming style] ${header}"
|
||||||
|
VERBATIM)
|
||||||
|
list(APPEND output_files ${output_file})
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
set(ncc_target ${NCC_STYLE_FOR_TARGET}_ncc_check)
|
||||||
|
add_custom_target(${ncc_target}
|
||||||
|
DEPENDS ${output_files}
|
||||||
|
COMMENT "[ncc naming style] ${NCC_STYLE_FOR_TARGET}")
|
||||||
|
|
||||||
|
add_dependencies(${NCC_STYLE_FOR_TARGET} ${ncc_target})
|
||||||
|
add_dependencies(ncc_all ${ncc_target})
|
||||||
|
endfunction()
|
31
cmake/developer_package/ncc_naming_style/ncc_run.cmake
Normal file
31
cmake/developer_package/ncc_naming_style/ncc_run.cmake
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
# Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
|
||||||
|
foreach(var NCC_PY_SCRIPT PYTHON_EXECUTABLE OUTPUT_FILE
|
||||||
|
INPUT_FILE ADDITIONAL_INCLUDE_DIRECTORIES STYLE_FILE)
|
||||||
|
if(NOT DEFINED ${var})
|
||||||
|
message(FATAL_ERROR "${var} is not defined for ncc_run.cmake")
|
||||||
|
endif()
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
file(REMOVE "${OUTPUT_FILE}")
|
||||||
|
|
||||||
|
execute_process(
|
||||||
|
COMMAND
|
||||||
|
"${PYTHON_EXECUTABLE}"
|
||||||
|
"${NCC_PY_SCRIPT}"
|
||||||
|
--path ${INPUT_FILE}
|
||||||
|
--style ${STYLE_FILE}
|
||||||
|
--include ${ADDITIONAL_INCLUDE_DIRECTORIES}
|
||||||
|
RESULT_VARIABLE result
|
||||||
|
OUTPUT_VARIABLE output
|
||||||
|
ERROR_VARIABLE output)
|
||||||
|
|
||||||
|
file(WRITE "${OUTPUT_FILE}" "${output}")
|
||||||
|
|
||||||
|
if(NOT result EQUAL "0")
|
||||||
|
# Display the output to console (to parse it form IDE)
|
||||||
|
message("${output}")
|
||||||
|
message(FATAL_ERROR "[ncc naming style] Naming style check failed for ${INPUT_FILE}")
|
||||||
|
endif()
|
52
cmake/developer_package/ncc_naming_style/ncc_wrapper.py.in
Normal file
52
cmake/developer_package/ncc_naming_style/ncc_wrapper.py.in
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from clang.cindex import Config
|
||||||
|
from ncc import Options, RulesDb, do_validate, Validator
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# set path to speicific clang library location
|
||||||
|
Config.set_library_file('@libclang_location@')
|
||||||
|
|
||||||
|
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s',
|
||||||
|
filename='log.txt', filemode='w')
|
||||||
|
|
||||||
|
""" Parse all command line arguments and validate """
|
||||||
|
op = Options()
|
||||||
|
op.parse_cmd_line()
|
||||||
|
|
||||||
|
if op.args.path is None:
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
""" Creating the rules database """
|
||||||
|
rules_db = RulesDb(op._style_file)
|
||||||
|
|
||||||
|
""" Check the source code against the configured rules """
|
||||||
|
errors = 0
|
||||||
|
for path in op.args.path:
|
||||||
|
if os.path.isfile(path):
|
||||||
|
if do_validate(op, path):
|
||||||
|
v = Validator(rules_db, path, op)
|
||||||
|
errors += v.validate()
|
||||||
|
elif os.path.isdir(path):
|
||||||
|
for (root, subdirs, files) in os.walk(path):
|
||||||
|
for filename in files:
|
||||||
|
path = root + '/' + filename
|
||||||
|
if do_validate(op, path):
|
||||||
|
v = Validator(rules_db, path, op)
|
||||||
|
errors += v.validate()
|
||||||
|
|
||||||
|
if not op.args.recurse:
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
sys.stderr.write("File '{}' not found!\n".format(path))
|
||||||
|
|
||||||
|
if errors:
|
||||||
|
print("Total number of errors = {}".format(errors))
|
||||||
|
sys.exit(1)
|
129
cmake/developer_package/ncc_naming_style/openvino.style
Normal file
129
cmake/developer_package/ncc_naming_style/openvino.style
Normal file
@ -0,0 +1,129 @@
|
|||||||
|
# custom OpenVINO values
|
||||||
|
CppMethod: '^(operator\W+|[a-z_\d]+|signaling_NaN|quiet_NaN)$'
|
||||||
|
# TODO: remove stopwatch|unsupported_op
|
||||||
|
ClassName: '^([A-Z][\w]+|b?float16|numeric_limits|ngraph_error|stopwatch|unsupported_op)$'
|
||||||
|
# TODO: remove oi_pair
|
||||||
|
StructName: '^([A-Z][\w]+|element_type_traits|hash|oi_pair)$'
|
||||||
|
FunctionName: '^(operator\W+|[a-z_\d]+)$'
|
||||||
|
Namespace: '^[a-z\d_]+$'
|
||||||
|
NamespaceAlias: '^[a-z\d_]+$'
|
||||||
|
UnionName: '[A-Z][\w]+$'
|
||||||
|
TemplateTemplateParameter: '[A-Z][\w]+'
|
||||||
|
NamespaceReference: '^[a-z\d_]+$'
|
||||||
|
TemplateNonTypeParameter: '^\w*$'
|
||||||
|
ClassTemplate: '^([A-Z][\w]+|element_type_traits)$'
|
||||||
|
TemplateTypeParameter: '^\w*$'
|
||||||
|
ParameterName: '^\w*$'
|
||||||
|
FunctionTemplate: '^(operator.+|\w+)$'
|
||||||
|
TypeAliasName: '^\w+$'
|
||||||
|
VariableReference: '^\w+$'
|
||||||
|
|
||||||
|
# TODO: align
|
||||||
|
EnumConstantName: '^.*$'
|
||||||
|
EnumName: '^.*$'
|
||||||
|
UsingDeclaration: '^.*$'
|
||||||
|
TypedefName: '^.*$'
|
||||||
|
|
||||||
|
# not needed values
|
||||||
|
ClassTemplatePartialSpecialization: 'XXXX'
|
||||||
|
ConversionFunction: '^.*$'
|
||||||
|
UsingDirective: 'XXXX'
|
||||||
|
ClassAccessSpecifier: '^.*$' # looks like can be fixed
|
||||||
|
TypeReference: '^.*$' # looks like can be fixed
|
||||||
|
CxxBaseSpecifier: '^.*$' # looks like can be fixed
|
||||||
|
TemplateReference: '^.*$'
|
||||||
|
MemberReference: '^.*$'
|
||||||
|
LabelReference: 'XXXX'
|
||||||
|
OverloadedDeclarationReference: '^.*$'
|
||||||
|
InvalidFile: 'XXXX'
|
||||||
|
NoDeclarationFound: 'XXXX'
|
||||||
|
NotImplemented: 'XXXX'
|
||||||
|
InvalidCode: 'XXXX'
|
||||||
|
UnexposedExpression: '^.*$'
|
||||||
|
DeclarationReferenceExpression: '^.*$'
|
||||||
|
MemberReferenceExpression: '^.*$'
|
||||||
|
CallExpression: '^.*$'
|
||||||
|
BlockExpression: 'XXXX'
|
||||||
|
IntegerLiteral: '^.*$'
|
||||||
|
FloatingLiteral: '^.*$'
|
||||||
|
ImaginaryLiteral: 'XXXX'
|
||||||
|
StringLiteral: '^.*$'
|
||||||
|
CharacterLiteral: '^.*$'
|
||||||
|
ParenExpression: '^.*$'
|
||||||
|
UnaryOperator: '^.*$'
|
||||||
|
ArraySubscriptExpression: '^.*$'
|
||||||
|
BinaryOperator: '^.*$'
|
||||||
|
CompoundAssignmentOperator: '^.*$'
|
||||||
|
ConditionalOperator: '^.*$'
|
||||||
|
CstyleCastExpression: '^.*$'
|
||||||
|
CompoundLiteralExpression: 'XXXX'
|
||||||
|
InitListExpression: '^.*$'
|
||||||
|
AddrLabelExpression: 'XXXX'
|
||||||
|
StatementExpression: 'XXXX'
|
||||||
|
GenericSelectionExpression: 'XXXX'
|
||||||
|
GnuNullExpression: 'XXXX'
|
||||||
|
CxxStaticCastExpression: '^.*$'
|
||||||
|
CxxDynamicCastExpression: 'XXXX'
|
||||||
|
CxxReinterpretCastExpression: '^.*$'
|
||||||
|
CxxConstCastExpression: 'XXXX'
|
||||||
|
CxxFunctionalCastExpression: '^.*$'
|
||||||
|
CxxTypeidExpression: 'XXXX'
|
||||||
|
CxxBoolLiteralExpression: '^.*$'
|
||||||
|
CxxNullPointerLiteralExpression: '^.*$'
|
||||||
|
CxxThisExpression: '^.*$'
|
||||||
|
CxxThrowExpression: '^.*$'
|
||||||
|
CxxNewExpression: '^.*$'
|
||||||
|
CxxDeleteExpression: 'XXXX'
|
||||||
|
CxxUnaryExpression: '^.*$'
|
||||||
|
PackExpansionExpression: '^.*$'
|
||||||
|
SizeOfPackExpression: '^.*$'
|
||||||
|
LambdaExpression: '^.*$'
|
||||||
|
ObjectBoolLiteralExpression: 'XXXX'
|
||||||
|
ObjectSelfExpression: 'XXXX'
|
||||||
|
UnexposedStatement: 'XXXX'
|
||||||
|
LabelStatement: 'XXXX'
|
||||||
|
CompoundStatement: '^.*$'
|
||||||
|
CaseStatement: '^.*$'
|
||||||
|
DefaultStatement: '^.*$'
|
||||||
|
IfStatement: '^.*$'
|
||||||
|
SwitchStatement: '^.*$'
|
||||||
|
WhileStatement: '^.*$'
|
||||||
|
DoStatement: '^.*$'
|
||||||
|
ForStatement: '^.*$'
|
||||||
|
GotoStatement: 'XXXX'
|
||||||
|
IndirectGotoStatement: 'XXXX'
|
||||||
|
ContinueStatement: '^.*$'
|
||||||
|
BreakStatement: '^.*$'
|
||||||
|
ReturnStatement: '^.*$'
|
||||||
|
AsmStatement: 'XXXX'
|
||||||
|
CxxCatchStatement: 'XXXX'
|
||||||
|
CxxTryStatement: 'XXXX'
|
||||||
|
CxxForRangeStatement: '^.*$'
|
||||||
|
MsAsmStatement: 'XXXX'
|
||||||
|
NullStatement: 'XXXX'
|
||||||
|
DeclarationStatement: '^.*$'
|
||||||
|
TranslationUnit: 'XXXX'
|
||||||
|
UnexposedAttribute: '^.*$'
|
||||||
|
CxxFinalAttribute: 'XXXX'
|
||||||
|
CxxOverrideAttribute: '^.*$'
|
||||||
|
AnnotateAttribute: 'XXXX'
|
||||||
|
AsmLabelAttribute: 'XXXX'
|
||||||
|
PackedAttribute: 'XXXX'
|
||||||
|
PureAttribute: 'XXXX'
|
||||||
|
ConstAttribute: 'XXXX'
|
||||||
|
NoduplicateAttribute: 'XXXX'
|
||||||
|
PreprocessingDirective: 'XXXX'
|
||||||
|
MacroDefinition: 'XXXX'
|
||||||
|
MacroInstantiation: 'XXXX'
|
||||||
|
InclusionDirective: 'XXXX'
|
||||||
|
VariableName:
|
||||||
|
ScopePrefix:
|
||||||
|
Global: ''
|
||||||
|
Static: ''
|
||||||
|
ClassMember: ''
|
||||||
|
DataTypePrefix:
|
||||||
|
String: ''
|
||||||
|
Integer: ''
|
||||||
|
Bool: ''
|
||||||
|
Pointer: ''
|
||||||
|
Pattern: '^.*$'
|
@ -0,0 +1,2 @@
|
|||||||
|
clang==9.0
|
||||||
|
pyyaml
|
@ -0,0 +1,8 @@
|
|||||||
|
# Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
|
||||||
|
cmake_minimum_required(VERSION 3.13)
|
||||||
|
project(try_find_clang)
|
||||||
|
|
||||||
|
find_package(Clang QUIET)
|
@ -42,7 +42,7 @@ macro(ie_parse_ci_build_number)
|
|||||||
return()
|
return()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
set(ie_version_hpp "${OpenVINO_SOURCE_DIR}/inference-engine/include/ie_version.hpp")
|
set(ie_version_hpp "${OpenVINO_SOURCE_DIR}/inference-engine/src/inference_engine/include/ie/ie_version.hpp")
|
||||||
if(NOT EXISTS ${ie_version_hpp})
|
if(NOT EXISTS ${ie_version_hpp})
|
||||||
message(FATAL_ERROR "File ie_version.hpp with IE_VERSION definitions is not found")
|
message(FATAL_ERROR "File ie_version.hpp with IE_VERSION definitions is not found")
|
||||||
endif()
|
endif()
|
||||||
|
@ -98,7 +98,7 @@ ie_dependent_option (ENABLE_FUNCTIONAL_TESTS "functional tests" ON "ENABLE_TESTS
|
|||||||
|
|
||||||
ie_dependent_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON "NOT MINGW" OFF)
|
ie_dependent_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON "NOT MINGW" OFF)
|
||||||
|
|
||||||
ie_dependent_option (ENABLE_SPEECH_DEMO "enable speech demo integration" ON "NOT APPLE;NOT ANDROID;X86 OR X86_64" OFF)
|
ie_dependent_option (ENABLE_SPEECH_DEMO "enable speech demo integration" ON "NOT APPLE;NOT ANDROID;X86_64" OFF)
|
||||||
|
|
||||||
ie_option (ENABLE_OPENCV "enables OpenCV" ON)
|
ie_option (ENABLE_OPENCV "enables OpenCV" ON)
|
||||||
|
|
||||||
@ -125,14 +125,15 @@ endif()
|
|||||||
ie_dependent_option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" ON "protoc_available" OFF)
|
ie_dependent_option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" ON "protoc_available" OFF)
|
||||||
ie_dependent_option(NGRAPH_ONNX_FRONTEND_ENABLE "Enable ONNX FrontEnd" OFF "NGRAPH_ONNX_IMPORT_ENABLE" OFF)
|
ie_dependent_option(NGRAPH_ONNX_FRONTEND_ENABLE "Enable ONNX FrontEnd" OFF "NGRAPH_ONNX_IMPORT_ENABLE" OFF)
|
||||||
ie_dependent_option(NGRAPH_PDPD_FRONTEND_ENABLE "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF)
|
ie_dependent_option(NGRAPH_PDPD_FRONTEND_ENABLE "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF)
|
||||||
ie_dependent_option(NGRAPH_USE_PROTOBUF_LITE "Compiles and links with protobuf-lite" OFF
|
ie_dependent_option(NGRAPH_USE_PROTOBUF_LITE "Compiles and links with protobuf-lite" ON
|
||||||
"NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE" OFF)
|
"NGRAPH_ONNX_IMPORT_ENABLE" OFF)
|
||||||
ie_dependent_option(NGRAPH_USE_SYSTEM_PROTOBUF "Use system protobuf" OFF
|
ie_dependent_option(NGRAPH_USE_SYSTEM_PROTOBUF "Use system protobuf" OFF
|
||||||
"NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE" OFF)
|
"NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE" OFF)
|
||||||
ie_dependent_option(NGRAPH_UNIT_TEST_ENABLE "Enables ngraph unit tests" ON "ENABLE_TESTS;NOT ANDROID" OFF)
|
ie_dependent_option(NGRAPH_UNIT_TEST_ENABLE "Enables ngraph unit tests" ON "ENABLE_TESTS;NOT ANDROID" OFF)
|
||||||
ie_dependent_option(NGRAPH_UNIT_TEST_BACKENDS_ENABLE "Control the building of unit tests using backends" ON
|
ie_dependent_option(NGRAPH_UNIT_TEST_BACKENDS_ENABLE "Control the building of unit tests using backends" ON
|
||||||
"NGRAPH_UNIT_TEST_ENABLE" OFF)
|
"NGRAPH_UNIT_TEST_ENABLE" OFF)
|
||||||
option(NGRAPH_DEBUG_ENABLE "Enable output for NGRAPH_DEBUG statements" OFF)
|
option(NGRAPH_DEBUG_ENABLE "Enable output for NGRAPH_DEBUG statements" OFF)
|
||||||
|
option(ENABLE_REQUIREMENTS_INSTALL "Dynamic dependencies install" ON)
|
||||||
|
|
||||||
# WA for ngraph python build on Windows debug
|
# WA for ngraph python build on Windows debug
|
||||||
list(REMOVE_ITEM IE_OPTIONS NGRAPH_UNIT_TEST_ENABLE NGRAPH_UNIT_TEST_BACKENDS_ENABLE)
|
list(REMOVE_ITEM IE_OPTIONS NGRAPH_UNIT_TEST_ENABLE NGRAPH_UNIT_TEST_BACKENDS_ENABLE)
|
||||||
|
131
cmake/test_model_zoo.cmake
Normal file
131
cmake/test_model_zoo.cmake
Normal file
@ -0,0 +1,131 @@
|
|||||||
|
# Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
|
||||||
|
function(ov_model_convert SRC DST OUT)
|
||||||
|
set(onnx_gen_script ${OpenVINO_SOURCE_DIR}/ngraph/test/models/onnx/onnx_prototxt_converter.py)
|
||||||
|
|
||||||
|
file(GLOB_RECURSE prototxt_models RELATIVE "${SRC}" "${SRC}/*.prototxt")
|
||||||
|
file(GLOB_RECURSE xml_models RELATIVE "${SRC}" "${SRC}/*.xml")
|
||||||
|
file(GLOB_RECURSE bin_models RELATIVE "${SRC}" "${SRC}/*.bin")
|
||||||
|
file(GLOB_RECURSE onnx_models RELATIVE "${SRC}" "${SRC}/*.onnx")
|
||||||
|
file(GLOB_RECURSE data_models RELATIVE "${SRC}" "${SRC}/*.data")
|
||||||
|
|
||||||
|
foreach(in_file IN LISTS prototxt_models xml_models bin_models onnx_models data_models)
|
||||||
|
get_filename_component(ext "${in_file}" EXT)
|
||||||
|
get_filename_component(rel_dir "${in_file}" DIRECTORY)
|
||||||
|
get_filename_component(name_we "${in_file}" NAME_WE)
|
||||||
|
set(model_source_dir "${SRC}/${rel_dir}")
|
||||||
|
|
||||||
|
if(NOT NGRAPH_ONNX_IMPORT_ENABLE AND ext MATCHES "^\\.(onnx|prototxt)$")
|
||||||
|
# don't copy / process ONNX / prototxt files
|
||||||
|
continue()
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if(ext STREQUAL ".prototxt")
|
||||||
|
# convert model
|
||||||
|
set(rel_out_name "${name_we}.onnx")
|
||||||
|
if(rel_dir)
|
||||||
|
set(rel_out_name "${rel_dir}/${rel_out_name}")
|
||||||
|
endif()
|
||||||
|
else()
|
||||||
|
# copy as is
|
||||||
|
set(rel_out_name "${in_file}")
|
||||||
|
endif()
|
||||||
|
|
||||||
|
set(full_out_name "${DST}/${rel_out_name}")
|
||||||
|
file(MAKE_DIRECTORY "${DST}/${rel_dir}")
|
||||||
|
|
||||||
|
if(ext STREQUAL ".prototxt")
|
||||||
|
# convert .prototxt models to .onnx binary
|
||||||
|
add_custom_command(OUTPUT ${full_out_name}
|
||||||
|
COMMAND ${PYTHON_EXECUTABLE} ${onnx_gen_script}
|
||||||
|
"${SRC}/${in_file}" ${full_out_name}
|
||||||
|
DEPENDS ${onnx_gen_script} "${SRC}/${in_file}"
|
||||||
|
COMMENT "Generate ${rel_out_name}"
|
||||||
|
WORKING_DIRECTORY "${model_source_dir}")
|
||||||
|
else()
|
||||||
|
add_custom_command(OUTPUT ${full_out_name}
|
||||||
|
COMMAND "${CMAKE_COMMAND}" -E copy_if_different
|
||||||
|
"${SRC}/${in_file}" ${full_out_name}
|
||||||
|
DEPENDS ${onnx_gen_script} "${SRC}/${in_file}"
|
||||||
|
COMMENT "Copy ${rel_out_name}"
|
||||||
|
WORKING_DIRECTORY "${model_source_dir}")
|
||||||
|
endif()
|
||||||
|
list(APPEND files "${full_out_name}")
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
set(${OUT} ${files} PARENT_SCOPE)
|
||||||
|
endfunction()
|
||||||
|
|
||||||
|
ov_model_convert("${CMAKE_CURRENT_SOURCE_DIR}/ngraph/test"
|
||||||
|
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test_model_zoo/ngraph"
|
||||||
|
onnx_out_files)
|
||||||
|
|
||||||
|
set(rel_path "inference-engine/tests/functional/inference_engine/onnx_reader")
|
||||||
|
ov_model_convert("${OpenVINO_SOURCE_DIR}/${rel_path}"
|
||||||
|
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test_model_zoo/onnx_reader"
|
||||||
|
ie_onnx_out_files)
|
||||||
|
|
||||||
|
set(rel_path "inference-engine/tests/functional/inference_engine/ir_serialization")
|
||||||
|
ov_model_convert("${OpenVINO_SOURCE_DIR}/${rel_path}"
|
||||||
|
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test_model_zoo/ir_serialization"
|
||||||
|
ie_serialize_out_files)
|
||||||
|
|
||||||
|
set(rel_path "inference-engine/tests/unit/frontends/onnx_import/models")
|
||||||
|
ov_model_convert("${OpenVINO_SOURCE_DIR}/${rel_path}"
|
||||||
|
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test_model_zoo/onnx_import"
|
||||||
|
ie_onnx_import_out_files)
|
||||||
|
|
||||||
|
if(ENABLE_TESTS)
|
||||||
|
if(NGRAPH_ONNX_IMPORT_ENABLE AND ENABLE_REQUIREMENTS_INSTALL)
|
||||||
|
find_package(PythonInterp 3 REQUIRED)
|
||||||
|
|
||||||
|
get_filename_component(PYTHON_EXEC_DIR ${PYTHON_EXECUTABLE} DIRECTORY)
|
||||||
|
execute_process(COMMAND "${PYTHON_EXECUTABLE}" -m pip --version
|
||||||
|
WORKING_DIRECTORY ${PYTHON_EXEC_DIR}
|
||||||
|
RESULT_VARIABLE pip3_exit_code
|
||||||
|
OUTPUT_VARIABLE pip3_version)
|
||||||
|
|
||||||
|
if(NOT pip3_exit_code EQUAL 0)
|
||||||
|
message(FATAL_ERROR "Failed to extract pip module version")
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if(pip3_version MATCHES ".* ([0-9]+)+\.([0-9]+)([\.0-9 ]).*")
|
||||||
|
set(pip3_version ${CMAKE_MATCH_1}.${CMAKE_MATCH_2})
|
||||||
|
else()
|
||||||
|
message(FATAL_ERROR "Failed to parse ${pip3_version}")
|
||||||
|
endif()
|
||||||
|
|
||||||
|
message(STATUS "pip version is ${pip3_version}")
|
||||||
|
set(args --quiet)
|
||||||
|
if(pip3_version VERSION_GREATER 20.2.2)
|
||||||
|
list(APPEND args --use-feature=2020-resolver)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
set(reqs "${OpenVINO_SOURCE_DIR}/ngraph/test/requirements_test_onnx.txt")
|
||||||
|
add_custom_target(test_pip_prerequsites ALL
|
||||||
|
"${PYTHON_EXECUTABLE}" -m pip install ${args} -r ${reqs}
|
||||||
|
COMMENT "Install requirements_test.txt"
|
||||||
|
VERBATIM
|
||||||
|
SOURCES ${reqs})
|
||||||
|
endif()
|
||||||
|
|
||||||
|
add_custom_target(test_model_zoo DEPENDS ${onnx_out_files}
|
||||||
|
${ie_onnx_out_files}
|
||||||
|
${ie_serialize_out_files}
|
||||||
|
${ie_onnx_import_out_files})
|
||||||
|
|
||||||
|
if(TARGET test_pip_prerequsites)
|
||||||
|
add_dependencies(test_model_zoo test_pip_prerequsites)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if (NGRAPH_PDPD_FRONTEND_ENABLE)
|
||||||
|
add_dependencies(test_model_zoo paddlepaddle_test_models)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
install(DIRECTORY "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test_model_zoo"
|
||||||
|
DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)
|
||||||
|
|
||||||
|
set(TEST_MODEL_ZOO "./test_model_zoo" CACHE PATH "Path to test model zoo")
|
||||||
|
endif()
|
@ -519,3 +519,67 @@ Standard ONNX\* operators:
|
|||||||
| Upsample | No |
|
| Upsample | No |
|
||||||
| Where | No |
|
| Where | No |
|
||||||
| Xor | No |
|
| Xor | No |
|
||||||
|
|
||||||
|
|
||||||
|
## PaddlePaddle\* Supported Operators
|
||||||
|
|
||||||
|
Standard PaddlePaddle(paddlepaddle>=2.1)\* Operators:
|
||||||
|
|
||||||
|
| Operator Name in PaddlePaddle\*| Limitations|
|
||||||
|
| :----------| :----------|
|
||||||
|
| adpative_pool2d | 'NHWC' data_layout is not supported |
|
||||||
|
| arg_max | 'int32' output data_type is not supported |
|
||||||
|
| assign_value | No |
|
||||||
|
| batch_norm | No |
|
||||||
|
| bilinear_interp | 'NCW' 'NWC' 'NHWC' 'NCDHW' 'NDHWC' data_layout are not supported |
|
||||||
|
| bilinear_interp_v2 | 'NCW' 'NWC' 'NHWC' 'NCDHW' 'NDHWC' data_layout are not supported |
|
||||||
|
| bmm | No |
|
||||||
|
| cast | No |
|
||||||
|
| clip | No |
|
||||||
|
| concat | No |
|
||||||
|
| conv2d | 'NHWC' data_layout is not supported |
|
||||||
|
| depthwise_conv2d | 'NHWC' data_layout is not supported |
|
||||||
|
| deformable_conv | No |
|
||||||
|
| elementwise_add | No |
|
||||||
|
| elementwise_div | No |
|
||||||
|
| elementwise_max | No |
|
||||||
|
| elementwise_min | No |
|
||||||
|
| elementwise_mul | No |
|
||||||
|
| elementwise_pow | No |
|
||||||
|
| elementwise_sub | No |
|
||||||
|
| equal | No |
|
||||||
|
| expand_v2 | No |
|
||||||
|
| fill_constant_batch_size_like | No |
|
||||||
|
| fill_constant | No |
|
||||||
|
| flatten_contiguous_range | No |
|
||||||
|
| greater_equal | No |
|
||||||
|
| hard_sigmoid | No |
|
||||||
|
| hard_swish | No |
|
||||||
|
| leaky_relu | No |
|
||||||
|
| log | No |
|
||||||
|
| logical_not | No |
|
||||||
|
| matmul | No |
|
||||||
|
| matrix_nms | Only supports IE CPU plugin with 'number of selected boxes' static shape(eg: min(min(num_boxes, nms_top_k) * num_classes_output, keep_top_k)) |
|
||||||
|
| max_pool2d_with_index | No |
|
||||||
|
| mul | No |
|
||||||
|
| multiclass_nms | Only supports IE CPU plugin with 'number of selected boxes' static shape(eg: min(min(num_boxes, nms_top_k) * num_classes_output, keep_top_k)) |
|
||||||
|
| nearest_interp | 'NCW' 'NWC' 'NHWC' 'NCDHW' 'NDHWC' data_layout are not supported |
|
||||||
|
| nearest_interp_v2 | 'NCW' 'NWC' 'NHWC' 'NCDHW' 'NDHWC' data_layout are not supported |
|
||||||
|
| pad3d | 'Circular' mode is not supported |
|
||||||
|
| pow | No |
|
||||||
|
| pool2d | 'NHWC' data_layout is not supported |
|
||||||
|
| range | No |
|
||||||
|
| relu | No |
|
||||||
|
| relu6 | No |
|
||||||
|
| reshape2 | No |
|
||||||
|
| rnn | 'SimpleRNN' and 'GRU' modes are not supported |
|
||||||
|
| scale | No |
|
||||||
|
| shape | No |
|
||||||
|
| slice | No |
|
||||||
|
| softmax | No |
|
||||||
|
| sigmoid | No |
|
||||||
|
| split | No |
|
||||||
|
| squeeze2 | No |
|
||||||
|
| transpose2 | No |
|
||||||
|
| unsqueeze2 | No |
|
||||||
|
| yolo_box | No |
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
# Converting TensorFlow* Object Detection API Models {#openvino_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models}
|
# Converting TensorFlow* Object Detection API Models {#openvino_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models}
|
||||||
|
|
||||||
> **NOTES**:
|
> **NOTES**:
|
||||||
|
> * Starting with the 2022.1 release, the Model Optimizer can convert the TensorFlow\* Object Detection API Faster and Mask RCNNs topologies differently. By default, the Model Optimizer adds operation "Proposal" to the generated IR. This operation needs an additional input to the model with name "image_info" which should be fed with several values describing the pre-processing applied to the input image (refer to the [Proposal](../../../../ops/detection/Proposal_4.md) operation specification for more information). However, this input is redundant for the models trained and inferred with equal size images. Model Optimizer can generate IR for such models and insert operation [DetectionOutput](../../../../ops/detection/DetectionOutput_1.md) instead of `Proposal`. The `DetectionOutput` operation does not require additional model input "image_info" and moreover, for some models the produced inference results are closer to the original TensorFlow\* model. In order to trigger new behaviour the attribute "operation_to_add" in the corresponding JSON transformation configuration file should be set to value "DetectionOutput" instead of default one "Proposal".
|
||||||
> * Starting with the 2021.1 release, the Model Optimizer converts the TensorFlow\* Object Detection API SSDs, Faster and Mask RCNNs topologies keeping shape-calculating sub-graphs by default, so topologies can be re-shaped in the Inference Engine using dedicated reshape API. Refer to [Using Shape Inference](../../../../IE_DG/ShapeInference.md) for more information on how to use this feature. It is possible to change the both spatial dimensions of the input image and batch size.
|
> * Starting with the 2021.1 release, the Model Optimizer converts the TensorFlow\* Object Detection API SSDs, Faster and Mask RCNNs topologies keeping shape-calculating sub-graphs by default, so topologies can be re-shaped in the Inference Engine using dedicated reshape API. Refer to [Using Shape Inference](../../../../IE_DG/ShapeInference.md) for more information on how to use this feature. It is possible to change the both spatial dimensions of the input image and batch size.
|
||||||
> * To generate IRs for SSD topologies, the Model Optimizer creates a number of `PriorBoxClustered` layers instead of a constant node with prior boxes calculated for the particular input image size. This change allows you to reshape the topology in the Inference Engine using dedicated Inference Engine API. The reshaping is supported for all SSD topologies except FPNs which contain hardcoded shapes for some operations preventing from changing topology input shape.
|
> * To generate IRs for SSD topologies, the Model Optimizer creates a number of `PriorBoxClustered` layers instead of a constant node with prior boxes calculated for the particular input image size. This change allows you to reshape the topology in the Inference Engine using dedicated Inference Engine API. The reshaping is supported for all SSD topologies except FPNs which contain hardcoded shapes for some operations preventing from changing topology input shape.
|
||||||
|
|
||||||
@ -29,14 +30,16 @@ To convert a TensorFlow\* Object Detection API model, go to the `<INSTALL_DIR>/d
|
|||||||
* `faster_rcnn_support_api_v1.13.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.13.X
|
* `faster_rcnn_support_api_v1.13.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.13.X
|
||||||
* `faster_rcnn_support_api_v1.14.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.14.0 up to 1.14.X inclusively
|
* `faster_rcnn_support_api_v1.14.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.14.0 up to 1.14.X inclusively
|
||||||
* `faster_rcnn_support_api_v1.15.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.15.0 up to 2.0
|
* `faster_rcnn_support_api_v1.15.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.15.0 up to 2.0
|
||||||
* `faster_rcnn_support_api_v2.0.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 2.0 or higher
|
* `faster_rcnn_support_api_v2.0.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 2.0 up to 2.3.X inclusively
|
||||||
|
* `faster_rcnn_support_api_v2.4.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 2.4 or higher
|
||||||
* `mask_rcnn_support.json` --- for Mask R-CNN topologies from the TF 1.X models zoo trained with TensorFlow\* version 1.9.0 or lower.
|
* `mask_rcnn_support.json` --- for Mask R-CNN topologies from the TF 1.X models zoo trained with TensorFlow\* version 1.9.0 or lower.
|
||||||
* `mask_rcnn_support_api_v1.7.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.7.0 up to 1.9.X inclusively
|
* `mask_rcnn_support_api_v1.7.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.7.0 up to 1.9.X inclusively
|
||||||
* `mask_rcnn_support_api_v1.11.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.11.0 up to 1.12.X inclusively
|
* `mask_rcnn_support_api_v1.11.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.11.0 up to 1.12.X inclusively
|
||||||
* `mask_rcnn_support_api_v1.13.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.13.0 up to 1.13.X inclusively
|
* `mask_rcnn_support_api_v1.13.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.13.0 up to 1.13.X inclusively
|
||||||
* `mask_rcnn_support_api_v1.14.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.14.0 up to 1.14.X inclusively
|
* `mask_rcnn_support_api_v1.14.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.14.0 up to 1.14.X inclusively
|
||||||
* `mask_rcnn_support_api_v1.15.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.15.0 up to 2.0
|
* `mask_rcnn_support_api_v1.15.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.15.0 up to 2.0
|
||||||
* `mask_rcnn_support_api_v2.0.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 2.0 or higher
|
* `mask_rcnn_support_api_v2.0.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 2.0 up to 2.3.X inclusively
|
||||||
|
* `mask_rcnn_support_api_v2.4.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 2.4 or higher
|
||||||
* `rfcn_support.json` --- for RFCN topology from the models zoo trained with TensorFlow\* version up to 1.9.X inclusively
|
* `rfcn_support.json` --- for RFCN topology from the models zoo trained with TensorFlow\* version up to 1.9.X inclusively
|
||||||
* `rfcn_support_api_v1.10.json` --- for RFCN topology from the models zoo frozen with TensorFlow\* version 1.10.0 up to 1.12.X inclusively
|
* `rfcn_support_api_v1.10.json` --- for RFCN topology from the models zoo frozen with TensorFlow\* version 1.10.0 up to 1.12.X inclusively
|
||||||
* `rfcn_support_api_v1.13.json` --- for RFCN topology from the models zoo frozen with TensorFlow\* version 1.13.X
|
* `rfcn_support_api_v1.13.json` --- for RFCN topology from the models zoo frozen with TensorFlow\* version 1.13.X
|
||||||
|
@ -16,8 +16,8 @@ openvino/docs/optimization_guide/dldt_optimization_guide.md
|
|||||||
openvino/docs/IE_DG/ShapeInference.md
|
openvino/docs/IE_DG/ShapeInference.md
|
||||||
build/docs/openvino_docs.xml
|
build/docs/openvino_docs.xml
|
||||||
openvino/docs/install_guides/installing-openvino-linux-ivad-vpu.md
|
openvino/docs/install_guides/installing-openvino-linux-ivad-vpu.md
|
||||||
inference-engine/include/ie_parallel.hpp
|
inference-engine/src/inference_engine/include/ie/ie_parallel.hpp
|
||||||
inference-engine/include/ie_plugin_config.hpp
|
inference-engine/src/inference_engine/include/ie/ie_plugin_config.hpp
|
||||||
inference-engine/include/vpu/myriad_config.hpp
|
inference-engine/src/inference_engine/include/ie/vpu/myriad_config.hpp
|
||||||
inference-engine/include/vpu/vpu_config.hpp
|
inference-engine/src/inference_engine/include/ie/vpu/vpu_config.hpp
|
||||||
inference-engine/include/vpu/vpu_plugin_config.hpp
|
inference-engine/src/inference_engine/include/ie/vpu/vpu_plugin_config.hpp
|
||||||
|
@ -824,7 +824,7 @@ WARN_LOGFILE = "@DOCS_BUILD_DIR@/ie_docs.log"
|
|||||||
# Note: If this tag is empty the current directory is searched.
|
# Note: If this tag is empty the current directory is searched.
|
||||||
|
|
||||||
INPUT = "@DOCS_BUILD_DIR@" \
|
INPUT = "@DOCS_BUILD_DIR@" \
|
||||||
"@IE_SOURCE_DIR@/include"
|
"@IE_SOURCE_DIR@/src/inference_engine/include"
|
||||||
|
|
||||||
# This tag can be used to specify the character encoding of the source files
|
# This tag can be used to specify the character encoding of the source files
|
||||||
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
|
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
|
||||||
|
@ -4,34 +4,32 @@
|
|||||||
|
|
||||||
**Category**: Arithmetic unary operation
|
**Category**: Arithmetic unary operation
|
||||||
|
|
||||||
**Short description**: *Erf* calculates the Gauss error function element-wise with given tensor.
|
**Short description**: *Erf* performs element-wise Gauss error function (erf) on a given input tensor.
|
||||||
|
|
||||||
**Detailed Description**
|
**Detailed Description**
|
||||||
|
|
||||||
For each element from the input tensor calculates corresponding element in the output tensor with the following formula:
|
*Erf* performs element-wise erf operation on a given input tensor, based on the following mathematical formula:
|
||||||
|
|
||||||
\f[
|
\f[
|
||||||
erf(x) = \pi^{-1} \int_{-x}^{x} e^{-t^2} dt
|
erf(x) = \pi^{-1} \int_{-x}^{x} e^{-t^2} dt
|
||||||
\f]
|
\f]
|
||||||
|
|
||||||
**Attributes**:
|
**Attributes**: *Erf* operation has no attributes.
|
||||||
|
|
||||||
No attributes available.
|
|
||||||
|
|
||||||
**Inputs**
|
**Inputs**
|
||||||
|
|
||||||
* **1**: A tensor of type *T*. **Required.**
|
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||||
|
|
||||||
**Outputs**
|
**Outputs**
|
||||||
|
|
||||||
* **1**: The result of element-wise operation. A tensor of type *T*.
|
* **1**: The result of element-wise *Erf* function applied to the input tensor. A tensor of type *T* and the same shape as the input tensor.
|
||||||
|
|
||||||
**Types**
|
**Types**
|
||||||
|
|
||||||
* *T*: any supported floating-point type.
|
* *T*: any supported numeric type.
|
||||||
|
|
||||||
**Examples**
|
|
||||||
|
|
||||||
*Example 1*
|
**Example**
|
||||||
|
|
||||||
```xml
|
```xml
|
||||||
<layer ... type="Erf">
|
<layer ... type="Erf">
|
||||||
|
@ -6,32 +6,39 @@
|
|||||||
|
|
||||||
**Short description**: *Tan* performs element-wise tangent operation with given tensor.
|
**Short description**: *Tan* performs element-wise tangent operation with given tensor.
|
||||||
|
|
||||||
**Attributes**:
|
**Detailed description**: Operation takes one input tensor and performs the element-wise tangent function on a given input tensor, based on the following mathematical formula:
|
||||||
|
|
||||||
No attributes available.
|
|
||||||
|
|
||||||
**Inputs**
|
|
||||||
|
|
||||||
* **1**: An tensor of type *T*. **Required.**
|
|
||||||
|
|
||||||
**Outputs**
|
|
||||||
|
|
||||||
* **1**: The result of element-wise tan operation. A tensor of type *T*.
|
|
||||||
|
|
||||||
**Types**
|
|
||||||
|
|
||||||
* *T*: any numeric type.
|
|
||||||
|
|
||||||
*Tan* does the following with the input tensor *a*:
|
|
||||||
|
|
||||||
\f[
|
\f[
|
||||||
a_{i} = tan(a_{i})
|
a_{i} = tan(a_{i})
|
||||||
\f]
|
\f]
|
||||||
|
|
||||||
**Examples**
|
|
||||||
|
|
||||||
*Example 1*
|
*Example 1*
|
||||||
|
|
||||||
|
input = [0.0, 0.25, -0.25, 0.5, -0.5]
|
||||||
|
output = [0.0, 0.25534192, -0.25534192, 0.54630249, -0.54630249]
|
||||||
|
|
||||||
|
*Example 2*
|
||||||
|
|
||||||
|
input = [-2, -1, 0, 1, 2]
|
||||||
|
output = [2, -2, 0, 2, -2]
|
||||||
|
|
||||||
|
**Attributes**: *tan* operation has no attributes.
|
||||||
|
|
||||||
|
**Inputs**
|
||||||
|
|
||||||
|
* **1**: A tensor of type *T* and arbitrary shape, measured in radians. **Required.**
|
||||||
|
|
||||||
|
**Outputs**
|
||||||
|
|
||||||
|
* **1**: The result of element-wise *tan* applied to the input tensor. A tensor of type *T* and same shape as the input tensor.
|
||||||
|
|
||||||
|
**Types**
|
||||||
|
|
||||||
|
* *T*: any supported numeric type.
|
||||||
|
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```xml
|
```xml
|
||||||
<layer ... type="Tan">
|
<layer ... type="Tan">
|
||||||
<input>
|
<input>
|
||||||
|
@ -4,32 +4,7 @@
|
|||||||
|
|
||||||
**Category**: Comparison binary operation
|
**Category**: Comparison binary operation
|
||||||
|
|
||||||
**Short description**: *LessEqual* performs element-wise comparison operation with two given tensors applying multi-directional broadcast rules.
|
**Short description**: *LessEqual* performs element-wise comparison operation with two given tensors applying broadcast rules specified in the *auto_broadcast* attribute.
|
||||||
|
|
||||||
**Attributes**:
|
|
||||||
|
|
||||||
* *auto_broadcast*
|
|
||||||
|
|
||||||
* **Description**: specifies rules used for auto-broadcasting of input tensors.
|
|
||||||
* **Range of values**:
|
|
||||||
* *none* - no auto-broadcasting is allowed, all input shapes should match
|
|
||||||
* *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in <a href="https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md">ONNX docs</a>.
|
|
||||||
* **Type**: string
|
|
||||||
* **Default value**: "numpy"
|
|
||||||
* **Required**: *no*
|
|
||||||
|
|
||||||
**Inputs**
|
|
||||||
|
|
||||||
* **1**: A tensor of type *T*. **Required.**
|
|
||||||
* **2**: A tensor of type *T*. **Required.**
|
|
||||||
|
|
||||||
**Outputs**
|
|
||||||
|
|
||||||
* **1**: The result of element-wise comparison operation. A tensor of type boolean.
|
|
||||||
|
|
||||||
**Types**
|
|
||||||
|
|
||||||
* *T*: arbitrary supported type.
|
|
||||||
|
|
||||||
**Detailed description**
|
**Detailed description**
|
||||||
Before performing arithmetic operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value.
|
Before performing arithmetic operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value.
|
||||||
@ -40,12 +15,39 @@ After broadcasting *LessEqual* does the following with the input tensors *a* and
|
|||||||
o_{i} = a_{i} <= b_{i}
|
o_{i} = a_{i} <= b_{i}
|
||||||
\f]
|
\f]
|
||||||
|
|
||||||
|
**Attributes**:
|
||||||
|
|
||||||
|
* *auto_broadcast*
|
||||||
|
|
||||||
|
* **Description**: specifies rules used for auto-broadcasting of input tensors.
|
||||||
|
* **Range of values**:
|
||||||
|
* *none* - no auto-broadcasting is allowed, all input shapes should match,
|
||||||
|
* *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md),
|
||||||
|
* *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md).
|
||||||
|
* **Type**: string
|
||||||
|
* **Default value**: "numpy"
|
||||||
|
* **Required**: *no*
|
||||||
|
|
||||||
|
**Inputs**
|
||||||
|
|
||||||
|
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||||
|
* **2**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||||
|
|
||||||
|
**Outputs**
|
||||||
|
|
||||||
|
* **1**: The result of element-wise comparison operation applied to the input tensors. A tensor of type **boolean** and shape equal to broadcasted shape of two inputs.
|
||||||
|
|
||||||
|
**Types**
|
||||||
|
|
||||||
|
* *T*: arbitrary supported type.
|
||||||
|
|
||||||
**Examples**
|
**Examples**
|
||||||
|
|
||||||
*Example 1*
|
*Example 1: no broadcast*
|
||||||
|
|
||||||
```xml
|
```xml
|
||||||
<layer ... type="LessEqual">
|
<layer ... type="LessEqual">
|
||||||
|
<data auto_broadcast="none"/>
|
||||||
<input>
|
<input>
|
||||||
<port id="0">
|
<port id="0">
|
||||||
<dim>256</dim>
|
<dim>256</dim>
|
||||||
@ -65,9 +67,10 @@ o_{i} = a_{i} <= b_{i}
|
|||||||
</layer>
|
</layer>
|
||||||
```
|
```
|
||||||
|
|
||||||
*Example 2: broadcast*
|
*Example 2: numpy broadcast*
|
||||||
```xml
|
```xml
|
||||||
<layer ... type="LessEqual">
|
<layer ... type="LessEqual">
|
||||||
|
<data auto_broadcast="numpy"/>
|
||||||
<input>
|
<input>
|
||||||
<port id="0">
|
<port id="0">
|
||||||
<dim>8</dim>
|
<dim>8</dim>
|
||||||
|
@ -6,6 +6,16 @@
|
|||||||
|
|
||||||
**Short description**: *Less* performs element-wise comparison operation with two given tensors applying multi-directional broadcast rules.
|
**Short description**: *Less* performs element-wise comparison operation with two given tensors applying multi-directional broadcast rules.
|
||||||
|
|
||||||
|
**Detailed description**
|
||||||
|
Before performing arithmetic operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value.
|
||||||
|
|
||||||
|
After broadcasting *Less* does the following with the input tensors *a* and *b*:
|
||||||
|
|
||||||
|
\f[
|
||||||
|
o_{i} = a_{i} < b_{i}
|
||||||
|
\f]
|
||||||
|
|
||||||
|
|
||||||
**Attributes**:
|
**Attributes**:
|
||||||
|
|
||||||
* *auto_broadcast*
|
* *auto_broadcast*
|
||||||
@ -13,8 +23,9 @@
|
|||||||
* **Description**: specifies rules used for auto-broadcasting of input tensors.
|
* **Description**: specifies rules used for auto-broadcasting of input tensors.
|
||||||
* **Range of values**:
|
* **Range of values**:
|
||||||
* *none* - no auto-broadcasting is allowed, all input shapes should match
|
* *none* - no auto-broadcasting is allowed, all input shapes should match
|
||||||
* *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in <a href="https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md">ONNX docs</a>.
|
* *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md)
|
||||||
* **Type**: string
|
* *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md)
|
||||||
|
* **Type**: `string`
|
||||||
* **Default value**: "numpy"
|
* **Default value**: "numpy"
|
||||||
* **Required**: *no*
|
* **Required**: *no*
|
||||||
|
|
||||||
@ -31,15 +42,6 @@
|
|||||||
|
|
||||||
* *T*: arbitrary supported type.
|
* *T*: arbitrary supported type.
|
||||||
|
|
||||||
**Detailed description**
|
|
||||||
Before performing arithmetic operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value.
|
|
||||||
|
|
||||||
After broadcasting *Less* does the following with the input tensors *a* and *b*:
|
|
||||||
|
|
||||||
\f[
|
|
||||||
o_{i} = a_{i} < b_{i}
|
|
||||||
\f]
|
|
||||||
|
|
||||||
**Examples**
|
**Examples**
|
||||||
|
|
||||||
*Example 1*
|
*Example 1*
|
||||||
|
@ -6,39 +6,40 @@
|
|||||||
|
|
||||||
**Short description**: *LogicalAnd* performs element-wise logical AND operation with two given tensors applying multi-directional broadcast rules.
|
**Short description**: *LogicalAnd* performs element-wise logical AND operation with two given tensors applying multi-directional broadcast rules.
|
||||||
|
|
||||||
|
**Detailed description**: Before performing logical operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value.
|
||||||
|
|
||||||
|
After broadcasting *LogicalAnd* does the following with the input tensors *a* and *b*:
|
||||||
|
|
||||||
|
\f[
|
||||||
|
o_{i} = a_{i} \wedge b_{i}
|
||||||
|
\f]
|
||||||
|
|
||||||
**Attributes**:
|
**Attributes**:
|
||||||
|
|
||||||
* *auto_broadcast*
|
* *auto_broadcast*
|
||||||
|
|
||||||
* **Description**: specifies rules used for auto-broadcasting of input tensors.
|
* **Description**: specifies rules used for auto-broadcasting of input tensors.
|
||||||
* **Range of values**:
|
* **Range of values**:
|
||||||
* *none* - no auto-broadcasting is allowed, all input shapes should match
|
* *none* - no auto-broadcasting is allowed, all input shapes must match,
|
||||||
* *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in <a href="https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md">ONNX docs</a>.
|
* *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md),
|
||||||
|
* *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md).
|
||||||
* **Type**: string
|
* **Type**: string
|
||||||
* **Default value**: "numpy"
|
* **Default value**: "numpy"
|
||||||
* **Required**: *no*
|
* **Required**: *no*
|
||||||
|
|
||||||
**Inputs**
|
**Inputs**
|
||||||
|
|
||||||
* **1**: A tensor of type *T*. **Required.**
|
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||||
* **2**: A tensor of type *T*. **Required.**
|
* **2**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||||
|
|
||||||
**Outputs**
|
**Outputs**
|
||||||
|
|
||||||
* **1**: The result of element-wise logical AND operation. A tensor of type boolean.
|
* **1**: The result of element-wise *LogicalAnd* operation. A tensor of type boolean.
|
||||||
|
|
||||||
**Types**
|
**Types**
|
||||||
|
|
||||||
* *T*: boolean type.
|
* *T*: boolean type.
|
||||||
|
|
||||||
**Detailed description**
|
|
||||||
Before performing logical operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value.
|
|
||||||
|
|
||||||
After broadcasting *LogicalAnd* does the following with the input tensors *a* and *b*:
|
|
||||||
|
|
||||||
\f[
|
|
||||||
o_{i} = a_{i} and b_{i}
|
|
||||||
\f]
|
|
||||||
|
|
||||||
**Examples**
|
**Examples**
|
||||||
|
|
||||||
|
@ -102,6 +102,7 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig&
|
|||||||
IE_THROW() << "Operation supports only FP32 precisions!";
|
IE_THROW() << "Operation supports only FP32 precisions!";
|
||||||
}
|
}
|
||||||
} catch (InferenceEngine::Exception& ex) {
|
} catch (InferenceEngine::Exception& ex) {
|
||||||
|
error = ex.what();
|
||||||
if (resp) {
|
if (resp) {
|
||||||
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
|
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
|
||||||
resp->msg[sizeof(resp->msg) - 1] = 0;
|
resp->msg[sizeof(resp->msg) - 1] = 0;
|
||||||
|
@ -66,6 +66,7 @@ InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig& config,
|
|||||||
IE_THROW() << "Operation supports only FP32 precisions!";
|
IE_THROW() << "Operation supports only FP32 precisions!";
|
||||||
}
|
}
|
||||||
} catch (InferenceEngine::Exception& ex) {
|
} catch (InferenceEngine::Exception& ex) {
|
||||||
|
error = ex.what();
|
||||||
if (resp) {
|
if (resp) {
|
||||||
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
|
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
|
||||||
resp->msg[sizeof(resp->msg) - 1] = 0;
|
resp->msg[sizeof(resp->msg) - 1] = 0;
|
||||||
|
@ -0,0 +1,63 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
#include <ie_ngraph_utils.hpp>
|
||||||
|
#include <ngraph/ngraph.hpp>
|
||||||
|
#include <shared_test_classes/base/layer_test_utils.hpp>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "base_reference_test.hpp"
|
||||||
|
#include "ngraph_functions/builders.hpp"
|
||||||
|
|
||||||
|
namespace reference_tests {
|
||||||
|
namespace ComparisonOpsRefTestDefinitions {
|
||||||
|
|
||||||
|
struct RefComparisonParams {
|
||||||
|
ngraph::helpers::ComparisonTypes compType;
|
||||||
|
Tensor input1;
|
||||||
|
Tensor input2;
|
||||||
|
Tensor expected;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct Builder : ParamsBuilder<RefComparisonParams> {
|
||||||
|
REFERENCE_TESTS_ADD_SET_PARAM(Builder, compType);
|
||||||
|
REFERENCE_TESTS_ADD_SET_PARAM(Builder, input1);
|
||||||
|
REFERENCE_TESTS_ADD_SET_PARAM(Builder, input2);
|
||||||
|
REFERENCE_TESTS_ADD_SET_PARAM(Builder, expected);
|
||||||
|
};
|
||||||
|
|
||||||
|
class ReferenceComparisonLayerTest : public testing::TestWithParam<RefComparisonParams>, public CommonReferenceTest {
|
||||||
|
public:
|
||||||
|
void SetUp() override {
|
||||||
|
const auto& params = GetParam();
|
||||||
|
function = CreateFunction(params.compType, params.input1.shape, params.input2.shape, params.input1.type, params.expected.type);
|
||||||
|
inputData = {params.input1.data, params.input2.data};
|
||||||
|
refOutData = {params.expected.data};
|
||||||
|
}
|
||||||
|
static std::string getTestCaseName(const testing::TestParamInfo<RefComparisonParams>& obj) {
|
||||||
|
const auto& param = obj.param;
|
||||||
|
std::ostringstream result;
|
||||||
|
result << "comparisonType=" << param.compType << "_";
|
||||||
|
result << "inpt_shape1=" << param.input1.shape << "_";
|
||||||
|
result << "inpt_shape2=" << param.input2.shape << "_";
|
||||||
|
result << "iType=" << param.input1.type << "_";
|
||||||
|
result << "oType=" << param.expected.type;
|
||||||
|
return result.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static std::shared_ptr<ngraph::Function> CreateFunction(ngraph::helpers::ComparisonTypes comp_op_type, const ngraph::PartialShape& input_shape1,
|
||||||
|
const ngraph::PartialShape& input_shape2, const ngraph::element::Type& input_type,
|
||||||
|
const ngraph::element::Type& expected_output_type) {
|
||||||
|
const auto in = std::make_shared<ngraph::op::Parameter>(input_type, input_shape1);
|
||||||
|
const auto in2 = std::make_shared<ngraph::op::Parameter>(input_type, input_shape2);
|
||||||
|
const auto comp = ngraph::builder::makeComparison(in, in2, comp_op_type);
|
||||||
|
return std::make_shared<ngraph::Function>(ngraph::NodeVector {comp}, ngraph::ParameterVector {in, in2});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
} // namespace ComparisonOpsRefTestDefinitions
|
||||||
|
} // namespace reference_tests
|
@ -0,0 +1,15 @@
|
|||||||
|
// Copyright (C) 2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include "conversion.hpp"
|
||||||
|
|
||||||
|
namespace reference_tests {
|
||||||
|
namespace ConversionOpsRefTestDefinitions {
|
||||||
|
namespace {
|
||||||
|
TEST_P(ReferenceConversionLayerTest, CompareWithHardcodedRefs) {
|
||||||
|
Exec();
|
||||||
|
}
|
||||||
|
} // namespace
|
||||||
|
} // namespace ConversionOpsRefTestDefinitions
|
||||||
|
} // namespace reference_tests
|
@ -0,0 +1,67 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <vector>

#include "base_reference_test.hpp"
#include "ngraph_functions/builders.hpp"

namespace reference_tests {
namespace ConversionOpsRefTestDefinitions {

static std::map<ngraph::helpers::ConversionTypes, std::string> conversionNames = {
    {ngraph::helpers::ConversionTypes::CONVERT, "Convert"},
    {ngraph::helpers::ConversionTypes::CONVERT_LIKE, "ConvertLike"}
};

struct ConvertParams {
    template <class IT, class OT>
    ConvertParams(ngraph::helpers::ConversionTypes convType, const ngraph::PartialShape& shape, const ngraph::element::Type& iType,
                  const ngraph::element::Type& oType, const std::vector<IT>& iValues, const std::vector<OT>& oValues, size_t iSize = 0, size_t oSize = 0)
        : conversionType(convType), pshape(shape), inType(iType), outType(oType), inputData(CreateBlob(iType, iValues, iSize)),
          refData(CreateBlob(oType, oValues, oSize)) {}
    ngraph::helpers::ConversionTypes conversionType;
    ngraph::PartialShape pshape;
    ngraph::element::Type inType;
    ngraph::element::Type outType;
    InferenceEngine::Blob::Ptr inputData;
    InferenceEngine::Blob::Ptr refData;
};

class ReferenceConversionLayerTest : public testing::TestWithParam<ConvertParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType, params.conversionType);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }

    static std::string getTestCaseName(const testing::TestParamInfo<ConvertParams>& obj) {
        const auto& param = obj.param;
        std::ostringstream result;
        result << "convertionType=" << conversionNames[param.conversionType] << "_";
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<ngraph::Function> CreateFunction(const ngraph::PartialShape& input_shape, const ngraph::element::Type& input_type,
                                                             const ngraph::element::Type& expected_output_type,
                                                             const ngraph::helpers::ConversionTypes& conversion_type) {
        const auto in = std::make_shared<ngraph::op::Parameter>(input_type, input_shape);
        const auto convert = ngraph::builder::makeConversion(in, expected_output_type, conversion_type);
        return std::make_shared<ngraph::Function>(ngraph::NodeVector {convert}, ngraph::ParameterVector {in});
    }
};
} // namespace ConversionOpsRefTestDefinitions
} // namespace reference_tests
@ -10,433 +10,403 @@
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <tuple>

#include "conversion.hpp"

using namespace ngraph;
using namespace InferenceEngine;
using ConversionTypes = ngraph::helpers::ConversionTypes;

namespace reference_tests {
namespace ConversionOpsRefTestDefinitions {
namespace {

INSTANTIATE_TEST_SUITE_P(
    smoke_Conversion_With_Hardcoded_Refs, ReferenceConversionLayerTest,
    ::testing::Values(
        // destination boolean
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 3}, ngraph::element::u8, ngraph::element::boolean,
                      std::vector<uint8_t> {0, 12, 23, 0, std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max()},
                      std::vector<char> {0, 1, 1, 0, 0, 1}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 3}, ngraph::element::i32, ngraph::element::boolean,
                      std::vector<int32_t> {0, -12, 23, 0, std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max()},
                      std::vector<char> {0, 1, 1, 0, 1, 1}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {3, 3}, ngraph::element::f32, ngraph::element::boolean,
                      std::vector<float> {0.f, 1.5745f, 0.12352f, 0.f, std::numeric_limits<float>::lowest(), std::numeric_limits<float>::max(),
                                          std::numeric_limits<float>::min(), std::numeric_limits<float>::infinity(), -std::numeric_limits<float>::infinity()},
                      std::vector<char> {0, 1, 1, 0, 1, 1, 1, 1, 1}),

        // destination bf16
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::bf16,
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<bfloat16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::bf16,
                      std::vector<uint8_t> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142},
                      std::vector<bfloat16> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}),

        // destination f16
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f16,
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::f16,
                      std::vector<uint8_t> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}, std::vector<float16> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}),

        // destination f32
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::u1, ngraph::element::f32, std::vector<uint8_t> {0xA0},
                      std::vector<float> {1.0f, 0.0f, 1.0f, 0.0f}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::u4, ngraph::element::f32, std::vector<uint8_t> {0xFB, 0x0A},
                      std::vector<float> {15.0f, 11.0f, 0.0f, 10.0f}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::u8, ngraph::element::f32, std::vector<uint8_t> {255, 128, 32, 0},
                      std::vector<float> {255.0f, 128.0f, 32.0f, 0.0f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::u16, ngraph::element::f32,
                      std::vector<uint16_t> {64000, 32000, 128, 0}, std::vector<float> {64000.0f, 32000.0f, 128.0f, 0.0f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::u32, ngraph::element::f32,
                      std::vector<uint32_t> {4000000, 2000000, 128, 0}, std::vector<float> {4000000.0f, 2000000.0f, 128.0f, 0.0f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::u64, ngraph::element::f32,
                      std::vector<uint64_t> {4000000, 2000000, 128, 0}, std::vector<float> {4000000.0f, 2000000.0f, 128.0f, 0.0f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::i4, ngraph::element::f32, std::vector<uint8_t> {0xFE, 0xF2},
                      std::vector<float> {-1.0f, -2.0f, -1.0f, 2.0f}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::i8, ngraph::element::f32, std::vector<int8_t> {-127, -0, 0, 127},
                      std::vector<float> {-127.0f, -0.0f, 0.0f, 127.0f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::i16, ngraph::element::f32,
                      std::vector<int16_t> {-32000, -0, 0, 32000}, std::vector<float> {-32000.0f, -0.0f, 0.0f, 32000.0f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::i32, ngraph::element::f32,
                      std::vector<int32_t> {-64000, -0, 0, 64000}, std::vector<float> {-64000.0f, -0.0f, 0.0f, 64000.0f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {2, 2}, ngraph::element::i64, ngraph::element::f32,
                      std::vector<int64_t> {-64000, -0, 0, 64000}, std::vector<float> {-64000.0f, -0.0f, 0.0f, 64000.0f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::bf16, ngraph::element::f32,
                      std::vector<bfloat16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f16, ngraph::element::f32,
                      std::vector<float16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f32,
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),

        // destination i4
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::i4, std::vector<uint8_t> {0xA0},
                      std::vector<uint8_t> {0x10, 0x10}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i4, std::vector<uint8_t> {0x12, 0x03},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i4, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i4, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i4, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i4, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i4, std::vector<uint8_t> {0xFE, 0x03},
                      std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i4, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i4, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i4, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i4, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i4,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i4,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i4, std::vector<float> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        // destination i8
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i8, std::vector<uint8_t> {0x81},
                      std::vector<int8_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i8, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i8, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i8, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i8, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i8, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i8, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i8, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i8, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i8, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i8, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i8,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<int8_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i8,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<int8_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i8, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        // destination i16
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i16, std::vector<uint8_t> {0x81},
                      std::vector<int16_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i16, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int16_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i16, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i16, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i16, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i16, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i16, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int16_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i16, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i16, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i16, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i16, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i16,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<int16_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i16,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<int16_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i16, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        // destination i32
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i32, std::vector<uint8_t> {0x81},
                      std::vector<int32_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i32, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int32_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i32, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i32, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i32, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i32, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i32, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int32_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i32, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i32, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i32, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i32, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i32,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<int32_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i32,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<int32_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i32, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        // destination i64
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i64, std::vector<uint8_t> {0x81},
                      std::vector<int64_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i64, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int64_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i64, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i64, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i64, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i64, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i64, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int64_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i64, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i64, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i64, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i64, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i64,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<int64_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i64,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<int64_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i64, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),

        // destination u1
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u1, std::vector<uint8_t> {0xA0},
                      std::vector<uint8_t> {0xA0}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u4, ngraph::element::u1,
                      std::vector<uint8_t> {0x10, 0x01, 0x00, 0x00}, std::vector<uint8_t> {0x90}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u8, ngraph::element::u1,
                      std::vector<uint8_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u16, ngraph::element::u1,
                      std::vector<uint16_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u32, ngraph::element::u1,
                      std::vector<uint32_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u64, ngraph::element::u1,
                      std::vector<uint64_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::i4, ngraph::element::u1,
                      std::vector<uint8_t> {0x10, 0x01, 0x00, 0x00}, std::vector<uint8_t> {0x90}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::i8, ngraph::element::u1,
                      std::vector<int8_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::i16, ngraph::element::u1,
                      std::vector<int16_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::i32, ngraph::element::u1,
                      std::vector<int32_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::i64, ngraph::element::u1,
                      std::vector<int64_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::f16, ngraph::element::u1,
                      std::vector<ngraph::float16> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::bf16, ngraph::element::u1,
                      std::vector<ngraph::bfloat16> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::f32, ngraph::element::u1,
                      std::vector<float> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),

        // destination u4
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::u4, std::vector<uint8_t> {0xA0},
                      std::vector<uint8_t> {0x10, 0x10}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u4, std::vector<uint8_t> {0x12, 0x03},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u4, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u4, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u4, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u4, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u4, std::vector<uint8_t> {0xFE, 0x03},
                      std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u4, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u4, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u4, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u4, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u4,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u4,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u4, std::vector<float> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),

        // destination u8
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u8, std::vector<uint8_t> {0x81},
                      std::vector<uint8_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u8, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u8, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u8, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u8, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u8, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u8, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u8, std::vector<int8_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u8, std::vector<int16_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u8, std::vector<int32_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u8, std::vector<int64_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u8, std::vector<ngraph::float16> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u8,
                      std::vector<ngraph::bfloat16> {1, 2, 0, 3}, std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u8, std::vector<float> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),

// destination u16
|
// destination u16
|
||||||
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u16, std::vector<uint8_t> {0x81},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u16, std::vector<uint8_t> {0x81},
|
||||||
std::vector<uint16_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
|
std::vector<uint16_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u16, std::vector<uint8_t> {0x21, 0x43},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u16, std::vector<uint8_t> {0x21, 0x43},
|
||||||
std::vector<uint16_t> {2, 1, 4, 3}, 4),
|
std::vector<uint16_t> {2, 1, 4, 3}, 4),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u16, std::vector<uint8_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u16, std::vector<uint8_t> {1, 2, 0, 3},
|
||||||
std::vector<uint16_t> {1, 2, 0, 3}),
|
std::vector<uint16_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u16, std::vector<uint16_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u16, std::vector<uint16_t> {1, 2, 0, 3},
|
||||||
std::vector<uint16_t> {1, 2, 0, 3}),
|
std::vector<uint16_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u16, std::vector<uint32_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u16, std::vector<uint32_t> {1, 2, 0, 3},
|
||||||
std::vector<uint16_t> {1, 2, 0, 3}),
|
std::vector<uint16_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u16, std::vector<uint64_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u16, std::vector<uint64_t> {1, 2, 0, 3},
|
||||||
std::vector<uint16_t> {1, 2, 0, 3}),
|
std::vector<uint16_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u16, std::vector<uint8_t> {0x21, 0x43},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u16, std::vector<uint8_t> {0x21, 0x43},
|
||||||
std::vector<uint16_t> {2, 1, 4, 3}, 4),
|
std::vector<uint16_t> {2, 1, 4, 3}, 4),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u16, std::vector<int8_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u16, std::vector<int8_t> {1, 2, 2, 3},
|
||||||
std::vector<uint16_t> {1, 2, 2, 3}),
|
std::vector<uint16_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u16, std::vector<int16_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u16, std::vector<int16_t> {1, 2, 2, 3},
|
||||||
std::vector<uint16_t> {1, 2, 2, 3}),
|
std::vector<uint16_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u16, std::vector<int32_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u16, std::vector<int32_t> {1, 2, 2, 3},
|
||||||
std::vector<uint16_t> {1, 2, 2, 3}),
|
std::vector<uint16_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u16, std::vector<int64_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u16, std::vector<int64_t> {1, 2, 2, 3},
|
||||||
std::vector<uint16_t> {1, 2, 2, 3}),
|
std::vector<uint16_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u16, std::vector<ngraph::float16> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u16, std::vector<ngraph::float16> {1, 2, 0, 3},
|
||||||
std::vector<uint16_t> {1, 2, 0, 3}),
|
std::vector<uint16_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u16, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u16,
|
||||||
std::vector<uint16_t> {1, 2, 0, 3}),
|
std::vector<ngraph::bfloat16> {1, 2, 0, 3}, std::vector<uint16_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u16, std::vector<float> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u16, std::vector<float> {1, 2, 2, 3},
|
||||||
std::vector<uint16_t> {1, 2, 2, 3}),
|
std::vector<uint16_t> {1, 2, 2, 3}),
|
||||||
|
|
||||||
// destination u32
|
// destination u32
|
||||||
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u32, std::vector<uint8_t> {0x81},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u32, std::vector<uint8_t> {0x81},
|
||||||
std::vector<uint32_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
|
std::vector<uint32_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u32, std::vector<uint8_t> {0x21, 0x43},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u32, std::vector<uint8_t> {0x21, 0x43},
|
||||||
std::vector<uint32_t> {2, 1, 4, 3}, 4),
|
std::vector<uint32_t> {2, 1, 4, 3}, 4),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u32, std::vector<uint8_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u32, std::vector<uint8_t> {1, 2, 0, 3},
|
||||||
std::vector<uint32_t> {1, 2, 0, 3}),
|
std::vector<uint32_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u32, std::vector<uint16_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u32, std::vector<uint16_t> {1, 2, 0, 3},
|
||||||
std::vector<uint32_t> {1, 2, 0, 3}),
|
std::vector<uint32_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u32, std::vector<uint32_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u32, std::vector<uint32_t> {1, 2, 0, 3},
|
||||||
std::vector<uint32_t> {1, 2, 0, 3}),
|
std::vector<uint32_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u32, std::vector<uint64_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u32, std::vector<uint64_t> {1, 2, 0, 3},
|
||||||
std::vector<uint32_t> {1, 2, 0, 3}),
|
std::vector<uint32_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u32, std::vector<uint8_t> {0x21, 0x43},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u32, std::vector<uint8_t> {0x21, 0x43},
|
||||||
std::vector<uint32_t> {2, 1, 4, 3}, 4),
|
std::vector<uint32_t> {2, 1, 4, 3}, 4),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u32, std::vector<int8_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u32, std::vector<int8_t> {1, 2, 2, 3},
|
||||||
std::vector<uint32_t> {1, 2, 2, 3}),
|
std::vector<uint32_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u32, std::vector<int16_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u32, std::vector<int16_t> {1, 2, 2, 3},
|
||||||
std::vector<uint32_t> {1, 2, 2, 3}),
|
std::vector<uint32_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u32, std::vector<int32_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u32, std::vector<int32_t> {1, 2, 2, 3},
|
||||||
std::vector<uint32_t> {1, 2, 2, 3}),
|
std::vector<uint32_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u32, std::vector<int64_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u32, std::vector<int64_t> {1, 2, 2, 3},
|
||||||
std::vector<uint32_t> {1, 2, 2, 3}),
|
std::vector<uint32_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u32, std::vector<ngraph::float16> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u32, std::vector<ngraph::float16> {1, 2, 0, 3},
|
||||||
std::vector<uint32_t> {1, 2, 0, 3}),
|
std::vector<uint32_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u32, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u32,
|
||||||
std::vector<uint32_t> {1, 2, 0, 3}),
|
std::vector<ngraph::bfloat16> {1, 2, 0, 3}, std::vector<uint32_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u32, std::vector<float> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u32, std::vector<float> {1, 2, 2, 3},
|
||||||
std::vector<uint32_t> {1, 2, 2, 3}),
|
std::vector<uint32_t> {1, 2, 2, 3}),
|
||||||
|
|
||||||
// destination u64
|
// destination u64
|
||||||
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u64, std::vector<uint8_t> {0x81},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u64, std::vector<uint8_t> {0x81},
|
||||||
std::vector<uint64_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
|
std::vector<uint64_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u64, std::vector<uint8_t> {0x21, 0x43},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u64, std::vector<uint8_t> {0x21, 0x43},
|
||||||
std::vector<uint64_t> {2, 1, 4, 3}, 4),
|
std::vector<uint64_t> {2, 1, 4, 3}, 4),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u64, std::vector<uint8_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u64, std::vector<uint8_t> {1, 2, 0, 3},
|
||||||
std::vector<uint64_t> {1, 2, 0, 3}),
|
std::vector<uint64_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u64, std::vector<uint16_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u64, std::vector<uint16_t> {1, 2, 0, 3},
|
||||||
std::vector<uint64_t> {1, 2, 0, 3}),
|
std::vector<uint64_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u64, std::vector<uint32_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u64, std::vector<uint32_t> {1, 2, 0, 3},
|
||||||
std::vector<uint64_t> {1, 2, 0, 3}),
|
std::vector<uint64_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u64, std::vector<uint64_t> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u64, std::vector<uint64_t> {1, 2, 0, 3},
|
||||||
std::vector<uint64_t> {1, 2, 0, 3}),
|
std::vector<uint64_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u64, std::vector<uint8_t> {0x21, 0x43},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u64, std::vector<uint8_t> {0x21, 0x43},
|
||||||
std::vector<uint64_t> {2, 1, 4, 3}, 4),
|
std::vector<uint64_t> {2, 1, 4, 3}, 4),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u64, std::vector<int8_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u64, std::vector<int8_t> {1, 2, 2, 3},
|
||||||
std::vector<uint64_t> {1, 2, 2, 3}),
|
std::vector<uint64_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u64, std::vector<int16_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u64, std::vector<int16_t> {1, 2, 2, 3},
|
||||||
std::vector<uint64_t> {1, 2, 2, 3}),
|
std::vector<uint64_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u64, std::vector<int32_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u64, std::vector<int32_t> {1, 2, 2, 3},
|
||||||
std::vector<uint64_t> {1, 2, 2, 3}),
|
std::vector<uint64_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u64, std::vector<int64_t> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u64, std::vector<int64_t> {1, 2, 2, 3},
|
||||||
std::vector<uint64_t> {1, 2, 2, 3}),
|
std::vector<uint64_t> {1, 2, 2, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u64, std::vector<ngraph::float16> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u64, std::vector<ngraph::float16> {1, 2, 0, 3},
|
||||||
std::vector<uint64_t> {1, 2, 0, 3}),
|
std::vector<uint64_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u64, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u64,
|
||||||
std::vector<uint64_t> {1, 2, 0, 3}),
|
std::vector<ngraph::bfloat16> {1, 2, 0, 3}, std::vector<uint64_t> {1, 2, 0, 3}),
|
||||||
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u64, std::vector<float> {1, 2, 2, 3},
|
ConvertParams(ConversionTypes::CONVERT, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u64, std::vector<float> {1, 2, 2, 3},
|
||||||
std::vector<uint64_t> {1, 2, 2, 3})),
|
std::vector<uint64_t> {1, 2, 2, 3})),
|
||||||
ReferenceConvertLayerTest::getTestCaseName);
|
ReferenceConversionLayerTest::getTestCaseName);
|
||||||
|
} // namespace
|
||||||
|
} // namespace ConversionOpsRefTestDefinitions
|
||||||
|
} // namespace reference_tests
@ -0,0 +1,413 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <vector>

#include "conversion.hpp"

using namespace ngraph;
using namespace InferenceEngine;
using ConversionTypes = ngraph::helpers::ConversionTypes;

namespace reference_tests {
namespace ConversionOpsRefTestDefinitions {
namespace {
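// NOTE: each ConvertParams entry below appears to bundle the conversion op under test, the input shape,
// the source and destination element types, the input buffer and the expected output buffer; the optional
// trailing integers presumably give explicit element counts for packed sub-byte types (u1, u4, i4), where
// the element number cannot be deduced from the buffer size alone.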
INSTANTIATE_TEST_SUITE_P(
    smoke_Conversion_With_Hardcoded_Refs, ReferenceConversionLayerTest,
    ::testing::Values(
        // destination boolean
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 3}, ngraph::element::u8, ngraph::element::boolean,
                      std::vector<uint8_t> {0, 12, 23, 0, std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max()},
                      std::vector<char> {0, 1, 1, 0, 0, 1}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 3}, ngraph::element::i32, ngraph::element::boolean,
                      std::vector<int32_t> {0, -12, 23, 0, std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max()},
                      std::vector<char> {0, 1, 1, 0, 1, 1}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {3, 3}, ngraph::element::f32, ngraph::element::boolean,
                      std::vector<float> {0.f, 1.5745f, 0.12352f, 0.f, std::numeric_limits<float>::lowest(), std::numeric_limits<float>::max(),
                                          std::numeric_limits<float>::min(), std::numeric_limits<float>::infinity(), -std::numeric_limits<float>::infinity()},
                      std::vector<char> {0, 1, 1, 0, 1, 1, 1, 1, 1}),

        // destination bf16
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::bf16,
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<bfloat16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::bf16,
                      std::vector<uint8_t> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142},
                      std::vector<bfloat16> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}),

        // destination f16
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f16,
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::f16,
                      std::vector<uint8_t> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142},
                      std::vector<float16> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}),
        // destination f32
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::u1, ngraph::element::f32,
                      std::vector<uint8_t> {0xA0}, std::vector<float> {1.0f, 0.0f, 1.0f, 0.0f}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::u4, ngraph::element::f32,
                      std::vector<uint8_t> {0xFB, 0x0A}, std::vector<float> {15.0f, 11.0f, 0.0f, 10.0f}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::u8, ngraph::element::f32,
                      std::vector<uint8_t> {255, 128, 32, 0}, std::vector<float> {255.0f, 128.0f, 32.0f, 0.0f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::u16, ngraph::element::f32,
                      std::vector<uint16_t> {64000, 32000, 128, 0}, std::vector<float> {64000.0f, 32000.0f, 128.0f, 0.0f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::u32, ngraph::element::f32,
                      std::vector<uint32_t> {4000000, 2000000, 128, 0}, std::vector<float> {4000000.0f, 2000000.0f, 128.0f, 0.0f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::u64, ngraph::element::f32,
                      std::vector<uint64_t> {4000000, 2000000, 128, 0}, std::vector<float> {4000000.0f, 2000000.0f, 128.0f, 0.0f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::i4, ngraph::element::f32,
                      std::vector<uint8_t> {0xFE, 0xF2}, std::vector<float> {-1.0f, -2.0f, -1.0f, 2.0f}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::i8, ngraph::element::f32,
                      std::vector<int8_t> {-127, -0, 0, 127}, std::vector<float> {-127.0f, -0.0f, 0.0f, 127.0f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::i16, ngraph::element::f32,
                      std::vector<int16_t> {-32000, -0, 0, 32000}, std::vector<float> {-32000.0f, -0.0f, 0.0f, 32000.0f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::i32, ngraph::element::f32,
                      std::vector<int32_t> {-64000, -0, 0, 64000}, std::vector<float> {-64000.0f, -0.0f, 0.0f, 64000.0f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {2, 2}, ngraph::element::i64, ngraph::element::f32,
                      std::vector<int64_t> {-64000, -0, 0, 64000}, std::vector<float> {-64000.0f, -0.0f, 0.0f, 64000.0f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::bf16, ngraph::element::f32,
                      std::vector<bfloat16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f16, ngraph::element::f32,
                      std::vector<float16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f32,
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        // destination i4
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::i4, std::vector<uint8_t> {0xA0},
                      std::vector<uint8_t> {0x10, 0x10}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i4, std::vector<uint8_t> {0x12, 0x03},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i4, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i4, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i4, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i4, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i4, std::vector<uint8_t> {0xFE, 0x03},
                      std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i4, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i4, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i4, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i4, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i4,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i4,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i4, std::vector<float> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        // destination i8
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i8, std::vector<uint8_t> {0x81},
                      std::vector<int8_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i8, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i8, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i8, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i8, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i8, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i8, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i8, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i8, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i8, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i8, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i8,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<int8_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i8,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<int8_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i8, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        // destination i16
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i16, std::vector<uint8_t> {0x81},
                      std::vector<int16_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i16, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int16_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i16, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i16, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i16, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i16, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i16, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int16_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i16, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i16, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i16, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i16, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i16,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<int16_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i16,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<int16_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i16, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        // destination i32
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i32, std::vector<uint8_t> {0x81},
                      std::vector<int32_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i32, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int32_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i32, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i32, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i32, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i32, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i32, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int32_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i32, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i32, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i32, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i32, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i32,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<int32_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i32,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<int32_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i32, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        // destination i64
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i64, std::vector<uint8_t> {0x81},
                      std::vector<int64_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i64, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int64_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i64, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i64, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i64, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i64, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i64, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<int64_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i64, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i64, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i64, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i64, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i64,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<int64_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i64,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<int64_t> {-1, -2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i64, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        // destination u1
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u1, std::vector<uint8_t> {0xA0},
                      std::vector<uint8_t> {0xA0}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u4, ngraph::element::u1,
                      std::vector<uint8_t> {0x10, 0x01, 0x00, 0x00}, std::vector<uint8_t> {0x90}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u8, ngraph::element::u1,
                      std::vector<uint8_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u16, ngraph::element::u1,
                      std::vector<uint16_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u32, ngraph::element::u1,
                      std::vector<uint32_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u64, ngraph::element::u1,
                      std::vector<uint64_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::i4, ngraph::element::u1,
                      std::vector<uint8_t> {0x10, 0x01, 0x00, 0x00}, std::vector<uint8_t> {0x90}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::i8, ngraph::element::u1,
                      std::vector<int8_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::i16, ngraph::element::u1,
                      std::vector<int16_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::i32, ngraph::element::u1,
                      std::vector<int32_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::i64, ngraph::element::u1,
                      std::vector<int64_t> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::f16, ngraph::element::u1,
                      std::vector<ngraph::float16> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::bf16, ngraph::element::u1,
                      std::vector<ngraph::bfloat16> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::f32, ngraph::element::u1,
                      std::vector<float> {1, 0, 1, 0, 0, 0, 0, 1}, std::vector<uint8_t> {0xA1}, 8, 8),
        // destination u4
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::u4, std::vector<uint8_t> {0xA0},
                      std::vector<uint8_t> {0x10, 0x10}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u4, std::vector<uint8_t> {0x12, 0x03},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u4, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u4, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u4, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u4, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u4, std::vector<uint8_t> {0xFE, 0x03},
                      std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u4, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u4, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u4, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u4, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u4,
                      std::vector<ngraph::float16> {-1, -2, 0, 3}, std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u4,
                      std::vector<ngraph::bfloat16> {-1, -2, 0, 3}, std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u4, std::vector<float> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        // destination u8
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u8, std::vector<uint8_t> {0x81},
                      std::vector<uint8_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u8, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u8, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u8, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u8, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u8, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u8, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u8, std::vector<int8_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u8, std::vector<int16_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u8, std::vector<int32_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u8, std::vector<int64_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u8,
                      std::vector<ngraph::float16> {1, 2, 0, 3}, std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u8,
                      std::vector<ngraph::bfloat16> {1, 2, 0, 3}, std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u8, std::vector<float> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        // destination u16
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u16, std::vector<uint8_t> {0x81},
                      std::vector<uint16_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u16, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint16_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u16, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u16, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u16, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u16, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u16, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint16_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u16, std::vector<int8_t> {1, 2, 2, 3},
                      std::vector<uint16_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u16, std::vector<int16_t> {1, 2, 2, 3},
                      std::vector<uint16_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u16, std::vector<int32_t> {1, 2, 2, 3},
                      std::vector<uint16_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u16, std::vector<int64_t> {1, 2, 2, 3},
                      std::vector<uint16_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u16,
                      std::vector<ngraph::float16> {1, 2, 0, 3}, std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u16,
                      std::vector<ngraph::bfloat16> {1, 2, 0, 3}, std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u16, std::vector<float> {1, 2, 2, 3},
                      std::vector<uint16_t> {1, 2, 2, 3}),
        // destination u32
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u32, std::vector<uint8_t> {0x81},
                      std::vector<uint32_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u32, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint32_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u32, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u32, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u32, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u32, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u32, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint32_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u32, std::vector<int8_t> {1, 2, 2, 3},
                      std::vector<uint32_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u32, std::vector<int16_t> {1, 2, 2, 3},
                      std::vector<uint32_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u32, std::vector<int32_t> {1, 2, 2, 3},
                      std::vector<uint32_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u32, std::vector<int64_t> {1, 2, 2, 3},
                      std::vector<uint32_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u32,
                      std::vector<ngraph::float16> {1, 2, 0, 3}, std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u32,
                      std::vector<ngraph::bfloat16> {1, 2, 0, 3}, std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u32, std::vector<float> {1, 2, 2, 3},
                      std::vector<uint32_t> {1, 2, 2, 3}),
        // destination u64
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u64, std::vector<uint8_t> {0x81},
                      std::vector<uint64_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u64, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint64_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u64, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u64, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u64, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u64, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u64, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint64_t> {2, 1, 4, 3}, 4),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u64, std::vector<int8_t> {1, 2, 2, 3},
                      std::vector<uint64_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u64, std::vector<int16_t> {1, 2, 2, 3},
                      std::vector<uint64_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u64, std::vector<int32_t> {1, 2, 2, 3},
                      std::vector<uint64_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u64, std::vector<int64_t> {1, 2, 2, 3},
                      std::vector<uint64_t> {1, 2, 2, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u64,
                      std::vector<ngraph::float16> {1, 2, 0, 3}, std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u64,
                      std::vector<ngraph::bfloat16> {1, 2, 0, 3}, std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ConversionTypes::CONVERT_LIKE, ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u64, std::vector<float> {1, 2, 2, 3},
                      std::vector<uint64_t> {1, 2, 2, 3})),
    ReferenceConversionLayerTest::getTestCaseName);

} // namespace
} // namespace ConversionOpsRefTestDefinitions
} // namespace reference_tests
84
docs/template_plugin/tests/functional/op_reference/equal.cpp
Normal file
@ -0,0 +1,84 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>

#include "comparison.hpp"

using namespace ngraph;
using namespace InferenceEngine;
using ComparisonTypes = ngraph::helpers::ComparisonTypes;


namespace reference_tests {
namespace ComparisonOpsRefTestDefinitions {
namespace {
|
||||||
|
|
||||||
|
TEST_P(ReferenceComparisonLayerTest, EqualCompareWithHardcodedRefs) {
|
||||||
|
Exec();
|
||||||
|
}
|
||||||
|
|
||||||
|
template <element::Type_t IN_ET>
|
||||||
|
std::vector<RefComparisonParams> generateComparisonParams(const element::Type& type) {
|
||||||
|
using T = typename element_type_traits<IN_ET>::value_type;
|
||||||
|
std::vector<RefComparisonParams> compParams {
|
||||||
|
// 1D // 2D // 3D // 4D
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::EQUAL)
|
||||||
|
.input1({{2, 2}, type, std::vector<T> {0, 12, 23, 0}})
|
||||||
|
.input2({{2, 2}, type, std::vector<T> {0, 12, 23, 0}})
|
||||||
|
.expected({{2, 2}, element::boolean, std::vector<char> {1, 1, 1, 1}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::EQUAL)
|
||||||
|
.input1({{2, 3}, type, std::vector<T> {0, 6, 45, 1, 21, 21}})
|
||||||
|
.input2({{2, 3}, type, std::vector<T> {1, 18, 23, 1, 19, 21}})
|
||||||
|
.expected({{2, 3}, element::boolean, std::vector<char> {0, 0, 0, 1, 0, 1}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::EQUAL)
|
||||||
|
.input1({{1}, type, std::vector<T> {53}})
|
||||||
|
.input2({{1}, type, std::vector<T> {53}})
|
||||||
|
.expected({{1}, element::boolean, std::vector<char> {1}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::EQUAL)
|
||||||
|
.input1({{2, 4}, type, std::vector<T> {0, 12, 23, 0, 1, 5, 11, 8}})
|
||||||
|
.input2({{2, 4}, type, std::vector<T> {0, 12, 23, 0, 10, 5, 11, 8}})
|
||||||
|
.expected({{2, 4}, element::boolean, std::vector<char> {1, 1, 1, 1, 0, 1, 1, 1}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::EQUAL)
|
||||||
|
.input1({{3, 1, 2}, type, std::vector<T> {2, 1, 4, 1, 3, 1}})
|
||||||
|
.input2({{1, 2, 1}, type, std::vector<T> {1, 1}})
|
||||||
|
.expected({{3, 2, 2}, element::boolean, std::vector<char> {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::EQUAL)
|
||||||
|
.input1({{2, 1, 2, 1}, type, std::vector<T> {2, 1, 4, 1}})
|
||||||
|
.input2({{1, 2, 1}, type, std::vector<T> {1, 1}})
|
||||||
|
.expected({{2, 1, 2, 1}, element::boolean, std::vector<char> {0, 1, 0, 1}})};
|
||||||
|
return compParams;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<RefComparisonParams> generateComparisonCombinedParams() {
|
||||||
|
const std::vector<std::vector<RefComparisonParams>> compTypeParams {
|
||||||
|
generateComparisonParams<element::Type_t::f32>(element::f32),
|
||||||
|
generateComparisonParams<element::Type_t::f16>(element::f16),
|
||||||
|
generateComparisonParams<element::Type_t::i32>(element::i32),
|
||||||
|
generateComparisonParams<element::Type_t::u32>(element::u32),
|
||||||
|
generateComparisonParams<element::Type_t::u8>(element::boolean)};
|
||||||
|
std::vector<RefComparisonParams> combinedParams;
|
||||||
|
|
||||||
|
for (const auto& params : compTypeParams) {
|
||||||
|
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
|
||||||
|
}
|
||||||
|
return combinedParams;
|
||||||
|
}
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(smoke_Comparison_With_Hardcoded_Refs, ReferenceComparisonLayerTest, ::testing::ValuesIn(generateComparisonCombinedParams()),
|
||||||
|
ReferenceComparisonLayerTest::getTestCaseName);
|
||||||
|
} // namespace
|
||||||
|
} // namespace ComparisonOpsRefTestDefinitions
|
||||||
|
} // namespace reference_tests
|
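The non-trivial expected shapes above ({3, 1, 2} vs {1, 2, 1} giving {3, 2, 2}, and {2, 1, 2, 1} vs {1, 2, 1} giving {2, 1, 2, 1}) follow numpy-style autobroadcasting, which these element-wise comparison ops use by default. A small standalone sketch of that shape rule (an illustration, not the ngraph implementation):

#include <algorithm>
#include <cstddef>
#include <stdexcept>
#include <utility>
#include <vector>

// Right-aligned numpy-style broadcasting of two static shapes.
std::vector<size_t> broadcast_shape(std::vector<size_t> a, std::vector<size_t> b) {
    if (a.size() < b.size())
        std::swap(a, b);
    b.insert(b.begin(), a.size() - b.size(), 1);  // pad the shorter shape with leading 1s
    std::vector<size_t> out(a.size());
    for (size_t i = 0; i < a.size(); ++i) {
        if (a[i] != b[i] && a[i] != 1 && b[i] != 1)
            throw std::invalid_argument("shapes are not broadcastable");
        out[i] = std::max(a[i], b[i]);
    }
    return out;
}

// broadcast_shape({3, 1, 2}, {1, 2, 1})    -> {3, 2, 2}
// broadcast_shape({2, 1, 2, 1}, {1, 2, 1}) -> {2, 1, 2, 1}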
94
docs/template_plugin/tests/functional/op_reference/erf.cpp
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
// Copyright (C) 2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
#include <ie_ngraph_utils.hpp>
|
||||||
|
#include <limits>
|
||||||
|
#include <ngraph/ngraph.hpp>
|
||||||
|
#include <shared_test_classes/base/layer_test_utils.hpp>
|
||||||
|
#include <tuple>
|
||||||
|
|
||||||
|
#include "base_reference_test.hpp"
|
||||||
|
|
||||||
|
using namespace reference_tests;
|
||||||
|
using namespace ngraph;
|
||||||
|
using namespace InferenceEngine;
|
||||||
|
|
||||||
|
struct ErfParams {
|
||||||
|
template <class IT>
|
||||||
|
ErfParams(const ngraph::PartialShape& shape, const ngraph::element::Type& iType, const std::vector<IT>& iValues)
|
||||||
|
: pshape(shape), inType(iType), outType(iType), inputData(CreateBlob(iType, iValues)) {
|
||||||
|
std::vector<IT> oValues;
|
||||||
|
std::vector<double> output;
|
||||||
|
for (auto element : iValues)
|
||||||
|
output.push_back(static_cast<double>(element));
|
||||||
|
|
||||||
|
std::transform(output.begin(), output.end(), output.begin(), [](double input) -> double {
|
||||||
|
return std::erf(input);
|
||||||
|
});
|
||||||
|
|
||||||
|
if (std::is_integral<IT>()) {
|
||||||
|
std::transform(output.begin(), output.end(), output.begin(), [](double input) -> double {
|
||||||
|
return std::round(input);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
for (auto element : output)
|
||||||
|
oValues.push_back(static_cast<IT>(element));
|
||||||
|
refData = CreateBlob(outType, oValues);
|
||||||
|
}
|
||||||
|
ngraph::PartialShape pshape;
|
||||||
|
ngraph::element::Type inType;
|
||||||
|
ngraph::element::Type outType;
|
||||||
|
InferenceEngine::Blob::Ptr inputData;
|
||||||
|
InferenceEngine::Blob::Ptr refData;
|
||||||
|
};
|
||||||
|
|
||||||
|
class ReferenceErfLayerTest : public testing::TestWithParam<ErfParams>, public CommonReferenceTest {
|
||||||
|
public:
|
||||||
|
void SetUp() override {
|
||||||
|
auto params = GetParam();
|
||||||
|
function = CreateFunction(params.pshape, params.inType, params.outType);
|
||||||
|
inputData = {params.inputData};
|
||||||
|
refOutData = {params.refData};
|
||||||
|
}
|
||||||
|
static std::string getTestCaseName(const testing::TestParamInfo<ErfParams>& obj) {
|
||||||
|
auto param = obj.param;
|
||||||
|
std::ostringstream result;
|
||||||
|
result << "shape=" << param.pshape << "_";
|
||||||
|
result << "iType=" << param.inType << "_";
|
||||||
|
result << "oType=" << param.outType;
|
||||||
|
return result.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
|
||||||
|
const element::Type& expected_output_type) {
|
||||||
|
const auto in = std::make_shared<op::Parameter>(input_type, input_shape);
|
||||||
|
const auto erf = std::make_shared<op::Erf>(in);
|
||||||
|
return std::make_shared<Function>(NodeVector {erf}, ParameterVector {in});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
TEST_P(ReferenceErfLayerTest, CompareWithRefs) {
|
||||||
|
Exec();
|
||||||
|
}
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_Erf_With_Hardcoded_Refs, ReferenceErfLayerTest,
|
||||||
|
::testing::Values(ErfParams(ngraph::PartialShape {2, 5}, ngraph::element::f32,
|
||||||
|
std::vector<float> {-INFINITY, -4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f, INFINITY}),
|
||||||
|
ErfParams(ngraph::PartialShape {2, 5}, ngraph::element::f16,
|
||||||
|
std::vector<float16> {-INFINITY, -4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f, INFINITY}),
|
||||||
|
ErfParams(ngraph::PartialShape {2, 3}, ngraph::element::i32,
|
||||||
|
std::vector<int32_t> {std::numeric_limits<int32_t>::min(), -2, -1, 1, 2, std::numeric_limits<int32_t>::max()}),
|
||||||
|
ErfParams(ngraph::PartialShape {2, 3}, ngraph::element::u32,
|
||||||
|
std::vector<uint32_t> {std::numeric_limits<uint32_t>::min(), 0, 1, 2, 3, std::numeric_limits<uint32_t>::max()}),
|
||||||
|
ErfParams(ngraph::PartialShape {2, 3}, ngraph::element::i64,
|
||||||
|
std::vector<int64_t> {std::numeric_limits<int64_t>::min(), -2, -1, 1, 2, std::numeric_limits<int64_t>::max()}),
|
||||||
|
ErfParams(ngraph::PartialShape {2, 3}, ngraph::element::u64,
|
||||||
|
std::vector<uint64_t> {std::numeric_limits<uint64_t>::min(), 0, 1, 2, 3, std::numeric_limits<uint64_t>::max()})),
|
||||||
|
ReferenceErfLayerTest::getTestCaseName);
|
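The ErfParams constructor above derives its reference data rather than hard-coding it: inputs are widened to double, std::erf is applied, and integral types are additionally rounded before the cast back. The same recipe in isolation (a sketch, not the test helper itself):

#include <cmath>
#include <type_traits>
#include <vector>

template <typename T>
std::vector<T> reference_erf(const std::vector<T>& in) {
    std::vector<T> out;
    for (T v : in) {
        double r = std::erf(static_cast<double>(v));   // compute in double precision
        if (std::is_integral<T>::value)
            r = std::round(r);                         // integral outputs are rounded, not truncated
        out.push_back(static_cast<T>(r));
    }
    return out;
}

// reference_erf<int32_t>({-2, -1, 1, 2}) -> {-1, -1, 1, 1}: almost every integer input maps to -1, 0 or 1.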
82
docs/template_plugin/tests/functional/op_reference/less.cpp
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
#include <ie_ngraph_utils.hpp>
|
||||||
|
#include <ngraph/ngraph.hpp>
|
||||||
|
#include <shared_test_classes/base/layer_test_utils.hpp>
|
||||||
|
|
||||||
|
#include "comparison.hpp"
|
||||||
|
|
||||||
|
using namespace ngraph;
|
||||||
|
using namespace InferenceEngine;
|
||||||
|
using ComparisonTypes = ngraph::helpers::ComparisonTypes;
|
||||||
|
|
||||||
|
namespace reference_tests {
|
||||||
|
namespace ComparisonOpsRefTestDefinitions {
|
||||||
|
namespace {
|
||||||
|
TEST_P(ReferenceComparisonLayerTest, LessCompareWithHardcodedRefs) {
|
||||||
|
Exec();
|
||||||
|
}
|
||||||
|
|
||||||
|
template <element::Type_t IN_ET>
|
||||||
|
std::vector<RefComparisonParams> generateComparisonParams(const element::Type& type) {
|
||||||
|
using T = typename element_type_traits<IN_ET>::value_type;
|
||||||
|
std::vector<RefComparisonParams> compParams {
|
||||||
|
// 1D // 2D // 3D // 4D
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS)
|
||||||
|
.input1({{2, 2}, type, std::vector<T> {0, 12, 23, 0}})
|
||||||
|
.input2({{2, 2}, type, std::vector<T> {0, 12, 23, 0}})
|
||||||
|
.expected({{2, 2}, element::boolean, std::vector<char> {0, 0, 0, 0}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS)
|
||||||
|
.input1({{2, 3}, type, std::vector<T> {0, 6, 45, 1, 21, 21}})
|
||||||
|
.input2({{2, 3}, type, std::vector<T> {1, 18, 23, 1, 19, 21}})
|
||||||
|
.expected({{2, 3}, element::boolean, std::vector<char> {1, 1, 0, 0, 0, 0}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS)
|
||||||
|
.input1({{1}, type, std::vector<T> {53}})
|
||||||
|
.input2({{1}, type, std::vector<T> {53}})
|
||||||
|
.expected({{1}, element::boolean, std::vector<char> {0}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS)
|
||||||
|
.input1({{2, 4}, type, std::vector<T> {0, 12, 23, 0, 1, 5, 11, 8}})
|
||||||
|
.input2({{2, 4}, type, std::vector<T> {0, 12, 23, 0, 10, 5, 11, 8}})
|
||||||
|
.expected({{2, 4}, element::boolean, std::vector<char> {0, 0, 0, 0, 1, 0, 0, 0}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS)
|
||||||
|
.input1({{3, 1, 2}, type, std::vector<T> {2, 1, 4, 1, 3, 1}})
|
||||||
|
.input2({{1, 2, 1}, type, std::vector<T> {1, 1}})
|
||||||
|
.expected({{3, 2, 2}, element::boolean, std::vector<char> {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS)
|
||||||
|
.input1({{2, 1, 2, 1}, type, std::vector<T> {2, 1, 4, 1}})
|
||||||
|
.input2({{1, 2, 1}, type, std::vector<T> {1, 1}})
|
||||||
|
.expected({{2, 1, 2, 1}, element::boolean, std::vector<char> {0, 0, 0, 0}})};
|
||||||
|
return compParams;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<RefComparisonParams> generateComparisonCombinedParams() {
|
||||||
|
const std::vector<std::vector<RefComparisonParams>> compTypeParams {
|
||||||
|
generateComparisonParams<element::Type_t::f32>(element::f32),
|
||||||
|
generateComparisonParams<element::Type_t::f16>(element::f16),
|
||||||
|
generateComparisonParams<element::Type_t::i32>(element::i32),
|
||||||
|
generateComparisonParams<element::Type_t::u32>(element::u32),
|
||||||
|
generateComparisonParams<element::Type_t::u8>(element::boolean)};
|
||||||
|
std::vector<RefComparisonParams> combinedParams;
|
||||||
|
|
||||||
|
for (const auto& params : compTypeParams) {
|
||||||
|
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
|
||||||
|
}
|
||||||
|
return combinedParams;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
INSTANTIATE_TEST_SUITE_P(smoke_Comparison_With_Hardcoded_Refs, ReferenceComparisonLayerTest, ::testing::ValuesIn(generateComparisonCombinedParams()),
|
||||||
|
ReferenceComparisonLayerTest::getTestCaseName);
|
||||||
|
} // namespace ComparisonOpsRefTestDefinitions
|
||||||
|
} // namespace reference_tests
|
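generateComparisonCombinedParams() above is the per-file instance of a pattern that recurs throughout these reference tests: build one parameter vector per element type, then flatten them into the single list that INSTANTIATE_TEST_SUITE_P consumes via ::testing::ValuesIn. The pattern in generic form (illustrative only):

#include <vector>

template <typename P>
std::vector<P> combine(const std::vector<std::vector<P>>& groups) {
    std::vector<P> combined;
    for (const auto& group : groups)                                   // one group per element type
        combined.insert(combined.end(), group.begin(), group.end());  // append, preserving order
    return combined;
}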
@ -0,0 +1,82 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
#include <ie_ngraph_utils.hpp>
|
||||||
|
#include <ngraph/ngraph.hpp>
|
||||||
|
#include <shared_test_classes/base/layer_test_utils.hpp>
|
||||||
|
|
||||||
|
#include "comparison.hpp"
|
||||||
|
|
||||||
|
using namespace ngraph;
|
||||||
|
using namespace InferenceEngine;
|
||||||
|
using ComparisonTypes = ngraph::helpers::ComparisonTypes;
|
||||||
|
|
||||||
|
namespace reference_tests {
|
||||||
|
namespace ComparisonOpsRefTestDefinitions {
|
||||||
|
namespace {
|
||||||
|
TEST_P(ReferenceComparisonLayerTest, LessEqualCompareWithHardcodedRefs) {
|
||||||
|
Exec();
|
||||||
|
}
|
||||||
|
|
||||||
|
template <element::Type_t IN_ET>
|
||||||
|
std::vector<RefComparisonParams> generateComparisonParams(const element::Type& type) {
|
||||||
|
using T = typename element_type_traits<IN_ET>::value_type;
|
||||||
|
std::vector<RefComparisonParams> compParams {
|
||||||
|
// 1D // 2D // 3D // 4D
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS_EQUAL)
|
||||||
|
.input1({{2, 2}, type, std::vector<T> {0, 12, 23, 0}})
|
||||||
|
.input2({{2, 2}, type, std::vector<T> {0, 12, 23, 0}})
|
||||||
|
.expected({{2, 2}, element::boolean, std::vector<char> {1, 1, 1, 1}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS_EQUAL)
|
||||||
|
.input1({{2, 3}, type, std::vector<T> {0, 6, 45, 1, 21, 21}})
|
||||||
|
.input2({{2, 3}, type, std::vector<T> {1, 18, 23, 1, 19, 21}})
|
||||||
|
.expected({{2, 3}, element::boolean, std::vector<char> {1, 1, 0, 1, 0, 1}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS_EQUAL)
|
||||||
|
.input1({{1}, type, std::vector<T> {53}})
|
||||||
|
.input2({{1}, type, std::vector<T> {53}})
|
||||||
|
.expected({{1}, element::boolean, std::vector<char> {1}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS_EQUAL)
|
||||||
|
.input1({{2, 4}, type, std::vector<T> {0, 12, 23, 0, 1, 5, 11, 8}})
|
||||||
|
.input2({{2, 4}, type, std::vector<T> {0, 12, 23, 0, 10, 5, 11, 8}})
|
||||||
|
.expected({{2, 4}, element::boolean, std::vector<char> {1, 1, 1, 1, 1, 1, 1, 1}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS_EQUAL)
|
||||||
|
.input1({{3, 1, 2}, type, std::vector<T> {2, 1, 4, 1, 3, 1}})
|
||||||
|
.input2({{1, 2, 1}, type, std::vector<T> {1, 1}})
|
||||||
|
.expected({{3, 2, 2}, element::boolean, std::vector<char> {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}}),
|
||||||
|
Builder {}
|
||||||
|
.compType(ComparisonTypes::LESS_EQUAL)
|
||||||
|
.input1({{2, 1, 2, 1}, type, std::vector<T> {2, 1, 4, 1}})
|
||||||
|
.input2({{1, 2, 1}, type, std::vector<T> {1, 1}})
|
||||||
|
.expected({{2, 1, 2, 1}, element::boolean, std::vector<char> {0, 1, 0, 1}})};
|
||||||
|
return compParams;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<RefComparisonParams> generateComparisonCombinedParams() {
|
||||||
|
const std::vector<std::vector<RefComparisonParams>> compTypeParams {
|
||||||
|
generateComparisonParams<element::Type_t::f32>(element::f32),
|
||||||
|
generateComparisonParams<element::Type_t::f16>(element::f16),
|
||||||
|
generateComparisonParams<element::Type_t::i32>(element::i32),
|
||||||
|
generateComparisonParams<element::Type_t::u32>(element::u32),
|
||||||
|
generateComparisonParams<element::Type_t::u8>(element::boolean)};
|
||||||
|
std::vector<RefComparisonParams> combinedParams;
|
||||||
|
|
||||||
|
for (const auto& params : compTypeParams) {
|
||||||
|
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
|
||||||
|
}
|
||||||
|
return combinedParams;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
INSTANTIATE_TEST_SUITE_P(smoke_Comparison_With_Hardcoded_Refs, ReferenceComparisonLayerTest, ::testing::ValuesIn(generateComparisonCombinedParams()),
|
||||||
|
ReferenceComparisonLayerTest::getTestCaseName);
|
||||||
|
} // namespace ComparisonOpsRefTestDefinitions
|
||||||
|
} // namespace reference_tests
|
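Note that the expected boolean tensors throughout these comparison tests are std::vector<char>, not std::vector<bool>; presumably because vector<bool> is bit-packed and exposes no contiguous byte buffer to copy into a boolean/u8 blob. A two-line illustration of the difference (a standalone sketch):

#include <vector>

std::vector<char> expected {1, 1, 0, 1, 0, 1};  // one addressable byte per element
const char* raw = expected.data();              // std::vector<bool> provides no equivalent data() buffer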
@ -0,0 +1,83 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
#include <ie_ngraph_utils.hpp>
|
||||||
|
#include <ngraph/ngraph.hpp>
|
||||||
|
#include <shared_test_classes/base/layer_test_utils.hpp>
|
||||||
|
#include <tuple>
|
||||||
|
|
||||||
|
#include "base_reference_test.hpp"
|
||||||
|
|
||||||
|
using namespace reference_tests;
|
||||||
|
using namespace ngraph;
|
||||||
|
using namespace InferenceEngine;
|
||||||
|
|
||||||
|
|
||||||
|
struct LogicalAndParams {
|
||||||
|
template <class IT, class OT>
|
||||||
|
LogicalAndParams(const ngraph::PartialShape& input_shape1, const ngraph::PartialShape& input_shape2 ,
|
||||||
|
const std::vector<IT>& iValues1, const std::vector<IT>& iValues2, const std::vector<OT>& oValues)
|
||||||
|
: pshape1(input_shape1), pshape2(input_shape2), inType(ngraph::element::boolean), outType(ngraph::element::boolean),
|
||||||
|
inputData1(CreateBlob(ngraph::element::boolean, iValues1)), inputData2(CreateBlob(ngraph::element::boolean, iValues2)),
|
||||||
|
refData(CreateBlob(ngraph::element::boolean, oValues)) {}
|
||||||
|
ngraph::PartialShape pshape1;
|
||||||
|
ngraph::PartialShape pshape2;
|
||||||
|
ngraph::element::Type inType;
|
||||||
|
ngraph::element::Type outType;
|
||||||
|
InferenceEngine::Blob::Ptr inputData1;
|
||||||
|
InferenceEngine::Blob::Ptr inputData2;
|
||||||
|
InferenceEngine::Blob::Ptr refData;
|
||||||
|
};
|
||||||
|
|
||||||
|
class ReferenceLogicalAndLayerTest : public testing::TestWithParam<LogicalAndParams>, public CommonReferenceTest {
|
||||||
|
public:
|
||||||
|
void SetUp() override {
|
||||||
|
auto params = GetParam();
|
||||||
|
function = CreateFunction(params.pshape1, params.pshape2, params.inType);
|
||||||
|
inputData = {params.inputData1, params.inputData2};
|
||||||
|
refOutData = {params.refData};
|
||||||
|
}
|
||||||
|
static std::string getTestCaseName(const testing::TestParamInfo<LogicalAndParams>& obj) {
|
||||||
|
auto param = obj.param;
|
||||||
|
std::ostringstream result;
|
||||||
|
result << "input_shape1=" << param.pshape1 << "_";
|
||||||
|
result << "input_shape2=" << param.pshape2 << "_";
|
||||||
|
result << "iType=" << param.inType << "_";
|
||||||
|
result << "oType=" << param.outType;
|
||||||
|
return result.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
|
||||||
|
const PartialShape& input_shape2, const element::Type& input_type) {
|
||||||
|
const auto in = std::make_shared<op::Parameter>(input_type, input_shape1);
|
||||||
|
const auto in2 = std::make_shared<op::Parameter>(input_type, input_shape2);
|
||||||
|
const auto logical_and = std::make_shared<op::v1::LogicalAnd>(in, in2);
|
||||||
|
return std::make_shared<Function>(NodeVector {logical_and}, ParameterVector {in, in2});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
TEST_P(ReferenceLogicalAndLayerTest, CompareWithHardcodedRefs) {
|
||||||
|
Exec();
|
||||||
|
}
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_LogicalAnd_With_Hardcoded_Refs, ReferenceLogicalAndLayerTest,
|
||||||
|
::testing::Values(
|
||||||
|
LogicalAndParams(ngraph::PartialShape {2, 2}, ngraph::PartialShape {2, 2},
|
||||||
|
std::vector<char> {true, false, true, false},
|
||||||
|
std::vector<char> {false, true, true, false},
|
||||||
|
std::vector<char> {false, false, true, false}),
|
||||||
|
LogicalAndParams(ngraph::PartialShape {2, 1, 2, 1}, ngraph::PartialShape {1, 1, 2, 1},
|
||||||
|
std::vector<char> {true, false, true, false},
|
||||||
|
std::vector<char> {true, false},
|
||||||
|
std::vector<char> {true, false, true, false}),
|
||||||
|
LogicalAndParams(ngraph::PartialShape {3, 4}, ngraph::PartialShape {3, 4},
|
||||||
|
std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, true},
|
||||||
|
std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, false},
|
||||||
|
std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, false})),
|
||||||
|
ReferenceLogicalAndLayerTest::getTestCaseName);
|
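For the equal-shape case above, the reference result is simply an element-wise AND; with the standard library that is a one-line std::transform (a standalone sketch, not the plugin's reference kernel):

#include <algorithm>
#include <functional>
#include <vector>

std::vector<char> logical_and_flat(const std::vector<char>& a, const std::vector<char>& b) {
    std::vector<char> out(a.size());
    std::transform(a.begin(), a.end(), b.begin(), out.begin(), std::logical_and<char>());
    return out;
}

// logical_and_flat({1, 0, 1, 0}, {0, 1, 1, 0}) -> {0, 0, 1, 0}, the first expected vector above.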
@ -0,0 +1,226 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
#include <ie_ngraph_utils.hpp>
|
||||||
|
#include <ngraph/ngraph.hpp>
|
||||||
|
#include <tuple>
|
||||||
|
|
||||||
|
#include "base_reference_test.hpp"
|
||||||
|
|
||||||
|
using namespace ngraph;
|
||||||
|
using namespace InferenceEngine;
|
||||||
|
using namespace reference_tests;
|
||||||
|
|
||||||
|
struct ROIPoolingParams {
|
||||||
|
template <class T>
|
||||||
|
ROIPoolingParams(const size_t iH, const size_t iW, const size_t ch, const size_t rois,
|
||||||
|
const size_t oH, const size_t oW, const float sS, const std::string mode,
|
||||||
|
const ngraph::element::Type& type, const std::vector<T>& inputValues,
|
||||||
|
const std::vector<T>& proposalValues, const std::vector<T>& outputValues)
|
||||||
|
: inputH(iH), inputW(iW), channelCount(ch), roiCount(rois), outputH(oH), outputW(oW), spatialScale(sS),
|
||||||
|
poolingMode(mode), dataType(type), featureMap(CreateBlob(type, inputValues)),
|
||||||
|
proposal(CreateBlob(type, proposalValues)), refData(CreateBlob(type, outputValues)) {}
|
||||||
|
size_t inputH;
|
||||||
|
size_t inputW;
|
||||||
|
size_t channelCount;
|
||||||
|
size_t roiCount;
|
||||||
|
size_t outputH;
|
||||||
|
size_t outputW;
|
||||||
|
float spatialScale;
|
||||||
|
std::string poolingMode;
|
||||||
|
ngraph::element::Type dataType;
|
||||||
|
InferenceEngine::Blob::Ptr featureMap;
|
||||||
|
InferenceEngine::Blob::Ptr proposal;
|
||||||
|
InferenceEngine::Blob::Ptr refData;
|
||||||
|
|
||||||
|
public:
|
||||||
|
template<class T>
|
||||||
|
inline static std::vector<T> increasinglyFilledBlob(size_t size) {
|
||||||
|
std::vector<T> inputValues;
|
||||||
|
T one = 1;
|
||||||
|
for (size_t i = 0; i < size; i++) {
|
||||||
|
inputValues.push_back(one * i / 10);
|
||||||
|
}
|
||||||
|
return inputValues;
|
||||||
|
}
|
||||||
|
template<class T>
|
||||||
|
inline static std::vector<T> equallyFilledBlob(size_t size, T value) {
|
||||||
|
std::vector<T> inputValues;
|
||||||
|
for (size_t i = 0; i < size; i++) {
|
||||||
|
inputValues.push_back(value);
|
||||||
|
}
|
||||||
|
return inputValues;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
class ReferenceRoiPoolingLayerTest : public testing::TestWithParam<ROIPoolingParams>, public CommonReferenceTest {
|
||||||
|
public:
|
||||||
|
void SetUp() override {
|
||||||
|
auto params = GetParam();
|
||||||
|
function = CreateFunction(params.inputH, params.inputW, params.channelCount, params.roiCount,
|
||||||
|
params.outputH, params.outputW, params.spatialScale, params.poolingMode, params.dataType);
|
||||||
|
inputData = {params.featureMap, params.proposal};
|
||||||
|
refOutData = {params.refData};
|
||||||
|
}
|
||||||
|
static std::string getTestCaseName(const testing::TestParamInfo<ROIPoolingParams>& obj) {
|
||||||
|
auto param = obj.param;
|
||||||
|
std::ostringstream result;
|
||||||
|
result << "IS=" << param.inputH << "," << param.inputW << "_";
|
||||||
|
result << "OS=" << param.outputH << "," << param.outputW << "_";
|
||||||
|
result << "Ch=" << param.channelCount << "_";
|
||||||
|
result << "Rois=" << param.roiCount << "_";
|
||||||
|
result << "Ss=" << param.spatialScale << "_";
|
||||||
|
result << "Mode=" << param.poolingMode << "_";
|
||||||
|
result << "Prec=" << param.dataType << "_";
|
||||||
|
result << std::to_string(obj.index);
|
||||||
|
return result.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static std::shared_ptr<Function> CreateFunction(const size_t i_h, const size_t i_w, const size_t ch, const size_t roi_count,
|
||||||
|
const size_t o_h, const size_t o_w, const float spat_scale, const std::string mode,
|
||||||
|
const ngraph::element::Type& type) {
|
||||||
|
Shape feat_map_shape{1, ch, i_h, i_w};
|
||||||
|
Shape rois_shape{roi_count, 5};
|
||||||
|
Shape pooled_shape{o_h, o_w};
|
||||||
|
Shape output_shape{roi_count, ch, o_h, o_w};
|
||||||
|
|
||||||
|
const auto feat_map = std::make_shared<op::Parameter>(type, feat_map_shape);
|
||||||
|
const auto rois = std::make_shared<op::Parameter>(type, rois_shape);
|
||||||
|
const auto roi_pooling = std::make_shared<op::v0::ROIPooling>(feat_map, rois, pooled_shape, spat_scale, mode);
|
||||||
|
return std::make_shared<Function>(roi_pooling, ParameterVector{feat_map, rois});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
TEST_P(ReferenceRoiPoolingLayerTest, CompareWithHardcodedRefs) {
|
||||||
|
Exec();
|
||||||
|
}
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_ROIPooling_With_Hardcoded_Refs, ReferenceRoiPoolingLayerTest,
|
||||||
|
::testing::Values(
|
||||||
|
// fp32
|
||||||
|
// roi_pooling_1x1_max
|
||||||
|
ROIPoolingParams(6, 6, // iH, iW
|
||||||
|
3, 3, // channels, rois
|
||||||
|
1, 1, // oH, oW
|
||||||
|
1.f, "max", // scale, mode
|
||||||
|
element::f32, ROIPoolingParams::increasinglyFilledBlob<float>(3 * 6 * 6),
|
||||||
|
std::vector<float> {0, 1, 1, 2, 3, 0, 1, 1, 2, 3, 0, 1, 1, 2, 3},
|
||||||
|
std::vector<float> {2.0f, 5.6f, 9.2f, 2.0f, 5.6f, 9.2f, 2.0f, 5.6f, 9.2f}),
|
||||||
|
// roi_pooling_2x2_max
|
||||||
|
ROIPoolingParams(6, 6, // iH, iW
|
||||||
|
1, 3, // channels, rois
|
||||||
|
2, 2, // oH, oW
|
||||||
|
1.f, "max", // scale, mode
|
||||||
|
element::f32, ROIPoolingParams::increasinglyFilledBlob<float>(1 * 6 * 6),
|
||||||
|
std::vector<float> {0, 1, 1, 3, 3, 0, 1, 2, 2, 4, 0, 0, 1, 4, 5},
|
||||||
|
std::vector<float> {1.4f, 1.5f, 2.0f, 2.1f, 1.9f, 2.0f, 2.5f, 2.6f, 2.0f, 2.2f, 3.2f, 3.4f}),
|
||||||
|
// roi_pooling_1x1_bilinear
|
||||||
|
ROIPoolingParams(6, 6, // iH, iW
|
||||||
|
3, 2, // channels, rois
|
||||||
|
1, 1, // oH, oW
|
||||||
|
1.f, "bilinear", // scale, mode
|
||||||
|
element::f32, ROIPoolingParams::increasinglyFilledBlob<float>(3 * 6 * 6),
|
||||||
|
std::vector<float> {0, 0.2, 0.2, 0.4, 0.4, 0, 0.2, 0.2, 0.6, 0.6},
|
||||||
|
std::vector<float> {1.05f, 4.65f, 8.25f, 1.4f, 5.0f, 8.6f}),
|
||||||
|
// roi_pooling_2x2_bilinear
|
||||||
|
ROIPoolingParams(8, 8, // iH, iW
|
||||||
|
1, 3, // channels, rois
|
||||||
|
2, 2, // oH, oW
|
||||||
|
1.f, "bilinear", // scale, mode
|
||||||
|
element::f32, ROIPoolingParams::increasinglyFilledBlob<float>(1 * 8 * 8),
|
||||||
|
std::vector<float> {0.f, 0.15f, 0.2f, 0.75f, 0.8f,
|
||||||
|
0.f, 0.15f, 0.2f, 0.75f, 0.8f,
|
||||||
|
0.f, 0.15f, 0.2f, 0.75f, 0.8f},
|
||||||
|
std::vector<float> {1.225f, 1.645f, 4.585f, 5.005f,
|
||||||
|
1.225f, 1.645f, 4.585f, 5.005f,
|
||||||
|
1.225f, 1.645f, 4.585f, 5.005f}),
|
||||||
|
// roi_pooling_2x2_bilinear_border_proposal
|
||||||
|
ROIPoolingParams(50, 50, // iH, iW
|
||||||
|
1, 1, // channels, rois
|
||||||
|
4, 4, // oH, oW
|
||||||
|
1.f, "bilinear", // scale, mode
|
||||||
|
element::f32, ROIPoolingParams::equallyFilledBlob<float>(1 * 50 * 50, 1),
|
||||||
|
std::vector<float> {0.f, 0.f, 0.248046786f, 0.471333951f, 1.f},
|
||||||
|
std::vector<float>(16, 1.f)),
|
||||||
|
|
||||||
|
// bf16
|
||||||
|
// roi_pooling_1x1_max
|
||||||
|
ROIPoolingParams(6, 6, // iH, iW
|
||||||
|
3, 3, // channels, rois
|
||||||
|
1, 1, // oH, oW
|
||||||
|
1.f, "max", // scale, mode
|
||||||
|
element::bf16, ROIPoolingParams::increasinglyFilledBlob<bfloat16>(3 * 6 * 6),
|
||||||
|
std::vector<bfloat16> {0, 1, 1, 2, 3, 0, 1, 1, 2, 3, 0, 1, 1, 2, 3},
|
||||||
|
std::vector<bfloat16> {2.0f, 5.6f, 9.2f, 2.0f, 5.6f, 9.2f, 2.0f, 5.6f, 9.2f}),
|
||||||
|
// roi_pooling_2x2_max
|
||||||
|
ROIPoolingParams(6, 6, // iH, iW
|
||||||
|
1, 3, // channels, rois
|
||||||
|
2, 2, // oH, oW
|
||||||
|
1.f, "max", // scale, mode
|
||||||
|
element::bf16, ROIPoolingParams::increasinglyFilledBlob<bfloat16>(1 * 6 * 6),
|
||||||
|
std::vector<bfloat16> {0, 1, 1, 3, 3, 0, 1, 2, 2, 4, 0, 0, 1, 4, 5},
|
||||||
|
std::vector<bfloat16> {1.4f, 1.5f, 2.0f, 2.1f, 1.9f, 2.0f, 2.5f, 2.6f, 2.0f, 2.2f, 3.2f, 3.4f}),
|
||||||
|
// roi_pooling_1x1_bilinear
|
||||||
|
ROIPoolingParams(6, 6, // iH, iW
|
||||||
|
3, 2, // channels, rois
|
||||||
|
1, 1, // oH, oW
|
||||||
|
1.f, "bilinear", // scale, mode
|
||||||
|
element::bf16, ROIPoolingParams::increasinglyFilledBlob<bfloat16>(3 * 6 * 6),
|
||||||
|
std::vector<bfloat16> {0, 0.2, 0.2, 0.4, 0.4, 0, 0.2, 0.2, 0.6, 0.6},
|
||||||
|
std::vector<bfloat16> {1.05f, 4.65f, 8.25f, 1.4f, 5.0f, 8.6f}),
|
||||||
|
// roi_pooling_2x2_bilinear
|
||||||
|
ROIPoolingParams(8, 8, // iH, iW
|
||||||
|
1, 3, // channels, rois
|
||||||
|
2, 2, // oH, oW
|
||||||
|
1.f, "bilinear", // scale, mode
|
||||||
|
element::bf16, ROIPoolingParams::increasinglyFilledBlob<bfloat16>(1 * 8 * 8),
|
||||||
|
std::vector<bfloat16> {0.f, 0.15f, 0.2f, 0.75f, 0.8f,
|
||||||
|
0.f, 0.15f, 0.2f, 0.75f, 0.8f,
|
||||||
|
0.f, 0.15f, 0.2f, 0.75f, 0.8f},
|
||||||
|
std::vector<bfloat16> {1.225f, 1.645f, 4.585f, 4.937f,
|
||||||
|
1.225f, 1.645f, 4.585f, 4.937f,
|
||||||
|
1.225f, 1.645f, 4.585f, 4.937f}),
|
||||||
|
// fp16
|
||||||
|
// roi_pooling_1x1_max
|
||||||
|
ROIPoolingParams(6, 6, // iH, iW
|
||||||
|
3, 3, // channels, rois
|
||||||
|
1, 1, // oH, oW
|
||||||
|
1.f, "max", // scale, mode
|
||||||
|
element::f16, ROIPoolingParams::increasinglyFilledBlob<float16>(3 * 6 * 6),
|
||||||
|
std::vector<float16> {0, 1, 1, 2, 3, 0, 1, 1, 2, 3, 0, 1, 1, 2, 3},
|
||||||
|
std::vector<float16> {2.0f, 5.6f, 9.2f, 2.0f, 5.6f, 9.2f, 2.0f, 5.6f, 9.2f}),
|
||||||
|
// roi_pooling_2x2_max
|
||||||
|
ROIPoolingParams(6, 6, // iH, iW
|
||||||
|
1, 3, // channels, rois
|
||||||
|
2, 2, // oH, oW
|
||||||
|
1.f, "max", // scale, mode
|
||||||
|
element::f16, ROIPoolingParams::increasinglyFilledBlob<float16>(1 * 6 * 6),
|
||||||
|
std::vector<float16> {0, 1, 1, 3, 3, 0, 1, 2, 2, 4, 0, 0, 1, 4, 5},
|
||||||
|
std::vector<float16> {1.4f, 1.5f, 2.0f, 2.1f, 1.9f, 2.0f, 2.5f, 2.6f, 2.0f, 2.2f, 3.2f, 3.4f}),
|
||||||
|
// roi_pooling_1x1_bilinear
|
||||||
|
ROIPoolingParams(6, 6, // iH, iW
|
||||||
|
3, 2, // channels, rois
|
||||||
|
1, 1, // oH, oW
|
||||||
|
1.f, "bilinear", // scale, mode
|
||||||
|
element::f16, ROIPoolingParams::increasinglyFilledBlob<float16>(3 * 6 * 6),
|
||||||
|
std::vector<float16> {0, 0.2, 0.2, 0.4, 0.4, 0, 0.2, 0.2, 0.6, 0.6},
|
||||||
|
std::vector<float16> {1.05f, 4.65f, 8.25f, 1.4f, 5.0f, 8.6f}),
|
||||||
|
// roi_pooling_2x2_bilinear
|
||||||
|
ROIPoolingParams(8, 8, // iH, iW
|
||||||
|
1, 3, // channels, rois
|
||||||
|
2, 2, // oH, oW
|
||||||
|
1.f, "bilinear", // scale, mode
|
||||||
|
element::f16, ROIPoolingParams::increasinglyFilledBlob<float16>(1 * 8 * 8),
|
||||||
|
std::vector<float16> {0.f, 0.15f, 0.2f, 0.75f, 0.8f,
|
||||||
|
0.f, 0.15f, 0.2f, 0.75f, 0.8f,
|
||||||
|
0.f, 0.15f, 0.2f, 0.75f, 0.8f},
|
||||||
|
std::vector<float16> {1.225f, 1.645f, 4.585f, 5.005f,
|
||||||
|
1.225f, 1.645f, 4.585f, 5.005f,
|
||||||
|
1.225f, 1.645f, 4.585f, 5.005f})),
|
||||||
|
ReferenceRoiPoolingLayerTest::getTestCaseName);
|
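The first "max" case above can be checked by hand: increasinglyFilledBlob fills the 6x6 map with i/10, and, reading each proposal as [batch, x1, y1, x2, y2] in already-scaled pixel coordinates, a 1x1 output is just the maximum over that window per channel. A small sketch of that check (an illustration of where the expected values come from, not the reference implementation):

#include <algorithm>
#include <cstddef>
#include <vector>

// Max over the inclusive window [x1..x2] x [y1..y2] of a single-channel, W-wide feature map.
float roi_max_1x1(const std::vector<float>& map, size_t W,
                  size_t x1, size_t y1, size_t x2, size_t y2) {
    float best = map[y1 * W + x1];
    for (size_t y = y1; y <= y2; ++y)
        for (size_t x = x1; x <= x2; ++x)
            best = std::max(best, map[y * W + x]);
    return best;
}

// Channel 0 holds 0.0, 0.1, ..., 3.5; the ROI {0, 1, 1, 2, 3} covers indices up to 3*6+2 = 20,
// so roi_max_1x1(ch0, 6, 1, 1, 2, 3) == 2.0f, matching the first expected output above.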
85
docs/template_plugin/tests/functional/op_reference/tan.cpp
Normal file
@ -0,0 +1,85 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
#include <ie_ngraph_utils.hpp>
|
||||||
|
#include <ngraph/ngraph.hpp>
|
||||||
|
#include <shared_test_classes/base/layer_test_utils.hpp>
|
||||||
|
#include <tuple>
|
||||||
|
|
||||||
|
#include "base_reference_test.hpp"
|
||||||
|
|
||||||
|
using namespace ngraph;
|
||||||
|
using namespace InferenceEngine;
|
||||||
|
using namespace reference_tests;
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
struct TanParams {
|
||||||
|
template <class IT>
|
||||||
|
TanParams(const ngraph::PartialShape& shape, const ngraph::element::Type& iType, const std::vector<IT>& iValues,
|
||||||
|
const std::vector<IT>& oValues)
|
||||||
|
:pshape(shape), inType(iType), outType(iType), inputData(CreateBlob(iType, iValues)), refData(CreateBlob(iType, oValues)) {}
|
||||||
|
ngraph::PartialShape pshape;
|
||||||
|
ngraph::element::Type inType;
|
||||||
|
ngraph::element::Type outType;
|
||||||
|
InferenceEngine::Blob::Ptr inputData;
|
||||||
|
InferenceEngine::Blob::Ptr refData;
|
||||||
|
};
|
||||||
|
|
||||||
|
class ReferenceTanLayerTest : public testing::TestWithParam<TanParams>, public CommonReferenceTest {
|
||||||
|
public:
|
||||||
|
void SetUp() override {
|
||||||
|
auto params = GetParam();
|
||||||
|
function = CreateFunction(params.pshape, params.inType);
|
||||||
|
inputData = {params.inputData};
|
||||||
|
refOutData = {params.refData};
|
||||||
|
}
|
||||||
|
static std::string getTestCaseName(const testing::TestParamInfo<TanParams>& obj) {
|
||||||
|
auto param = obj.param;
|
||||||
|
std::ostringstream result;
|
||||||
|
result << "shape=" << param.pshape << "_";
|
||||||
|
result << "iType=" << param.inType << "_";
|
||||||
|
result << "oType=" << param.outType;
|
||||||
|
return result.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type) {
|
||||||
|
const auto in = std::make_shared<op::Parameter>(input_type, input_shape);
|
||||||
|
const auto tan = std::make_shared<op::Tan>(in);
|
||||||
|
return std::make_shared<Function>(tan, ParameterVector {in});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
TEST_P(ReferenceTanLayerTest, CompareWithHardcodedRefs) {
|
||||||
|
Exec();
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<TanParams> generateTanCombinedParams() {
|
||||||
|
std::vector<TanParams> combinedParams {
|
||||||
|
TanParams(ngraph::PartialShape {5}, ngraph::element::i32, std::vector<int32_t> {-2, -1, 0, 1, 2},
|
||||||
|
std::vector<int32_t> {2, -2, 0, 2, -2}),
|
||||||
|
TanParams(ngraph::PartialShape {5}, ngraph::element::i64, std::vector<int64_t> {-2, -1, 0, 1, 2},
|
||||||
|
std::vector<int64_t> {2, -2, 0, 2, -2}),
|
||||||
|
TanParams(ngraph::PartialShape {5}, ngraph::element::u32, std::vector<uint32_t> {1, 2, 3, 4, 5},
|
||||||
|
std::vector<uint32_t> {2, 0xFFFFFFFF - 1, 0, 1, 0xFFFFFFFF - 2}),
|
||||||
|
TanParams(ngraph::PartialShape {5}, ngraph::element::u64, std::vector<uint64_t> {1, 2, 3, 4, 5},
|
||||||
|
std::vector<uint64_t> {2, 0xFFFFFFFFFFFFFFFF - 1, 0, 1, 0xFFFFFFFFFFFFFFFF - 2}),
|
||||||
|
TanParams(ngraph::PartialShape {11}, ngraph::element::f32, std::vector<float> {0.f, 0.25f,
|
||||||
|
-0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f},
|
||||||
|
std::vector<float> {0.00000000f, 0.25534192f, -0.25534192f, 0.54630249f, -0.54630249f,
|
||||||
|
1.55740772f, -1.55740772f, -2.18503986f, 2.18503986f, 1.15782128f, -1.15782128f}),
|
||||||
|
TanParams(ngraph::PartialShape {11}, ngraph::element::f16, std::vector<float16> {0.f, 0.25f,
|
||||||
|
-0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f},
|
||||||
|
std::vector<float16> {0.00000000f, 0.25534192f, -0.25534192f, 0.54630249f, -0.54630249f,
|
||||||
|
1.55740772f, -1.55740772f, -2.18503986f, 2.18503986f, 1.15782128f, -1.15782128f})
|
||||||
|
};
|
||||||
|
return combinedParams;
|
||||||
|
}
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(smoke_TAN_With_Hardcoded_Refs, ReferenceTanLayerTest, ::testing::ValuesIn(generateTanCombinedParams()),
|
||||||
|
ReferenceTanLayerTest::getTestCaseName);
|
||||||
|
} // namespace
|
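The integer Tan cases above are consistent with computing tan() in floating point, rounding to the nearest integer, and wrapping the result into the destination type; that is how tan(2) ≈ -2.185 becomes 0xFFFFFFFF - 1 for u32. A hedged sketch of that interpretation:

#include <cmath>
#include <cstdint>

uint32_t tan_u32_ref(uint32_t x) {
    const long long rounded = std::llround(std::tan(static_cast<double>(x)));  // nearest integer
    return static_cast<uint32_t>(rounded);  // negative values wrap modulo 2^32, e.g. -2 -> 0xFFFFFFFE
}

// tan_u32_ref(2) == 0xFFFFFFFFu - 1u and tan_u32_ref(5) == 0xFFFFFFFFu - 2u,
// matching the u32 expected vector above.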
@ -29,6 +29,7 @@ function(set_ie_threading_interface_for TARGET_NAME)
set(TBB_IMPORTED_TARGETS ${TBB_IMPORTED_TARGETS} PARENT_SCOPE)
set(TBB_VERSION ${TBB_VERSION} PARENT_SCOPE)
if (NOT TBB_FOUND)
set(THREADING "SEQ" PARENT_SCOPE)
ext_message(WARNING "TBB was not found by the configured TBB_DIR/TBBROOT path.\
SEQ method will be used.")
endif ()
@ -95,6 +96,7 @@ function(set_ie_threading_interface_for TARGET_NAME)
set(IE_THREAD_DEFINE "IE_THREAD_TBB")
ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${TBB_IMPORTED_TARGETS})
else ()
set(THREADING "SEQ" PARENT_SCOPE)
ext_message(WARNING "TBB was not found by the configured TBB_DIR path.\
SEQ method will be used for ${TARGET_NAME}")
endif ()
@ -133,6 +135,7 @@ function(set_ie_threading_interface_for TARGET_NAME)

if (NOT OMP_LIBRARIES_RELEASE)
ext_message(WARNING "Intel OpenMP not found. Intel OpenMP support will be disabled. ${IE_THREAD_DEFINE} is defined")
set(THREADING "SEQ" PARENT_SCOPE)
else ()
set(IE_THREAD_DEFINE "IE_THREAD_OMP")

@ -14,7 +14,7 @@ add_library(${TARGET_NAME} SHARED ${HEADERS} ${SOURCES})
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine)

target_include_directories(${TARGET_NAME} PUBLIC
$<INSTALL_INTERFACE:${IE_CPACK_IE_DIR}/include>
$<INSTALL_INTERFACE:${IE_CPACK_IE_DIR}/include/ie>
$<BUILD_INTERFACE:${InferenceEngine_C_API_SOURCE_DIR}/include>)

add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
@ -40,5 +40,5 @@ install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core_c)

install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/
DESTINATION ${IE_CPACK_IE_DIR}/include
DESTINATION ${IE_CPACK_IE_DIR}/include/ie
COMPONENT core_c_dev)
@ -58,6 +58,13 @@ else()
endif()
endif()

function(ov_python_disable_intel_warnings target)
if(UNIX AND CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
# 1292: unknown attribute "fallthrough"
target_compile_options(${target} PRIVATE -diag-disable=1292)
endif()
endfunction()

set (PYTHON_BRIDGE_SRC_ROOT ${CMAKE_CURRENT_SOURCE_DIR})
add_subdirectory (src/openvino/inference_engine)
add_subdirectory (src/openvino/offline_transformations)
@ -20,13 +20,15 @@ set_source_files_properties(${PYX_SOURCES} PROPERTIES CYTHON_IS_CXX ON)
# create target

cython_add_module(${TARGET_NAME} ${SOURCES})
set(INSTALLED_TARGETS ${TARGET_NAME})
ov_python_disable_intel_warnings(${TARGET_NAME})

set(INSTALLED_TARGETS ${TARGET_NAME})
list(REMOVE_ITEM PYX_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/ie_api.pyx")

foreach(PYX_FILE IN LISTS PYX_SOURCES)
get_filename_component(PYX_NAME "${PYX_FILE}" NAME_WE)
cython_add_module(${PYX_NAME} ${PYX_FILE})
ov_python_disable_intel_warnings(${PYX_NAME})
add_dependencies(${TARGET_NAME} ${PYX_NAME})
target_include_directories(${PYX_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
target_link_libraries(${PYX_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
@ -284,7 +284,9 @@ cdef class IECore:
# If the parameter is not specified, the default configuration is handled automatically.
# @return Instance of IECore class
def __cinit__(self, xml_config_file: str = ""):
self.impl = C.IECore(xml_config_file.encode())
cdef string c_xml_config_file = xml_config_file.encode()
with nogil:
self.impl = C.IECore(c_xml_config_file)

## Get a `namedtuple` object with versions of the plugin specified
# @param device_name: Name of the the registered plugin
@ -326,12 +328,15 @@ cdef class IECore:
|
|||||||
cdef string weights_
|
cdef string weights_
|
||||||
cdef string model_
|
cdef string model_
|
||||||
cdef IENetwork net = IENetwork()
|
cdef IENetwork net = IENetwork()
|
||||||
|
cdef size_t bin_size
|
||||||
if init_from_buffer:
|
if init_from_buffer:
|
||||||
model_ = bytes(model)
|
model_ = bytes(model)
|
||||||
net.impl = self.impl.readNetwork(model_, weights, len(weights))
|
bin_buffer = <uint8_t*> weights
|
||||||
|
bin_size = len(weights)
|
||||||
|
with nogil:
|
||||||
|
net.impl = self.impl.readNetwork(model_, bin_buffer, bin_size)
|
||||||
else:
|
else:
|
||||||
weights_ = "".encode()
|
weights_ = "".encode()
|
||||||
|
|
||||||
model = os.fspath(model)
|
model = os.fspath(model)
|
||||||
if not os.path.isfile(model):
|
if not os.path.isfile(model):
|
||||||
raise Exception(f"Path to the model {model} doesn't exist or it's a directory")
|
raise Exception(f"Path to the model {model} doesn't exist or it's a directory")
|
||||||
@ -342,8 +347,8 @@ cdef class IECore:
|
|||||||
if not os.path.isfile(weights):
|
if not os.path.isfile(weights):
|
||||||
raise Exception(f"Path to the weights {weights} doesn't exist or it's a directory")
|
raise Exception(f"Path to the weights {weights} doesn't exist or it's a directory")
|
||||||
weights_ = weights.encode()
|
weights_ = weights.encode()
|
||||||
|
with nogil:
|
||||||
net.impl = self.impl.readNetwork(model_, weights_)
|
net.impl = self.impl.readNetwork(model_, weights_)
|
||||||
return net
|
return net
|
||||||
|
|
||||||
## Loads a network that was read from the Intermediate Representation (IR) to the plugin with specified device name
|
## Loads a network that was read from the Intermediate Representation (IR) to the plugin with specified device name
|
||||||
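The with nogil blocks added in the hunks above release the Python GIL while the long-running C++ call executes, so other Python threads keep running during model parsing; only plain C objects (here, pre-encoded std::string variables) may be touched inside such a block. Underneath, read_network() forwards to the Inference Engine Core; a hedged C++ sketch of the equivalent direct call (2021.x API, placeholder file names):

#include <string>
#include <inference_engine.hpp>

InferenceEngine::CNNNetwork read_ir(InferenceEngine::Core& core,
                                    const std::string& xml_path, const std::string& bin_path) {
    // The heavy, GIL-free work happens entirely on the C++ side.
    return core.ReadNetwork(xml_path, bin_path);
}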
@ -367,16 +372,22 @@ cdef class IECore:
|
|||||||
cpdef ExecutableNetwork load_network(self, network: [IENetwork, str], str device_name, config=None, int num_requests=1):
|
cpdef ExecutableNetwork load_network(self, network: [IENetwork, str], str device_name, config=None, int num_requests=1):
|
||||||
cdef ExecutableNetwork exec_net = ExecutableNetwork()
|
cdef ExecutableNetwork exec_net = ExecutableNetwork()
|
||||||
cdef map[string, string] c_config
|
cdef map[string, string] c_config
|
||||||
|
cdef string c_device_name
|
||||||
|
cdef string c_network_path
|
||||||
if num_requests < 0:
|
if num_requests < 0:
|
||||||
raise ValueError(f"Incorrect number of requests specified: {num_requests}. Expected positive integer number "
|
raise ValueError(f"Incorrect number of requests specified: {num_requests}. Expected positive integer number "
|
||||||
"or zero for auto detection")
|
"or zero for auto detection")
|
||||||
if config:
|
if config:
|
||||||
c_config = dict_to_c_map(config)
|
c_config = dict_to_c_map(config)
|
||||||
exec_net.ie_core_impl = self.impl
|
exec_net.ie_core_impl = self.impl
|
||||||
|
c_device_name = device_name.encode()
|
||||||
if isinstance(network, str):
|
if isinstance(network, str):
|
||||||
exec_net.impl = move(self.impl.loadNetworkFromFile((<str>network).encode(), device_name.encode(), c_config, num_requests))
|
c_network_path = network.encode()
|
||||||
|
with nogil:
|
||||||
|
exec_net.impl = move(self.impl.loadNetworkFromFile(c_network_path, c_device_name, c_config, num_requests))
|
||||||
else:
|
else:
|
||||||
exec_net.impl = move(self.impl.loadNetwork((<IENetwork>network).impl, device_name.encode(), c_config, num_requests))
|
with nogil:
|
||||||
|
exec_net.impl = move(self.impl.loadNetwork((<IENetwork>network).impl, c_device_name, c_config, num_requests))
|
||||||
return exec_net
|
return exec_net
|
||||||
|
|
||||||
## Creates an executable network from a previously exported network
|
## Creates an executable network from a previously exported network
|
||||||
@ -534,7 +545,9 @@ cdef class IECore:
|
|||||||
# If there are more than one device of a specific type, they all are listed followed by a dot and a number.
|
# If there are more than one device of a specific type, they all are listed followed by a dot and a number.
|
||||||
@property
|
@property
|
||||||
def available_devices(self):
|
def available_devices(self):
|
||||||
cdef vector[string] c_devices = self.impl.getAvailableDevices()
|
cdef vector[string] c_devices
|
||||||
|
with nogil:
|
||||||
|
c_devices = self.impl.getAvailableDevices()
|
||||||
return [d.decode() for d in c_devices]
|
return [d.decode() for d in c_devices]
|
||||||
|
|
||||||
## This structure stores info about pre-processing of network inputs (scale, mean image, ...)
|
## This structure stores info about pre-processing of network inputs (scale, mean image, ...)
|
||||||
@ -897,15 +910,19 @@ cdef class ExecutableNetwork:
|
|||||||
## A tuple of `InferRequest` instances
|
## A tuple of `InferRequest` instances
|
||||||
@property
|
@property
|
||||||
def requests(self):
|
def requests(self):
|
||||||
|
cdef size_t c_infer_requests_size
|
||||||
|
with nogil:
|
||||||
|
c_infer_requests_size = deref(self.impl).infer_requests.size()
|
||||||
if len(self._infer_requests) == 0:
|
if len(self._infer_requests) == 0:
|
||||||
for i in range(deref(self.impl).infer_requests.size()):
|
for i in range(c_infer_requests_size):
|
||||||
infer_request = InferRequest()
|
infer_request = InferRequest()
|
||||||
infer_request.impl = &(deref(self.impl).infer_requests[i])
|
with nogil:
|
||||||
|
infer_request.impl = &(deref(self.impl).infer_requests[i])
|
||||||
infer_request._inputs_list = list(self.input_info.keys())
|
infer_request._inputs_list = list(self.input_info.keys())
|
||||||
infer_request._outputs_list = list(self.outputs.keys())
|
infer_request._outputs_list = list(self.outputs.keys())
|
||||||
self._infer_requests.append(infer_request)
|
self._infer_requests.append(infer_request)
|
||||||
|
|
||||||
if len(self._infer_requests) != deref(self.impl).infer_requests.size():
|
if len(self._infer_requests) != c_infer_requests_size:
|
||||||
raise Exception("Mismatch of infer requests number!")
|
raise Exception("Mismatch of infer requests number!")
|
||||||
|
|
||||||
return self._infer_requests
|
return self._infer_requests
|
||||||
@ -923,26 +940,6 @@ cdef class ExecutableNetwork:
|
|||||||
inputs[in_.first.decode()] = input_info_ptr
|
inputs[in_.first.decode()] = input_info_ptr
|
||||||
return inputs
|
return inputs
|
||||||
|
|
||||||
## \note The property is deprecated. Please use the input_info property
|
|
||||||
# to get the map of inputs
|
|
||||||
#
|
|
||||||
## A dictionary that maps input layer names to DataPtr objects
|
|
||||||
@property
|
|
||||||
def inputs(self):
|
|
||||||
warnings.warn("'inputs' property of ExecutableNetwork class is deprecated. "
|
|
||||||
"To access DataPtrs user need to use 'input_data' property "
|
|
||||||
"of InputInfoCPtr objects which can be accessed by 'input_info' property.",
|
|
||||||
DeprecationWarning)
|
|
||||||
cdef map[string, C.DataPtr] c_inputs = deref(self.impl).getInputs()
|
|
||||||
inputs = {}
|
|
||||||
cdef DataPtr data_ptr
|
|
||||||
for in_ in c_inputs:
|
|
||||||
data_ptr = DataPtr()
|
|
||||||
data_ptr._ptr = in_.second
|
|
||||||
data_ptr._ptr_plugin = deref(self.impl).getPluginLink()
|
|
||||||
inputs[in_.first.decode()] = data_ptr
|
|
||||||
return inputs
|
|
||||||
|
|
||||||
## A dictionary that maps output layer names to CDataPtr objects
|
## A dictionary that maps output layer names to CDataPtr objects
|
||||||
@property
|
@property
|
||||||
def outputs(self):
|
def outputs(self):
|
||||||
@ -1022,16 +1019,26 @@ cdef class ExecutableNetwork:
|
|||||||
# If not specified, `timeout` value is set to -1 by default.
|
# If not specified, `timeout` value is set to -1 by default.
|
||||||
# @return Request status code: OK or RESULT_NOT_READY
|
# @return Request status code: OK or RESULT_NOT_READY
|
||||||
cpdef wait(self, num_requests=None, timeout=None):
|
cpdef wait(self, num_requests=None, timeout=None):
|
||||||
|
cdef int status_code
|
||||||
|
cdef int64_t c_timeout
|
||||||
|
cdef int c_num_requests
|
||||||
if num_requests is None:
|
if num_requests is None:
|
||||||
num_requests = len(self.requests)
|
num_requests = len(self.requests)
|
||||||
|
c_num_requests = <int> num_requests
|
||||||
if timeout is None:
|
if timeout is None:
|
||||||
timeout = WaitMode.RESULT_READY
|
timeout = WaitMode.RESULT_READY
|
||||||
return deref(self.impl).wait(<int> num_requests, <int64_t> timeout)
|
c_timeout = <int64_t> timeout
|
||||||
|
with nogil:
|
||||||
|
status_code = deref(self.impl).wait(c_num_requests, c_timeout)
|
||||||
|
return status_code
|
||||||
|
|
||||||
## Get idle request ID
|
## Get idle request ID
|
||||||
# @return Request index
|
# @return Request index
|
||||||
cpdef get_idle_request_id(self):
|
cpdef get_idle_request_id(self):
|
||||||
return deref(self.impl).getIdleRequestId()
|
cdef int request_id
|
||||||
|
with nogil:
|
||||||
|
request_id = deref(self.impl).getIdleRequestId()
|
||||||
|
return request_id
|
||||||
|
|
||||||
ctypedef extern void (*cb_type)(void*, int) with gil
|
ctypedef extern void (*cb_type)(void*, int) with gil
|
||||||
|
|
||||||
@ -1177,8 +1184,8 @@ cdef class InferRequest:
|
|||||||
cpdef infer(self, inputs=None):
|
cpdef infer(self, inputs=None):
|
||||||
if inputs is not None:
|
if inputs is not None:
|
||||||
self._fill_inputs(inputs)
|
self._fill_inputs(inputs)
|
||||||
|
with nogil:
|
||||||
deref(self.impl).infer()
|
deref(self.impl).infer()
|
||||||
|
|
||||||
## Starts asynchronous inference of the infer request and fill outputs array
|
## Starts asynchronous inference of the infer request and fill outputs array
|
||||||
#
|
#
|
||||||
@ -1197,7 +1204,8 @@ cdef class InferRequest:
|
|||||||
self._fill_inputs(inputs)
|
self._fill_inputs(inputs)
|
||||||
if self._py_callback_used:
|
if self._py_callback_used:
|
||||||
self._py_callback_called.clear()
|
self._py_callback_called.clear()
|
||||||
deref(self.impl).infer_async()
|
with nogil:
|
||||||
|
deref(self.impl).infer_async()
|
||||||
|
|
||||||
## Waits for the result to become available. Blocks until specified timeout elapses or the result
|
## Waits for the result to become available. Blocks until specified timeout elapses or the result
|
||||||
# becomes available, whichever comes first.
|
# becomes available, whichever comes first.
|
||||||
@ -1213,9 +1221,14 @@ cdef class InferRequest:
|
|||||||
#
|
#
|
||||||
# Usage example: See `async_infer()` method of the the `InferRequest` class.
|
# Usage example: See `async_infer()` method of the the `InferRequest` class.
|
||||||
cpdef wait(self, timeout=None):
|
cpdef wait(self, timeout=None):
|
||||||
|
cdef int status
|
||||||
|
cdef int64_t c_timeout
|
||||||
|
cdef int c_wait_mode
|
||||||
if self._py_callback_used:
|
if self._py_callback_used:
|
||||||
# check request status to avoid blocking for idle requests
|
# check request status to avoid blocking for idle requests
|
||||||
status = deref(self.impl).wait(WaitMode.STATUS_ONLY)
|
c_wait_mode = WaitMode.STATUS_ONLY
|
||||||
|
with nogil:
|
||||||
|
status = deref(self.impl).wait(c_wait_mode)
|
||||||
if status != StatusCode.RESULT_NOT_READY:
|
if status != StatusCode.RESULT_NOT_READY:
|
||||||
return status
|
return status
|
||||||
if not self._py_callback_called.is_set():
|
if not self._py_callback_called.is_set():
|
||||||
@ -1230,8 +1243,10 @@ cdef class InferRequest:
|
|||||||
|
|
||||||
if timeout is None:
|
if timeout is None:
|
||||||
timeout = WaitMode.RESULT_READY
|
timeout = WaitMode.RESULT_READY
|
||||||
|
c_timeout = <int64_t> timeout
|
||||||
return deref(self.impl).wait(<int64_t> timeout)
|
with nogil:
|
||||||
|
status = deref(self.impl).wait(c_timeout)
|
||||||
|
return status
|
||||||
|
|
||||||
## Queries performance measures per layer to get feedback of what is the most time consuming layer.
|
## Queries performance measures per layer to get feedback of what is the most time consuming layer.
|
||||||
#
|
#
|
||||||
@ -1268,27 +1283,6 @@ cdef class InferRequest:
|
|||||||
"cpu_time": info.cpu_time, "execution_index": info.execution_index}
|
"cpu_time": info.cpu_time, "execution_index": info.execution_index}
|
||||||
return profile
|
return profile
|
||||||
|
|
||||||
## A dictionary that maps input layer names to `numpy.ndarray`
|
|
||||||
# objects of proper shape with input data for the layer
|
|
||||||
@property
|
|
||||||
def inputs(self):
|
|
||||||
warnings.warn("'inputs' property of InferRequest is deprecated. Please instead use 'input_blobs' property.",
|
|
||||||
DeprecationWarning)
|
|
||||||
inputs = {}
|
|
||||||
for input in self._inputs_list:
|
|
||||||
inputs[input] = self._get_blob_buffer(input.encode()).to_numpy()
|
|
||||||
return inputs
|
|
||||||
|
|
||||||
## A dictionary that maps output layer names to `numpy.ndarray` objects with output data of the layer
|
|
||||||
@property
|
|
||||||
def outputs(self):
|
|
||||||
warnings.warn("'outputs' property of InferRequest is deprecated. Please instead use 'output_blobs' property.",
|
|
||||||
DeprecationWarning)
|
|
||||||
outputs = {}
|
|
||||||
for output in self._outputs_list:
|
|
||||||
outputs[output] = self._get_blob_buffer(output.encode()).to_numpy()
|
|
||||||
return deepcopy(outputs)
|
|
||||||
|
|
||||||
## Current infer request inference time in milliseconds
|
## Current infer request inference time in milliseconds
|
||||||
@property
|
@property
|
||||||
def latency(self):
|
def latency(self):
|
||||||
@ -1333,68 +1327,25 @@ cdef class InferRequest:
|
|||||||
cdef class IENetwork:
|
cdef class IENetwork:
|
||||||
## Class constructor
|
## Class constructor
|
||||||
#
|
#
|
||||||
# \note Reading networks using IENetwork constructor is deprecated.
|
# @param model: A PyCapsule containing smart pointer to nGraph function.
|
||||||
# Please, use IECore.read_network() method instead.
|
|
||||||
#
|
#
|
||||||
# @param model: A `.xml` file of the IR or PyCapsule containing smart pointer to nGraph function.
|
|
||||||
# In case of passing a `.xml` file attribute value can be a string path or bytes with file content
|
|
||||||
# depending on `init_from_buffer` attribute value
|
|
||||||
# .
|
|
||||||
# @param weights: A `.bin` file of the IR. Depending on `init_from_buffer` value, can be a string path or
|
|
||||||
# bytes with file content.
|
|
||||||
# @param init_from_buffer: Defines the way of how `model` and `weights` attributes are interpreted.
|
|
||||||
# If `False`, attributes are interpreted as strings with paths to .xml and .bin files
|
|
||||||
# of IR. If `True`, they are interpreted as Python `bytes` object with .xml and .bin files content.
|
|
||||||
# Ignored in case of `IENetwork` object initialization from nGraph function.
|
|
||||||
# @return Instance of IENetwork class
|
# @return Instance of IENetwork class
|
||||||
#
|
#
|
||||||
# Usage example:\n
|
# Usage example:\n
|
||||||
# Initializing `IENetwork` object from IR files:
|
# Initializing `IENetwork` object from IR files:
|
||||||
# ```python
|
# ```python
|
||||||
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
|
# func = Function([relu], [param], 'test')
|
||||||
|
# caps = Function.to_capsule(func)
|
||||||
|
# net = IENetwork(caps)
|
||||||
# ```
|
# ```
|
||||||
#
|
def __cinit__(self, model = None):
|
||||||
# Initializing `IENetwork` object bytes with content of IR files:
|
|
||||||
# ```python
|
|
||||||
# with open(path_to_bin_file, 'rb') as f:
|
|
||||||
# bin = f.read()
|
|
||||||
# with open(path_to_xml_file, 'rb') as f:
|
|
||||||
# xml = f.read()
|
|
||||||
# net = IENetwork(model=xml, weights=bin, init_from_buffer=True)
|
|
||||||
# ```
|
|
||||||
|
|
||||||
def __cinit__(self, model: [str, bytes] = "", weights: [str, bytes] = "", init_from_buffer: bool = False):
|
|
||||||
# Try to create Inference Engine network from capsule
|
# Try to create Inference Engine network from capsule
|
||||||
if model.__class__.__name__ == 'PyCapsule' and weights == '' and init_from_buffer is False:
|
if model is not None:
|
||||||
self.impl = C.IENetwork(model)
|
with nogil:
|
||||||
return
|
self.impl = C.IENetwork(model)
|
||||||
cdef char*xml_buffer = <char*> malloc(len(model)+1)
|
|
||||||
cdef uint8_t*bin_buffer = <uint8_t *> malloc(len(weights))
|
|
||||||
cdef string model_
|
|
||||||
cdef string weights_
|
|
||||||
if init_from_buffer:
|
|
||||||
warnings.warn("Reading network using constructor is deprecated. "
|
|
||||||
"Please, use IECore.read_network() method instead", DeprecationWarning)
|
|
||||||
memcpy(xml_buffer, <char*> model, len(model))
|
|
||||||
memcpy(bin_buffer, <uint8_t *> weights, len(weights))
|
|
||||||
xml_buffer[len(model)] = b'\0'
|
|
||||||
self.impl = C.IENetwork()
|
|
||||||
self.impl.load_from_buffer(xml_buffer, len(model), bin_buffer, len(weights))
|
|
||||||
else:
|
else:
|
||||||
if model and weights:
|
with nogil:
|
||||||
warnings.warn("Reading network using constructor is deprecated. "
|
|
||||||
"Please, use IECore.read_network() method instead", DeprecationWarning)
|
|
||||||
if not os.path.isfile(model):
|
|
||||||
raise Exception(f"Path to the model {model} doesn't exist or it's a directory")
|
|
||||||
if not os.path.isfile(weights):
|
|
||||||
raise Exception(f"Path to the weights {weights} doesn't exist or it's a directory")
|
|
||||||
model_ = model.encode()
|
|
||||||
weights_ = weights.encode()
|
|
||||||
self.impl = C.IENetwork(model_, weights_)
|
|
||||||
else:
|
|
||||||
self.impl = C.IENetwork()
|
self.impl = C.IENetwork()
|
||||||
free(bin_buffer)
|
|
||||||
free(xml_buffer)
|
|
||||||
|
|
||||||
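With the file and buffer constructor paths gone, IR models are read only through `IECore.read_network`; the constructor keeps just the nGraph capsule path shown in the docstring above. A migration sketch with placeholder file paths:

from openvino.inference_engine import IECore, IENetwork

ie = IECore()

# Before (removed): net = IENetwork(model="model.xml", weights="model.bin")
net = ie.read_network(model="model.xml", weights="model.bin")
assert isinstance(net, IENetwork)

# Reading from in-memory buffers also goes through read_network now.
with open("model.xml", "rb") as f:
    xml = f.read()
with open("model.bin", "rb") as f:
    weights = f.read()
net_from_buffer = ie.read_network(model=xml, weights=weights, init_from_buffer=True)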
## Name of the loaded network
|
## Name of the loaded network
|
||||||
@property
|
@property
|
||||||
@ -1405,7 +1356,9 @@ cdef class IENetwork:
|
|||||||
## A dictionary that maps input layer names to InputInfoPtr objects.
|
## A dictionary that maps input layer names to InputInfoPtr objects.
|
||||||
@property
|
@property
|
||||||
def input_info(self):
|
def input_info(self):
|
||||||
cdef map[string, C.InputInfo.Ptr] c_inputs = self.impl.getInputsInfo()
|
cdef map[string, C.InputInfo.Ptr] c_inputs
|
||||||
|
with nogil:
|
||||||
|
c_inputs = self.impl.getInputsInfo()
|
||||||
inputs = {}
|
inputs = {}
|
||||||
cdef InputInfoPtr input_info_ptr
|
cdef InputInfoPtr input_info_ptr
|
||||||
for input in c_inputs:
|
for input in c_inputs:
|
||||||
@ -1415,30 +1368,12 @@ cdef class IENetwork:
|
|||||||
inputs[input.first.decode()] = input_info_ptr
|
inputs[input.first.decode()] = input_info_ptr
|
||||||
return inputs
|
return inputs
|
||||||
|
|
||||||
## \note The property is deprecated. Please use the input_info property
|
|
||||||
# to get the map of inputs
|
|
||||||
#
|
|
||||||
## A dictionary that maps input layer names to DataPtr objects
|
|
||||||
@property
|
|
||||||
def inputs(self):
|
|
||||||
warnings.warn("'inputs' property of IENetwork class is deprecated. "
|
|
||||||
"To access DataPtrs user need to use 'input_data' property "
|
|
||||||
"of InputInfoPtr objects which can be accessed by 'input_info' property.",
|
|
||||||
DeprecationWarning)
|
|
||||||
cdef map[string, C.DataPtr] c_inputs = self.impl.getInputs()
|
|
||||||
inputs = {}
|
|
||||||
cdef DataPtr data_ptr
|
|
||||||
for input in c_inputs:
|
|
||||||
data_ptr = DataPtr()
|
|
||||||
data_ptr._ptr_network = &self.impl
|
|
||||||
data_ptr._ptr = input.second
|
|
||||||
inputs[input.first.decode()] = data_ptr
|
|
||||||
return inputs
|
|
||||||
|
|
||||||
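The removed `inputs` property of `IENetwork` directs users to `input_info`; each `InputInfoPtr` still exposes the underlying `DataPtr` through `input_data`, so shape, precision and layout queries migrate as below ('data' is a placeholder input name):

# Before (removed): data = net.inputs['data']
data = net.input_info['data'].input_data   # DataPtr, as the deprecation message describes

print(data.shape, data.precision, data.layout)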
## A dictionary that maps output layer names to DataPtr objects
|
## A dictionary that maps output layer names to DataPtr objects
|
||||||
@property
|
@property
|
||||||
def outputs(self):
|
def outputs(self):
|
||||||
cdef map[string, C.DataPtr] c_outputs = self.impl.getOutputs()
|
cdef map[string, C.DataPtr] c_outputs
|
||||||
|
with nogil:
|
||||||
|
c_outputs = self.impl.getOutputs()
|
||||||
outputs = {}
|
outputs = {}
|
||||||
cdef DataPtr data_ptr
|
cdef DataPtr data_ptr
|
||||||
for output in c_outputs:
|
for output in c_outputs:
|
||||||
|
@ -200,14 +200,6 @@ InferenceEnginePython::IENetwork InferenceEnginePython::read_network(std::string
|
|||||||
return InferenceEnginePython::IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(net));
|
return InferenceEnginePython::IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(net));
|
||||||
}
|
}
|
||||||
|
|
||||||
InferenceEnginePython::IENetwork::IENetwork(const std::string& model, const std::string& weights) {
|
|
||||||
InferenceEngine::Core reader;
|
|
||||||
auto net = reader.ReadNetwork(model, weights);
|
|
||||||
actual = std::make_shared<InferenceEngine::CNNNetwork>(net);
|
|
||||||
name = actual->getName();
|
|
||||||
batch_size = actual->getBatchSize();
|
|
||||||
}
|
|
||||||
|
|
||||||
InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork>& cnn_network): actual(cnn_network) {
|
InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork>& cnn_network): actual(cnn_network) {
|
||||||
if (actual == nullptr)
|
if (actual == nullptr)
|
||||||
IE_THROW() << "IENetwork was not initialized.";
|
IE_THROW() << "IENetwork was not initialized.";
|
||||||
@ -228,16 +220,6 @@ InferenceEnginePython::IENetwork::IENetwork(PyObject* network) {
|
|||||||
batch_size = actual->getBatchSize();
|
batch_size = actual->getBatchSize();
|
||||||
}
|
}
|
||||||
|
|
||||||
void InferenceEnginePython::IENetwork::load_from_buffer(const char* xml, size_t xml_size, uint8_t* bin, size_t bin_size) {
|
|
||||||
InferenceEngine::Core reader;
|
|
||||||
InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, {bin_size}, InferenceEngine::Layout::C);
|
|
||||||
auto weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc, bin, bin_size);
|
|
||||||
auto net = reader.ReadNetwork(std::string(xml, xml + xml_size), weights_blob);
|
|
||||||
name = net.getName();
|
|
||||||
actual = std::make_shared<InferenceEngine::CNNNetwork>(net);
|
|
||||||
batch_size = actual->getBatchSize();
|
|
||||||
}
|
|
||||||
|
|
||||||
void InferenceEnginePython::IENetwork::serialize(const std::string& path_to_xml, const std::string& path_to_bin) {
|
void InferenceEnginePython::IENetwork::serialize(const std::string& path_to_xml, const std::string& path_to_bin) {
|
||||||
actual->serialize(path_to_xml, path_to_bin);
|
actual->serialize(path_to_xml, path_to_bin);
|
||||||
}
|
}
|
||||||
@ -275,15 +257,6 @@ const std::map<std::string, InferenceEngine::InputInfo::Ptr> InferenceEnginePyth
|
|||||||
return inputs;
|
return inputs;
|
||||||
}
|
}
|
||||||
|
|
||||||
const std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getInputs() {
|
|
||||||
std::map<std::string, InferenceEngine::DataPtr> inputs;
|
|
||||||
const InferenceEngine::InputsDataMap& inputsInfo = actual->getInputsInfo();
|
|
||||||
for (auto& in : inputsInfo) {
|
|
||||||
inputs[in.first] = in.second->getInputData();
|
|
||||||
}
|
|
||||||
return inputs;
|
|
||||||
}
|
|
||||||
|
|
||||||
const std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getOutputs() {
|
const std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getOutputs() {
|
||||||
std::map<std::string, InferenceEngine::DataPtr> outputs;
|
std::map<std::string, InferenceEngine::DataPtr> outputs;
|
||||||
const InferenceEngine::OutputsDataMap& outputsInfo = actual->getOutputsInfo();
|
const InferenceEngine::OutputsDataMap& outputsInfo = actual->getOutputsInfo();
|
||||||
@ -338,15 +311,6 @@ void InferenceEnginePython::IEExecNetwork::exportNetwork(const std::string& mode
|
|||||||
actual->Export(model_file);
|
actual->Export(model_file);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IEExecNetwork::getInputs() {
|
|
||||||
InferenceEngine::ConstInputsDataMap inputsDataMap = actual->GetInputsInfo();
|
|
||||||
std::map<std::string, InferenceEngine::DataPtr> pyInputs;
|
|
||||||
for (const auto& item : inputsDataMap) {
|
|
||||||
pyInputs[item.first] = item.second->getInputData();
|
|
||||||
}
|
|
||||||
return pyInputs;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::map<std::string, InferenceEngine::InputInfo::CPtr> InferenceEnginePython::IEExecNetwork::getInputsInfo() {
|
std::map<std::string, InferenceEngine::InputInfo::CPtr> InferenceEnginePython::IEExecNetwork::getInputsInfo() {
|
||||||
InferenceEngine::ConstInputsDataMap inputsDataMap = actual->GetInputsInfo();
|
InferenceEngine::ConstInputsDataMap inputsDataMap = actual->GetInputsInfo();
|
||||||
std::map<std::string, InferenceEngine::InputInfo::CPtr> pyInputs;
|
std::map<std::string, InferenceEngine::InputInfo::CPtr> pyInputs;
|
||||||
|
@ -60,18 +60,12 @@ struct IENetwork {
|
|||||||
|
|
||||||
const std::map<std::string, InferenceEngine::InputInfo::Ptr> getInputsInfo();
|
const std::map<std::string, InferenceEngine::InputInfo::Ptr> getInputsInfo();
|
||||||
|
|
||||||
const std::map<std::string, InferenceEngine::DataPtr> getInputs();
|
|
||||||
|
|
||||||
const std::map<std::string, InferenceEngine::DataPtr> getOutputs();
|
const std::map<std::string, InferenceEngine::DataPtr> getOutputs();
|
||||||
|
|
||||||
void reshape(const std::map<std::string, std::vector<size_t>>& input_shapes);
|
void reshape(const std::map<std::string, std::vector<size_t>>& input_shapes);
|
||||||
|
|
||||||
void serialize(const std::string& path_to_xml, const std::string& path_to_bin);
|
void serialize(const std::string& path_to_xml, const std::string& path_to_bin);
|
||||||
|
|
||||||
void load_from_buffer(const char* xml, size_t xml_size, uint8_t* bin, size_t bin_size);
|
|
||||||
|
|
||||||
IENetwork(const std::string& model, const std::string& weights);
|
|
||||||
|
|
||||||
IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork>& cnn_network);
|
IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork>& cnn_network);
|
||||||
|
|
||||||
IENetwork(PyObject* network);
|
IENetwork(PyObject* network);
|
||||||
@ -146,7 +140,6 @@ struct IEExecNetwork {
|
|||||||
void exportNetwork(const std::string& model_file);
|
void exportNetwork(const std::string& model_file);
|
||||||
|
|
||||||
std::map<std::string, InferenceEngine::InputInfo::CPtr> getInputsInfo();
|
std::map<std::string, InferenceEngine::InputInfo::CPtr> getInputsInfo();
|
||||||
std::map<std::string, InferenceEngine::DataPtr> getInputs();
|
|
||||||
std::map<std::string, InferenceEngine::CDataPtr> getOutputs();
|
std::map<std::string, InferenceEngine::CDataPtr> getOutputs();
|
||||||
|
|
||||||
PyObject* getMetric(const std::string& metric_name);
|
PyObject* getMetric(const std::string& metric_name);
|
||||||
|
@ -14,7 +14,7 @@ cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
|
|||||||
ctypedef vector[size_t] SizeVector
|
ctypedef vector[size_t] SizeVector
|
||||||
|
|
||||||
cdef cppclass CExecutableNetwork "InferenceEngine::ExecutableNetwork"
|
cdef cppclass CExecutableNetwork "InferenceEngine::ExecutableNetwork"
|
||||||
|
|
||||||
cdef cppclass TBlob[T]:
|
cdef cppclass TBlob[T]:
|
||||||
ctypedef shared_ptr[TBlob[T]] Ptr
|
ctypedef shared_ptr[TBlob[T]] Ptr
|
||||||
|
|
||||||
@ -154,27 +154,24 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
|
|||||||
cdef cppclass IEExecNetwork:
|
cdef cppclass IEExecNetwork:
|
||||||
vector[InferRequestWrap] infer_requests
|
vector[InferRequestWrap] infer_requests
|
||||||
IENetwork GetExecGraphInfo() except +
|
IENetwork GetExecGraphInfo() except +
|
||||||
map[string, DataPtr] getInputs() except +
|
|
||||||
map[string, CDataPtr] getOutputs() except +
|
map[string, CDataPtr] getOutputs() except +
|
||||||
map[string, InputInfo.CPtr] getInputsInfo()
|
map[string, InputInfo.CPtr] getInputsInfo()
|
||||||
void exportNetwork(const string & model_file) except +
|
void exportNetwork(const string & model_file) except +
|
||||||
object getMetric(const string & metric_name) except +
|
object getMetric(const string & metric_name) except +
|
||||||
object getConfig(const string & metric_name) except +
|
object getConfig(const string & metric_name) except +
|
||||||
int wait(int num_requests, int64_t timeout)
|
int wait(int num_requests, int64_t timeout) nogil
|
||||||
int getIdleRequestId()
|
int getIdleRequestId() nogil
|
||||||
shared_ptr[CExecutableNetwork] getPluginLink() except +
|
shared_ptr[CExecutableNetwork] getPluginLink() except +
|
||||||
|
|
||||||
cdef cppclass IENetwork:
|
cdef cppclass IENetwork:
|
||||||
IENetwork() except +
|
IENetwork() nogil except +
|
||||||
IENetwork(object) except +
|
IENetwork(object) nogil except +
|
||||||
IENetwork(const string &, const string &) except +
|
|
||||||
string name
|
string name
|
||||||
size_t batch_size
|
size_t batch_size
|
||||||
string precision
|
string precision
|
||||||
map[string, vector[size_t]] inputs
|
map[string, vector[size_t]] inputs
|
||||||
const map[string, InputInfo.Ptr] getInputsInfo() except +
|
const map[string, InputInfo.Ptr] getInputsInfo() nogil except +
|
||||||
const map[string, DataPtr] getInputs() except +
|
map[string, DataPtr] getOutputs() nogil except +
|
||||||
map[string, DataPtr] getOutputs() except +
|
|
||||||
void addOutput(string &, size_t) except +
|
void addOutput(string &, size_t) except +
|
||||||
void setAffinity(map[string, string] & types_affinity_map, map[string, string] & layers_affinity_map) except +
|
void setAffinity(map[string, string] & types_affinity_map, map[string, string] & layers_affinity_map) except +
|
||||||
void setBatch(size_t size) except +
|
void setBatch(size_t size) except +
|
||||||
@ -182,7 +179,6 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
|
|||||||
void setLayerParams(map[string, map[string, string]] params_map) except +
|
void setLayerParams(map[string, map[string, string]] params_map) except +
|
||||||
void serialize(const string& path_to_xml, const string& path_to_bin) except +
|
void serialize(const string& path_to_xml, const string& path_to_bin) except +
|
||||||
void reshape(map[string, vector[size_t]] input_shapes) except +
|
void reshape(map[string, vector[size_t]] input_shapes) except +
|
||||||
void load_from_buffer(const char*xml, size_t xml_size, uint8_t*bin, size_t bin_size) except +
|
|
||||||
object getFunction() except +
|
object getFunction() except +
|
||||||
void convertToOldRepresentation() except +
|
void convertToOldRepresentation() except +
|
||||||
string getOVNameForTensor(const string &) except +
|
string getOVNameForTensor(const string &) except +
|
||||||
@ -195,23 +191,23 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
|
|||||||
void setBlob(const string &blob_name, const CBlob.Ptr &blob_ptr, CPreProcessInfo& info) except +
|
void setBlob(const string &blob_name, const CBlob.Ptr &blob_ptr, CPreProcessInfo& info) except +
|
||||||
const CPreProcessInfo& getPreProcess(const string& blob_name) except +
|
const CPreProcessInfo& getPreProcess(const string& blob_name) except +
|
||||||
map[string, ProfileInfo] getPerformanceCounts() except +
|
map[string, ProfileInfo] getPerformanceCounts() except +
|
||||||
void infer() except +
|
void infer() nogil except +
|
||||||
void infer_async() except +
|
void infer_async() nogil except +
|
||||||
int wait(int64_t timeout) except +
|
int wait(int64_t timeout) nogil except +
|
||||||
void setBatch(int size) except +
|
void setBatch(int size) except +
|
||||||
void setCyCallback(void (*)(void*, int), void *) except +
|
void setCyCallback(void (*)(void*, int), void *) except +
|
||||||
vector[CVariableState] queryState() except +
|
vector[CVariableState] queryState() except +
|
||||||
|
|
||||||
cdef cppclass IECore:
|
cdef cppclass IECore:
|
||||||
IECore() except +
|
IECore() nogil except +
|
||||||
IECore(const string & xml_config_file) except +
|
IECore(const string & xml_config_file) nogil except +
|
||||||
map[string, Version] getVersions(const string & deviceName) except +
|
map[string, Version] getVersions(const string & deviceName) except +
|
||||||
IENetwork readNetwork(const string& modelPath, const string& binPath) except +
|
IENetwork readNetwork(const string& modelPath, const string& binPath) nogil except +
|
||||||
IENetwork readNetwork(const string& modelPath, uint8_t* bin, size_t bin_size) except +
|
IENetwork readNetwork(const string& modelPath, uint8_t* bin, size_t bin_size) nogil except +
|
||||||
unique_ptr[IEExecNetwork] loadNetwork(IENetwork network, const string deviceName,
|
unique_ptr[IEExecNetwork] loadNetwork(IENetwork network, const string deviceName,
|
||||||
const map[string, string] & config, int num_requests) except +
|
const map[string, string] & config, int num_requests) nogil except +
|
||||||
unique_ptr[IEExecNetwork] loadNetworkFromFile(const string & modelPath, const string & deviceName,
|
unique_ptr[IEExecNetwork] loadNetworkFromFile(const string & modelPath, const string & deviceName,
|
||||||
const map[string, string] & config, int num_requests) except +
|
const map[string, string] & config, int num_requests) nogil except +
|
||||||
unique_ptr[IEExecNetwork] importNetwork(const string & modelFIle, const string & deviceName,
|
unique_ptr[IEExecNetwork] importNetwork(const string & modelFIle, const string & deviceName,
|
||||||
const map[string, string] & config, int num_requests) except +
|
const map[string, string] & config, int num_requests) except +
|
||||||
map[string, string] queryNetwork(IENetwork network, const string deviceName,
|
map[string, string] queryNetwork(IENetwork network, const string deviceName,
|
||||||
@ -221,7 +217,7 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
|
|||||||
void unregisterPlugin(const string & deviceName) except +
|
void unregisterPlugin(const string & deviceName) except +
|
||||||
void registerPlugins(const string & xmlConfigFile) except +
|
void registerPlugins(const string & xmlConfigFile) except +
|
||||||
void addExtension(const string & ext_lib_path, const string & deviceName) except +
|
void addExtension(const string & ext_lib_path, const string & deviceName) except +
|
||||||
vector[string] getAvailableDevices() except +
|
vector[string] getAvailableDevices() nogil except +
|
||||||
object getMetric(const string & deviceName, const string & name) except +
|
object getMetric(const string & deviceName, const string & name) except +
|
||||||
object getConfig(const string & deviceName, const string & name) except +
|
object getConfig(const string & deviceName, const string & name) except +
|
||||||
|
|
||||||
|
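The `nogil` qualifiers added to these extern declarations are what let the `with nogil:` blocks in ie_api.pyx call into the C++ core without holding the interpreter lock. A rough Cython sketch of the pattern; the header, namespace and class names are illustrative only:

# distutils: language = c++
cdef extern from "impl.hpp" namespace "Example":       # hypothetical header and namespace
    cdef cppclass Engine:
        int wait(long timeout) nogil except +           # callable with the GIL released

cdef int run_wait(Engine* eng, long timeout):
    cdef int status
    with nogil:                                         # release the GIL around the blocking call
        status = eng.wait(timeout)
    return status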
@ -20,7 +20,9 @@ set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/offline_transformations_
|
|||||||
# create target
|
# create target
|
||||||
|
|
||||||
cython_add_module(${TARGET_NAME} ${SOURCES})
|
cython_add_module(${TARGET_NAME} ${SOURCES})
|
||||||
|
|
||||||
add_dependencies(${TARGET_NAME} ie_api)
|
add_dependencies(${TARGET_NAME} ie_api)
|
||||||
|
ov_python_disable_intel_warnings(${TARGET_NAME})
|
||||||
|
|
||||||
if(COMMAND ie_add_vs_version_file)
|
if(COMMAND ie_add_vs_version_file)
|
||||||
ie_add_vs_version_file(NAME ${TARGET_NAME}
|
ie_add_vs_version_file(NAME ${TARGET_NAME}
|
||||||
|
@ -20,7 +20,9 @@ set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/test_utils_api.pyx
|
|||||||
# create target
|
# create target
|
||||||
|
|
||||||
cython_add_module(${TARGET_NAME} ${SOURCES})
|
cython_add_module(${TARGET_NAME} ${SOURCES})
|
||||||
|
|
||||||
add_dependencies(${TARGET_NAME} ie_api)
|
add_dependencies(${TARGET_NAME} ie_api)
|
||||||
|
ov_python_disable_intel_warnings(${TARGET_NAME})
|
||||||
|
|
||||||
if(COMMAND ie_add_vs_version_file)
|
if(COMMAND ie_add_vs_version_file)
|
||||||
ie_add_vs_version_file(NAME ${TARGET_NAME}
|
ie_add_vs_version_file(NAME ${TARGET_NAME}
|
||||||
|
@ -21,11 +21,6 @@ def model_onnx_path():
|
|||||||
test_onnx = os.path.join(path_to_repo, "models", "test_model", 'test_model.onnx')
|
test_onnx = os.path.join(path_to_repo, "models", "test_model", 'test_model.onnx')
|
||||||
return test_onnx
|
return test_onnx
|
||||||
|
|
||||||
def model_prototxt_path():
|
|
||||||
path_to_repo = os.environ["MODELS_PATH"]
|
|
||||||
test_prototxt = os.path.join(path_to_repo, "models", "test_model", 'test_model.prototxt')
|
|
||||||
return test_prototxt
|
|
||||||
|
|
||||||
def image_path():
|
def image_path():
|
||||||
path_to_repo = os.environ["DATA_PATH"]
|
path_to_repo = os.environ["DATA_PATH"]
|
||||||
path_to_img = os.path.join(path_to_repo, 'validation_set', '224x224', 'dog.bmp')
|
path_to_img = os.path.join(path_to_repo, 'validation_set', '224x224', 'dog.bmp')
|
||||||
|
@ -82,24 +82,6 @@ def test_input_info(device):
|
|||||||
del ie_core
|
del ie_core
|
||||||
|
|
||||||
|
|
||||||
def test_inputs_deprecated(device):
|
|
||||||
ie_core = ie.IECore()
|
|
||||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
|
||||||
exec_net = ie_core.load_network(net, device, num_requests=5)
|
|
||||||
with warnings.catch_warnings(record=True) as w:
|
|
||||||
assert len(exec_net.inputs) == 1
|
|
||||||
assert "data" in exec_net.inputs
|
|
||||||
assert isinstance(exec_net.inputs['data'], ie.DataPtr)
|
|
||||||
assert len(w) == 3
|
|
||||||
for i in range (len(w)):
|
|
||||||
assert "'inputs' property of ExecutableNetwork class is deprecated. " \
|
|
||||||
"To access DataPtrs user need to use 'input_data' property " \
|
|
||||||
"of InputInfoCPtr objects which " \
|
|
||||||
"can be accessed by 'input_info' property." in str(w[i].message)
|
|
||||||
del exec_net
|
|
||||||
del ie_core
|
|
||||||
|
|
||||||
|
|
||||||
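The dropped `exec_net.inputs` test above is covered by `input_info` instead: `ExecutableNetwork.input_info` returns `InputInfoCPtr` objects whose `input_data` property yields the same `DataPtr`. A brief replacement sketch with 'data' as a placeholder name:

exec_net = ie_core.load_network(net, device, num_requests=5)

# Before (removed): data_ptr = exec_net.inputs['data']
assert 'data' in exec_net.input_info
data_ptr = exec_net.input_info['data'].input_data   # DataPtr via InputInfoCPtr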
def test_outputs(device):
|
def test_outputs(device):
|
||||||
ie_core = ie.IECore()
|
ie_core = ie.IECore()
|
||||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||||
|
@ -5,14 +5,16 @@ import os
|
|||||||
import pytest
|
import pytest
|
||||||
from sys import platform
|
from sys import platform
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
from threading import Thread
|
||||||
|
from time import sleep, time
|
||||||
|
from queue import Queue
|
||||||
|
|
||||||
from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork
|
from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork
|
||||||
from conftest import model_path, plugins_path, model_onnx_path, model_prototxt_path
|
from conftest import model_path, plugins_path, model_onnx_path
|
||||||
|
|
||||||
|
|
||||||
test_net_xml, test_net_bin = model_path()
|
test_net_xml, test_net_bin = model_path()
|
||||||
test_net_onnx = model_onnx_path()
|
test_net_onnx = model_onnx_path()
|
||||||
test_net_prototxt = model_prototxt_path()
|
|
||||||
plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path()
|
plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path()
|
||||||
|
|
||||||
|
|
||||||
@ -201,18 +203,6 @@ def test_read_network_from_onnx_as_path():
|
|||||||
assert isinstance(net, IENetwork)
|
assert isinstance(net, IENetwork)
|
||||||
|
|
||||||
|
|
||||||
def test_read_network_from_prototxt():
|
|
||||||
ie = IECore()
|
|
||||||
net = ie.read_network(model=test_net_prototxt)
|
|
||||||
assert isinstance(net, IENetwork)
|
|
||||||
|
|
||||||
|
|
||||||
def test_read_network_from_prototxt_as_path():
|
|
||||||
ie = IECore()
|
|
||||||
net = ie.read_network(model=Path(test_net_prototxt))
|
|
||||||
assert isinstance(net, IENetwork)
|
|
||||||
|
|
||||||
|
|
||||||
def test_incorrect_xml():
|
def test_incorrect_xml():
|
||||||
ie = IECore()
|
ie = IECore()
|
||||||
with pytest.raises(Exception) as e:
|
with pytest.raises(Exception) as e:
|
||||||
@ -253,3 +243,37 @@ def test_net_from_buffer_valid():
|
|||||||
o_net2 = ref_net.outputs
|
o_net2 = ref_net.outputs
|
||||||
assert ii_net.keys() == ii_net2.keys()
|
assert ii_net.keys() == ii_net2.keys()
|
||||||
assert o_net.keys() == o_net2.keys()
|
assert o_net.keys() == o_net2.keys()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "GPU", reason="Device dependent test")
|
||||||
|
def test_load_network_release_gil(device):
|
||||||
|
running = True
|
||||||
|
message_queue = Queue()
|
||||||
|
def detect_long_gil_holds():
|
||||||
|
sleep_time = 0.01
|
||||||
|
latency_alert_threshold = 0.1
|
||||||
|
# Send a message to indicate the thread is running and ready to detect GIL locks
|
||||||
|
message_queue.put("ready to detect")
|
||||||
|
while running:
|
||||||
|
start_sleep = time()
|
||||||
|
sleep(sleep_time)
|
||||||
|
elapsed = time() - start_sleep
|
||||||
|
if elapsed > latency_alert_threshold:
|
||||||
|
# Send a message to the testing thread that a long GIL lock occurred
|
||||||
|
message_queue.put(latency_alert_threshold)
|
||||||
|
ie = IECore()
|
||||||
|
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
|
||||||
|
# Wait for the GIL lock detector to be up and running
|
||||||
|
gil_hold_detection_thread = Thread(daemon=True, target=detect_long_gil_holds)
|
||||||
|
gil_hold_detection_thread.start()
|
||||||
|
# Wait to make sure the thread is started and checking for GIL holds
|
||||||
|
sleep(0.1)
|
||||||
|
assert message_queue.get(timeout=5) == "ready to detect"
|
||||||
|
# Run the function that should unlock the GIL
|
||||||
|
exec_net = ie.load_network(net, device)
|
||||||
|
# Ensure resources are closed
|
||||||
|
running = False
|
||||||
|
gil_hold_detection_thread.join(timeout=5)
|
||||||
|
# Assert there were never any long gil locks
|
||||||
|
assert message_queue.qsize() == 0, \
|
||||||
|
f"More than 0 GIL locks occured! Latency: {message_queue.get()})"
|
||||||
|
@ -12,60 +12,12 @@ from conftest import model_path
|
|||||||
test_net_xml, test_net_bin = model_path()
|
test_net_xml, test_net_bin = model_path()
|
||||||
|
|
||||||
|
|
||||||
def test_create_ie_network_deprecated():
|
|
||||||
with warnings.catch_warnings(record=True) as w:
|
|
||||||
net = IENetwork(model=test_net_xml, weights=test_net_bin)
|
|
||||||
assert isinstance(net, IENetwork)
|
|
||||||
assert len(w) == 1
|
|
||||||
assert issubclass(w[-1].category, DeprecationWarning)
|
|
||||||
assert "Reading network using constructor is deprecated. " \
|
|
||||||
"Please, use IECore.read_network() method instead" in str(w[0].message)
|
|
||||||
|
|
||||||
|
|
||||||
def test_incorrect_xml_deprecated():
|
|
||||||
with warnings.catch_warnings(record=True) as w:
|
|
||||||
with pytest.raises(Exception) as e:
|
|
||||||
IENetwork(model="./model.xml", weights=test_net_bin)
|
|
||||||
assert "Path to the model ./model.xml doesn't exist or it's a directory" in str(e.value)
|
|
||||||
assert len(w) == 1
|
|
||||||
assert issubclass(w[-1].category, DeprecationWarning)
|
|
||||||
assert "Reading network using constructor is deprecated. " \
|
|
||||||
"Please, use IECore.read_network() method instead" in str(w[0].message)
|
|
||||||
|
|
||||||
|
|
||||||
def test_incorrect_bin_deprecated():
|
|
||||||
with warnings.catch_warnings(record=True) as w:
|
|
||||||
with pytest.raises(Exception) as e:
|
|
||||||
IENetwork(model=test_net_xml, weights="./model.bin")
|
|
||||||
assert "Path to the weights ./model.bin doesn't exist or it's a directory" in str(e.value)
|
|
||||||
assert len(w) == 1
|
|
||||||
assert issubclass(w[-1].category, DeprecationWarning)
|
|
||||||
assert "Reading network using constructor is deprecated. " \
|
|
||||||
"Please, use IECore.read_network() method instead" in str(w[0].message)
|
|
||||||
|
|
||||||
|
|
||||||
def test_name():
|
def test_name():
|
||||||
ie = IECore()
|
ie = IECore()
|
||||||
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
|
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
|
||||||
assert net.name == "test_model"
|
assert net.name == "test_model"
|
||||||
|
|
||||||
|
|
||||||
def test_inputs_deprecated():
|
|
||||||
ie = IECore()
|
|
||||||
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
|
|
||||||
with warnings.catch_warnings(record=True) as w:
|
|
||||||
inp = net.inputs
|
|
||||||
assert isinstance(inp['data'], DataPtr)
|
|
||||||
assert inp['data'].layout == "NCHW"
|
|
||||||
assert inp['data'].precision == "FP32"
|
|
||||||
assert inp['data'].shape == [1, 3, 32, 32]
|
|
||||||
assert len(w) == 1
|
|
||||||
assert "'inputs' property of IENetwork class is deprecated. " \
|
|
||||||
"To access DataPtrs user need to use 'input_data' property " \
|
|
||||||
"of InputInfoPtr objects which " \
|
|
||||||
"can be accessed by 'input_info' property." in str(w[-1].message)
|
|
||||||
|
|
||||||
|
|
||||||
def test_input_info():
|
def test_input_info():
|
||||||
ie = IECore()
|
ie = IECore()
|
||||||
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
|
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
|
||||||
@ -208,21 +160,7 @@ def test_reshape():
|
|||||||
net.reshape({"data": (2, 3, 32, 32)})
|
net.reshape({"data": (2, 3, 32, 32)})
|
||||||
|
|
||||||
|
|
||||||
def test_read_net_from_buffer_deprecated():
|
def test_net_from_buffer_valid():
|
||||||
with warnings.catch_warnings(record=True) as w:
|
|
||||||
with open(test_net_bin, 'rb') as f:
|
|
||||||
bin = f.read()
|
|
||||||
with open(test_net_xml, 'rb') as f:
|
|
||||||
xml = f.read()
|
|
||||||
net = IENetwork(model=xml, weights=bin, init_from_buffer=True)
|
|
||||||
assert isinstance(net, IENetwork)
|
|
||||||
assert len(w) == 1
|
|
||||||
assert issubclass(w[-1].category, DeprecationWarning)
|
|
||||||
assert "Reading network using constructor is deprecated. " \
|
|
||||||
"Please, use IECore.read_network() method instead" in str(w[0].message)
|
|
||||||
|
|
||||||
|
|
||||||
def test_net_from_buffer_valid_deprecated():
|
|
||||||
ie = IECore()
|
ie = IECore()
|
||||||
with open(test_net_bin, 'rb') as f:
|
with open(test_net_bin, 'rb') as f:
|
||||||
bin = f.read()
|
bin = f.read()
|
||||||
|
@ -66,32 +66,6 @@ def test_output_blobs(device):
|
|||||||
assert executable_network.requests[0].output_blobs['fc_out'].tensor_desc == td
|
assert executable_network.requests[0].output_blobs['fc_out'].tensor_desc == td
|
||||||
|
|
||||||
|
|
||||||
def test_inputs_deprecated(device):
|
|
||||||
ie_core = ie.IECore()
|
|
||||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
|
||||||
executable_network = ie_core.load_network(net, device, num_requests=2)
|
|
||||||
with warnings.catch_warnings(record=True) as w:
|
|
||||||
inputs = executable_network.requests[0].inputs
|
|
||||||
assert "'inputs' property of InferRequest is deprecated. " \
|
|
||||||
"Please instead use 'input_blobs' property." in str(w[-1].message)
|
|
||||||
del executable_network
|
|
||||||
del ie_core
|
|
||||||
del net
|
|
||||||
|
|
||||||
|
|
||||||
def test_outputs_deprecated(device):
|
|
||||||
ie_core = ie.IECore()
|
|
||||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
|
||||||
executable_network = ie_core.load_network(net, device, num_requests=2)
|
|
||||||
with warnings.catch_warnings(record=True) as w:
|
|
||||||
outputs = executable_network.requests[0].outputs
|
|
||||||
assert "'outputs' property of InferRequest is deprecated. Please instead use 'output_blobs' property." in str(
|
|
||||||
w[-1].message)
|
|
||||||
del executable_network
|
|
||||||
del ie_core
|
|
||||||
del net
|
|
||||||
|
|
||||||
|
|
||||||
def test_inputs_list(device):
|
def test_inputs_list(device):
|
||||||
ie_core = ie.IECore()
|
ie_core = ie.IECore()
|
||||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||||
@ -552,11 +526,10 @@ def test_query_state_write_buffer(device, input_shape, data_type, mode):
|
|||||||
pytest.skip("Can't run on ARM plugin")
|
pytest.skip("Can't run on ARM plugin")
|
||||||
|
|
||||||
layout = ["C", "HW", "CHW", "NCHW"]
|
layout = ["C", "HW", "CHW", "NCHW"]
|
||||||
np_data_type = {"FP32": np.float32, "FP16": np.float16, "I32": np.int32}
|
|
||||||
|
|
||||||
from openvino.inference_engine import TensorDesc, Blob
|
from openvino.inference_engine import TensorDesc, Blob, format_map
|
||||||
|
|
||||||
net = ie.IENetwork(create_function_with_memory(input_shape, np_data_type[data_type]))
|
net = ie.IENetwork(create_function_with_memory(input_shape, format_map[data_type]))
|
||||||
ie_core = ie.IECore()
|
ie_core = ie.IECore()
|
||||||
exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
|
exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
|
||||||
request = exec_net.requests[0]
|
request = exec_net.requests[0]
|
||||||
@ -572,23 +545,23 @@ def test_query_state_write_buffer(device, input_shape, data_type, mode):
|
|||||||
if mode == "set_init_memory_state":
|
if mode == "set_init_memory_state":
|
||||||
# create initial value
|
# create initial value
|
||||||
const_init = 5
|
const_init = 5
|
||||||
init_array = np.full(input_shape, const_init, dtype=np_data_type[mem_state.state.tensor_desc.precision])
|
init_array = np.full(input_shape, const_init, dtype=format_map[mem_state.state.tensor_desc.precision])
|
||||||
tensor_desc = TensorDesc(mem_state.state.tensor_desc.precision, input_shape, layout[len(input_shape) - 1])
|
tensor_desc = TensorDesc(mem_state.state.tensor_desc.precision, input_shape, layout[len(input_shape) - 1])
|
||||||
blob = Blob(tensor_desc, init_array)
|
blob = Blob(tensor_desc, init_array)
|
||||||
mem_state.state = blob
|
mem_state.state = blob
|
||||||
|
|
||||||
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=np_data_type[data_type])})
|
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])})
|
||||||
expected_res = np.full(input_shape, 1 + const_init, dtype=np_data_type[data_type])
|
expected_res = np.full(input_shape, 1 + const_init, dtype=format_map[data_type])
|
||||||
elif mode == "reset_memory_state":
|
elif mode == "reset_memory_state":
|
||||||
# reset initial state of ReadValue to zero
|
# reset initial state of ReadValue to zero
|
||||||
mem_state.reset()
|
mem_state.reset()
|
||||||
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=np_data_type[data_type])})
|
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])})
|
||||||
|
|
||||||
# always ones
|
# always ones
|
||||||
expected_res = np.full(input_shape, 1, dtype=np_data_type[data_type])
|
expected_res = np.full(input_shape, 1, dtype=format_map[data_type])
|
||||||
else:
|
else:
|
||||||
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=np_data_type[data_type])})
|
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])})
|
||||||
expected_res = np.full(input_shape, i, dtype=np_data_type[data_type])
|
expected_res = np.full(input_shape, i, dtype=format_map[data_type])
|
||||||
|
|
||||||
assert np.allclose(res['MemoryAdd'], expected_res, atol=1e-6), \
|
assert np.allclose(res['MemoryAdd'], expected_res, atol=1e-6), \
|
||||||
"Expected values: {} \n Actual values: {} \n".format(expected_res, res)
|
"Expected values: {} \n Actual values: {} \n".format(expected_res, res)
|
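The local `np_data_type` lookup is replaced by the `format_map` exported from the bindings, so the precision-to-numpy-dtype mapping lives in one place. A short sketch of its use; the exact set of supported precision strings is defined by the bindings:

import numpy as np
from openvino.inference_engine import format_map

dtype = format_map["FP32"]                      # numpy dtype for the IE precision string
data = np.full((1, 3, 32, 32), 1, dtype=dtype)
assert data.dtype == np.float32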
@ -56,13 +56,13 @@ endif()
|
|||||||
|
|
||||||
add_custom_command(TARGET ie_wheel
|
add_custom_command(TARGET ie_wheel
|
||||||
PRE_BUILD
|
PRE_BUILD
|
||||||
COMMAND ${CMAKE_COMMAND} -E rm -rf "${CMAKE_CURRENT_BINARY_DIR}/site-packages"
|
COMMAND ${CMAKE_COMMAND} -E remove_directory "${CMAKE_CURRENT_BINARY_DIR}/site-packages"
|
||||||
COMMAND ${PYTHON_EXECUTABLE} ${SETUP_PY} clean bdist_wheel
|
COMMAND ${PYTHON_EXECUTABLE} ${SETUP_PY} clean bdist_wheel
|
||||||
--dist-dir ${CMAKE_BINARY_DIR}/wheels
|
--dist-dir ${CMAKE_BINARY_DIR}/wheels
|
||||||
--build=${WHEEL_BUILD}
|
--build=${WHEEL_BUILD}
|
||||||
--plat-name=${WHEEL_PLATFORM}
|
--plat-name=${WHEEL_PLATFORM}
|
||||||
POST_BUILD
|
POST_BUILD
|
||||||
COMMAND ${CMAKE_COMMAND} -E rm "${CMAKE_CURRENT_SOURCE_DIR}/.env"
|
COMMAND ${CMAKE_COMMAND} -E remove_directory "${CMAKE_CURRENT_SOURCE_DIR}/.env"
|
||||||
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
|
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
|
||||||
COMMENT "Building Python wheel ${WHEEL_PACKAGE_NAME}"
|
COMMENT "Building Python wheel ${WHEEL_PACKAGE_NAME}"
|
||||||
VERBATIM
|
VERBATIM
|
||||||
|
@ -56,35 +56,30 @@ set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${IE_MAIN_SAMPLES_DIR}/${BIN_FOLDER})
|
|||||||
|
|
||||||
if (WIN32)
|
if (WIN32)
|
||||||
set_property (DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _CRT_SECURE_NO_WARNINGS)
|
set_property (DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _CRT_SECURE_NO_WARNINGS)
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_SCL_SECURE_NO_WARNINGS -DNOMINMAX")
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_SCL_SECURE_NO_WARNINGS")
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") #no asynchronous structured exception handling
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") # no asynchronous structured exception handling
|
||||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE")
|
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE")
|
||||||
|
|
||||||
if (TREAT_WARNING_AS_ERROR)
|
if (TREAT_WARNING_AS_ERROR)
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX") #treating warnings as errors
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX") # treating warnings as errors
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
|
if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Qdiag-disable:177")
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Qdiag-disable:177")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
# disable some noisy warnings
|
||||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
|
if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4251 /wd4275 /wd4267 /wd4819") #disable some warnings
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4251 /wd4275 /wd4267 /wd4819")
|
||||||
endif()
|
endif()
|
||||||
else()
|
else()
|
||||||
|
# treating warnings as errors
|
||||||
if(TREAT_WARNING_AS_ERROR)
|
if(TREAT_WARNING_AS_ERROR)
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") #treating warnings as errors
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
|
if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
|
||||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall")
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -diag-disable:177")
|
||||||
if (APPLE)
|
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-command-line-argument")
|
|
||||||
elseif(UNIX)
|
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wuninitialized -Winit-self")
|
|
||||||
if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wmaybe-uninitialized")
|
|
||||||
endif()
|
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
@ -92,6 +87,15 @@ if(APPLE)
|
|||||||
set(CMAKE_MACOSX_RPATH ON)
|
set(CMAKE_MACOSX_RPATH ON)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm64.*|aarch64.*|AARCH64.*)")
|
||||||
|
set(AARCH64 ON)
|
||||||
|
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)")
|
||||||
|
set(ARM ON)
|
||||||
|
endif()
|
||||||
|
if(ARM AND NOT CMAKE_CROSSCOMPILING)
|
||||||
|
add_compile_options(-march=armv7-a)
|
||||||
|
endif()
|
||||||
|
|
||||||
set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
|
set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
|
||||||
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
||||||
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
|
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
|
||||||
@ -104,9 +108,6 @@ if(NOT DEFINED CMAKE_CXX_STANDARD)
|
|||||||
set (CMAKE_CXX_STANDARD 11)
|
set (CMAKE_CXX_STANDARD 11)
|
||||||
set (CMAKE_CXX_EXTENSIONS OFF)
|
set (CMAKE_CXX_EXTENSIONS OFF)
|
||||||
set (CMAKE_CXX_STANDARD_REQUIRED ON)
|
set (CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
|
||||||
set (CMAKE_CXX_FLAGS "-std=c++11 ${CMAKE_CXX_FLAGS}")
|
|
||||||
endif()
|
|
||||||
endif()
|
endif()
|
||||||
####################################
|
####################################
|
||||||
|
|
||||||
@ -135,10 +136,6 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/cnpy")
|
|||||||
add_subdirectory(thirdparty/cnpy EXCLUDE_FROM_ALL)
|
add_subdirectory(thirdparty/cnpy EXCLUDE_FROM_ALL)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/common/utils")
|
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/common/utils")
|
||||||
add_subdirectory(common/utils)
|
add_subdirectory(common/utils)
|
||||||
endif()
|
endif()
|
||||||
|
@ -147,6 +147,14 @@ static constexpr char iop_message[] = "Optional. Specifies precision for input a
|
|||||||
" Overwrites precision from ip and op options for "
|
" Overwrites precision from ip and op options for "
|
||||||
"specified layers.";
|
"specified layers.";
|
||||||
|
|
||||||
|
static constexpr char input_image_scale_message[] = "Optional. Scale values to be used for the input image per channel.\n"
|
||||||
|
"Values to be provided in the [R, G, B] format. Can be defined for desired input of the model.\n"
|
||||||
|
"Example: -iscale data[255,255,255],info[255,255,255]\n";
|
||||||
|
|
||||||
|
static constexpr char input_image_mean_message[] = "Optional. Mean values to be used for the input image per channel.\n"
|
||||||
|
"Values to be provided in the [R, G, B] format. Can be defined for desired input of the model,\n"
|
||||||
|
"Example: -imean data[255,255,255],info[255,255,255]\n";
|
||||||
|
|
||||||
/// @brief Define flag for showing help message <br>
|
/// @brief Define flag for showing help message <br>
|
||||||
DEFINE_bool(h, false, help_message);
|
DEFINE_bool(h, false, help_message);
|
||||||
|
|
||||||
@ -259,6 +267,12 @@ DEFINE_string(cache_dir, "", cache_dir_message);
|
|||||||
/// @brief Define flag for load network from model file by name without ReadNetwork <br>
|
/// @brief Define flag for load network from model file by name without ReadNetwork <br>
|
||||||
DEFINE_bool(load_from_file, false, load_from_file_message);
|
DEFINE_bool(load_from_file, false, load_from_file_message);
|
||||||
|
|
||||||
|
/// @brief Define flag for using input image scale <br>
|
||||||
|
DEFINE_string(iscale, "", input_image_scale_message);
|
||||||
|
|
||||||
|
/// @brief Define flag for using input image mean <br>
|
||||||
|
DEFINE_string(imean, "", input_image_mean_message);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief This function show a help message
|
* @brief This function show a help message
|
||||||
*/
|
*/
|
||||||
@ -304,4 +318,6 @@ static void showUsage() {
|
|||||||
std::cout << " -ip <value> " << inputs_precision_message << std::endl;
|
std::cout << " -ip <value> " << inputs_precision_message << std::endl;
|
||||||
std::cout << " -op <value> " << outputs_precision_message << std::endl;
|
std::cout << " -op <value> " << outputs_precision_message << std::endl;
|
||||||
std::cout << " -iop \"<value>\" " << iop_message << std::endl;
|
std::cout << " -iop \"<value>\" " << iop_message << std::endl;
|
||||||
|
std::cout << " -iscale " << input_image_scale_message << std::endl;
|
||||||
|
std::cout << " -imean " << input_image_mean_message << std::endl;
|
||||||
}
|
}
|
||||||
|
@ -91,7 +91,9 @@ void fillBlobImage(Blob::Ptr& inputBlob, const std::vector<std::string>& filePat
|
|||||||
size_t offset = imageId * numChannels * width * height + (((app_info.layout == "NCHW") || (app_info.layout == "CHW"))
|
size_t offset = imageId * numChannels * width * height + (((app_info.layout == "NCHW") || (app_info.layout == "CHW"))
|
||||||
? (ch * width * height + h * width + w)
|
? (ch * width * height + h * width + w)
|
||||||
: (h * width * numChannels + w * numChannels + ch));
|
: (h * width * numChannels + w * numChannels + ch));
|
||||||
inputBlobData[offset] = static_cast<T>(vreader.at(imageId).get()[h * width * numChannels + w * numChannels + ch]);
|
inputBlobData[offset] =
|
||||||
|
(static_cast<T>(vreader.at(imageId).get()[h * width * numChannels + w * numChannels + ch]) - static_cast<T>(app_info.mean[ch])) /
|
||||||
|
static_cast<T>(app_info.scale[ch]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
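The new fill expression applies the per-channel values supplied via -imean/-iscale while copying pixels into the blob, i.e. each element becomes (pixel - mean[ch]) / scale[ch]. A tiny numpy illustration of the same arithmetic; shapes and values are made up:

import numpy as np

image = np.random.randint(0, 256, size=(3, 224, 224)).astype(np.float32)  # CHW, made-up shape
mean = np.array([255.0, 255.0, 255.0]).reshape(3, 1, 1)
scale = np.array([255.0, 255.0, 255.0]).reshape(3, 1, 1)

normalized = (image - mean) / scale   # same per-channel formula as the C++ fill loop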
|
@ -12,4 +12,4 @@
|
|||||||
#include "utils.hpp"
|
#include "utils.hpp"
|
||||||
|
|
||||||
void fillBlobs(const std::vector<std::string>& inputFiles, const size_t& batchSize, benchmark_app::InputsInfo& app_inputs_info,
|
void fillBlobs(const std::vector<std::string>& inputFiles, const size_t& batchSize, benchmark_app::InputsInfo& app_inputs_info,
|
||||||
std::vector<InferReqWrap::Ptr> requests);
|
std::vector<InferReqWrap::Ptr> requests);
|
@ -380,7 +380,7 @@ int main(int argc, char* argv[]) {
|
|||||||
batchSize = cnnNetwork.getBatchSize();
|
batchSize = cnnNetwork.getBatchSize();
|
||||||
// Parse input shapes if specified
|
// Parse input shapes if specified
|
||||||
bool reshape = false;
|
bool reshape = false;
|
||||||
app_inputs_info = getInputsInfo<InputInfo::Ptr>(FLAGS_shape, FLAGS_layout, FLAGS_b, inputInfo, reshape);
|
app_inputs_info = getInputsInfo<InputInfo::Ptr>(FLAGS_shape, FLAGS_layout, FLAGS_b, FLAGS_iscale, FLAGS_imean, inputInfo, reshape);
|
||||||
if (reshape) {
|
if (reshape) {
|
||||||
InferenceEngine::ICNNNetwork::InputShapes shapes = {};
|
InferenceEngine::ICNNNetwork::InputShapes shapes = {};
|
||||||
for (auto& item : app_inputs_info)
|
for (auto& item : app_inputs_info)
|
||||||
@ -441,7 +441,7 @@ int main(int argc, char* argv[]) {
|
|||||||
slog::info << "Import network took " << duration_ms << " ms" << slog::endl;
|
slog::info << "Import network took " << duration_ms << " ms" << slog::endl;
|
||||||
if (statistics)
|
if (statistics)
|
||||||
statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {{"import network time (ms)", duration_ms}});
|
statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {{"import network time (ms)", duration_ms}});
|
||||||
app_inputs_info = getInputsInfo<InputInfo::CPtr>(FLAGS_shape, FLAGS_layout, FLAGS_b, exeNetwork.GetInputsInfo());
|
app_inputs_info = getInputsInfo<InputInfo::CPtr>(FLAGS_shape, FLAGS_layout, FLAGS_b, FLAGS_iscale, FLAGS_imean, exeNetwork.GetInputsInfo());
|
||||||
if (batchSize == 0) {
|
if (batchSize == 0) {
|
||||||
batchSize = 1;
|
batchSize = 1;
|
||||||
}
|
}
|
||||||
|
@ -88,6 +88,17 @@ std::vector<std::string> split(const std::string& s, char delim) {
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::vector<float> splitFloat(const std::string& s, char delim) {
|
||||||
|
std::vector<float> result;
|
||||||
|
std::stringstream ss(s);
|
||||||
|
std::string item;
|
||||||
|
|
||||||
|
while (getline(ss, item, delim)) {
|
||||||
|
result.push_back(std::stof(item));
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
std::vector<std::string> parseDevices(const std::string& device_string) {
|
std::vector<std::string> parseDevices(const std::string& device_string) {
|
||||||
std::string comma_separated_devices = device_string;
|
std::string comma_separated_devices = device_string;
|
||||||
if (comma_separated_devices.find(":") != std::string::npos) {
|
if (comma_separated_devices.find(":") != std::string::npos) {
|
||||||
@ -161,6 +172,44 @@ std::string getShapesString(const InferenceEngine::ICNNNetwork::InputShapes& sha
|
|||||||
return ss.str();
|
return ss.str();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::map<std::string, std::vector<float>> parseScaleOrMean(const std::string& scale_mean, const benchmark_app::InputsInfo& inputs_info) {
|
||||||
|
// Format: data[255,255,255],info[255,255,255]
|
||||||
|
std::map<std::string, std::vector<float>> return_value;
|
||||||
|
|
||||||
|
std::string search_string = scale_mean;
|
||||||
|
auto start_pos = search_string.find_first_of('[');
|
||||||
|
while (start_pos != std::string::npos) {
|
||||||
|
auto end_pos = search_string.find_first_of(']');
|
||||||
|
if (end_pos == std::string::npos)
|
||||||
|
break;
|
||||||
|
auto input_name = search_string.substr(0, start_pos);
|
||||||
|
auto input_value_string = search_string.substr(start_pos + 1, end_pos - start_pos - 1);
|
||||||
|
auto input_value = splitFloat(input_value_string, ',');
|
||||||
|
|
||||||
|
if (!input_name.empty()) {
|
||||||
|
if (inputs_info.count(input_name)) {
|
||||||
|
return_value[input_name] = input_value;
|
||||||
|
}
|
||||||
|
// ignore wrong input name
|
||||||
|
} else {
|
||||||
|
for (auto& item : inputs_info) {
|
||||||
|
if (item.second.isImage())
|
||||||
|
return_value[item.first] = input_value;
|
||||||
|
}
|
||||||
|
search_string.clear();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
search_string = search_string.substr(end_pos + 1);
|
||||||
|
if (search_string.empty() || search_string.front() != ',')
|
||||||
|
break;
|
||||||
|
search_string = search_string.substr(1);
|
||||||
|
start_pos = search_string.find_first_of('[');
|
||||||
|
}
|
||||||
|
if (!search_string.empty())
|
||||||
|
throw std::logic_error("Can't parse input parameter string: " + scale_mean);
|
||||||
|
return return_value;
|
||||||
|
}
|
||||||
|
|
||||||
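The value format accepted by parseScaleOrMean above is a comma-separated list of name[v,v,v] entries, where an empty name means the values apply to every image input and unknown names are ignored. A rough, non-authoritative Python re-implementation of the same rules, with image_input_names standing in for the keys of benchmark_app's InputsInfo:

def parse_scale_or_mean(value, image_input_names):
    """Sketch of parseScaleOrMean: 'data[255,255,255],info[255,255,255]' -> {name: [floats]}."""
    result = {}
    rest = value
    while rest:
        name, sep, tail = rest.partition('[')
        if not sep:
            raise ValueError("Can't parse input parameter string: " + value)
        values_str, sep, rest = tail.partition(']')
        if not sep:
            raise ValueError("Can't parse input parameter string: " + value)
        values = [float(v) for v in values_str.split(',')]
        if name:
            if name in image_input_names:      # unknown names are silently ignored
                result[name] = values
        else:
            for n in image_input_names:        # empty name: apply to all image inputs
                result[n] = values
            return result                      # the C++ loop stops after such an entry
        if rest:
            if not rest.startswith(','):
                raise ValueError("Can't parse input parameter string: " + value)
            rest = rest[1:]
    return result

# parse_scale_or_mean("data[255,255,255]", {"data", "info"}) == {"data": [255.0, 255.0, 255.0]}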
#ifdef USE_OPENCV
|
#ifdef USE_OPENCV
|
||||||
void dump_config(const std::string& filename, const std::map<std::string, std::map<std::string, std::string>>& config) {
|
void dump_config(const std::string& filename, const std::map<std::string, std::map<std::string, std::string>>& config) {
|
||||||
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
|
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
|
||||||
|
@ -13,6 +13,8 @@ struct InputInfo {
|
|||||||
InferenceEngine::Precision precision;
|
InferenceEngine::Precision precision;
|
||||||
InferenceEngine::SizeVector shape;
|
InferenceEngine::SizeVector shape;
|
||||||
std::string layout;
|
std::string layout;
|
||||||
|
std::vector<float> scale;
|
||||||
|
std::vector<float> mean;
|
||||||
bool isImage() const;
|
bool isImage() const;
|
||||||
bool isImageInfo() const;
|
bool isImageInfo() const;
|
||||||
size_t getDimentionByLayout(char character) const;
|
size_t getDimentionByLayout(char character) const;
|
||||||
@ -31,6 +33,7 @@ std::map<std::string, std::string> parseNStreamsValuePerDevice(const std::vector
|
|||||||
std::string getShapesString(const InferenceEngine::ICNNNetwork::InputShapes& shapes);
|
std::string getShapesString(const InferenceEngine::ICNNNetwork::InputShapes& shapes);
|
||||||
size_t getBatchSize(const benchmark_app::InputsInfo& inputs_info);
|
size_t getBatchSize(const benchmark_app::InputsInfo& inputs_info);
|
||||||
std::vector<std::string> split(const std::string& s, char delim);
|
std::vector<std::string> split(const std::string& s, char delim);
|
||||||
|
std::map<std::string, std::vector<float>> parseScaleOrMean(const std::string& scale_mean, const benchmark_app::InputsInfo& inputs_info);
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
std::map<std::string, std::string> parseInputParameters(const std::string parameter_string, const std::map<std::string, T>& input_info) {
|
std::map<std::string, std::string> parseInputParameters(const std::string parameter_string, const std::map<std::string, T>& input_info) {
|
||||||
@ -65,9 +68,11 @@ std::map<std::string, std::string> parseInputParameters(const std::string parame
|
|||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
benchmark_app::InputsInfo getInputsInfo(const std::string& shape_string, const std::string& layout_string, const size_t batch_size,
|
benchmark_app::InputsInfo getInputsInfo(const std::string& shape_string, const std::string& layout_string, const size_t batch_size,
|
||||||
const std::map<std::string, T>& input_info, bool& reshape_required) {
|
const std::string& scale_string, const std::string& mean_string, const std::map<std::string, T>& input_info,
|
||||||
|
bool& reshape_required) {
|
||||||
std::map<std::string, std::string> shape_map = parseInputParameters(shape_string, input_info);
|
std::map<std::string, std::string> shape_map = parseInputParameters(shape_string, input_info);
|
||||||
std::map<std::string, std::string> layout_map = parseInputParameters(layout_string, input_info);
|
std::map<std::string, std::string> layout_map = parseInputParameters(layout_string, input_info);
|
||||||
|
|
||||||
reshape_required = false;
|
reshape_required = false;
|
||||||
benchmark_app::InputsInfo info_map;
|
benchmark_app::InputsInfo info_map;
|
||||||
for (auto& item : input_info) {
|
for (auto& item : input_info) {
|
||||||
@ -106,14 +111,33 @@ benchmark_app::InputsInfo getInputsInfo(const std::string& shape_string, const s
|
|||||||
}
|
}
|
||||||
info_map[name] = info;
|
info_map[name] = info;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Update scale and mean
|
||||||
|
std::map<std::string, std::vector<float>> scale_map = parseScaleOrMean(scale_string, info_map);
|
||||||
|
std::map<std::string, std::vector<float>> mean_map = parseScaleOrMean(mean_string, info_map);
|
||||||
|
|
||||||
|
for (auto& item : info_map) {
|
||||||
|
if (item.second.isImage()) {
|
||||||
|
item.second.scale.assign({1, 1, 1});
|
||||||
|
item.second.mean.assign({0, 0, 0});
|
||||||
|
|
||||||
|
if (scale_map.count(item.first)) {
|
||||||
|
item.second.scale = scale_map.at(item.first);
|
||||||
|
}
|
||||||
|
if (mean_map.count(item.first)) {
|
||||||
|
item.second.mean = mean_map.at(item.first);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return info_map;
|
return info_map;
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
benchmark_app::InputsInfo getInputsInfo(const std::string& shape_string, const std::string& layout_string, const size_t batch_size,
|
benchmark_app::InputsInfo getInputsInfo(const std::string& shape_string, const std::string& layout_string, const size_t batch_size,
|
||||||
const std::map<std::string, T>& input_info) {
|
const std::string& scale_string, const std::string& mean_string, const std::map<std::string, T>& input_info) {
|
||||||
bool reshape_required = false;
|
bool reshape_required = false;
|
||||||
return getInputsInfo<T>(shape_string, layout_string, batch_size, input_info, reshape_required);
|
return getInputsInfo<T>(shape_string, layout_string, batch_size, scale_string, mean_string, input_info, reshape_required);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef USE_OPENCV
|
#ifdef USE_OPENCV
|
||||||
|
@@ -54,21 +54,27 @@ private:
      * @param output Vector of indexes for the top n places
      */
     template <class T>
-    void topResults(unsigned int n, InferenceEngine::TBlob<T>& input, std::vector<unsigned>& output) {
-        InferenceEngine::SizeVector dims = input.getTensorDesc().getDims();
+    void topResults(unsigned int n, InferenceEngine::Blob::Ptr& input, std::vector<unsigned>& output) {
+        InferenceEngine::SizeVector dims = input->getTensorDesc().getDims();
         size_t input_rank = dims.size();
         if (!input_rank || !dims[0])
             IE_THROW() << "Input blob has incorrect dimensions!";
         size_t batchSize = dims[0];
-        std::vector<unsigned> indexes(input.size() / batchSize);
+        std::vector<unsigned> indexes(input->size() / batchSize);
 
-        n = static_cast<unsigned>(std::min<size_t>((size_t)n, input.size()));
+        n = static_cast<unsigned>(std::min<size_t>((size_t)n, input->size()));
 
         output.resize(n * batchSize);
 
+        InferenceEngine::MemoryBlob::CPtr moutput = InferenceEngine::as<InferenceEngine::MemoryBlob>(input);
+        if (!moutput) {
+            IE_THROW() << "Output blob should be inherited from MemoryBlob";
+        }
+        // locked memory holder should be alive all time while access to its buffer happens
+        auto moutputHolder = moutput->rmap();
 
         for (size_t i = 0; i < batchSize; i++) {
-            size_t offset = i * (input.size() / batchSize);
-            T* batchData = input.data();
+            size_t offset = i * (input->size() / batchSize);
+            T* batchData = moutputHolder.as<T*>();
             batchData += offset;
 
             std::iota(std::begin(indexes), std::end(indexes), 0);
@@ -88,16 +94,15 @@ private:
      * @param input 1D blob that contains probabilities
      * @param output Vector of indexes for the top n places
      */
-    void topResults(unsigned int n, InferenceEngine::Blob& input, std::vector<unsigned>& output) {
+    void topResults(unsigned int n, InferenceEngine::Blob::Ptr& input, std::vector<unsigned>& output) {
 #define TBLOB_TOP_RESULT(precision) \
     case InferenceEngine::Precision::precision: { \
         using myBlobType = InferenceEngine::PrecisionTrait<InferenceEngine::Precision::precision>::value_type; \
-        InferenceEngine::TBlob<myBlobType>& tblob = dynamic_cast<InferenceEngine::TBlob<myBlobType>&>(input); \
-        topResults(n, tblob, output); \
+        topResults<myBlobType>(n, input, output); \
         break; \
     }
 
-        switch (input.getTensorDesc().getPrecision()) {
+        switch (input->getTensorDesc().getPrecision()) {
             TBLOB_TOP_RESULT(FP32);
             TBLOB_TOP_RESULT(FP64);
             TBLOB_TOP_RESULT(FP16);
@@ -111,7 +116,7 @@ private:
             TBLOB_TOP_RESULT(U64);
             TBLOB_TOP_RESULT(I64);
         default:
-            IE_THROW() << "cannot locate blob for precision: " << input.getTensorDesc().getPrecision();
+            IE_THROW() << "cannot locate blob for precision: " << input->getTensorDesc().getPrecision();
         }
 
 #undef TBLOB_TOP_RESULT
@@ -129,7 +134,7 @@ public:
         if (_imageNames.size() != _batchSize) {
             throw std::logic_error("Batch size should be equal to the number of images.");
         }
-        topResults(_nTop, *_outBlob, _results);
+        topResults(_nTop, _outBlob, _results);
     }
 
     /**
@@ -146,18 +151,17 @@ public:
         std::wcout << std::endl << std::endl;
         printHeader();
 
+        InferenceEngine::MemoryBlob::CPtr moutput = InferenceEngine::as<InferenceEngine::MemoryBlob>(_outBlob);
+        auto moutputHolder = moutput->rmap();
         for (size_t id = image_id * _nTop, cnt = 0; id < (image_id + 1) * _nTop; ++cnt, ++id) {
             std::cout.precision(7);
             /** Getting probability for resulting class **/
-            InferenceEngine::MemoryBlob::CPtr moutput = InferenceEngine::as<InferenceEngine::MemoryBlob>(_outBlob);
             if (!moutput) {
                 throw std::logic_error("We expect _outBlob to be inherited from MemoryBlob in "
                                        "ClassificationResult::print, "
                                        "but by fact we were not able to cast _outBlob to MemoryBlob");
             }
             // locked memory holder should be alive all time while access to its buffer happens
-            auto moutputHolder = moutput->rmap();
 
             const auto result =
                 moutputHolder
                     .as<const InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type*>()[_results.at(id) +
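Editor's illustration (not part of the patch): the hunks above move from TBlob::data() to the MemoryBlob map/unmap protocol. A small self-contained sketch of that read pattern, assuming an FP32 blob; only the as<MemoryBlob>/rmap()/as<T*>() calls mirror the usage above, the rest is illustrative.

    #include <inference_engine.hpp>

    // Reads the first element of a blob through a locked memory holder.
    // The holder must stay alive for as long as the raw pointer is used.
    float readFirstValue(const InferenceEngine::Blob::Ptr& blob) {
        auto mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
        if (!mblob)
            IE_THROW() << "Blob is expected to be a MemoryBlob";
        auto holder = mblob->rmap();                 // read-only mapping, unmapped on destruction
        const float* data = holder.as<const float*>();
        return data[0];
    }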
@@ -109,7 +109,7 @@ public:
         return std::make_shared<CustomReluOp>(new_args.at(0));
     }
 
-    bool visit_attributes(ngraph::AttributeVisitor& visitor) override {
+    bool visit_attributes(ngraph::AttributeVisitor&) override {
         return true;
     }
 };
@@ -108,7 +108,7 @@ TBlob<uint8_t>::CPtr ReadWeights(std::string filepath) {
 std::shared_ptr<Function> createNgraphFunction() {
     TBlob<uint8_t>::CPtr weightsPtr = ReadWeights(FLAGS_m);
 
-    if (weightsPtr->byteSize() != 1724336)
+    if (weightsPtr->byteSize() != 6897344)
         IE_THROW() << "Incorrect weights file. This sample works only with LeNet "
                       "classification network.";
 
@@ -274,31 +274,108 @@ DeviceName AutoInferencePlugin::SelectDevice(const std::vector<DeviceName>& meta
     }
 
     std::vector<DeviceName> CPU;
-    std::vector<DeviceName> GPU;
+    std::vector<DeviceName> dGPU;
+    std::vector<DeviceName> iGPU;
+    std::vector<DeviceName> MYRIAD;
+    std::vector<DeviceName> VPUX;
 
     for (auto& item : metaDevices) {
         if (item.find("CPU") == 0) {
             CPU.push_back(item);
             continue;
         }
+        if (item.find("MYRIAD") == 0) {
+            MYRIAD.push_back(item);
+            continue;
+        }
+        if (item.find("VPUX") == 0) {
+            VPUX.push_back(item);
+            continue;
+        }
         if (item.find("GPU") == 0) {
-            GPU.push_back(item);
+            auto gpuFullDeviceName = GetCore()->GetMetric(item, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
+            if (gpuFullDeviceName.find("iGPU") != std::string::npos) {
+                iGPU.push_back(item);
+            } else if (gpuFullDeviceName.find("dGPU") != std::string::npos) {
+                dGPU.push_back(item);
+            }
             continue;
         }
     }
 
-    if (CPU.empty() && GPU.empty()) {
+    if (CPU.empty() && dGPU.empty() && iGPU.empty() && MYRIAD.empty() && VPUX.empty()) {
         IE_THROW(NotFound) << "No available device found";
     }
 
-    // Sort GPU by name: GPU.2 > GPU.1 > GPU.0 > GPU, so we always choose the GPU[0] as best device
-    std::sort(GPU.begin(), GPU.end(), [](const DeviceName& a, const DeviceName& b)->bool{return b < a;});
+    // Priority of selecting device: dGPU > VPUX > iGPU > MYRIAD > CPU
+    if (!dGPU.empty()) {
+        for (auto&& item : dGPU) {
+            std::vector<std::string> capability = GetCore()->GetMetric(item, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+            auto supportNetwork = std::find(capability.begin(), capability.end(), networkPrecision);
+            if (supportNetwork != capability.end()) {
+                return item;
+            }
+        }
+    } else if (!VPUX.empty()) {
+        for (auto&& item : VPUX) {
+            std::vector<std::string> capability = GetCore()->GetMetric(item, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+            auto supportNetwork = std::find(capability.begin(), capability.end(), networkPrecision);
+            if (supportNetwork != capability.end()) {
+                return item;
+            }
+        }
+    } else if (!iGPU.empty()) {
+        for (auto&& item : iGPU) {
+            std::vector<std::string> capability = GetCore()->GetMetric(item, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+            auto supportNetwork = std::find(capability.begin(), capability.end(), networkPrecision);
+            if (supportNetwork != capability.end()) {
+                return item;
+            }
+        }
+    } else if (!MYRIAD.empty()) {
+        for (auto&& item : MYRIAD) {
+            std::vector<std::string> capability = GetCore()->GetMetric(item, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+            auto supportNetwork = std::find(capability.begin(), capability.end(), networkPrecision);
+            if (supportNetwork != capability.end()) {
+                return item;
+            }
+        }
+    }
 
-    for (auto&& item : GPU) {
-        std::vector<std::string> capability = GetCore()->GetMetric(item, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
-        auto res = std::find(capability.begin(), capability.end(), networkPrecision);
-        if (res != capability.end()) {
-            return item;
+    // If network is FP32 but there is no device support FP32, offload FP32 network to device support FP16.
+    if (networkPrecision == "FP32") {
+        if (!dGPU.empty()) {
+            for (auto&& item : dGPU) {
+                std::vector<std::string> capability = GetCore()->GetMetric(item, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+                auto supportNetwork = std::find(capability.begin(), capability.end(), "FP16");
+                if (supportNetwork != capability.end()) {
+                    return item;
+                }
+            }
+        } else if (!VPUX.empty()) {
+            for (auto&& item : VPUX) {
+                std::vector<std::string> capability = GetCore()->GetMetric(item, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+                auto supportNetwork = std::find(capability.begin(), capability.end(), "FP16");
+                if (supportNetwork != capability.end()) {
+                    return item;
+                }
+            }
+        } else if (!iGPU.empty()) {
+            for (auto&& item : iGPU) {
+                std::vector<std::string> capability = GetCore()->GetMetric(item, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+                auto supportNetwork = std::find(capability.begin(), capability.end(), "FP16");
+                if (supportNetwork != capability.end()) {
+                    return item;
+                }
+            }
+        } else if (!MYRIAD.empty()) {
+            for (auto&& item : MYRIAD) {
+                std::vector<std::string> capability = GetCore()->GetMetric(item, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+                auto supportNetwork = std::find(capability.begin(), capability.end(), "FP16");
+                if (supportNetwork != capability.end()) {
+                    return item;
+                }
+            }
         }
     }
 
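Editor's illustration (not part of the patch): the selection logic above repeats the same capability probe for each device class. A compact, hypothetical sketch of the same priority walk (dGPU > VPUX > iGPU > MYRIAD), written against a generic supports(device, precision) callback rather than the real GetCore()/GetMetric API.

    #include <functional>
    #include <string>
    #include <vector>

    using DeviceName = std::string;

    // Hypothetical helper: probes only the first non-empty candidate group, in priority
    // order, and returns the first device whose capability check succeeds; empty if none.
    static DeviceName selectByPriority(const std::vector<std::vector<DeviceName>>& byPriority,
                                       const std::string& precision,
                                       const std::function<bool(const DeviceName&, const std::string&)>& supports) {
        for (const auto& group : byPriority) {
            if (group.empty())
                continue;
            for (const auto& device : group) {
                if (supports(device, precision))
                    return device;
            }
            break;  // mirrors the else-if chain above: only the first non-empty group is probed
        }
        return {};
    }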
@@ -88,11 +88,11 @@ void CLDNNGraph::Build() {
 
 std::shared_ptr<cldnn::network> CLDNNGraph::BuildNetwork(std::shared_ptr<cldnn::program> program) {
     OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "CLDNNGraph::BuildNetwork");
-    auto network = std::make_shared<cldnn::network>(*program, m_stream_id);
+    auto network = std::make_shared<cldnn::network>(program, m_stream_id);
 
     if (!m_config.graph_dumps_dir.empty() && m_stream_id == 0) {
         static int net_id = 0;
-        auto steps_info = network->get_optimization_steps_info();
+        auto steps_info = network->get_optimizer_passes_info();
         size_t step_idx = 0;
         for (auto& step : steps_info) {
             CNNNetwork net(GetExecGraphInfoByPrimitivesInfo(step.second, true));
@@ -51,8 +51,10 @@ public:
     InferenceEngine::SizeVector GetOutputSize(std::string outName) const;
     std::string MapOutputName(std::string outName) const;
     std::string getName() const { return m_networkName; }
+    std::mutex& get_mutex() { return m_infer_mutex; }
 
 protected:
+    std::mutex m_infer_mutex;
     std::string m_networkName;
     Config m_config;
 
File diff suppressed because it is too large
@@ -23,6 +23,7 @@ class CLDNNExecNetwork;
 
 class CLDNNInferRequest : public InferenceEngine::IInferRequestInternal {
 public:
+    using Ptr = std::shared_ptr<CLDNNInferRequest>;
     // make sure all blobs and cldnn::memory objects
     // are in place and valid
     void checkBlobs() override;
@@ -45,8 +46,9 @@ public:
     void EnableProfiling() { m_useProfiling = true; }
     void EnableStreams() { m_useStreams = true; }
 
-protected:
-    std::map<std::string, cldnn::memory::ptr> inputsMemory;
+private:
+    InferenceEngine::BlobMap _deviceOutputs;
+    std::map<std::string, cldnn::primitive_id> inputsMap;
     std::map<std::string, cldnn::primitive_id> outputsMap;
 
     bool m_useProfiling;
@@ -58,24 +60,25 @@ protected:
     std::map<std::string, std::vector<buf_info>> batchOutputs;
     InferenceEngine::IStreamsExecutor* streamExecutor = nullptr;
 
-    InferenceEngine::Blob::Ptr createInputBlob(const InferenceEngine::TensorDesc& desc, uint8_t* mem_ptr = nullptr);
-    InferenceEngine::Blob::Ptr createOutputBlob(const InferenceEngine::TensorDesc& desc, uint8_t* mem_ptr = nullptr);
-    void copyOutputData(cldnn::memory::ptr outputMemory, InferenceEngine::Blob::Ptr bptr, buf_info* bi = nullptr);
-    void copyInputData(std::shared_ptr<cldnn::network> network, const cldnn::primitive_id &inputName,
-                       const cldnn::layout& inputLayout, const InferenceEngine::Blob &inputBlob,
-                       buf_info* bi = nullptr);
-
-    void input_attach(cldnn::primitive_id name, cldnn::memory::ptr inputMem);
-    void input_alloc(cldnn::primitive_id name, const cldnn::layout& layout);
-    void AllocateInputs();
-    void AllocateOutputs();
-    void AllocateInputsDyn();
-    void AllocateOutputsDyn();
-    void execAndParse();
-    void execAndParseDyn();
-
-    void PrepareInput(const cldnn::primitive_id &inputName, const InferenceEngine::Blob &inputBlob);
-    void PrepareInputDyn(const cldnn::primitive_id &inputName, const InferenceEngine::Blob &inputBlob);
+    void prepare_input(const cldnn::primitive_id &inputName, InferenceEngine::Blob::Ptr &inputBlob,
+                       std::vector<cldnn::event::ptr>& dependencies);
+    void prepare_output(const cldnn::primitive_id& outputName, InferenceEngine::Blob::Ptr& outputBlob);
+
+    InferenceEngine::Blob::Ptr create_input_host_blob(const InferenceEngine::TensorDesc& desc, uint8_t* mem_ptr = nullptr);
+    InferenceEngine::Blob::Ptr create_output_host_blob(const InferenceEngine::TensorDesc& desc, uint8_t* mem_ptr = nullptr);
+    InferenceEngine::Blob::Ptr create_device_blob(const InferenceEngine::TensorDesc& desc, const cldnn::layout& layout);
+
+    void copy_output_data(cldnn::memory::ptr outputMemory, InferenceEngine::Blob::Ptr bptr, buf_info* bi = nullptr);
+    void copy_input_data(std::shared_ptr<cldnn::network> network, const cldnn::primitive_id &inputName,
+                         const cldnn::layout& inputLayout, const InferenceEngine::Blob &inputBlob,
+                         buf_info* bi = nullptr);
+
+    void allocate_inputs();
+    void allocate_outputs();
+    void allocate_inputs_dynamic();
+    void allocate_outputs_dynamic();
+    void exec_and_parse(const std::vector<cldnn::event::ptr>& dependencies);
+    void exec_and_parse_dynamic();
 };
 
 };  // namespace CLDNNPlugin
@@ -204,6 +204,7 @@ REGISTER_FACTORY(v5, Loop);
 // ------------------------------ Supported v6 ops ------------------------------ //
 REGISTER_FACTORY(v6, CTCGreedyDecoderSeqLen);
 REGISTER_FACTORY(v6, MVN);
+REGISTER_FACTORY(v6, GatherElements);
 
 // ------------------------------ Supported v7 ops ------------------------------ //
 REGISTER_FACTORY(v7, Gather);
@@ -6,7 +6,6 @@
 #include "ngraph/ops.hpp"
 #include "ngraph_ops/nms_ie_internal.hpp"
 #include "cldnn_itt.h"
-#include "cldnn/runtime/debug_configuration.hpp"
 
 using namespace InferenceEngine;
 using namespace InferenceEngine::details;
@@ -178,16 +177,11 @@ std::shared_ptr<cldnn::program> Program::BuildProgram(const std::vector<std::sha
                                                       bool createTopologyOnly) {
     OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "Program::BuildProgram");
     cldnn::build_options options;
-    GPU_DEBUG_GET_INSTANCE(debug_config);
 
     if (!m_config.graph_dumps_dir.empty()) {
         options.set_option(cldnn::build_option::graph_dumps_dir(m_config.graph_dumps_dir));
     }
 
-    GPU_DEBUG_IF(!debug_config->dump_graphs.empty()) {
-        options.set_option(cldnn::build_option::graph_dumps_dir(debug_config->dump_graphs));
-    }
-
     options.set_option(cldnn::build_option::optimize_data(true));
     options.set_option(cldnn::build_option::tuning_config(m_config.tuningConfig));
 
@@ -199,7 +193,7 @@ std::shared_ptr<cldnn::program> Program::BuildProgram(const std::vector<std::sha
         return {};
     } else {
         OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "Program::CreateProgram");
-        auto program = std::make_shared<cldnn::program>(*m_engine, *m_topology, options);
+        auto program = cldnn::program::build_program(*m_engine, *m_topology, options);
         CleanupBuild();
 
         return program;
@@ -81,22 +81,13 @@ bool CLDNNRemoteBlobImpl::is_locked() const noexcept {
     return lockedHolder != nullptr;
 }
 
-void CLDNNRemoteBlobImpl::allocate_if_needed() {
-    OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "CLDNNRemoteBlobImpl::AllocateIfNeeded");
-    auto _impl = getContextImpl(m_context.lock());
-    _impl->acquire_lock();
-
-    if (m_memObject == nullptr) {
-        allocate();
-    }
-
-    _impl->release_lock();
-}
-
 void CLDNNRemoteBlobImpl::allocate() noexcept {
+    OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "CLDNNRemoteBlobImpl::Allocate");
     assert(m_memObject == nullptr);
 
-    std::shared_ptr<cldnn::engine> eng = getContextImpl(m_context.lock())->GetEngine();
+    auto _impl = getContextImpl(m_context.lock());
+    _impl->acquire_lock();
+    std::shared_ptr<cldnn::engine> eng = _impl->GetEngine();
 
     switch (m_mem_type) {
     case BlobType::BT_BUF_INTERNAL: {
@@ -129,6 +120,7 @@ void CLDNNRemoteBlobImpl::allocate() noexcept {
     default:
         m_memObject.reset();
     }
+    _impl->release_lock();
 }
 
 const std::shared_ptr<IAllocator>& CLDNNRemoteBlobImpl::getAllocator() const noexcept {
@@ -154,7 +146,7 @@ void CLDNNRemoteBlobImpl::lock() const {
 }
 
 void CLDNNRemoteBlobImpl::unlock() const {
-    lockedHolder.release();
+    lockedHolder.reset();
 }
 
 LockedMemory<void> CLDNNRemoteBlobImpl::buffer() noexcept {
@@ -44,8 +44,8 @@ public:
     explicit CLDNNRemoteBlobImpl(InferenceEngine::gpu::ClContext::Ptr context,
                                  cldnn::stream& stream,
                                  const cldnn::layout& layout,
-                                 cldnn::shared_handle mem,
-                                 cldnn::shared_surface surf,
+                                 cldnn::shared_handle mem = nullptr,
+                                 cldnn::shared_surface surf = 0,
                                  uint32_t plane = 0,
                                  BlobType mem_type = BT_BUF_INTERNAL);
 
@@ -64,7 +64,6 @@ public:
 
     bool is_allocated() const noexcept;
    bool is_locked() const noexcept;
-    void allocate_if_needed();
     cldnn::memory::ptr getMemory() { return m_memObject; }
 
 protected:
@@ -99,10 +98,10 @@ public:
               cldnn::stream& stream,
               const InferenceEngine::TensorDesc& desc,
               const cldnn::layout& layout,
-              cldnn::shared_handle mem,
-              cldnn::shared_surface surf,
-              uint32_t plane,
-              CLDNNRemoteBlobImpl::BlobType mem_type)
+              cldnn::shared_handle mem = nullptr,
+              cldnn::shared_surface surf = 0,
+              uint32_t plane = 0,
+              CLDNNRemoteBlobImpl::BlobType mem_type = CLDNNRemoteBlobImpl::BlobType::BT_BUF_INTERNAL)
         : _impl(context, stream, layout, mem, surf, plane, mem_type)
         , TpublicAPI(desc) {}
 
@@ -184,7 +183,7 @@ public:
      * @brief Maps handle to heap memory accessible by any memory manipulation routines.
      * @return Generic pointer to memory
      */
-    void* lock(void* handle, InferenceEngine::LockOp = InferenceEngine::LOCK_FOR_WRITE) noexcept override { return nullptr; };
+    void* lock(void* handle, InferenceEngine::LockOp = InferenceEngine::LOCK_FOR_WRITE) noexcept override { return handle; };
     /**
      * @brief Unmaps memory by handle with multiple sequential mappings of the same handle.
      * The multiple sequential mappings of the same handle are suppose to get the same
@@ -12,14 +12,14 @@
 namespace CLDNNPlugin {
 
 static cldnn::concatenation::concatenation_axis GetConcatAxis(int32_t axis, size_t rank) {
-    if (axis >= rank)
+    unsigned cldnn_axis = axis >= 0 ? axis : axis + static_cast<int32_t>(rank);
+    if (cldnn_axis >= rank)
         IE_THROW() << "Concatenation axis exceeds number of dimensions";
 
     // Difference in dimension ordering between IE and clDNN,
     // reverse spatial dimensions after batch and feature.
-    unsigned cldnn_axis = axis;
-    if (axis >= 2) {
-        auto spatial_axis = axis - 2;
+    if (cldnn_axis >= 2) {
+        auto spatial_axis = cldnn_axis - 2;
         // Default and minimum number of dimensions is 4
         auto spatial_size = std::max<size_t>(rank, 4) - 2;
         cldnn_axis = spatial_size - spatial_axis - 1 + 2;
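Editor's note (illustrative worked example, not part of the patch): the IE to clDNN remapping above reverses only the spatial axes. For a rank-4 NCHW tensor, spatial_size = max(4, 4) - 2 = 2, so axis 2 (H) becomes 2 - 0 - 1 + 2 = 3 and axis 3 (W) becomes 2 - 1 - 1 + 2 = 2, while axes 0 (batch) and 1 (feature) stay in place; with the new code a negative axis such as -1 is first normalized to rank - 1 = 3 before this reversal.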
inference-engine/src/cldnn_engine/ops/gather_elements.cpp (new file, 66 lines)
@@ -0,0 +1,66 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "cldnn_program.h"
+#include "cldnn_common_utils.h"
+
+#include "ngraph/op/gather_elements.hpp"
+#include "ngraph/op/constant.hpp"
+
+#include "cldnn/primitives/gather_elements.hpp"
+
+namespace CLDNNPlugin {
+
+static cldnn::gather_elements::gather_elements_axis GetGatherAxis(int axis, unsigned rank) {
+    if (axis < 0)
+        axis += rank;
+    if (axis < 0 || axis >= rank)
+        IE_THROW() << "GatherElements axis is not correspond to number of dimensions";
+
+    // Difference in dimension ordering between IE and clDNN,
+    // reverse spatial dimensions after batch and feature.
+    unsigned cldnn_axis = axis;
+    if (axis >= 2) {
+        auto spatial_axis = axis - 2;
+        // Default and minimum number of dimensions is 4
+        auto spatial_size = std::max(rank, 4u) - 2;
+        cldnn_axis = spatial_size - spatial_axis - 1 + 2;
+    }
+
+    switch (cldnn_axis) {
+    case 0: return cldnn::gather_elements::gather_elements_axis::along_b;
+    case 1: return cldnn::gather_elements::gather_elements_axis::along_f;
+    case 2: return cldnn::gather_elements::gather_elements_axis::along_x;
+    case 3: return cldnn::gather_elements::gather_elements_axis::along_y;
+    case 4: return cldnn::gather_elements::gather_elements_axis::along_z;
+    case 5: return cldnn::gather_elements::gather_elements_axis::along_w;
+    default: IE_THROW() << "Unsupported GatherElements axis: " << axis;
+    }
+    return cldnn::gather_elements::gather_elements_axis::along_f;  // shouldn't get here
+}
+
+void CreateGatherElementsOp(Program& p, const std::shared_ptr<ngraph::op::v6::GatherElements>& op) {
+    p.ValidateInputs(op, {2});
+    auto inputPrimitives = p.GetInputPrimitiveIDs(op);
+    std::string layerName = layer_type_name_ID(op);
+
+    size_t rank = op->get_input_shape(0).size();
+    int32_t axis = static_cast<int32_t>(op->get_axis());
+
+    auto outLayout = DefaultFormatForDims(op->get_output_shape(0).size());
+
+    auto primitive = cldnn::gather_elements(layerName,
+                                            inputPrimitives[0],
+                                            inputPrimitives[1],
+                                            outLayout,
+                                            CldnnTensorFromIEDims(op->get_output_shape(0)),
+                                            GetGatherAxis(axis, rank));
+
+    p.AddPrimitive(primitive);
+    p.AddPrimitiveToProfiler(op);
+}
+
+REGISTER_FACTORY_IMPL(v6, GatherElements);
+
+}  // namespace CLDNNPlugin
@@ -58,7 +58,8 @@ void AdvanceCnnOperationIfAllApplied(const std::vector<intel_dnn_component_t>& c
 
 template <class T>
 void AdvancePwlOperationIfAllApplied(const std::vector<intel_dnn_component_t>& component, int i, T*& operation) {
-    if (i == component.size() - 1 || (component[i + 1].operation != kDnnMaxPoolOp)) {
+    if (i == component.size() - 1 || ((component[i + 1].operation != kDnnMaxPoolOp)
+        && (component[i + 1].operation != kDnnPiecewiselinearOp))) {
         operation++;
     }
 }
@@ -227,7 +227,7 @@ OvGnaType OvGnaTypeIntFromBytes(T bytesPerElement) {
     return r->second;
 }
 
-static std::string OvGnaTypeToString(OvGnaType type) {
+inline std::string OvGnaTypeToString(OvGnaType type) {
     static const std::map<OvGnaType, std::string> typeToString = {
         {OvGnaTypeInt8, "OvGnaTypeInt8"},
         {OvGnaTypeInt16, "OvGnaTypeInt16"},
@@ -241,7 +241,7 @@ static std::string OvGnaTypeToString(OvGnaType type) {
     return r->second;
 }
 
-static std::string OvGnaModeToString(OvGnaMode mode) {
+inline std::string OvGnaModeToString(OvGnaMode mode) {
     static const std::map<OvGnaMode, std::string> modeToString = {
         {OvGnaModeDefault, "OvGnaModeDefault"},
         {OvGnaModeDisabled, "OvGnaModeDisabled"},
@@ -24,6 +24,10 @@ constexpr uint32_t noOfInputsLowPrecDivisor = 16;
 
 constexpr uint32_t affineMaxBatchSize = 8;
 
+constexpr uint32_t maxPoolMaxWindowSize = 6;
+
+constexpr uint32_t copyMaxGrouping = 8;
+
 namespace Cnn2D {
 struct RangeLimit {
     uint32_t min;
@@ -87,6 +91,8 @@ class Validator {
 
     static void ThrowIfNotEmpty(const std::string prefix, const std::string error);
 public:
+    Validator() = default;
+
     void ValidateCnn2D(std::string name, const uint32_t inHeight, const uint32_t inWidth,
                        const uint32_t inChannels, const uint32_t kH, const uint32_t kW, const uint32_t kN,
                        const uint32_t strideH, const uint32_t strideW, OvGnaType inPrecision) const;
@@ -63,6 +63,7 @@
 #include "transformations/swap_input_matmul_gna.hpp"
 #include "transformations/convert_matmul_to_pointwise_convolution.hpp"
 #include "transformations/split_convolution_with_large_buffer_size.hpp"
+#include "transformations/decompose_2d_conv.hpp"
 #include "transformations/convert_padded2valid_conv.hpp"
 
 #include <ngraph/opsets/opset7.hpp>
@@ -673,6 +674,11 @@ void GNAPlugin::AddDebugProperties(const InferenceEngine::CNNLayerPtr layer,
 void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
     OV_ITT_SCOPED_TASK(itt::domains::GNAPlugin, "LoadNetwork");
     std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
+
+    if (!gnaFlags->sw_fp32) {
+        InitGNADevice();
+    }
+
     if (_network.getFunction()) {
         CNNNetwork clonedNetwork = InferenceEngine::cloneNetwork(_network);
         const auto& graph = clonedNetwork.getFunction();
@@ -682,6 +688,11 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
         manager.register_pass<ngraph::pass::ConvertPriorBox>();
         manager.register_pass<ngraph::pass::CommonOptimizations>();
         manager.register_pass<ConvertPadded2ValidConv>();
+        if (config.gnaCompileTarget == InferenceEngine::GNAConfigParams::GNA_TARGET_2_0) {
+            manager.register_pass<Decompose2DConvTransposedWithBiasAF>();
+            manager.register_pass<Decompose2DConvTransposedWithBias>();
+            manager.register_pass<Decompose2DConv>();
+        }
         // TODO enable this transformation for networks with convolutions
         if (!ngraph::op::util::has_op_with_type<ngraph::opset7::Convolution>(graph)) {
             manager.register_pass<ConvertMatmulWithFqToPointWiseConvolution>();
@@ -870,15 +881,16 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
     // fill in extra storage with memory layers
     graphCompiler.fillMemoryConnections(memoryPairs);
 
-    if (!graphCompiler.memory_connection.empty()) {
+    if (!graphCompiler.memory_connection.empty() && gnaFlags->gna_lib_async_threads_num != 1) {
+        // TODO: check if updating the number of threads is needed for sw_fp32
         gnaFlags->gna_lib_async_threads_num = 1;
+        if (!gnaFlags->sw_fp32)
+            InitGNADevice();
     }
 
     if (gnaFlags->sw_fp32) {
         gnamem.reset(new gna_memory_type(memory::make_polymorph<std::allocator<uint8_t>>()));
         graphCompiler.setGNAMemoryPtr(gnamem);
-    } else {
-        InitGNADevice();
     }
 
     // keep inputs information and create input primitives
@@ -90,8 +90,8 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& config) {
         }
     }
     auto scale_factor = InferenceEngine::CNNLayer::ie_parse_float(value);
-    if (fp32eq(scale_factor, 0.0f)) {
-        THROW_GNA_EXCEPTION << "input scale factor of 0.0f not supported";
+    if (fp32eq(scale_factor, 0.0f) || std::isinf(scale_factor)) {
+        THROW_GNA_EXCEPTION << "input scale factor of 0.0f or +-inf not supported";
     }
     // missing scale factors are set to be 1.0f
     if (inputScaleFactors.size() <= input_index) {
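Editor's illustration (not part of the patch): a minimal standalone sketch of the tightened validation above, rejecting both zero and infinite scale factors before they reach quantization; the function name and tolerance value are assumptions.

    #include <cmath>
    #include <stdexcept>

    // Illustrative only: mirrors the intent of the check above outside the plugin.
    static void validateScaleFactor(float scale_factor) {
        const float eps = 1e-7f;  // assumed fp32 comparison tolerance, not the plugin's fp32eq()
        if (std::fabs(scale_factor) < eps || std::isinf(scale_factor)) {
            throw std::invalid_argument("input scale factor of 0.0f or +-inf not supported");
        }
    }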
@@ -2173,7 +2173,7 @@ void MoveFakeQuantizeLayerIntoQuantParamsPass :: run() {
     // Find all output layers connected to FQ
     auto nextLayers = CNNNetGetAllNextLayersSkipCertain(*fqLayer, -1, donotSkip);
     if (nextLayers.empty()) {
-        return;
+        continue;
     }
 
     if (isFQFuseAllowed) {
@@ -14,72 +14,26 @@
 #include <ngraph/rt_info.hpp>
 #include <ngraph/pass/manager.hpp>
 #include <ie_common.h>
+#include "utils/transformation_helper.hpp"
 
 
 using namespace GNAPluginNS;
 
 NGRAPH_RTTI_DEFINITION(ConvertPadded2ValidConv, "ConvertPadded2ValidConv", 0);
 
-struct ConvData {
-    size_t input_height;
-    size_t input_width;
-    size_t input_channel_count;
-    size_t filter_count;
-    size_t pads_begin_width;
-    size_t pads_begin_height;
-    size_t pads_end_width;
-    size_t pads_end_height;
-    ngraph::op::PadType padding_type;
-    ngraph::element::Type element_type;
-};
-
-static bool VerifyAndGetConvParams(std::shared_ptr<ngraph::opset7::Convolution> conv, ConvData& conv_data) {
+static bool VerifyAndGetConvData(std::shared_ptr<ngraph::opset7::Convolution> conv, ConvData& conv_data) {
     const auto& input = conv->input_value(0);
 
-    // We support only 2D conv batch 1
-    if (conv->get_dilations().size() != 2 ||
-        conv->get_strides().size() != 2 ||
-        input.get_shape()[0] != 1) {
+    // We support only batch 1
+    if (input.get_shape()[0] != 1) {
         return false;
     }
 
-    conv_data.padding_type = conv->get_auto_pad();
-    conv_data.input_channel_count = conv->input_value(0).get_shape()[1];
-    conv_data.input_height = conv->input_value(0).get_shape()[2];
-    conv_data.input_width = conv->input_value(0).get_shape()[3];
-    conv_data.filter_count = conv->input_value(1).get_shape()[0];
-    conv_data.pads_begin_height = conv->get_pads_begin()[0];
-    conv_data.pads_begin_width = conv->get_pads_begin()[1];
-    conv_data.pads_end_height = conv->get_pads_end()[0];
-    conv_data.pads_end_width = conv->get_pads_end()[1];
-    conv_data.element_type = conv->get_element_type();
+    GetConvData(conv, conv_data);
 
     return conv_data.pads_begin_height || conv_data.pads_end_height || conv_data.pads_begin_width || conv_data.pads_end_width;
 }
 
-static bool TransposeOrderMatches(std::shared_ptr<ngraph::opset7::Transpose> transpose, std::vector<size_t> order) {
-    if (!transpose)
-        return false;
-    const ngraph::Output<ngraph::Node>& transpose_order = transpose->input_value(1);
-    auto transpose_order_dim = transpose_order.get_shape().size();
-
-    if (transpose_order_dim != 1 || transpose_order.get_shape()[0] != order.size())
-        return false;
-
-    auto const_with_order_values = std::dynamic_pointer_cast<ngraph::opset7::Constant>(transpose_order.get_node_shared_ptr());
-    if (!const_with_order_values)
-        return false;
-
-    const auto data = const_with_order_values->cast_vector<size_t>();
-    if (data.empty())
-        return false;
-
-    if (!std::equal(order.begin(), order.end(), data.begin()))
-        return false;
-
-    return true;
-}
-
 static bool VerifyBias(std::shared_ptr<ngraph::opset7::Add> bias, const size_t& filter_count) {
     auto add_const = std::dynamic_pointer_cast<ngraph::opset7::Constant>(bias->input_value(0).get_node_shared_ptr());
 
@@ -91,16 +45,6 @@ static bool VerifyBias(std::shared_ptr<ngraph::opset7::Add> bias, const size_t&
     return (add_const && shape_size(add_const->get_shape()) == filter_count);
 }
 
-static std::shared_ptr<ngraph::opset7::StridedSlice> FlatCrop(ngraph::Output<ngraph::Node> input, size_t offset, size_t size) {
-    return std::make_shared<ngraph::opset7::StridedSlice>(
-        input, // data
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}), // begin sice index
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset + size}), // end slice index
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides
-        std::vector<int64_t>{1, 0}, // begin mask
-        std::vector<int64_t>{1, 0}); // end mask
-}
-
 static void InsertPadding(ngraph::OutputVector& input_rows_to_concat, size_t size, const std::shared_ptr<ngraph::opset7::Convolution>& conv,
                           const std::shared_ptr<ngraph::opset7::Constant> padding_const, size_t biggest_padding) {
 
@@ -226,7 +170,7 @@ static bool Convert(std::shared_ptr<ngraph::Node> leading_transpose,
 
     ConvData conv_data;
 
-    if (!VerifyAndGetConvParams(std::dynamic_pointer_cast<ngraph::opset7::Convolution>(conv), conv_data))
+    if (!VerifyAndGetConvData(std::dynamic_pointer_cast<ngraph::opset7::Convolution>(conv), conv_data))
         return false;
 
     // We are looking for Transpose(NHWC->NCHW) => Conv => Transpose(NCHW->NHWC)
@@ -246,7 +190,7 @@ static bool Convert(std::shared_ptr<ngraph::Node> leading_transpose,
     return true;
 }
 
-std::function<bool(ngraph::Output<ngraph::Node>)> consumers_and_rank(const size_t expected_count, const ngraph::Dimension& expected_rank) {
+static std::function<bool(ngraph::Output<ngraph::Node>)> consumers_and_rank(const size_t expected_count, const ngraph::Dimension& expected_rank) {
     return [=](ngraph::Output<ngraph::Node> output) -> bool {
         return ngraph::pattern::consumers_count(expected_count) && ngraph::pattern::rank_equals(expected_rank);
     };
@@ -263,34 +207,35 @@ ConvertPadded2ValidConv::ConvertPadded2ValidConv() {
                                                           ngraph::pattern::consumers_count(1));
     auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({conv, const_input},
                                                                 ngraph::pattern::consumers_count(1));
-    auto fq = ngraph::pattern::wrap_type<ngraph::opset7::FakeQuantize>({bias, const_input, const_input, const_input, const_input},
-                                                                       ngraph::pattern::consumers_count(1));
+    auto fq_bias = ngraph::pattern::wrap_type<ngraph::opset7::FakeQuantize>({bias, const_input, const_input, const_input, const_input},
+                                                                            ngraph::pattern::consumers_count(1));
     auto max_pool1 = ngraph::pattern::wrap_type<ngraph::opset7::MaxPool>({bias},
                                                                          ngraph::pattern::consumers_count(1));
-    auto max_pool2 = ngraph::pattern::wrap_type<ngraph::opset7::MaxPool>({fq},
+    auto max_pool2 = ngraph::pattern::wrap_type<ngraph::opset7::MaxPool>({fq_bias},
                                                                          ngraph::pattern::consumers_count(1));
     auto af1 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
         ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
         ngraph::opset7::Sign, ngraph::opset7::Clamp>({bias}, ngraph::pattern::consumers_count(1));
     auto af2 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
         ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
-        ngraph::opset7::Sign, ngraph::opset7::Clamp>({fq}, ngraph::pattern::consumers_count(1));
+        ngraph::opset7::Sign, ngraph::opset7::Clamp>({fq_bias}, ngraph::pattern::consumers_count(1));
     auto af3 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
         ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
        ngraph::opset7::Sign, ngraph::opset7::Clamp>({max_pool1}, ngraph::pattern::consumers_count(1));
     auto af4 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
        ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
        ngraph::opset7::Sign, ngraph::opset7::Clamp>({max_pool2}, ngraph::pattern::consumers_count(1));
-    auto transpose_input = std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{conv, bias, max_pool1, max_pool2, fq, af1, af2, af3, af4});
+    auto fq_af = ngraph::pattern::wrap_type<ngraph::opset7::FakeQuantize>({af4, const_input, const_input, const_input, const_input},
+                                                                          ngraph::pattern::consumers_count(1));
+    auto transpose_input =
+        std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{conv, bias, max_pool1, max_pool2, fq_bias, af1, af2, af3, af4, fq_af});
     auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({transpose_input, const_input},
                                                                                     consumers_and_rank(1, 4));
 
     ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
         const auto& pattern_map = m.get_pattern_value_map();
-        auto conv_output = conv->output(0).get_node_shared_ptr();
-        IE_ASSERT(conv_output != nullptr);
-
-        auto bias_node = std::dynamic_pointer_cast<ngraph::opset7::Add>(conv_output);
+        auto bias_it = pattern_map.find(bias);
+        auto bias_node = (bias_it == std::end(pattern_map) ? nullptr : bias_it->second.get_node_shared_ptr());
 
         return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), pattern_map.at(conv).get_node_shared_ptr(),
                        pattern_map.at(trailing_transpose).get_node_shared_ptr(), bias_node);
@@ -13,11 +13,11 @@ namespace GNAPluginNS {
  * wrapped with transposes, to a valid convolution with padding added before the leading transpose,
  * POT precessed models are supported (fake quantized layers omitted below for clarity):
  *
  *    Padding
  *       |
  *   Transpose (NHWC -> NCHW)              Transpose (NHWC -> NCHW)
  *       |                                    |
- *   Convolution with padding              Convolution with padding
+ *   Convolution with padding              Valid convolution
  *       |                                    |
  *   Broadcast Bias (optional)             Broadcast Bias (optional)
  *       |                                    |
@ -0,0 +1,667 @@
|
|||||||
|
// Copyright (C) 2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <openvino/cc/ngraph/itt.hpp>
|
||||||
|
|
||||||
|
#include "transformations/decompose_2d_conv.hpp"
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
#include <ngraph/opsets/opset7.hpp>
|
||||||
|
#include <ngraph/pattern/op/wrap_type.hpp>
|
||||||
|
#include <transformations/utils/utils.hpp>
|
||||||
|
#include <ngraph/pattern/op/or.hpp>
|
||||||
|
#include <ngraph/rt_info.hpp>
|
||||||
|
#include <ngraph/pass/manager.hpp>
|
||||||
|
#include <ie_common.h>
|
||||||
|
#include "utils/transformation_helper.hpp"
|
||||||
|
#include "backend/gna_limitations.hpp"
|
||||||
|
#include "layers/gna_convolution_layer.hpp"
|
||||||
|
|
||||||
|
|
||||||
|
using namespace GNAPluginNS;
|
||||||
|
|
||||||
|
NGRAPH_RTTI_DEFINITION(Decompose2DConv, "Decompose2DConv", 0);
|
||||||
|
NGRAPH_RTTI_DEFINITION(Decompose2DConvTransposedWithBias, "Decompose2DConvTransposedWithBias", 0);
|
||||||
|
NGRAPH_RTTI_DEFINITION(Decompose2DConvTransposedWithBiasAF, "Decompose2DConvTransposedWithBiasAF", 0);
|
||||||
|
|
||||||
|
struct GraphData {
|
||||||
|
std::shared_ptr<ngraph::opset7::Transpose>leading_transpose;
|
||||||
|
std::shared_ptr<ngraph::opset7::FakeQuantize>fq_conv;
|
||||||
|
std::shared_ptr<ngraph::opset7::Convolution>conv;
|
||||||
|
std::shared_ptr<ngraph::opset7::Transpose>trailing_transpose;
|
||||||
|
std::shared_ptr<ngraph::opset7::FakeQuantize>fq_bias;
|
||||||
|
std::shared_ptr<ngraph::opset7::MaxPool>max_pool;
|
||||||
|
std::shared_ptr<ngraph::op::util::UnaryElementwiseArithmetic>af;
|
||||||
|
std::shared_ptr<ngraph::opset7::FakeQuantize>fq_af;
|
||||||
|
std::shared_ptr<ngraph::Node>last_op_in_sequence_for_replacement;
|
||||||
|
std::shared_ptr<ngraph::Node>bias_const;
|
||||||
|
size_t conv_count;
|
||||||
|
size_t pool_size_width;
|
||||||
|
size_t pool_stride_width;
|
||||||
|
// TODO: currently 2D max pool is not supported
|
||||||
|
//size_t pool_size_height;
|
||||||
|
//size_t pool_stride_height;
|
||||||
|
};
|
||||||
|
|
||||||
|
static bool VerifyAndGetConvData(std::shared_ptr<ngraph::opset7::Convolution> conv, ConvData& conv_data) {
|
||||||
|
const auto& input = conv->input_value(0);
|
||||||
|
const auto& filters = conv->input_value(1);
|
||||||
|
|
||||||
|
// We support only batch == 1
|
||||||
|
if (input.get_shape()[0] != 1) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t filter_height = filters.get_shape()[2];
|
||||||
|
size_t filter_width = filters.get_shape()[3];
|
||||||
|
|
||||||
|
if (filter_width > GNALimitations::copyMaxGrouping || filter_height > GNALimitations::copyMaxGrouping) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
GetConvData(conv, conv_data);
|
||||||
|
|
||||||
|
IE_ASSERT(conv_data.output_channel_count == conv->get_output_shape(0)[1]);
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::shared_ptr<ngraph::Node> VerifyBiasAndReshapeConst(std::shared_ptr<ngraph::opset7::Add> conv_bias, const ConvData& conv_data) {
|
||||||
|
auto add_const = std::dynamic_pointer_cast<ngraph::opset7::Constant>(conv_bias->input_value(1).get_node_shared_ptr());
|
||||||
|
|
||||||
|
if (add_const) {
|
||||||
|
auto bias_size = shape_size(add_const->get_shape());
|
||||||
|
|
||||||
|
// The add may be a normal add not conv bias, then we just go further
|
||||||
|
if (bias_size == conv_data.filter_count) {
|
||||||
|
return ngraph::op::util::make_try_fold<ngraph::opset7::Reshape>(add_const,
|
||||||
|
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{1, bias_size, 1, 1}), false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Bias size does not match (or dynamic bias), can't decompose such convolution
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool VerifyMaxPool(GraphData& graph_data, std::shared_ptr<ngraph::opset7::MaxPool> max_pool) {
    auto pool_filter = max_pool->get_kernel();
    auto pool_strides = max_pool->get_strides();

    // Check Max Pool padding and limitations
    if ((max_pool->get_auto_pad() != ngraph::op::PadType::VALID &&
        (max_pool->get_auto_pad() != ngraph::op::PadType::EXPLICIT ||
            max_pool->get_pads_begin() != ngraph::Shape({0, 0}) || max_pool->get_pads_end() != ngraph::Shape({0, 0}))) ||
        pool_filter.size() != 2 || pool_strides.size() != 2 ||
        pool_filter[0] > GNALimitations::maxPoolMaxWindowSize)
        return false;

    graph_data.pool_size_width = pool_filter[1];
    graph_data.pool_stride_width = pool_strides[1];
    return true;
}

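// Worked example (illustrative numbers, assuming GNALimitations::convFilterMaxSize is the 768-element
// limit mentioned in the comment below): a 128-channel input with a 3x3 kernel gives
// 128 * 3 * 3 = 1152 factorized channels, so the loop in CalculateConvCount() settles on
// conv_count == 2 (1152 / 2 = 576 <= 768 and both divisibility checks pass).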
static size_t CalculateConvCount(const ConvData& conv_data) {
    // Check if splitting the input plane is possible, given the GNA HW limitation of 768 filter elements
    size_t conv_count = 1;
    size_t total_factorized_conv_channel_count = (conv_data.input_channel_count * conv_data.filter_height * conv_data.filter_width);
    while (total_factorized_conv_channel_count / conv_count > GNALimitations::convFilterMaxSize ||
        total_factorized_conv_channel_count % conv_count != 0 || conv_data.filter_channel_count % conv_count != 0)
        conv_count++;

    return conv_count;
}

static bool ShouldDecompose(GraphData& graph_data, const ConvData& conv_data) {
    // Calculate the number of splits required
    graph_data.conv_count = CalculateConvCount(conv_data);

    // The Concat (copy) layer limitation allows splitting only up to a certain limit;
    // currently we can split only convolutions without pooling in the horizontal dimension
    if (graph_data.conv_count > GNALimitations::copyMaxGrouping ||
        ((graph_data.pool_size_width > 1 || graph_data.pool_stride_width > 1) && graph_data.conv_count > 1))
        return false;

    // GNA supported features or handled otherwise - there is no need to decompose such a convolution
    if (graph_data.conv_count == 1 && (((conv_data.input_height == 1 || conv_data.input_width == 1) &&
            conv_data.filter_dilation_width == 1 && conv_data.filter_dilation_height == 1) ||
            GNAConvolutionLayer::isMappableFrom2DTo1D(conv_data.input_height, conv_data.input_width, conv_data.filter_width, conv_data.filter_stride_width)))
        return false;

    return true;
}

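// Filter reshaping sketch (illustrative numbers): a 4x2x3x5 (N, C, H, W) filter constant with both
// vertical_permute and horizontal_permute set and split_channels == 1 ends up reshaped to 4x30x1x1,
// i.e. the spatial dimensions are folded into the channel dimension of the flattened (height == 1) kernels.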
static std::vector<std::shared_ptr<ngraph::Node>> Split2DConvFilters(std::shared_ptr<ngraph::opset7::Constant>& filters,
    const bool& vertical_permute, const bool& horizontal_permute, const size_t& split_channels) {

    if (!horizontal_permute && !vertical_permute && split_channels == 1)
        return {filters};

    std::vector<std::shared_ptr<ngraph::Node>> result;
    ngraph::Shape reshape_shape;
    auto flat_filters = filters->outputs();
    const auto filter_shape = filters->get_output_shape(0);
    IE_ASSERT(filter_shape.size() == 4);

    if (split_channels > 1) {
        const auto axis_node = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1});
        const auto split = std::make_shared<ngraph::opset7::Split>(filters, axis_node, split_channels);
        flat_filters = split->outputs();
    }

    for (size_t split_index = 0; split_index < split_channels; split_index++) {
        ngraph::Output<ngraph::Node>& flat_filter = flat_filters[split_index];
        if (horizontal_permute && !vertical_permute) {
            result.push_back(std::make_shared<ngraph::opset7::Transpose>(flat_filter,
                ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 1, 3, 2})));
        } else {
            result.push_back(flat_filter.get_node_shared_ptr());
        }
    }

    if (vertical_permute && horizontal_permute) {
        reshape_shape = ngraph::Shape{filter_shape[0], filter_shape[1] * filter_shape[2] * filter_shape[3] / split_channels, 1, 1};
    } else if (vertical_permute && !horizontal_permute) {
        reshape_shape = ngraph::Shape{filter_shape[0], filter_shape[1] * filter_shape[2] / split_channels, 1, filter_shape[3]};
    } else if (!vertical_permute && horizontal_permute) {
        reshape_shape = ngraph::Shape{filter_shape[0], filter_shape[1] * filter_shape[3] / split_channels, filter_shape[2], 1};
    } else {
        reshape_shape = ngraph::Shape{filter_shape[0], filter_shape[1] / split_channels, filter_shape[2], filter_shape[3]};
    }

    for (auto& new_filter : result)
        new_filter = ngraph::op::util::make_try_fold<ngraph::opset7::Reshape>(new_filter,
            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, reshape_shape), false);

    return result;
}

static ngraph::OutputVector SplitInput(const GraphData& graph_data, ConvData& conv_data) {
    // We need to have proper input shape first
    ngraph::OutputVector split_planes;
    auto padded_input_plane = std::make_shared<ngraph::opset7::Reshape>(graph_data.leading_transpose->input_value(0),
        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2},
            ngraph::Shape{1, shape_size(graph_data.leading_transpose->input_value(0).get_shape())}), false);
    copy_runtime_info(graph_data.conv, padded_input_plane);

    if (graph_data.conv_count > 1) {
        // If we have split input plane and convolutions due to GNA limitation - we must sum their results at the end
        conv_data.input_channel_count /= graph_data.conv_count;

        auto reshape_before_transpose = std::make_shared<ngraph::opset7::Reshape>(padded_input_plane,
            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2},
                {shape_size(padded_input_plane->get_shape()) / graph_data.conv_count, graph_data.conv_count}), false);

        auto transpose_before_channel_wise_split = std::make_shared<ngraph::opset7::Transpose>(reshape_before_transpose,
            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0));

        const auto axis_node = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0});
        const auto split = std::make_shared<ngraph::opset7::Split>(transpose_before_channel_wise_split, axis_node, graph_data.conv_count);
        split_planes = split->outputs();
    } else {
        split_planes.push_back(padded_input_plane);
    }

    return split_planes;
}

static std::vector<std::shared_ptr<ngraph::Node>> SplitFilters(const GraphData& graph_data, ConvData& conv_data) {
    // If the input plane exceeds GNA limits and we have split into several convolutions, then we need to split filter data as well;
    // we also need to take filter height and potential dilation into account when modifying the filters

    // Take account of fake quantize when getting filter values
    auto filter_values = std::dynamic_pointer_cast<ngraph::opset7::Constant>(graph_data.fq_conv == nullptr ?
        graph_data.conv->input_value(1).get_node_shared_ptr() : graph_data.fq_conv->input_value(0).get_node_shared_ptr());
    bool vertical_permute = (conv_data.filter_height > 1);
    bool horizontal_permute = (conv_data.filter_dilation_width > 1);
    std::vector<std::shared_ptr<ngraph::Node>> h_1_filters{};

    h_1_filters = Split2DConvFilters(filter_values, vertical_permute, horizontal_permute, graph_data.conv_count);

    for (auto filter : h_1_filters)
        copy_runtime_info(graph_data.conv, filter);

    return h_1_filters;
}

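// Offset arithmetic sketch for the strided-slice branch below (hypothetical numbers): with
// filter_stride_height == 2, filter_dilation_height == 1 and input_width * input_channel_count == 64,
// the slice taken for filter row 1 and output row 2 starts at offset (1 * 1 + 2 * 2) * 64 = 320.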
static void TransformInput(const GraphData& graph_data, const ConvData& conv_data, ngraph::Output<ngraph::Node>& split_input_plane) {
    /*
    *   Padded row - NHWC order
    *            |
    *   Split in vertical dim (filter height)
    *          / | \
    *         Concat
    *            |
    *        Transpose
    */

    // First we need to prepare flat (height = 1) slices of input data proper for flattened (height = 1) filters created later on;
    // the input data is overlapping (duplicated)
    ngraph::OutputVector dilated_input_planes;
    for (size_t filter_height = 0; filter_height < conv_data.filter_height; filter_height++) {
        size_t offset;

        if (conv_data.filter_stride_height > 1) {
            // Prepare strided slices of input data
            for (size_t output_height = 0; output_height < conv_data.output_height; output_height++) {
                offset = (filter_height * conv_data.filter_dilation_height + output_height * conv_data.filter_stride_height) *
                    conv_data.input_width * conv_data.input_channel_count;
                auto slice = FlatCrop(split_input_plane, offset, conv_data.input_width * conv_data.input_channel_count);
                copy_runtime_info(graph_data.conv, slice);
                dilated_input_planes.push_back(slice);
            }
        } else {
            offset = filter_height * conv_data.filter_dilation_height * conv_data.input_width * conv_data.input_channel_count;
            auto slice = FlatCrop(split_input_plane, offset, conv_data.input_width * conv_data.input_channel_count * conv_data.output_height);
            copy_runtime_info(graph_data.conv, slice);
            dilated_input_planes.push_back(slice);
        }
    }

    // Interleaving dilated input planes
    std::shared_ptr<ngraph::Node> dilated_chunks_concat = std::make_shared<ngraph::opset7::Concat>(dilated_input_planes, 0);

    // An additional reshape is required for strided slices of input intended for each filter row
    if (conv_data.filter_stride_height > 1) {
        dilated_chunks_concat = std::make_shared<ngraph::opset7::Reshape>(dilated_chunks_concat,
            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2},
                {conv_data.filter_height, conv_data.input_width * conv_data.input_channel_count * conv_data.output_height}), false);
    }

    auto transposed_dilated_chunks = std::make_shared<ngraph::opset7::Transpose>(dilated_chunks_concat,
        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0));

    // Flattening of interleaved input planes
    auto flattened_dilated_transposed_input = std::make_shared<ngraph::opset7::Reshape>(transposed_dilated_chunks,
        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2},
            {(size_t)1, conv_data.input_width * conv_data.input_channel_count * conv_data.output_height * conv_data.filter_height}), false);

    copy_runtime_info(graph_data.conv, {dilated_chunks_concat, flattened_dilated_transposed_input, transposed_dilated_chunks});
    split_input_plane = flattened_dilated_transposed_input;
}

static void InsertFQLayer(const std::shared_ptr<ngraph::opset7::FakeQuantize> fqLayer,
    std::shared_ptr<ngraph::Node>& lastNode) {
    // lastNode is taken by reference so that the caller sees the newly inserted FakeQuantize clone
    if (fqLayer != nullptr) {
        lastNode = fqLayer->clone_with_new_inputs({lastNode,
            fqLayer->input_value(1), fqLayer->input_value(2),
            fqLayer->input_value(3), fqLayer->input_value(4)});
        ngraph::copy_runtime_info(fqLayer, lastNode);
    }
}

// Valid 1D (decomposed 2D) convolution wrapped with transposes NHWC => NCHW => conv => NCHW => NHWC
static std::shared_ptr<ngraph::Node> Create1DConv(const GraphData& graph_data, const ConvData& conv_data, const ngraph::Output<ngraph::Node>& input,
    std::shared_ptr<ngraph::Node> filters, const size_t conv_index, const size_t h_index) {
    // Transpose NHWC => NCHW
    std::shared_ptr<ngraph::Node> nchw_input = std::make_shared<ngraph::opset7::Transpose>(input,
        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})->output(0));

    // Fake quantize
    InsertFQLayer(graph_data.fq_conv, filters);

    // 1D Convolution
    auto conv = std::make_shared<ngraph::opset7::Convolution>(nchw_input, filters,
        ngraph::Strides{1, conv_data.filter_stride_width}, ngraph::CoordinateDiff{0, 0}, ngraph::CoordinateDiff{0, 0},
        ngraph::Strides{1, 1}, ngraph::op::PadType::VALID);
    std::string conv_name = graph_data.conv->get_friendly_name() + "_H_" + std::to_string(h_index) + "_CH_" + std::to_string(0);
    conv->set_friendly_name(conv_name);

    // Bias & fake quantize
    std::shared_ptr<ngraph::Node> last_conv_block_op = conv;
    if (graph_data.bias_const && conv_index == 0) {
        last_conv_block_op = std::make_shared<ngraph::opset7::Add>(conv, graph_data.bias_const);
        copy_runtime_info(graph_data.conv, last_conv_block_op);
        InsertFQLayer(graph_data.fq_bias, last_conv_block_op);
    }

    // Max pooling
    if ((graph_data.max_pool && graph_data.pool_size_width > 1) || graph_data.pool_stride_width > 1) {
        last_conv_block_op = std::make_shared<ngraph::opset7::MaxPool>(last_conv_block_op,
            ngraph::Strides{1, graph_data.pool_stride_width}, ngraph::Shape{0, 0}, ngraph::Shape{0, 0},
            ngraph::Shape{1, graph_data.pool_size_width}, graph_data.max_pool->get_rounding_type(), ngraph::op::PadType::VALID);
    }

    // Activation function & fake quantize
    if (graph_data.af && graph_data.conv_count == 1) {
        last_conv_block_op = graph_data.af->copy_with_new_inputs({last_conv_block_op});
        copy_runtime_info(conv, last_conv_block_op);
        InsertFQLayer(graph_data.fq_af, last_conv_block_op);
    }

    // Transpose NCHW => NHWC
    auto nhwc_output = std::make_shared<ngraph::opset7::Transpose>(last_conv_block_op,
        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})->output(0));
    copy_runtime_info(graph_data.conv, {nchw_input, conv, nhwc_output});
    return nhwc_output;
}

static std::shared_ptr<ngraph::Node> CreateDecomposedConv(const GraphData& graph_data, ConvData& conv_data,
    ngraph::Output<ngraph::Node>& reduced_input_plane, const std::vector<std::shared_ptr<ngraph::Node>>& h_1_filters, const size_t conv_index) {
    ngraph::OutputVector result_chunks;
    std::shared_ptr<ngraph::Node> last_op;
    bool horizontal_permute = (conv_data.filter_dilation_width > 1);
    size_t h_1_filter_channel_count = (conv_data.input_channel_count * conv_data.filter_height);

    for (size_t output_height = 0; output_height < conv_data.output_height; output_height++) {
        size_t offset = output_height * conv_data.input_width * h_1_filter_channel_count;
        auto row = (conv_data.output_height == 1) ? reduced_input_plane :
            FlatCrop(reduced_input_plane, offset, conv_data.input_width * h_1_filter_channel_count);
        /*
        *         Padded row
        *              |
        *    ??? <Dilation !=1> ???
        *              |
        *    Split in vertical dim
        *            / | \
        *           Concat
        *              |
        *           Permute
        *              |
        *    Transpose (NHWC => NCHW)
        *              |
        *    1D Conv (Bias | MaxPooling)
        *              |
        *    Transpose (NCHW => NHWC)
        */
        auto nhwc_conv_y_input = row;

        if (horizontal_permute) {
            // Horizontal split - transform input accordingly
            ngraph::OutputVector dilated_chunks;
            std::shared_ptr<ngraph::Node> dilated_chunks_concat = nhwc_conv_y_input.get_node_shared_ptr();

            // We need to calculate some parameters in case horizontal stride > 1 is used, because if we use the ones available from the original convolution
            // we won't take into account the fact that horizontal strides will be supported by the newly created 1D convolution, and not by decomposition
            size_t filter_dilation_width = conv_data.filter_width > 1 ? conv_data.filter_dilation_width : 1;
            size_t output_width = (conv_data.input_width - (conv_data.filter_width + filter_dilation_width - 2));

            if (conv_data.filter_width > 1) {
                for (size_t filter_width = 0; filter_width < conv_data.filter_width; filter_width++) {
                    size_t offset = filter_width * conv_data.filter_dilation_width * h_1_filter_channel_count;
                    auto slice = FlatCrop(row, offset, h_1_filter_channel_count * output_width);
                    copy_runtime_info(graph_data.conv, slice);
                    dilated_chunks.push_back(slice);
                }

                dilated_chunks_concat = std::make_shared<ngraph::opset7::Concat>(dilated_chunks, 0);
            }

            auto transposed_dilated_chunks = std::make_shared<ngraph::opset7::Transpose>(dilated_chunks_concat,
                ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0));

            auto flattened_dilated_conv_input = std::make_shared<ngraph::opset7::Reshape>(transposed_dilated_chunks,
                ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4},
                    ngraph::Shape{1, 1, output_width, h_1_filter_channel_count * conv_data.filter_width}), false);

            copy_runtime_info(graph_data.conv, ngraph::NodeVector{flattened_dilated_conv_input, transposed_dilated_chunks, dilated_chunks_concat});

            nhwc_conv_y_input = flattened_dilated_conv_input;
        } else {
            // If no horizontal split is done, only a reshape is required before the decomposed convolution
            nhwc_conv_y_input = std::make_shared<ngraph::opset7::Reshape>(nhwc_conv_y_input,
                ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4},
                    ngraph::Shape{1, 1, conv_data.input_width, h_1_filter_channel_count}), false);
        }

        // Pointwise convolutions
        // Valid 1D convolution wrapped with transposes NHWC => NCHW => Conv => NCHW => NHWC
        // The activation function can be fused with the convolution only if it isn't split
        auto nhwc_y_output = Create1DConv(graph_data, conv_data, nhwc_conv_y_input, h_1_filters[conv_index], conv_index, output_height);
        result_chunks.push_back(nhwc_y_output);
        last_op = nhwc_y_output;
    }

    // Horizontal dimension greater than 1
    if (result_chunks.size() > 1) {
        // Concat in horizontal dimension
        // In NHWC the index of H is 1
        auto concatenated_sub_results = std::make_shared<ngraph::opset7::Concat>(result_chunks, 1);
        copy_runtime_info(graph_data.conv, concatenated_sub_results);
        last_op = concatenated_sub_results;
    }
    return last_op;
}

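// Summation sketch: when conv_count > 1 (see SplitInput above), each split produces a partial 1D
// convolution result, and the loop over partial_conv_results below accumulates them with element-wise
// Add; e.g. for conv_count == 2 the final output is partial_conv_results[0] + partial_conv_results[1].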
static void Decompose(const GraphData& graph_data, ConvData& conv_data) {
    std::vector<std::shared_ptr<ngraph::Node>> partial_conv_results;

    // Split the input due to the GNA filter element count limit
    auto split_planes = SplitInput(graph_data, conv_data);
    // Split the filters due to the GNA filter element count limit, 2D convolution shape, or dilations
    auto h_1_filters = SplitFilters(graph_data, conv_data);

    // Do transformations in each of the splits created above
    for (size_t conv_index = 0; conv_index < graph_data.conv_count; conv_index++) {
        ngraph::Output<ngraph::Node>& split_input_plane = split_planes[conv_index];

        // Input data needs to be prepared before 2D convolution decomposition
        if (conv_data.filter_height > 1 || conv_data.filter_stride_height > 1) {
            TransformInput(graph_data, conv_data, split_input_plane);
        }

        auto flat_conv = CreateDecomposedConv(graph_data, conv_data, split_input_plane, h_1_filters, conv_index);
        partial_conv_results.push_back(flat_conv);
    }

    std::shared_ptr<ngraph::Node> conv_result = partial_conv_results.front();
    for (size_t i = 1; i < partial_conv_results.size(); i++) {
        auto add_result = std::make_shared<ngraph::opset7::Add>(partial_conv_results[i], conv_result);
        copy_runtime_info(graph_data.conv, add_result);
        conv_result = add_result;
    }

    // TODO: Max Pool 2D case
    //if (graph_data.max_pool && (graph_data.pool_size_height > 1 || graph_data.pool_stride_height > 1)) {
    //}

    // Activation function after the trailing Transpose NCHW->NHWC
    if (graph_data.af && graph_data.conv_count > 1) {
        auto af_result = graph_data.af->copy_with_new_inputs({conv_result});
        copy_runtime_info(graph_data.conv, af_result);
        conv_result = af_result;
    }
    // We need to keep the same friendly name as the replaced Convolution layer, so its output can still be used as a network result
    std::string conv_result_name = graph_data.last_op_in_sequence_for_replacement->get_friendly_name();
    replace_node(graph_data.last_op_in_sequence_for_replacement, conv_result);
    conv_result->set_friendly_name(conv_result_name);
}

static bool Convert(std::shared_ptr<ngraph::Node> leading_transpose,
    std::shared_ptr<ngraph::Node> fq_conv,
    std::shared_ptr<ngraph::Node> conv,
    std::shared_ptr<ngraph::Node> trailing_transpose,
    std::shared_ptr<ngraph::Node> bias,
    std::shared_ptr<ngraph::Node> fq_bias,
    std::shared_ptr<ngraph::Node> max_pool,
    std::shared_ptr<ngraph::Node> af,
    std::shared_ptr<ngraph::Node> fq_af,
    std::shared_ptr<ngraph::Node> last_op_for_replacement) {

    GraphData graph_data{std::dynamic_pointer_cast<ngraph::opset7::Transpose>(leading_transpose),
        std::dynamic_pointer_cast<ngraph::opset7::FakeQuantize>(fq_conv),
        std::dynamic_pointer_cast<ngraph::opset7::Convolution>(conv),
        std::dynamic_pointer_cast<ngraph::opset7::Transpose>(trailing_transpose),
        std::dynamic_pointer_cast<ngraph::opset7::FakeQuantize>(fq_bias),
        std::dynamic_pointer_cast<ngraph::opset7::MaxPool>(max_pool),
        std::dynamic_pointer_cast<ngraph::op::util::UnaryElementwiseArithmetic>(af),
        std::dynamic_pointer_cast<ngraph::opset7::FakeQuantize>(fq_af),
        last_op_for_replacement, nullptr, 1, 1, 1};
    ConvData conv_data;

    if (!VerifyAndGetConvData(std::dynamic_pointer_cast<ngraph::opset7::Convolution>(conv), conv_data))
        return false;

    // We are looking for Transpose(NHWC->NCHW) => Conv => Transpose(NCHW->NHWC)
    // or similar cases, so the required network must be in NHWC order like in TF
    if (!TransposeOrderMatches(std::dynamic_pointer_cast<ngraph::opset7::Transpose>(leading_transpose), {0, 3, 1, 2}))
        return false;

    if (!TransposeOrderMatches(std::dynamic_pointer_cast<ngraph::opset7::Transpose>(trailing_transpose), {0, 2, 3, 1}))
        return false;

    if (bias && !(graph_data.bias_const = VerifyBiasAndReshapeConst(std::dynamic_pointer_cast<ngraph::opset7::Add>(bias), conv_data)))
        return false;

    if (max_pool && !VerifyMaxPool(graph_data, std::dynamic_pointer_cast<ngraph::opset7::MaxPool>(max_pool)))
        return false;

    if (!ShouldDecompose(graph_data, conv_data))
        return false;

    // All checks applied - now we may start decomposition
    Decompose(graph_data, conv_data);

    return true;
}

static bool VerifyBias(std::shared_ptr<ngraph::Node> conv, std::shared_ptr<ngraph::Node> bias) {
    auto add_const = std::dynamic_pointer_cast<ngraph::opset7::Constant>(bias->input_value(1).get_node_shared_ptr());

    // The constant may sit on either input of the Add
    if (!add_const) {
        add_const = std::dynamic_pointer_cast<ngraph::opset7::Constant>(bias->input_value(0).get_node_shared_ptr());
    }

    if (add_const) {
        auto bias_size = shape_size(add_const->get_shape());
        auto conv_filter_count = conv->input_value(1).get_shape()[0];
        if (bias_size == conv_filter_count)
            return true;
    }
    return false;
}

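// The pattern declared below matches, at its longest, the chain
// Transpose(NHWC->NCHW) -> Convolution -> [Add bias] -> [FakeQuantize] -> [MaxPool] -> [activation] ->
// [FakeQuantize] -> Transpose(NCHW->NHWC), where every bracketed element is optional.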
Decompose2DConv::Decompose2DConv() {
    MATCHER_SCOPE(Decompose2DConv);

    auto const_input = ngraph::pattern::wrap_type<ngraph::opset7::Constant>();
    auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ngraph::pattern::any_input(), const_input},
        consumers_and_rank(1, 4));
    auto filters_const = ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::rank_equals(4));
    auto fq_conv = ngraph::pattern::wrap_type<ngraph::opset7::FakeQuantize>({const_input, const_input, const_input, const_input, const_input},
        consumers_and_rank(1, 4));
    auto filters = std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{filters_const, fq_conv});
    auto conv = ngraph::pattern::wrap_type<ngraph::opset7::Convolution>({leading_transpose, filters},
        consumers_and_rank(1, 4));
    auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({conv, const_input},
        ngraph::pattern::consumers_count(1));
    auto fq_bias = ngraph::pattern::wrap_type<ngraph::opset7::FakeQuantize>({bias, const_input, const_input, const_input, const_input},
        ngraph::pattern::consumers_count(1));
    auto max_pool1 = ngraph::pattern::wrap_type<ngraph::opset7::MaxPool>({bias},
        ngraph::pattern::consumers_count(1));
    auto max_pool2 = ngraph::pattern::wrap_type<ngraph::opset7::MaxPool>({fq_bias},
        ngraph::pattern::consumers_count(1));
    auto af1 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
        ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
        ngraph::opset7::Sign, ngraph::opset7::Clamp>({bias}, ngraph::pattern::consumers_count(1));
    auto af2 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
        ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
        ngraph::opset7::Sign, ngraph::opset7::Clamp>({fq_bias}, ngraph::pattern::consumers_count(1));
    auto af3 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
        ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
        ngraph::opset7::Sign, ngraph::opset7::Clamp>({max_pool1}, ngraph::pattern::consumers_count(1));
    auto af4 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
        ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
        ngraph::opset7::Sign, ngraph::opset7::Clamp>({max_pool2}, ngraph::pattern::consumers_count(1));
    auto fq_af = ngraph::pattern::wrap_type<ngraph::opset7::FakeQuantize>({af4, const_input, const_input, const_input, const_input},
        ngraph::pattern::consumers_count(1));
    auto transpose_input =
        std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{conv, bias, max_pool1, max_pool2, fq_bias, af1, af2, af3, af4, fq_af});
    auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({transpose_input, const_input},
        consumers_and_rank(1, 4));

    ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
        const auto& pattern_map = m.get_pattern_value_map();
        auto fq_conv_it = pattern_map.find(fq_conv);
        auto fq_conv_node = (fq_conv_it == std::end(pattern_map) ? nullptr : fq_conv_it->second.get_node_shared_ptr());
        auto bias_it = pattern_map.find(bias);
        auto bias_node = (bias_it == std::end(pattern_map) ? nullptr : bias_it->second.get_node_shared_ptr());
        auto fq_bias_it = pattern_map.find(fq_bias);
        auto fq_bias_node = (fq_bias_it == std::end(pattern_map) ? nullptr : fq_bias_it->second.get_node_shared_ptr());
        auto fq_af_it = pattern_map.find(fq_af);
        auto fq_af_node = (fq_af_it == std::end(pattern_map) ? nullptr : fq_af_it->second.get_node_shared_ptr());
        auto max_pool1_it = pattern_map.find(max_pool1);
        auto max_pool2_it = pattern_map.find(max_pool2);
        auto max_pool_node = (max_pool1_it == std::end(pattern_map) ?
            ((max_pool2_it == std::end(pattern_map) ? nullptr : max_pool2_it->second.get_node_shared_ptr())) : max_pool1_it->second.get_node_shared_ptr());
        std::shared_ptr<ngraph::Node> af_node = nullptr;
        std::vector<ngraph::pattern::PatternValueMap::const_iterator> af_it
            {pattern_map.find(af1), pattern_map.find(af2), pattern_map.find(af3), pattern_map.find(af4)};

        for (auto const& af : af_it) {
            if (af != std::end(pattern_map)) {
                af_node = af->second.get_node_shared_ptr();
                break;
            }
        }

        return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), fq_conv_node, pattern_map.at(conv).get_node_shared_ptr(),
            pattern_map.at(trailing_transpose).get_node_shared_ptr(), bias_node, fq_bias_node, max_pool_node, af_node, fq_af_node,
            pattern_map.at(trailing_transpose).get_node_shared_ptr());
    };

    auto m = std::make_shared<ngraph::pattern::Matcher>(trailing_transpose, matcher_name);
    this->register_matcher(m, callback);
}

Decompose2DConvTransposedWithBias::Decompose2DConvTransposedWithBias() {
    MATCHER_SCOPE(Decompose2DConvTransposedWithBias);

    auto const_input_i64 = ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::type_matches(ngraph::element::i64));
    auto const_input = ngraph::pattern::wrap_type<ngraph::opset7::Constant>();
    auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ngraph::pattern::any_input(), const_input_i64},
        consumers_and_rank(1, 4));
    auto conv = ngraph::pattern::wrap_type<ngraph::opset7::Convolution>(
        {leading_transpose, ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::rank_equals(4))},
        consumers_and_rank(1, 4));
    auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({conv, const_input_i64},
        consumers_and_rank(1, 4));
    auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({trailing_transpose, const_input},
        ngraph::pattern::consumers_count(1));

    ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
        const auto& pattern_map = m.get_pattern_value_map();
        if (!VerifyBias(pattern_map.at(conv).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr()))
            return false;

        return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), nullptr, pattern_map.at(conv).get_node_shared_ptr(),
            pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(), nullptr, nullptr,
            nullptr, nullptr, pattern_map.at(bias).get_node_shared_ptr());
    };

    auto m = std::make_shared<ngraph::pattern::Matcher>(bias, matcher_name);
    this->register_matcher(m, callback);
}

Decompose2DConvTransposedWithBiasAF::Decompose2DConvTransposedWithBiasAF() {
    MATCHER_SCOPE(Decompose2DConvTransposedWithBiasAF);

    auto const_input_i64 = ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::type_matches(ngraph::element::i64));
    auto const_input = ngraph::pattern::wrap_type<ngraph::opset7::Constant>();
    auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ngraph::pattern::any_input(), const_input_i64},
        consumers_and_rank(1, 4));
    auto conv = ngraph::pattern::wrap_type<ngraph::opset7::Convolution>(
        {leading_transpose, ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::rank_equals(4))},
        consumers_and_rank(1, 4));
    auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({conv, const_input_i64},
        consumers_and_rank(1, 4));
    auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({trailing_transpose, const_input},
        ngraph::pattern::consumers_count(1));
    auto af = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
        ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
        ngraph::opset7::Sign, ngraph::opset7::Clamp>({bias},
        ngraph::pattern::consumers_count(1));

    ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
        const auto& pattern_map = m.get_pattern_value_map();
        if (!VerifyBias(pattern_map.at(conv).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr()))
            return false;

        return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), nullptr, pattern_map.at(conv).get_node_shared_ptr(),
            pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(), nullptr,
            nullptr, pattern_map.at(af).get_node_shared_ptr(), nullptr, pattern_map.at(af).get_node_shared_ptr());
    };

    auto m = std::make_shared<ngraph::pattern::Matcher>(af, matcher_name);
    this->register_matcher(m, callback);
}
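// A minimal usage sketch (hypothetical wiring; the plugin registers these passes elsewhere, and
// `function` stands for a std::shared_ptr<ngraph::Function> to be transformed):
//   ngraph::pass::Manager manager;
//   manager.register_pass<GNAPluginNS::Decompose2DConv>();
//   manager.register_pass<GNAPluginNS::Decompose2DConvTransposedWithBias>();
//   manager.register_pass<GNAPluginNS::Decompose2DConvTransposedWithBiasAF>();
//   manager.run_passes(function);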
@ -0,0 +1,80 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ngraph/pass/graph_rewrite.hpp>

namespace GNAPluginNS {

/**
 * @brief Decompose a 2D convolution, wrapped with transposes,
 * to a set of valid 1D convolutions with padding added in front of the set:
 *
 *                                                Padding
 *                                                   |
 *   Transpose (NHWC -> NCHW)              Transpose (NHWC -> NCHW)
 *              |                                    |
 *   Convolution with padding                Valid convolution
 *              |                                    |
 *   Broadcast Bias (optional)            Broadcast Bias (optional)
 *              |                                    |
 *   Max Pooling (optional)                Max Pooling (optional)
 *              |                                    |
 *   Activation Function (optional)    Activation Function (optional)
 *              |                                    |
 *   Transpose (NCHW -> NHWC)              Transpose (NCHW -> NHWC)
 *
 */
class Decompose2DConv : public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    Decompose2DConv();
};

/**
 * @brief Decompose a 2D convolution wrapped with transposes, with bias after the trailing transpose,
 * to a set of valid 1D convolutions with padding added in front of the set:
 *
 *                                          Padding
 *                                             |
 *   Transpose (NHWC -> NCHW)        Transpose (NHWC -> NCHW)
 *              |                              |
 *   Convolution with padding          Valid convolution
 *              |                              |
 *   Transpose (NCHW -> NHWC)        Transpose (NCHW -> NHWC)
 *              |                              |
 *        Broadcast Bias                 Broadcast Bias
 *
 */
class Decompose2DConvTransposedWithBias : public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    Decompose2DConvTransposedWithBias();
};

/**
 * @brief Decompose a 2D convolution wrapped with transposes, with bias and an activation function,
 * to a set of valid 1D convolutions with padding added in front of the set:
 *
 *                                          Padding
 *                                             |
 *   Transpose (NHWC -> NCHW)        Transpose (NHWC -> NCHW)
 *              |                              |
 *   Convolution with padding          Valid convolution
 *              |                              |
 *   Transpose (NCHW -> NHWC)        Transpose (NCHW -> NHWC)
 *              |                              |
 *        Broadcast Bias                 Broadcast Bias
 *              |                              |
 *     Activation Function             Activation Function
 *
 */
class Decompose2DConvTransposedWithBiasAF : public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    Decompose2DConvTransposedWithBiasAF();
};

} // namespace GNAPluginNS
@ -0,0 +1,75 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <ngraph/opsets/opset7.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include "transformation_helper.hpp"


namespace GNAPluginNS {

void GetConvData(std::shared_ptr<ngraph::opset7::Convolution> conv, ConvData& conv_data) {
    conv_data.output_height = conv->get_output_shape(0)[2];
    conv_data.output_width = conv->get_output_shape(0)[3];
    conv_data.input_channel_count = conv->input_value(0).get_shape()[1];
    conv_data.input_height = conv->input_value(0).get_shape()[2];
    conv_data.input_width = conv->input_value(0).get_shape()[3];
    conv_data.filter_count = conv->input_value(1).get_shape()[0];
    conv_data.filter_channel_count = conv->input_value(1).get_shape()[1];
    conv_data.filter_height = conv->input_value(1).get_shape()[2];
    conv_data.filter_width = conv->input_value(1).get_shape()[3];
    conv_data.filter_dilation_height = conv->get_dilations()[0];
    conv_data.filter_dilation_width = conv->get_dilations()[1];
    conv_data.filter_stride_height = conv->get_strides()[0];
    conv_data.filter_stride_width = conv->get_strides()[1];
    conv_data.output_channel_count = conv_data.filter_count;
    conv_data.pads_begin_height = conv->get_pads_begin()[0];
    conv_data.pads_begin_width = conv->get_pads_begin()[1];
    conv_data.pads_end_height = conv->get_pads_end()[0];
    conv_data.pads_end_width = conv->get_pads_end()[1];
    conv_data.padding_type = conv->get_auto_pad();
    conv_data.element_type = conv->get_element_type();
}

std::function<bool(ngraph::Output<ngraph::Node>)> consumers_and_rank(const size_t expected_count, const ngraph::Dimension& expected_rank) {
    return [=](ngraph::Output<ngraph::Node> output) -> bool {
        return ngraph::pattern::consumers_count(expected_count)(output) && ngraph::pattern::rank_equals(expected_rank)(output);
    };
}

bool TransposeOrderMatches(std::shared_ptr<ngraph::opset7::Transpose> transpose, std::vector<size_t> order) {
    if (!transpose)
        return false;
    const ngraph::Output<ngraph::Node>& transpose_order = transpose->input_value(1);
    auto transpose_order_dim = transpose_order.get_shape().size();

    if (transpose_order_dim != 1 || transpose_order.get_shape()[0] != order.size())
        return false;

    auto const_with_order_values = std::dynamic_pointer_cast<ngraph::opset7::Constant>(transpose_order.get_node_shared_ptr());
    if (!const_with_order_values)
        return false;

    const auto data = const_with_order_values->cast_vector<size_t>();
    if (data.empty())
        return false;

    if (!std::equal(order.begin(), order.end(), data.begin()))
        return false;

    return true;
}

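// Illustrative call (hypothetical values): FlatCrop(x, 64, 32) keeps the whole first dimension and takes
// elements [64, 96) of the flattened second dimension, i.e. a StridedSlice with begin {0, 64},
// end {0, 96} and the first dimension masked out via the begin/end masks.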
std::shared_ptr<ngraph::opset7::StridedSlice> FlatCrop(ngraph::Output<ngraph::Node> input, size_t offset, size_t size) {
    return std::make_shared<ngraph::opset7::StridedSlice>(
        input, // data
        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}), // begin slice index
        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset + size}), // end slice index
        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides
        std::vector<int64_t>{1, 0}, // begin mask
        std::vector<int64_t>{1, 0}); // end mask
}

} // namespace GNAPluginNS
@ -0,0 +1,64 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

namespace GNAPluginNS {

struct ConvData {
    size_t input_height;
    size_t input_width;
    size_t input_channel_count;
    size_t filter_height;
    size_t filter_width;
    size_t filter_count;
    size_t filter_channel_count;
    size_t filter_dilation_height;
    size_t filter_dilation_width;
    size_t filter_stride_height;
    size_t filter_stride_width;
    size_t output_height;
    size_t output_width;
    size_t output_channel_count;
    size_t pads_begin_width;
    size_t pads_begin_height;
    size_t pads_end_width;
    size_t pads_end_height;
    ngraph::op::PadType padding_type;
    ngraph::element::Type element_type;
};

/**
 * @brief gets all convolution related data into a struct for further processing
 * @param conv convolution node to get data from
 * @param conv_data convolution data structure to put data into
 * @return void
 */
void GetConvData(std::shared_ptr<ngraph::opset7::Convolution> conv, ConvData& conv_data);

/**
 * @brief ngraph matcher predicate fusing the existing predicates for consumer count and rank of a layer
 * @param expected_count expected consumer count of the node
 * @param expected_rank expected node rank
 * @return predicate function wrapper
 */
std::function<bool(ngraph::Output<ngraph::Node>)> consumers_and_rank(const size_t expected_count, const ngraph::Dimension& expected_rank);

/**
 * @brief checks whether a transpose matches a given order
 * @param transpose transpose layer
 * @param order order of transposition to be compared with
 * @return true if the order matches, false otherwise
 */
bool TransposeOrderMatches(std::shared_ptr<ngraph::opset7::Transpose> transpose, std::vector<size_t> order);

/**
 * @brief performs a crop of a flattened input tensor
 * @param input input layer
 * @param offset offset to start the crop at
 * @param size size of the crop
 * @return pointer to the newly created slice
 */
std::shared_ptr<ngraph::opset7::StridedSlice> FlatCrop(ngraph::Output<ngraph::Node> input, size_t offset, size_t size);
} // namespace GNAPluginNS
@ -77,7 +77,7 @@ void HeteroInferRequest::SetBlob(const std::string& name, const InferenceEngine:
             if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) {
                 r->SetBlob(name, data, foundInput->getPreProcess());
             }
-        } catch (const InferenceEngine::NotFound& ex) {}
+        } catch (const InferenceEngine::NotFound&) {}
     }
 }

Some files were not shown because too many files have changed in this diff