Compare commits

...

6 Commits

Author SHA1 Message Date
Ivan Tikhonov
cfd42bd2cb Handle Reshape's special zero in SimplifySecondInputOfReshape (#20785) (#20820)
* Handle Reshape's special zero in SimplifySecondInputOfReshape

SimplifySecondInputOfReshape detects ShapeOf->Gather->Concat
subgraphs on Reshape's second input and replaces ShapeOf->Gather
with a Constant with zero(s). Currently it works only with Reshapes
that have special_zero set to true, but it can work for Reshapes
with special_zero == false if non-Gather inputs to Concat are Constants
and don't contain any zero.

Ticket: CVS-123434

* fix no default output

Co-authored-by: Mateusz Tabaka <mateusz.tabaka@intel.com>
2023-11-02 17:43:25 +04:00
Andrei Kochin
663bf04208 Revert Bitwise ops in PyTorch FE (#20813)
* revert bitwise op for PT FE

* revert coverity fixes
2023-11-02 13:08:55 +00:00
Surya Siddharth Pemmaraju
3ebb8eb315 Added fix for concat in torchfx (#20806) 2023-11-02 15:18:53 +04:00
Gorokhov Dmitriy
757b466c5d [CPU] Fixed port mismatch in Eltwise fusion graph optimization (#20807) 2023-11-02 11:31:20 +04:00
Alina Kladieva
df19f18f37 Update refs for public CI pipelines (#20798) 2023-10-31 16:39:41 +00:00
Tatiana Savina
017eb943d5 [DOCS] Update PyPI links and pre-release note (#20799)
* update links and rm pre-release note

* update ov-dev
2023-10-31 13:56:33 +00:00
44 changed files with 268 additions and 671 deletions

View File

@@ -4,7 +4,7 @@ resources:
type: github
endpoint: openvinotoolkit
name: openvinotoolkit/openvino_contrib
ref: master
ref: releases/2023/2
variables:
- group: github

View File

@@ -32,7 +32,7 @@ resources:
type: github
endpoint: openvinotoolkit
name: openvinotoolkit/openvino_contrib
ref: master
ref: releases/2023/2
jobs:
- job: Win

View File

@@ -35,7 +35,7 @@ resources:
type: github
endpoint: openvinotoolkit
name: openvinotoolkit/testdata
ref: master
ref: releases/2023/2
variables:
- group: github

View File

@@ -79,7 +79,7 @@ jobs:
repository: 'openvinotoolkit/openvino_contrib'
path: ${{ env.OPENVINO_CONTRIB_REPO }}
submodules: 'true'
ref: 'master'
ref: 'releases/2023/2'
#
# Print system info
@@ -540,7 +540,7 @@ jobs:
install_build_dependencies.sh
sparse-checkout-cone-mode: false
path: ${{ env.OPENVINO_REPO }}
ref: 'master'
ref: 'releases/2023/2'
- name: Install git
run: |
@@ -1443,7 +1443,7 @@ jobs:
with:
repository: 'openvinotoolkit/openvino_contrib'
path: ${{ env.OPENVINO_CONTRIB_REPO }}
ref: 'master'
ref: 'releases/2023/2'
#
# Dependencies

View File

@@ -73,7 +73,7 @@ jobs:
repository: 'openvinotoolkit/testdata'
path: ${{ env.MODELS_PATH }}
lfs: 'true'
ref: 'master'
ref: 'releases/2023/2'
#
# Print system info
@@ -243,7 +243,7 @@ jobs:
repository: 'openvinotoolkit/testdata'
path: ${{ env.MODELS_PATH }}
lfs: 'true'
ref: 'master'
ref: 'releases/2023/2'
- name: Download selective build statistics package
uses: actions/download-artifact@v3

View File

@@ -60,7 +60,7 @@ jobs:
with:
repository: 'openvinotoolkit/openvino_contrib'
path: 'openvino_contrib'
ref: 'master'
ref: 'releases/2023/2'
#
# Print system info

View File

@@ -62,7 +62,7 @@ jobs:
repository: 'openvinotoolkit/testdata'
path: 'testdata'
lfs: 'true'
ref: 'master'
ref: 'releases/2023/2'
#
# Print system info
@@ -221,7 +221,7 @@ jobs:
repository: 'openvinotoolkit/testdata'
path: 'testdata'
lfs: 'true'
ref: 'master'
ref: 'releases/2023/2'
- name: Download selective build statistics package
uses: actions/download-artifact@v3

View File

@@ -1,2 +0,0 @@
> **NOTE**: This version is pre-release software and has not undergone full release validation or qualification. No support is offered on pre-release software and APIs/behavior are subject to change. It should NOT be incorporated into any production software/solution and instead should be used only for early testing and integration while awaiting a final release version of this software.

View File

@@ -1,9 +1,6 @@
# OpenVINO™ Development Tools
<!--- The note below is intended for master branch only for pre-release purpose. Remove it for official releases. --->
> **NOTE**: This version is pre-release software and has not undergone full release validation or qualification. No support is offered on pre-release software and APIs/behavior are subject to change. It should NOT be incorporated into any production software/solution and instead should be used only for early testing and integration while awaiting a final release version of this software.
> **NOTE**: OpenVINO™ Development Tools package has been deprecated and will be discontinued with 2024.0 release. To learn more, refer to the [OpenVINO Legacy Features and Components page](https://docs.openvino.ai/2023.1/openvino_legacy_features.html).
> **NOTE**: OpenVINO™ Development Tools package has been deprecated and will be discontinued with 2024.0 release. To learn more, refer to the [OpenVINO Legacy Features and Components page](https://docs.openvino.ai/2023.2/openvino_legacy_features.html).
Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing and deploying AI inference. It can be used to develop applications and solutions based on deep learning tasks, such as: emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, etc. It provides high-performance and rich deployment options, from edge to cloud.
@@ -121,14 +118,14 @@ For example, to install and configure the components for working with TensorFlow
| Component | Console Script | Description |
|------------------|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [Legacy Model conversion API](https://docs.openvino.ai/nightly/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) | `mo` |**Model conversion API** imports, converts, and optimizes models that were trained in popular frameworks to a format usable by OpenVINO components. <br>Supported frameworks include Caffe\*, TensorFlow\*, MXNet\*, PaddlePaddle\*, and ONNX\*. | |
| [Accuracy Checker](https://docs.openvino.ai/nightly/omz_tools_accuracy_checker.html) and <br> [Annotation Converter](https://docs.openvino.ai/nightly/omz_tools_accuracy_checker_annotation_converters.html) | `accuracy_check` <br> `convert_annotation` |**Accuracy Checker** is a deep learning accuracy validation tool that allows you to collect accuracy metrics against popular datasets. The main advantages of the tool are the flexibility of configuration and a set of supported datasets, preprocessing, postprocessing, and metrics. <br> **Annotation Converter** is a utility that prepares datasets for evaluation with Accuracy Checker. |
| [Post-Training Optimization Tool](https://docs.openvino.ai/nightly/pot_introduction.html)| `pot` |**Post-Training Optimization Tool** allows you to optimize trained models with advanced capabilities, such as quantization and low-precision optimizations, without the need to retrain or fine-tune models. |
| [Model Downloader and other Open Model Zoo tools](https://docs.openvino.ai/nightly/omz_tools_downloader.html)| `omz_downloader` <br> `omz_converter` <br> `omz_quantizer` <br> `omz_info_dumper`| **Model Downloader** is a tool for getting access to the collection of high-quality and extremely fast pre-trained deep learning [public](@ref omz_models_group_public) and [Intel](@ref omz_models_group_intel)-trained models. These free pre-trained models can be used to speed up the development and production deployment process without training your own models. The tool downloads model files from online sources and, if necessary, patches them to make them more usable with model conversion API. A number of additional tools are also provided to automate the process of working with downloaded models:<br> **Model Converter** is a tool for converting Open Model Zoo models that are stored in an original deep learning framework format into the OpenVINO Intermediate Representation (IR) using model conversion API. <br> **Model Quantizer** is a tool for automatic quantization of full-precision models in the IR format into low-precision versions using the Post-Training Optimization Tool. <br> **Model Information Dumper** is a helper utility for dumping information about the models to a stable, machine-readable format. |
| [Legacy Model conversion API](https://docs.openvino.ai/2023.2/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) | `mo` |**Model conversion API** imports, converts, and optimizes models that were trained in popular frameworks to a format usable by OpenVINO components. <br>Supported frameworks include Caffe\*, TensorFlow\*, MXNet\*, PaddlePaddle\*, and ONNX\*. | |
| [Accuracy Checker](https://docs.openvino.ai/2023.2/omz_tools_accuracy_checker.html) and <br> [Annotation Converter](https://docs.openvino.ai/2023.2/omz_tools_accuracy_checker_annotation_converters.html) | `accuracy_check` <br> `convert_annotation` |**Accuracy Checker** is a deep learning accuracy validation tool that allows you to collect accuracy metrics against popular datasets. The main advantages of the tool are the flexibility of configuration and a set of supported datasets, preprocessing, postprocessing, and metrics. <br> **Annotation Converter** is a utility that prepares datasets for evaluation with Accuracy Checker. |
| [Post-Training Optimization Tool](https://docs.openvino.ai/2023.2/pot_introduction.html)| `pot` |**Post-Training Optimization Tool** allows you to optimize trained models with advanced capabilities, such as quantization and low-precision optimizations, without the need to retrain or fine-tune models. |
| [Model Downloader and other Open Model Zoo tools](https://docs.openvino.ai/2023.2/omz_tools_downloader.html)| `omz_downloader` <br> `omz_converter` <br> `omz_quantizer` <br> `omz_info_dumper`| **Model Downloader** is a tool for getting access to the collection of high-quality and extremely fast pre-trained deep learning [public](@ref omz_models_group_public) and [Intel](@ref omz_models_group_intel)-trained models. These free pre-trained models can be used to speed up the development and production deployment process without training your own models. The tool downloads model files from online sources and, if necessary, patches them to make them more usable with model conversion API. A number of additional tools are also provided to automate the process of working with downloaded models:<br> **Model Converter** is a tool for converting Open Model Zoo models that are stored in an original deep learning framework format into the OpenVINO Intermediate Representation (IR) using model conversion API. <br> **Model Quantizer** is a tool for automatic quantization of full-precision models in the IR format into low-precision versions using the Post-Training Optimization Tool. <br> **Model Information Dumper** is a helper utility for dumping information about the models to a stable, machine-readable format. |
## Troubleshooting
For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.1/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages.
For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.2/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages.
### Errors with Installing via PIP for Users in China

View File

@@ -1,11 +1,8 @@
# OpenVINO™
<!--- The note below is intended for master branch only for pre-release purpose. Remove it for official releases. --->
> **NOTE**: This version is pre-release software and has not undergone full release validation or qualification. No support is offered on pre-release software and APIs/behavior are subject to change. It should NOT be incorporated into any production software/solution and instead should be used only for early testing and integration while awaiting a final release version of this software.
Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing and deploying AI inference. It can be used to develop applications and solutions based on deep learning tasks, such as: emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, etc. It provides high-performance and rich deployment options, from edge to cloud.
If you have already finished developing your models and converting them to the OpenVINO model format, you can install OpenVINO Runtime to deploy your applications on various devices. The [OpenVINO™](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) Python package includes a set of libraries for an easy inference integration with your products.
If you have already finished developing your models and converting them to the OpenVINO model format, you can install OpenVINO Runtime to deploy your applications on various devices. The [OpenVINO™](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) Python package includes a set of libraries for an easy inference integration with your products.
## System Requirements
@@ -75,13 +72,13 @@ If installation was successful, you will see the list of available devices.
| Component | Content | Description |
|------------------|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [OpenVINO Runtime](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) | `openvino package` |**OpenVINO Runtime** is a set of C++ libraries with C and Python bindings providing a common API to deliver inference solutions on the platform of your choice. Use the OpenVINO Runtime API to read PyTorch\*, TensorFlow\*, TensorFlow Lite\*, ONNX\*, and PaddlePaddle\* models and execute them on preferred devices. OpenVINO Runtime uses a plugin architecture and includes the following plugins: [CPU](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_supported_plugins_CPU.html), [GPU](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_supported_plugins_GPU.html), [Auto Batch](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_Automatic_Batching.html), [Auto](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_supported_plugins_AUTO.html), [Hetero](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_Hetero_execution.html).
| [OpenVINO Model Converter (OVC)](https://docs.openvino.ai/2023.1/openvino_docs_model_processing_introduction.html#convert-a-model-in-cli-ovc) | `ovc` |**OpenVINO Model Converter** converts models that were trained in popular frameworks to a format usable by OpenVINO components. <br>Supported frameworks include ONNX\*, TensorFlow\*, TensorFlow Lite\*, and PaddlePaddle\*. |
| [Benchmark Tool](https://docs.openvino.ai/2023.1/openvino_inference_engine_tools_benchmark_tool_README.html)| `benchmark_app` | **Benchmark Application** allows you to estimate deep learning inference performance on supported devices for synchronous and asynchronous modes. |
| [OpenVINO Runtime](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) | `openvino package` |**OpenVINO Runtime** is a set of C++ libraries with C and Python bindings providing a common API to deliver inference solutions on the platform of your choice. Use the OpenVINO Runtime API to read PyTorch\*, TensorFlow\*, TensorFlow Lite\*, ONNX\*, and PaddlePaddle\* models and execute them on preferred devices. OpenVINO Runtime uses a plugin architecture and includes the following plugins: [CPU](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_CPU.html), [GPU](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_GPU.html), [Auto Batch](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_Automatic_Batching.html), [Auto](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_AUTO.html), [Hetero](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_Hetero_execution.html).
| [OpenVINO Model Converter (OVC)](https://docs.openvino.ai/2023.2/openvino_docs_model_processing_introduction.html#convert-a-model-in-cli-ovc) | `ovc` |**OpenVINO Model Converter** converts models that were trained in popular frameworks to a format usable by OpenVINO components. <br>Supported frameworks include ONNX\*, TensorFlow\*, TensorFlow Lite\*, and PaddlePaddle\*. |
| [Benchmark Tool](https://docs.openvino.ai/2023.2/openvino_inference_engine_tools_benchmark_tool_README.html)| `benchmark_app` | **Benchmark Application** allows you to estimate deep learning inference performance on supported devices for synchronous and asynchronous modes. |
## Troubleshooting
For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.1/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages.
For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.2/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages.
### Errors with Installing via PIP for Users in China

View File

@@ -82,6 +82,8 @@ class OperatorSupport(OperatorSupport):
"torch.ops.aten.mul.Scalar": None,
"torch.ops.aten.mul.Tensor": None,
"torch.ops.aten.native_batch_norm.default": None,
"torch.ops.aten._native_batch_norm_legit.default": None,
"torch.ops.aten._native_batch_norm_legit_no_training.default": None,
"torch.ops.aten.native_group_norm.default": None,
"torch.ops.aten.native_layer_norm.default": None,
"torch.ops.aten.neg.default": None,

View File

@@ -1,52 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/pass/graph_rewrite.hpp"
#include "transformations_visibility.hpp"
namespace ov {
namespace pass {
class TRANSFORMATIONS_API ConvertBitwiseAndToLogicalAnd;
class TRANSFORMATIONS_API ConvertBitwiseNotToLogicalNot;
class TRANSFORMATIONS_API ConvertBitwiseOrToLogicalOr;
class TRANSFORMATIONS_API ConvertBitwiseXorToLogicalXor;
} // namespace pass
} // namespace ov
class ov::pass::ConvertBitwiseAndToLogicalAnd : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertBitwiseAndToLogicalAnd", "0");
ConvertBitwiseAndToLogicalAnd();
};
class ov::pass::ConvertBitwiseNotToLogicalNot : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertBitwiseNotToLogicalNot", "0");
ConvertBitwiseNotToLogicalNot();
};
class ov::pass::ConvertBitwiseOrToLogicalOr : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertBitwiseOrToLogicalOr", "0");
ConvertBitwiseOrToLogicalOr();
};
class ov::pass::ConvertBitwiseXorToLogicalXor : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertBitwiseXorToLogicalXor", "0");
ConvertBitwiseXorToLogicalXor();
};
/**
* @ingroup ie_transformation_common_api
* @brief Converts Bitwise operators to Logical for boolean datatype for plugins that don't support opset13 Bitwise
*/
class ConvertBitwiseToLogical : public ov::pass::GraphRewrite {
public:
OPENVINO_RTTI("ConvertBitwiseToLogical", "0");
ConvertBitwiseToLogical() {
add_matcher<ov::pass::ConvertBitwiseAndToLogicalAnd>();
add_matcher<ov::pass::ConvertBitwiseNotToLogicalNot>();
add_matcher<ov::pass::ConvertBitwiseOrToLogicalOr>();
add_matcher<ov::pass::ConvertBitwiseXorToLogicalXor>();
}
};

View File

@@ -65,7 +65,6 @@
#include "transformations/init_node_info.hpp"
#include "transformations/op_conversions/batch_norm_decomposition.hpp"
#include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp"
#include "transformations/op_conversions/convert_bitwise_to_logical_bool.hpp"
#include "transformations/op_conversions/convert_broadcast_to_tiles.hpp"
#include "transformations/op_conversions/convert_convertlike.hpp"
#include "transformations/op_conversions/convert_deformable_conv_v8_to_v1.hpp"
@@ -227,11 +226,6 @@ bool ov::pass::CommonOptimizations::run_on_model(const std::shared_ptr<ov::Model
ADD_MATCHER(fq_fusions, MulFakeQuantizeFusion)
fq_fusions->set_name("ov::pass::FakeQuantizeFusions");
// Temporary transformation to allow for PyTorch frontend to
// partially support bitwise operators with boolean inputs for plugins
// that didn't enabled BitwiseOps from opset13
REGISTER_PASS(manager, ConvertBitwiseToLogical)
// StridesOptimization should be at the very end
// because we cannot insert any MaxPools since they may prevent
// other optimizations

View File

@@ -200,7 +200,7 @@ pass::SimplifySecondInputOfReshape::SimplifySecondInputOfReshape() {
matcher_pass_callback callback = [=](Matcher& m) {
auto node = m.get_match_root();
const auto reshape = as_type_ptr<v1::Reshape>(node);
if (!reshape || reshape->get_special_zero() == false) {
if (!reshape) {
return false;
}
@@ -219,7 +219,7 @@ pass::SimplifySecondInputOfReshape::SimplifySecondInputOfReshape() {
auto check_shape_of_gather = [&](const std::shared_ptr<Node>& gather) {
auto shape_of = gather->get_input_node_shared_ptr(0);
if (!is_type<v3::ShapeOf>(shape_of) && !is_type<v0::ShapeOf>(shape_of)) {
if (!is_type<op::util::ShapeOfBase>(shape_of)) {
return false;
}
return shape_of->input_value(0) == data;
@@ -237,16 +237,15 @@ pass::SimplifySecondInputOfReshape::SimplifySecondInputOfReshape() {
gather_dims_expected_location += concat_input_shape[0];
};
bool special_zero = reshape->get_special_zero();
// We need this check to avoid sequences shapeOf -> gather -> concat
// that change the arrangement of dimensions in the reshape pattern
for (auto& concat_input : new_concat_inputs) {
if (const auto gather = as_type_ptr<op::util::GatherBase>(concat_input.get_node_shared_ptr())) {
auto indices_constant = as_type_ptr<v0::Constant>(gather->get_input_node_shared_ptr(1));
if (!indices_constant || !check_shape_of_gather(gather)) {
update_expected_gather_location(gather);
continue;
}
auto node = concat_input.get_node_shared_ptr();
if (ov::is_type<op::util::GatherBase>(node) &&
ov::is_type<v0::Constant>(node->get_input_node_shared_ptr(1)) && check_shape_of_gather(node)) {
auto indices_constant = as_type_ptr<v0::Constant>(node->get_input_node_shared_ptr(1));
bool gather_can_be_fused = true;
const auto indices = indices_constant->cast_vector<std::int64_t>();
for (size_t i = 0; i < indices.size(); ++i) {
@@ -258,11 +257,21 @@ pass::SimplifySecondInputOfReshape::SimplifySecondInputOfReshape() {
if (gather_can_be_fused) {
const size_t num_of_unchanged_dimensions = indices.size();
const auto subgraph_et = gather->get_input_element_type(0);
const auto subgraph_et = node->get_input_element_type(0);
concat_input = v0::Constant::create(subgraph_et, Shape{num_of_unchanged_dimensions}, {0});
gather_folded = true;
}
} else {
if (!special_zero) {
// If special zero is false - check if other inputs to Concat are Constants.
// If any of those Constants contain zero - return false.
auto constant = as_type_ptr<v0::Constant>(node);
if (!constant)
return false;
auto values = constant->cast_vector<int64_t>();
if (std::find(values.begin(), values.end(), 0) != values.end())
return false;
}
update_expected_gather_location(concat_input);
}
}
@@ -275,7 +284,7 @@ pass::SimplifySecondInputOfReshape::SimplifySecondInputOfReshape() {
new_concat->set_friendly_name(concat->get_friendly_name());
copy_runtime_info(concat, new_concat);
const auto new_reshape = reshape->clone_with_new_inputs({reshape->input_value(0), new_concat});
const auto new_reshape = std::make_shared<v1::Reshape>(reshape->input_value(0), new_concat, true);
new_reshape->set_friendly_name(reshape->get_friendly_name());
copy_runtime_info(reshape, new_reshape);

View File

@@ -1,116 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/op_conversions/convert_bitwise_to_logical_bool.hpp"
#include "itt.hpp"
#include "openvino/core/rt_info.hpp"
#include "openvino/op/bitwise_and.hpp"
#include "openvino/op/bitwise_not.hpp"
#include "openvino/op/bitwise_or.hpp"
#include "openvino/op/bitwise_xor.hpp"
#include "openvino/op/logical_and.hpp"
#include "openvino/op/logical_not.hpp"
#include "openvino/op/logical_or.hpp"
#include "openvino/op/logical_xor.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
ov::pass::ConvertBitwiseAndToLogicalAnd::ConvertBitwiseAndToLogicalAnd() {
MATCHER_SCOPE(ConvertBitwiseAndToLogicalAnd);
auto pattern =
pattern::wrap_type<ov::op::v13::BitwiseAnd>({pattern::any_input(pattern::type_matches(element::boolean)),
pattern::any_input(pattern::type_matches(element::boolean))});
const matcher_pass_callback callback = [=](pattern::Matcher& m) {
const auto bitwise = std::dynamic_pointer_cast<ov::op::v13::BitwiseAnd>(m.get_match_root());
if (!bitwise || transformation_callback(bitwise)) {
return false;
}
const auto logical = std::make_shared<ov::op::v1::LogicalAnd>(bitwise->input_value(0),
bitwise->input_value(1),
bitwise->get_autob());
logical->set_friendly_name(bitwise->get_friendly_name());
copy_runtime_info(bitwise, logical);
replace_node(bitwise, logical);
return true;
};
auto m = std::make_shared<pattern::Matcher>(pattern, matcher_name);
register_matcher(m, callback);
}
ov::pass::ConvertBitwiseNotToLogicalNot::ConvertBitwiseNotToLogicalNot() {
MATCHER_SCOPE(ConvertBitwiseNotToLogicalNot);
auto pattern =
pattern::wrap_type<ov::op::v13::BitwiseNot>({pattern::any_input(pattern::type_matches(element::boolean))});
const matcher_pass_callback callback = [=](pattern::Matcher& m) {
const auto bitwise = std::dynamic_pointer_cast<ov::op::v13::BitwiseNot>(m.get_match_root());
if (!bitwise || transformation_callback(bitwise)) {
return false;
}
const auto logical = std::make_shared<ov::op::v1::LogicalNot>(bitwise->input_value(0));
logical->set_friendly_name(bitwise->get_friendly_name());
copy_runtime_info(bitwise, logical);
replace_node(bitwise, logical);
return true;
};
auto m = std::make_shared<pattern::Matcher>(pattern, matcher_name);
register_matcher(m, callback);
}
ov::pass::ConvertBitwiseOrToLogicalOr::ConvertBitwiseOrToLogicalOr() {
MATCHER_SCOPE(ConvertBitwiseOrToLogicalOr);
auto pattern =
pattern::wrap_type<ov::op::v13::BitwiseOr>({pattern::any_input(pattern::type_matches(element::boolean)),
pattern::any_input(pattern::type_matches(element::boolean))});
const matcher_pass_callback callback = [=](pattern::Matcher& m) {
const auto bitwise = std::dynamic_pointer_cast<ov::op::v13::BitwiseOr>(m.get_match_root());
if (!bitwise || transformation_callback(bitwise)) {
return false;
}
const auto logical = std::make_shared<ov::op::v1::LogicalOr>(bitwise->input_value(0),
bitwise->input_value(1),
bitwise->get_autob());
logical->set_friendly_name(bitwise->get_friendly_name());
copy_runtime_info(bitwise, logical);
replace_node(bitwise, logical);
return true;
};
auto m = std::make_shared<pattern::Matcher>(pattern, matcher_name);
register_matcher(m, callback);
}
ov::pass::ConvertBitwiseXorToLogicalXor::ConvertBitwiseXorToLogicalXor() {
MATCHER_SCOPE(ConvertBitwiseXorToLogicalXor);
auto pattern =
pattern::wrap_type<ov::op::v13::BitwiseXor>({pattern::any_input(pattern::type_matches(element::boolean)),
pattern::any_input(pattern::type_matches(element::boolean))});
const matcher_pass_callback callback = [=](pattern::Matcher& m) {
const auto bitwise = std::dynamic_pointer_cast<ov::op::v13::BitwiseXor>(m.get_match_root());
if (!bitwise || transformation_callback(bitwise)) {
return false;
}
const auto logical = std::make_shared<ov::op::v1::LogicalXor>(bitwise->input_value(0),
bitwise->input_value(1),
bitwise->get_autob());
logical->set_friendly_name(bitwise->get_friendly_name());
copy_runtime_info(bitwise, logical);
replace_node(bitwise, logical);
return true;
};
auto m = std::make_shared<pattern::Matcher>(pattern, matcher_name);
register_matcher(m, callback);
}

View File

@@ -611,3 +611,53 @@ TEST_F(TransformationTestsF, SimplifySecondInputOfReshapeTest21) {
}
comparator.enable(FunctionsComparator::CONST_VALUES);
}
TEST_F(TransformationTestsF, SimplifySecondInputOfReshapeTestFalseSpecialZero) {
PartialShape data_shape{1, 128, 12, 64};
{
auto data = std::make_shared<opset7::Parameter>(element::f32, data_shape);
auto shape_of = std::make_shared<opset7::ShapeOf>(data);
auto gather_op = gather(shape_of, std::vector<int64_t>{0, 1});
auto constant = opset7::Constant::create(element::i64, Shape{1}, {768});
auto concat = std::make_shared<opset7::Concat>(OutputVector{gather_op, constant}, -1);
auto reshape = std::make_shared<opset7::Reshape>(data, concat, false);
model = std::make_shared<Model>(NodeVector{reshape}, ParameterVector{data});
manager.register_pass<ov::pass::SimplifySecondInputOfReshape>();
}
{
auto data = std::make_shared<opset7::Parameter>(element::f32, data_shape);
auto reshape_pattern = opset7::Constant::create(element::i64, Shape{3}, {0, 0, 768});
auto reshape = std::make_shared<opset7::Reshape>(data, reshape_pattern, true);
model_ref = std::make_shared<Model>(NodeVector{reshape}, ParameterVector{data});
}
comparator.enable(FunctionsComparator::ATTRIBUTES);
comparator.enable(FunctionsComparator::CONST_VALUES);
}
TEST_F(TransformationTestsF, SimplifySecondInputOfReshapeTestFalseSpecialZeroZeroDim) {
PartialShape data_shape{1, 0, 12, 64};
{
auto data = std::make_shared<opset7::Parameter>(element::f32, data_shape);
auto shape_of = std::make_shared<opset7::ShapeOf>(data);
auto gather_op = gather(shape_of, std::vector<int64_t>{0, 1});
auto constant = opset7::Constant::create(element::i64, Shape{1}, {768});
auto concat = std::make_shared<opset7::Concat>(OutputVector{gather_op, constant}, -1);
auto reshape = std::make_shared<opset7::Reshape>(data, concat, false);
model = std::make_shared<Model>(NodeVector{reshape}, ParameterVector{data});
manager.register_pass<ov::pass::SimplifySecondInputOfReshape>();
}
{
auto data = std::make_shared<opset7::Parameter>(element::f32, data_shape);
auto reshape_pattern = opset7::Constant::create(element::i64, Shape{3}, {0, 0, 768});
auto reshape = std::make_shared<opset7::Reshape>(data, reshape_pattern, true);
model_ref = std::make_shared<Model>(NodeVector{reshape}, ParameterVector{data});
}
comparator.enable(FunctionsComparator::ATTRIBUTES);
comparator.enable(FunctionsComparator::CONST_VALUES);
}

View File

@@ -1,124 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/op_conversions/convert_bitwise_to_logical_bool.hpp"
#include <gtest/gtest.h>
#include <memory>
#include "common_test_utils/ov_test_utils.hpp"
#include "openvino/opsets/opset1.hpp"
#include "openvino/opsets/opset13.hpp"
#include "openvino/pass/manager.hpp"
#include "transformations/utils/utils.hpp"
using namespace ov;
using namespace testing;
namespace {
std::shared_ptr<ov::Model> create_bitwise_model(std::string op_type, const ov::element::Type input_type) {
const auto lhs = std::make_shared<ov::opset13::Parameter>(input_type, ov::Shape{1, 3, 100, 100});
const auto rhs = std::make_shared<ov::opset13::Parameter>(input_type, ov::Shape{1, 3, 100, 100});
std::shared_ptr<ov::Node> bitwise;
ParameterVector params{lhs, rhs};
if (op_type == "and") {
bitwise = std::make_shared<ov::opset13::BitwiseAnd>(lhs, rhs, op::AutoBroadcastType::NONE);
} else if (op_type == "not") {
bitwise = std::make_shared<ov::opset13::BitwiseNot>(lhs);
params = {lhs};
} else if (op_type == "or") {
bitwise = std::make_shared<ov::opset13::BitwiseOr>(lhs, rhs, op::AutoBroadcastType::NONE);
} else if (op_type == "xor") {
bitwise = std::make_shared<ov::opset13::BitwiseXor>(lhs, rhs, op::AutoBroadcastType::NONE);
}
bitwise->set_friendly_name("bitwise");
return std::make_shared<ov::Model>(bitwise->outputs(), params);
}
// Build the reference model: one opset1 Logical* node over boolean inputs of
// shape {1, 3, 100, 100}. Mirrors create_bitwise_model so the two can be
// compared after the Bitwise->Logical conversion pass runs.
// op_type selects "and", "or", "xor" (binary) or "not" (unary).
// NOTE(review): like its bitwise counterpart, an unrecognized op_type leaves
// `logical` null and crashes at set_friendly_name below.
std::shared_ptr<ov::Model> create_logical_model(std::string op_type) {
    const auto lhs = std::make_shared<ov::opset1::Parameter>(ov::element::boolean, ov::Shape{1, 3, 100, 100});
    const auto rhs = std::make_shared<ov::opset1::Parameter>(ov::element::boolean, ov::Shape{1, 3, 100, 100});
    std::shared_ptr<ov::Node> logical;
    ParameterVector params = {lhs, rhs};
    if (op_type == "and") {
        logical = std::make_shared<ov::opset1::LogicalAnd>(lhs, rhs, op::AutoBroadcastType::NONE);
    } else if (op_type == "not") {
        logical = std::make_shared<ov::opset1::LogicalNot>(lhs);
        // LogicalNot is unary: keep only the lhs parameter.
        params = {lhs};
    } else if (op_type == "or") {
        logical = std::make_shared<ov::opset1::LogicalOr>(lhs, rhs, op::AutoBroadcastType::NONE);
    } else if (op_type == "xor") {
        logical = std::make_shared<ov::opset1::LogicalXor>(lhs, rhs, op::AutoBroadcastType::NONE);
    }
    logical->set_friendly_name("logical");
    return std::make_shared<ov::Model>(logical->outputs(), params);
}
} // namespace
// i32 cases: only `model` is set, no `model_ref` — presumably TransformationTestsF
// then expects the pass to leave the graph untouched for non-boolean element
// types (TODO confirm the fixture's no-reference semantics).
TEST_F(TransformationTestsF, ConvertBitwiseToLogical_and_i32) {
    auto transform = manager.register_pass<ov::pass::GraphRewrite>();
    transform->add_matcher<ConvertBitwiseToLogical>();
    model = create_bitwise_model("and", element::i32);
}
TEST_F(TransformationTestsF, ConvertBitwiseToLogical_not_i32) {
    auto transform = manager.register_pass<ov::pass::GraphRewrite>();
    transform->add_matcher<ConvertBitwiseToLogical>();
    model = create_bitwise_model("not", element::i32);
}
TEST_F(TransformationTestsF, ConvertBitwiseToLogical_or_i32) {
    auto transform = manager.register_pass<ov::pass::GraphRewrite>();
    transform->add_matcher<ConvertBitwiseToLogical>();
    model = create_bitwise_model("or", element::i32);
}
TEST_F(TransformationTestsF, ConvertBitwiseToLogical_xor_i32) {
    auto transform = manager.register_pass<ov::pass::GraphRewrite>();
    transform->add_matcher<ConvertBitwiseToLogical>();
    model = create_bitwise_model("xor", element::i32);
}
// boolean cases: the pass is expected to rewrite each opset13 Bitwise* node
// into its opset1 Logical* equivalent, so the reference model is the logical
// variant. Attribute and constant-value comparison is enabled to catch
// broadcast-mode or value drift in the rewrite.
TEST_F(TransformationTestsF, ConvertBitwiseToLogical_and_boolean) {
    auto transform = manager.register_pass<ov::pass::GraphRewrite>();
    transform->add_matcher<ConvertBitwiseToLogical>();
    model = create_bitwise_model("and", element::boolean);
    model_ref = create_logical_model("and");
    comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
    comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
}
TEST_F(TransformationTestsF, ConvertBitwiseToLogical_not_boolean) {
    auto transform = manager.register_pass<ov::pass::GraphRewrite>();
    transform->add_matcher<ConvertBitwiseToLogical>();
    model = create_bitwise_model("not", element::boolean);
    model_ref = create_logical_model("not");
    comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
    comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
}
TEST_F(TransformationTestsF, ConvertBitwiseToLogical_or_boolean) {
    auto transform = manager.register_pass<ov::pass::GraphRewrite>();
    transform->add_matcher<ConvertBitwiseToLogical>();
    model = create_bitwise_model("or", element::boolean);
    model_ref = create_logical_model("or");
    comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
    comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
}
TEST_F(TransformationTestsF, ConvertBitwiseToLogical_xor_boolean) {
    auto transform = manager.register_pass<ov::pass::GraphRewrite>();
    transform->add_matcher<ConvertBitwiseToLogical>();
    model = create_bitwise_model("xor", element::boolean);
    model_ref = create_logical_model("xor");
    comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
    comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
}

View File

@@ -17,7 +17,7 @@ class Place;
class TorchDecoder;
struct PlaceDesc {
PlaceDesc(const std::shared_ptr<Node>& value) : m_value(value) {}
PlaceDesc(std::shared_ptr<Node> value) : m_value(value) {}
std::shared_ptr<Node> m_value;
};

View File

@@ -60,8 +60,7 @@ OutputVector translate_avg_poolnd(const NodeContext& context) {
auto pads_len = context.mark_node(v0::Constant::create(element::i32, Shape{}, {pads.size()}));
auto pads_diff = context.mark_node(std::make_shared<v1::Subtract>(rank, pads_len));
auto pads_remaining = context.mark_node(std::make_shared<v3::Broadcast>(zero_i32, pads_diff));
auto padding = context.mark_node(
std::make_shared<v0::Concat>(OutputVector{std::move(pads_remaining), std::move(pad_values)}, 0));
auto padding = context.mark_node(std::make_shared<v0::Concat>(OutputVector{pads_remaining, pad_values}, 0));
input = context.mark_node(std::make_shared<v1::Pad>(input, padding, padding, zero, ov::op::PadMode::CONSTANT));
pads = Shape(pads.size(), 0);
}

View File

@@ -39,10 +39,13 @@ Output<Node> broadcast_const_to_channel_dim(const NodeContext& context,
}
} // namespace
OutputVector translate_batch_norm(const NodeContext& context) {
OutputVector translate_batch_norm_common(const NodeContext& context, bool training) {
// Schema: aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var,
// bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
num_inputs_check(context, 8, 9);
// batch_norm_legit_no_training Schema: aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor?
// running_mean, Tensor? running_var, float momentum, float eps) -> Tensor
auto input = context.get_input(0);
Output<Node> weight;
Output<Node> bias;
@@ -63,7 +66,6 @@ OutputVector translate_batch_norm(const NodeContext& context) {
bias = broadcast_const_to_channel_dim(context, input, zero_f);
}
// index 3 running_mean and index 4 running_var can be none for training case only, check that not training before
auto training = context.const_input<bool>(5);
// if training for batch norm activated, but model in eval mode, it uses current statistics instead of running
if (training) {
auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0}));
@@ -92,14 +94,34 @@ OutputVector translate_batch_norm(const NodeContext& context) {
running_var = current_var;
}
// Input with index 6 is momentum, it is used only for updating running_mean accumulation during training
auto epsilon = context.const_input<float>(7);
// In batch_norm_legit_no_training, momentum is index 5 and epsilon is 6
float epsilon;
if (context.get_input_size() == 7) {
epsilon = context.const_input<float>(6);
} else {
epsilon = context.const_input<float>(7);
}
// Input with index 8 is flag "cudnn_enabled" we can ignore it
return {context.mark_node(
std::make_shared<v5::BatchNormInference>(input, weight, bias, running_mean, running_var, epsilon))};
};
OutputVector translate_batch_norm_fx(const NodeContext& context) {
auto output = translate_batch_norm(context);
OutputVector translate_batch_norm(const NodeContext& context) {
num_inputs_check(context, 7, 9);
auto training = context.const_input<bool>(5);
return translate_batch_norm_common(context, training);
}
OutputVector translate_batch_norm_legit_fx(const NodeContext& context) {
num_inputs_check(context, 7, 9);
auto training = context.const_input<bool>(5);
auto output = translate_batch_norm_common(context, training);
return {context.mark_node(make_list_construct(output))};
}
OutputVector translate_batch_norm_legit_no_training_fx(const NodeContext& context) {
num_inputs_check(context, 7, 9);
auto output = translate_batch_norm_common(context, false);
return {context.mark_node(make_list_construct(output))};
}

View File

@@ -3,10 +3,10 @@
//
#include "openvino/frontend/pytorch/node_context.hpp"
#include "openvino/op/bitwise_and.hpp"
#include "openvino/op/bitwise_not.hpp"
#include "openvino/op/bitwise_or.hpp"
#include "openvino/op/bitwise_xor.hpp"
#include "openvino/op/logical_and.hpp"
#include "openvino/op/logical_not.hpp"
#include "openvino/op/logical_or.hpp"
#include "openvino/op/logical_xor.hpp"
#include "utils.hpp"
namespace ov {
@@ -17,7 +17,9 @@ namespace op {
OutputVector translate_bitwise_not(const NodeContext& context) {
num_inputs_check(context, 1, 2);
auto x = context.get_input(0);
auto not_x = context.mark_node(std::make_shared<ov::op::v13::BitwiseNot>(x));
FRONT_END_OP_CONVERSION_CHECK(x.get_element_type().compatible(element::boolean),
"aten::bitwise_not supported only for boolean input");
auto not_x = context.mark_node(std::make_shared<ov::op::v1::LogicalNot>(x));
if (!context.input_is_none(1)) {
context.mutate_input(1, not_x);
}
@@ -25,38 +27,32 @@ OutputVector translate_bitwise_not(const NodeContext& context) {
};
OutputVector translate_bitwise_and(const NodeContext& context) {
num_inputs_check(context, 2, 3);
num_inputs_check(context, 2, 2);
auto x = context.get_input(0);
auto y = context.get_input(1);
align_eltwise_input_types(context, x, y, false);
auto and_x = context.mark_node(std::make_shared<ov::op::v13::BitwiseAnd>(x, y));
if (!context.input_is_none(2)) {
context.mutate_input(2, and_x);
}
FRONT_END_OP_CONVERSION_CHECK(x.get_element_type().compatible(element::boolean),
"aten::bitwise_not supported only for boolean input");
auto and_x = context.mark_node(std::make_shared<ov::op::v1::LogicalAnd>(x, y));
return {and_x};
};
OutputVector translate_bitwise_or(const NodeContext& context) {
num_inputs_check(context, 2, 3);
num_inputs_check(context, 2, 2);
auto x = context.get_input(0);
auto y = context.get_input(1);
align_eltwise_input_types(context, x, y, false);
auto or_x = context.mark_node(std::make_shared<ov::op::v13::BitwiseOr>(x, y));
if (!context.input_is_none(2)) {
context.mutate_input(2, or_x);
}
FRONT_END_OP_CONVERSION_CHECK(x.get_element_type().compatible(element::boolean),
"aten::bitwise_not supported only for boolean input");
auto or_x = context.mark_node(std::make_shared<ov::op::v1::LogicalOr>(x, y));
return {or_x};
};
OutputVector translate_bitwise_xor(const NodeContext& context) {
num_inputs_check(context, 2, 3);
num_inputs_check(context, 2, 2);
auto x = context.get_input(0);
auto y = context.get_input(1);
align_eltwise_input_types(context, x, y, false);
auto xor_x = context.mark_node(std::make_shared<ov::op::v13::BitwiseXor>(x, y));
if (!context.input_is_none(2)) {
context.mutate_input(2, xor_x);
}
FRONT_END_OP_CONVERSION_CHECK(x.get_element_type().compatible(element::boolean),
"aten::bitwise_xor supported only for boolean input");
auto xor_x = context.mark_node(std::make_shared<ov::op::v1::LogicalXor>(x, y));
return {xor_x};
};

View File

@@ -22,7 +22,8 @@ using namespace ov::op;
OutputVector translate_cat_common(const NodeContext& context,
const std::deque<ov::Output<ov::Node>>& list_elems,
int64_t axis) {
int64_t axis,
bool is_fx) {
if (list_elems.empty()) {
// couldn't get list elements
auto fw_node = std::make_shared<PtFrameworkNode>(context.get_decoder(), OutputVector{context.get_input(0)}, 1);
@@ -39,8 +40,8 @@ OutputVector translate_cat_common(const NodeContext& context,
"<aten/quantized>::cat is located inside body while inputs are located outside of the body. "
"This case is not supported.");
if (list_elems.size() == 1 &&
!std::dynamic_pointer_cast<op::util::FrameworkNode>(context.get_input(0).get_node_shared_ptr())) {
// Case when list was merged into tensor
!std::dynamic_pointer_cast<op::util::FrameworkNode>(context.get_input(0).get_node_shared_ptr()) && !is_fx) {
// Case when list was merged into tensor. // This case doesn't work with torchfx
auto tensor = list_elems[0];
auto shape = context.mark_node(std::make_shared<v3::ShapeOf>(tensor, element::i32));
auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0}));
@@ -63,7 +64,7 @@ OutputVector translate_cat(const NodeContext& context) {
num_inputs_check(context, 2, 3);
const auto&& list_elems = get_list_as_outputs(context.get_input(0));
auto axis = context.const_input<int64_t>(1);
auto out = translate_cat_common(context, list_elems, axis);
auto out = translate_cat_common(context, list_elems, axis, false);
if (!context.input_is_none(2)) {
context.mutate_input(2, out[0]);
}
@@ -78,7 +79,7 @@ OutputVector translate_cat_fx(const NodeContext& context) {
list_elems.push_back(context.get_input(static_cast<int>(i)));
}
auto axis = context.const_input<int64_t>(context.get_input_size() - 1);
return translate_cat_common(context, list_elems, axis);
return translate_cat_common(context, list_elems, axis, true);
};
OutputVector translate_quantized_cat(const NodeContext& context) {
@@ -87,7 +88,7 @@ OutputVector translate_quantized_cat(const NodeContext& context) {
auto axis = context.const_input<int64_t>(1);
FRONT_END_OP_CONVERSION_CHECK(!list_elems.empty(), "Couldn't find quantized input for quantized::cat operation.");
return {quantize(context,
translate_cat_common(context, list_elems, axis)[0],
translate_cat_common(context, list_elems, axis, false)[0],
context.get_input(2),
context.get_input(3),
list_elems.front())};

View File

@@ -113,7 +113,7 @@ OutputVector translate_max_poolnd(const NodeContext& context) {
if (context.get_output_size() == 2) {
auto out1 = res->output(0);
auto out2 = res->output(1);
return {std::move(out1), std::move(out2)};
return {out1, out2};
} else {
return {res};
}

View File

@@ -127,8 +127,7 @@ OutputVector translate_channel_shuffle(const NodeContext& context) {
auto k = context.mark_node(std::make_shared<v1::Divide>(c, groups, true));
auto g = context.mark_node(std::make_shared<v0::Unsqueeze>(groups, zero));
// 1. Reshape input [N, G, K=C/G, -1]
auto reshape_indices = context.mark_node(
std::make_shared<v0::Concat>(OutputVector{std::move(n), std::move(g), std::move(k), std::move(neg_1)}, 0));
auto reshape_indices = context.mark_node(std::make_shared<v0::Concat>(OutputVector{n, g, k, neg_1}, 0));
x = context.mark_node(std::make_shared<v1::Reshape>(x, reshape_indices, false));
// 2. Transpose to [N, K, G, -1]
auto permute_indices = context.mark_node(v0::Constant::create(element::i32, Shape{4}, {0, 2, 1, 3}));

View File

@@ -33,7 +33,7 @@ OutputVector translate_pythonop(const NodeContext& context) {
}
OutputVector outputs{};
for (auto& result : body->get_results()) {
for (auto result : body->get_results()) {
auto output = result->get_input_source_output(0);
outputs.push_back(context.mark_output(output));
}

View File

@@ -18,10 +18,7 @@ namespace op {
using namespace ov::op;
namespace {
Output<Node> prepare_source(const NodeContext& context,
const Output<Node>& src,
const Output<Node>& index,
const Output<Node>& input) {
Output<Node> prepare_source(const NodeContext& context, Output<Node> src, Output<Node> index, Output<Node> input) {
auto src_partial_shape = src.get_partial_shape();
auto index_shape_rank = get_shape_rank(context, index);
auto index_shape = std::get<0>(index_shape_rank);
@@ -31,9 +28,8 @@ Output<Node> prepare_source(const NodeContext& context,
// into shape of indices.
// TODO: Figure out way to dynamically broadcast scalar src only, without affecting Tensor src. Current
// implementation will fail if Scalar source would have dynamic rank.
auto _src = std::move(src);
if (src_partial_shape.rank().is_static() && src_partial_shape.rank().get_length() == 0) {
_src = context.mark_node(std::make_shared<v3::Broadcast>(_src, index_shape));
src = context.mark_node(std::make_shared<v3::Broadcast>(src, index_shape));
}
auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0}));
@@ -42,13 +38,13 @@ Output<Node> prepare_source(const NodeContext& context,
auto ones = context.mark_node(std::make_shared<v3::Broadcast>(const_1, index_rank));
// In torch indices can be of different shape than source tensor. Create slice to trim source tensor to shape of
// indices.
auto src_pruned = context.mark_node(std::make_shared<v8::Slice>(_src, zeros, index_shape, ones));
auto src_pruned = context.mark_node(std::make_shared<v8::Slice>(src, zeros, index_shape, ones));
auto src_input_dtype = context.mark_node(std::make_shared<v1::ConvertLike>(src_pruned, input));
return src_input_dtype;
};
const v12::ScatterElementsUpdate::Reduction get_reduction_mode(const std::string& pt_reduce_mode) {
const v12::ScatterElementsUpdate::Reduction get_reduction_mode(std::string pt_reduce_mode) {
static const std::unordered_map<std::string, v12::ScatterElementsUpdate::Reduction> TORCH_REDUCTION_TO_OV{
{"add", v12::ScatterElementsUpdate::Reduction::SUM},
{"multiply", v12::ScatterElementsUpdate::Reduction::PROD},

View File

@@ -43,7 +43,6 @@ OP_CONVERTER(translate_batch_norm);
OP_CONVERTER(translate_bitwise_and);
OP_CONVERTER(translate_bitwise_not);
OP_CONVERTER(translate_bitwise_or);
OP_CONVERTER(translate_bitwise_xor);
OP_CONVERTER(translate_cat);
OP_CONVERTER(translate_cdist);
OP_CONVERTER(translate_channel_shuffle);
@@ -213,7 +212,8 @@ OP_CONVERTER(translate_quantized_linear);
OP_CONVERTER(translate_xor);
// Torch FX Translations
OP_CONVERTER(translate_arange_fx);
OP_CONVERTER(translate_batch_norm_fx);
OP_CONVERTER(translate_batch_norm_legit_fx);
OP_CONVERTER(translate_batch_norm_legit_no_training_fx);
OP_CONVERTER(translate_cat_fx);
OP_CONVERTER(translate_chunk_fx);
OP_CONVERTER(translate_expand_fx);
@@ -231,11 +231,11 @@ OP_CONVERTER(translate_transpose_fx);
// Supported ops for TorchScript
const std::map<std::string, CreatorFunction> get_supported_ops_ts() {
return {
{"aten::__and__", op::translate_bitwise_and},
{"aten::__and__", op::translate_and},
{"aten::__derive_index", op::translate_derive_index},
{"aten::__getitem__", op::translate_getitem},
{"aten::__not__", op::translate_1to1_match_1_inputs<opset10::LogicalNot>},
{"aten::__or__", op::translate_bitwise_or},
{"aten::__or__", op::translate_or},
{"aten::__xor__", op::translate_bitwise_xor},
{"aten::__range_length", op::translate_range_length},
{"aten::_convolution", op::translate_convolution},
@@ -281,10 +281,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops_ts() {
{"aten::broadcast_to", op::translate_expand},
{"aten::baddbmm", op::translate_addmm},
{"aten::batch_norm", op::translate_batch_norm},
{"aten::bitwise_and", op::translate_bitwise_and},
{"aten::bitwise_not", op::translate_bitwise_not},
{"aten::bitwise_or", op::translate_bitwise_or},
{"aten::bitwise_xor", op::translate_bitwise_xor},
{"aten::bmm", op::translate_1to1_match_2_inputs<opset10::MatMul>},
{"aten::Bool", op::translate_bool},
{"aten::cat", op::translate_cat},
@@ -612,7 +609,9 @@ const std::map<std::string, CreatorFunction> get_supported_ops_fx() {
{"aten.mm.default", op::translate_1to1_match_2_inputs<opset10::MatMul>},
{"aten.mul.Tensor", op::translate_1to1_match_2_inputs_align_types<opset10::Multiply>},
{"aten.mul.Scalar", op::translate_1to1_match_2_inputs_align_types<opset10::Multiply>},
{"aten.native_batch_norm.default", op::translate_batch_norm_fx},
{"aten.native_batch_norm.default", op::translate_batch_norm_legit_fx},
{"aten._native_batch_norm_legit.default", op::translate_batch_norm_legit_fx},
{"aten._native_batch_norm_legit_no_training.default", op::translate_batch_norm_legit_no_training_fx},
{"aten.native_group_norm.default", op::translate_group_norm_fx},
{"aten.native_layer_norm.default", op::translate_layer_norm_fx},
{"aten.neg.default", op::translate_neg},

View File

@@ -346,7 +346,7 @@ public:
// Replace a single result with 6 results, per each input of parent list_pack
auto inputs = list_pack->inputs();
for (auto& input : inputs) {
for (auto input : inputs) {
model->add_results({make_shared<opset10::Result>(input.get_source_output())});
// TODO: Keep tracking between original and new Results
}

View File

@@ -76,7 +76,7 @@ AppendListUnpackReplacer::AppendListUnpackReplacer() {
auto split = std::make_shared<v1::Split>(inputs[index], axis_0, list_unpack->get_output_size());
NodeVector to_copy_rt{axis_0, split};
OutputVector res;
for (auto& output : split->outputs()) {
for (auto output : split->outputs()) {
auto squeeze = std::make_shared<v0::Squeeze>(output, axis_0);
to_copy_rt.push_back(squeeze);
res.push_back(squeeze);

View File

@@ -73,7 +73,7 @@ AtenCatToConcat::AtenCatToConcat() {
auto body = loop->get_function();
auto output_index = cat->input(0).get_source_output().get_index();
int64_t body_result_index = -1;
for (auto& out_desc : loop->get_output_descriptions()) {
for (auto out_desc : loop->get_output_descriptions()) {
if (out_desc->m_output_index == output_index) {
body_result_index = static_cast<int64_t>(out_desc->m_body_value_index);
break;
@@ -99,7 +99,7 @@ AtenCatToConcat::AtenCatToConcat() {
auto body_param_index = body->get_parameter_index(param);
FRONT_END_GENERAL_CHECK(body_param_index >= 0, "Couldn't find parameter in body parameters.");
int64_t input_index = -1;
for (auto& in_desc : loop->get_input_descriptions()) {
for (auto in_desc : loop->get_input_descriptions()) {
if (in_desc->m_body_parameter_index == static_cast<size_t>(body_param_index)) {
input_index = static_cast<int64_t>(in_desc->m_input_index);
break;

View File

@@ -62,7 +62,7 @@ IndexLoopGetitemReplacer::IndexLoopGetitemReplacer() {
auto body = loop_op->get_function();
std::shared_ptr<Node> chunk_param;
for (auto& input_desc : loop_op->get_input_descriptions()) {
for (auto input_desc : loop_op->get_input_descriptions()) {
if (input_desc->m_input_index == chunk_idx) {
chunk_param = body->get_parameters().at(input_desc->m_body_parameter_index);
break;

View File

@@ -181,14 +181,14 @@ PrimListUnpackReplacer::PrimListUnpackReplacer() {
return false;
}
Output<Node> final_shape_t = opset10::Constant::create(element::i32, Shape{}, {0});
for (auto& input : tensors->inputs()) {
for (auto input : tensors->inputs()) {
auto tensor_shape = rg.make<opset10::ShapeOf>(input.get_source_output(), element::i32);
final_shape_t =
rg.make<opset10::Broadcast>(final_shape_t, tensor_shape, ov::op::BroadcastType::BIDIRECTIONAL);
}
auto final_shape = rg.make<opset10::ShapeOf>(final_shape_t, element::i32);
OutputVector outputs;
for (auto& input : tensors->inputs()) {
for (auto input : tensors->inputs()) {
outputs.push_back(rg.make<opset10::Broadcast>(input.get_source_output(), final_shape));
}
copy_runtime_info_and_name(list_unpack, rg.get(), {input_node});
@@ -202,7 +202,7 @@ PrimListUnpackReplacer::PrimListUnpackReplacer() {
const auto num_splits = list_unpack->get_output_size();
auto split = rg.make<opset10::Split>(input, axis, num_splits);
OutputVector outputs;
for (auto& output : split->outputs()) {
for (auto output : split->outputs()) {
const auto squeeze = rg.make<opset10::Squeeze>(output, axis);
outputs.push_back(squeeze);
}
@@ -218,7 +218,7 @@ PrimListUnpackReplacer::PrimListUnpackReplacer() {
const auto num_splits = list_unpack->get_output_size();
auto split = rg.make<opset10::Split>(non_zero, axis, num_splits);
OutputVector outputs;
for (auto& output : split->outputs()) {
for (auto output : split->outputs()) {
const auto squeeze = rg.make<opset10::Squeeze>(output, axis);
outputs.push_back(squeeze);
}
@@ -234,7 +234,7 @@ PrimListUnpackReplacer::PrimListUnpackReplacer() {
const auto num_splits = list_unpack->get_output_size();
auto split = rg.make<opset10::Split>(non_zero, axis, num_splits);
OutputVector outputs;
for (auto& output : split->outputs()) {
for (auto output : split->outputs()) {
const auto squeeze = rg.make<opset10::Squeeze>(output, axis);
outputs.push_back(squeeze);
}
@@ -310,7 +310,7 @@ PrimListUnpackReplacer::PrimListUnpackReplacer() {
auto split = rg.make<opset10::Split>(shape_of, axis_0, list_unpack->get_output_size());
OutputVector res;
for (auto& output : split->outputs()) {
for (auto output : split->outputs()) {
auto squeeze = rg.make<opset10::Squeeze>(output, axis_0);
res.push_back(squeeze);
}
@@ -328,7 +328,7 @@ PrimListUnpackReplacer::PrimListUnpackReplacer() {
auto split = rg.make<opset10::Split>(slice, axis_0, list_unpack->get_output_size());
OutputVector res;
for (auto& output : split->outputs()) {
for (auto output : split->outputs()) {
auto squeeze = rg.make<opset10::Squeeze>(output, axis_0);
res.push_back(squeeze);
}

View File

@@ -94,7 +94,7 @@ bool DecomposeTupleParameters::run_on_model(const std::shared_ptr<Model>& model)
auto new_parameter = std::make_shared<ov::op::v0::Parameter>(et, ps);
for (auto& input : inputs) {
for (auto input : inputs) {
auto names = input.get_tensor().get_names();
input.replace_source_output(new_parameter->output(0));
new_parameter->output(0).add_names(names);

View File

@@ -130,7 +130,7 @@ RFFTNComplexReplacer::RFFTNComplexReplacer() {
auto normalized_rfftn_splitted = std::make_shared<v1::Split>(normalized_rfftn, const_neg_1, 2);
auto rfftn_outs = rfftn_op->get_users();
bool rval = false;
for (auto& out : rfftn_outs) {
for (auto out : rfftn_outs) {
if (auto real_op = cast_fw_node(out, "aten::real")) {
auto squeezed = std::make_shared<v0::Squeeze>(normalized_rfftn_splitted->output(0), const_neg_1);
copy_runtime_info({rfftn_op, real_op}, squeezed);

View File

@@ -47,7 +47,7 @@ PrimTupleUnpackReplacer::PrimTupleUnpackReplacer() {
bool TupleUnpackInBodyReplacer::run_on_model(const std::shared_ptr<Model>& model) {
bool result = false;
for (auto& op : model->get_ordered_ops()) {
for (auto op : model->get_ordered_ops()) {
const auto if_op = as_type_ptr<v8::If>(op);
if (if_op) {
for (size_t i = 1; i < if_op->get_input_size(); i++) {
@@ -61,7 +61,7 @@ bool TupleUnpackInBodyReplacer::run_on_model(const std::shared_ptr<Model>& model
int else_body_idx = -1;
auto then_descs = if_op->get_input_descriptions(v8::If::THEN_BODY_INDEX);
auto else_descs = if_op->get_input_descriptions(v8::If::ELSE_BODY_INDEX);
for (auto& inp_desc : then_descs) {
for (auto inp_desc : then_descs) {
if (inp_desc->m_input_index == i) {
if (then_body_idx != -1) {
add_exception_to_fw_node(
@@ -72,7 +72,7 @@ bool TupleUnpackInBodyReplacer::run_on_model(const std::shared_ptr<Model>& model
}
}
}
for (auto& inp_desc : else_descs) {
for (auto inp_desc : else_descs) {
if (inp_desc->m_input_index == i) {
if (else_body_idx != -1) {
add_exception_to_fw_node(
@@ -130,10 +130,10 @@ bool TupleUnpackInBodyReplacer::run_on_model(const std::shared_ptr<Model>& model
// create new If inputs
std::vector<std::pair<int, int>> inputs_mapping(if_op->get_input_size(), {-1, -1});
for (auto& inp_desc : then_descs) {
for (auto inp_desc : then_descs) {
inputs_mapping[inp_desc->m_input_index].first = static_cast<int>(inp_desc->m_body_parameter_index);
}
for (auto& inp_desc : else_descs) {
for (auto inp_desc : else_descs) {
inputs_mapping[inp_desc->m_input_index].second = static_cast<int>(inp_desc->m_body_parameter_index);
}
for (size_t j = 0; j < inputs_mapping.size(); j++) {

View File

@@ -85,8 +85,7 @@ U4BlockRepack::U4BlockRepack() {
}
}
copy_runtime_info({std::move(constant), std::move(reshape1), std::move(transpose), std::move(reshape2)},
new_const);
copy_runtime_info(NodeVector{constant, reshape1, transpose, reshape2}, new_const);
replace_node(reshape2, new_const);
return true;

View File

@@ -94,7 +94,7 @@ std::shared_ptr<Model> TranslateSession::convert_pytorch_model(
if (input_model) {
// When we have input model we should use its inputs order to create Parameters
// We use m_inputs instead of get_inputs() because latter doesn't have "self" input
for (auto& input_p : input_model->m_inputs) {
for (auto input_p : input_model->m_inputs) {
auto pytorch_place = std::dynamic_pointer_cast<pytorch::Place>(input_p);
FRONT_END_GENERAL_CHECK(pytorch_place, "Only place produced by PyTorch Frontend is supported.");
auto tensor_id = pytorch_place->get_tensor_index();
@@ -108,7 +108,7 @@ std::shared_ptr<Model> TranslateSession::convert_pytorch_model(
(*tensor_map)[tensor_id] = parameter;
}
// Add all tensors that were frozen
for (auto& desc : input_model->m_descriptors) {
for (auto desc : input_model->m_descriptors) {
(*tensor_map)[desc.first] = desc.second.m_value;
}
} else {
@@ -225,7 +225,7 @@ std::shared_ptr<Model> TranslateSession::convert_pytorch_model(
ResultVector results;
if (input_model) {
// For the case when we have InputModel we need to have same order as its outputs
for (auto& output_p : input_model->get_outputs()) {
for (auto output_p : input_model->get_outputs()) {
auto pytorch_place = std::dynamic_pointer_cast<pytorch::Place>(output_p);
FRONT_END_GENERAL_CHECK(pytorch_place, "Only place produced by PyTorch Frontend is supported.");
auto tensor_id = pytorch_place->get_tensor_index();

View File

@@ -2081,7 +2081,9 @@ void GraphOptimizer::FuseEltwiseAndSimple(Graph &graph) {
graphEdges.push_back(newEdge);
parent->addEdge(newEdge);
parentNode->inputShapes.push_back(parent->getOutputShapeAtPort(inNum));
if (parentNode->inputShapes.size() < static_cast<size_t>(outNum + 1))
parentNode->inputShapes.resize(outNum + 1);
parentNode->inputShapes[outNum] = parent->getOutputShapeAtPort(inNum);
}
}

View File

@@ -33,7 +33,6 @@
#include "transformations/control_flow/unroll_tensor_iterator.hpp"
#include "transformations/fp16_compression/mark_decompression_convert_constant_folding.hpp"
#include "transformations/op_conversions/convert_batch_to_space.hpp"
#include "transformations/op_conversions/convert_bitwise_to_logical_bool.hpp"
#include "transformations/op_conversions/convert_broadcast_to_tiles.hpp"
#include "transformations/op_conversions/convert_depth_to_space.hpp"
#include "transformations/op_conversions/convert_gather_downgrade.hpp"
@@ -445,11 +444,6 @@ void Transformations::PreLpt(const std::vector<ov::element::Type>& defaultPrecis
CPU_ENABLE_PASS_COMMON(manager, ov::pass::ConvertDetectionOutput1ToDetectionOutput8);
CPU_ENABLE_PASS_COMMON(manager, ov::pass::ConvertROIAlign3To9);
CPU_DISABLE_PASS_COMMON(manager, ov::pass::ConvertBitwiseAndToLogicalAnd);
CPU_ENABLE_PASS_COMMON(manager, ov::pass::ConvertBitwiseNotToLogicalNot);
CPU_DISABLE_PASS_COMMON(manager, ov::pass::ConvertBitwiseOrToLogicalOr);
CPU_DISABLE_PASS_COMMON(manager, ov::pass::ConvertBitwiseXorToLogicalXor);
if (useLpt) {
CPU_LPT_SCOPE(LowPrecisionTransformations_Part3);
CPU_SET_CALLBACK_COMMON(manager,

View File

@@ -9,11 +9,13 @@ from pytorch_layer_test_class import PytorchLayerTest
class TestAnd(PytorchLayerTest):
def _prepare_input(self):
return self.input_data
def create_model_tensor_input(self):
class aten_and_tensor(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -23,9 +25,10 @@ class TestAnd(PytorchLayerTest):
ref_net = None
return aten_and_tensor(), ref_net, "aten::__and__"
def create_model_bool_input(self):
class aten_and_bool(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -36,43 +39,18 @@ class TestAnd(PytorchLayerTest):
return aten_and_bool(), ref_net, "aten::__and__"
def create_model_int_input(self):
class aten_and_int(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, int_a: int, int_b: int):
return int_a & int_b
ref_net = None
return aten_and_int(), ref_net, "aten::__and__"
@pytest.mark.nightly
@pytest.mark.precommit
def test_and_tensor(self, ie_device, precision, ir_version):
self.input_data = (
np.array([True, False, False], dtype=np.bool_),
np.array([True, True, False], dtype=np.bool_),
)
self._test(*self.create_model_tensor_input(), ie_device, precision, ir_version)
self.input_data = (np.array([True, False, False], dtype=np.bool_), np.array(
[True, True, False], dtype=np.bool_))
self._test(*self.create_model_tensor_input(),
ie_device, precision, ir_version)
@pytest.mark.nightly
@pytest.mark.precommit
def test_and_bool(self, ie_device, precision, ir_version):
self.input_data = (np.array(True, dtype=np.bool_), np.array(True, dtype=np.bool_))
self._test(*self.create_model_bool_input(), ie_device, precision, ir_version)
@pytest.mark.nightly
@pytest.mark.precommit
def test_and_int(self, ie_device, precision, ir_version):
self.input_data = (np.array(3, dtype=np.int32), np.array(4, dtype=np.int32))
self._test(*self.create_model_int_input(), ie_device, precision, ir_version)
@pytest.mark.nightly
@pytest.mark.precommit
def test_and_tensor(self, ie_device, precision, ir_version):
self.input_data = (np.array([3, 5, 8], dtype=np.int32), np.array([7, 11, 2], dtype=np.int32))
self._test(
*self.create_model_tensor_input(), ie_device, precision, ir_version, freeze_model=False, trace_model=True
)
self.input_data = (np.array(True, dtype=np.bool_),
np.array(True, dtype=np.bool_))
self._test(*self.create_model_bool_input(),
ie_device, precision, ir_version)

View File

@@ -0,0 +1,29 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from pytorch_layer_test_class import PytorchLayerTest
class TestBitwiseNot(PytorchLayerTest):
    """Layer test for the aten::bitwise_not operator on a boolean input."""
    def _prepare_input(self):
        # Single model input: a random boolean array of shape (1, 5).
        import numpy as np
        return ((np.random.randn(1, 5) > 0).astype(bool),)
    def create_model(self):
        import torch
        class aten_bitwise_not(torch.nn.Module):
            def forward(self, x):
                return torch.bitwise_not(x)
        # No reference network is provided; the framework compares against
        # the traced PyTorch module directly.
        ref_net = None
        # The third element names the op expected in the converted graph.
        return aten_bitwise_not(), ref_net, "aten::bitwise_not"
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_bitwise_not(self, ie_device, precision, ir_version):
        self._test(*self.create_model(), ie_device, precision, ir_version)

View File

@@ -1,132 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import torch
from pytorch_layer_test_class import PytorchLayerTest
class TestBitwiseOp(PytorchLayerTest):
    """Covers torch.bitwise_{and,or,xor,not} across mixed dtypes, shapes and the out= variants."""

    def _prepare_input(self, out, unary, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape):
        # Values chosen to exercise several distinct bit patterns per dtype.
        pool = np.array([0, 1, 255, 7])
        lhs = np.random.choice(pool, lhs_shape).astype(lhs_dtype)
        if unary:
            if out:
                return (lhs, np.zeros_like(lhs).astype(lhs_dtype))
            return (lhs,)
        rhs = np.random.choice(pool, rhs_shape).astype(rhs_dtype)
        if not out:
            return lhs, rhs
        # Summing broadcast zeros yields an out buffer with the broadcast shape/dtype.
        buffer = np.zeros_like(lhs).astype(lhs_dtype) + np.zeros_like(rhs).astype(rhs_dtype)
        return lhs, rhs, buffer

    def create_model(self, op_name, out):
        op = {
            "and": torch.bitwise_and,
            "or": torch.bitwise_or,
            "xor": torch.bitwise_xor,
            "not": torch.bitwise_not,
        }[op_name]

        class aten_bitwise(torch.nn.Module):
            def __init__(self, op, out) -> None:
                super().__init__()
                self.op = op
                # Pick the forward flavor: unary vs binary, with or without out=.
                if op == torch.bitwise_not:
                    self.forward = self.forward_not_out if out else self.forward_not
                elif out:
                    self.forward = self.forward_out

            def forward(self, tensor_a, tensor_b):
                return self.op(tensor_a, tensor_b)

            def forward_out(self, tensor_a, tensor_b, out):
                return self.op(tensor_a, tensor_b, out=out), out

            def forward_not(self, tensor_a):
                return self.op(tensor_a)

            def forward_not_out(self, tensor_a, out):
                return self.op(tensor_a, out=out), out

        # No reference network: results are compared against torch directly.
        return aten_bitwise(op, out), None, f"aten::bitwise_{op_name}"

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.parametrize("op_type", ["and", "or", "not", "xor"])
    @pytest.mark.parametrize("lhs_dtype", ["bool", "int32", "uint8", "int64"])
    @pytest.mark.parametrize("rhs_dtype", ["bool", "int32", "uint8", "int64"])
    @pytest.mark.parametrize(
        ("lhs_shape", "rhs_shape"),
        [
            ([2, 3], [2, 3]),
            ([2, 3], []),
            ([], [2, 3]),
        ],
    )
    @pytest.mark.parametrize("out", [False, True])
    def test_bitwise_mixed_dtypes(
        self, op_type, out, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape, ie_device, precision, ir_version
    ):
        input_kwargs = {
            "out": out,
            "unary": op_type == "not",
            "lhs_dtype": lhs_dtype,
            "rhs_dtype": rhs_dtype,
            "lhs_shape": lhs_shape,
            "rhs_shape": rhs_shape,
        }
        self._test(
            *self.create_model(op_type, out),
            ie_device,
            precision,
            ir_version,
            kwargs_to_prepare_input=input_kwargs,
            freeze_model=False,
            trace_model=True,
        )
class TestBitwiseOperators(PytorchLayerTest):
    """Covers the &, ~, |, ^ operator overloads (aten::__and__ et al.) on tensors."""

    def _prepare_input(self, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape):
        # Values chosen to exercise several distinct bit patterns per dtype.
        pool = np.array([0, 1, 255, 7])
        lhs = np.random.choice(pool, lhs_shape).astype(lhs_dtype)
        rhs = np.random.choice(pool, rhs_shape).astype(rhs_dtype)
        return lhs, rhs

    def create_model(self):
        class aten_bitwise(torch.nn.Module):
            def forward(self, lhs, rhs):
                # Exercises all four overloaded bitwise operators in one pass.
                return lhs & rhs, ~lhs, lhs | rhs, lhs ^ rhs

        # No reference network: results are compared against torch directly.
        return aten_bitwise(), None, ("aten::__and__", "aten::bitwise_not", "aten::__or__", "aten::__xor__")

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.parametrize("lhs_dtype", ["bool", "int32"])
    @pytest.mark.parametrize("rhs_dtype", ["bool", "int32"])
    @pytest.mark.parametrize(
        ("lhs_shape", "rhs_shape"),
        [
            ([2, 3], [2, 3]),
            ([2, 3], []),
            ([], [2, 3]),
        ],
    )
    def test_bitwise_operators(self, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape, ie_device, precision, ir_version):
        input_kwargs = {
            "lhs_dtype": lhs_dtype,
            "rhs_dtype": rhs_dtype,
            "lhs_shape": lhs_shape,
            "rhs_shape": rhs_shape,
        }
        self._test(
            *self.create_model(),
            ie_device,
            precision,
            ir_version,
            kwargs_to_prepare_input=input_kwargs,
            trace_model=True,
            freeze_model=False,
        )

View File

@@ -1,78 +1,29 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import torch
from pytorch_layer_test_class import PytorchLayerTest
class TestOr(PytorchLayerTest):
class TestLog(PytorchLayerTest):
def _prepare_input(self):
return self.input_data
import numpy as np
return (np.random.randint(0, 255, (20, 30, 40, 50)),)
def create_model_tensor_input(self):
class aten_or_tensor(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def create_model(self):
import torch
def forward(self, tensor_a, tensor_b):
return tensor_a | tensor_b
class aten_or(torch.nn.Module):
def forward(self, x):
res = torch.ByteTensor(x.size()).zero_()
res[:, :, :, 1:] = res[:, :, :, 1:] | (x[:, :, :, 1:] != x[:, :, :, :-1])
res[:, :, :, :-1] = res[:, :, :, :-1] | (x[:, :, :, 1:] != x[:, :, :, :-1])
return res.float()
ref_net = None
return aten_or_tensor(), ref_net, "aten::__or__"
def create_model_bool_input(self):
class aten_or_bool(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, bool_a: bool, bool_b: bool):
return bool_a | bool_b
ref_net = None
return aten_or_bool(), ref_net, "aten::__or__"
def create_model_int_input(self):
class aten_or_int(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, int_a: int, int_b: int):
return int_a | int_b
ref_net = None
return aten_or_int(), ref_net, "aten::__or__"
return aten_or(), None, "aten::__or__"
@pytest.mark.nightly
@pytest.mark.precommit
def test_or_tensor(self, ie_device, precision, ir_version):
self.input_data = (
np.array([True, False, False], dtype=np.bool_),
np.array([True, True, False], dtype=np.bool_),
)
self._test(*self.create_model_tensor_input(), ie_device, precision, ir_version)
@pytest.mark.nightly
@pytest.mark.precommit
def test_or_bool(self, ie_device, precision, ir_version):
self.input_data = (np.array(True, dtype=np.bool_), np.array(True, dtype=np.bool_))
self._test(*self.create_model_bool_input(), ie_device, precision, ir_version)
@pytest.mark.nightly
@pytest.mark.precommit
def test_or_int(self, ie_device, precision, ir_version):
self.input_data = (np.array(3, dtype=np.int32), np.array(4, dtype=np.int32))
self._test(*self.create_model_int_input(), ie_device, precision, ir_version)
@pytest.mark.nightly
@pytest.mark.precommit
def test_or_tensor(self, ie_device, precision, ir_version):
self.input_data = (np.array([3, 5, 8], dtype=np.int32), np.array([7, 11, 2], dtype=np.int32))
self._test(
*self.create_model_tensor_input(), ie_device, precision, ir_version, freeze_model=False, trace_model=True
)
def test_or(self, ie_device, precision, ir_version):
self._test(*self.create_model(), ie_device, precision, ir_version,
dynamic_shapes=False, trace_model=True, use_convert_model=True)

View File

@@ -9,11 +9,13 @@ from pytorch_layer_test_class import PytorchLayerTest
class TestXor(PytorchLayerTest):
def _prepare_input(self):
return self.input_data
def create_model_tensor_input(self):
class aten_xor_tensor(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -26,6 +28,7 @@ class TestXor(PytorchLayerTest):
def create_model_bool_input(self):
class aten_xor_bool(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -38,6 +41,7 @@ class TestXor(PytorchLayerTest):
def create_model_int_input(self):
class aten_xor_int(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -51,28 +55,33 @@ class TestXor(PytorchLayerTest):
@pytest.mark.nightly
@pytest.mark.precommit
def test_xor_tensor(self, ie_device, precision, ir_version):
self.input_data = (
np.array([True, False, False], dtype=np.bool_),
np.array([True, True, False], dtype=np.bool_),
)
self._test(*self.create_model_tensor_input(), ie_device, precision, ir_version)
self.input_data = (np.array([True, False, False], dtype=np.bool_), np.array(
[True, True, False], dtype=np.bool_))
self._test(*self.create_model_tensor_input(),
ie_device, precision, ir_version)
@pytest.mark.nightly
@pytest.mark.precommit
def test_xor_bool(self, ie_device, precision, ir_version):
self.input_data = (np.array(True, dtype=np.bool_), np.array(True, dtype=np.bool_))
self._test(*self.create_model_bool_input(), ie_device, precision, ir_version)
self.input_data = (np.array(True, dtype=np.bool_),
np.array(True, dtype=np.bool_))
self._test(*self.create_model_bool_input(),
ie_device, precision, ir_version)
@pytest.mark.xfail(reason="bitwise_xor is not implemented")
@pytest.mark.nightly
@pytest.mark.precommit
def test_xor_int(self, ie_device, precision, ir_version):
self.input_data = (np.array(3, dtype=np.int32), np.array(4, dtype=np.int32))
self._test(*self.create_model_int_input(), ie_device, precision, ir_version)
self.input_data = (np.array(3, dtype=np.int),
np.array(4, dtype=np.int))
self._test(*self.create_model_int_input(),
ie_device, precision, ir_version)
@pytest.mark.xfail(reason="bitwise_xor is not implemented")
@pytest.mark.nightly
@pytest.mark.precommit
def test_xor_tensor(self, ie_device, precision, ir_version):
self.input_data = (np.array([3, 5, 8], dtype=np.int32), np.array([7, 11, 2], dtype=np.int32))
self._test(
*self.create_model_tensor_input(), ie_device, precision, ir_version, freeze_model=False, trace_model=True
)
self.input_data = (np.array([3, 5, 8], dtype=np.int), np.array(
[7, 11, 2], dtype=np.int))
self._test(*self.create_model_tensor_input(),
ie_device, precision, ir_version)