Merge remote-tracking branch 'upstream/master'
commit e92f5928dc
@@ -6,13 +6,15 @@
**Short description**: *CumSum* performs cumulative summation of the input elements along the given axis.

-**Detailed description**: By default, it will do the sum inclusively meaning the first element is copied as is. Through an "exclusive" attribute, this behavior can change to exclude the first element. It can also perform summation in the opposite direction of the axis. For that, set reverse attribute to `true`.
+**Detailed description**: *CumSum* performs cumulative summation of the input elements along the `axis` specified by the second input. By default, the `j-th` output element is the inclusive sum of the first `j` elements in the given sequence, and the first element in the sequence is copied to the output as is.
+
+In the `exclusive` mode the `j-th` output element is the sum of the first `j-1` elements, and the first element in the output sequence is `0`.
+
+To perform the summation in the opposite direction of the axis, set the `reverse` attribute to `true`.

**Attributes**:

* *exclusive*

-  * **Description**: If the attribute is set to `true` then an exclusive sum in which the top element is not included is returned. In other terms, if set to `true`, the `j-th` output element would be the sum of the first `(j-1)` elements. Otherwise, it would be the sum of the first `j` elements.
+  * **Description**: If the attribute is set to `true`, then exclusive sums are returned; the `j-th` element is not included in the `j-th` sum. Otherwise, the inclusive sum of the first `j` elements is calculated for the `j-th` element.
  * **Range of values**:
    * `false` - include the top element
    * `true` - do not include the top element
@@ -32,19 +34,19 @@
**Inputs**

-* **1**: An tensor of type *T*. **Required.**
+* **1**: A tensor of type *T* and rank greater or equal to 1. **Required.**

-* **2**: Scalar axis of type *T_AXIS*. Negative value means counting dimensions from the back. Default value is 0. **Optional.**
+* **2**: Axis index along which the cumulative sum is performed. A scalar of type *T_AXIS*. Negative value means counting dimensions from the back. Default value is `0`. **Optional.**

**Outputs**

-* **1**: Output tensor with cumulative sums of the input's elements. A tensor of type *T* of the same shape as 1st input.
+* **1**: Output tensor with cumulative sums of the input elements. A tensor of type *T* of the same shape as the first input.

**Types**

* *T*: any numeric type.

-* *T_AXIS*: any integer number.
+* *T_AXIS*: `int64` or `int32`.

**Examples**
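For intuition, a minimal sketch of the inclusive, exclusive, and reverse modes for a 1-D input, assuming a plain C++ implementation rather than the OpenVINO kernel:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Cumulative sum of a 1-D sequence. `exclusive` shifts the window so the
// j-th output excludes the j-th element; `reverse` accumulates from the back.
std::vector<float> cumsum(const std::vector<float>& x, bool exclusive, bool reverse) {
    std::vector<float> y(x.size(), 0.0f);
    float acc = 0.0f;
    for (std::size_t i = 0; i < x.size(); ++i) {
        const std::size_t j = reverse ? x.size() - 1 - i : i;
        if (exclusive) {  // j-th output is the sum of the elements before it
            y[j] = acc;
            acc += x[j];
        } else {          // j-th output includes the j-th element
            acc += x[j];
            y[j] = acc;
        }
    }
    return y;
}

int main() {
    const std::vector<float> x{1, 2, 3, 4};
    for (float v : cumsum(x, false, false)) std::cout << v << ' ';  // 1 3 6 10
    std::cout << '\n';
    for (float v : cumsum(x, true, false)) std::cout << v << ' ';   // 0 1 3 6
    std::cout << '\n';
    for (float v : cumsum(x, false, true)) std::cout << v << ' ';   // 10 9 7 4
    std::cout << '\n';
}
```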
docs/template_plugin/tests/functional/op_reference/ctc_loss.cpp (new file, 174 lines)
@@ -0,0 +1,174 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <tuple>

#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ngraph;
using namespace InferenceEngine;

namespace {

struct CTCLossParams {
    CTCLossParams(const bool collapseRepeated, const bool mergeRepeated, const bool findUnique, const Tensor& logitsTensor, const Tensor& logitsLenTensor,
                  const Tensor& labelsTensor, const Tensor& labelsLenTensor, const Tensor& blankIdxTensor, const Tensor& expectedTensor)
        : preprocessCollapseRepeated(collapseRepeated),
          ctcMergeRepeated(mergeRepeated),
          unique(findUnique),
          logits(logitsTensor),
          logitsLen(logitsLenTensor),
          labels(labelsTensor),
          labelsLen(labelsLenTensor),
          blankIdx(blankIdxTensor),
          expected(expectedTensor) {}

    bool preprocessCollapseRepeated;
    bool ctcMergeRepeated;
    bool unique;
    Tensor logits;
    Tensor logitsLen;
    Tensor labels;
    Tensor labelsLen;
    Tensor blankIdx;
    Tensor expected;
};

class ReferenceCTCLossLayerTest : public testing::TestWithParam<CTCLossParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params);
        inputData = {params.logits.data, params.logitsLen.data, params.labels.data, params.labelsLen.data, params.blankIdx.data};
        refOutData = {params.expected.data};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<CTCLossParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "fl_pr=" << param.logits.type << "_";
        result << "int_pr=" << param.logitsLen.type << "_";
        result << "collapse=" << param.preprocessCollapseRepeated << "_";
        result << "merge=" << param.ctcMergeRepeated << "_";
        result << "unique=" << param.unique << "_";
        result << "logits_shape=" << param.logits.shape << "_";
        result << "logits_len_shape=" << param.logitsLen.shape << "_";
        result << "labels_shape=" << param.labels.shape << "_";
        result << "labels_len_shape=" << param.labelsLen.shape << "_";
        result << "blank_idx_shape=" << param.blankIdx.shape << "_";
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const CTCLossParams& params) {
        const auto A = std::make_shared<op::Parameter>(params.logits.type, params.logits.shape);        // logits
        const auto B = std::make_shared<op::Parameter>(params.logitsLen.type, params.logitsLen.shape);  // logitsLen
        const auto C = std::make_shared<op::Parameter>(params.labels.type, params.labels.shape);        // labels
        const auto D = std::make_shared<op::Parameter>(params.labelsLen.type, params.labelsLen.shape);  // labelsLen
        const auto E = std::make_shared<op::Parameter>(params.blankIdx.type, params.blankIdx.shape);    // blankIdx

        const auto ctcLoss = std::make_shared<op::v4::CTCLoss>(A, B, C, D, E, params.preprocessCollapseRepeated, params.ctcMergeRepeated, params.unique);
        return std::make_shared<Function>(NodeVector {ctcLoss}, ParameterVector {A, B, C, D, E});
    }
};

TEST_P(ReferenceCTCLossLayerTest, CompareWithRefs) {
    Exec();
}

INSTANTIATE_TEST_SUITE_P(
    smoke_CTCLoss_With_Hardcoded_Refs, ReferenceCTCLossLayerTest,
    ::testing::Values(CTCLossParams(false, false, false,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                   // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),    // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                   // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                       // blankIdx
                                    Tensor({2}, element::f32, std::vector<float> {1.41223f, 14.1359f})),  // refOut
                      CTCLossParams(false, false, true,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                   // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),    // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                   // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                       // blankIdx
                                    Tensor({2}, element::f32, std::vector<float> {1.41223f, 14.1359f})),  // refOut
                      CTCLossParams(false, true, false,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                   // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),    // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                   // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                       // blankIdx
                                    Tensor({2}, element::f32, std::vector<float> {1.41156f, 13.2745f})),  // refOut
                      CTCLossParams(true, false, false,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                   // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),    // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                   // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                       // blankIdx
                                    Tensor({2}, element::f32, std::vector<float> {1.41223f, 14.1359f})),  // refOut
                      CTCLossParams(false, true, true,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                   // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),    // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                   // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                       // blankIdx
                                    Tensor({2}, element::f32, std::vector<float> {1.41156f, 13.2745f})),  // refOut
                      CTCLossParams(true, true, true,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                   // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),    // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                   // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                       // blankIdx
                                    Tensor({2}, element::f32, std::vector<float> {1.41223f, 13.2745f})),  // refOut
                      // floating point type - float16
                      CTCLossParams(false, false, false,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                     // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),      // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                     // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                         // blankIdx
                                    Tensor({2}, element::f16, std::vector<float16> {1.41223f, 14.1359f})),  // refOut
                      CTCLossParams(false, false, true,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                     // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),      // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                     // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                         // blankIdx
                                    Tensor({2}, element::f16, std::vector<float16> {1.41223f, 14.1359f})),  // refOut
                      CTCLossParams(false, true, false,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                     // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),      // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                     // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                         // blankIdx
                                    Tensor({2}, element::f16, std::vector<float16> {1.41156f, 13.2745f})),  // refOut
                      CTCLossParams(true, false, false,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                     // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),      // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                     // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                         // blankIdx
                                    Tensor({2}, element::f16, std::vector<float16> {1.41223f, 14.1359f})),  // refOut
                      CTCLossParams(false, true, true,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                     // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),      // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                     // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                         // blankIdx
                                    Tensor({2}, element::f16, std::vector<float16> {1.41156f, 13.2745f})),  // refOut
                      CTCLossParams(true, true, true,  // collapse repeated, merge repeated, unique
                                    Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
                                    Tensor({2}, element::i32, std::vector<int> {3, 3}),                       // logitsLen
                                    Tensor({2, 3}, element::i32, std::vector<int> {0, 1, 2, 1, 1, 1}),        // labels
                                    Tensor({2}, element::i32, std::vector<int> {2, 1}),                       // labelsLen
                                    Tensor({}, element::i32, std::vector<int> {2}),                           // blankIdx
                                    Tensor({2}, element::f16, std::vector<float16> {1.41223f, 13.2745f}))),   // refOut
    ReferenceCTCLossLayerTest::getTestCaseName);
}  // namespace
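The file relies on gtest's value-parameterized tests; for readers unfamiliar with the pattern, a minimal self-contained sketch (toy parameters, unrelated to the CTCLoss data above):

```cpp
#include <gtest/gtest.h>

// One struct instance per test case, as CTCLossParams is used above.
struct AddParams { int a, b, sum; };

class AddTest : public ::testing::TestWithParam<AddParams> {};

TEST_P(AddTest, ComputesSum) {
    const auto p = GetParam();
    EXPECT_EQ(p.a + p.b, p.sum);  // Exec() plays this role in the reference tests
}

// Each entry in Values() produces one independently named and run test.
INSTANTIATE_TEST_SUITE_P(Smoke, AddTest,
                         ::testing::Values(AddParams{1, 2, 3}, AddParams{2, 2, 4}));
```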
@@ -29,9 +29,12 @@ enum class eltwise_mode : int32_t;
// Forward declarations for ngraph part
namespace ngraph {
class Node;
class DiscreteTypeInfo;
}  // namespace ngraph

+namespace ov {
+class DiscreteTypeInfo;
+}  // namespace ov
+
#define REGISTER_FACTORY_IMPL(op_version, op_name) \
void __register ## _ ## op_name ## _ ## op_version() { \
    Program::RegisterFactory<ngraph::op::op_version::op_name>( \
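For illustration of the token pasting, an invocation such as `REGISTER_FACTORY_IMPL(v0, CumSum)` (the op is a hypothetical choice) would begin expanding to roughly the following; the rest of the macro body is truncated in this hunk, so the remainder is not shown:

```cpp
// Hypothetical expansion of REGISTER_FACTORY_IMPL(v0, CumSum):
void __register_CumSum_v0() {
    Program::RegisterFactory<ngraph::op::v0::CumSum>(
        /* ... rest of the macro body, truncated in this hunk ... */
```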
@@ -0,0 +1,23 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief This is a header file for the OpenVINO Runtime common aliases that depend only on the external API
 *
 * @file openvino/runtime/common.hpp
 */
#pragma once

#include <map>
#include <string>

namespace ov {
namespace ie = InferenceEngine;
namespace runtime {
/**
 * @brief This type of map is commonly used to pass a set of parameters
 */
using ConfigMap = std::map<std::string, std::string>;
}  // namespace runtime
}  // namespace ov
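As a usage sketch (the keys shown are common InferenceEngine configuration keys, used here purely illustratively):

```cpp
#include <openvino/runtime/common.hpp>  // assumed include path, per the @file tag above

int main() {
    // Brace-initialize and pass wherever a ConfigMap is expected.
    ov::runtime::ConfigMap config = {
        {"PERF_COUNT", "YES"},     // enable per-layer performance counters
        {"CPU_THREADS_NUM", "4"},  // illustrative device-specific key
    };
    return config.size() == 2 ? 0 : 1;
}
```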
@@ -15,6 +15,7 @@
#include <string>
#include <vector>

+#include "common.hpp"
#include "cpp/ie_executable_network.hpp"
#include "ie_plugin_config.hpp"
#include "ie_version.hpp"
@@ -57,7 +58,7 @@ public:
     * @param deviceName Device name to identify plugin
     * @return A vector of versions
     */
-    std::map<std::string, InferenceEngine::Version> get_versions(const std::string& deviceName) const;
+    std::map<std::string, ie::Version> get_versions(const std::string& deviceName) const;

#ifdef ENABLE_UNICODE_PATH_SUPPORT
    /**
@@ -101,7 +102,7 @@ public:
     * @return Function
     */
    std::shared_ptr<ov::Function> read_model(const std::string& model,
-                                            const std::shared_ptr<const InferenceEngine::Blob>& weights) const;
+                                            const std::shared_ptr<const ie::Blob>& weights) const;

    /**
     * @brief Creates an executable network from a network object.
@@ -115,9 +116,9 @@ public:
     * operation
     * @return An executable network reference
     */
-    InferenceEngine::ExecutableNetwork compile_model(const std::shared_ptr<const ov::Function>& network,
-                                                     const std::string& deviceName,
-                                                     const std::map<std::string, std::string>& config = {});
+    ie::ExecutableNetwork compile_model(const std::shared_ptr<const ov::Function>& network,
+                                        const std::string& deviceName,
+                                        const ConfigMap& config = {});

    /**
     * @brief Reads model and creates an executable network from IR or ONNX file
@@ -132,9 +133,9 @@ public:
     *
     * @return An executable network reference
     */
-    InferenceEngine::ExecutableNetwork compile_model(const std::string& modelPath,
-                                                     const std::string& deviceName,
-                                                     const std::map<std::string, std::string>& config = {});
+    ie::ExecutableNetwork compile_model(const std::string& modelPath,
+                                        const std::string& deviceName,
+                                        const ConfigMap& config = {});

    /**
     * @brief Creates an executable network from a network object within a specified remote context.
@@ -144,15 +145,15 @@ public:
     * operation
     * @return An executable network object
     */
-    InferenceEngine::ExecutableNetwork compile_model(const std::shared_ptr<const ov::Function>& network,
-                                                     const std::shared_ptr<InferenceEngine::RemoteContext>& context,
-                                                     const std::map<std::string, std::string>& config = {});
+    ie::ExecutableNetwork compile_model(const std::shared_ptr<const ov::Function>& network,
+                                        const std::shared_ptr<ie::RemoteContext>& context,
+                                        const ConfigMap& config = {});

    /**
     * @brief Registers extension
     * @param extension Pointer to already loaded extension
     */
-    void add_extension(const std::shared_ptr<InferenceEngine::IExtension>& extension);
+    void add_extension(const std::shared_ptr<ie::IExtension>& extension);

    /**
     * @brief Creates an executable network from a previously exported network
@@ -162,9 +163,9 @@ public:
     * operation*
     * @return An executable network reference
     */
-    InferenceEngine::ExecutableNetwork import_model(std::istream& networkModel,
-                                                    const std::string& deviceName,
-                                                    const std::map<std::string, std::string>& config = {});
+    ie::ExecutableNetwork import_model(std::istream& networkModel,
+                                       const std::string& deviceName,
+                                       const ConfigMap& config = {});

    /**
     * @brief Creates an executable network from a previously exported network within a specified
@@ -176,9 +177,9 @@ public:
     * operation
     * @return An executable network reference
     */
-    InferenceEngine::ExecutableNetwork import_model(std::istream& networkModel,
-                                                    const std::shared_ptr<InferenceEngine::RemoteContext>& context,
-                                                    const std::map<std::string, std::string>& config = {});
+    ie::ExecutableNetwork import_model(std::istream& networkModel,
+                                       const std::shared_ptr<ie::RemoteContext>& context,
+                                       const ConfigMap& config = {});

    /**
     * @brief Query device if it supports specified network with specified configuration
@@ -188,9 +189,9 @@ public:
     * @param config Optional map of pairs: (config parameter name, config parameter value)
     * @return An object containing a map of pairs a layer name -> a device name supporting this layer.
     */
-    InferenceEngine::QueryNetworkResult query_model(const std::shared_ptr<const ov::Function>& network,
-                                                    const std::string& deviceName,
-                                                    const std::map<std::string, std::string>& config = {}) const;
+    ie::QueryNetworkResult query_model(const std::shared_ptr<const ov::Function>& network,
+                                       const std::string& deviceName,
+                                       const ConfigMap& config = {}) const;

    /**
     * @brief Sets configuration for device, acceptable keys can be found in ie_plugin_config.hpp
@@ -200,7 +201,7 @@ public:
     *
     * @param config Map of pairs: (config parameter name, config parameter value)
     */
-    void set_config(const std::map<std::string, std::string>& config, const std::string& deviceName = {});
+    void set_config(const ConfigMap& config, const std::string& deviceName = {});

    /**
     * @brief Gets configuration dedicated to device behaviour.
@@ -211,7 +212,7 @@ public:
     * @param name - config key.
     * @return Value of config corresponding to config key.
     */
-    InferenceEngine::Parameter get_config(const std::string& deviceName, const std::string& name) const;
+    ie::Parameter get_config(const std::string& deviceName, const std::string& name) const;

    /**
     * @brief Gets general runtime metric for dedicated hardware.
@@ -223,7 +224,7 @@ public:
     * @param name - metric name to request.
     * @return Metric value corresponding to metric key.
     */
-    InferenceEngine::Parameter get_metric(const std::string& deviceName, const std::string& name) const;
+    ie::Parameter get_metric(const std::string& deviceName, const std::string& name) const;

    /**
     * @brief Returns devices available for neural networks inference
@@ -290,15 +291,14 @@ public:
     * @param params Map of device-specific shared context parameters.
     * @return A shared pointer to a created remote context.
     */
-    std::shared_ptr<InferenceEngine::RemoteContext> create_context(const std::string& deviceName,
-                                                                   const InferenceEngine::ParamMap& params);
+    std::shared_ptr<ie::RemoteContext> create_context(const std::string& deviceName, const ie::ParamMap& params);

    /**
     * @brief Get a pointer to default(plugin-supplied) shared context object for specified accelerator device.
     * @param deviceName - A name of a device to get create shared context from.
     * @return A shared pointer to a default remote context.
     */
-    std::shared_ptr<InferenceEngine::RemoteContext> get_default_context(const std::string& deviceName);
+    std::shared_ptr<ie::RemoteContext> get_default_context(const std::string& deviceName);
};
}  // namespace runtime
}  // namespace ov
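Putting the renamed API together, a hedged end-to-end sketch (the header path, file names, and device name are assumptions; the method signatures are the ones declared above):

```cpp
#include <openvino/runtime/core.hpp>  // assumed location of the Core declaration

int main() {
    ov::runtime::Core core;
    // IR model: .xml plus .bin weights; read_model also has path-only overloads.
    auto model = core.read_model("model.xml", "model.bin");
    // ConfigMap is the std::map<std::string, std::string> alias from common.hpp.
    auto exec = core.compile_model(model, "CPU", ov::runtime::ConfigMap{{"PERF_COUNT", "YES"}});
    (void)exec;
    return 0;
}
```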
@@ -1218,7 +1218,7 @@ Core::Core(const std::string& xmlConfigFile) {
    register_plugins(core_detail::parseXmlConfig(xmlConfigFile));
}

-std::map<std::string, InferenceEngine::Version> Core::get_versions(const std::string& deviceName) const {
+std::map<std::string, ie::Version> Core::get_versions(const std::string& deviceName) const {
    return _impl->GetVersions(deviceName);
}

@@ -1232,49 +1232,45 @@ std::shared_ptr<ngraph::Function> Core::read_model(const std::wstring& modelPath
std::shared_ptr<ngraph::Function> Core::read_model(const std::string& modelPath, const std::string& binPath) const {
    return _impl->ReadNetwork(modelPath, binPath).getFunction();
}
-std::shared_ptr<ngraph::Function> Core::read_model(const std::string& model,
-                                                   const InferenceEngine::Blob::CPtr& weights) const {
+std::shared_ptr<ngraph::Function> Core::read_model(const std::string& model, const ie::Blob::CPtr& weights) const {
    return _impl->ReadNetwork(model, weights).getFunction();
}
-InferenceEngine::ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& network,
+ie::ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& network,
                                          const std::string& deviceName,
-                                          const std::map<std::string, std::string>& config) {
-    auto exec = _impl->LoadNetwork(InferenceEngine::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)),
-                                   deviceName,
-                                   config);
+                                          const ConfigMap& config) {
+    auto exec =
+        _impl->LoadNetwork(ie::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)), deviceName, config);
    return {exec, exec};
}
-InferenceEngine::ExecutableNetwork Core::compile_model(const std::string& modelPath,
+ie::ExecutableNetwork Core::compile_model(const std::string& modelPath,
                                          const std::string& deviceName,
-                                          const std::map<std::string, std::string>& config) {
+                                          const ConfigMap& config) {
    auto exec = _impl->LoadNetwork(modelPath, deviceName, config);
    return {exec, exec};
}

-InferenceEngine::ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& network,
-                                                       const InferenceEngine::RemoteContext::Ptr& context,
-                                                       const std::map<std::string, std::string>& config) {
-    auto exec = _impl->LoadNetwork(InferenceEngine::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)),
-                                   context,
-                                   config);
+ie::ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& network,
+                                          const ie::RemoteContext::Ptr& context,
+                                          const ConfigMap& config) {
+    auto exec = _impl->LoadNetwork(ie::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)), context, config);
    return {exec, exec};
}

-void Core::add_extension(const InferenceEngine::IExtensionPtr& extension) {
+void Core::add_extension(const ie::IExtensionPtr& extension) {
    _impl->AddExtension(extension);
}

-InferenceEngine::ExecutableNetwork Core::import_model(std::istream& networkModel,
+ie::ExecutableNetwork Core::import_model(std::istream& networkModel,
                                         const std::string& deviceName,
-                                         const std::map<std::string, std::string>& config) {
+                                         const ConfigMap& config) {
    OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
    auto exec = _impl->ImportNetwork(networkModel, deviceName, config);
    return {exec, exec};
}

-InferenceEngine::ExecutableNetwork Core::import_model(std::istream& networkModel,
-                                                      const InferenceEngine::RemoteContext::Ptr& context,
-                                                      const std::map<std::string, std::string>& config) {
+ie::ExecutableNetwork Core::import_model(std::istream& networkModel,
+                                         const ie::RemoteContext::Ptr& context,
+                                         const ConfigMap& config) {
    OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");

    using ExportMagic = std::array<char, 4>;
@@ -1296,14 +1292,12 @@ InferenceEngine::ExecutableNetwork Core::import_model(std::istream& networkModel
    return {exec, exec};
}

-InferenceEngine::QueryNetworkResult Core::query_model(const std::shared_ptr<const ngraph::Function>& network,
+ie::QueryNetworkResult Core::query_model(const std::shared_ptr<const ngraph::Function>& network,
                                         const std::string& deviceName,
-                                         const std::map<std::string, std::string>& config) const {
-    return _impl->QueryNetwork(InferenceEngine::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)),
-                               deviceName,
-                               config);
+                                         const ConfigMap& config) const {
+    return _impl->QueryNetwork(ie::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)), deviceName, config);
}
-void Core::set_config(const std::map<std::string, std::string>& config, const std::string& deviceName) {
+void Core::set_config(const ConfigMap& config, const std::string& deviceName) {
    // HETERO case
    if (deviceName.find("HETERO:") == 0) {
        IE_THROW() << "SetConfig is supported only for HETERO itself (without devices). "
@@ -1337,7 +1331,7 @@ void Core::set_config(const std::map<std::string, std::string>& config, const st
    }
}

-InferenceEngine::Parameter Core::get_config(const std::string& deviceName, const std::string& name) const {
+ie::Parameter Core::get_config(const std::string& deviceName, const std::string& name) const {
    // HETERO case
    {
        if (deviceName.find("HETERO:") == 0) {
@@ -1363,13 +1357,13 @@ InferenceEngine::Parameter Core::get_config(const std::string& deviceName, const
    auto parsed = core_detail::parseDeviceNameIntoConfig(deviceName);

    // we need to return a copy of Parameter object which is created on Core side,
-    // not in InferenceEngine plugin side, which can be unloaded from Core in a parallel thread
+    // not in ie plugin side, which can be unloaded from Core in a parallel thread
    // TODO: remove this WA after *-31417 is resolved
    return core_detail::copyParameterValue(
        _impl->GetCPPPluginByName(parsed._deviceName).GetConfig(name, parsed._config));
}

-InferenceEngine::Parameter Core::get_metric(const std::string& deviceName, const std::string& name) const {
+ie::Parameter Core::get_metric(const std::string& deviceName, const std::string& name) const {
    return _impl->GetMetric(deviceName, name);
}

@@ -1382,7 +1376,7 @@ void Core::register_plugin(const std::string& pluginName, const std::string& dev
}

void Core::unload_plugin(const std::string& deviceName) {
-    InferenceEngine::DeviceIDParser parser(deviceName);
+    ie::DeviceIDParser parser(deviceName);
    std::string devName = parser.getDeviceName();

    _impl->UnloadPluginByName(devName);
@@ -1392,8 +1386,7 @@ void Core::register_plugins(const std::string& xmlConfigFile) {
    _impl->RegisterPluginsInRegistry(xmlConfigFile);
}

-InferenceEngine::RemoteContext::Ptr Core::create_context(const std::string& deviceName,
-                                                         const InferenceEngine::ParamMap& params) {
+ie::RemoteContext::Ptr Core::create_context(const std::string& deviceName, const ie::ParamMap& params) {
    if (deviceName.find("HETERO") == 0) {
        IE_THROW() << "HETERO device does not support remote context";
    }
@@ -1408,7 +1401,7 @@ InferenceEngine::RemoteContext::Ptr Core::create_context(const std::string& devi
    return _impl->GetCPPPluginByName(parsed._deviceName).CreateContext(parsed._config);
}

-InferenceEngine::RemoteContext::Ptr Core::get_default_context(const std::string& deviceName) {
+ie::RemoteContext::Ptr Core::get_default_context(const std::string& deviceName) {
    if (deviceName.find("HETERO") == 0) {
        IE_THROW() << "HETERO device does not support remote context";
    }
@@ -1419,7 +1412,7 @@ InferenceEngine::RemoteContext::Ptr Core::get_default_context(const std::string&
        IE_THROW() << "AUTO device does not support remote context";
    }

-    auto parsed = core_detail::parseDeviceNameIntoConfig(deviceName, InferenceEngine::ParamMap());
+    auto parsed = core_detail::parseDeviceNameIntoConfig(deviceName, ie::ParamMap());

    return _impl->GetCPPPluginByName(parsed._deviceName).GetDefaultContext(parsed._config);
}
@@ -164,7 +164,7 @@ int64_t op::NonMaxSuppressionIE3::max_boxes_output_from_input() const {
    }

    const auto max_output_boxes_input =
-        as_type_ptr<op::Constant>(input_value(max_output_boxes_per_class_port).get_node_shared_ptr());
+        ov::as_type_ptr<op::Constant>(input_value(max_output_boxes_per_class_port).get_node_shared_ptr());
    max_output_boxes = max_output_boxes_input->cast_vector<int64_t>().at(0);

    return max_output_boxes;
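The extraction pattern above — downcast an input to `Constant`, then `cast_vector` — recurs throughout the codebase; a minimal hedged helper (hypothetical, not part of the diff):

```cpp
#include <memory>
#include <ngraph/ngraph.hpp>

// Hypothetical helper: read a scalar int64 from a node's input port if that
// input is a Constant; otherwise return the supplied fallback.
int64_t scalar_from_constant_or(const std::shared_ptr<ngraph::Node>& node,
                                size_t port,
                                int64_t fallback) {
    const auto c = ov::as_type_ptr<ngraph::op::Constant>(
        node->input_value(port).get_node_shared_ptr());
    return c ? c->cast_vector<int64_t>().at(0) : fallback;
}
```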
@@ -226,14 +226,14 @@ public:
        auto node = nodes.front();
        nodes.pop_front();

-        if (visited.count(node) || is_type<op::Constant>(node)) {
+        if (visited.count(node) || ov::is_type<op::Constant>(node)) {
            continue;
        }

        visited.insert(node);

        bool handleConnectedNodes = false;
-        if (NetworkHelper::isPrecisionPreserved(node) || is_type<opset1::FakeQuantize>(node)) {
+        if (NetworkHelper::isPrecisionPreserved(node) || ov::is_type<opset1::FakeQuantize>(node)) {
            auto& rt = node->get_rt_info();

            if (node == initialNode) {
@@ -255,13 +255,13 @@ public:
            continue;
        }

-        if (!is_type<opset1::FakeQuantize>(node)) {
+        if (!ov::is_type<opset1::FakeQuantize>(node)) {
            for (size_t index = 0ul; index < node->get_input_size(); ++index) {
                auto getInput = [](const std::shared_ptr<ngraph::Node>& node, const size_t index) {
                    const auto dequantization = NetworkHelper::getDequantization(node, index);
                    if (!dequantization.empty() &&
-                        (is_type<opset1::Convert>(dequantization.data.get_node())) &&
-                        is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
+                        (ov::is_type<opset1::Convert>(dequantization.data.get_node())) &&
+                        ov::is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
                        const auto input = dequantization.data.get_node()->input(0);
                        return input;
                    }
@@ -272,7 +272,7 @@ public:
                const auto& input_node = input.get_source_output().get_node_shared_ptr();

                //const auto& input_node = input.get_source_output().get_node_shared_ptr();
-                if (visited.count(input_node) || is_type<op::Constant>(input_node)) {
+                if (visited.count(input_node) || ov::is_type<op::Constant>(input_node)) {
                    continue;
                }
@@ -283,7 +283,7 @@ public:
            for (auto& output : node->outputs()) {
                for (auto& input_value : output.get_target_inputs()) {
                    const auto& output_node = input_value.get_node()->shared_from_this();
-                    if (visited.count(output_node) || is_type<op::Constant>(output_node)) {
+                    if (visited.count(output_node) || ov::is_type<op::Constant>(output_node)) {
                        continue;
                    }
@@ -364,7 +364,7 @@ std::shared_ptr<Node> NetworkHelper::setOutDataPrecision(std::shared_ptr<Operati
template <typename T>
std::shared_ptr<Node> make_op_pattern(const ngraph::NodeVector& args) {
-    return std::make_shared<ngraph::pattern::op::Any>(element::undefined, PartialShape{}, [](std::shared_ptr<Node> n) {return !!as_type_ptr<T>(n); }, args);
+    return std::make_shared<ngraph::pattern::op::Any>(element::undefined, PartialShape{}, [](std::shared_ptr<Node> n) {return !!ov::as_type_ptr<T>(n); }, args);
}

template <typename T>
@@ -372,7 +372,7 @@ std::shared_ptr<Node> make_op_label() {
    return std::make_shared<ngraph::pattern::op::Label>(
        element::undefined,
        PartialShape{},
-        [](std::shared_ptr<Node> n) {return !!as_type_ptr<T>(n); });
+        [](std::shared_ptr<Node> n) {return !!ov::as_type_ptr<T>(n); });
}
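// Usage sketch (assumed): these helpers typically build matcher inputs, e.g.
//   auto fqLabel = make_op_label<opset1::FakeQuantize>();
// which matches any single FakeQuantize node regardless of its inputs.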

template <typename T, typename... Args>
@@ -394,18 +394,18 @@ std::shared_ptr<Node> fold_reshape(Args&&... args) {
    std::shared_ptr<Node> node = std::make_shared<T>(std::forward<Args>(args)...);
    if (node->get_output_size() == 1) {
        // issue #57985: remove fold_reshape & reuse nGraph implementation
-        const auto values = as_type_ptr<opset1::Constant>(node->input_value(1).get_node_shared_ptr())->template cast_vector<int64_t>();
+        const auto values = ov::as_type_ptr<opset1::Constant>(node->input_value(1).get_node_shared_ptr())->template cast_vector<int64_t>();
        if (std::any_of(values.begin(), values.end(), [](const int64_t value) { return (value == 0) || (value == -1); })) {
            return fold<opset1::Reshape>(std::forward<Args>(args)...);
        }

        OutputVector folded;
-        if (is_type<opset1::Constant>(node->input_value(0).get_node_shared_ptr()) &&
-            is_type<opset1::Constant>(node->input_value(1).get_node_shared_ptr())) {
+        if (ov::is_type<opset1::Constant>(node->input_value(0).get_node_shared_ptr()) &&
+            ov::is_type<opset1::Constant>(node->input_value(1).get_node_shared_ptr())) {
            return std::make_shared<opset1::Constant>(
                node->get_input_element_type(0),
-                Shape(as_type_ptr<opset1::Constant>(node->input_value(1).get_node_shared_ptr())->template cast_vector<size_t>()),
-                as_type_ptr<opset1::Constant>(node->input_value(0).get_node_shared_ptr())->get_data_ptr());
+                Shape(ov::as_type_ptr<opset1::Constant>(node->input_value(1).get_node_shared_ptr())->template cast_vector<size_t>()),
+                ov::as_type_ptr<opset1::Constant>(node->input_value(0).get_node_shared_ptr())->get_data_ptr());
        }
    }
    return node;
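// Note: fold_reshape constant-folds only when both the data and the target
// shape are Constants and the shape has no 0/-1 wildcards; the folded
// Constant is built directly from the original data pointer, so no copy is made.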
@@ -36,7 +36,7 @@ public:
    std::vector<std::shared_ptr<ngraph::Node>> nodes(f->get_ordered_ops());
    for (auto it = nodes.begin(); it != nodes.end(); it++) {
        const std::shared_ptr<Node> node = *it;
-        if (is_type<opset1::FakeQuantize>(node)) {
+        if (ov::is_type<opset1::FakeQuantize>(node)) {
            assert(node->get_output_size() == 1ul);
            auto& outputRtInfo = node->output(0).get_rt_info();

@@ -56,7 +56,7 @@ public:

    auto node = nodeInput.get_source_output().get_node_shared_ptr();
    std::vector<std::shared_ptr<ngraph::VariantWrapper<std::shared_ptr<PrecisionsAttribute>>>> attributes;
-    if (is_type<opset1::FakeQuantize>(node)) {
+    if (ov::is_type<opset1::FakeQuantize>(node)) {
        // output
        auto& rt = nodeInput.get_source_output().get_rt_info();
        auto it = rt.find(name);
@@ -109,8 +109,8 @@ private:

    const auto dequantization = NetworkHelper::getDequantization(node, index);
    if (!dequantization.empty() &&
-        (is_type<opset1::Convert>(dequantization.data.get_node())) &&
-        is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
+        (ov::is_type<opset1::Convert>(dequantization.data.get_node())) &&
+        ov::is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
        inputNode = dequantization.data.get_node()->get_input_node_shared_ptr(0);
    }

@@ -121,7 +121,7 @@ private:
        const auto attribute = std::dynamic_pointer_cast<ngraph::VariantWrapper<std::shared_ptr<PrecisionsAttribute>>>(inputAttributeIt->second);
        parentAttributes.push_back(attribute);
    }
-    } else if (is_type<opset1::FakeQuantize>(inputNode)) {
+    } else if (ov::is_type<opset1::FakeQuantize>(inputNode)) {
        const auto& outputPortRtInfo = inputNode->outputs()[0].get_rt_info();
        auto attributeIt = outputPortRtInfo.find(ngraph::VariantWrapper<std::shared_ptr<PrecisionsAttribute>>::type_info.name);
        if (attributeIt != outputPortRtInfo.end()) {
@@ -96,9 +96,9 @@ private:
    auto getInput = [](const std::shared_ptr<ngraph::Node>& node, const size_t index) -> Input<Node> {
        const auto dequantization = NetworkHelper::getDequantization(node, index);
        if (!dequantization.empty() &&
-            is_type<opset1::Convert>(dequantization.data.get_node()) &&
+            ov::is_type<opset1::Convert>(dequantization.data.get_node()) &&
            (dequantization.data.get_node()->get_input_size() == 1ul) &&
-            is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
+            ov::is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
            return dequantization.data.get_node()->input(0);
        }

@@ -72,9 +72,9 @@ private:
    auto getInput = [](const Input<Node>& input) {
        const auto dequantization = NetworkHelper::getDequantization(input.get_node()->shared_from_this(), input.get_index());
        if (!dequantization.empty() &&
-            is_type<opset1::Convert>(dequantization.data.get_node()) &&
+            ov::is_type<opset1::Convert>(dequantization.data.get_node()) &&
            (dequantization.data.get_node()->get_input_size() == 1ul) &&
-            is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
+            ov::is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
            return dequantization.data.get_node()->input(0);
        }

@@ -35,14 +35,14 @@ public:
    const bool needToCheckExpectedAttributeType = !std::is_same<ExpectedAttributeType, AttributeType>::value;
    if (!needToCheckExpectedAttributeType) {
        // expected attribute is ignored, set attributes for node inputs except Result & FakeQuantize operations
-        if (is_type<ngraph::opset1::Result>(node) ||
-            is_type<ngraph::opset1::FakeQuantize>(node) ||
+        if (ov::is_type<ngraph::opset1::Result>(node) ||
+            ov::is_type<ngraph::opset1::FakeQuantize>(node) ||
            transformation_callback(node)) {
            return false;
        }
    }

-    if (ngraph::pass::low_precision::NetworkHelper::isPrecisionPreserved(node) || is_type<opset1::FakeQuantize>(node)) {
+    if (ngraph::pass::low_precision::NetworkHelper::isPrecisionPreserved(node) || ov::is_type<opset1::FakeQuantize>(node)) {
        return false;
    }

@@ -87,8 +87,8 @@ private:
    Input<Node> getDequantizationInput(const Input<Node>& input) {
        const auto dequantization = NetworkHelper::getDequantization(input.get_node()->shared_from_this(), input.get_index());
        if (!dequantization.empty() &&
-            (is_type<opset1::Convert>(dequantization.data.get_node())) &&
-            is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
+            (ov::is_type<opset1::Convert>(dequantization.data.get_node())) &&
+            ov::is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
            assert(dequantization.data.get_target_inputs().size() == 1ul);
            return *dequantization.data.get_target_inputs().begin();
        }
@@ -28,26 +28,26 @@ std::shared_ptr<opset1::Subtract> replaceToSubtract(const std::shared_ptr<Node>&
    // motivation:
    //   - single responsibility
    //   - keep AddTransformation and AddToSubtractTransformation transformations independent and optional
-    const auto add = as_type_ptr<opset1::Add>(op);
+    const auto add = ov::as_type_ptr<opset1::Add>(op);
    if (add == nullptr) {
        return nullptr;
    }

    // TODO: use general way from getDequantization: is eltwise with Constant
-    const int constBranchIndex = is_type<opset1::Constant>(add->get_input_node_ptr(0)) ?
+    const int constBranchIndex = ov::is_type<opset1::Constant>(add->get_input_node_ptr(0)) ?
        0 :
-        (is_type<opset1::Constant>(add->get_input_node_ptr(1)) ? 1 : -1);
+        (ov::is_type<opset1::Constant>(add->get_input_node_ptr(1)) ? 1 : -1);
    if (constBranchIndex == -1) {
        return nullptr;
    }
    const size_t dataBranchIndex = constBranchIndex == 0 ? 1ul : 0;

    const auto parent = add->get_input_node_shared_ptr(dataBranchIndex);
-    if (is_type<opset1::Convolution>(parent) ||
-        is_type<opset1::GroupConvolution>(parent) ||
-        is_type<opset1::ConvolutionBackpropData>(parent) ||
-        (is_type<opset1::MatMul>(parent) &&
-        (is_type<opset1::Constant>(parent->get_input_node_ptr(0)) || is_type<opset1::Constant>(parent->get_input_node_ptr(1))))) {
+    if (ov::is_type<opset1::Convolution>(parent) ||
+        ov::is_type<opset1::GroupConvolution>(parent) ||
+        ov::is_type<opset1::ConvolutionBackpropData>(parent) ||
+        (ov::is_type<opset1::MatMul>(parent) &&
+        (ov::is_type<opset1::Constant>(parent->get_input_node_ptr(0)) || ov::is_type<opset1::Constant>(parent->get_input_node_ptr(1))))) {
        return nullptr;
    }

@@ -68,11 +68,11 @@ std::shared_ptr<opset1::Subtract> replaceToSubtract(const std::shared_ptr<Node>&
}

std::shared_ptr<opset1::Subtract> fuseWithSubtract(const std::shared_ptr<Node>& op) {
-    const auto add = as_type_ptr<opset1::Add>(op);
+    const auto add = ov::as_type_ptr<opset1::Add>(op);
    if ((add == nullptr) ||
-        !is_type<opset1::Subtract>(add->get_input_node_shared_ptr(0)) ||
+        !ov::is_type<opset1::Subtract>(add->get_input_node_shared_ptr(0)) ||
        // TODO: use general way from getDequantization: is eltwise with Constant
-        !is_type<opset1::Constant>(add->get_input_node_shared_ptr(0)->get_input_node_shared_ptr(1))) {
+        !ov::is_type<opset1::Constant>(add->get_input_node_shared_ptr(0)->get_input_node_shared_ptr(1))) {
        return nullptr;
    }
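Algebraically, the rewrite these helpers target rests on the identity $x + C = x - (-C)$: an `Add` with one `Constant` branch (and whose data branch is not a convolution or a constant-weighted `MatMul`, per the guard above) can be re-expressed as a `Subtract` with the negated constant, which is what keeps `AddTransformation` and `AddToSubtractTransformation` independent, as the motivation comment says.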
@@ -107,7 +107,7 @@ AddTransformation::AddTransformation(const Params& params) : EltwiseBaseTransfor
}

bool AddTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
-    std::shared_ptr<opset1::Add> op = as_type_ptr<opset1::Add>(m.get_match_root());
+    std::shared_ptr<opset1::Add> op = ov::as_type_ptr<opset1::Add>(m.get_match_root());
    if ((op == nullptr) || (!canBeTransformed(context, op))) {
        return false;
    }
@@ -116,7 +116,7 @@ bool AddTransformation::transform(TransformationContext& context, ngraph::patter
    NetworkHelper::normalizeDequantization(NetworkHelper::getDequantization(op, 1));

    std::shared_ptr<Node> addNode = NetworkHelper::separateInStandaloneBranch(op);
-    std::shared_ptr<opset1::Add> add = as_type_ptr<opset1::Add>(addNode);
+    std::shared_ptr<opset1::Add> add = ov::as_type_ptr<opset1::Add>(addNode);

    const int fullPathIndex = getNotEmpty(add);
    std::shared_ptr<Node> newMultiply;
@@ -136,7 +136,7 @@ bool AddTransformation::transform(TransformationContext& context, ngraph::patter

        newMultiply = NetworkHelper::swapMultiplyAndAdd(add, multiplyBranch.first);
        ngraph::copy_runtime_info({ add, newMultiply }, newMultiply);
-        if (is_type<opset1::Add>(newMultiply->get_input_node_shared_ptr(0))) {
+        if (ov::is_type<opset1::Add>(newMultiply->get_input_node_shared_ptr(0))) {
            newAddOrSubtract = newMultiply->get_input_node_shared_ptr(0);

            auto subtract = fuseWithSubtract(newAddOrSubtract);
@@ -37,13 +37,13 @@ bool ClampTransformation::transform(TransformationContext& context, ngraph::patt
        return false;
    }

-    auto constant = as_type_ptr<ngraph::opset1::Constant>(sub->get_input_node_shared_ptr(1));
+    auto constant = ov::as_type_ptr<ngraph::opset1::Constant>(sub->get_input_node_shared_ptr(1));
    if (constant == nullptr) {
        const auto convert = sub->get_input_node_shared_ptr(1);
-        if (!is_type<ngraph::opset1::Convert>(convert)) {
+        if (!ov::is_type<ngraph::opset1::Convert>(convert)) {
            return false;
        }
-        constant = as_type_ptr<ngraph::opset1::Constant>(convert->get_input_node_shared_ptr(0));
+        constant = ov::as_type_ptr<ngraph::opset1::Constant>(convert->get_input_node_shared_ptr(0));
    }

    if (constant == nullptr) {
@@ -66,7 +66,7 @@ bool ClampTransformation::transform(TransformationContext& context, ngraph::patt
        return false;
    }

-    const auto newClamp = as_type_ptr<opset1::Clamp>(moveDequantizationAfter(context, clamp, dequantization, false, moveSubtract));
+    const auto newClamp = ov::as_type_ptr<opset1::Clamp>(moveDequantizationAfter(context, clamp, dequantization, false, moveSubtract));

    std::shared_ptr<ngraph::opset1::Clamp> replacement;
    {
@@ -74,7 +74,7 @@ bool ClampTransformation::transform(TransformationContext& context, ngraph::patt
        double max = newClamp->get_max();

        if (dequantization.multiply != nullptr) {
-            double scale = as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(1))->cast_vector<double>()[0];
+            double scale = ov::as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(1))->cast_vector<double>()[0];
            if (scale < 0.0) {
                std::swap(min, max);
            }
@@ -83,7 +83,7 @@ bool ClampTransformation::transform(TransformationContext& context, ngraph::patt
        }

        if (dequantization.subtract != nullptr && moveSubtract) {
-            double shift = as_type_ptr<opset1::Constant>(dequantization.subtractConstant)->cast_vector<double>()[0];
+            double shift = ov::as_type_ptr<opset1::Constant>(dequantization.subtractConstant)->cast_vector<double>()[0];
            min += shift;
            max += shift;
        }
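As a sanity check on the bound arithmetic, assume the usual affine dequantization $y = (x - s)\,k$. Moving it after the clamp relies on

$$\operatorname{clamp}_{[m,\,M]}\bigl((x-s)\,k\bigr) \;=\; \bigl(\operatorname{clamp}_{[m',\,M']}(x) - s\bigr)\,k, \qquad m' = m/k + s,\quad M' = M/k + s,$$

with $m'$ and $M'$ swapped when $k < 0$. The swap and the `+= shift` adjustment are visible above; the division by the scale presumably happens in lines elided from this hunk.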
@@ -178,7 +178,7 @@ bool ConcatTransformation::isPrecisionPreserved(std::shared_ptr<Node>) const noe
}

bool ConcatTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> layer) const {
-    std::shared_ptr<opset1::Concat> concat = as_type_ptr<opset1::Concat>(layer);
+    std::shared_ptr<opset1::Concat> concat = ov::as_type_ptr<opset1::Concat>(layer);
    if (concat == nullptr) {
        return false;
    }
@@ -37,7 +37,7 @@ ConvertTransformation::ConvertTransformation(const Params& params) : LayerTransf
}

bool ConvertTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
-    std::shared_ptr<opset1::Convert> convert = as_type_ptr<opset1::Convert>(m.get_match_root());
+    std::shared_ptr<opset1::Convert> convert = ov::as_type_ptr<opset1::Convert>(m.get_match_root());
    if (!convert) {
        return false;
    }
@@ -56,7 +56,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph

    if (!canConvolutionBeTransformed(context, convolution)) {
        const auto weightInput = convolution->get_input_node_shared_ptr(1);
-        const auto reshapeFromWeights = as_type_ptr<opset1::Reshape>(weightInput);
+        const auto reshapeFromWeights = ov::as_type_ptr<opset1::Reshape>(weightInput);
        FakeQuantizeDequantization dequantization = reshapeFromWeights == nullptr ?
            NetworkHelper::getDequantization(convolution, 1ul) :
            NetworkHelper::getDequantization(reshapeFromWeights);
@@ -69,7 +69,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
                reshapeFromWeights->input_value(1),
                false);
        }
-        if (is_type<opset1::Constant>(resultConstant)) {
+        if (ov::is_type<opset1::Constant>(resultConstant)) {
            replace_node(weightInput, resultConstant);
        }
    } else {
@@ -90,7 +90,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
            if (optimizedSubtract == nullptr) {
                optimizedSubtract = dequantization.subtract;
            }
-            subtract = as_type_ptr<opset1::Subtract>(optimizedSubtract);
+            subtract = ov::as_type_ptr<opset1::Subtract>(optimizedSubtract);
        }

        // workaround normalizes shape of Subtract to match CPU plugin expectations
@@ -108,7 +108,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
                Shape{ length },
                broadcastShape));

-            const auto newSubtract = as_type_ptr<opset1::Subtract>(subtract->clone_with_new_inputs({
+            const auto newSubtract = ov::as_type_ptr<opset1::Subtract>(subtract->clone_with_new_inputs({
                subtract->input_value(0),
                newShift }));
            NetworkHelper::copyInfo(subtract, newSubtract);
@@ -159,7 +159,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
        }

        const auto copyNode = convolution->clone_with_new_inputs({ dequantization.multiply->input_value(0), convolution->input_value(1) });
-        auto conv = as_type_ptr<opset1::Convolution>(copyNode);
+        auto conv = ov::as_type_ptr<opset1::Convolution>(copyNode);
        std::shared_ptr<Node> relaxedNewConvolution;
        if (conv) {
            relaxedNewConvolution = std::make_shared<op::TypeRelaxed<opset1::Convolution>>(
@@ -168,7 +168,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
                std::vector<element::Type>{deqPrecision});
        } else {
            relaxedNewConvolution = std::make_shared<op::TypeRelaxed<opset1::GroupConvolution>>(
-                *as_type_ptr<opset1::GroupConvolution>(copyNode),
+                *ov::as_type_ptr<opset1::GroupConvolution>(copyNode),
                std::vector<element::Type>{deqPrecision, deqPrecision},
                std::vector<element::Type>{deqPrecision});
        }
@@ -183,7 +183,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
        replace_node(convolution, newMultiplyAfter);
        convolution = newMultiplyAfter->input_value(0).get_node_shared_ptr();

-        if (is_type<opset1::Convert>(convolution->get_input_node_ptr(0))) {
+        if (ov::is_type<opset1::Convert>(convolution->get_input_node_ptr(0))) {
            auto newConvolution = convolution->clone_with_new_inputs({
                convolution->get_input_node_ptr(0)->input_value(0),
                convolution->input_value(1)});
@@ -201,24 +201,24 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
            return false;
        }

-        std::shared_ptr<opset1::Reshape> reshapeFromWeights = as_type_ptr<opset1::Reshape>(convolution->get_input_node_shared_ptr(1));
+        std::shared_ptr<opset1::Reshape> reshapeFromWeights = ov::as_type_ptr<opset1::Reshape>(convolution->get_input_node_shared_ptr(1));

        dequantization = reshapeFromWeights == nullptr ?
            NetworkHelper::getDequantization(convolution, 1ul) :
            NetworkHelper::getDequantization(reshapeFromWeights);
        assert(!dequantization.empty());
-        if (is_type<opset1::FakeQuantize>(dequantization.data.get_node())) {
-            const std::shared_ptr<opset1::FakeQuantize> fq = as_type_ptr<opset1::FakeQuantize>(dequantization.data.get_node_shared_ptr());
+        if (ov::is_type<opset1::FakeQuantize>(dequantization.data.get_node())) {
+            const std::shared_ptr<opset1::FakeQuantize> fq = ov::as_type_ptr<opset1::FakeQuantize>(dequantization.data.get_node_shared_ptr());
            std::shared_ptr<ngraph::Node> newFQ = NetworkHelper::fold_fake_quantize(fq, true);
            NetworkHelper::copyInfo(fq, newFQ);
            replace_node(fq, newFQ);
        }

-        std::shared_ptr<opset1::Multiply> multiplyFromWeights = as_type_ptr<opset1::Multiply>(
+        std::shared_ptr<opset1::Multiply> multiplyFromWeights = ov::as_type_ptr<opset1::Multiply>(
            reshapeFromWeights == nullptr ?
                convolution->get_input_node_shared_ptr(1) :
                convolution->get_input_node_ptr(1)->get_input_node_shared_ptr(0));
-        std::shared_ptr<opset1::Subtract> subtractFromWeights = as_type_ptr<opset1::Subtract>(multiplyFromWeights->get_input_node_shared_ptr(0));
+        std::shared_ptr<opset1::Subtract> subtractFromWeights = ov::as_type_ptr<opset1::Subtract>(multiplyFromWeights->get_input_node_shared_ptr(0));

        {
            const auto newScalePShape = multiplyFromWeights->get_input_partial_shape(1);
@@ -231,7 +231,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
            }

            if (reshapeFromWeights != nullptr) {
-                reshapeFromWeights = as_type_ptr<opset1::Reshape>(reshapeFromWeights->copy_with_new_inputs({
+                reshapeFromWeights = ov::as_type_ptr<opset1::Reshape>(reshapeFromWeights->copy_with_new_inputs({
                    multiplyFromWeights->input_value(0),
                    reshapeFromWeights->input_value(1) }));
            }
@@ -264,7 +264,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
            if (optimizedSubtract == nullptr) {
                subtractFromWeights = nullptr;
            } else {
-                subtractFromWeights = as_type_ptr<opset1::Subtract>(optimizedSubtract);
+                subtractFromWeights = ov::as_type_ptr<opset1::Subtract>(optimizedSubtract);

                const auto weightsPShape = subtractFromWeights->get_input_partial_shape(0);
                assert(weightsPShape.is_static());
@@ -281,7 +281,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
            }
        }

-        std::shared_ptr<opset1::Convert> convertFromWeights = as_type_ptr<opset1::Convert>(subtractFromWeights == nullptr ?
+        std::shared_ptr<opset1::Convert> convertFromWeights = ov::as_type_ptr<opset1::Convert>(subtractFromWeights == nullptr ?
            multiplyFromWeights->get_input_node_shared_ptr(0) :
            subtractFromWeights->get_input_node_shared_ptr(0));
        if (convertFromWeights != nullptr) {
@@ -298,7 +298,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
            convolution = newConvolution;
        }

-        reshapeFromWeights = as_type_ptr<opset1::Reshape>(convolution->get_input_node_shared_ptr(1));
+        reshapeFromWeights = ov::as_type_ptr<opset1::Reshape>(convolution->get_input_node_shared_ptr(1));
        if (reshapeFromWeights != nullptr) {
            // remove Reshape on weights
            const std::shared_ptr<Node> newWeights = fold_reshape<opset1::Reshape>(
@@ -319,11 +319,11 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
        NetworkHelper::normalizeDequantizationShape(finalDequantization);

        auto onWeights = convolution->get_input_node_shared_ptr(1);
-        if (is_type<opset1::Reshape>(onWeights)) {
+        if (ov::is_type<opset1::Reshape>(onWeights)) {
            onWeights = onWeights->get_input_node_shared_ptr(0);
        }

-        if (is_type<opset1::Subtract>(onWeights)) {
+        if (ov::is_type<opset1::Subtract>(onWeights)) {
            auto& rt = onWeights->get_rt_info();
            rt["DISABLED_CONSTANT_FOLDING"] = std::make_shared<ngraph::VariantWrapper<std::string>>("");
        }
|
||||
|
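Editorial note: aside from the per-file hunks, the change repeated throughout this merge is a mechanical migration from the unqualified `is_type`/`as_type_ptr` helpers to their `ov::`-qualified counterparts. Below is a minimal sketch of the call pattern being migrated; the helper function and its behavior are illustrative only (not taken from this diff), and the includes assume the ngraph headers these files already use.

#include <memory>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>

using namespace ngraph;

// Hypothetical helper: returns the Constant input of a two-input node, if any.
// ov::is_type<T>(node) checks the node's runtime type info; ov::as_type_ptr<T>(ptr)
// is the matching checked downcast that yields nullptr on a type mismatch.
std::shared_ptr<opset1::Constant> constantInput(const std::shared_ptr<Node>& node) {
    if (ov::is_type<opset1::Constant>(node->get_input_node_ptr(0))) {
        return ov::as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(0));
    }
    // Old spelling before this merge: as_type_ptr<opset1::Constant>(...)
    return ov::as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(1));
}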
@ -66,7 +66,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con

if (!canBeTransformed(context, convolutionBackpropData)) {
auto weightsInput = convolutionBackpropData->get_input_node_shared_ptr(1);
std::shared_ptr<opset1::Reshape> reshapeFromWeights = as_type_ptr<opset1::Reshape>(weightsInput);
std::shared_ptr<opset1::Reshape> reshapeFromWeights = ov::as_type_ptr<opset1::Reshape>(weightsInput);
FakeQuantizeDequantization dequantization = reshapeFromWeights == nullptr ?
NetworkHelper::getDequantization(convolutionBackpropData, 1ul) :
NetworkHelper::getDequantization(reshapeFromWeights);
@ -87,7 +87,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con
reshapeFromWeights->input_value(1),
false);
}
if (is_type<opset1::Constant>(resultConstant)) {
if (ov::is_type<opset1::Constant>(resultConstant)) {
replace_node(weightsInput, resultConstant);
}
} else {
@ -113,7 +113,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con
const auto copyNode = convolutionBackpropData->copy_with_new_inputs(inputs);

const auto relaxedConvolutionBackpropData = std::make_shared<op::TypeRelaxed<opset1::ConvolutionBackpropData>>(
*as_type_ptr<opset1::ConvolutionBackpropData>(copyNode),
*ov::as_type_ptr<opset1::ConvolutionBackpropData>(copyNode),
std::vector<element::Type>{deqPrecision, deqPrecision},
std::vector<element::Type>{deqPrecision});

@ -126,7 +126,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con
replace_node(convolutionBackpropData, newMultiplyAfter);
convolutionBackpropData = newMultiplyAfter->get_input_node_shared_ptr(0);
inputs[0] = convolutionBackpropData->get_input_node_ptr(0)->input_value(0);
if (is_type<opset1::Convert>(convolutionBackpropData->get_input_node_ptr(0))) {
if (ov::is_type<opset1::Convert>(convolutionBackpropData->get_input_node_ptr(0))) {
auto newConvolution = convolutionBackpropData->copy_with_new_inputs(inputs);
replace_node(convolutionBackpropData, newConvolution);
convolutionBackpropData = newConvolution;
@ -137,16 +137,16 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con
decomposeFakeQuantizeForWeightsPath(convolutionBackpropData, 1ul);
dequantization = NetworkHelper::getDequantization(convolutionBackpropData, 1ul);

if (is_type<opset1::FakeQuantize>(dequantization.data.get_node())) {
const std::shared_ptr<opset1::FakeQuantize> fq = as_type_ptr<opset1::FakeQuantize>(dequantization.data.get_node_shared_ptr());
if (ov::is_type<opset1::FakeQuantize>(dequantization.data.get_node())) {
const std::shared_ptr<opset1::FakeQuantize> fq = ov::as_type_ptr<opset1::FakeQuantize>(dequantization.data.get_node_shared_ptr());
std::shared_ptr<ngraph::Node> newFQ = NetworkHelper::fold_fake_quantize(fq, true);
NetworkHelper::copyInfo(fq, newFQ);
replace_node(fq, newFQ);
}

std::shared_ptr<opset1::Multiply> multiplyFromWeights = as_type_ptr<opset1::Multiply>(
std::shared_ptr<opset1::Multiply> multiplyFromWeights = ov::as_type_ptr<opset1::Multiply>(
convolutionBackpropData->input_value(1).get_node_shared_ptr());
std::shared_ptr<opset1::Subtract> subtractFromWeights = as_type_ptr<opset1::Subtract>(multiplyFromWeights->get_input_node_shared_ptr(0));
std::shared_ptr<opset1::Subtract> subtractFromWeights = ov::as_type_ptr<opset1::Subtract>(multiplyFromWeights->get_input_node_shared_ptr(0));

{
const auto newScalePShape = multiplyFromWeights->get_input_partial_shape(1);
@ -173,7 +173,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con
if (optimizedSubtract == nullptr) {
subtractFromWeights = nullptr;
} else {
subtractFromWeights = as_type_ptr<opset1::Subtract>(optimizedSubtract);
subtractFromWeights = ov::as_type_ptr<opset1::Subtract>(optimizedSubtract);

const auto weightsPShape = subtractFromWeights->get_input_partial_shape(0);
assert(weightsPShape.is_static());
@ -190,7 +190,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con
}

std::shared_ptr<opset1::Convert> convertFromWeights =
as_type_ptr<opset1::Convert>(
ov::as_type_ptr<opset1::Convert>(
subtractFromWeights == nullptr ?
multiplyFromWeights->get_input_node_shared_ptr(0) :
subtractFromWeights->get_input_node_shared_ptr(0));
@ -209,11 +209,11 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con
updateOutput(context, finalDequantization, convolutionBackpropData);

auto onWeights = convolutionBackpropData->get_input_node_shared_ptr(1);
if (is_type<opset1::Reshape>(onWeights)) {
if (ov::is_type<opset1::Reshape>(onWeights)) {
onWeights = onWeights->get_input_node_shared_ptr(0);
}

if (is_type<opset1::Subtract>(onWeights)) {
if (ov::is_type<opset1::Subtract>(onWeights)) {
auto& rt = onWeights->get_rt_info();
rt["DISABLED_CONSTANT_FOLDING"] = std::make_shared<ngraph::VariantWrapper<std::string>>("");
}

@ -51,7 +51,7 @@ bool DepthToSpaceTransformation::canBeTransformed(const TransformationContext& c

const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(layer);
if (dequantization.multiply != nullptr) {
auto multiplyConst = as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(1));
auto multiplyConst = ov::as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(1));
if (!NetworkHelper::isScalarLike(multiplyConst)) {
return false;
}

@ -41,8 +41,8 @@ bool EltwiseBaseTransformation::canBeTransformed(const TransformationContext& co
return false;
}

if ((as_type_ptr<ngraph::opset1::Constant>(operation->get_input_node_shared_ptr(0)) ||
as_type_ptr<ngraph::opset1::Constant>(operation->get_input_node_shared_ptr(1))) &&
if ((ov::as_type_ptr<ngraph::opset1::Constant>(operation->get_input_node_shared_ptr(0)) ||
ov::as_type_ptr<ngraph::opset1::Constant>(operation->get_input_node_shared_ptr(1))) &&
!FakeQuantizeDequantization::checkElementwise(operation)) {
NetworkHelper::cleanRunTimeInfo(operation);
}
@ -65,18 +65,18 @@ bool EltwiseBaseTransformation::canBeTransformed(const TransformationContext& co
}

static bool isTargetType(const std::shared_ptr<Node> node) {
return is_type<opset1::Convolution>(node) ||
is_type<opset1::GroupConvolution>(node) ||
is_type<opset1::MatMul>(node);
return ov::is_type<opset1::Convolution>(node) ||
ov::is_type<opset1::GroupConvolution>(node) ||
ov::is_type<opset1::MatMul>(node);
}

static std::shared_ptr<Node> getDataParent(const std::shared_ptr<Node> branchData) {
std::shared_ptr<Node> parent = branchData;
while (is_type<opset1::FakeQuantize>(parent)) {
while (ov::is_type<opset1::FakeQuantize>(parent)) {
parent = parent->get_input_node_shared_ptr(0);
}

if (is_type<opset1::Add>(parent) && isTargetType(parent->get_input_node_shared_ptr(0))) {
if (ov::is_type<opset1::Add>(parent) && isTargetType(parent->get_input_node_shared_ptr(0))) {
return parent->get_input_node_shared_ptr(0);
}
return parent;
@ -96,12 +96,12 @@ static bool isBranchHaveMultipleConsumers(const std::shared_ptr<Node> branchData
// return branch index with FP32 precision after eltwise transformation
int EltwiseBaseTransformation::getNotEmpty(const std::shared_ptr<Node>& eltwise) const {
const FakeQuantizeDequantization dequantization1 = pass::low_precision::NetworkHelper::getDequantization(eltwise, 0ul);
if (as_type<opset1::Constant>(dequantization1.data.get_node())) {
if (ov::as_type<opset1::Constant>(dequantization1.data.get_node())) {
return -1;
}

const FakeQuantizeDequantization dequantization2 = pass::low_precision::NetworkHelper::getDequantization(eltwise, 1ul);
if (as_type<opset1::Constant>(dequantization2.data.get_node())) {
if (ov::as_type<opset1::Constant>(dequantization2.data.get_node())) {
return -1;
}

@ -130,9 +130,9 @@ int EltwiseBaseTransformation::getNotEmpty(const std::shared_ptr<Node>& eltwise)
}

const std::shared_ptr<opset1::FakeQuantize> fakeQuantize1 =
as_type_ptr<opset1::FakeQuantize>(dequantization1.data.get_node_shared_ptr());
ov::as_type_ptr<opset1::FakeQuantize>(dequantization1.data.get_node_shared_ptr());
const std::shared_ptr<opset1::FakeQuantize> fakeQuantize2 =
as_type_ptr<opset1::FakeQuantize>(dequantization2.data.get_node_shared_ptr());
ov::as_type_ptr<opset1::FakeQuantize>(dequantization2.data.get_node_shared_ptr());

if (fakeQuantize1 && !fakeQuantize2) {
return 0;
@ -151,11 +151,11 @@ int EltwiseBaseTransformation::getNotEmpty(const std::shared_ptr<Node>& eltwise)
return 1;
}

if (is_type<opset1::Constant>(dequantization1.data.get_node())) {
if (ov::is_type<opset1::Constant>(dequantization1.data.get_node())) {
return 0;
}

if (is_type<opset1::Constant>(dequantization2.data.get_node())) {
if (ov::is_type<opset1::Constant>(dequantization2.data.get_node())) {
return 1;
}

@ -199,17 +199,17 @@ std::pair<int, int> EltwiseBaseTransformation::getMultiplyConstBranch(const std:
const auto dequantization2 = NetworkHelper::getDequantization(eltwise, 1);

std::shared_ptr<opset1::Constant> constParent = dequantization1.empty() ?
as_type_ptr<opset1::Constant>(parent1) :
as_type_ptr<opset1::Constant>(dequantization1.data.get_node_shared_ptr());
std::shared_ptr<opset1::Multiply> multiplyParent = as_type_ptr<opset1::Multiply>(parent2);
ov::as_type_ptr<opset1::Constant>(parent1) :
ov::as_type_ptr<opset1::Constant>(dequantization1.data.get_node_shared_ptr());
std::shared_ptr<opset1::Multiply> multiplyParent = ov::as_type_ptr<opset1::Multiply>(parent2);
int multiplyBranch = 1;


if (constParent == nullptr || multiplyParent == nullptr) {
constParent = dequantization2.empty() ?
as_type_ptr<opset1::Constant>(parent2) :
as_type_ptr<opset1::Constant>(dequantization2.data.get_node_shared_ptr());
multiplyParent = as_type_ptr<opset1::Multiply>(parent1);
ov::as_type_ptr<opset1::Constant>(parent2) :
ov::as_type_ptr<opset1::Constant>(dequantization2.data.get_node_shared_ptr());
multiplyParent = ov::as_type_ptr<opset1::Multiply>(parent1);
multiplyBranch = 0;
}

@ -220,14 +220,14 @@ std::pair<int, int> EltwiseBaseTransformation::getMultiplyConstBranch(const std:
auto multiplyParentParent1 = multiplyParent->get_input_node_shared_ptr(0);
auto multiplyParentParent2 = multiplyParent->get_input_node_shared_ptr(1);

auto multiplyParentParent = as_type_ptr<opset1::Multiply>(multiplyParentParent1);
auto multiplyParentConst = as_type_ptr<opset1::Constant>(multiplyParentParent2);
auto multiplyParentParent = ov::as_type_ptr<opset1::Multiply>(multiplyParentParent1);
auto multiplyParentConst = ov::as_type_ptr<opset1::Constant>(multiplyParentParent2);
int multiplyActBranch = 0;


if (multiplyParentConst == nullptr) {
multiplyParentParent = as_type_ptr<opset1::Multiply>(multiplyParentParent2);
multiplyParentConst = as_type_ptr<opset1::Constant>(multiplyParentParent1);
multiplyParentParent = ov::as_type_ptr<opset1::Multiply>(multiplyParentParent2);
multiplyParentConst = ov::as_type_ptr<opset1::Constant>(multiplyParentParent1);
multiplyActBranch = 1;
}


@ -68,11 +68,11 @@ static std::shared_ptr<Node> updateShape(std::shared_ptr<Node> constantOp, const
}

static std::shared_ptr<Node> getData(const std::shared_ptr<Node>& eltwise) {
if (!is_type<opset1::Constant>(eltwise->get_input_node_shared_ptr(0))) {
if (!ov::is_type<opset1::Constant>(eltwise->get_input_node_shared_ptr(0))) {
return eltwise->get_input_node_shared_ptr(0);
}

if (!is_type<opset1::Constant>(eltwise->get_input_node_shared_ptr(1))) {
if (!ov::is_type<opset1::Constant>(eltwise->get_input_node_shared_ptr(1))) {
return eltwise->get_input_node_shared_ptr(1);
}

@ -84,12 +84,12 @@ static std::shared_ptr<opset1::Constant> getConstant(const std::shared_ptr<Node>
return nullptr;
}

std::shared_ptr<opset1::Constant> constant = as_type_ptr<opset1::Constant>(eltwise->get_input_node_shared_ptr(1));
std::shared_ptr<opset1::Constant> constant = ov::as_type_ptr<opset1::Constant>(eltwise->get_input_node_shared_ptr(1));
if (constant != nullptr) {
return constant;
}

return as_type_ptr<opset1::Constant>(eltwise->get_input_node_shared_ptr(0));
return ov::as_type_ptr<opset1::Constant>(eltwise->get_input_node_shared_ptr(0));
}

} // namespace fq
@ -136,12 +136,12 @@ std::shared_ptr<opset1::FakeQuantize> FakeQuantizeTransformation::fuseElementwis
std::shared_ptr<Node> inputHighConst_f32 = foldConvert(fakeQuantize->get_input_node_shared_ptr(2), deqPrecision);

std::shared_ptr<opset1::Constant> constant = fq::getConstant(eltwise);
if (is_type<opset1::Multiply>(eltwise) && checkElementwise(eltwise)) {
if (ov::is_type<opset1::Multiply>(eltwise) && checkElementwise(eltwise)) {
const auto value = constant->get_output_element_type(0) == deqPrecision ?
constant :
foldConvert(constant, deqPrecision);

const auto valueVec = as_type_ptr<opset1::Constant>(value)->cast_vector<float>();
const auto valueVec = ov::as_type_ptr<opset1::Constant>(value)->cast_vector<float>();

if (std::any_of(valueVec.cbegin(), valueVec.cend(), [](const float value) { return value <= 0.f; })) {
return nullptr;
@ -149,8 +149,8 @@ std::shared_ptr<opset1::FakeQuantize> FakeQuantizeTransformation::fuseElementwis

inputLowConst_f32 = fold<opset1::Divide>(inputLowConst_f32, value);
inputHighConst_f32 = fold<opset1::Divide>(inputHighConst_f32, value);
const auto resultLow = as_type_ptr<opset1::Constant>(inputLowConst_f32)->cast_vector<float>();
const auto resultHigh = as_type_ptr<opset1::Constant>(inputHighConst_f32)->cast_vector<float>();
const auto resultLow = ov::as_type_ptr<opset1::Constant>(inputLowConst_f32)->cast_vector<float>();
const auto resultHigh = ov::as_type_ptr<opset1::Constant>(inputHighConst_f32)->cast_vector<float>();
if (std::any_of(resultLow.begin(), resultLow.end(), [](const float value){ return std::isinf(value); }) ||
std::any_of(resultHigh.begin(), resultHigh.end(), [](const float value){ return std::isinf(value); })) {
return nullptr;
@ -158,18 +158,18 @@ std::shared_ptr<opset1::FakeQuantize> FakeQuantizeTransformation::fuseElementwis

inputLowConst_f32 = fq::updateShape(inputLowConst_f32, fakeQuantize->get_output_partial_shape(0));
inputHighConst_f32 = fq::updateShape(inputHighConst_f32, fakeQuantize->get_output_partial_shape(0));
} else if (is_type<opset1::Subtract>(eltwise) && checkElementwise(eltwise)) {
} else if (ov::is_type<opset1::Subtract>(eltwise) && checkElementwise(eltwise)) {
const auto value = constant->get_output_element_type(0) == deqPrecision ?
constant :
foldConvert(constant, deqPrecision);

inputLowConst_f32 = fq::updateShape(fold<opset1::Add>(inputLowConst_f32, value), fakeQuantize->get_output_partial_shape(0));
inputHighConst_f32 = fq::updateShape(fold<opset1::Add>(inputHighConst_f32, value), fakeQuantize->get_output_partial_shape(0));
} else if (is_type<opset1::Add>(eltwise) && checkElementwise(eltwise)) {
if (is_type<opset1::Convolution>(fq::getData(eltwise)) ||
is_type<opset1::GroupConvolution>(fq::getData(eltwise)) ||
is_type<opset1::ConvolutionBackpropData>(fq::getData(eltwise)) ||
is_type<opset1::GroupConvolutionBackpropData>(fq::getData(eltwise))) {
} else if (ov::is_type<opset1::Add>(eltwise) && checkElementwise(eltwise)) {
if (ov::is_type<opset1::Convolution>(fq::getData(eltwise)) ||
ov::is_type<opset1::GroupConvolution>(fq::getData(eltwise)) ||
ov::is_type<opset1::ConvolutionBackpropData>(fq::getData(eltwise)) ||
ov::is_type<opset1::GroupConvolutionBackpropData>(fq::getData(eltwise))) {
return nullptr;
}

@ -179,7 +179,7 @@ std::shared_ptr<opset1::FakeQuantize> FakeQuantizeTransformation::fuseElementwis

inputLowConst_f32 = fq::updateShape(fold<opset1::Subtract>(inputLowConst_f32, value), fakeQuantize->get_output_partial_shape(0));
inputHighConst_f32 = fq::updateShape(fold<opset1::Subtract>(inputHighConst_f32, value), fakeQuantize->get_output_partial_shape(0));
} else if (is_type<opset1::Convert>(eltwise)) {
} else if (ov::is_type<opset1::Convert>(eltwise)) {
// issue #40611
if ((eltwise->get_input_element_type(0) == element::i32) &&
((eltwise->get_output_element_type(0) == element::f16) || (eltwise->get_output_element_type(0) == element::f32))) {
@ -192,7 +192,7 @@ std::shared_ptr<opset1::FakeQuantize> FakeQuantizeTransformation::fuseElementwis
const auto data = fq::getData(eltwise);
const size_t outputIdx = NetworkHelper::getParentOutputIndex(data, eltwise);

const auto newFakeQuantize = as_type_ptr<opset1::FakeQuantize>(fakeQuantize->clone_with_new_inputs({
const auto newFakeQuantize = ov::as_type_ptr<opset1::FakeQuantize>(fakeQuantize->clone_with_new_inputs({
data->output(outputIdx),
inputLowConst_f32,
inputHighConst_f32,

|
||||
// 2. Precisions on port
|
||||
DataPrecision getDataPrecisionByOutputPort(std::shared_ptr<opset1::FakeQuantize> layer) {
|
||||
const size_t levels = layer->get_levels();
|
||||
const std::vector<float> outputLowValues = as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(3))->cast_vector<float>();
|
||||
const std::vector<float> outputHighValues = as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(4))->cast_vector<float>();
|
||||
const std::vector<float> outputLowValues = ov::as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(3))->cast_vector<float>();
|
||||
const std::vector<float> outputHighValues = ov::as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(4))->cast_vector<float>();
|
||||
|
||||
auto precisionsAttribute = getAttributeFromOutput<std::shared_ptr<PrecisionsAttribute>>(layer->output(0));
|
||||
if (precisionsAttribute == nullptr) {
|
||||
@ -166,8 +166,8 @@ std::shared_ptr<ngraph::Node> decomposeFakeQuantize(
|
||||
std::shared_ptr<ngraph::Node> dequantize;
|
||||
if (intervalsAlignment != nullptr) {
|
||||
OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::LPT_LT, "decomposeFakeQuantize1");
|
||||
const std::vector<float> outputLowValues = as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(3))->cast_vector<float>();
|
||||
const std::vector<float> outputHighValues = as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(4))->cast_vector<float>();
|
||||
const std::vector<float> outputLowValues = ov::as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(3))->cast_vector<float>();
|
||||
const std::vector<float> outputHighValues = ov::as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(4))->cast_vector<float>();
|
||||
|
||||
float dequantizationMul;
|
||||
float dequantizationSub;
|
||||
@ -230,7 +230,7 @@ std::shared_ptr<ngraph::Node> decomposeFakeQuantize(
|
||||
OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::LPT_LT, "decomposeFakeQuantize2");
|
||||
// Split FakeQuantize to two parts: Quantize and Dequantize
|
||||
auto QDQ = NetworkHelper::decomposeFakeQuantize(
|
||||
as_type_ptr<opset1::FakeQuantize>(layer),
|
||||
ov::as_type_ptr<opset1::FakeQuantize>(layer),
|
||||
dataPrecision.precision,
|
||||
dataPrecision.min,
|
||||
dataPrecision.max,
|
||||
@ -251,7 +251,7 @@ std::shared_ptr<ngraph::Node> decomposeFakeQuantize(
|
||||
} // namespace fq_decomposition
|
||||
|
||||
bool FakeQuantizeDecompositionTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher& m) {
|
||||
auto layer = as_type_ptr<opset1::FakeQuantize>(m.get_match_root());
|
||||
auto layer = ov::as_type_ptr<opset1::FakeQuantize>(m.get_match_root());
|
||||
if (!NetworkHelper::isQuantizeSupported(layer)) {
|
||||
return false;
|
||||
}
|
||||
@ -343,8 +343,8 @@ bool FakeQuantizeDecompositionTransformation::transform(TransformationContext& c
|
||||
if (dataPrecision.precision == element::undefined) {
|
||||
element::Type precision;
|
||||
const auto levels = layer->get_levels();
|
||||
const std::vector<float> outputLowValues = as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(3))->cast_vector<float>();
|
||||
const std::vector<float> outputHighValues = as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(4))->cast_vector<float>();
|
||||
const std::vector<float> outputLowValues = ov::as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(3))->cast_vector<float>();
|
||||
const std::vector<float> outputHighValues = ov::as_type_ptr<opset1::Constant>(layer->get_input_node_shared_ptr(4))->cast_vector<float>();
|
||||
if (intervalsAlignment == nullptr) {
|
||||
// define precision by FakeQuantize intervals
|
||||
LayerTransformation::PrecisionDetails precisionDetailsAtOutputIntervals = LayerTransformation::getPrecisionDetails(
|
||||
|
@ -42,9 +42,9 @@ bool FakeQuantizeDequantization::multiplyHasZeroOrDenormal() const {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::shared_ptr<opset1::Constant> multiplyConstant = as_type_ptr<opset1::Constant>(multiply->get_input_node_shared_ptr(1));
|
||||
std::shared_ptr<opset1::Constant> multiplyConstant = ov::as_type_ptr<opset1::Constant>(multiply->get_input_node_shared_ptr(1));
|
||||
if (multiplyConstant == nullptr) {
|
||||
multiplyConstant = as_type_ptr<opset1::Constant>(multiply->get_input_node_shared_ptr(0));
|
||||
multiplyConstant = ov::as_type_ptr<opset1::Constant>(multiply->get_input_node_shared_ptr(0));
|
||||
}
|
||||
if (multiplyConstant == nullptr) {
|
||||
return false;
|
||||
@ -163,11 +163,11 @@ int FakeQuantizeDequantization::fillDequantizationParams(
|
||||
const size_t branchIndex,
|
||||
std::shared_ptr<ngraph::opset1::Convert>& convert,
|
||||
std::shared_ptr<ngraph::opset1::Constant>& constant) {
|
||||
convert = as_type_ptr<opset1::Convert>(elementwise->get_input_node_shared_ptr(branchIndex));
|
||||
convert = ov::as_type_ptr<opset1::Convert>(elementwise->get_input_node_shared_ptr(branchIndex));
|
||||
if (convert != nullptr) {
|
||||
constant = as_type_ptr<opset1::Constant>(convert->get_input_node_shared_ptr(0));
|
||||
constant = ov::as_type_ptr<opset1::Constant>(convert->get_input_node_shared_ptr(0));
|
||||
} else {
|
||||
constant = as_type_ptr<opset1::Constant>(elementwise->get_input_node_shared_ptr(branchIndex));
|
||||
constant = ov::as_type_ptr<opset1::Constant>(elementwise->get_input_node_shared_ptr(branchIndex));
|
||||
}
|
||||
};
|
||||
|
||||
@ -187,12 +187,12 @@ int FakeQuantizeDequantization::fillDequantizationParams(
|
||||
int FakeQuantizeDequantization::fillDequantizationParams(
|
||||
const std::shared_ptr<ngraph::Node>& elementwise,
|
||||
std::shared_ptr<ngraph::opset1::Constant>& constant) noexcept {
|
||||
constant = as_type_ptr<opset1::Constant>(elementwise->get_input_node_shared_ptr(1ul));
|
||||
constant = ov::as_type_ptr<opset1::Constant>(elementwise->get_input_node_shared_ptr(1ul));
|
||||
if (constant != nullptr) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
constant = as_type_ptr<opset1::Constant>(elementwise->get_input_node_shared_ptr(0ul));
|
||||
constant = ov::as_type_ptr<opset1::Constant>(elementwise->get_input_node_shared_ptr(0ul));
|
||||
if (constant != nullptr) {
|
||||
return 0;
|
||||
}
|
||||
|
@ -38,12 +38,12 @@ bool FoldConvertTransformation::transform(TransformationContext& context, ngraph
|
||||
|
||||
auto foldConvert = [&](const size_t branch) {
|
||||
const auto convert = subtract->get_input_node_shared_ptr(branch);
|
||||
if (!is_type<opset1::Convert>(convert) || !is_type<opset1::Constant>(convert->get_input_node_shared_ptr(0))) {
|
||||
if (!ov::is_type<opset1::Convert>(convert) || !ov::is_type<opset1::Constant>(convert->get_input_node_shared_ptr(0))) {
|
||||
return;
|
||||
}
|
||||
|
||||
const auto resultConstant = ngraph::pass::low_precision::foldConvert(convert->get_input_node_shared_ptr(0), convert->output(0).get_element_type());
|
||||
assert(is_type<opset1::Constant>(resultConstant));
|
||||
assert(ov::is_type<opset1::Constant>(resultConstant));
|
||||
|
||||
replace_node(convert, resultConstant);
|
||||
updateOutput(context, resultConstant, convert);
|
||||
@ -57,10 +57,10 @@ bool FoldConvertTransformation::transform(TransformationContext& context, ngraph
|
||||
|
||||
bool FoldConvertTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> operation) const {
|
||||
return
|
||||
(is_type<opset1::Convert>(operation->get_input_node_ptr(1)) &&
|
||||
is_type<opset1::Constant>(operation->get_input_node_ptr(1)->get_input_node_ptr(0))) ||
|
||||
(is_type<opset1::Convert>(operation->get_input_node_ptr(0)) &&
|
||||
is_type<opset1::Constant>(operation->get_input_node_ptr(0)->get_input_node_ptr(0)));
|
||||
(ov::is_type<opset1::Convert>(operation->get_input_node_ptr(1)) &&
|
||||
ov::is_type<opset1::Constant>(operation->get_input_node_ptr(1)->get_input_node_ptr(0))) ||
|
||||
(ov::is_type<opset1::Convert>(operation->get_input_node_ptr(0)) &&
|
||||
ov::is_type<opset1::Constant>(operation->get_input_node_ptr(0)->get_input_node_ptr(0)));
|
||||
}
|
||||
|
||||
bool FoldConvertTransformation::isPrecisionPreserved(std::shared_ptr<Node> layer) const noexcept {
|
||||
|
@ -33,7 +33,7 @@ FoldFakeQuantizeTransformation::FoldFakeQuantizeTransformation(const Params& par
|
||||
}
|
||||
|
||||
bool FoldFakeQuantizeTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
|
||||
const auto fakeQuantize = as_type_ptr<opset1::FakeQuantize>(m.get_match_root());
|
||||
const auto fakeQuantize = ov::as_type_ptr<opset1::FakeQuantize>(m.get_match_root());
|
||||
if (fakeQuantize == nullptr) {
|
||||
return false;
|
||||
}
|
||||
@ -51,7 +51,7 @@ bool FoldFakeQuantizeTransformation::transform(TransformationContext& context, n
|
||||
fakeQuantize,
|
||||
false,
|
||||
(constantShape.rank().get_length() < 2) || constantShape[1] != 1ul ? 1ul : 0ul);
|
||||
if (is_type<opset1::Constant>(resultConstant)) {
|
||||
if (ov::is_type<opset1::Constant>(resultConstant)) {
|
||||
replace_node(fakeQuantize, resultConstant);
|
||||
return true;
|
||||
}
|
||||
|
@ -62,26 +62,26 @@ bool FuseConvertTransformation::transform(TransformationContext& context, ngraph
return false;
}

const auto convert = as_type_ptr<opset1::Convert>(op->get_input_node_shared_ptr(0));
const auto convert = ov::as_type_ptr<opset1::Convert>(op->get_input_node_shared_ptr(0));
std::shared_ptr<Node> parent = convert->get_input_node_shared_ptr(0);

if (is_type<opset1::Constant>(parent)) {
if (ov::is_type<opset1::Constant>(parent)) {
auto convertedConstant = foldConvert(parent, convert->get_convert_element_type());
NetworkHelper::copyInfo(parent, convertedConstant);
replace_node(convert, convertedConstant);
} else {
std::shared_ptr<Node> newOp;
if (is_type<opset1::Subtract>(op)) {
auto subtract = as_type_ptr<opset1::Subtract>(op);
if (ov::is_type<opset1::Subtract>(op)) {
auto subtract = ov::as_type_ptr<opset1::Subtract>(op);
newOp = removeConvertIfPossibleForSubtract(convert, subtract);
} else if (is_type<opset1::Multiply>(op)) {
} else if (ov::is_type<opset1::Multiply>(op)) {
newOp = std::make_shared<ngraph::op::TypeRelaxed<opset1::Multiply>>(
std::vector<ngraph::element::Type>{ element::f32, element::f32 }, std::vector<ngraph::element::Type>{},
ngraph::op::TemporaryReplaceOutputType(convert->get_input_source_output(0), element::f32).get(),
ngraph::op::TemporaryReplaceOutputType(op->get_input_node_shared_ptr(1), element::f32).get());
NetworkHelper::setOutDataPrecisionForTypeRelaxed(newOp, op->get_output_element_type(0));
replace_node(op, newOp);
} else if (is_type<opset1::Add>(op)) {
} else if (ov::is_type<opset1::Add>(op)) {
newOp = std::make_shared<ngraph::op::TypeRelaxed<opset1::Add>>(
std::vector<ngraph::element::Type>{ element::f32, element::f32 }, std::vector<ngraph::element::Type>{},
ngraph::op::TemporaryReplaceOutputType(convert->get_input_source_output(0), element::f32).get(),
@ -103,7 +103,7 @@ bool FuseConvertTransformation::transform(TransformationContext& context, ngraph
}

bool FuseConvertTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> op) const {
const auto convert = as_type_ptr<opset1::Convert>(op->get_input_node_shared_ptr(0));
const auto convert = ov::as_type_ptr<opset1::Convert>(op->get_input_node_shared_ptr(0));
// issue #40395
if (convert == nullptr) {
return false;

|
||||
}
|
||||
|
||||
bool FuseFakeQuantizeTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
|
||||
std::shared_ptr<opset1::FakeQuantize> fakeQuantize = as_type_ptr<ngraph::opset1::FakeQuantize>(m.get_match_root());
|
||||
std::shared_ptr<opset1::FakeQuantize> fakeQuantize = ov::as_type_ptr<ngraph::opset1::FakeQuantize>(m.get_match_root());
|
||||
do {
|
||||
fakeQuantize = handle(context, fakeQuantize);
|
||||
} while (fakeQuantize != nullptr);
|
||||
@ -55,11 +55,11 @@ std::shared_ptr<Node> updateShape(std::shared_ptr<Node> op, const PartialShape&
|
||||
}
|
||||
|
||||
std::shared_ptr<Node> getData(const std::shared_ptr<Node>& eltwise) {
|
||||
if (!is_type<opset1::Constant>(eltwise->get_input_node_shared_ptr(0))) {
|
||||
if (!ov::is_type<opset1::Constant>(eltwise->get_input_node_shared_ptr(0))) {
|
||||
return eltwise->get_input_node_shared_ptr(0);
|
||||
}
|
||||
|
||||
if (!is_type<opset1::Constant>(eltwise->get_input_node_shared_ptr(1))) {
|
||||
if (!ov::is_type<opset1::Constant>(eltwise->get_input_node_shared_ptr(1))) {
|
||||
return eltwise->get_input_node_shared_ptr(1);
|
||||
}
|
||||
|
||||
@ -71,12 +71,12 @@ std::shared_ptr<opset1::Constant> getConstant(const std::shared_ptr<Node>& eltwi
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::shared_ptr<opset1::Constant> constant = as_type_ptr<opset1::Constant>(eltwise->get_input_node_shared_ptr(1));
|
||||
std::shared_ptr<opset1::Constant> constant = ov::as_type_ptr<opset1::Constant>(eltwise->get_input_node_shared_ptr(1));
|
||||
if (constant != nullptr) {
|
||||
return constant;
|
||||
}
|
||||
|
||||
return as_type_ptr<opset1::Constant>(eltwise->get_input_node_shared_ptr(0));
|
||||
return ov::as_type_ptr<opset1::Constant>(eltwise->get_input_node_shared_ptr(0));
|
||||
}
|
||||
|
||||
bool eltwiseWithConstant(const std::shared_ptr<Node>& eltwise) {
|
||||
@ -122,30 +122,30 @@ std::shared_ptr<opset1::FakeQuantize> FuseFakeQuantizeTransformation::handle(
|
||||
std::shared_ptr<Node> inputHightConst = fakeQuantize->get_input_node_shared_ptr(2);
|
||||
|
||||
std::shared_ptr<opset1::Constant> constant = fuse_fq::getConstant(eltwise);
|
||||
if (is_type<opset1::Multiply>(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
|
||||
if (ov::is_type<opset1::Multiply>(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
|
||||
const auto value = constant->get_output_element_type(0) == eltwise->get_output_element_type(0) ?
|
||||
constant :
|
||||
foldConvert(constant, eltwise->get_output_element_type(0));
|
||||
|
||||
inputLowConst = fuse_fq::updateShape(fold<opset1::Divide>(inputLowConst, value), fakeQuantize->get_output_partial_shape(0));
|
||||
inputHightConst = fuse_fq::updateShape(fold<opset1::Divide>(inputHightConst, value), fakeQuantize->get_output_partial_shape(0));
|
||||
} else if (is_type<opset1::Divide>(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
|
||||
} else if (ov::is_type<opset1::Divide>(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
|
||||
const auto value = constant->get_output_element_type(0) == eltwise->get_output_element_type(0) ?
|
||||
constant :
|
||||
foldConvert(constant, eltwise->get_output_element_type(0));
|
||||
|
||||
inputLowConst = fuse_fq::updateShape(fold<opset1::Multiply>(inputLowConst, value), fakeQuantize->get_output_partial_shape(0));
|
||||
inputHightConst = fuse_fq::updateShape(fold<opset1::Multiply>(inputHightConst, value), fakeQuantize->get_output_partial_shape(0));
|
||||
} else if (is_type<opset1::Subtract>(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
|
||||
} else if (ov::is_type<opset1::Subtract>(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
|
||||
const auto value = constant->get_output_element_type(0) == eltwise->get_output_element_type(0) ?
|
||||
constant :
|
||||
foldConvert(constant, eltwise->get_output_element_type(0));
|
||||
|
||||
inputLowConst = fuse_fq::updateShape(fold<opset1::Add>(inputLowConst, value), fakeQuantize->get_output_partial_shape(0));
|
||||
inputHightConst = fuse_fq::updateShape(fold<opset1::Add>(inputHightConst, value), fakeQuantize->get_output_partial_shape(0));
|
||||
} else if (is_type<opset1::Add>(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
|
||||
if (is_type<opset1::Convolution>(fuse_fq::getData(eltwise)) ||
|
||||
is_type<opset1::GroupConvolution>(fuse_fq::getData(eltwise))) {
|
||||
} else if (ov::is_type<opset1::Add>(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
|
||||
if (ov::is_type<opset1::Convolution>(fuse_fq::getData(eltwise)) ||
|
||||
ov::is_type<opset1::GroupConvolution>(fuse_fq::getData(eltwise))) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
@ -155,7 +155,7 @@ std::shared_ptr<opset1::FakeQuantize> FuseFakeQuantizeTransformation::handle(
|
||||
|
||||
inputLowConst = fuse_fq::updateShape(fold<opset1::Subtract>(inputLowConst, value), fakeQuantize->get_output_partial_shape(0));
|
||||
inputHightConst = fuse_fq::updateShape(fold<opset1::Subtract>(inputHightConst, value), fakeQuantize->get_output_partial_shape(0));
|
||||
} else if (is_type<opset1::Convert>(eltwise)) {
|
||||
} else if (ov::is_type<opset1::Convert>(eltwise)) {
|
||||
// issue #40611
|
||||
if ((eltwise->input(0).get_element_type() == element::i32) && (eltwise->output(0).get_element_type() == element::f32)) {
|
||||
return nullptr;
|
||||
@ -164,7 +164,7 @@ std::shared_ptr<opset1::FakeQuantize> FuseFakeQuantizeTransformation::handle(
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::shared_ptr<opset1::FakeQuantize> newFakeQuantize = as_type_ptr<opset1::FakeQuantize>(fakeQuantize->clone_with_new_inputs({
|
||||
std::shared_ptr<opset1::FakeQuantize> newFakeQuantize = ov::as_type_ptr<opset1::FakeQuantize>(fakeQuantize->clone_with_new_inputs({
|
||||
fuse_fq::getData(eltwise),
|
||||
inputLowConst,
|
||||
inputHightConst,
|
||||
|
@ -38,11 +38,11 @@ bool FuseMultiplyToFakeQuantizeTransformation::transform(TransformationContext&
|
||||
}
|
||||
|
||||
const auto parent = multiply->get_input_node_shared_ptr(0);
|
||||
auto fakeQuantize = as_type_ptr<opset1::FakeQuantize>(parent);
|
||||
const auto convert = as_type_ptr<opset1::Convert>(parent);
|
||||
auto fakeQuantize = ov::as_type_ptr<opset1::FakeQuantize>(parent);
|
||||
const auto convert = ov::as_type_ptr<opset1::Convert>(parent);
|
||||
|
||||
if (convert) {
|
||||
fakeQuantize = as_type_ptr<opset1::FakeQuantize>(convert->get_input_node_shared_ptr(0));
|
||||
fakeQuantize = ov::as_type_ptr<opset1::FakeQuantize>(convert->get_input_node_shared_ptr(0));
|
||||
}
|
||||
|
||||
const auto multiplyConstant = multiply->get_input_node_shared_ptr(1);
|
||||
@ -90,7 +90,7 @@ bool FuseMultiplyToFakeQuantizeTransformation::transform(TransformationContext&
|
||||
}
|
||||
|
||||
bool FuseMultiplyToFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> operation) const {
|
||||
if (!is_type<opset1::Constant>(operation->get_input_node_shared_ptr(1))) {
|
||||
if (!ov::is_type<opset1::Constant>(operation->get_input_node_shared_ptr(1))) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -99,11 +99,11 @@ bool FuseMultiplyToFakeQuantizeTransformation::canBeTransformed(const Transforma
|
||||
}
|
||||
|
||||
const auto parent = operation->get_input_node_shared_ptr(0);
|
||||
auto fq = as_type_ptr<opset1::FakeQuantize>(parent);
|
||||
const auto convert = as_type_ptr<opset1::Convert>(parent);
|
||||
auto fq = ov::as_type_ptr<opset1::FakeQuantize>(parent);
|
||||
const auto convert = ov::as_type_ptr<opset1::Convert>(parent);
|
||||
|
||||
if (convert) {
|
||||
fq = as_type_ptr<opset1::FakeQuantize>(convert->get_input_node_shared_ptr(0));
|
||||
fq = ov::as_type_ptr<opset1::FakeQuantize>(convert->get_input_node_shared_ptr(0));
|
||||
}
|
||||
|
||||
if (!fq) {
|
||||
|
@ -37,11 +37,11 @@ bool FuseSubtractToFakeQuantizeTransformation::transform(TransformationContext&
|
||||
}
|
||||
|
||||
const auto parent = subtract->get_input_node_shared_ptr(0);
|
||||
auto fakeQuantize = as_type_ptr<opset1::FakeQuantize>(parent);
|
||||
const auto convert = as_type_ptr<opset1::Convert>(parent);
|
||||
auto fakeQuantize = ov::as_type_ptr<opset1::FakeQuantize>(parent);
|
||||
const auto convert = ov::as_type_ptr<opset1::Convert>(parent);
|
||||
|
||||
if (convert) {
|
||||
fakeQuantize = as_type_ptr<opset1::FakeQuantize>(convert->get_input_node_shared_ptr(0));
|
||||
fakeQuantize = ov::as_type_ptr<opset1::FakeQuantize>(convert->get_input_node_shared_ptr(0));
|
||||
}
|
||||
|
||||
const auto subtractConstant = subtract->get_input_node_shared_ptr(1);
|
||||
@ -84,7 +84,7 @@ bool FuseSubtractToFakeQuantizeTransformation::transform(TransformationContext&
|
||||
}
|
||||
|
||||
bool FuseSubtractToFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> operation) const {
|
||||
if (!is_type<opset1::Constant>(operation->get_input_node_shared_ptr(1))) {
|
||||
if (!ov::is_type<opset1::Constant>(operation->get_input_node_shared_ptr(1))) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -95,20 +95,20 @@ bool FuseSubtractToFakeQuantizeTransformation::canBeTransformed(const Transforma
|
||||
const auto children = operation->get_output_target_inputs(0);
|
||||
|
||||
for (const auto& target : children) {
|
||||
const auto convolution = is_type<opset1::Convolution>(target.get_node());
|
||||
const auto groupConvolution = is_type<opset1::GroupConvolution>(target.get_node());
|
||||
const auto convolutionBackpropData = is_type<opset1::ConvolutionBackpropData>(target.get_node());
|
||||
const auto convolution = ov::is_type<opset1::Convolution>(target.get_node());
|
||||
const auto groupConvolution = ov::is_type<opset1::GroupConvolution>(target.get_node());
|
||||
const auto convolutionBackpropData = ov::is_type<opset1::ConvolutionBackpropData>(target.get_node());
|
||||
if (convolution || groupConvolution || convolutionBackpropData) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
const auto parent = operation->get_input_node_shared_ptr(0);
|
||||
auto fq = as_type_ptr<opset1::FakeQuantize>(parent);
|
||||
const auto convert = as_type_ptr<opset1::Convert>(parent);
|
||||
auto fq = ov::as_type_ptr<opset1::FakeQuantize>(parent);
|
||||
const auto convert = ov::as_type_ptr<opset1::Convert>(parent);
|
||||
|
||||
if (convert) {
|
||||
fq = as_type_ptr<opset1::FakeQuantize>(convert->get_input_node_shared_ptr(0));
|
||||
fq = ov::as_type_ptr<opset1::FakeQuantize>(convert->get_input_node_shared_ptr(0));
|
||||
}
|
||||
|
||||
if (!fq) {
|
||||
|
@ -63,13 +63,13 @@ bool InterpolateTransformation::transform(TransformationContext &context, ngraph
|
||||
}
|
||||
|
||||
bool InterpolateTransformation::isPrecisionPreserved(std::shared_ptr<Node> layer) const noexcept {
|
||||
std::shared_ptr<opset1::Interpolate> interpolate1 = as_type_ptr<opset1::Interpolate>(layer);
|
||||
std::shared_ptr<opset1::Interpolate> interpolate1 = ov::as_type_ptr<opset1::Interpolate>(layer);
|
||||
if (interpolate1) {
|
||||
const auto attrs = interpolate1->get_attrs();
|
||||
return attrs.mode == "nearest";
|
||||
}
|
||||
|
||||
std::shared_ptr<opset4::Interpolate> interpolate4 = as_type_ptr<opset4::Interpolate>(layer);
|
||||
std::shared_ptr<opset4::Interpolate> interpolate4 = ov::as_type_ptr<opset4::Interpolate>(layer);
|
||||
if (interpolate4) {
|
||||
const auto attrs = interpolate4->get_attrs();
|
||||
return attrs.mode == op::v4::Interpolate::InterpolateMode::NEAREST;
|
||||
@ -90,7 +90,7 @@ bool InterpolateTransformation::canBeTransformed(const TransformationContext& co
|
||||
return false;
|
||||
}
|
||||
|
||||
const auto interpolate1 = as_type_ptr<opset1::Interpolate>(layer);
|
||||
const auto interpolate1 = ov::as_type_ptr<opset1::Interpolate>(layer);
|
||||
if (interpolate1) {
|
||||
const auto interpAttrs = interpolate1->get_attrs();
|
||||
if (interpAttrs.axes.count(0) || interpAttrs.axes.count(1)) {
|
||||
@ -104,7 +104,7 @@ bool InterpolateTransformation::canBeTransformed(const TransformationContext& co
|
||||
}
|
||||
}
|
||||
|
||||
const auto interpolate4 = as_type_ptr<opset4::Interpolate>(layer);
|
||||
const auto interpolate4 = ov::as_type_ptr<opset4::Interpolate>(layer);
|
||||
if (interpolate4) {
|
||||
const auto interpAttrs = interpolate4->get_attrs();
|
||||
|
||||
|
@ -147,9 +147,9 @@ bool LayerTransformation::canSubtractBeHandled(const std::shared_ptr<Node>& op,

const auto parent = dequantization.subtract->input_value(1).get_node_shared_ptr();

if (is_type<opset1::Constant>(parent)) {
if (ov::is_type<opset1::Constant>(parent)) {
return true;
} else if (is_type<opset1::Convert>(parent) && is_type<opset1::Constant>(parent->get_input_node_shared_ptr(0))) {
} else if (ov::is_type<opset1::Convert>(parent) && ov::is_type<opset1::Constant>(parent->get_input_node_shared_ptr(0))) {
const auto constant = parent->get_input_node_shared_ptr(0);
const auto constantType = constant->output(0).get_element_type();
return operationType == constantType;
@ -171,7 +171,7 @@ std::stringstream toStream(const std::vector<float>& dequantizationValues) {
}

void LayerTransformation::printDequantizationInfo(const std::shared_ptr<Node>& layer) {
const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(as_type_ptr<opset1::FakeQuantize>(layer));
const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(ov::as_type_ptr<opset1::FakeQuantize>(layer));
std::cout <<
layer->get_type_name() << (NetworkHelper::isConstantPath(layer) ? " on weights " : " on activations ") <<
layer->get_friendly_name() << ":" << std::endl <<
@ -337,7 +337,7 @@ void LayerTransformation::updateOutput(
// TODO: not tested!!!
for (auto output : lastNode->outputs()) {
for (auto input : output.get_target_inputs()) {
if (is_type<ngraph::opset1::Result>(input.get_node())) {
if (ov::is_type<ngraph::opset1::Result>(input.get_node())) {
const std::string originalName = originalNode->get_friendly_name();
originalNode->set_friendly_name(originalName + LayerTransformation::originalLayerPostfix);
lastNode->set_friendly_name(originalName);

@ -95,7 +95,7 @@ void make_matcher_type_relaxed(ngraph::pass::GraphRewrite* transformation) {
using namespace ngraph;

auto is_op_type = [](std::shared_ptr<Node> n) {
return !!as_type_ptr<BaseOp>(n);
return !!ov::as_type_ptr<BaseOp>(n);
};

auto p_node = std::make_shared<pattern::op::Label>(element::f32, Shape{}, is_op_type);
@ -270,7 +270,7 @@ bool ngraph::pass::low_precision::LowPrecision::isFunctionQuantized(const std::s
continue;
}

const std::shared_ptr<ngraph::opset1::FakeQuantize> fakeQuantize = as_type_ptr<ngraph::opset1::FakeQuantize>(parent);
const std::shared_ptr<ngraph::opset1::FakeQuantize> fakeQuantize = ov::as_type_ptr<ngraph::opset1::FakeQuantize>(parent);
if ((fakeQuantize != nullptr) &&
QuantizationDetails::outputLayoutIsSupported(fakeQuantize) &&
QuantizationDetails::isSupportedLevel(fakeQuantize->get_levels())) {

@ -83,7 +83,7 @@ bool ngraph::pass::low_precision::MarkupPrecisions::run_on_function(std::shared_

// TODO: don't need to set restrictions for not supported operations
// if don't set restrictions for not supported operations then accuracy drop appears, issue #59197
const bool supported = is_type<opset1::Result>(node) || isSupported(node);
const bool supported = ov::is_type<opset1::Result>(node) || isSupported(node);
if (!supported || !LayerTransformation::canBeTransformedStatic(node)) {
setRestriction(node, std::vector<std::pair<size_t, std::vector<ngraph::element::Type>>> { {0ul, {}}});
continue;
@ -157,14 +157,14 @@ bool ngraph::pass::low_precision::MarkupPrecisions::isPrecisionPreserved(const s
return precisionPreserved;
}

if (is_type<opset1::Interpolate>(node)) {
std::shared_ptr<opset1::Interpolate> interpolate1 = as_type_ptr<opset1::Interpolate>(node);
if (ov::is_type<opset1::Interpolate>(node)) {
std::shared_ptr<opset1::Interpolate> interpolate1 = ov::as_type_ptr<opset1::Interpolate>(node);
if (interpolate1) {
const auto attrs = interpolate1->get_attrs();
return attrs.mode == "nearest";
}

std::shared_ptr<opset4::Interpolate> interpolate4 = as_type_ptr<opset4::Interpolate>(node);
std::shared_ptr<opset4::Interpolate> interpolate4 = ov::as_type_ptr<opset4::Interpolate>(node);
if (interpolate4) {
const auto attrs = interpolate4->get_attrs();
return attrs.mode == op::v4::Interpolate::InterpolateMode::NEAREST;

@ -40,18 +40,18 @@ MatMulTransformation::MatMulTransformation(const Params& params) : LayerTransfor
}

bool MatMulTransformation::transform(TransformationContext &context, ngraph::pattern::Matcher &m) {
std::shared_ptr<opset1::MatMul> matMul = as_type_ptr<opset1::MatMul>(m.get_match_root());
std::shared_ptr<opset1::MatMul> matMul = ov::as_type_ptr<opset1::MatMul>(m.get_match_root());
if ((matMul == nullptr) || !canBeTransformed(context, matMul)) {
return false;
}

matMul = as_type_ptr<opset1::MatMul>(NetworkHelper::separateInStandaloneBranch(matMul));
matMul = ov::as_type_ptr<opset1::MatMul>(NetworkHelper::separateInStandaloneBranch(matMul));
const auto dequantization1 = NetworkHelper::getDequantization(matMul, 0);
auto dequantization2 = NetworkHelper::getDequantization(matMul, 1);

if (dequantization2.empty()) {
const std::shared_ptr<opset1::FakeQuantize> fakeQuantize =
as_type_ptr<opset1::FakeQuantize>(dequantization2.data.get_node_shared_ptr());
ov::as_type_ptr<opset1::FakeQuantize>(dequantization2.data.get_node_shared_ptr());
if (fakeQuantize != nullptr) {
const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(fakeQuantize);

@ -90,7 +90,7 @@ bool MatMulTransformation::transform(TransformationContext &context, ngraph::pat

// dequantization with subtract on activations & constant weights
if (dequantization1.subtract) {
auto broadcastShape = NetworkHelper::isScalarLike(as_type_ptr<opset1::Constant>(dequantization1.subtractConstant)) ?
auto broadcastShape = NetworkHelper::isScalarLike(ov::as_type_ptr<opset1::Constant>(dequantization1.subtractConstant)) ?
Shape(dequantization1.subtract->get_output_partial_shape(0).rank().get_length(), 1) :
dequantization1.subtractConstant->get_shape();

@ -139,8 +139,8 @@ bool MatMulTransformation::transform(TransformationContext &context, ngraph::pat
const auto mulConst1 = matMul->get_transpose_a() ? transpose(dequantization1.multiplyConstant) : dequantization1.multiplyConstant;
auto mulConst2 = matMul->get_transpose_b() ? transpose(dequantization2.multiplyConstant) : dequantization2.multiplyConstant;

if (NetworkHelper::isScalarLike(as_type_ptr<opset1::Constant>(mulConst2))) {
mulConst2 = NetworkHelper::toScalar(as_type_ptr<opset1::Constant>(mulConst2));
if (NetworkHelper::isScalarLike(ov::as_type_ptr<opset1::Constant>(mulConst2))) {
mulConst2 = NetworkHelper::toScalar(ov::as_type_ptr<opset1::Constant>(mulConst2));
} else {
const auto constShape = mulConst2->get_shape();
const size_t inputRank = matMul->get_input_partial_shape(0).rank().get_length();
@ -194,7 +194,7 @@ bool MatMulTransformation::canBeTransformed(const TransformationContext& context
return false;
}

std::shared_ptr<opset1::MatMul> matMul = as_type_ptr<opset1::MatMul>(layer);
std::shared_ptr<opset1::MatMul> matMul = ov::as_type_ptr<opset1::MatMul>(layer);
if (matMul == nullptr) {
return false;
}
@ -252,7 +252,7 @@ bool MatMulTransformation::canBeTransformed(const TransformationContext& context
}
}

const auto fakeQuantize = as_type_ptr<opset1::FakeQuantize>(layer->get_input_node_shared_ptr(1));
const auto fakeQuantize = ov::as_type_ptr<opset1::FakeQuantize>(layer->get_input_node_shared_ptr(1));
if (fakeQuantize) {
if (!QuantizationDetails::outputLayoutIsSupported(fakeQuantize)) {
return false;

@ -43,7 +43,7 @@ bool MaxPoolTransformation::canBeTransformed(const TransformationContext& contex
return false;
}

const std::vector<float> scales = as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(1))->cast_vector<float>();
const std::vector<float> scales = ov::as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(1))->cast_vector<float>();
if (std::any_of(scales.begin(), scales.end(), [](const float value) { return value < 0.0; })) {
return false;
}

@ -52,10 +52,10 @@ bool MultiplyTransformation::transform(TransformationContext& context, ngraph::p
auto newMultiply = multiply;

auto fold_fake_quantizes = [](std::shared_ptr<Node>& multiply, const size_t index) {
auto fakeQuantizeOnWeights = as_type_ptr<opset1::FakeQuantize>(multiply->get_input_node_shared_ptr(index));
auto fakeQuantizeOnWeights = ov::as_type_ptr<opset1::FakeQuantize>(multiply->get_input_node_shared_ptr(index));
if (fakeQuantizeOnWeights != nullptr) {
auto result = NetworkHelper::fold_fake_quantize(fakeQuantizeOnWeights);
if (is_type<opset1::Constant>(result)) {
if (ov::is_type<opset1::Constant>(result)) {
replace_node(fakeQuantizeOnWeights, result);
}
}
@ -165,14 +165,14 @@ bool MultiplyTransformation::canBeTransformed(const TransformationContext& conte
FakeQuantizeDequantization dequantization2 = pass::low_precision::NetworkHelper::getDequantization(layer, 1ul);

if ((dequantization1.data.get_node() == nullptr) ||
(dequantization1.empty() && !is_type<opset1::Constant>(dequantization1.data.get_node_shared_ptr()) &&
!is_type<opset1::Constant>(dequantization2.data.get_node_shared_ptr()))) {
(dequantization1.empty() && !ov::is_type<opset1::Constant>(dequantization1.data.get_node_shared_ptr()) &&
!ov::is_type<opset1::Constant>(dequantization2.data.get_node_shared_ptr()))) {
return false;
}

if ((dequantization2.data.get_node() == nullptr) ||
(dequantization2.empty() && !is_type<opset1::Constant>(dequantization2.data.get_node_shared_ptr()) &&
!is_type<opset1::Constant>(dequantization1.data.get_node_shared_ptr()))) {
(dequantization2.empty() && !ov::is_type<opset1::Constant>(dequantization2.data.get_node_shared_ptr()) &&
!ov::is_type<opset1::Constant>(dequantization1.data.get_node_shared_ptr()))) {
return false;
}
return EltwiseBaseTransformation::canBeTransformed(context, layer);

@ -40,7 +40,7 @@ bool MultiplyToGroupConvolutionTransformation::transform(TransformationContext&
|
||||
auto input = multiply->get_input_node_shared_ptr(0);
|
||||
auto constant = multiply->get_input_node_shared_ptr(1);
|
||||
auto inputIndex = 0;
|
||||
if (!is_type<opset1::Constant>(constant)) {
|
||||
if (!ov::is_type<opset1::Constant>(constant)) {
|
||||
input = multiply->get_input_node_shared_ptr(1);
|
||||
constant = multiply->get_input_node_shared_ptr(0);
|
||||
inputIndex = 1;
|
||||
@ -164,15 +164,15 @@ bool MultiplyToGroupConvolutionTransformation::canBeTransformed(const Transforma
|
||||
|
||||
Shape constShape;
|
||||
int inputIndex;
|
||||
if (const auto constant = as_type_ptr<opset1::Constant>(operation->get_input_node_shared_ptr(1))) {
|
||||
if (const auto constant = ov::as_type_ptr<opset1::Constant>(operation->get_input_node_shared_ptr(1))) {
|
||||
inputIndex = 0;
|
||||
constShape = constant->get_shape();
|
||||
if (is_type<opset1::Constant>(operation->get_input_node_shared_ptr(0)) ||
|
||||
(is_type<opset1::Subtract>(operation->get_input_node_shared_ptr(0)) &&
|
||||
is_type<opset1::Constant>(operation->get_input_node_shared_ptr(0)->get_input_node_shared_ptr(0)))) {
|
||||
if (ov::is_type<opset1::Constant>(operation->get_input_node_shared_ptr(0)) ||
|
||||
(ov::is_type<opset1::Subtract>(operation->get_input_node_shared_ptr(0)) &&
|
||||
ov::is_type<opset1::Constant>(operation->get_input_node_shared_ptr(0)->get_input_node_shared_ptr(0)))) {
|
||||
return false;
|
||||
}
|
||||
} else if (const auto constant = as_type_ptr<opset1::Constant>(operation->get_input_node_shared_ptr(0))) {
|
||||
} else if (const auto constant = ov::as_type_ptr<opset1::Constant>(operation->get_input_node_shared_ptr(0))) {
|
||||
inputIndex = 1;
|
||||
constShape = constant->get_shape();
|
||||
} else {
|
||||
@ -209,7 +209,7 @@ bool MultiplyToGroupConvolutionTransformation::canBeTransformedToGroupConvolutio
|
||||
const auto parent0 = layer->get_input_node_shared_ptr(0);
|
||||
const auto parent1 = layer->get_input_node_shared_ptr(1);
|
||||
|
||||
if (!is_type<opset1::Constant>(parent0) && !is_type<opset1::Constant>(parent1)) {
|
||||
if (!ov::is_type<opset1::Constant>(parent0) && !ov::is_type<opset1::Constant>(parent1)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -224,10 +224,10 @@ bool MultiplyToGroupConvolutionTransformation::canBeTransformedToGroupConvolutio
|
||||
|
||||
bool MultiplyToGroupConvolutionTransformation::isDynamicOrScalar(const std::shared_ptr<const Node>& node) {
|
||||
auto getConstantIndex = [](const std::shared_ptr<const Node>& node) -> int {
|
||||
if (is_type<opset1::Constant>(node->get_input_node_shared_ptr(1))) {
|
||||
if (ov::is_type<opset1::Constant>(node->get_input_node_shared_ptr(1))) {
|
||||
return 1;
|
||||
}
|
||||
if (is_type<opset1::Constant>(node->get_input_node_shared_ptr(0))) {
|
||||
if (ov::is_type<opset1::Constant>(node->get_input_node_shared_ptr(0))) {
|
||||
return 0;
|
||||
}
|
||||
return -1;
|
||||
|
@ -71,22 +71,22 @@ bool MVNTransformation::canBeTransformed(const TransformationContext& context, s
return false;
}

std::shared_ptr<Node> mvn = as_type_ptr<op::MVN>(operation);
std::shared_ptr<Node> mvn = ov::as_type_ptr<op::MVN>(operation);
if (!mvn) {
mvn = as_type_ptr<opset6::MVN>(operation);
mvn = ov::as_type_ptr<opset6::MVN>(operation);
if (!mvn) {
return false;
}
}

const auto scalesConst = as_type_ptr<opset1::Constant>(NetworkHelper::getConstantInput(mvn->get_input_node_shared_ptr(0)));
const auto scalesConst = ov::as_type_ptr<opset1::Constant>(NetworkHelper::getConstantInput(mvn->get_input_node_shared_ptr(0)));
bool isScalarScales = NetworkHelper::isScalarLike(scalesConst);

AxisSet reduction_axes;
if (is_type<op::MVN>(mvn)) {
reduction_axes = as_type_ptr<op::MVN>(mvn)->get_reduction_axes();
if (ov::is_type<op::MVN>(mvn)) {
reduction_axes = ov::as_type_ptr<op::MVN>(mvn)->get_reduction_axes();
} else {
reduction_axes = as_type_ptr<opset1::Constant>(mvn->get_input_node_shared_ptr(1))->get_axis_set_val();
reduction_axes = ov::as_type_ptr<opset1::Constant>(mvn->get_input_node_shared_ptr(1))->get_axis_set_val();
}

if (reduction_axes.count(1) == 0) {
@ -115,22 +115,22 @@ bool MVNTransformation::transform(TransformationContext &context, ngraph::patter
return false;
}

std::shared_ptr<Node> mvn = as_type_ptr<op::MVN>(operation);
std::shared_ptr<Node> mvn = ov::as_type_ptr<op::MVN>(operation);
if (!mvn) {
mvn = as_type_ptr<opset6::MVN>(operation);
mvn = ov::as_type_ptr<opset6::MVN>(operation);
}

bool normalizeVariance;
if (is_type<op::MVN>(mvn)) {
normalizeVariance = as_type_ptr<op::MVN>(mvn)->get_normalize_variance();
if (ov::is_type<op::MVN>(mvn)) {
normalizeVariance = ov::as_type_ptr<op::MVN>(mvn)->get_normalize_variance();
} else {
normalizeVariance = as_type_ptr<opset6::MVN>(mvn)->get_normalize_variance();
normalizeVariance = ov::as_type_ptr<opset6::MVN>(mvn)->get_normalize_variance();
}

FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(mvn);
auto scalesConst = as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(1));
auto scalesConst = ov::as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(1));
if (scalesConst == nullptr) {
scalesConst = as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(0));
scalesConst = ov::as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(0));
}

auto newScalesConst = scalesConst;
@ -151,7 +151,7 @@ bool MVNTransformation::transform(TransformationContext &context, ngraph::patter
}
}
std::shared_ptr<Node> newMVN;
if (is_type<op::MVN>(mvn)) {
if (ov::is_type<op::MVN>(mvn)) {
newMVN = mvn->copy_with_new_inputs({dequantization.data});
} else {
newMVN = mvn->copy_with_new_inputs({dequantization.data, mvn->get_input_node_shared_ptr(1)});

@ -42,7 +42,7 @@ bool NetworkHelper::notAllChildrensAreFQ(const NodeVector& childrens) {
// NOTE: This check was added for models that don't have FQ after AvgPool
// They will have transparent precision as it was in old LPT.
for (const auto& child : childrens) {
if (!is_type<opset1::FakeQuantize>(child)) {
if (!ov::is_type<opset1::FakeQuantize>(child)) {
return true;
}
}
@ -69,11 +69,11 @@ std::vector<std::shared_ptr<Node>> NetworkHelper::consumers(std::shared_ptr<Node

bool NetworkHelper::isConstantPath(const std::shared_ptr<Node>& op) {
const auto isNotConstantPathOperation = [](const std::shared_ptr<Node>& node) -> bool {
return is_type<opset1::Parameter>(node) ||
is_type<opset1::Convolution>(node) ||
is_type<opset1::GroupConvolution>(node) ||
is_type<opset1::MatMul>(node) ||
is_type<opset1::ConvolutionBackpropData>(node);
return ov::is_type<opset1::Parameter>(node) ||
ov::is_type<opset1::Convolution>(node) ||
ov::is_type<opset1::GroupConvolution>(node) ||
ov::is_type<opset1::MatMul>(node) ||
ov::is_type<opset1::ConvolutionBackpropData>(node);
};

if (isNotConstantPathOperation(op)) {
@ -123,7 +123,7 @@ std::shared_ptr<opset1::Constant> NetworkHelper::foldDequantizationConstant(
// constant folding of constant
op->constant_fold(outputs, inputs);

const auto result = as_type_ptr<opset1::Constant>(outputs[outIdx].get_node_shared_ptr());
const auto result = ov::as_type_ptr<opset1::Constant>(outputs[outIdx].get_node_shared_ptr());
if (result == nullptr) {
THROW_IE_LPT_EXCEPTION(*result) << "result of constant folding is not constant";
}
@ -191,9 +191,9 @@ size_t NetworkHelper::getInputChannelsCount(std::shared_ptr<Node> layer) {
}

size_t NetworkHelper::getGroupsCount(std::shared_ptr<Node> layer) {
if (is_type<opset1::Convolution>(layer)) {
if (ov::is_type<opset1::Convolution>(layer)) {
return 1;
} else if (is_type<opset1::GroupConvolution>(layer)) {
} else if (ov::is_type<opset1::GroupConvolution>(layer)) {
return layer->get_input_partial_shape(1)[0].get_length(); // input weights for opset1::GC is in format GOI..., see the specification
} else {
THROW_TRANSFORMATION_EXCEPTION << "Invalid layer type of " << layer->get_friendly_name() << "; expected Convolution or GroupConvolution";
@ -221,13 +221,13 @@ std::shared_ptr<Node> NetworkHelper::swapMultiplyAndAdd(std::shared_ptr<opset1::
const auto multiplyParent1 = multiply->get_input_node_shared_ptr(0);
const auto multiplyParent2 = multiply->get_input_node_shared_ptr(1);

auto multiplyInput = as_type_ptr<opset1::Multiply>(multiplyParent1);
auto multiplyConst = as_type_ptr<opset1::Constant>(multiplyParent2);
auto multiplyInput = ov::as_type_ptr<opset1::Multiply>(multiplyParent1);
auto multiplyConst = ov::as_type_ptr<opset1::Constant>(multiplyParent2);
int multiplyInputBranch = 0;

if (multiplyConst == nullptr) {
multiplyInput = as_type_ptr<opset1::Multiply>(multiplyParent2);
multiplyConst = as_type_ptr<opset1::Constant>(multiplyParent1);
multiplyInput = ov::as_type_ptr<opset1::Multiply>(multiplyParent2);
multiplyConst = ov::as_type_ptr<opset1::Constant>(multiplyParent1);
multiplyInputBranch = 1;
}

@ -249,8 +249,8 @@ std::shared_ptr<Node> NetworkHelper::swapMultiplyAndAdd(std::shared_ptr<opset1::

if ((shape_size(bShape) == 1) || (shape_size(aShape) == 1) || (shape_size(bShape) == shape_size(aShape))) {
// safely division to avoid NaN
const std::vector<float> bValues = as_type_ptr<opset1::Constant>(b)->cast_vector<float>();
const std::vector<float> aValues = as_type_ptr<opset1::Constant>(a)->cast_vector<float>();
const std::vector<float> bValues = ov::as_type_ptr<opset1::Constant>(b)->cast_vector<float>();
const std::vector<float> aValues = ov::as_type_ptr<opset1::Constant>(a)->cast_vector<float>();
const bool aBroadcasted = bValues.size() > aValues.size();
const bool bBroadcasted = bValues.size() < aValues.size();
std::vector<float> bDivAValues(aBroadcasted ? bValues.size() : aValues.size());
@ -399,19 +399,19 @@ std::shared_ptr<opset1::Constant> NetworkHelper::toScalar(std::shared_ptr<opset1
}

std::shared_ptr<Node> NetworkHelper::getConstantInput(std::shared_ptr<Node> node) {
std::shared_ptr<Node> constant1 = as_type_ptr<opset1::Constant>(node->input_value(0).get_node_shared_ptr());
std::shared_ptr<Node> constant1 = ov::as_type_ptr<opset1::Constant>(node->input_value(0).get_node_shared_ptr());
if (!constant1) {
constant1 = as_type_ptr<opset1::Constant>(node->input_value(1).get_node_shared_ptr());
constant1 = ov::as_type_ptr<opset1::Constant>(node->input_value(1).get_node_shared_ptr());
}
return constant1;
}

int NetworkHelper::getConstantInputIndex(std::shared_ptr<Node> node) {
if (as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(1)) != nullptr) {
if (ov::as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(1)) != nullptr) {
return 1;
}

if (as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(0)) != nullptr) {
if (ov::as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(0)) != nullptr) {
return 0;
}

@ -449,7 +449,7 @@ std::vector<size_t> NetworkHelper::updateReshapeValues(
}

std::shared_ptr<ngraph::opset1::Multiply> NetworkHelper::optimizeMultipliesAfter(std::shared_ptr<Node> node) {
std::shared_ptr<ngraph::opset1::Multiply> multiply = as_type_ptr<opset1::Multiply>(std::move(node));
std::shared_ptr<ngraph::opset1::Multiply> multiply = ov::as_type_ptr<opset1::Multiply>(std::move(node));
if (!multiply) {
THROW_IE_LPT_EXCEPTION(*multiply) << "Unexpected operation type";
}
@ -461,7 +461,7 @@ std::shared_ptr<ngraph::opset1::Multiply> NetworkHelper::optimizeMultipliesAfter
}

auto nextMultiplyInput = *multiply->output(0).get_target_inputs().begin();
auto nextMultiply = as_type_ptr<op::TypeRelaxed<opset1::Multiply>>(nextMultiplyInput.get_node()->shared_from_this());
auto nextMultiply = ov::as_type_ptr<op::TypeRelaxed<opset1::Multiply>>(nextMultiplyInput.get_node()->shared_from_this());
if (nextMultiply) {
auto constant2 = getConstantInput(nextMultiply);
if (!constant2 || constant2->output(0).get_target_inputs().size() != 1) {
@ -472,7 +472,7 @@ std::shared_ptr<ngraph::opset1::Multiply> NetworkHelper::optimizeMultipliesAfter
auto multiplyResult = fold<opset1::Multiply>(constant1, constant2);
{
// optimize constant shape: used in rfcn-resnet101-coco
const auto multiplyResultConstant = as_type_ptr<opset1::Constant>(multiplyResult);
const auto multiplyResultConstant = ov::as_type_ptr<opset1::Constant>(multiplyResult);
if ((multiplyResultConstant != nullptr) && NetworkHelper::isScalarLike(multiplyResultConstant)) {
multiplyResult = NetworkHelper::toScalar(multiplyResultConstant);
}
@ -496,10 +496,10 @@ std::shared_ptr<ngraph::opset1::Multiply> NetworkHelper::optimizeMultipliesAfter
}

std::shared_ptr<opset1::Constant> NetworkHelper::round(std::shared_ptr<Node> node, element::Type target_type) {
const auto constant = as_type_ptr<opset1::Constant>(node);
const auto constant = ov::as_type_ptr<opset1::Constant>(node);
assert(constant);

const auto castedConstant = as_type_ptr<ngraph::opset1::Constant>(fold<op::v0::Convert>(
const auto castedConstant = ov::as_type_ptr<ngraph::opset1::Constant>(fold<op::v0::Convert>(
fold<ngraph::op::v5::Round>(constant->output(0), ngraph::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO),
target_type));

@ -525,7 +525,7 @@ FakeQuantizeDequantization NetworkHelper::foldDequantization(const std::shared_p

if (dequantization.convert != nullptr) {
const std::shared_ptr<Node> result = foldConvert(dequantization.data, dequantization.convert->get_element_type());
if (is_type<opset1::Constant>(result)) {
if (ov::is_type<opset1::Constant>(result)) {
if (inPlace) {
copyInfo(dequantization.convert, result);
}
@ -543,7 +543,7 @@ FakeQuantizeDequantization NetworkHelper::foldDequantization(const std::shared_p
const auto convertionResult = foldConvert(
dequantization.subtractConstant,
dequantization.subtractConvert->get_element_type());
if (is_type<opset1::Constant>(convertionResult)) {
if (ov::is_type<opset1::Constant>(convertionResult)) {
replace_node(dequantization.subtractConvert, convertionResult);
dequantization = NetworkHelper::getDequantization(node, branchIndex, inPlace);
}
@ -552,7 +552,7 @@ FakeQuantizeDequantization NetworkHelper::foldDequantization(const std::shared_p
const std::shared_ptr<Node> result = fold<opset1::Subtract>(
dequantization.subtract->get_input_node_shared_ptr(0),
dequantization.subtract->get_input_node_shared_ptr(1));
if (is_type<opset1::Constant>(result)) {
if (ov::is_type<opset1::Constant>(result)) {
if (inPlace) {
copyInfo(dequantization.subtract, result);
}
@ -571,7 +571,7 @@ FakeQuantizeDequantization NetworkHelper::foldDequantization(const std::shared_p
std::shared_ptr<Node> result = fold<opset1::Multiply>(
dequantization.multiply->get_input_node_shared_ptr(0),
dequantization.multiply->get_input_node_shared_ptr(1));
if (!is_type<opset1::Constant>(result)) {
if (!ov::is_type<opset1::Constant>(result)) {
return dequantization;
}
if (dequantization.multiply->get_output_element_type(0) != result->get_element_type()) {
@ -649,7 +649,7 @@ std::shared_ptr<opset1::FakeQuantize> NetworkHelper::fuseConvert(const std::shar
}

Node* node = targetInputs.begin()->get_node();
if (!is_type<opset1::Convert>(node) ||
if (!ov::is_type<opset1::Convert>(node) ||
// TODO: LPT: avoid precision hardcode: to separate method: isSupportedPrecision
((node->get_output_element_type(0) != element::u8) && (node->get_output_element_type(0) != element::i8))) {
return fakeQuantize;
@ -715,15 +715,15 @@ std::shared_ptr<Node> NetworkHelper::foldFakeQuantize(
const bool roundValuesArg,
const bool roundValuesWasSet,
const int outChannelsShapeIndex) {
if (is_type<opset1::Constant>(fq->get_input_node_shared_ptr(0)) &&
is_type<opset1::Constant>(fq->get_input_node_shared_ptr(1)) &&
is_type<opset1::Constant>(fq->get_input_node_shared_ptr(2)) &&
is_type<opset1::Constant>(fq->get_input_node_shared_ptr(3)) &&
is_type<opset1::Constant>(fq->get_input_node_shared_ptr(4)) &&
op::util::constantIsEqualTo(as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(1)), 0.f) &&
op::util::constantIsEqualTo(as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(2)), 254.f) &&
op::util::constantIsEqualTo(as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(3)), -127.f) &&
op::util::constantIsEqualTo(as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(4)), 127.f)) {
if (ov::is_type<opset1::Constant>(fq->get_input_node_shared_ptr(0)) &&
ov::is_type<opset1::Constant>(fq->get_input_node_shared_ptr(1)) &&
ov::is_type<opset1::Constant>(fq->get_input_node_shared_ptr(2)) &&
ov::is_type<opset1::Constant>(fq->get_input_node_shared_ptr(3)) &&
ov::is_type<opset1::Constant>(fq->get_input_node_shared_ptr(4)) &&
op::util::constantIsEqualTo(ov::as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(1)), 0.f) &&
op::util::constantIsEqualTo(ov::as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(2)), 254.f) &&
op::util::constantIsEqualTo(ov::as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(3)), -127.f) &&
op::util::constantIsEqualTo(ov::as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(4)), 127.f)) {
const auto type1 = fq->input_value(0).get_element_type();
const auto type2 = fq->input_value(3).get_element_type();
if (type1.is_real() && type2.is_real()) {
@ -744,7 +744,7 @@ std::shared_ptr<Node> NetworkHelper::foldFakeQuantize(
foldConvert(fq->input_value(3), element::f32));
}

auto constant = as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(0));
auto constant = ov::as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(0));

if (constant) {
const bool roundValues = roundValuesWasSet ? roundValuesArg : fq->get_output_element_type(0).is_integral();
@ -774,10 +774,10 @@ std::shared_ptr<Node> NetworkHelper::foldFakeQuantize(
const size_t H = constShape.size() > 2lu ? constShape.size() == 3lu ? constShape[2] : constShape[constShape.size() - 2] : 1;
const size_t W = constShape.size() > 3lu ? constShape[constShape.size() - 1] : 1;

const auto inputLowValues = as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(1))->cast_vector<float>();
const auto inputHighValues = as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(2))->cast_vector<float>();
const auto outputLowValues = as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(3))->cast_vector<float>();
const auto outputHighValues = as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(4))->cast_vector<float>();
const auto inputLowValues = ov::as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(1))->cast_vector<float>();
const auto inputHighValues = ov::as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(2))->cast_vector<float>();
const auto outputLowValues = ov::as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(3))->cast_vector<float>();
const auto outputHighValues = ov::as_type_ptr<opset1::Constant>(fq->get_input_node_shared_ptr(4))->cast_vector<float>();

const size_t inputLowSize = inputLowValues.size();
const size_t inputHighSize = inputHighValues.size();
@ -848,7 +848,7 @@ std::shared_ptr<opset1::FakeQuantize> NetworkHelper::composeFakeQuantize(const s
if (targetInputs.size() != 1ul) {
return nullptr;
}
if (is_type<opset1::Convert>(targetInputs.begin()->get_node())) {
if (ov::is_type<opset1::Convert>(targetInputs.begin()->get_node())) {
parent = targetInputs.begin()->get_node()->shared_from_this();
}

@ -856,7 +856,7 @@ std::shared_ptr<opset1::FakeQuantize> NetworkHelper::composeFakeQuantize(const s
if (targetInputs.size() != 1ul) {
return nullptr;
}
if (is_type<opset1::Subtract>(targetInputs.begin()->get_node())) {
if (ov::is_type<opset1::Subtract>(targetInputs.begin()->get_node())) {
parent = targetInputs.begin()->get_node()->shared_from_this();
}

@ -864,7 +864,7 @@ std::shared_ptr<opset1::FakeQuantize> NetworkHelper::composeFakeQuantize(const s
if (targetInputs.size() != 1ul) {
return nullptr;
}
if (is_type<opset1::Multiply>(targetInputs.begin()->get_node())) {
if (ov::is_type<opset1::Multiply>(targetInputs.begin()->get_node())) {
parent = targetInputs.begin()->get_node()->shared_from_this();
}

@ -970,8 +970,8 @@ std::tuple<std::shared_ptr<Node>, std::shared_ptr<Node>> NetworkHelper::decompos
const auto outputLow = fq->input_value(3);
const auto outputHigh = fq->input_value(4);

std::vector<float> outputLowValues = as_type_ptr<opset1::Constant>(outputLow.get_node_shared_ptr())->cast_vector<float>();
std::vector<float> outputHighValues = as_type_ptr<opset1::Constant>(outputHigh.get_node_shared_ptr())->cast_vector<float>();
std::vector<float> outputLowValues = ov::as_type_ptr<opset1::Constant>(outputLow.get_node_shared_ptr())->cast_vector<float>();
std::vector<float> outputHighValues = ov::as_type_ptr<opset1::Constant>(outputHigh.get_node_shared_ptr())->cast_vector<float>();
size_t outputSize = outputLowValues.size();
std::vector<float> minValues(outputSize, min);
std::vector<float> maxValues(outputSize, max);
@ -1035,7 +1035,7 @@ std::tuple<std::shared_ptr<Node>, std::shared_ptr<Node>> NetworkHelper::decompos
}
}

if ((shift != nullptr) && isZero(as_type_ptr<opset1::Constant>(shift))) {
if ((shift != nullptr) && isZero(ov::as_type_ptr<opset1::Constant>(shift))) {
shift = nullptr;
}

@ -1057,12 +1057,12 @@ std::tuple<std::shared_ptr<Node>, std::shared_ptr<Node>> NetworkHelper::decompos
std::shared_ptr<ngraph::Node> convert2;
if (updatePrecision) {
std::shared_ptr<Node> convert;
std::shared_ptr<opset1::Constant> newFqConstant = as_type_ptr<opset1::Constant>(newFQ);
std::shared_ptr<opset1::Constant> newFqConstant = ov::as_type_ptr<opset1::Constant>(newFQ);

if (is_type<opset1::Constant>(newFQ)) {
if (ov::is_type<opset1::Constant>(newFQ)) {
convert = foldConvert(newFQ, precision);
} else if (is_type<opset1::FakeQuantize>(newFQ)) {
newFQ = setOutDataPrecision(as_type_ptr<opset1::FakeQuantize>(newFQ), precision);
} else if (ov::is_type<opset1::FakeQuantize>(newFQ)) {
newFQ = setOutDataPrecision(ov::as_type_ptr<opset1::FakeQuantize>(newFQ), precision);
convert = newFQ;
} else {
THROW_IE_LPT_EXCEPTION(*newFQ) << "unexpected operation type";
@ -1191,20 +1191,20 @@ FakeQuantizeDequantization NetworkHelper::createDequantizationFromFakeQuantize(

// TODO: threshold values have to used here to avoid shifts

const std::shared_ptr<opset1::Constant> scale = as_type_ptr<opset1::Constant>(foldConvert(fold<opset1::Divide>(
const std::shared_ptr<opset1::Constant> scale = ov::as_type_ptr<opset1::Constant>(foldConvert(fold<opset1::Divide>(
fold<opset1::Subtract>(outputHigh, outputLow),
fold<opset1::Subtract>(newMax, newMin)), deqPrecision));
assert(scale != nullptr);

std::shared_ptr<opset1::Constant> shift = hasZeroPoint ?
as_type_ptr<opset1::Constant>(foldConvert(fold<opset1::Divide>(
ov::as_type_ptr<opset1::Constant>(foldConvert(fold<opset1::Divide>(
fold<opset1::Subtract>(fold<opset1::Multiply>(newMin, outputHigh), fold<opset1::Multiply>(newMax, outputLow)),
fold<opset1::Subtract>(outputHigh, outputLow)), deqPrecision)) :
nullptr;
assert((!hasZeroPoint) || (hasZeroPoint && shift != nullptr));

if (shift != nullptr) {
std::shared_ptr<opset1::Constant> shiftConst = as_type_ptr<opset1::Constant>(shift);
std::shared_ptr<opset1::Constant> shiftConst = ov::as_type_ptr<opset1::Constant>(shift);
if (isScalarLike(shiftConst)) {
auto scalar = toScalar(shiftConst);
if (op::util::constantIsEqualTo(scalar, 0)) {
@ -1241,7 +1241,7 @@ FakeQuantizeDequantization NetworkHelper::createDequantizationFromFakeQuantize(
}

bool NetworkHelper::areQuantizeAndDequantizeSupportedForSubtract(const std::shared_ptr<const ngraph::Node>& node) {
if (!is_type<opset1::Subtract>(node)) {
if (!ov::is_type<opset1::Subtract>(node)) {
return false;
}

@ -1255,7 +1255,7 @@ bool NetworkHelper::areQuantizeAndDequantizeSupportedForSubtract(const std::shar
}

bool NetworkHelper::areQuantizeAndDequantizeSupportedForMultiply(const std::shared_ptr<const ngraph::Node>& node) {
if (!is_type<opset1::Multiply>(node)) {
if (!ov::is_type<opset1::Multiply>(node)) {
return false;
}

@ -1266,14 +1266,14 @@ bool NetworkHelper::areQuantizeAndDequantizeSupportedForMultiply(const std::shar
}

const auto dataNode = dequantization.data.get_node();
if (is_type<opset1::Convert>(dataNode)) {
const auto quantize = as_type_ptr<opset1::FakeQuantize>(dataNode->get_input_node_shared_ptr(0));
if (ov::is_type<opset1::Convert>(dataNode)) {
const auto quantize = ov::as_type_ptr<opset1::FakeQuantize>(dataNode->get_input_node_shared_ptr(0));
if (quantize == nullptr) {
return false;
}

return NetworkHelper::isQuantizeSupported(quantize);
} else if (is_type<opset1::Constant>(dataNode)) {
} else if (ov::is_type<opset1::Constant>(dataNode)) {
return true;
}

@ -1286,15 +1286,15 @@ bool NetworkHelper::isQuantizeSupported(const std::shared_ptr<opset1::FakeQuanti

FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_ptr<const Node>& node, const size_t parentIndex, const bool inPlace) {
auto getDataIndex = [](const std::shared_ptr<ngraph::Node>& node) {
if (is_type<opset1::Constant>(node->get_input_node_ptr(1))) {
if (ov::is_type<opset1::Constant>(node->get_input_node_ptr(1))) {
return 0ul;
}

if (is_type<opset1::Convert>(node->get_input_node_ptr(1)) && is_type<opset1::Constant>(node->get_input_node_ptr(1)->get_input_node_ptr(0))) {
if (ov::is_type<opset1::Convert>(node->get_input_node_ptr(1)) && ov::is_type<opset1::Constant>(node->get_input_node_ptr(1)->get_input_node_ptr(0))) {
return 0ul;
}

if (is_type<opset1::Convert>(node->get_input_node_ptr(0)) && is_type<opset1::Constant>(node->get_input_node_ptr(0)->get_input_node_ptr(0))) {
if (ov::is_type<opset1::Convert>(node->get_input_node_ptr(0)) && ov::is_type<opset1::Constant>(node->get_input_node_ptr(0)->get_input_node_ptr(0))) {
return 1ul;
}

@ -1303,7 +1303,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt

Output<Node> dataNode = inPlace ? std::const_pointer_cast<Node>(node)->output(0) : node->input_value(parentIndex);

const std::shared_ptr<ngraph::opset1::Multiply> multiply = as_type_ptr<ngraph::opset1::Multiply>(dataNode.get_node_shared_ptr());
const std::shared_ptr<ngraph::opset1::Multiply> multiply = ov::as_type_ptr<ngraph::opset1::Multiply>(dataNode.get_node_shared_ptr());
std::shared_ptr<opset1::Constant> multiplyConstant;
if (multiply != nullptr) {
if (!FakeQuantizeDequantization::checkShape(multiply)) {
@ -1317,7 +1317,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt
dataNode = multiply->get_input_source_output(getDataIndex(multiply));
}

const std::shared_ptr<opset1::Subtract> subtract = as_type_ptr<ngraph::opset1::Subtract>(dataNode.get_node_shared_ptr());
const std::shared_ptr<opset1::Subtract> subtract = ov::as_type_ptr<ngraph::opset1::Subtract>(dataNode.get_node_shared_ptr());
std::shared_ptr<opset1::Convert> subtractConvert;
std::shared_ptr<opset1::Constant> subtractConstant;
if (subtract != nullptr) {
@ -1332,7 +1332,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt
dataNode = subtract->get_input_source_output(getDataIndex(subtract));
}

const std::shared_ptr<opset1::Convert> convert = as_type_ptr<opset1::Convert>(dataNode.get_node_shared_ptr());
const std::shared_ptr<opset1::Convert> convert = ov::as_type_ptr<opset1::Convert>(dataNode.get_node_shared_ptr());
if (convert != nullptr) {
if ((convert->input(0).get_element_type() != element::i8) && (convert->input(0).get_element_type() != element::u8) &&
(convert->output(0).get_element_type() != element::f32)) {
@ -1353,7 +1353,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantizationBelow(const std::shar

std::shared_ptr<Node> lastNode = targetInputs.begin()->get_node()->shared_from_this();

const std::shared_ptr<opset1::Convert> convert = as_type_ptr<opset1::Convert>(lastNode);
const std::shared_ptr<opset1::Convert> convert = ov::as_type_ptr<opset1::Convert>(lastNode);
if (convertIsMandatory && (convert == nullptr)) {
return FakeQuantizeDequantization();
}
@ -1371,7 +1371,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantizationBelow(const std::shar
lastNode = inputs.begin()->get_node()->shared_from_this();
}

const std::shared_ptr<opset1::Subtract> subtract = as_type_ptr<ngraph::opset1::Subtract>(lastNode);
const std::shared_ptr<opset1::Subtract> subtract = ov::as_type_ptr<ngraph::opset1::Subtract>(lastNode);
std::shared_ptr<opset1::Convert> subtractConvert;
std::shared_ptr<opset1::Constant> subtractConstant;
if (subtract != nullptr) {
@ -1387,7 +1387,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantizationBelow(const std::shar
lastNode = inputs.begin()->get_node()->shared_from_this();
}

const std::shared_ptr<ngraph::opset1::Multiply> multiply = as_type_ptr<ngraph::opset1::Multiply>(lastNode);
const std::shared_ptr<ngraph::opset1::Multiply> multiply = ov::as_type_ptr<ngraph::opset1::Multiply>(lastNode);
std::shared_ptr<opset1::Constant> multiplyConstant;
if (multiply != nullptr) {
FakeQuantizeDequantization::fillDequantizationParams(multiply, multiplyConstant);
@ -1403,18 +1403,18 @@ FakeQuantizeDequantization NetworkHelper::normalizeDequantization(FakeQuantizeDe
if (dequantization.empty()) {
return dequantization;
}
if (dequantization.multiply != nullptr && as_type_ptr<ngraph::opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(0))) {
if (dequantization.multiply != nullptr && ov::as_type_ptr<ngraph::opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(0))) {
std::shared_ptr<Node> leftParent = dequantization.multiply->get_input_node_shared_ptr(0);
std::shared_ptr<Node> rightParent = dequantization.multiply->get_input_node_shared_ptr(1);
std::shared_ptr<opset1::Multiply> normalized_multiply = as_type_ptr<opset1::Multiply>(
std::shared_ptr<opset1::Multiply> normalized_multiply = ov::as_type_ptr<opset1::Multiply>(
dequantization.multiply->clone_with_new_inputs({rightParent, leftParent}));
replace_node(dequantization.multiply, normalized_multiply);
dequantization.multiply = normalized_multiply;
}
if (dequantization.subtract != nullptr && as_type_ptr<ngraph::opset1::Constant>(dequantization.subtract->get_input_node_shared_ptr(0))) {
if (dequantization.subtract != nullptr && ov::as_type_ptr<ngraph::opset1::Constant>(dequantization.subtract->get_input_node_shared_ptr(0))) {
std::shared_ptr<Node> leftParent = dequantization.subtract->get_input_node_shared_ptr(0);
std::shared_ptr<Node> rightParent = dequantization.subtract->get_input_node_shared_ptr(1);
std::shared_ptr<opset1::Subtract> normalized_subtract = as_type_ptr<opset1::Subtract>(
std::shared_ptr<opset1::Subtract> normalized_subtract = ov::as_type_ptr<opset1::Subtract>(
dequantization.subtract->clone_with_new_inputs({rightParent, leftParent}));
replace_node(dequantization.subtract, normalized_subtract);
dequantization.subtract = normalized_subtract;
@ -1424,7 +1424,7 @@ FakeQuantizeDequantization NetworkHelper::normalizeDequantization(FakeQuantizeDe

std::shared_ptr<opset1::Constant> NetworkHelper::normalizeDequantizationShape(const std::shared_ptr<Node>& eltwise) {
const size_t constantIdx = getConstantInputIndex(eltwise);
const auto constant = as_type_ptr<opset1::Constant>(eltwise->get_input_node_shared_ptr(constantIdx));
const auto constant = ov::as_type_ptr<opset1::Constant>(eltwise->get_input_node_shared_ptr(constantIdx));

const auto getConstWithNormalizeShape = [](
const std::shared_ptr<Node>& eltwise,
@ -1443,7 +1443,7 @@ std::shared_ptr<opset1::Constant> NetworkHelper::normalizeDequantizationShape(co
constant,
op::Constant::create(element::i32, Shape{ unsqueezeConstantShape.size() }, unsqueezeConstantShape));

return as_type_ptr<opset1::Constant>(newConstant);
return ov::as_type_ptr<opset1::Constant>(newConstant);
} else {
return constant;
}
@ -1473,7 +1473,7 @@ FakeQuantizeDequantizationValues NetworkHelper::createEmptyValues(const FakeQuan
}

bool NetworkHelper::isZeroConst(const std::shared_ptr<Node>& node) {
std::shared_ptr<opset1::Constant> constant = as_type_ptr<opset1::Constant>(node);
std::shared_ptr<opset1::Constant> constant = ov::as_type_ptr<opset1::Constant>(node);

if (constant == nullptr)
return false;
@ -1492,13 +1492,13 @@ bool NetworkHelper::isZeroConst(const std::shared_ptr<Node>& node) {

std::shared_ptr<Node> NetworkHelper::optimizeSubtract(std::shared_ptr<opset1::Subtract> subtract) {
auto convertOnSubtract = subtract->input_value(0).get_node_shared_ptr();
if (as_type_ptr<opset1::Convert>(convertOnSubtract) == nullptr) {
if (ov::as_type_ptr<opset1::Convert>(convertOnSubtract) == nullptr) {
return subtract;
}

// TODO: replace assert to condition and omit conversion part if there is no convert
// TODO: also check convertInputType to understand if we really want to propagate type
assert(as_type_ptr<opset1::Convert>(convertOnSubtract));
assert(ov::as_type_ptr<opset1::Convert>(convertOnSubtract));
const element::Type convertInputType = convertOnSubtract->get_input_element_type(0);
const element::Type convertOutputType = convertOnSubtract->get_output_element_type(0);

@ -1508,7 +1508,7 @@ std::shared_ptr<Node> NetworkHelper::optimizeSubtract(std::shared_ptr<opset1::Su

auto data = convertOnSubtract->input_value(0);
const auto subtractParent = subtract->get_input_node_shared_ptr(1);
if (is_type<opset1::Constant>(subtractParent)) {
if (ov::is_type<opset1::Constant>(subtractParent)) {
std::shared_ptr<Node> replacement;

auto shift = subtract->input_value(1).get_node_shared_ptr();
@ -1533,7 +1533,7 @@ std::shared_ptr<Node> NetworkHelper::optimizeSubtract(std::shared_ptr<opset1::Su
}

return replacement;
} else if (is_type<opset1::Convert>(subtractParent) && is_type<opset1::Constant>(subtractParent->get_input_node_shared_ptr(0))) {
} else if (ov::is_type<opset1::Convert>(subtractParent) && ov::is_type<opset1::Constant>(subtractParent->get_input_node_shared_ptr(0))) {
auto replacement = std::make_shared<op::TypeRelaxed<opset1::Subtract>>(data, subtractParent->get_input_node_shared_ptr(0));
NetworkHelper::copyInfo(subtract, replacement);
NetworkHelper::setOutDataPrecisionForTypeRelaxed(replacement, convertOutputType);
@ -1652,7 +1652,7 @@ bool NetworkHelper::checkConstantValuePrecision(const element::Type expectedPrec
return true;
}

std::shared_ptr<opset1::Constant> constantOp = as_type_ptr<opset1::Constant>(constant);
std::shared_ptr<opset1::Constant> constantOp = ov::as_type_ptr<opset1::Constant>(constant);
if (constantOp == nullptr) {
return false;
}
@ -1687,7 +1687,7 @@ size_t NetworkHelper::getParentOutputIndex(const std::shared_ptr<ngraph::Node>&
}

std::shared_ptr<Node> NetworkHelper::toScalarIfPossible(std::shared_ptr<Node> node) {
std::shared_ptr<opset1::Constant> constant = as_type_ptr<opset1::Constant>(node);
std::shared_ptr<opset1::Constant> constant = ov::as_type_ptr<opset1::Constant>(node);
if (constant == nullptr) {
return node;
}
@ -1700,7 +1700,7 @@ std::shared_ptr<Node> NetworkHelper::toScalarIfPossible(std::shared_ptr<Node> no
}

std::shared_ptr<Node> foldConvert(const Output<Node>& node, const element::Type targetPrecision) {
if (is_type<opset1::Constant>(node.get_node_shared_ptr()) && (node.get_element_type() == targetPrecision)) {
if (ov::is_type<opset1::Constant>(node.get_node_shared_ptr()) && (node.get_element_type() == targetPrecision)) {
return node.get_node_shared_ptr();
}

@ -1713,9 +1713,9 @@ bool NetworkHelper::checkZeroPoint(const std::shared_ptr<Node>& node, const Data
}

float min, max;
if (is_type<opset1::Subtract>(node)) {
if (ov::is_type<opset1::Subtract>(node)) {
const auto parent = node->get_input_node_shared_ptr(0);
const auto intNode = is_type<opset1::Convert>(parent) ? parent : node;
const auto intNode = ov::is_type<opset1::Convert>(parent) ? parent : node;
const auto type = intNode->get_input_element_type(0);
if (type == element::u8 || type == element::i8) {
min = DataPrecision::getMinValue(type, 256) - 0.5f;
@ -1724,12 +1724,12 @@ bool NetworkHelper::checkZeroPoint(const std::shared_ptr<Node>& node, const Data
return type == element::f32 || type == element::f16;
}
auto subtract1input = node->get_input_node_shared_ptr(1);
if (is_type<opset1::Convert>(subtract1input)) {
if (ov::is_type<opset1::Convert>(subtract1input)) {
return true;
}
auto subtractConst = as_type_ptr<opset1::Constant>(subtract1input);
auto subtractConst = ov::as_type_ptr<opset1::Constant>(subtract1input);
if (!subtractConst) {
subtractConst = as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(1)->get_input_node_shared_ptr(0));
subtractConst = ov::as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(1)->get_input_node_shared_ptr(0));
if (subtractConst == nullptr) {
return false;
}
@ -1739,13 +1739,13 @@ bool NetworkHelper::checkZeroPoint(const std::shared_ptr<Node>& node, const Data
return (val < min) || (val > max); })) {
return false;
}
} else if (is_type<opset1::FakeQuantize>(node)) {
} else if (ov::is_type<opset1::FakeQuantize>(node)) {
if (!dataPrecision.hasZeroPoint) {
return true;
}
min = dataPrecision.min - 0.5f;
max = dataPrecision.max + 0.5f;
const auto quantizationDetails = QuantizationDetails::getDetails(as_type_ptr<opset1::FakeQuantize>(node));
const auto quantizationDetails = QuantizationDetails::getDetails(ov::as_type_ptr<opset1::FakeQuantize>(node));
for (size_t i = 0; i < quantizationDetails.outputLowValues.size(); ++i) {
float shift;
if (quantizationDetails.outputHighValues[i] != quantizationDetails.outputLowValues[i]) {

@ -64,16 +64,16 @@ bool NormalizeL2Transformation::canBeTransformed(const TransformationContext& co
}

const std::shared_ptr<Node> multiply = operation->get_input_node_shared_ptr(0);
auto scalesConst = as_type_ptr<ngraph::opset1::Constant>(multiply->get_input_node_shared_ptr(1));
auto scalesConst = ov::as_type_ptr<ngraph::opset1::Constant>(multiply->get_input_node_shared_ptr(1));
if (scalesConst == nullptr) {
scalesConst = as_type_ptr<ngraph::opset1::Constant>(multiply->get_input_node_shared_ptr(0));
scalesConst = ov::as_type_ptr<ngraph::opset1::Constant>(multiply->get_input_node_shared_ptr(0));
}
if (scalesConst == nullptr) {
return false;
}

// TODO: Expand transformation for all cases of axes values
const auto axes = as_type_ptr<opset1::Constant>(operation->get_input_node_shared_ptr(1));
const auto axes = ov::as_type_ptr<opset1::Constant>(operation->get_input_node_shared_ptr(1));
const std::vector<int64_t> axesAcrossSpatial = { 1 };
const std::vector<int64_t> axesByChannels = { 1, 2, 3 };

@ -104,13 +104,13 @@ bool NormalizeL2Transformation::transform(TransformationContext &context, ngraph
return false;
}

auto normalize = as_type_ptr<opset1::NormalizeL2>(NetworkHelper::separateInStandaloneBranch(operation));
auto normalize = ov::as_type_ptr<opset1::NormalizeL2>(NetworkHelper::separateInStandaloneBranch(operation));

const auto axes = as_type_ptr<opset1::Constant>(normalize->get_input_node_shared_ptr(1));
const auto axes = ov::as_type_ptr<opset1::Constant>(normalize->get_input_node_shared_ptr(1));
FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(normalize);
auto scalesConst = as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(1));
auto scalesConst = ov::as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(1));
if (scalesConst == nullptr) {
scalesConst = as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(0));
scalesConst = ov::as_type_ptr<opset1::Constant>(dequantization.multiply->get_input_node_shared_ptr(0));
}

std::shared_ptr<opset1::Constant> newScalesConst;

@ -40,8 +40,8 @@ bool PadTransformation::transform(TransformationContext& context, ngraph::patter
return false;
}

const auto pad = as_type_ptr<opset1::Pad>(NetworkHelper::separateInStandaloneBranch(m.get_match_root()));
const auto padConstant = as_type_ptr<opset1::Constant>(pad->get_input_node_shared_ptr(3));
const auto pad = ov::as_type_ptr<opset1::Pad>(NetworkHelper::separateInStandaloneBranch(m.get_match_root()));
const auto padConstant = ov::as_type_ptr<opset1::Constant>(pad->get_input_node_shared_ptr(3));
const auto padConstantValue = padConstant->cast_vector<float>()[0];

const auto padsBegin = pad->get_pads_begin();
@ -67,7 +67,7 @@ bool PadTransformation::transform(TransformationContext& context, ngraph::patter
bcastedShape[padIdx] = inputPShape[padIdx].get_length();

const auto bCastConst = opset1::Constant::create(element::i32, Shape{bcastedShape.size()}, bcastedShape);
return as_type_ptr<opset1::Constant>(fold<opset1::Broadcast>(constant, bCastConst));
return ov::as_type_ptr<opset1::Constant>(fold<opset1::Broadcast>(constant, bCastConst));
};

if (dequantization.subtract && shape_size(dequantization.subtractConstant->get_shape()) == 1ul) {
@ -114,7 +114,7 @@ bool PadTransformation::transform(TransformationContext& context, ngraph::patter
const auto endConst = opset1::Constant::create(element::u32, { padsForConstantEnd.size() }, padsForConstantEnd);
const auto padValueConstant = opset1::Constant::create(constant->get_element_type(), Shape{}, { padVal });
const auto foldedConstant = fold<opset1::Pad>(constant, beginConst, endConst, padValueConstant, padMode);
return as_type_ptr<opset1::Constant>(foldedConstant);
return ov::as_type_ptr<opset1::Constant>(foldedConstant);
} else {
return constant;
}
@ -157,7 +157,7 @@ bool PadTransformation::canBeTransformed(const TransformationContext& context, s
return false;
}

const auto pad = as_type_ptr<opset1::Pad>(op);
const auto pad = ov::as_type_ptr<opset1::Pad>(op);
if (!pad) {
return false;
}
@ -231,7 +231,7 @@ bool PadTransformation::canBeTransformed(const TransformationContext& context, s
return false;
}

const auto constant = as_type_ptr<opset1::Constant>(pad->get_input_node_shared_ptr(3));
const auto constant = ov::as_type_ptr<opset1::Constant>(pad->get_input_node_shared_ptr(3));
const auto constantValue = constant->cast_vector<float>()[0];
if (constantValue != 0.f && !padAndDqByTheSameDimension(dequantization.multiplyConstant)) {
return false;

@ -23,13 +23,13 @@ std::shared_ptr<Node> moveThroughElementwise(const std::shared_ptr<Node>& reshap
const auto reshapeValues = reshape->get_input_node_shared_ptr(1);
NGRAPH_CHECK(reshapeValues != nullptr, "Reshape constant was not found");

auto elementwiseValuesConvert = as_type_ptr<opset1::Convert>(elementwise->get_input_node_shared_ptr(1ul));
auto elementwiseValuesConvert = ov::as_type_ptr<opset1::Convert>(elementwise->get_input_node_shared_ptr(1ul));
auto elementwiseValues = elementwiseValuesConvert == nullptr ?
elementwise->get_input_node_shared_ptr(1ul) :
elementwiseValuesConvert->get_input_node_shared_ptr(0ul);
assert(is_type<opset1::Constant>(elementwiseValues));
assert(ov::is_type<opset1::Constant>(elementwiseValues));

const std::shared_ptr<opset1::Reshape> newReshape = as_type_ptr<opset1::Reshape>(reshape->clone_with_new_inputs({
const std::shared_ptr<opset1::Reshape> newReshape = ov::as_type_ptr<opset1::Reshape>(reshape->clone_with_new_inputs({
elementwise->get_input_node_shared_ptr(0ul),
reshapeValues }));

@ -39,7 +39,7 @@ std::shared_ptr<Node> moveThroughElementwise(const std::shared_ptr<Node>& reshap
if (!elementwiseValuesShape.empty() && (elementwiseValuesShape.size() != 1ul)) {
// update shape constant value to avoid eltwise constan value broadcasting
const Shape elementwiseShape = elementwise->output(0).get_shape();
const std::vector<size_t> reshapeValuesVector = as_type_ptr<opset1::Constant>(reshapeValues)->cast_vector<size_t>();
const std::vector<size_t> reshapeValuesVector = ov::as_type_ptr<opset1::Constant>(reshapeValues)->cast_vector<size_t>();

const std::vector<size_t> newReshapeValuesVector = ngraph::pass::low_precision::NetworkHelper::updateReshapeValues(
elementwiseValuesShape,
@ -54,8 +54,8 @@ std::shared_ptr<Node> moveThroughElementwise(const std::shared_ptr<Node>& reshap
newElementwiseValues = ngraph::pass::low_precision::fold_reshape<opset1::Reshape>(
elementwiseValues->output(0),
newReshapeValues->output(0),
as_type_ptr<opset1::Reshape>(reshape)->get_special_zero());
assert(is_type<opset1::Constant>(newElementwiseValues));
ov::as_type_ptr<opset1::Reshape>(reshape)->get_special_zero());
assert(ov::is_type<opset1::Constant>(newElementwiseValues));
} else {
newElementwiseValues = elementwiseValues;
}
@ -113,18 +113,18 @@ ngraph::pass::low_precision::PullReshapeThroughDequantization::PullReshapeThroug
auto reshape = opsMap.find(reshapeWrapper)->second.get_node()->shared_from_this();

auto child = reshape->get_output_target_inputs(0).begin()->get_node();
if (is_type<opset1::GroupConvolution>(child)) {
if (ov::is_type<opset1::GroupConvolution>(child)) {
return false;
}

while (reshape != nullptr) {
const auto parent = reshape->get_input_node_shared_ptr(0);
if (is_type<opset1::Multiply>(parent) || is_type<opset1::Subtract>(parent)) {
if (ov::is_type<opset1::Multiply>(parent) || ov::is_type<opset1::Subtract>(parent)) {
reshape = pull_reshape_through_dequantization::moveThroughElementwise(reshape, parent);
} else if (is_type<opset1::Convert>(parent)) {
} else if (ov::is_type<opset1::Convert>(parent)) {
reshape = pull_reshape_through_dequantization::moveThroughConvert(reshape, parent);
} else if (is_type<opset1::Constant>(parent)) {
pull_reshape_through_dequantization::fuseConstant(reshape, as_type_ptr<opset1::Constant>(parent));
} else if (ov::is_type<opset1::Constant>(parent)) {
pull_reshape_through_dequantization::fuseConstant(reshape, ov::as_type_ptr<opset1::Constant>(parent));
reshape = nullptr;
} else {
THROW_IE_LPT_EXCEPTION(*parent) << "unexepcted operation type";

@ -24,11 +24,11 @@ std::shared_ptr<Node> moveThroughElementwise(const std::shared_ptr<Node>& transp
const auto transposeValues = transpose->get_input_node_shared_ptr(1);
NGRAPH_CHECK(transposeValues != nullptr, "transpose constant was not found");

auto elementwiseValuesConvert = as_type_ptr<opset1::Convert>(elementwise->get_input_node_shared_ptr(1ul));
auto elementwiseValuesConvert = ov::as_type_ptr<opset1::Convert>(elementwise->get_input_node_shared_ptr(1ul));
auto elementwiseValues = elementwiseValuesConvert == nullptr ?
elementwise->get_input_node_shared_ptr(1ul) :
elementwiseValuesConvert->get_input_node_shared_ptr(0ul);
assert(is_type<opset1::Constant>(elementwiseValues));
assert(ov::is_type<opset1::Constant>(elementwiseValues));

const auto transposeValuesShape = transposeValues->output(0).get_shape();
const auto elementwiseValuesShape = elementwiseValues->output(0).get_shape();
@ -43,17 +43,17 @@ std::shared_ptr<Node> moveThroughElementwise(const std::shared_ptr<Node>& transp
element::i64,
Shape{ shape_size(transposeValuesShape) },
std::vector<size_t>(shape_size(transposeValuesShape), 1ul)));
assert(is_type<opset1::Constant>(elementwiseValues));
assert(ov::is_type<opset1::Constant>(elementwiseValues));
}

const std::shared_ptr<opset1::Transpose> newTranspose = as_type_ptr<opset1::Transpose>(transpose->clone_with_new_inputs({
const std::shared_ptr<opset1::Transpose> newTranspose = ov::as_type_ptr<opset1::Transpose>(transpose->clone_with_new_inputs({
elementwise->get_input_node_shared_ptr(0ul),
transposeValues }));

const auto newElementwiseValues = ngraph::pass::low_precision::fold<opset1::Transpose>(
elementwiseValues->output(0),
transposeValues->output(0));
assert(is_type<opset1::Constant>(newElementwiseValues));
assert(ov::is_type<opset1::Constant>(newElementwiseValues));

const auto newElementwise = elementwise->clone_with_new_inputs({
newTranspose,
@ -112,12 +112,12 @@ ngraph::pass::low_precision::PullTransposeThroughDequantization::PullTransposeTh

while (transpose != nullptr) {
const auto parent = transpose->get_input_node_shared_ptr(0);
if (is_type<opset1::Multiply>(parent) || is_type<opset1::Subtract>(parent)) {
if (ov::is_type<opset1::Multiply>(parent) || ov::is_type<opset1::Subtract>(parent)) {
transpose = pull_transpose_through_dequantization::moveThroughElementwise(transpose, parent);
} else if (is_type<opset1::Convert>(parent)) {
} else if (ov::is_type<opset1::Convert>(parent)) {
transpose = pull_transpose_through_dequantization::moveThroughConvert(transpose, parent);
} else if (is_type<opset1::Constant>(parent)) {
pull_transpose_through_dequantization::fuseConstant(transpose, as_type_ptr<opset1::Constant>(parent));
} else if (ov::is_type<opset1::Constant>(parent)) {
pull_transpose_through_dequantization::fuseConstant(transpose, ov::as_type_ptr<opset1::Constant>(parent));
transpose = nullptr;
} else {
THROW_IE_LPT_EXCEPTION(*parent) << "unexepcted operation type";

@ -49,21 +49,21 @@ QuantizationDetails::QuantizationDetails(const size_t levels, const std::vector<
outputHighValues(outputHighValues) {}

bool QuantizationDetails::outputLayoutIsSupported(std::shared_ptr<opset1::FakeQuantize> quantize) {
return is_type<opset1::Constant>(quantize->get_input_node_ptr(1)) &&
is_type<opset1::Constant>(quantize->get_input_node_ptr(2)) &&
is_type<opset1::Constant>(quantize->get_input_node_ptr(3)) &&
is_type<opset1::Constant>(quantize->get_input_node_ptr(4));
return ov::is_type<opset1::Constant>(quantize->get_input_node_ptr(1)) &&
ov::is_type<opset1::Constant>(quantize->get_input_node_ptr(2)) &&
ov::is_type<opset1::Constant>(quantize->get_input_node_ptr(3)) &&
ov::is_type<opset1::Constant>(quantize->get_input_node_ptr(4));
}

void QuantizationDetails::getInputIntervals(
std::shared_ptr<opset1::FakeQuantize> quantize,
std::vector<float>& inputLowValues,
std::vector<float>& inputHighValues) {
std::shared_ptr<opset1::Constant> inputLowLayer = as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(1));
std::shared_ptr<opset1::Constant> inputLowLayer = ov::as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(1));
const std::vector<float>& inputLowBlobValues = getBlobValue(inputLowLayer);
inputLowValues.insert(inputLowValues.end(), inputLowBlobValues.begin(), inputLowBlobValues.end());

std::shared_ptr<opset1::Constant> inputHighLayer = as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(2));
std::shared_ptr<opset1::Constant> inputHighLayer = ov::as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(2));
const std::vector<float> inputHighBlobValues = getBlobValue(inputHighLayer);
inputHighValues.insert(inputHighValues.end(), inputHighBlobValues.begin(), inputHighBlobValues.end());

@ -77,11 +77,11 @@ void QuantizationDetails::getOutputIntervals(
std::shared_ptr<opset1::FakeQuantize> quantize,
std::vector<float>& outputLowValues,
std::vector<float>& outputHighValues) {
std::shared_ptr<opset1::Constant> outputLowLayer = as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(3));
std::shared_ptr<opset1::Constant> outputLowLayer = ov::as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(3));
const std::vector<float>& outputLowBlobValues = getBlobValue(outputLowLayer);
outputLowValues.insert(outputLowValues.end(), outputLowBlobValues.begin(), outputLowBlobValues.end());

std::shared_ptr<opset1::Constant> outputHighLayer = as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(4));
std::shared_ptr<opset1::Constant> outputHighLayer = ov::as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(4));
const std::vector<float> outputHighBlobValues = getBlobValue(outputHighLayer);
outputHighValues.insert(outputHighValues.end(), outputHighBlobValues.begin(), outputHighBlobValues.end());

@ -91,11 +91,11 @@ void QuantizationDetails::getOutputIntervals(
}

QuantizationDetails QuantizationDetails::getDetails(std::shared_ptr<opset1::FakeQuantize> quantize) {
const std::vector<float> inputLowValues = as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(1))->cast_vector<float>();
const std::vector<float> inputHighValues = as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(2))->cast_vector<float>();
const std::vector<float> inputLowValues = ov::as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(1))->cast_vector<float>();
const std::vector<float> inputHighValues = ov::as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(2))->cast_vector<float>();

const std::vector<float> outputLowValues = as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(3))->cast_vector<float>();
const std::vector<float> outputHighValues = as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(4))->cast_vector<float>();
const std::vector<float> outputLowValues = ov::as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(3))->cast_vector<float>();
const std::vector<float> outputHighValues = ov::as_type_ptr<opset1::Constant>(quantize->get_input_node_shared_ptr(4))->cast_vector<float>();

return QuantizationDetails(
quantize->get_levels(),
@ -150,7 +150,7 @@ float QuantizationDetails::getOutputHighValue(const size_t index) const {
}

std::vector<float> QuantizationDetails::getBlobValue(std::shared_ptr<Node> constantLayer) {
return as_type_ptr<opset1::Constant>(constantLayer)->cast_vector<float>();
return ov::as_type_ptr<opset1::Constant>(constantLayer)->cast_vector<float>();
}

bool QuantizationDetails::isSupportedLevel(const size_t level) {

@ -40,7 +40,7 @@ bool ReduceBaseTransformation::canBeTransformed(const TransformationContext& con
return false;
}

const auto axesConstant = as_type_ptr<ngraph::opset1::Constant>(reduce->get_input_node_shared_ptr(1));
const auto axesConstant = ov::as_type_ptr<ngraph::opset1::Constant>(reduce->get_input_node_shared_ptr(1));
if (axesConstant == nullptr) {
return false;
}

@ -31,7 +31,7 @@ ReduceMaxTransformation::ReduceMaxTransformation(const Params& params) : ReduceB
}

bool ReduceMaxTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> reduce) const {
if (!is_type<opset1::ReduceMax>(reduce)) {
if (!ov::is_type<opset1::ReduceMax>(reduce)) {
return false;
}

@ -40,7 +40,7 @@ bool ReduceMaxTransformation::canBeTransformed(const TransformationContext& cont
}

const auto dequantization = NetworkHelper::getDequantization(reduce);
const std::vector<float> scales = as_type_ptr<opset1::Constant>(dequantization.multiplyConstant)->cast_vector<float>();
const std::vector<float> scales = ov::as_type_ptr<opset1::Constant>(dequantization.multiplyConstant)->cast_vector<float>();
if (std::any_of(scales.begin(), scales.end(), [](const float value) { return value < 0.0; })) {
return false;
}

@ -31,7 +31,7 @@ ReduceMeanTransformation::ReduceMeanTransformation(const Params& params) : Reduc
}

bool ReduceMeanTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> reduce) const {
return is_type<opset1::ReduceMean>(reduce) ? ReduceBaseTransformation::canBeTransformed(context, reduce) : false;
return ov::is_type<opset1::ReduceMean>(reduce) ? ReduceBaseTransformation::canBeTransformed(context, reduce) : false;
}

bool ReduceMeanTransformation::isPrecisionPreserved(std::shared_ptr<Node> reduce) const noexcept {

@ -31,7 +31,7 @@ ReduceMinTransformation::ReduceMinTransformation(const Params& params) : ReduceB
}

bool ReduceMinTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> reduce) const {
if (!is_type<opset1::ReduceMin>(reduce)) {
if (!ov::is_type<opset1::ReduceMin>(reduce)) {
return false;
}

@ -40,7 +40,7 @@ bool ReduceMinTransformation::canBeTransformed(const TransformationContext& cont
}

const auto dequantization = NetworkHelper::getDequantization(reduce);
const std::vector<float> scales = as_type_ptr<opset1::Constant>(dequantization.multiplyConstant)->cast_vector<float>();
const std::vector<float> scales = ov::as_type_ptr<opset1::Constant>(dequantization.multiplyConstant)->cast_vector<float>();
if (std::any_of(scales.begin(), scales.end(), [](const float value) { return value < 0.0; })) {
return false;
}

@ -31,7 +31,7 @@ ReduceSumTransformation::ReduceSumTransformation(const Params& params) : ReduceB
}

bool ReduceSumTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> reduce) const {
const auto reduceSum = as_type_ptr<opset1::ReduceSum>(reduce);
const auto reduceSum = ov::as_type_ptr<opset1::ReduceSum>(reduce);
if (!reduceSum || !ReduceBaseTransformation::canBeTransformed(context, reduceSum)) {
return false;
}
@ -57,7 +57,7 @@ void ReduceSumTransformation::changeDequantizationValues(
ReduceBaseTransformation::changeDequantizationValues(reduce, dequantization);

if (dequantization.subtract) {
const auto reduceSum = as_type_ptr<opset1::ReduceSum>(reduce);
const auto reduceSum = ov::as_type_ptr<opset1::ReduceSum>(reduce);
const auto reductionAxes = reduceSum->get_reduction_axes();
const auto inputShape = reduceSum->get_input_partial_shape(0);

@ -72,7 +72,7 @@ void ReduceSumTransformation::changeDequantizationValues(
const auto result = fold<opset1::Multiply>(dequantization.subtractConstant, reductionSizeConstant);

replace_node(dequantization.subtractConstant, result);
dequantization.subtractConstant = as_type_ptr<opset1::Constant>(result);
dequantization.subtractConstant = ov::as_type_ptr<opset1::Constant>(result);
}
}

@ -125,7 +125,7 @@ void reshapeDequantizationConstant(const std::shared_ptr<opset1::Reshape>& resha
}

bool ReshapeTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
std::shared_ptr<opset1::Reshape> reshape = as_type_ptr<opset1::Reshape>(m.get_match_root());
std::shared_ptr<opset1::Reshape> reshape = ov::as_type_ptr<opset1::Reshape>(m.get_match_root());
if (NetworkHelper::isConstantPath(reshape)) {
return false;
}
@ -134,7 +134,7 @@ bool ReshapeTransformation::transform(TransformationContext& context, ngraph::pa
return false;
}

reshape = as_type_ptr<opset1::Reshape>(NetworkHelper::separateInStandaloneBranch(reshape));
reshape = ov::as_type_ptr<opset1::Reshape>(NetworkHelper::separateInStandaloneBranch(reshape));
reshapeDequantizationConstant(reshape);
moveDequantizationAfter(context, reshape, NetworkHelper::getDequantization(reshape, 0), false);
return true;

@ -36,11 +36,11 @@ constexpr VariantTypeInfo VariantWrapper<IntervalsAlignmentAttributePtr>::type_i
std::shared_ptr<VariantWrapper<std::shared_ptr<IntervalsAlignmentAttribute>>> VariantWrapper<IntervalsAlignmentAttributePtr>::create(
const std::shared_ptr<ngraph::Node>& node,
const AttributeParameters& params) {
if (!is_type<opset1::FakeQuantize>(node)) {
if (!ov::is_type<opset1::FakeQuantize>(node)) {
return nullptr;
}

auto fakeQuantize = as_type_ptr<opset1::FakeQuantize>(node);
auto fakeQuantize = ov::as_type_ptr<opset1::FakeQuantize>(node);
if (!QuantizationDetails::outputLayoutIsSupported(fakeQuantize) || !QuantizationDetails::isSupportedLevel(fakeQuantize->get_levels())) {
return nullptr;
}
@ -58,8 +58,8 @@ std::shared_ptr<VariantWrapper<std::shared_ptr<IntervalsAlignmentAttribute>>> Va
}
}

const auto outLow = as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(3));
const auto outHigh = as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(4));
const auto outLow = ov::as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(3));
const auto outHigh = ov::as_type_ptr<opset1::Constant>(node->get_input_node_shared_ptr(4));
if (!NetworkHelper::isScalarLike(outLow) || !NetworkHelper::isScalarLike(outHigh)) {
return nullptr;
}
@ -78,7 +78,7 @@ std::shared_ptr<VariantWrapper<std::shared_ptr<IntervalsAlignmentAttribute>>> Va
foldConvert(node->get_input_node_ptr(3)->shared_from_this(), params.deqPrecision),
dequantization.multiplyConstant);

auto multiplyResultConstant = as_type_ptr<opset1::Constant>(multiplyResult);
auto multiplyResultConstant = ov::as_type_ptr<opset1::Constant>(multiplyResult);
auto intervals = multiplyResultConstant->cast_vector<float>();
lowInterval = *std::min_element(intervals.begin(), intervals.end());
}
@ -90,7 +90,7 @@ std::shared_ptr<VariantWrapper<std::shared_ptr<IntervalsAlignmentAttribute>>> Va
foldConvert(node->get_input_node_ptr(4)->shared_from_this(), params.deqPrecision),
dequantization.multiplyConstant);

auto multiplyResultConstant = as_type_ptr<opset1::Constant>(multiplyResult);
auto multiplyResultConstant = ov::as_type_ptr<opset1::Constant>(multiplyResult);
auto intervals = multiplyResultConstant->cast_vector<float>();
highInterval = *std::max_element(intervals.begin(), intervals.end());
}
@ -115,8 +115,8 @@ std::shared_ptr<VariantWrapper<std::shared_ptr<IntervalsAlignmentAttribute>>> Va
fakeQuantize->get_levels()));
rtInfo[ngraph::VariantWrapper<IntervalsAlignmentAttributePtr>::type_info.name] = attribute;

const std::vector<float> outputLowValues = as_type_ptr<opset1::Constant>(fakeQuantize->get_input_node_shared_ptr(3))->cast_vector<float>();
const std::vector<float> outputHighValues = as_type_ptr<opset1::Constant>(fakeQuantize->get_input_node_shared_ptr(4))->cast_vector<float>();
const std::vector<float> outputLowValues = ov::as_type_ptr<opset1::Constant>(fakeQuantize->get_input_node_shared_ptr(3))->cast_vector<float>();
const std::vector<float> outputHighValues = ov::as_type_ptr<opset1::Constant>(fakeQuantize->get_input_node_shared_ptr(4))->cast_vector<float>();
LayerTransformation::PrecisionDetails preferablePrecision = LayerTransformation::getPrecisionDetails(
fakeQuantize->get_levels(),
outputLowValues,

@ -33,7 +33,7 @@ std::shared_ptr<VariantWrapper<std::shared_ptr<PrecisionsAttribute>>> VariantWra
auto attribute = ngraph::pass::low_precision::make_shared_attribute<PrecisionsAttribute>();
auto wrapper = std::make_shared<ngraph::VariantWrapper<std::shared_ptr<PrecisionsAttribute>>>(attribute);

auto& rt = is_type<opset1::FakeQuantize>(node) ? node->output(0).get_rt_info() : node->get_rt_info();
auto& rt = ov::is_type<opset1::FakeQuantize>(node) ? node->output(0).get_rt_info() : node->get_rt_info();
rt[ngraph::VariantWrapper<std::shared_ptr<PrecisionsAttribute>>::type_info.name] = wrapper;
return wrapper;
}

@ -47,16 +47,16 @@ std::shared_ptr<VariantWrapper<std::shared_ptr<QuantizationAlignmentAttribute>>>

const auto dequantization = NetworkHelper::getDequantization(node, index);
if (!dequantization.empty() &&
(is_type<opset1::Convert>(dequantization.data.get_node())) &&
is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
(ov::is_type<opset1::Convert>(dequantization.data.get_node())) &&
ov::is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
inputNode = dequantization.data.get_node()->get_input_node_shared_ptr(0);
}

if (is_type<opset1::Constant>(inputNode)) {
if (ov::is_type<opset1::Constant>(inputNode)) {
continue;
}

if (!is_type<opset1::FakeQuantize>(inputNode)) {
if (!ov::is_type<opset1::FakeQuantize>(inputNode)) {
leastOneOperationIsNotFakeQuantize = true;
break;
}

@ -38,7 +38,7 @@ bool ShuffleChannelsTransformation::transform(TransformationContext& context, ng
return false;
}

const auto shuffleChannels = as_type_ptr<opset1::ShuffleChannels>(NetworkHelper::separateInStandaloneBranch(m.get_match_root()));
const auto shuffleChannels = ov::as_type_ptr<opset1::ShuffleChannels>(NetworkHelper::separateInStandaloneBranch(m.get_match_root()));
auto dequantization = NetworkHelper::getDequantization(shuffleChannels);

const auto shuffleDequantizationConstant = [&](const std::shared_ptr<Node>& eltwise) {
@ -58,7 +58,7 @@ bool ShuffleChannelsTransformation::transform(TransformationContext& context, ng
} else {
const auto group = shuffleChannels->get_group();
const auto shuffledConst = fold<ngraph::opset1::ShuffleChannels>(normalizedConst, normalizedAxis, group);
return as_type_ptr<opset1::Constant>(shuffledConst);
return ov::as_type_ptr<opset1::Constant>(shuffledConst);
}
}
};
@ -82,7 +82,7 @@ bool ShuffleChannelsTransformation::canBeTransformed(const TransformationContext
return false;
}

const auto shuffleChannels = as_type_ptr<opset1::ShuffleChannels>(op);
const auto shuffleChannels = ov::as_type_ptr<opset1::ShuffleChannels>(op);
if (shuffleChannels == nullptr) {
return false;
}

@ -46,7 +46,7 @@ bool SplitTransformation::transform(TransformationContext& context, ngraph::patt
newSplit->set_friendly_name(split->get_friendly_name());
ngraph::copy_runtime_info(split, newSplit);

const int64_t axis = as_type_ptr<opset1::Constant>(split->get_input_node_shared_ptr(1))->cast_vector<int64_t>()[0];
const int64_t axis = ov::as_type_ptr<opset1::Constant>(split->get_input_node_shared_ptr(1))->cast_vector<int64_t>()[0];
const size_t normalizedAxis = normalize_axis(split->get_friendly_name(), axis, split->get_input_partial_shape(0).rank());
const size_t outputSize = newSplit->get_output_size();

@ -128,7 +128,7 @@ void SplitTransformation::updateOutputs(
const auto lastNode = lastNodes[i];
for (auto output : lastNodes[i]->outputs()) {
for (auto input : output.get_target_inputs()) {
if (is_type<ngraph::opset1::Result>(input.get_node())) {
if (ov::is_type<ngraph::opset1::Result>(input.get_node())) {
originalNode->set_friendly_name(originalName + LayerTransformation::originalLayerPostfix);
lastNode->set_friendly_name(originalName + "." + std::to_string(i));
break;
@ -149,7 +149,7 @@ bool SplitTransformation::canBeTransformed(const TransformationContext& context,
}

const auto consumers = NetworkHelper::consumers(layer);
const auto concat = as_type_ptr<opset1::Concat>(consumers[0]);
const auto concat = ov::as_type_ptr<opset1::Concat>(consumers[0]);

// WA to avoid propagation of dequantization if after Split all consumers are the same unsupported Concat
if (concat && concat->get_axis() != 1ul) {

@ -47,7 +47,7 @@ bool SqueezeTransformation::transform(TransformationContext& context, ngraph::pa
return NetworkHelper::toScalar(dequantizationOpConstant);
}
if (constantShape.size() == inputRankValue) {
return as_type_ptr<opset1::Constant>(fold<opset1::Squeeze>(dequantizationOpConstant, squeeze->get_input_node_shared_ptr(1)));
return ov::as_type_ptr<opset1::Constant>(fold<opset1::Squeeze>(dequantizationOpConstant, squeeze->get_input_node_shared_ptr(1)));
}

return dequantizationOpConstant;

@ -19,7 +19,7 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::low_precision::StridedSliceTransformation,
std::shared_ptr<Node> stridedSliceDeqConstant(
const std::shared_ptr<ngraph::Node> strSlice,
const std::shared_ptr<ngraph::Node> dequantizaitonConstant) {
auto constant = as_type_ptr<ngraph::opset1::Constant>(dequantizaitonConstant);
auto constant = ov::as_type_ptr<ngraph::opset1::Constant>(dequantizaitonConstant);
auto constantShape = constant->get_shape();
if (shape_size(constantShape) == 1ul) {
return NetworkHelper::toScalar(constant);
@ -45,10 +45,10 @@ std::shared_ptr<Node> stridedSliceDeqConstant(
const auto newConstant = fold<ngraph::opset1::Broadcast>(
constant,
ngraph::opset1::Constant::create(ngraph::element::i32, { newConstantShape.size() }, newConstantShape));
constant = as_type_ptr<ngraph::opset1::Constant>(newConstant);
constant = ov::as_type_ptr<ngraph::opset1::Constant>(newConstant);
}

const auto stridedSlice = as_type_ptr<ngraph::opset1::StridedSlice>(strSlice);
const auto stridedSlice = ov::as_type_ptr<ngraph::opset1::StridedSlice>(strSlice);

auto beginMask = stridedSlice->get_begin_mask();
auto endMask = stridedSlice->get_end_mask();
@ -116,7 +116,7 @@ bool StridedSliceTransformation::transform(TransformationContext& context, ngrap
}

bool StridedSliceTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> operation) const {
if (!is_type<ngraph::opset1::StridedSlice>(operation) || NetworkHelper::isDQByDynamicDimension(operation)) {
if (!ov::is_type<ngraph::opset1::StridedSlice>(operation) || NetworkHelper::isDQByDynamicDimension(operation)) {
return false;
}

@ -42,7 +42,7 @@ SubtractTransformation::SubtractTransformation(const Params& params) : LayerTran
}

bool SubtractTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
std::shared_ptr<opset1::Subtract> subtract = as_type_ptr<opset1::Subtract>(m.get_match_root());
std::shared_ptr<opset1::Subtract> subtract = ov::as_type_ptr<opset1::Subtract>(m.get_match_root());
if (!canBeTransformed(context, subtract)) {
return false;
}
@ -54,7 +54,7 @@ bool SubtractTransformation::transform(TransformationContext& context, ngraph::p
// before: Y = X * SC - SH, after: Y = (X - SH') * SC
// X * SC - SH = X * SC - SH' * SC
// SH' = SH / SC
std::shared_ptr<opset1::Subtract> newSubtract = as_type_ptr<opset1::Subtract>(subtract->copy_with_new_inputs({
std::shared_ptr<opset1::Subtract> newSubtract = ov::as_type_ptr<opset1::Subtract>(subtract->copy_with_new_inputs({
dequantization.multiply->get_input_node_shared_ptr(0),
ngraph::pass::low_precision::fold<ngraph::opset1::Divide>(
subtract->get_input_node_shared_ptr(1),
@ -71,7 +71,7 @@ bool SubtractTransformation::transform(TransformationContext& context, ngraph::p
}

if (dequantization.subtract != nullptr) {
std::shared_ptr<opset1::Subtract> newSubtract = as_type_ptr<opset1::Subtract>(subtract->copy_with_new_inputs({
std::shared_ptr<opset1::Subtract> newSubtract = ov::as_type_ptr<opset1::Subtract>(subtract->copy_with_new_inputs({
dequantization.subtract->get_input_node_shared_ptr(0),
ngraph::pass::low_precision::fold<ngraph::opset1::Add>(
subtract->get_input_node_shared_ptr(1),

@ -37,9 +37,9 @@ SubtractMultiplyToMultiplyAddTransformation::SubtractMultiplyToMultiplyAddTransf
FakeQuantizeDequantization get(const std::shared_ptr<Node> node) {
Output<Node> dataNode = node;

const std::shared_ptr<ngraph::opset1::Multiply> multiply = is_type<opset1::Constant>(
const std::shared_ptr<ngraph::opset1::Multiply> multiply = ov::is_type<opset1::Constant>(
dataNode.get_node_shared_ptr()->get_input_node_shared_ptr(1)) ?
as_type_ptr<ngraph::opset1::Multiply>(dataNode.get_node_shared_ptr()) :
ov::as_type_ptr<ngraph::opset1::Multiply>(dataNode.get_node_shared_ptr()) :
nullptr;
std::shared_ptr<opset1::Constant> multiplyConstant;
if (multiply != nullptr) {
@ -48,8 +48,8 @@ FakeQuantizeDequantization get(const std::shared_ptr<Node> node) {
}

const std::shared_ptr<opset1::Subtract> subtract = (dataNode.get_node_shared_ptr()->get_input_size() > 1ul)
&& is_type<opset1::Constant>(dataNode.get_node_shared_ptr()->get_input_node_ptr(1)) ?
as_type_ptr<opset1::Subtract>(dataNode.get_node_shared_ptr()) :
&& ov::is_type<opset1::Constant>(dataNode.get_node_shared_ptr()->get_input_node_ptr(1)) ?
ov::as_type_ptr<opset1::Subtract>(dataNode.get_node_shared_ptr()) :
nullptr;
std::shared_ptr<opset1::Convert> subtractConvert;
std::shared_ptr<opset1::Constant> subtractConstant;
@ -58,7 +58,7 @@ FakeQuantizeDequantization get(const std::shared_ptr<Node> node) {
dataNode = subtract->get_input_source_output(0);
}

const std::shared_ptr<opset1::Convert> convert = as_type_ptr<opset1::Convert>(dataNode.get_node_shared_ptr());
const std::shared_ptr<opset1::Convert> convert = ov::as_type_ptr<opset1::Convert>(dataNode.get_node_shared_ptr());
if (convert != nullptr) {
dataNode = convert->get_input_source_output(0);
}
@ -119,8 +119,8 @@ bool SubtractMultiplyToMultiplyAddTransformation::transform(TransformationContex
std::make_shared<opset1::Constant>(deqPrecision, Shape{}, std::vector<float>{ -1.f })),
foldConvert(dequantization.multiply->get_input_node_shared_ptr(1), deqPrecision));

if (is_type<opset1::Constant>(subtractConstant)) {
std::shared_ptr<opset1::Constant> constant = as_type_ptr<opset1::Constant>(subtractConstant);
if (ov::is_type<opset1::Constant>(subtractConstant)) {
std::shared_ptr<opset1::Constant> constant = ov::as_type_ptr<opset1::Constant>(subtractConstant);
if (NetworkHelper::isScalarLike(constant)) {
subtractConstant = NetworkHelper::toScalar(constant);
}
@ -137,7 +137,7 @@ bool SubtractMultiplyToMultiplyAddTransformation::transform(TransformationContex

lastNewPrecision = precisionAfterDequantization;
} else {
NetworkHelper::setOutDataPrecision(as_type_ptr<opset1::Multiply>(lastNew.get_node_shared_ptr()), precisionAfterDequantization);
NetworkHelper::setOutDataPrecision(ov::as_type_ptr<opset1::Multiply>(lastNew.get_node_shared_ptr()), precisionAfterDequantization);
}

const std::shared_ptr<Node> lastOriginal = dequantization.multiply == nullptr ?

@ -100,7 +100,7 @@ bool TransposeTransformation::canBeTransformed(const TransformationContext& cont
return false;
}

const std::shared_ptr<opset1::Constant> constant = as_type_ptr<opset1::Constant>(op->get_input_node_shared_ptr(1));
const std::shared_ptr<opset1::Constant> constant = ov::as_type_ptr<opset1::Constant>(op->get_input_node_shared_ptr(1));
if (constant == nullptr) {
return false;
}
@ -113,7 +113,7 @@ bool TransposeTransformation::canBeTransformed(const TransformationContext& cont
}
}
if (dequantization.multiply != nullptr) {
const auto mulConst = as_type_ptr<ngraph::op::v0::Constant>(dequantization.multiplyConstant);
const auto mulConst = ov::as_type_ptr<ngraph::op::v0::Constant>(dequantization.multiplyConstant);
if (!NetworkHelper::isScalarLike(mulConst)) {
return false;
}

@ -48,7 +48,7 @@ bool UnsqueezeTransformation::transform(TransformationContext& context, ngraph::
}

if (constantShape.size() == inputRankValue) {
return as_type_ptr<opset1::Constant>(fold<opset1::Unsqueeze>(dequantizationOpConstant, unsqueeze->get_input_node_shared_ptr(1)));
return ov::as_type_ptr<opset1::Constant>(fold<opset1::Unsqueeze>(dequantizationOpConstant, unsqueeze->get_input_node_shared_ptr(1)));
}

return dequantizationOpConstant;

@ -34,7 +34,7 @@ bool WeightableLayerTransformation::canConvolutionBeTransformed(const Transforma
return false;
}

std::shared_ptr<opset1::Reshape> reshapeFromWeights = as_type_ptr<opset1::Reshape>(layer->get_input_node_shared_ptr(1));
std::shared_ptr<opset1::Reshape> reshapeFromWeights = ov::as_type_ptr<opset1::Reshape>(layer->get_input_node_shared_ptr(1));
dequantization = reshapeFromWeights == nullptr ?
NetworkHelper::getDequantization(layer, 1ul) :
NetworkHelper::getDequantization(reshapeFromWeights);
@ -134,20 +134,20 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext

// TODO Implement similar checks in other weightable operations

const std::shared_ptr<opset1::Reshape> reshapeFromWeights = as_type_ptr<opset1::Reshape>(layer->get_input_node_shared_ptr(1));
const std::shared_ptr<opset1::Reshape> reshapeFromWeights = ov::as_type_ptr<opset1::Reshape>(layer->get_input_node_shared_ptr(1));

std::shared_ptr<opset1::FakeQuantize> fqFromWeights;
if (reshapeFromWeights == nullptr) {
fqFromWeights = as_type_ptr<opset1::FakeQuantize>(layer->get_input_node_shared_ptr(1));
fqFromWeights = ov::as_type_ptr<opset1::FakeQuantize>(layer->get_input_node_shared_ptr(1));
if (fqFromWeights == nullptr) {
const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(layer, 1ul);
fqFromWeights = as_type_ptr<opset1::FakeQuantize>(dequantization.data.get_node_shared_ptr());
fqFromWeights = ov::as_type_ptr<opset1::FakeQuantize>(dequantization.data.get_node_shared_ptr());
}
} else {
fqFromWeights = as_type_ptr<opset1::FakeQuantize>(reshapeFromWeights->get_input_node_shared_ptr(0));
fqFromWeights = ov::as_type_ptr<opset1::FakeQuantize>(reshapeFromWeights->get_input_node_shared_ptr(0));
if (fqFromWeights == nullptr) {
const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(reshapeFromWeights, 0ul);
fqFromWeights = as_type_ptr<opset1::FakeQuantize>(dequantization.data.get_node_shared_ptr());
fqFromWeights = ov::as_type_ptr<opset1::FakeQuantize>(dequantization.data.get_node_shared_ptr());
}
}

@ -164,7 +164,7 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext

const auto fqOutPShape = fqFromWeights->get_output_partial_shape(0);
const size_t outChannelsIdx = is_type<opset1::ConvolutionBackpropData>(layer) ? 1ul : 0ul;
const size_t outChannelsIdx = ov::is_type<opset1::ConvolutionBackpropData>(layer) ? 1ul : 0ul;
if (fqOutPShape.rank().is_dynamic() || fqOutPShape[outChannelsIdx].is_dynamic()) {
return false;
}
@ -188,7 +188,7 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext
return false;
}

const auto weightsData = as_type_ptr<opset1::Constant>(dequantizationOnWeights.data.get_node_shared_ptr());
const auto weightsData = ov::as_type_ptr<opset1::Constant>(dequantizationOnWeights.data.get_node_shared_ptr());
if (weightsData == nullptr) {
return false;
}
@ -205,7 +205,7 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext
}
}

const size_t outChannelsIdx = is_type<opset1::ConvolutionBackpropData>(layer) ? 1ul : 0ul;
const size_t outChannelsIdx = ov::is_type<opset1::ConvolutionBackpropData>(layer) ? 1ul : 0ul;
if (dequantizationOnWeights.subtract) {
const auto subConstShape = dequantizationOnWeights.subtractConstant->get_shape();
if (shape_size(subConstShape) > 1ul && shape_size(subConstShape) != subConstShape[outChannelsIdx]) {
@ -227,18 +227,18 @@ bool WeightableLayerTransformation::isQuantizedStatic(const std::shared_ptr<cons
FakeQuantizeDequantization dequantizationOnWeights;
if (reshapeIsRequired) {
const auto reshape = layer->get_input_node_shared_ptr(1);
if (!is_type<opset1::Reshape>(reshape)) {
if (!ov::is_type<opset1::Reshape>(reshape)) {
return false;
}

if (is_type<opset1::FakeQuantize>(reshape->get_input_node_shared_ptr(0))) {
const std::shared_ptr<opset1::FakeQuantize> fq = as_type_ptr<opset1::FakeQuantize>(reshape->get_input_node_shared_ptr(0));
if (ov::is_type<opset1::FakeQuantize>(reshape->get_input_node_shared_ptr(0))) {
const std::shared_ptr<opset1::FakeQuantize> fq = ov::as_type_ptr<opset1::FakeQuantize>(reshape->get_input_node_shared_ptr(0));
return NetworkHelper::isQuantizeSupported(fq);
}

dequantizationOnWeights = NetworkHelper::getDequantization(reshape, 0);
} else if (is_type<opset1::FakeQuantize>(layer->get_input_node_shared_ptr(1))) {
const std::shared_ptr<opset1::FakeQuantize> fq = as_type_ptr<opset1::FakeQuantize>(layer->get_input_node_shared_ptr(1));
} else if (ov::is_type<opset1::FakeQuantize>(layer->get_input_node_shared_ptr(1))) {
const std::shared_ptr<opset1::FakeQuantize> fq = ov::as_type_ptr<opset1::FakeQuantize>(layer->get_input_node_shared_ptr(1));
return NetworkHelper::isQuantizeSupported(fq);
} else {
// TODO: update NetworkHelper API later
@ -251,7 +251,7 @@ bool WeightableLayerTransformation::isQuantizedStatic(const std::shared_ptr<cons
}

// TODO: LPT: is it possible to share with canBeTransformed?
if (is_type<opset1::Constant>(dequantizationOnWeights.data.get_node())) {
if (ov::is_type<opset1::Constant>(dequantizationOnWeights.data.get_node())) {
const ngraph::element::Type weightsDataPrecision = dequantizationOnWeights.data.get_element_type();
if (!DataPrecision::isSupported(weightsDataPrecision)) {
return false;
@ -264,7 +264,7 @@ bool WeightableLayerTransformation::isQuantizedStatic(const std::shared_ptr<cons
}
}

const size_t outChannelsShapeIndex = is_type<opset1::ConvolutionBackpropData>(layer) ? 1ul : 0ul;
const size_t outChannelsShapeIndex = ov::is_type<opset1::ConvolutionBackpropData>(layer) ? 1ul : 0ul;
if (dequantizationOnWeights.subtract) {
const auto subConstShape = dequantizationOnWeights.subtractConstant->get_shape();
if (shape_size(subConstShape) > 1ul && shape_size(subConstShape) != subConstShape[outChannelsShapeIndex]) {
@ -279,7 +279,7 @@ bool WeightableLayerTransformation::isQuantizedStatic(const std::shared_ptr<cons
}

return true;
} else if (is_type<opset1::FakeQuantize>(dequantizationOnWeights.data.get_node())) {
} else if (ov::is_type<opset1::FakeQuantize>(dequantizationOnWeights.data.get_node())) {
return true;
}

@ -321,7 +321,7 @@ bool WeightableLayerTransformation::decomposeFakeQuantizeForWeightsPath(const st
return false;
}

if (as_type_ptr<ngraph::opset1::Constant>(fqOnWeights) == nullptr) {
if (ov::as_type_ptr<ngraph::opset1::Constant>(fqOnWeights) == nullptr) {
THROW_IE_LPT_EXCEPTION(*fqOnWeights) << "FakeQuantize on weights was not folded to constant";
}

@ -329,7 +329,7 @@ bool WeightableLayerTransformation::decomposeFakeQuantizeForWeightsPath(const st
}

bool WeightableLayerTransformation::isGroup(const std::shared_ptr<Node>& layer) {
if (!is_type<opset1::Convolution>(layer) && !is_type<opset1::GroupConvolution>(layer)) {
if (!ov::is_type<opset1::Convolution>(layer) && !ov::is_type<opset1::GroupConvolution>(layer)) {
return false;
}

@ -338,7 +338,7 @@ bool WeightableLayerTransformation::isGroup(const std::shared_ptr<Node>& layer)
}

bool WeightableLayerTransformation::isDepthwise(const std::shared_ptr<Node>& layer) {
if (!as_type_ptr<opset1::Convolution>(layer) && !as_type_ptr<opset1::GroupConvolution>(layer)) {
if (!ov::as_type_ptr<opset1::Convolution>(layer) && !ov::as_type_ptr<opset1::GroupConvolution>(layer)) {
return false;
}

@ -349,10 +349,10 @@ bool WeightableLayerTransformation::isDepthwise(const std::shared_ptr<Node>& lay
}

std::shared_ptr<opset1::FakeQuantize> WeightableLayerTransformation::getFakeQuantizeOnWeights(const std::shared_ptr<Node>& node) {
auto fq = as_type_ptr<opset1::FakeQuantize>(node->get_input_node_shared_ptr(1));
auto fq = ov::as_type_ptr<opset1::FakeQuantize>(node->get_input_node_shared_ptr(1));
// TODO: temporary workaround
if (fq == nullptr) {
fq = as_type_ptr<opset1::FakeQuantize>(node->get_input_node_ptr(1)->get_input_node_shared_ptr(0));
fq = ov::as_type_ptr<opset1::FakeQuantize>(node->get_input_node_ptr(1)->get_input_node_shared_ptr(0));
}

return fq;

@ -19,7 +19,7 @@ Mask::Ptr getMask(const Output<const Node> & output) {
if (!rtInfo.count(MaskWrapper::type_info.name)) return nullptr;

const auto &attr = rtInfo.at(MaskWrapper::type_info.name);
return as_type_ptr<MaskWrapper>(attr)->get();
return ov::as_type_ptr<MaskWrapper>(attr)->get();
}

Mask::Ptr getMask(const Output<Node> & output) {
@ -29,7 +29,7 @@ Mask::Ptr getMask(const Output<Node> & output) {
if (!rtInfo.count(MaskWrapper::type_info.name)) return nullptr;

const auto &attr = rtInfo.at(MaskWrapper::type_info.name);
return as_type_ptr<MaskWrapper>(attr)->get();
return ov::as_type_ptr<MaskWrapper>(attr)->get();
}

void setMask(Output<Node> output, const Mask::Ptr & mask) {

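The `getMask` hunks above show the same cast applied to runtime-info attributes: values in `rt_info` are stored as generic variant pointers and recovered with a checked downcast before use. A condensed sketch of that lookup, reusing the `MaskWrapper` alias from the hunks (the surrounding declarations and the function name `lookupMask` are assumed for illustration):

    Mask::Ptr lookupMask(const ngraph::Output<ngraph::Node>& output) {
        const auto& rtInfo = output.get_rt_info();
        // nothing attached to this output: report that with nullptr
        if (!rtInfo.count(MaskWrapper::type_info.name)) return nullptr;
        const auto& attr = rtInfo.at(MaskWrapper::type_info.name);
        // ov::as_type_ptr yields nullptr if attr is not actually a MaskWrapper
        return ov::as_type_ptr<MaskWrapper>(attr)->get();
    }
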
@ -223,7 +223,7 @@ snippets::Schedule snippets::op::Subgraph::generate(const BlockedShapeVector& ou
// check that body doesn't have constants for scheduling
std::vector<std::shared_ptr<opset1::Constant>> constants;
for (auto op : m_body->get_ordered_ops()) {
if (auto constant = as_type_ptr<opset1::Constant>(op)) {
if (auto constant = ov::as_type_ptr<opset1::Constant>(op)) {
if (ngraph::shape_size(constant->get_shape()) != 1 && constant->get_shape() != Shape()) {
constants.push_back(constant);
}

@ -156,13 +156,13 @@ bool ngraph::snippets::pass::AssignRegisters::run_on_function(std::shared_ptr<Fu
continue;
}
// store effective address and proceed with vector registers
if (as_type_ptr<ngraph::snippets::op::Load>(n) || as_type_ptr<ngraph::snippets::op::BroadcastLoad>(n)) {
if (ov::as_type_ptr<ngraph::snippets::op::Load>(n) || ov::as_type_ptr<ngraph::snippets::op::BroadcastLoad>(n)) {
auto source = n->get_input_source_output(0).get_node_shared_ptr();

if (auto param = as_type_ptr<opset1::Parameter>(source)) {
if (auto param = ov::as_type_ptr<opset1::Parameter>(source)) {
auto ea = reg64_tmp_start+static_cast<int64_t>(f->get_parameter_index(param));
rt["effectiveAddress"] = std::make_shared<VariantWrapper<int64_t>>(VariantWrapper<int64_t>(ea));
} else if (auto constant = as_type_ptr<opset1::Constant>(source)) {
} else if (auto constant = ov::as_type_ptr<opset1::Constant>(source)) {
auto ea = reg64_tmp_start+static_cast<int64_t>(f->get_parameters().size() + f->get_results().size() + 1 + constantID);
rt["effectiveAddress"] = std::make_shared<VariantWrapper<int64_t>>(VariantWrapper<int64_t>(ea));
constantID++;

@ -104,7 +104,7 @@ auto has_subgraph_as_input(std::shared_ptr<Node> node) -> bool {
auto inputs = node->inputs();
for (auto input : inputs) {
auto parent = input.get_source_output().get_node_shared_ptr();
if (!!as_type_ptr<snippets::op::Subgraph>(parent)) {
if (!!ov::as_type_ptr<snippets::op::Subgraph>(parent)) {
return true;
}
}
@ -114,66 +114,66 @@ auto has_subgraph_as_input(std::shared_ptr<Node> node) -> bool {
auto is_lo(std::shared_ptr<Node> n) -> bool {
auto is_lob = [](std::shared_ptr<Node> n) -> bool {
using ngraph::as_type_ptr;
return !!as_type_ptr<opset1::Add>(n)
|| !!as_type_ptr<opset1::Divide>(n)
|| !!as_type_ptr<opset1::Equal>(n)
|| !!as_type_ptr<opset1::FloorMod>(n)
|| !!as_type_ptr<opset1::Greater>(n)
|| !!as_type_ptr<opset1::GreaterEqual>(n)
|| !!as_type_ptr<opset1::Less>(n)
|| !!as_type_ptr<opset1::LessEqual>(n)
|| !!as_type_ptr<opset1::LogicalAnd>(n)
|| !!as_type_ptr<opset1::LogicalOr>(n)
|| !!as_type_ptr<opset1::LogicalXor>(n)
|| !!as_type_ptr<opset1::Maximum>(n)
|| !!as_type_ptr<opset1::Minimum>(n)
|| !!as_type_ptr<opset1::Mod>(n)
|| !!as_type_ptr<opset1::Multiply>(n)
|| !!as_type_ptr<opset1::NotEqual>(n)
|| !!as_type_ptr<opset1::PRelu>(n)
|| !!as_type_ptr<opset1::Power>(n)
|| !!as_type_ptr<opset1::SquaredDifference>(n)
|| !!as_type_ptr<opset1::Subtract>(n)
|| !!as_type_ptr<opset1::Xor>(n);
return !!ov::as_type_ptr<opset1::Add>(n)
|| !!ov::as_type_ptr<opset1::Divide>(n)
|| !!ov::as_type_ptr<opset1::Equal>(n)
|| !!ov::as_type_ptr<opset1::FloorMod>(n)
|| !!ov::as_type_ptr<opset1::Greater>(n)
|| !!ov::as_type_ptr<opset1::GreaterEqual>(n)
|| !!ov::as_type_ptr<opset1::Less>(n)
|| !!ov::as_type_ptr<opset1::LessEqual>(n)
|| !!ov::as_type_ptr<opset1::LogicalAnd>(n)
|| !!ov::as_type_ptr<opset1::LogicalOr>(n)
|| !!ov::as_type_ptr<opset1::LogicalXor>(n)
|| !!ov::as_type_ptr<opset1::Maximum>(n)
|| !!ov::as_type_ptr<opset1::Minimum>(n)
|| !!ov::as_type_ptr<opset1::Mod>(n)
|| !!ov::as_type_ptr<opset1::Multiply>(n)
|| !!ov::as_type_ptr<opset1::NotEqual>(n)
|| !!ov::as_type_ptr<opset1::PRelu>(n)
|| !!ov::as_type_ptr<opset1::Power>(n)
|| !!ov::as_type_ptr<opset1::SquaredDifference>(n)
|| !!ov::as_type_ptr<opset1::Subtract>(n)
|| !!ov::as_type_ptr<opset1::Xor>(n);
};

auto is_lou = [](std::shared_ptr<Node> n) -> bool {
using ngraph::as_type_ptr;
return !!as_type_ptr<opset1::Abs>(n)
// || !!as_type_ptr<opset1::Acos>(n)
// || !!as_type_ptr<opset1::Asin>(n)
// || !!as_type_ptr<opset1::Atan>(n)
// || !!as_type_ptr<opset1::Ceiling>(n) ?
|| !!as_type_ptr<opset1::Clamp>(n)
// || !!as_type_ptr<opset1::Cos>(n)
// || !!as_type_ptr<opset1::Cosh>(n)
|| !!as_type_ptr<opset1::Elu>(n)
|| !!as_type_ptr<opset1::Erf>(n)
|| !!as_type_ptr<opset1::Exp>(n)
// || !!as_type_ptr<opset1::Floor>(n) ?
// || !!as_type_ptr<opset1::Log>(n) ?
|| !!as_type_ptr<opset1::LogicalNot>(n)
|| !!as_type_ptr<opset1::Negative>(n)
|| !!as_type_ptr<opset1::Relu>(n)
// || !!as_type_ptr<opset1::Sign>(n) ?
|| !!as_type_ptr<opset1::Sigmoid>(n)
// || !!as_type_ptr<opset1::Sin>(n)
// || !!as_type_ptr<opset1::Sinh>(n)
|| !!as_type_ptr<opset1::Sqrt>(n)
// || !!as_type_ptr<opset1::Tan>(n)
|| !!as_type_ptr<opset1::Tanh>(n);
return !!ov::as_type_ptr<opset1::Abs>(n)
// || !!ov::as_type_ptr<opset1::Acos>(n)
// || !!ov::as_type_ptr<opset1::Asin>(n)
// || !!ov::as_type_ptr<opset1::Atan>(n)
// || !!ov::as_type_ptr<opset1::Ceiling>(n) ?
|| !!ov::as_type_ptr<opset1::Clamp>(n)
// || !!ov::as_type_ptr<opset1::Cos>(n)
// || !!ov::as_type_ptr<opset1::Cosh>(n)
|| !!ov::as_type_ptr<opset1::Elu>(n)
|| !!ov::as_type_ptr<opset1::Erf>(n)
|| !!ov::as_type_ptr<opset1::Exp>(n)
// || !!ov::as_type_ptr<opset1::Floor>(n) ?
// || !!ov::as_type_ptr<opset1::Log>(n) ?
|| !!ov::as_type_ptr<opset1::LogicalNot>(n)
|| !!ov::as_type_ptr<opset1::Negative>(n)
|| !!ov::as_type_ptr<opset1::Relu>(n)
// || !!ov::as_type_ptr<opset1::Sign>(n) ?
|| !!ov::as_type_ptr<opset1::Sigmoid>(n)
// || !!ov::as_type_ptr<opset1::Sin>(n)
// || !!ov::as_type_ptr<opset1::Sinh>(n)
|| !!ov::as_type_ptr<opset1::Sqrt>(n)
// || !!ov::as_type_ptr<opset1::Tan>(n)
|| !!ov::as_type_ptr<opset1::Tanh>(n);
};

auto is_lot = [](std::shared_ptr<Node> n) -> bool {
using ngraph::as_type_ptr;
return false;
// return !!as_type_ptr<opset1::HardSigmoid>(n) // ternary with 2 constants
// || !!as_type_ptr<opset1::Selu>(n); // ternary with 2 constants / or DW
// return !!ov::as_type_ptr<opset1::HardSigmoid>(n) // ternary with 2 constants
// || !!ov::as_type_ptr<opset1::Selu>(n); // ternary with 2 constants / or DW
};

auto is_fq = [](std::shared_ptr<Node> n) -> bool {
using ngraph::as_type_ptr;
return false;//!!as_type_ptr<opset1::FakeQuantize>(n); // 4->1
return false;//!!ov::as_type_ptr<opset1::FakeQuantize>(n); // 4->1
};

return is_lou(n) || is_lob(n) ||is_lot(n) || is_fq(n);
@ -208,11 +208,11 @@ auto has_supported_in_out(std::shared_ptr<Node> n) -> bool {
}

for (auto in_out : out.get_target_inputs()) {
if (!!as_type_ptr<ngraph::op::v5::Loop>(in_out.get_node()->shared_from_this())) {
if (!!ov::as_type_ptr<ngraph::op::v5::Loop>(in_out.get_node()->shared_from_this())) {
return false;
}

if (!!as_type_ptr<ngraph::op::v0::Result>(in_out.get_node()->shared_from_this())) {
if (!!ov::as_type_ptr<ngraph::op::v0::Result>(in_out.get_node()->shared_from_this())) {
return false;
}
}
@ -305,7 +305,7 @@ ngraph::snippets::pass::AttachToSubgraph::AttachToSubgraph(bool tokenize_by_node
for (auto& input : found.get_target_inputs()) {
remark(13) << input.get_node() << " " << input.get_source_output() << " vs "
<< found << " : " << input.get_index() << " " << found.get_index() << std::endl;
if (as_type_ptr<op::Subgraph>(input.get_node()->shared_from_this()) != nullptr && input.get_source_output() == found) {
if (ov::as_type_ptr<op::Subgraph>(input.get_node()->shared_from_this()) != nullptr && input.get_source_output() == found) {
return input.get_index();
}
}
@ -315,7 +315,7 @@ ngraph::snippets::pass::AttachToSubgraph::AttachToSubgraph(bool tokenize_by_node
for (auto input : inputs) {
auto input_node = input.get_source_output().get_node_shared_ptr();

if (auto subgraph = as_type_ptr<op::Subgraph>(input_node)) {
if (auto subgraph = ov::as_type_ptr<op::Subgraph>(input_node)) {
if (!clones.count(input_node)) {
auto f = ngraph::clone_function(*subgraph->get_body().get());
f->set_friendly_name(subgraph->get_body()->get_friendly_name());
@ -327,7 +327,7 @@ ngraph::snippets::pass::AttachToSubgraph::AttachToSubgraph(bool tokenize_by_node
for (auto input : inputs) {
auto input_node = input.get_source_output().get_node_shared_ptr();

if (auto subgraph = as_type_ptr<op::Subgraph>(input_node)) {
if (auto subgraph = ov::as_type_ptr<op::Subgraph>(input_node)) {
if (!input_subgraphs.count(input_node)) {
input_subgraphs.insert(input_node);

@ -356,7 +356,7 @@ ngraph::snippets::pass::AttachToSubgraph::AttachToSubgraph(bool tokenize_by_node

for (auto output : internal->outputs()) {
for (auto consumer : output.get_target_inputs()) {
if (auto to_replace_with = as_type_ptr<op::Subgraph>(subgraph->input_value(i).get_node_shared_ptr())) {
if (auto to_replace_with = ov::as_type_ptr<op::Subgraph>(subgraph->input_value(i).get_node_shared_ptr())) {
auto other_body = clones[subgraph->input_value(i).get_node_shared_ptr()];
auto other_body_result = other_body->get_results()[consumer.get_source_output().get_index()];
auto result_producer = other_body_result->input(0).get_source_output();

@ -75,7 +75,7 @@ int64_t op::internal::NonMaxSuppressionIEInternal::max_boxes_output_from_input()
}

const auto max_output_boxes_input =
as_type_ptr<op::Constant>(input_value(max_output_boxes_per_class_port).get_node_shared_ptr());
ov::as_type_ptr<op::Constant>(input_value(max_output_boxes_per_class_port).get_node_shared_ptr());
max_output_boxes = max_output_boxes_input->cast_vector<int64_t>().at(0);

return max_output_boxes;

@ -20,7 +20,7 @@ using namespace ngraph;
//`simplify_gather`, optimizes gather if Gather is gathering the
// whole input tensor
static bool simplify_gather(std::shared_ptr<Node> node) {
if (auto gather = as_type_ptr<opset3::Gather>(node)) {
if (auto gather = ov::as_type_ptr<opset3::Gather>(node)) {
// check if we are gathering the whole input
auto data = gather->input_value(0);
auto indices = gather->input_value(1);
@ -56,7 +56,7 @@ static bool simplify_gather(std::shared_ptr<Node> node) {

// check if the indices is constant
auto constant_indices =
as_type_ptr<opset3::Constant>(gather->input_value(1).get_node_shared_ptr());
ov::as_type_ptr<opset3::Constant>(gather->input_value(1).get_node_shared_ptr());
if (!constant_indices) {
return false;
} else {
@ -98,9 +98,9 @@ static bool eliminate_reshape_v1(const std::shared_ptr<Node>& node) {
}
// eliminate redundant reshape, squeeze, or unsqueeze
auto input_node = input.get_node_shared_ptr();
if (as_type_ptr<opset3::Squeeze>(input_node) ||
as_type_ptr<opset3::Unsqueeze>(input_node) ||
as_type_ptr<opset3::Reshape>(input_node)) {
if (ov::as_type_ptr<opset3::Squeeze>(input_node) ||
ov::as_type_ptr<opset3::Unsqueeze>(input_node) ||
ov::as_type_ptr<opset3::Reshape>(input_node)) {
auto shape = node->get_output_shape(0);
std::vector<int64_t> vi;
vi.assign(shape.begin(), shape.end());
@ -151,8 +151,8 @@ static bool replace_squeeze_unsqueeze(const std::shared_ptr<Node>& node) {
auto pat =
opset3::Constant::create<int64_t>(element::i64, Shape{target_shape.size()}, target_shape);

if (is_type<opset3::Reshape>(input) || is_type<opset3::Squeeze>(input) ||
is_type<opset3::Unsqueeze>(input)) {
if (ov::is_type<opset3::Reshape>(input) || ov::is_type<opset3::Squeeze>(input) ||
ov::is_type<opset3::Unsqueeze>(input)) {
reshape = make_shared<opset3::Reshape>(input->input_value(0), pat, false);
} else {
reshape = make_shared<opset3::Reshape>(node->input_value(0), pat, false);
@ -205,11 +205,11 @@ static bool eliminate_unsqueeze(const std::shared_ptr<Node>& node) {
return replace_squeeze_unsqueeze(node);
}

auto unsqueeze = as_type_ptr<opset3::Unsqueeze>(node);
auto unsqueeze = ov::as_type_ptr<opset3::Unsqueeze>(node);
if (unsqueeze == nullptr)
return false;
auto input = unsqueeze->input_value(0).get_node_shared_ptr();
auto squeeze = as_type_ptr<opset3::Squeeze>(input);
auto squeeze = ov::as_type_ptr<opset3::Squeeze>(input);
auto replace_unsqueeze_only = [&](const vector<int64_t>& axes) {
auto axes_const = opset3::Constant::create<int64_t>(element::i64, Shape{axes.size()}, axes);
auto new_unsq = make_shared<opset3::Unsqueeze>(input->input_value(0), axes_const);
@ -253,7 +253,7 @@ static bool eliminate_unsqueeze(const std::shared_ptr<Node>& node) {
return false;
}
// eliminate redundant unsqueeze->unsqueeze
auto unsqueeze_i = as_type_ptr<opset3::Unsqueeze>(input);
auto unsqueeze_i = ov::as_type_ptr<opset3::Unsqueeze>(input);
if (unsqueeze_i) {
const auto& data_shape = unsqueeze_i->input_value(0).get_partial_shape();
if (data_shape.rank().is_dynamic() || out_shape.rank().is_dynamic()) {
@ -273,7 +273,7 @@ static bool eliminate_squeeze(const std::shared_ptr<Node>& node) {
return replace_squeeze_unsqueeze(node);
}

auto squeeze = as_type_ptr<opset3::Squeeze>(node);
auto squeeze = ov::as_type_ptr<opset3::Squeeze>(node);
if (squeeze == nullptr)
return false;
auto input = squeeze->input_value(0).get_node_shared_ptr();
@ -286,7 +286,7 @@ static bool eliminate_squeeze(const std::shared_ptr<Node>& node) {
return false;
};
// eliminate redundant unsqueeze->squeeze
if (auto unsqueeze = as_type_ptr<opset3::Unsqueeze>(input)) {
if (auto unsqueeze = ov::as_type_ptr<opset3::Unsqueeze>(input)) {
PartialShape data_shape;
if (op::is_parameter(input)) {
data_shape = unsqueeze->input(0).get_partial_shape();
@ -324,7 +324,7 @@ static bool eliminate_squeeze(const std::shared_ptr<Node>& node) {
return false;
}
// eliminate redundant squeeze->squeeze
if (auto squeeze_i = as_type_ptr<opset3::Squeeze>(input)) {
if (auto squeeze_i = ov::as_type_ptr<opset3::Squeeze>(input)) {
PartialShape data_shape;
if (op::is_parameter(input)) {
data_shape = squeeze_i->input(0).get_partial_shape();

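The elimination passes above lean on the declaration-in-condition form of the same checked cast: the downcast result is scoped to the `if` and doubles as the success test. A compact sketch of the redundant `Squeeze`/`Unsqueeze` pairing they detect (simplified for illustration, not verbatim source):

    bool is_unsqueeze_of_squeeze(const std::shared_ptr<ngraph::Node>& node) {
        // first checked cast: is this node an Unsqueeze at all?
        if (auto unsqueeze = ov::as_type_ptr<ngraph::opset3::Unsqueeze>(node)) {
            // second checked cast: is its producer a Squeeze?
            auto input = unsqueeze->input_value(0).get_node_shared_ptr();
            return ov::as_type_ptr<ngraph::opset3::Squeeze>(input) != nullptr;
        }
        return false;
    }
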
@ -30,7 +30,7 @@ bool ngraph::pass::SharedShapeOf::run_on_function(std::shared_ptr<ngraph::Functi
if (auto sub_graph = sub_graph_node->get_function())
graph_rewritten |= run_on_function(sub_graph);

if (is_type<ngraph::opset1::ShapeOf>(node) || is_type<ngraph::opset3::ShapeOf>(node))
if (ov::is_type<ngraph::opset1::ShapeOf>(node) || ov::is_type<ngraph::opset3::ShapeOf>(node))
source_to_shape_of[node->input_value(0)].push_back(node);
}

@ -59,12 +59,12 @@ ngraph::pass::GroupedGatherElimination::GroupedGatherElimination() {
while (inputs.size() > i + 1) {
auto curr = inputs[i].get_node_shared_ptr(), next = inputs[i + 1].get_node_shared_ptr();
if (curr->get_type_info() != next->get_type_info() ||
(!is_type<opset1::Gather>(curr) && !is_type<opset7::Gather>(curr)) ||
(!ov::is_type<opset1::Gather>(curr) && !ov::is_type<opset7::Gather>(curr)) ||
(curr->input_value(0) != next->input_value(0))) {
++i;
continue;
} // curr and next are the same type of gather which takes data from the same source
bool is_opset1 = is_type<opset1::Gather>(curr);
bool is_opset1 = ov::is_type<opset1::Gather>(curr);
auto joint_indices = ngraph::op::util::make_try_fold<opset1::Concat>(OutputVector{curr->input_value(1), next->input_value(1)}, 0);
std::shared_ptr<Node> new_gather;
if (is_opset1)
@ -131,7 +131,7 @@ ngraph::pass::SimplifyGatherShapeOf::SimplifyGatherShapeOf() {

ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) {
auto node = m.get_match_root();
auto gather = as_type_ptr<opset3::Gather>(node->input_value(0).get_node_shared_ptr());
auto gather = ov::as_type_ptr<opset3::Gather>(node->input_value(0).get_node_shared_ptr());
if (!gather) {
return false;
}

@ -24,7 +24,7 @@ bool replace_transpose_with_reshape(const std::shared_ptr<Node>& transpose) {

const size_t input_shape_rank = input_shape.rank().get_length();

auto order = as_type_ptr<opset6::Constant>(transpose->input_value(1).get_node_shared_ptr());
auto order = ov::as_type_ptr<opset6::Constant>(transpose->input_value(1).get_node_shared_ptr());
if (!order || !ngraph::shape_size(order->get_shape())) {
return false;
}

@ -29,7 +29,7 @@ ngraph::pass::WeightsDequantizeToFakeQuantize::WeightsDequantizeToFakeQuantize()
callback = [=](ngraph::pattern::Matcher &m) {
const auto &pattern_map = m.get_pattern_map();

const auto &weights_node = as_type_ptr<opset6::Constant>(pattern_map.at(weights));
const auto &weights_node = ov::as_type_ptr<opset6::Constant>(pattern_map.at(weights));
const auto &convert_node = pattern_map.at(convert);
const auto &multiply_node = pattern_map.at(mul);
const auto &scale_node = pattern_map.at(mul_c);

@ -52,8 +52,8 @@ ngraph::pass::DisableConvertConstantFoldingOnConstPath::DisableConvertConstantFo
return false;
}
auto child = target_inputs.begin()->get_node();
if (is_type<ngraph::opset1::Constant>(parent) &&
(is_type<ngraph::opset1::Subtract>(child) || is_type<ngraph::opset1::Multiply>(child))) {
if (ov::is_type<ngraph::opset1::Constant>(parent) &&
(ov::is_type<ngraph::opset1::Subtract>(child) || ov::is_type<ngraph::opset1::Multiply>(child))) {
auto& rtInfo = convert->get_rt_info();
rtInfo["DISABLED_CONSTANT_FOLDING"] = std::make_shared<VariantWrapper<std::string>>("");
return true;

@ -150,9 +150,9 @@ void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space_by_elements() {
auto crops_begin = batch_to_space->input_value(2);
auto crops_end = batch_to_space->input_value(3);

const auto block_const = as_type_ptr<opset3::Constant>(block.get_node_shared_ptr());
const auto crops_begin_const = as_type_ptr<opset3::Constant>(crops_begin.get_node_shared_ptr());
const auto crops_end_const = as_type_ptr<opset3::Constant>(crops_end.get_node_shared_ptr());
const auto block_const = ov::as_type_ptr<opset3::Constant>(block.get_node_shared_ptr());
const auto crops_begin_const = ov::as_type_ptr<opset3::Constant>(crops_begin.get_node_shared_ptr());
const auto crops_end_const = ov::as_type_ptr<opset3::Constant>(crops_end.get_node_shared_ptr());

const std::vector<int64_t> &block_values = block_const->cast_vector<int64_t>();
const std::vector<int64_t> &crops_end_values = crops_end_const->cast_vector<int64_t>();

@ -142,9 +142,9 @@ void ngraph::pass::ConvertSpaceToBatch::convert_space_to_batch_by_elements() {
auto pads_begin = space_to_batch->input_value(2);
auto pads_end = space_to_batch->input_value(3);

const auto block_const = as_type_ptr<opset3::Constant>(block.get_node_shared_ptr());
const auto pads_begin_const = as_type_ptr<opset3::Constant>(pads_begin.get_node_shared_ptr());
const auto pads_end_const = as_type_ptr<opset3::Constant>(pads_end.get_node_shared_ptr());
const auto block_const = ov::as_type_ptr<opset3::Constant>(block.get_node_shared_ptr());
const auto pads_begin_const = ov::as_type_ptr<opset3::Constant>(pads_begin.get_node_shared_ptr());
const auto pads_end_const = ov::as_type_ptr<opset3::Constant>(pads_end.get_node_shared_ptr());

if (!block_const || !pads_begin_const || !pads_end_const) {
return false;

@ -37,15 +37,16 @@ ngraph::pass::ConvertSubtract::ConvertSubtract() {
if (subChildren.size() == 1ul) {
const std::shared_ptr<Node> child = subChildren.begin()->get_node()->shared_from_this();
if (child != nullptr) {
if (is_type<opset1::Convolution>(child) ||
is_type<opset1::ConvolutionBackpropData>(child) ||
is_type<opset1::GroupConvolution>(child) ||
is_type<opset1::GroupConvolutionBackpropData>(child) ||
is_type<opset1::MatMul>(child) ||
(is_type<opset1::Reshape>(child) &&
if (ov::is_type<opset1::Convolution>(child) ||
ov::is_type<opset1::ConvolutionBackpropData>(child) ||
ov::is_type<opset1::GroupConvolution>(child) ||
ov::is_type<opset1::GroupConvolutionBackpropData>(child) ||
ov::is_type<opset1::MatMul>(child) ||
(ov::is_type<opset1::Reshape>(child) &&
(child->output(0).get_target_inputs().size() == 1ul) &&
(is_type<opset1::GroupConvolution>(child->output(0).get_target_inputs().begin()->get_node()->shared_from_this()) ||
is_type<opset1::GroupConvolutionBackpropData>(child->output(0).get_target_inputs().begin()->get_node()->shared_from_this())))) {
(ov::is_type<opset1::GroupConvolution>(child->output(0).get_target_inputs().begin()->get_node()->shared_from_this()) ||
ov::is_type<opset1::GroupConvolutionBackpropData>(child->output(0).get_target_inputs().begin()
->get_node()->shared_from_this())))) {
const auto input1Type = sub->input(0).get_element_type();
const auto input2Type = sub->input(1).get_element_type();
if (((input1Type == element::u8) && (input2Type == element::u8)) ||

@ -27,7 +27,7 @@ std::string ngraph::getDequantization(const std::shared_ptr<ngraph::Node>& node)
if (!rtInfo.count(getDequantizationWrapper::type_info.name)) return "";

const auto& attr = rtInfo.at(getDequantizationWrapper::type_info.name);
DequantizationAttr pp = as_type_ptr<getDequantizationWrapper>(attr)->get();
DequantizationAttr pp = ov::as_type_ptr<getDequantizationWrapper>(attr)->get();
return pp.getDequantizationAttr();
}

@ -41,7 +41,7 @@ std::string ngraph::getFusedNames(const std::shared_ptr<ngraph::Node> &node) {
if (!rtInfo.count(FusedNamesWrapper::type_info.name)) return {};

const auto &attr = rtInfo.at(FusedNamesWrapper::type_info.name);
FusedNames fusedNames = as_type_ptr<FusedNamesWrapper>(attr)->get();
FusedNames fusedNames = ov::as_type_ptr<FusedNamesWrapper>(attr)->get();
return fusedNames.getNames();
}

@ -54,7 +54,7 @@ std::vector<std::string> ngraph::getFusedNamesVector(const std::shared_ptr<ngrap
if (!rtInfo.count(FusedNamesWrapper::type_info.name)) return {};

const auto &attr = rtInfo.at(FusedNamesWrapper::type_info.name);
FusedNames fusedNames = as_type_ptr<FusedNamesWrapper>(attr)->get();
FusedNames fusedNames = ov::as_type_ptr<FusedNamesWrapper>(attr)->get();
return fusedNames.getVectorNames();
}

@ -30,7 +30,7 @@ std::string ngraph::getPrimitivesPriority(const std::shared_ptr<ngraph::Node> &n
if (!rtInfo.count(PrimitivesPriorityWrapper::type_info.name)) return "";

const auto &attr = rtInfo.at(PrimitivesPriorityWrapper::type_info.name);
PrimitivesPriority pp = as_type_ptr<PrimitivesPriorityWrapper>(attr)->get();
PrimitivesPriority pp = ov::as_type_ptr<PrimitivesPriorityWrapper>(attr)->get();
return pp.getPrimitivesPriority();
}

@ -215,13 +215,13 @@ class XmlSerializer : public ngraph::AttributeVisitor {
input.append_attribute("external_port_id").set_value(input_description->m_input_index);
input.append_attribute("internal_layer_id").set_value(parameter_mapping[input_description->m_body_parameter_index].c_str());

if (auto slice_input = as_type_ptr<ngraph::op::util::SubGraphOp::SliceInputDescription>(input_description)) {
if (auto slice_input = ov::as_type_ptr<ngraph::op::util::SubGraphOp::SliceInputDescription>(input_description)) {
input.prepend_attribute("axis").set_value(slice_input->m_axis);
input.append_attribute("start").set_value(slice_input->m_start);
input.append_attribute("end").set_value(slice_input->m_end);
input.append_attribute("stride").set_value(slice_input->m_stride);
input.append_attribute("part_size").set_value(slice_input->m_part_size);
} else if (auto merged_input = as_type_ptr<ngraph::op::util::SubGraphOp::MergedInputDescription>(input_description)) {
} else if (auto merged_input = ov::as_type_ptr<ngraph::op::util::SubGraphOp::MergedInputDescription>(input_description)) {
pugi::xml_node back_edges = m_xml_node.parent().child("back_edges");
if (!back_edges) {
back_edges = m_xml_node.parent().insert_child_after("back_edges", port_map);
@ -249,7 +249,7 @@ class XmlSerializer : public ngraph::AttributeVisitor {
output.append_attribute("external_port_id").set_value(input_count + output_description->m_output_index);
output.append_attribute("internal_layer_id").set_value(result_mapping[output_description->m_body_value_index].c_str());

if (auto concat_output = as_type_ptr<ngraph::op::util::SubGraphOp::ConcatOutputDescription>(output_description)) {
if (auto concat_output = ov::as_type_ptr<ngraph::op::util::SubGraphOp::ConcatOutputDescription>(output_description)) {
output.prepend_attribute("axis").set_value(concat_output->m_axis);
output.append_attribute("start").set_value(concat_output->m_start);
output.append_attribute("end").set_value(concat_output->m_end);

@ -118,13 +118,13 @@ public:
outputShape,
netPrecision,
testValues.actual.fakeQuantizeOnWeights,
as_type_ptr<opset1::Constant>(actualWeights));
ov::as_type_ptr<opset1::Constant>(actualWeights));
} else {
actualWeights = ngraph::builder::subgraph::ConvolutionBackpropDataFunction::getWeights(
outputShape,
netPrecision,
testValues.actual.dequantizationOnWeights,
as_type_ptr<opset1::Constant>(actualWeights));
ov::as_type_ptr<opset1::Constant>(actualWeights));
}

actualFunction = ngraph::builder::subgraph::ConvolutionBackpropDataFunction::getOriginal(
@ -152,13 +152,13 @@ public:
outputShape,
netPrecision,
testValues.actual.fakeQuantizeOnWeights,
as_type_ptr<opset1::Constant>(refWeights));
ov::as_type_ptr<opset1::Constant>(refWeights));
} else {
refWeights = ngraph::builder::subgraph::ConvolutionBackpropDataFunction::getWeights(
outputShape,
netPrecision,
testValues.expected.dequantizationOnWeights,
|
||||
as_type_ptr<opset1::Constant>(refWeights));
|
||||
ov::as_type_ptr<opset1::Constant>(refWeights));
|
||||
}
|
||||
|
||||
referenceFunction = ngraph::builder::subgraph::ConvolutionBackpropDataFunction::getReference(
|
||||
|
@ -83,7 +83,7 @@ public:
|
||||
OutputVector convertedOutput(1);
|
||||
convertOnWeights->constant_fold(convertedOutput, convertOnWeights->input_values());
|
||||
const auto convertedWeights = convertedOutput[0].get_node_shared_ptr();
|
||||
testValues.expected.weights = as_type_ptr<opset1::Constant>(convertedWeights);
|
||||
testValues.expected.weights = ov::as_type_ptr<opset1::Constant>(convertedWeights);
|
||||
}
|
||||
|
||||
referenceFunction = ngraph::builder::subgraph::ConvolutionFunction::getReference(
|
||||
|
@ -0,0 +1,43 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "shared_test_classes/single_layer/ctc_loss.hpp"
|
||||
|
||||
using namespace LayerTestsDefinitions;
|
||||
|
||||
namespace {
|
||||
TEST_P(CTCLossLayerTest, Serialize) { Serialize(); }
|
||||
|
||||
const std::vector<InferenceEngine::Precision> fPrecisions = {
|
||||
InferenceEngine::Precision::FP32,
|
||||
InferenceEngine::Precision::FP16};
|
||||
const std::vector<InferenceEngine::Precision> iPrecisions = {
|
||||
InferenceEngine::Precision::I32,
|
||||
InferenceEngine::Precision::I64};
|
||||
|
||||
const std::vector<bool> preprocessCollapseRepeated = {true, false};
|
||||
const std::vector<bool> ctcMergeRepeated = {true, false};
|
||||
const std::vector<bool> unique = {true, false};
|
||||
|
||||
const auto ctcLossArgsSubset1 = ::testing::Combine(
|
||||
::testing::Values(std::vector<size_t>({2, 3, 3})), // logits shape
|
||||
::testing::ValuesIn(std::vector<std::vector<int>>({{2, 3}, {3, 3}})), // logits length
|
||||
::testing::ValuesIn(std::vector<std::vector<std::vector<int>>>(
|
||||
{{{0, 1, 0}, {1, 0, 1}}, {{0, 1, 2}, {1, 1, 1}}})), // labels
|
||||
::testing::ValuesIn(std::vector<std::vector<int>>({{2, 2}, {2, 1}})), // labels length
|
||||
::testing::Values(2), // blank index
|
||||
::testing::ValuesIn(preprocessCollapseRepeated),
|
||||
::testing::ValuesIn(ctcMergeRepeated),
|
||||
::testing::ValuesIn(unique));
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_CTCLossSerialization, CTCLossLayerTest,
|
||||
::testing::Combine(
|
||||
ctcLossArgsSubset1,
|
||||
::testing::ValuesIn(fPrecisions),
|
||||
::testing::ValuesIn(iPrecisions),
|
||||
::testing::Values(CommonTestUtils::DEVICE_CPU)),
|
||||
CTCLossLayerTest::getTestCaseName);
|
||||
} // namespace
|
@ -9,9 +9,6 @@
|
||||
|
||||
std::vector<std::string> disabledTestPatterns() {
|
||||
return {
|
||||
// TODO: FIX BUG 33375
|
||||
// Disabled due to rare sporadic failures.
|
||||
".*TransformationTests\\.ConstFoldingPriorBoxClustered.*",
|
||||
// TODO: task 32568, enable after supporting constants outputs in plugins
|
||||
".*TransformationTests\\.ConstFoldingPriorBox.*",
|
||||
// azure is failing after #6199
|
||||
|
@ -52,7 +52,7 @@ TEST(TransformationTests, AssignRegisters) {
|
||||
auto& rt = op->get_rt_info();
|
||||
|
||||
if (auto rinfo = rt["reginfo"]) {
|
||||
auto reginfo = as_type_ptr<VariantWrapper<std::vector<size_t>>>(rinfo)->get();
|
||||
auto reginfo = ov::as_type_ptr<VariantWrapper<std::vector<size_t>>>(rinfo)->get();
|
||||
auto reg = reginfo[0];
|
||||
ASSERT_TRUE(ref_registers[op->get_friendly_name()] == reg);
|
||||
total_ops++;
|
||||
@ -126,7 +126,7 @@ TEST(TransformationTests, AssignRegisters2) {
|
||||
auto& rt = op->get_rt_info();
|
||||
|
||||
if (auto rinfo = rt["reginfo"]) {
|
||||
auto reginfo = as_type_ptr<VariantWrapper<std::vector<size_t>>>(rinfo)->get();
|
||||
auto reginfo = ov::as_type_ptr<VariantWrapper<std::vector<size_t>>>(rinfo)->get();
|
||||
auto reg = reginfo[0];
|
||||
ASSERT_TRUE(ref_registers[op->get_friendly_name()] == reg);
|
||||
total_ops++;
|
||||
|
@ -55,7 +55,7 @@ std::shared_ptr<Node> makeElementwise(const std::shared_ptr<ngraph::Node> data,
|
||||
ngraph::pass::low_precision::NetworkHelper::setOutDataPrecision(operation, description.outPrecision);
|
||||
}
|
||||
|
||||
if (is_type<ngraph::opset1::Subtract>(operation) || is_type<ngraph::opset1::Add>(operation)) {
|
||||
if (ov::is_type<ngraph::opset1::Subtract>(operation) || ov::is_type<ngraph::opset1::Add>(operation)) {
|
||||
replace_node(
|
||||
operationConst,
|
||||
ngraph::pass::low_precision::fold<ngraph::opset1::Convert>(operationConst, data->get_output_element_type(0)));
|
||||
|
@ -139,11 +139,11 @@ std::shared_ptr<ngraph::Function> AddFunction::getOriginal(
|
||||
ngraph::ResultVector results {std::make_shared<ngraph::opset1::Result>(output)};
|
||||
ngraph::ParameterVector parameters;
|
||||
if (constInput == -1) {
|
||||
parameters = { as_type_ptr<ngraph::opset1::Parameter>(input1), as_type_ptr<ngraph::opset1::Parameter>(input2) };
|
||||
parameters = { ov::as_type_ptr<ngraph::opset1::Parameter>(input1), ov::as_type_ptr<ngraph::opset1::Parameter>(input2) };
|
||||
} else if (constInput == 0) {
|
||||
parameters = { as_type_ptr<ngraph::opset1::Parameter>(input2) };
|
||||
parameters = { ov::as_type_ptr<ngraph::opset1::Parameter>(input2) };
|
||||
} else if (constInput == 1) {
|
||||
parameters = { as_type_ptr<ngraph::opset1::Parameter>(input1) };
|
||||
parameters = { ov::as_type_ptr<ngraph::opset1::Parameter>(input1) };
|
||||
} else {
|
||||
throw std::runtime_error("Unexpected constant input index");
|
||||
}
|
||||
@ -226,7 +226,7 @@ std::shared_ptr<ngraph::Function> AddFunction::getReference(
|
||||
|
||||
auto dequantizationStructure1 = dequantization1;
|
||||
dequantizationStructure1.multiply.outPrecision = dequantizationAfter.empty() ? precision : element::f32;
|
||||
const auto dequantizationOp1 = is_type<ngraph::opset1::Constant>(parent1) ? parent1 : makeDequantization(parent1, dequantizationStructure1);
|
||||
const auto dequantizationOp1 = ov::is_type<ngraph::opset1::Constant>(parent1) ? parent1 : makeDequantization(parent1, dequantizationStructure1);
|
||||
|
||||
std::shared_ptr<ngraph::Node> input2;
|
||||
if (constInputIndex == 1) {
|
||||
@ -292,7 +292,7 @@ std::shared_ptr<ngraph::Function> AddFunction::getReference(
|
||||
|
||||
auto dequantizationStructure2 = dequantization2;
|
||||
dequantizationStructure2.multiply.outPrecision = dequantizationAfter.empty() ? precision : element::f32;
|
||||
const auto dequantizationOp2 = is_type<ngraph::opset1::Constant>(parent) ? parent : makeDequantization(parent, dequantizationStructure2);
|
||||
const auto dequantizationOp2 = ov::is_type<ngraph::opset1::Constant>(parent) ? parent : makeDequantization(parent, dequantizationStructure2);
|
||||
|
||||
const std::shared_ptr<Node> add = operationType == "Add" ?
|
||||
std::dynamic_pointer_cast<Node>(std::make_shared<ngraph::op::TypeRelaxed<ngraph::opset1::Add>>(
|
||||
@ -325,11 +325,11 @@ std::shared_ptr<ngraph::Function> AddFunction::getReference(
|
||||
|
||||
ngraph::ParameterVector parameters;
|
||||
if (constInputIndex == -1) {
|
||||
parameters = { as_type_ptr<ngraph::opset1::Parameter>(input1), as_type_ptr<ngraph::opset1::Parameter>(input2) };
|
||||
parameters = { ov::as_type_ptr<ngraph::opset1::Parameter>(input1), ov::as_type_ptr<ngraph::opset1::Parameter>(input2) };
|
||||
} else if (constInputIndex == 0) {
|
||||
parameters = { as_type_ptr<ngraph::opset1::Parameter>(input2) };
|
||||
parameters = { ov::as_type_ptr<ngraph::opset1::Parameter>(input2) };
|
||||
} else if (constInputIndex == 1) {
|
||||
parameters = { as_type_ptr<ngraph::opset1::Parameter>(input1) };
|
||||
parameters = { ov::as_type_ptr<ngraph::opset1::Parameter>(input1) };
|
||||
} else {
|
||||
throw std::runtime_error("Unexpected constant input index");
|
||||
}
|
||||
|
@ -238,7 +238,7 @@ std::shared_ptr<ngraph::opset1::FakeQuantize> makeFakeQuantize(
|
||||
const Output<Node>& output,
|
||||
const ngraph::element::Type precision,
|
||||
const FakeQuantizeOnData& fqOnData) {
|
||||
return as_type_ptr<ngraph::opset1::FakeQuantize>(ngraph::builder::makeFakeQuantize(
|
||||
return ov::as_type_ptr<ngraph::opset1::FakeQuantize>(ngraph::builder::makeFakeQuantize(
|
||||
output,
|
||||
precision,
|
||||
fqOnData.quantizationLevel,
|
||||
|
@ -82,7 +82,7 @@ std::shared_ptr<Node> ConvolutionBackpropDataFunction::getWeights(
|
||||
dequantizationStructure.subtract.constantPrecision = dequantizationOnWeights.subtract.constantPrecision;
|
||||
}
|
||||
if (weights->get_element_type().is_real()) {
|
||||
weights = as_type_ptr<opset1::Constant>(fold<opset1::Convert>(weights, netPrecision));
|
||||
weights = ov::as_type_ptr<opset1::Constant>(fold<opset1::Convert>(weights, netPrecision));
|
||||
}
|
||||
const auto dq = makeDequantization(weights, dequantizationStructure);
|
||||
|
||||
|
@ -45,7 +45,7 @@ std::shared_ptr<ngraph::Function> ConvolutionFunction::getOriginal(
|
||||
|
||||
if (weights->cast_vector<float>().size() == 1ul) {
|
||||
auto targetShape = ngraph::Shape{ outputChannelsCount, inputChannelsCount, 1, 1 };
|
||||
weights = as_type_ptr<ngraph::opset1::Constant>(fold<ngraph::opset1::Broadcast>(
|
||||
weights = ov::as_type_ptr<ngraph::opset1::Constant>(fold<ngraph::opset1::Broadcast>(
|
||||
weights, op::Constant::create(ngraph::element::i64, Shape{ targetShape.size() }, targetShape)));
|
||||
}
|
||||
|
||||
@ -234,7 +234,7 @@ std::shared_ptr<ngraph::Function> ConvolutionFunction::getReference(
|
||||
|
||||
if (weights->cast_vector<float>().size() == 1ul) {
|
||||
auto targetShape = ngraph::Shape{ outputChannelsCount, inputChannelsCount, 1, 1 };
|
||||
weights = as_type_ptr<ngraph::opset1::Constant>(fold<ngraph::opset1::Broadcast>(
|
||||
weights = ov::as_type_ptr<ngraph::opset1::Constant>(fold<ngraph::opset1::Broadcast>(
|
||||
weights, op::Constant::create(ngraph::element::i64, Shape{ targetShape.size() }, targetShape)));
|
||||
}
|
||||
|
||||
@ -295,7 +295,7 @@ std::shared_ptr<ngraph::Function> ConvolutionFunction::get(
|
||||
const auto input = std::make_shared<ngraph::opset1::Parameter>(precision, ngraph::Shape(inputShape));
|
||||
input->set_friendly_name("input");
|
||||
|
||||
const std::shared_ptr<ngraph::opset1::FakeQuantize> fqOnData = as_type_ptr<ngraph::opset1::FakeQuantize>(ngraph::builder::makeFakeQuantize(
|
||||
const std::shared_ptr<ngraph::opset1::FakeQuantize> fqOnData = ov::as_type_ptr<ngraph::opset1::FakeQuantize>(ngraph::builder::makeFakeQuantize(
|
||||
input,
|
||||
precision,
|
||||
fakeQuantizeOnData.quantizationLevel,
|
||||
|
@ -40,7 +40,7 @@ std::shared_ptr<ngraph::Function> GetDequantizationFunction::get(
|
||||
|
||||
return std::make_shared<ngraph::Function>(
|
||||
ngraph::ResultVector{ std::make_shared<ngraph::opset1::Result>(parent) },
|
||||
ngraph::ParameterVector{ as_type_ptr<op::v0::Parameter>(input) },
|
||||
ngraph::ParameterVector{ ov::as_type_ptr<op::v0::Parameter>(input) },
|
||||
"DequantizationFunction");
|
||||
}
|
||||
|
||||
@ -84,7 +84,7 @@ std::shared_ptr<ngraph::Function> GetDequantizationFunction::get(
|
||||
|
||||
return std::make_shared<ngraph::Function>(
|
||||
ngraph::ResultVector{ std::make_shared<ngraph::opset1::Result>(parent) },
|
||||
ngraph::ParameterVector{ as_type_ptr<op::v0::Parameter>(input) },
|
||||
ngraph::ParameterVector{ ov::as_type_ptr<op::v0::Parameter>(input) },
|
||||
"DequantizationFunction");
|
||||
}
|
||||
|
||||
@ -113,7 +113,7 @@ std::shared_ptr<ngraph::Function> GetDequantizationFunction::getOriginal(
|
||||
|
||||
return std::make_shared<ngraph::Function>(
|
||||
ngraph::ResultVector{ std::make_shared<ngraph::opset1::Result>(multiply) },
|
||||
ngraph::ParameterVector{ as_type_ptr<op::v0::Parameter>(input) },
|
||||
ngraph::ParameterVector{ ov::as_type_ptr<op::v0::Parameter>(input) },
|
||||
"Dequantization");
|
||||
}
|
||||
|
||||
@ -121,7 +121,7 @@ std::shared_ptr<ngraph::Function> GetDequantizationFunction::getReference(
|
||||
ngraph::pass::low_precision::FakeQuantizeDequantization dequantization) {
|
||||
return std::make_shared<ngraph::Function>(
|
||||
ngraph::ResultVector{ std::make_shared<ngraph::opset1::Result>(dequantization.multiply) },
|
||||
ngraph::ParameterVector{ as_type_ptr<op::v0::Parameter>(dequantization.data.get_node_shared_ptr()) },
|
||||
ngraph::ParameterVector{ ov::as_type_ptr<op::v0::Parameter>(dequantization.data.get_node_shared_ptr()) },
|
||||
"Dequantization");
|
||||
}
|
||||
|
||||
|
@ -425,9 +425,9 @@ public:
|
||||
private:
|
||||
InputModel::Ptr load_impl(const std::vector<std::shared_ptr<Variant>>& params) const override
|
||||
{
|
||||
if (params.size() > 0 && is_type<VariantWrapper<std::string>>(params[0]))
|
||||
if (params.size() > 0 && ov::is_type<VariantWrapper<std::string>>(params[0]))
|
||||
{
|
||||
auto path = as_type_ptr<VariantWrapper<std::string>>(params[0])->get();
|
||||
auto path = ov::as_type_ptr<VariantWrapper<std::string>>(params[0])->get();
|
||||
m_stat.m_load_paths.push_back(path);
|
||||
}
|
||||
return std::make_shared<InputModelMockPy>();
|
||||
@ -436,9 +436,9 @@ private:
|
||||
bool supported_impl(const std::vector<std::shared_ptr<Variant>>& params) const override
|
||||
{
|
||||
m_stat.m_supported++;
|
||||
if (params.size() > 0 && is_type<VariantWrapper<std::string>>(params[0]))
|
||||
if (params.size() > 0 && ov::is_type<VariantWrapper<std::string>>(params[0]))
|
||||
{
|
||||
auto path = as_type_ptr<VariantWrapper<std::string>>(params[0])->get();
|
||||
auto path = ov::as_type_ptr<VariantWrapper<std::string>>(params[0])->get();
|
||||
if (path.find(".test_mo_mock_mdl") != std::string::npos)
|
||||
{
|
||||
return true;
|
||||
|
@ -11,10 +11,8 @@ namespace op {
|
||||
namespace v4 {
|
||||
class NGRAPH_API CTCLoss : public Op {
|
||||
public:
|
||||
static constexpr NodeTypeInfo type_info{"CTCLoss", 0};
|
||||
const NodeTypeInfo& get_type_info() const override {
|
||||
return type_info;
|
||||
}
|
||||
NGRAPH_RTTI_DECLARATION;
|
||||
|
||||
CTCLoss() = default;
|
||||
/// \brief Constructs a CTCLoss operation
|
||||
///
|
||||
|
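Side note on the CTCLoss hunk above: it swaps the hand-written `type_info`/`get_type_info()` pair for the `NGRAPH_RTTI_DECLARATION` macro. A minimal sketch of how the declaration/definition pair fits together; the `MyOp` class, name, and version below are illustrative assumptions, not the exact CTCLoss definition from this commit:

    // my_op.hpp -- the macro declares a static DiscreteTypeInfo plus a
    // get_type_info() override, replacing the boilerplate removed above.
    class NGRAPH_API MyOp : public ngraph::op::Op {
    public:
        NGRAPH_RTTI_DECLARATION;
        MyOp() = default;
    };

    // my_op.cpp -- the matching definition supplies the type name and version.
    NGRAPH_RTTI_DEFINITION(MyOp, "MyOp", 0);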
@ -103,7 +103,7 @@ public:
static std::shared_ptr<T> unique_match(std::shared_ptr<Node> node) {
std::shared_ptr<T> matched;
for (auto arg : node->input_values()) {
if (auto t_casted = as_type_ptr<T>(arg.get_node_shared_ptr())) {
if (auto t_casted = ov::as_type_ptr<T>(arg.get_node_shared_ptr())) {
if (matched) {
throw ngraph_error("There's more than two arguments of the same type");
} else {

@ -29,7 +29,7 @@ PatternValueMap as_pattern_value_map(const PatternMap& pattern_map);
template <typename T>
std::function<bool(std::shared_ptr<Node>)> has_class() {
auto pred = [](std::shared_ptr<Node> node) -> bool {
return is_type<T>(node);
return ov::is_type<T>(node);
};

return pred;
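For context, `has_class<T>()` simply packages the `ov::is_type` test as a reusable predicate for graph pattern matching. A small usage sketch, assuming the `ngraph::pattern` namespace this header declares and `opset1::Relu` as an arbitrary example type:

    #include <ngraph/opsets/opset1.hpp>
    #include <ngraph/pattern/op/pattern.hpp>

    bool node_is_relu(const std::shared_ptr<ngraph::Node>& node) {
        // has_class<T>() returns a std::function predicate; invoking it runs
        // the RTTI-free ov::is_type<T> check against the node.
        auto is_relu = ngraph::pattern::has_class<ngraph::opset1::Relu>();
        return is_relu(node);
    }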
@ -4,62 +4,10 @@

#pragma once

#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "ngraph/ngraph_visibility.hpp"
#include "openvino/core/type.hpp"

namespace ngraph {
/// Supports three functions, is_type<Type>, as_type<Type>, and as_type_ptr<Type> for type-safe
/// dynamic conversions via static_cast/static_ptr_cast without using C++ RTTI.
/// Type must have a static type_info member and a virtual get_type_info() member that
/// returns a reference to its type_info member.

/// Type information for a type system without inheritance; instances have exactly one type not
/// related to any other type.
struct NGRAPH_API DiscreteTypeInfo {
const char* name;
uint64_t version;
// A pointer to a parent type info; used for casting and inheritance traversal, not for
// exact type identification
const DiscreteTypeInfo* parent;

DiscreteTypeInfo() = default;

constexpr DiscreteTypeInfo(const char* _name, uint64_t _version, const DiscreteTypeInfo* _parent = nullptr)
: name(_name),
version(_version),
parent(_parent) {}

bool is_castable(const DiscreteTypeInfo& target_type) const {
return *this == target_type || (parent && parent->is_castable(target_type));
}

// For use as a key
bool operator<(const DiscreteTypeInfo& b) const {
return version < b.version || (version == b.version && strcmp(name, b.name) < 0);
}
bool operator<=(const DiscreteTypeInfo& b) const {
return version < b.version || (version == b.version && strcmp(name, b.name) <= 0);
}
bool operator>(const DiscreteTypeInfo& b) const {
return version < b.version || (version == b.version && strcmp(name, b.name) > 0);
}
bool operator>=(const DiscreteTypeInfo& b) const {
return version < b.version || (version == b.version && strcmp(name, b.name) >= 0);
}
bool operator==(const DiscreteTypeInfo& b) const {
return version == b.version && strcmp(name, b.name) == 0;
}
bool operator!=(const DiscreteTypeInfo& b) const {
return version != b.version || strcmp(name, b.name) != 0;
}
};
using ov::DiscreteTypeInfo;

/// \brief Tests if value is a pointer/shared_ptr that can be statically cast to a
/// Type*/shared_ptr<Type>
@ -68,7 +16,7 @@ typename std::enable_if<
std::is_convertible<decltype(std::declval<Value>()->get_type_info().is_castable(Type::type_info)), bool>::value,
bool>::type
is_type(Value value) {
return value->get_type_info().is_castable(Type::type_info);
return ov::is_type<Type>(value);
}

/// Casts a Value* to a Type* if it is of type Type, nullptr otherwise
@ -76,7 +24,7 @@ template <typename Type, typename Value>
typename std::enable_if<std::is_convertible<decltype(static_cast<Type*>(std::declval<Value>())), Type*>::value,
Type*>::type
as_type(Value value) {
return is_type<Type>(value) ? static_cast<Type*>(value) : nullptr;
return ov::as_type<Type>(value);
}

/// Casts a std::shared_ptr<Value> to a std::shared_ptr<Type> if it is of type
@ -86,13 +34,6 @@ typename std::enable_if<
std::is_convertible<decltype(std::static_pointer_cast<Type>(std::declval<Value>())), std::shared_ptr<Type>>::value,
std::shared_ptr<Type>>::type
as_type_ptr(Value value) {
return is_type<Type>(value) ? std::static_pointer_cast<Type>(value) : std::shared_ptr<Type>();
return ov::as_type_ptr<Type>(value);
}
} // namespace ngraph

namespace std {
template <>
struct NGRAPH_API hash<ngraph::DiscreteTypeInfo> {
size_t operator()(const ngraph::DiscreteTypeInfo& k) const;
};
} // namespace std
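The net effect of this hunk is that `ngraph/type.hpp` becomes a thin compatibility shim: `DiscreteTypeInfo` is now an alias and the three templates forward to their `ov::` counterparts, so existing call sites keep compiling unchanged. A rough sketch of what callers can rely on (the `node` parameter is illustrative):

    #include <ngraph/opsets/opset1.hpp>
    #include <ngraph/type.hpp>

    void equivalent_spellings(const std::shared_ptr<ngraph::Node>& node) {
        // The legacy ngraph:: spelling now forwards to ov::, so both calls
        // return the same pointer (nullptr unless node is an opset1::Constant).
        auto legacy = ngraph::as_type_ptr<ngraph::opset1::Constant>(node);
        auto modern = ov::as_type_ptr<ngraph::opset1::Constant>(node);
        (void)legacy;
        (void)modern;
    }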
98
ngraph/core/include/openvino/core/type.hpp
Normal file
@ -0,0 +1,98 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "openvino/core/core_visibility.hpp"

namespace ov {
/// Supports three functions, ov::is_type<Type>, ov::as_type<Type>, and ov::as_type_ptr<Type> for type-safe
/// dynamic conversions via static_cast/static_ptr_cast without using C++ RTTI.
/// Type must have a static type_info member and a virtual get_type_info() member that
/// returns a reference to its type_info member.

/// Type information for a type system without inheritance; instances have exactly one type not
/// related to any other type.
struct OPENVINO_API DiscreteTypeInfo {
const char* name;
uint64_t version;
// A pointer to a parent type info; used for casting and inheritance traversal, not for
// exact type identification
const DiscreteTypeInfo* parent;

DiscreteTypeInfo() = default;

constexpr DiscreteTypeInfo(const char* _name, uint64_t _version, const DiscreteTypeInfo* _parent = nullptr)
: name(_name),
version(_version),
parent(_parent) {}

bool is_castable(const DiscreteTypeInfo& target_type) const {
return *this == target_type || (parent && parent->is_castable(target_type));
}

// For use as a key
bool operator<(const DiscreteTypeInfo& b) const {
return version < b.version || (version == b.version && strcmp(name, b.name) < 0);
}
bool operator<=(const DiscreteTypeInfo& b) const {
return version < b.version || (version == b.version && strcmp(name, b.name) <= 0);
}
bool operator>(const DiscreteTypeInfo& b) const {
return version < b.version || (version == b.version && strcmp(name, b.name) > 0);
}
bool operator>=(const DiscreteTypeInfo& b) const {
return version < b.version || (version == b.version && strcmp(name, b.name) >= 0);
}
bool operator==(const DiscreteTypeInfo& b) const {
return version == b.version && strcmp(name, b.name) == 0;
}
bool operator!=(const DiscreteTypeInfo& b) const {
return version != b.version || strcmp(name, b.name) != 0;
}
};

/// \brief Tests if value is a pointer/shared_ptr that can be statically cast to a
/// Type*/shared_ptr<Type>
template <typename Type, typename Value>
typename std::enable_if<
std::is_convertible<decltype(std::declval<Value>()->get_type_info().is_castable(Type::type_info)), bool>::value,
bool>::type
is_type(Value value) {
return value->get_type_info().is_castable(Type::type_info);
}

/// Casts a Value* to a Type* if it is of type Type, nullptr otherwise
template <typename Type, typename Value>
typename std::enable_if<std::is_convertible<decltype(static_cast<Type*>(std::declval<Value>())), Type*>::value,
Type*>::type
as_type(Value value) {
return ov::is_type<Type>(value) ? static_cast<Type*>(value) : nullptr;
}

/// Casts a std::shared_ptr<Value> to a std::shared_ptr<Type> if it is of type
/// Type, nullptr otherwise
template <typename Type, typename Value>
typename std::enable_if<
std::is_convertible<decltype(std::static_pointer_cast<Type>(std::declval<Value>())), std::shared_ptr<Type>>::value,
std::shared_ptr<Type>>::type
as_type_ptr(Value value) {
return ov::is_type<Type>(value) ? std::static_pointer_cast<Type>(value) : std::shared_ptr<Type>();
}
} // namespace ov

namespace std {
template <>
struct OPENVINO_API hash<ov::DiscreteTypeInfo> {
size_t operator()(const ov::DiscreteTypeInfo& k) const;
};
} // namespace std
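Since the new header is self-contained, the parent-chain logic in `is_castable` is easy to exercise directly. A minimal sketch with hypothetical type infos (not ones defined by OpenVINO):

    #include <openvino/core/type.hpp>

    // Two made-up type infos; derived_info chains to base_info via parent.
    constexpr ov::DiscreteTypeInfo base_info{"Base", 0};
    constexpr ov::DiscreteTypeInfo derived_info{"Derived", 0, &base_info};

    bool upcast_ok() {
        return derived_info.is_castable(base_info);   // true: parent chain reaches Base
    }

    bool downcast_rejected() {
        return !base_info.is_castable(derived_info);  // true: Base has no link to Derived
    }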
@ -388,7 +388,7 @@ int64_t Function::get_parameter_index(const std::shared_ptr<op::Parameter>& para

int64_t Function::get_result_index(const Output<Node>& value) const {
int64_t pos = 0;
if (is_type<op::Result>(value.get_node_shared_ptr())) {
if (ov::is_type<op::Result>(value.get_node_shared_ptr())) {
auto result = value.get_node_shared_ptr();
for (auto r : get_results()) {
if (r == result) {

@ -366,7 +366,7 @@ std::shared_ptr<ngraph::Function> ngraph::clone_function(const ngraph::Function&
// get cloned function results and sinks and parameters
ResultVector cloned_results;
for (shared_ptr<Node> node : func.get_results()) {
auto result = as_type_ptr<op::Result>(node_map.at(node.get()));
auto result = ov::as_type_ptr<op::Result>(node_map.at(node.get()));
if (!result) {
throw ngraph_error("Results should be of type op::Result");
}
@ -379,7 +379,7 @@ std::shared_ptr<ngraph::Function> ngraph::clone_function(const ngraph::Function&

std::vector<std::shared_ptr<op::Parameter>> cloned_params;
for (const auto& param : func.get_parameters()) {
cloned_params.push_back(as_type_ptr<op::Parameter>(node_map.at(param.get())));
cloned_params.push_back(ov::as_type_ptr<op::Parameter>(node_map.at(param.get())));
}

// create and return cloned function
@ -392,7 +392,7 @@ std::shared_ptr<ngraph::Function> ngraph::clone_function(const ngraph::Function&
}

bool ngraph::is_equal_to_const_value(const std::string& const_value, const Output<Node>& reduce_constant) {
if (auto rc = as_type_ptr<ngraph::op::Constant>(reduce_constant.get_node_shared_ptr())) {
if (auto rc = ov::as_type_ptr<ngraph::op::Constant>(reduce_constant.get_node_shared_ptr())) {
return (rc->get_all_data_elements_bitwise_identical() && rc->convert_value_to_string(0) == const_value);
} else {
return false;
@ -777,17 +777,17 @@ bool ngraph::check_for_cycles(const ngraph::Function* func, ngraph::NodeVector&
bool ngraph::replace_output_update_name(Output<Node> output, const Output<Node>& replacement) {
bool has_result_output = false;
for (auto& target_input : output.get_target_inputs()) {
if (is_type<op::Result>(target_input.get_node())) {
if (ov::is_type<op::Result>(target_input.get_node())) {
// ignore trivial elimination
has_result_output = true;
if (is_type<ngraph::op::Parameter>(replacement.get_node())) {
if (ov::is_type<ngraph::op::Parameter>(replacement.get_node())) {
return false;
}
break;
}
}
if (!has_result_output || replacement.get_node()->get_users().size() == 1) {
if (has_result_output && !is_type<ngraph::op::Parameter>(replacement.get_node())) {
if (has_result_output && !ov::is_type<ngraph::op::Parameter>(replacement.get_node())) {
replacement.get_node()->set_friendly_name(output.get_node()->get_friendly_name());
// Update output tensor name
replacement.get_tensor().set_name(output.get_node()->get_friendly_name());
@ -810,8 +810,8 @@ bool ngraph::replace_output_update_name(Output<Node> output, const Output<Node>&

bool ngraph::replace_node_update_name(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement) {
for (auto& output : target->output(0).get_target_inputs()) {
if (as_type<ngraph::op::Parameter>(replacement->input_value(0).get_node()) &&
as_type<op::Result>(output.get_node())) {
if (ov::as_type<ngraph::op::Parameter>(replacement->input_value(0).get_node()) &&
ov::as_type<op::Result>(output.get_node())) {
return false;
}
}
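These two helpers are the name-preserving elimination primitives: the Parameter/Result guards above make them refuse rewires that would rename a model input or output. A hedged sketch of the typical call, assuming `noop` is a single-input, single-output node being bypassed:

    #include <ngraph/graph_util.hpp>

    // Rewire noop's consumers to its producer; the helper returns false and
    // leaves the graph untouched if that would disturb a Parameter->Result
    // name pairing.
    bool eliminate_noop(const std::shared_ptr<ngraph::Node>& noop) {
        return ngraph::replace_output_update_name(noop->output(0), noop->input_value(0));
    }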
@ -632,7 +632,8 @@ ResultVector ngraph::as_result_vector(const OutputVector& values) {
ResultVector result;
for (auto value : values) {
shared_ptr<Node> node = value.get_node_shared_ptr();
result.push_back(is_type<op::Result>(node) ? as_type_ptr<op::Result>(node) : make_shared<op::Result>(value));
result.push_back(ov::is_type<op::Result>(node) ? ov::as_type_ptr<op::Result>(node)
: make_shared<op::Result>(value));
}
return result;
}
@ -808,14 +809,15 @@ bool Node::constant_fold(OutputVector& output_values, const OutputVector& input_

// If all the inputs are constants, try to evaluate the outputs
bool all_constants = std::all_of(input_values.begin(), input_values.end(), [](const Output<Node>& input) {
return as_type_ptr<op::v0::Constant>(input.get_node_shared_ptr());
return ov::as_type_ptr<op::v0::Constant>(input.get_node_shared_ptr());
});
if (!all_constants)
return false;

HostTensorVector input_tensors;
for (const auto& input : input_values) {
auto host_tensor = make_shared<runtime::HostTensor>(as_type_ptr<op::v0::Constant>(input.get_node_shared_ptr()));
auto host_tensor =
make_shared<runtime::HostTensor>(ov::as_type_ptr<op::v0::Constant>(input.get_node_shared_ptr()));
input_tensors.push_back(host_tensor);
}
HostTensorVector output_tensors;

@ -35,7 +35,7 @@ void op::v3::Assign::validate_and_infer_types() {
}
auto nodes = topological_sort(start_nodes);
for (const auto& node : nodes) {
if (auto read_value = as_type_ptr<op::v3::ReadValue>(node)) {
if (auto read_value = ov::as_type_ptr<op::v3::ReadValue>(node)) {
if (read_value->get_variable_id() == m_variable_id)
m_variable = read_value->get_variable();
}