Removed some global variables from ngraph (#8229)
This commit is contained in:
parent 6dd788ed21
commit e481758f73
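The change replaces uses of the static global objects `ngraph::op::AutoBroadcastSpec::NUMPY` / `::NONE` with the plain enum values `ngraph::op::AutoBroadcastType::NUMPY` / `::NONE`, and turns the remaining `static const` class members (`invalid_node_id`, `max_jump_distance`, `AXIS_NOT_SET_VALUE`) into `static constexpr` ones. A minimal sketch of the core pattern (illustrative code, not taken from the repository), assuming, as in ngraph, that the spec type converts implicitly from the enum:

```cpp
#include <iostream>

// Sketch: replace a static const global object with an enum that converts
// implicitly to the spec type wherever a spec is required.
enum class AutoBroadcastType { NONE, NUMPY };

struct AutoBroadcastSpec {
    // Implicit by design, so call sites can pass the bare enum value.
    AutoBroadcastSpec(AutoBroadcastType t) : m_type(t) {}
    AutoBroadcastType m_type;
    // Before: `static const AutoBroadcastSpec NUMPY;` defined in a .cpp file.
    // Such globals are dynamically initialized, and their initialization
    // order across translation units is unspecified; an enum value has
    // neither problem.
};

void apply(const AutoBroadcastSpec& spec) {
    std::cout << (spec.m_type == AutoBroadcastType::NUMPY ? "NUMPY" : "NONE") << '\n';
}

int main() {
    apply(AutoBroadcastType::NUMPY);  // implicit conversion; no global object needed
}
```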
@@ -28,7 +28,7 @@ bool MKLDNNBroadcastNode::isSupportedOperation(const std::shared_ptr<const ngrap
             errorMessage = "Only opset1 Broadcast operation is supported";
             return false;
         }
-        if (broadcast->get_broadcast_spec() != ngraph::op::AutoBroadcastSpec::NUMPY) {
+        if (broadcast->get_broadcast_spec() != ngraph::op::AutoBroadcastType::NUMPY) {
             errorMessage = "Only NUMPY broadcast type is supported";
             return false;
         }

@@ -1239,7 +1239,8 @@ void MKLDNNEltwiseNode::initSupportedPrimitiveDescriptors() {
 std::vector<VectorDims> MKLDNNEltwiseNode::shapeInfer() const {
     ov::PartialShape outShape = getParentEdgesAtPort(0)[0]->getMemory().GetShape().toPartialShape();
     for (size_t i = 1; i < getParentEdges().size(); i++) {
-        ov::PartialShape::broadcast_merge_into(outShape, getParentEdgesAtPort(i)[0]->getMemory().GetShape().toPartialShape(), ov::op::AutoBroadcastSpec::NUMPY);
+        ov::PartialShape::broadcast_merge_into(outShape, getParentEdgesAtPort(i)[0]->getMemory().GetShape().toPartialShape(),
+                                               ov::op::AutoBroadcastType::NUMPY);
     }
     return {outShape.get_shape()};
 }

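The `shapeInfer()` change above relies on `PartialShape::broadcast_merge_into` to fold every input shape into one output shape under NumPy rules. A minimal usage sketch (illustrative; the include paths are assumed from the current OpenVINO layout, and the bare enum converts implicitly to the `AutoBroadcastSpec` parameter):

```cpp
#include "openvino/core/partial_shape.hpp"
#include "openvino/op/util/attr_types.hpp"

int main() {
    // Dimensions align from the right; a dimension of 1 broadcasts to any size.
    ov::PartialShape out{1, 3, 1};
    bool ok = ov::PartialShape::broadcast_merge_into(
        out, ov::PartialShape{2, 1, 4}, ov::op::AutoBroadcastType::NUMPY);
    // ok == true and out is now {2, 3, 4}; incompatible shapes return false.
    return ok ? 0 : 1;
}
```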
@@ -23,7 +23,7 @@ bool MKLDNNSelectNode::isSupportedOperation(const std::shared_ptr<const ngraph::
             return false;
         }
         const auto broadcast = select->get_auto_broadcast();
-        if (!MKLDNNPlugin::one_of(broadcast, ngraph::op::AutoBroadcastSpec::NONE, ngraph::op::AutoBroadcastSpec::NUMPY)) {
+        if (!MKLDNNPlugin::one_of(broadcast.m_type, ngraph::op::AutoBroadcastType::NONE, ngraph::op::AutoBroadcastType::NUMPY)) {
             errorMessage = "Does not support broadcast type: " + ngraph::as_string(broadcast.m_type);
             return false;
         }

@@ -47,9 +47,9 @@ MKLDNNSelectNode::MKLDNNSelectNode(const std::shared_ptr<ngraph::Node>& op, cons
         IE_THROW() << errorPrefix << " has incorrect number of input/output edges!";

     const auto broadcast = select->get_auto_broadcast();
-    if (broadcast == ngraph::op::AutoBroadcastSpec::NONE) {
+    if (broadcast.m_type == ngraph::op::AutoBroadcastType::NONE) {
         broadcastType = SelectBroadcastType::NONE;
-    } else if (broadcast == ngraph::op::AutoBroadcastSpec::NUMPY) {
+    } else if (broadcast.m_type == ngraph::op::AutoBroadcastType::NUMPY) {
         broadcastType = SelectBroadcastType::NUMPY;
     } else {
         IE_THROW() << errorPrefix << " has unsupported broadcast type: " + ngraph::as_string(broadcast.m_type);

@@ -184,7 +184,7 @@ void snippets::op::Subgraph::canonicalize(const BlockedShapeVector& output_shape
     for (size_t i = 0; i < m_body->get_results().size(); i++) {
         auto result = m_body->get_results()[i];
         PartialShape partial(result->get_shape());
-        bool isCompatible = ngraph::PartialShape::broadcast_merge_into(partial, std::get<0>(output_shapes[i]), ::ngraph::op::AutoBroadcastSpec::NUMPY);
+        bool isCompatible = ngraph::PartialShape::broadcast_merge_into(partial, std::get<0>(output_shapes[i]), ::ngraph::op::AutoBroadcastType::NUMPY);
         // equality check won't pass since we reshape without changes on external snippet edges
         NODE_VALIDATION_CHECK(this, isCompatible, "Inferend and passed results shapes are difference for snippet : ",
                               result->get_shape(), " vs ", std::get<0>(output_shapes[i]), ".");

@@ -118,11 +118,11 @@ auto reset_broacast_config(const std::shared_ptr<ngraph::Node>& op) -> void {

     if (!is_scalar) {
         if (auto binary = std::dynamic_pointer_cast<ngraph::op::util::BinaryElementwiseArithmetic>(op)) {
-            binary->set_autob(ngraph::op::AutoBroadcastSpec::NONE);
+            binary->set_autob(ngraph::op::AutoBroadcastType::NONE);
         } else if (auto binary = std::dynamic_pointer_cast<ngraph::op::util::BinaryElementwiseComparison>(op)) {
-            binary->set_autob(ngraph::op::AutoBroadcastSpec::NONE);
+            binary->set_autob(ngraph::op::AutoBroadcastType::NONE);
         } else if (auto binary = std::dynamic_pointer_cast<ngraph::op::util::BinaryElementwiseLogical>(op)) {
-            binary->set_autob(ngraph::op::AutoBroadcastSpec::NONE);
+            binary->set_autob(ngraph::op::AutoBroadcastType::NONE);
         }
     }
 }

@@ -14,7 +14,7 @@ bool can_eliminate_broadcast(const ngraph::Output<ngraph::Node>& eltwise,
                              const ngraph::PartialShape & input_shape,
                              const ngraph::PartialShape & broadcast_shape) {
     auto b = std::dynamic_pointer_cast<ngraph::op::util::BinaryElementwiseArithmetic>(eltwise.get_node_shared_ptr());
-    if (!b || b->get_autob() == ngraph::op::AutoBroadcastSpec::NONE) {
+    if (!b || b->get_autob() == ngraph::op::AutoBroadcastType::NONE) {
         return false;
     }

@@ -500,7 +500,7 @@ void contract_two_inputs(ngraph::pass::EinsumDecomposition* einsum_decompose_ptr
     auto unsqueeze_output2 = unsqueeze_input(input_node2, unsqueeze_axis2, subgraph_nodes);

     // multiply both operands with broadcasting
-    auto mul = std::make_shared<ngraph::opset7::Multiply>(unsqueeze_output1, unsqueeze_output2, ngraph::op::AutoBroadcastSpec::NUMPY);
+    auto mul = std::make_shared<ngraph::opset7::Multiply>(unsqueeze_output1, unsqueeze_output2, ngraph::op::AutoBroadcastType::NUMPY);

     // update input operand and input subscript for Einsum operation
     update_operands(input_nodes, input_subscripts, input_ind1, input_ind2, mul->output(0), resultant_subscript);

@@ -22,7 +22,7 @@ SliceConfiguration sliceBinaryEltwise(const ngraph::Node& node) {

     const auto& broadcastSpec = eltwise.get_autob();
     auto inputPartialShape = lhsPartialShape;
-    if (broadcastSpec == ngraph::op::AutoBroadcastSpec::NONE) {
+    if (broadcastSpec == ngraph::op::AutoBroadcastType::NONE) {
         ngraph::PartialShape::merge_into(inputPartialShape, rhsPartialShape);
     } else {
         ngraph::PartialShape::broadcast_merge_into(inputPartialShape, rhsPartialShape, broadcastSpec);

@@ -17,7 +17,7 @@ const std::vector<std::vector<std::vector<size_t>>> noneShapes = {

 const auto noneCases = ::testing::Combine(
     ::testing::ValuesIn(noneShapes), ::testing::ValuesIn(inputPrecision),
-    ::testing::Values(ngraph::op::AutoBroadcastSpec::NONE),
+    ::testing::Values(ngraph::op::AutoBroadcastType::NONE),
     ::testing::Values(CommonTestUtils::DEVICE_CPU));

 const std::vector<std::vector<std::vector<size_t>>> numpyShapes = {

@@ -25,7 +25,7 @@ const std::vector<std::vector<std::vector<size_t>>> numpyShapes = {

 const auto numpyCases = ::testing::Combine(
     ::testing::ValuesIn(numpyShapes), ::testing::ValuesIn(inputPrecision),
-    ::testing::Values(ngraph::op::AutoBroadcastSpec::NUMPY),
+    ::testing::Values(ngraph::op::AutoBroadcastType::NUMPY),
     ::testing::Values(CommonTestUtils::DEVICE_CPU));

 TEST_P(SelectLayerTest, Serialize) {

@@ -30,7 +30,7 @@ const std::vector<std::vector<std::vector<size_t>>> noneShapes = {
 const auto noneCases = ::testing::Combine(
     ::testing::ValuesIn(noneShapes),
     ::testing::ValuesIn(inputPrecision),
-    ::testing::Values(ngraph::op::AutoBroadcastSpec::NONE),
+    ::testing::Values(ngraph::op::AutoBroadcastType::NONE),
     ::testing::Values(CommonTestUtils::DEVICE_CPU)
 );

@@ -77,7 +77,7 @@ const std::vector<std::vector<std::vector<size_t>>> numpyShapes = {
 const auto numpyCases = ::testing::Combine(
     ::testing::ValuesIn(numpyShapes),
     ::testing::ValuesIn(inputPrecision),
-    ::testing::Values(ngraph::op::AutoBroadcastSpec::NUMPY),
+    ::testing::Values(ngraph::op::AutoBroadcastType::NUMPY),
     ::testing::Values(CommonTestUtils::DEVICE_CPU)
 );

@@ -141,7 +141,7 @@ std::vector<std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector

 const auto numpyCases = ::testing::Combine(
     ::testing::ValuesIn(inShapesDynamicNumpy),
-    ::testing::Values(ngraph::op::AutoBroadcastSpec::NUMPY)
+    ::testing::Values(ngraph::op::AutoBroadcastType::NUMPY)
 );

 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_dynamic, SelectLayerCPUTest, numpyCases, SelectLayerCPUTest::getTestCaseName);

@@ -166,7 +166,7 @@ std::vector<std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector

 const auto noneCases = ::testing::Combine(
     ::testing::ValuesIn(inShapesDynamicNone),
-    ::testing::Values(ngraph::op::AutoBroadcastSpec::NONE)
+    ::testing::Values(ngraph::op::AutoBroadcastType::NONE)
 );

 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNone_dynamic, SelectLayerCPUTest, noneCases, SelectLayerCPUTest::getTestCaseName);

@@ -28,7 +28,7 @@ const std::vector<std::vector<std::vector<size_t>>> noneShapes = {
 const auto noneCases = ::testing::Combine(
     ::testing::ValuesIn(noneShapes),
     ::testing::ValuesIn(inputPrecision),
-    ::testing::Values(ngraph::op::AutoBroadcastSpec::NONE),
+    ::testing::Values(ngraph::op::AutoBroadcastType::NONE),
     ::testing::Values(CommonTestUtils::DEVICE_GPU)
 );

@@ -59,7 +59,7 @@ const std::vector<std::vector<std::vector<size_t>>> numpyShapes = {
 const auto numpyCases = ::testing::Combine(
     ::testing::ValuesIn(numpyShapes),
     ::testing::ValuesIn(inputPrecision),
-    ::testing::Values(ngraph::op::AutoBroadcastSpec::NUMPY),
+    ::testing::Values(ngraph::op::AutoBroadcastType::NUMPY),
     ::testing::Values(CommonTestUtils::DEVICE_GPU)
 );

@@ -152,7 +152,8 @@ protected:
         const auto& rhsShape = inputShapes.rhs.shape;

         auto broadcastedPartialShape = ngraph::PartialShape{lhsShape};
-        ngraph::PartialShape::broadcast_merge_into(broadcastedPartialShape, ngraph::PartialShape{rhsShape}, ngraph::op::AutoBroadcastSpec::NUMPY);
+        ngraph::PartialShape::broadcast_merge_into(broadcastedPartialShape, ngraph::PartialShape{rhsShape},
+                                                   ngraph::op::AutoBroadcastType::NUMPY);
         const auto& broadcasted = broadcastedPartialShape.to_shape();

         ASSERT_EQ(broadcasted, outputShape);

@@ -64,7 +64,7 @@ void SplitConcatMemory::SetUp() {
     auto spl = std::make_shared<ngraph::opset5::VariadicSplit>(cnc, axis_c, chunk_c);

     auto one = std::make_shared<ngraph::opset5::Constant>(ngPrc, ngraph::Shape{}, 1);
-    auto plus = std::make_shared<ngraph::opset5::Add>(cnc, one, ngraph::op::AutoBroadcastSpec::NUMPY);
+    auto plus = std::make_shared<ngraph::opset5::Add>(cnc, one, ngraph::op::AutoBroadcastType::NUMPY);
     plus->set_friendly_name("plus_one");

     auto mem_w = std::make_shared<ngraph::opset5::Assign>(spl->output(1), "id");

@@ -119,7 +119,7 @@ public:
     /// \brief Finish visiting a nested structure
     virtual std::string finish_structure();
     using node_id_t = std::string;
-    static const node_id_t invalid_node_id;
+    static constexpr char invalid_node_id[] = "";
     /// \brief Associate a node with an id.
     ///
     /// No node may be used as an attribute unless it has already been registered with an ID.

@@ -19,7 +19,7 @@ public:
     BWDCMP_RTTI_DECLARATION;

     /// \brief Constructs an uninitialized addition operation
-    Add() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+    Add() : util::BinaryElementwiseArithmetic(AutoBroadcastType::NUMPY) {}

     /// \brief Constructs an addition operation.
     ///

@@ -15,7 +15,7 @@ public:
     OPENVINO_OP("Divide", "opset1", util::BinaryElementwiseArithmetic, 1);
     BWDCMP_RTTI_DECLARATION;
     /// \brief Constructs a division operation.
-    Divide() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+    Divide() : util::BinaryElementwiseArithmetic(AutoBroadcastType::NUMPY) {}

     /// \brief Constructs a division operation.
     ///

@@ -31,7 +31,7 @@ public:
     OPENVINO_OP("Equal", "opset1", op::util::BinaryElementwiseComparison, 1);
     BWDCMP_RTTI_DECLARATION;
     /// \brief Constructs an equal operation.
-    Equal() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
+    Equal() : util::BinaryElementwiseComparison(AutoBroadcastType::NUMPY) {}
     /// \brief Constructs an equal operation.
     ///
     /// \param arg0 Node that produces the first input tensor.

@@ -17,7 +17,7 @@ public:
     BWDCMP_RTTI_DECLARATION;

     /// \brief Constructs an uninitialized addition operation
-    FloorMod() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY){};
+    FloorMod() : util::BinaryElementwiseArithmetic(AutoBroadcastType::NUMPY) {}

     /// \brief Constructs an Floor Mod operation.
     ///

@@ -14,7 +14,7 @@ class OPENVINO_API Gather : public op::util::GatherBase {
 public:
     OPENVINO_OP("Gather", "opset1", op::util::GatherBase, 1);
     BWDCMP_RTTI_DECLARATION;
-    static const int64_t AXIS_NOT_SET_VALUE = std::numeric_limits<int64_t>::max();
+    static constexpr int64_t AXIS_NOT_SET_VALUE = std::numeric_limits<int64_t>::max();
     Gather() = default;
     /// \param data The tensor from which slices are gathered
     /// \param indices Tensor with indexes to gather

@@ -15,7 +15,7 @@ public:
     OPENVINO_OP("Greater", "opset1", op::util::BinaryElementwiseComparison, 1);
     BWDCMP_RTTI_DECLARATION;
     /// \brief Constructs a greater-than operation.
-    Greater() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
+    Greater() : util::BinaryElementwiseComparison(AutoBroadcastType::NUMPY) {}
     /// \brief Constructs a greater-than operation.
     ///
     /// \param arg0 Node that produces the first input tensor.

@@ -15,7 +15,7 @@ public:
     OPENVINO_OP("GreaterEqual", "opset1", op::util::BinaryElementwiseComparison, 1);
     BWDCMP_RTTI_DECLARATION;
     /// \brief Constructs a greater-than-or-equal operation.
-    GreaterEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
+    GreaterEqual() : util::BinaryElementwiseComparison(AutoBroadcastType::NUMPY) {}
     /// \brief Constructs a greater-than-or-equal operation.
     ///
     /// \param arg0 Node that produces the first input tensor.

@@ -15,7 +15,7 @@ public:
     OPENVINO_OP("Less", "opset1", op::util::BinaryElementwiseComparison, 1);
     BWDCMP_RTTI_DECLARATION;
     /// \brief Constructs a less-than operation.
-    Less() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
+    Less() : util::BinaryElementwiseComparison(AutoBroadcastType::NUMPY) {}
     /// \brief Constructs a less-than operation.
     ///
     /// \param arg0 Node that produces the first input tensor.

@@ -15,7 +15,7 @@ public:
     OPENVINO_OP("LessEqual", "opset1", op::util::BinaryElementwiseComparison, 1);
     BWDCMP_RTTI_DECLARATION;
     /// \brief Constructs a less-than-or-equal operation.
-    LessEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
+    LessEqual() : util::BinaryElementwiseComparison(AutoBroadcastType::NUMPY) {}

     /// \brief Constructs a less-than-or-equal operation.
     ///

@@ -16,7 +16,7 @@ public:
     BWDCMP_RTTI_DECLARATION;

     /// \brief Constructs a maximum operation.
-    Maximum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+    Maximum() : util::BinaryElementwiseArithmetic(AutoBroadcastType::NUMPY) {}

     /// \brief Constructs a maximum operation.
     ///

@@ -16,7 +16,7 @@ public:
     BWDCMP_RTTI_DECLARATION;

     /// \brief Constructs a minimum operation.
-    Minimum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+    Minimum() : util::BinaryElementwiseArithmetic(AutoBroadcastType::NUMPY) {}

     /// \brief Constructs a minimum operation.
     ///

@@ -17,7 +17,7 @@ public:
     BWDCMP_RTTI_DECLARATION;

     /// \brief Constructs a Mod node.
-    Mod() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+    Mod() : util::BinaryElementwiseArithmetic(AutoBroadcastType::NUMPY) {}
     ///
     /// \param A - Dividend tensor
     /// \param B - Divisor tensor

@@ -16,7 +16,7 @@ public:
     BWDCMP_RTTI_DECLARATION;

     /// \brief Constructs a multiplication operation.
-    Multiply() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+    Multiply() : util::BinaryElementwiseArithmetic(AutoBroadcastType::NUMPY) {}

     /// \brief Constructs a multiplication operation.
     ///

@@ -15,7 +15,7 @@ public:
     OPENVINO_OP("NotEqual", "opset1", op::util::BinaryElementwiseComparison, 1);
     BWDCMP_RTTI_DECLARATION;
     /// \brief Constructs a not-equal operation.
-    NotEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
+    NotEqual() : util::BinaryElementwiseComparison(AutoBroadcastType::NUMPY) {}
     /// \brief Constructs a not-equal operation.
     ///
     /// \param arg0 Node that produces the first input tensor.

@@ -30,7 +30,7 @@ public:
     OPENVINO_OP("Power", "opset1", op::util::BinaryElementwiseArithmetic, 1);
     BWDCMP_RTTI_DECLARATION;

-    Power() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+    Power() : util::BinaryElementwiseArithmetic(AutoBroadcastType::NUMPY) {}

     /// \brief Constructs an exponentiation operation.
     ///

@@ -18,7 +18,7 @@ public:
     BWDCMP_RTTI_DECLARATION;

     /// \brief Constrcuts an uninitialized squared difference operation
-    SquaredDifference() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+    SquaredDifference() : util::BinaryElementwiseArithmetic(AutoBroadcastType::NUMPY) {}
     /// \brief Constructs the squared difference operation.
     ///
     /// \param x1 First input tensor

@@ -15,7 +15,7 @@ public:
     OPENVINO_OP("Subtract", "opset1", util::BinaryElementwiseArithmetic, 1);
     BWDCMP_RTTI_DECLARATION;

-    Subtract() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+    Subtract() : util::BinaryElementwiseArithmetic(AutoBroadcastType::NUMPY) {}

     /// \brief Constructs a subtraction operation.
     ///

@@ -170,7 +170,9 @@ struct OPENVINO_API AutoBroadcastSpec {
     bool operator!=(const AutoBroadcastSpec& a) const {
         return !(*this == a);
     }
+    OPENVINO_DEPRECATED("Use ov::op::AutoBroadcastType::NUMPY")
     static const AutoBroadcastSpec NUMPY;
+    OPENVINO_DEPRECATED("Use ov::op::AutoBroadcastType::NONE")
     static const AutoBroadcastSpec NONE;

 private:

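The two statics stay for source compatibility but are now marked deprecated. A sketch of the mechanism, assuming `OPENVINO_DEPRECATED` expands to the standard `[[deprecated]]` attribute (its conventional definition):

```cpp
#define OPENVINO_DEPRECATED(msg) [[deprecated(msg)]]  // assumed expansion

struct AutoBroadcastSpec {
    AutoBroadcastSpec() = default;
    OPENVINO_DEPRECATED("Use ov::op::AutoBroadcastType::NUMPY")
    static const AutoBroadcastSpec NUMPY;  // any remaining use warns at compile time
};
```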
@@ -62,7 +62,7 @@ public:
     bool visit_attributes(AttributeVisitor& visitor) override;

 private:
-    AutoBroadcastSpec m_autob = AutoBroadcastSpec::NUMPY;
+    AutoBroadcastSpec m_autob = AutoBroadcastType::NUMPY;
 };
 } // namespace util
 } // namespace op

@@ -51,7 +51,7 @@ protected:
     visualize_tree_ops_map_t m_ops_to_details;
     node_modifiers_t m_node_modifiers = nullptr;
     bool m_dot_only;
-    static const int max_jump_distance;
+    static constexpr int max_jump_distance = 20;
 };
 } // namespace pass
 } // namespace ov

@@ -144,9 +144,9 @@ void gru_cell(const T* X,
                    z_t.data(),
                    gate_shape,
                    {B_shape[0] / num_b_splits},
-                   op::AutoBroadcastSpec::NUMPY); //
+                   op::AutoBroadcastType::NUMPY); //
     reference::add(X_W_zrh[0].data(), z_t.data(), z_t.data(), gate_shape, gate_shape,
-                   op::AutoBroadcastSpec::NUMPY); //
+                   op::AutoBroadcastType::NUMPY); //
     clip_activation(z_t, activation_f);

     // calculate r_t

@@ -160,8 +160,8 @@ void gru_cell(const T* X,
                    r_t.data(),
                    gate_shape,
                    {B_shape[0] / num_b_splits},
-                   op::AutoBroadcastSpec::NUMPY);
-    reference::add(X_W_zrh[1].data(), r_t.data(), r_t.data(), gate_shape, gate_shape, op::AutoBroadcastSpec::NUMPY);
+                   op::AutoBroadcastType::NUMPY);
+    reference::add(X_W_zrh[1].data(), r_t.data(), r_t.data(), gate_shape, gate_shape, op::AutoBroadcastType::NUMPY);
     clip_activation(r_t, activation_f);

     // calculate h_t

@@ -173,18 +173,18 @@ void gru_cell(const T* X,
                        h_t.data(),
                        gate_shape,
                        {B_shape[0] / num_b_splits},
-                       op::AutoBroadcastSpec::NUMPY);
-        reference::multiply(r_t.data(), h_t.data(), h_t.data(), gate_shape, gate_shape, op::AutoBroadcastSpec::NUMPY);
+                       op::AutoBroadcastType::NUMPY);
+        reference::multiply(r_t.data(), h_t.data(), h_t.data(), gate_shape, gate_shape, op::AutoBroadcastType::NUMPY);
         reference::add(h_t.data(),
                        biases_zrh[2].data(),
                        h_t.data(),
                        gate_shape,
                        {B_shape[0] / num_b_splits},
-                       op::AutoBroadcastSpec::NUMPY);
-        reference::add(X_W_zrh[2].data(), h_t.data(), h_t.data(), gate_shape, gate_shape, op::AutoBroadcastSpec::NUMPY);
+                       op::AutoBroadcastType::NUMPY);
+        reference::add(X_W_zrh[2].data(), h_t.data(), h_t.data(), gate_shape, gate_shape, op::AutoBroadcastType::NUMPY);
     } else {
         // ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh)
-        reference::multiply(r_t.data(), H, h_t.data(), gate_shape, H_shape, op::AutoBroadcastSpec::NUMPY);
+        reference::multiply(r_t.data(), H, h_t.data(), gate_shape, H_shape, op::AutoBroadcastType::NUMPY);
         std::vector<T> matmul(gate_shape_size);
         reference::matmul(h_t.data(), R_zrh[2].data(), matmul.data(), gate_shape, bias_shape, gate_shape, false, true);
         reference::add(matmul.data(),

@@ -192,18 +192,18 @@ void gru_cell(const T* X,
                        h_t.data(),
                        gate_shape,
                        {B_shape[0] / num_b_splits},
-                       op::AutoBroadcastSpec::NUMPY);
-        reference::add(X_W_zrh[2].data(), h_t.data(), h_t.data(), gate_shape, gate_shape, op::AutoBroadcastSpec::NUMPY);
+                       op::AutoBroadcastType::NUMPY);
+        reference::add(X_W_zrh[2].data(), h_t.data(), h_t.data(), gate_shape, gate_shape, op::AutoBroadcastType::NUMPY);
     }
     clip_activation(h_t, activation_g);
     // Ht = (1 - zt) (.) ht + zt (.) Ht-1
     std::vector<T> mul1(gate_shape_size);
     std::vector<T> mul2(gate_shape_size);
     T one[] = {1};
-    reference::subtract(one, z_t.data(), mul1.data(), {1}, gate_shape, op::AutoBroadcastSpec::NUMPY);
-    reference::multiply(mul1.data(), h_t.data(), mul1.data(), gate_shape, gate_shape, op::AutoBroadcastSpec::NUMPY);
-    reference::multiply(z_t.data(), H, mul2.data(), gate_shape, gate_shape, op::AutoBroadcastSpec::NUMPY);
-    reference::add(mul1.data(), mul2.data(), dst_data, gate_shape, gate_shape, op::AutoBroadcastSpec::NUMPY);
+    reference::subtract(one, z_t.data(), mul1.data(), {1}, gate_shape, op::AutoBroadcastType::NUMPY);
+    reference::multiply(mul1.data(), h_t.data(), mul1.data(), gate_shape, gate_shape, op::AutoBroadcastType::NUMPY);
+    reference::multiply(z_t.data(), H, mul2.data(), gate_shape, gate_shape, op::AutoBroadcastType::NUMPY);
+    reference::add(mul1.data(), mul2.data(), dst_data, gate_shape, gate_shape, op::AutoBroadcastType::NUMPY);
 }
 } // namespace reference
 } // namespace runtime

@@ -92,7 +92,7 @@ void lstm_cell(const T* X,

     // Ht-1*(R^T) + Wb + Rb
     std::vector<T> Ht_R_B(all_gates_shape_size);
-    reference::add(Ht_R.data(), B, Ht_R_B.data(), all_gates_shape, B_shape, op::AutoBroadcastSpec::NUMPY);
+    reference::add(Ht_R.data(), B, Ht_R_B.data(), all_gates_shape, B_shape, op::AutoBroadcastType::NUMPY);

     // Xt*(W^T) + Ht-1*(R^T) + Wb + Rb
     std::vector<T> XHB(all_gates_shape_size);

@@ -101,7 +101,7 @@ void lstm_cell(const T* X,
                    XHB.data(),
                    all_gates_shape,
                    all_gates_shape,
-                   op::AutoBroadcastSpec::NUMPY);
+                   op::AutoBroadcastType::NUMPY);

     std::vector<std::vector<T>> X_W_fico(4, std::vector<T>(all_gates_shape_size / 4));
     std::vector<char*> pointers = {reinterpret_cast<char*>(X_W_fico[0].data()),

@@ -139,21 +139,21 @@ void lstm_cell(const T* X,
     std::vector<T> mul2(gate_shape_size);
     std::vector<T> Ct(gate_shape_size);
     // ft (.) Ct-1
-    reference::multiply(X_W_fico[0].data(), C, mul1.data(), gate_shape, C_shape, op::AutoBroadcastSpec::NUMPY);
+    reference::multiply(X_W_fico[0].data(), C, mul1.data(), gate_shape, C_shape, op::AutoBroadcastType::NUMPY);
     // it (.) ct
     reference::multiply(X_W_fico[1].data(),
                         X_W_fico[2].data(),
                         mul2.data(),
                         gate_shape,
                         gate_shape,
-                        op::AutoBroadcastSpec::NUMPY);
+                        op::AutoBroadcastType::NUMPY);
     // Ct = ft (.) Ct-1 + it (.) ct
-    reference::add(mul1.data(), mul2.data(), Ct.data(), gate_shape, gate_shape, op::AutoBroadcastSpec::NUMPY);
+    reference::add(mul1.data(), mul2.data(), Ct.data(), gate_shape, gate_shape, op::AutoBroadcastType::NUMPY);
     std::memcpy(out_Ct, Ct.data(), Ct.size() * sizeof(T));
     clip_activation(Ct, activation_h, false);

     // Ht = ot (.) h(Ct)
-    reference::multiply(X_W_fico[3].data(), Ct.data(), out_Ht, gate_shape, gate_shape, op::AutoBroadcastSpec::NUMPY);
+    reference::multiply(X_W_fico[3].data(), Ct.data(), out_Ht, gate_shape, gate_shape, op::AutoBroadcastType::NUMPY);
 }
 } // namespace reference
 } // namespace runtime

@@ -28,7 +28,7 @@ void mvn(const T* arg,
     auto reduced_shape = reduce(in_shape, reduction_axes, true);
     std::vector<T> tmp_buffer(shape_size(in_shape));
     mean(arg, tmp_buffer.data(), in_shape, reduction_axes);
-    subtract(arg, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastSpec::NUMPY);
+    subtract(arg, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY);

     if (normalize_variance) {
         multiply(out, out, tmp_buffer.data(), shape_size(in_shape));

@@ -40,10 +40,10 @@ void mvn(const T* arg,
                  tmp_buffer.data(),
                  reduced_shape,
                  reduced_shape,
-                 op::AutoBroadcastSpec::NUMPY);
+                 op::AutoBroadcastType::NUMPY);
         sqrt(tmp_buffer.data(), tmp_buffer.data(), shape_size(reduced_shape));

-        divide(out, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastSpec::NUMPY, true);
+        divide(out, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY, true);
     }
 }

@@ -58,7 +58,7 @@ void mvn_6(const T* arg,
     auto reduced_shape = reduce(in_shape, reduction_axes, true);
     std::vector<T> tmp_buffer(shape_size(in_shape));
     mean(arg, tmp_buffer.data(), in_shape, reduction_axes);
-    subtract(arg, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastSpec::NUMPY);
+    subtract(arg, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY);

     if (normalize_variance) {
         multiply(out, out, tmp_buffer.data(), shape_size(in_shape));

@@ -71,7 +71,7 @@ void mvn_6(const T* arg,
                  tmp_buffer.data(),
                  reduced_shape,
                  reduced_shape,
-                 op::AutoBroadcastSpec::NUMPY);
+                 op::AutoBroadcastType::NUMPY);
             sqrt(tmp_buffer.data(), tmp_buffer.data(), shape_size(reduced_shape));
         } else {
             sqrt(mean_value.data(), tmp_buffer.data(), shape_size(reduced_shape));

@@ -80,10 +80,10 @@ void mvn_6(const T* arg,
                  tmp_buffer.data(),
                  reduced_shape,
                  reduced_shape,
-                 op::AutoBroadcastSpec::NUMPY);
+                 op::AutoBroadcastType::NUMPY);
         }

-        divide(out, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastSpec::NUMPY, true);
+        divide(out, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY, true);
     }
 }
 } // namespace reference

@@ -66,7 +66,7 @@ void rnn_cell(const T* X,

     // Ht-1*(R^T) + Wb + Rb
     std::vector<T> Ht_R_B(H_shape[0] * R_shape[0]);
-    reference::add(Ht_R.data(), B, Ht_R_B.data(), {H_shape[0], R_shape[0]}, B_shape, op::AutoBroadcastSpec::NUMPY);
+    reference::add(Ht_R.data(), B, Ht_R_B.data(), {H_shape[0], R_shape[0]}, B_shape, op::AutoBroadcastType::NUMPY);

     // Xt*(W^T) + Ht-1*(R^T) + Wb + Rb
     std::vector<T> i_t(H_shape[0] * R_shape[0]);

@@ -75,7 +75,7 @@ void rnn_cell(const T* X,
                    i_t.data(),
                    {X_shape[0], W_shape[0]},
                    {H_shape[0], R_shape[0]},
-                   op::AutoBroadcastSpec::NUMPY);
+                   op::AutoBroadcastType::NUMPY);

     // f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)
     if (clip != 0.f) {

@@ -531,7 +531,7 @@ HostTensorPtr build_multi_identity(const HostTensorPtr& input_ptr,
         PartialShape output_shape = multi_identity->get_partial_shape();
         PartialShape::broadcast_merge_into(output_shape,
                                            identity->get_partial_shape(),
-                                           ngraph::op::AutoBroadcastSpec::NUMPY);
+                                           ngraph::op::AutoBroadcastType::NUMPY);
         HostTensorPtr mul_output =
             std::shared_ptr<HostTensor>(new HostTensor(identity->get_element_type(), output_shape.get_shape()));
         ngraph::runtime::reference::multiply<T>(multi_identity->get_data_ptr<T>(),

@@ -539,7 +539,7 @@ HostTensorPtr build_multi_identity(const HostTensorPtr& input_ptr,
                                                 mul_output->get_data_ptr<T>(),
                                                 multi_identity->get_shape(),
                                                 identity->get_shape(),
-                                                ngraph::op::AutoBroadcastSpec::NUMPY);
+                                                ngraph::op::AutoBroadcastType::NUMPY);
         multi_identity = mul_output;
     }
     return multi_identity;

@@ -599,7 +599,7 @@ void extract_diagonal(HostTensorVector& inputs, std::vector<std::string>& input_
                                             mul_output->get_data_ptr<T>(),
                                             input_ptr->get_shape(),
                                             multi_identity->get_shape(),
-                                            ngraph::op::AutoBroadcastSpec::NUMPY);
+                                            ngraph::op::AutoBroadcastType::NUMPY);

     HostTensorPtr result = std::shared_ptr<HostTensor>(new HostTensor(input_ptr->get_element_type(), result_shape));
     ngraph::runtime::reference::sum<T>(mul_output->get_data_ptr<T>(),

@@ -798,7 +798,7 @@ void contract_two_inputs(HostTensorVector& inputs,
     PartialShape output_shape = unsqueeze_output1->get_partial_shape();
     PartialShape::broadcast_merge_into(output_shape,
                                        unsqueeze_output2->get_partial_shape(),
-                                       ngraph::op::AutoBroadcastSpec::NUMPY);
+                                       ngraph::op::AutoBroadcastType::NUMPY);
     HostTensorPtr mul_output = std::shared_ptr<HostTensor>(
         new HostTensor(unsqueeze_output1->get_element_type(), output_shape.get_shape()));
     ngraph::runtime::reference::multiply<T>(unsqueeze_output1->get_data_ptr<T>(),

@@ -806,7 +806,7 @@ void contract_two_inputs(HostTensorVector& inputs,
                                             mul_output->get_data_ptr<T>(),
                                             unsqueeze_output1->get_shape(),
                                             unsqueeze_output2->get_shape(),
-                                            ngraph::op::AutoBroadcastSpec::NUMPY);
+                                            ngraph::op::AutoBroadcastType::NUMPY);

     // update input operand and input subscript for Einsum operation
     update_operands(inputs, input_subscripts, input_ind1, input_ind2, mul_output, resultant_subscript);

@@ -880,7 +880,7 @@ void contract_two_inputs(HostTensorVector& inputs,
     // broadcast both inputs to have common sub-shape broadcasted that is needed
     // in case of ellipsis among the common labels
     // ngraph::runtime::reference::broadcast()
-    PartialShape::broadcast_merge_into(common_sub_shape1, common_sub_shape2, ngraph::op::AutoBroadcastSpec::NUMPY);
+    PartialShape::broadcast_merge_into(common_sub_shape1, common_sub_shape2, ngraph::op::AutoBroadcastType::NUMPY);
     Shape common_sub_shape = common_sub_shape1.get_shape();
     broadcast_input<T>(inputs,
                        input_ind1,

@@ -134,7 +134,7 @@ void ov::AttributeVisitor::on_adapter(const string& name, ValueAccessor<std::sha
     on_adapter(name, static_cast<ValueAccessor<void>&>(adapter));
 }

-const ov::AttributeVisitor::node_id_t ov::AttributeVisitor::invalid_node_id = "";
+constexpr char ov::AttributeVisitor::invalid_node_id[];

 void ov::AttributeVisitor::register_node(const std::shared_ptr<ngraph::Node>& node, node_id_t id) {
     if (id == invalid_node_id) {

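Together with the header change earlier in the patch (`static constexpr char invalid_node_id[] = "";`), this removes a dynamically initialized `std::string` global. Under C++14 an odr-used `constexpr` static data member still needs the out-of-line definition seen here; from C++17 the member is implicitly inline and the definition becomes redundant but harmless. A generic sketch of the pattern (hypothetical `Visitor` class, not repository code):

```cpp
// visitor.hpp -- the declaration carries the initializer
struct Visitor {
    static constexpr char invalid_node_id[] = "";
};

// visitor.cpp -- definition needed in C++14 if the member is odr-used,
// e.g. bound to a reference or compared through one.
constexpr char Visitor::invalid_node_id[];
```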
@@ -102,8 +102,6 @@ using namespace std;
  * dealt with, but have not had time to implement them yet. --amprocte
  */

-const int ngraph::pass::VisualizeTree::max_jump_distance = 20;
-
 class HeightMap {
 public:
     HeightMap() {}

@@ -1353,7 +1353,7 @@ shared_ptr<op::Constant> ngraph::get_constant_min_of_type(element::Type_t t) {
 HostTensorPtr equality_mask(const HostTensorPtr& tensor, const shared_ptr<op::Constant>& constant) {
     auto mask = std::make_shared<HostTensor>(element::boolean, tensor->get_shape());
     const auto& param = std::make_shared<op::Parameter>(tensor->get_element_type(), tensor->get_shape());
-    op::v1::Equal(param, constant, ngraph::op::AutoBroadcastSpec::NUMPY)
+    op::v1::Equal(param, constant, ngraph::op::AutoBroadcastType::NUMPY)
         .evaluate({mask}, {tensor, std::make_shared<HostTensor>(constant)});
     return mask;
 }

@@ -1362,7 +1362,7 @@ HostTensorPtr or_tensor(const HostTensorPtr& lhs, const HostTensorPtr& rhs) {
     auto result = std::make_shared<HostTensor>();
     op::v1::LogicalOr(std::make_shared<op::Parameter>(lhs->get_element_type(), lhs->get_shape()),
                       std::make_shared<op::Parameter>(rhs->get_element_type(), rhs->get_shape()),
-                      ngraph::op::AutoBroadcastSpec::NUMPY)
+                      ngraph::op::AutoBroadcastType::NUMPY)
         .evaluate({result}, {lhs, rhs});
     return result;
 }

@@ -14,7 +14,7 @@ namespace onnx_import {
 namespace op {
 namespace set_1 {
 inline OutputVector max(const Node& node) {
-    return variadic::make_ng_variadic_op<default_opset::Maximum>(node, ngraph::op::AutoBroadcastSpec::NONE);
+    return variadic::make_ng_variadic_op<default_opset::Maximum>(node, ngraph::op::AutoBroadcastType::NONE);
 }

 } // namespace set_1

@@ -14,7 +14,7 @@ namespace onnx_import {
 namespace op {
 namespace set_1 {
 inline OutputVector min(const Node& node) {
-    return variadic::make_ng_variadic_op<default_opset::Minimum>(node, ngraph::op::AutoBroadcastSpec::NONE);
+    return variadic::make_ng_variadic_op<default_opset::Minimum>(node, ngraph::op::AutoBroadcastType::NONE);
 }

 } // namespace set_1

@@ -14,7 +14,7 @@ namespace onnx_import {
 namespace op {
 namespace set_1 {
 inline OutputVector sum(const Node& node) {
-    return variadic::make_ng_variadic_op<default_opset::Add>(node, ngraph::op::AutoBroadcastSpec::NONE);
+    return variadic::make_ng_variadic_op<default_opset::Add>(node, ngraph::op::AutoBroadcastType::NONE);
 }

 } // namespace set_1

@@ -26,7 +26,7 @@ namespace variadic {
 template <class T>
 inline OutputVector make_ng_variadic_op(
     const Node& node,
-    const ngraph::op::AutoBroadcastSpec& auto_broadcast = ngraph::op::AutoBroadcastSpec::NUMPY) {
+    const ngraph::op::AutoBroadcastSpec& auto_broadcast = ngraph::op::AutoBroadcastType::NUMPY) {
     const OutputVector ng_inputs{node.get_ng_inputs()};

     // Templated binary operation - Creates Add, Minimum, Maximum, etc.

@@ -65,7 +65,7 @@ TYPED_TEST_P(ArithmeticOperator, no_autobroadcast)
     auto A = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
     auto B = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});

-    const auto op = std::make_shared<TypeParam>(A, B, op::AutoBroadcastSpec::NONE);
+    const auto op = std::make_shared<TypeParam>(A, B, op::AutoBroadcastType::NONE);

     ASSERT_EQ(op->get_element_type(), element::f32);
     ASSERT_EQ(op->get_shape(), (Shape{2, 2}));