Remove obsoleted v0::Product op (#2860)
parent 2e7a17b367
commit 186e00fa2a
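The change below is mechanical throughout: every use of the deprecated op::v0::Product(arg, AxisSet) becomes an op::v1::ReduceProd, which takes its reduction axes as a second input node and whose keep_dims flag defaults to false, eliminating the reduced axes exactly as v0::Product did. A minimal migration sketch assembled from constructs that appear in the hunks below (the wrapper function itself is illustrative, not part of the diff):

    #include <memory>

    #include "ngraph/op/constant.hpp"
    #include "ngraph/op/reduce_prod.hpp"

    using namespace ngraph;

    // Before: std::make_shared<op::Product>(data, AxisSet{0, 1});
    // After:  the axes become a Constant input; keep_dims defaults to false,
    // so the reduced axes are dropped, matching the old v0 behavior.
    std::shared_ptr<Node> product_over_all_axes(const Output<Node>& data)
    {
        auto axes = op::Constant::create(element::i64, Shape{2}, {0, 1});
        return std::make_shared<op::v1::ReduceProd>(data, axes);
    }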
@@ -82,7 +82,8 @@ TEST(algebraic_simplification, multiply_negative_tests) {
 TEST(algebraic_simplification, multiply_prod_negative) {
     auto fconst1 = ngraph::op::Constant::create(element::f64, Shape{2}, {1.0, 1.0});
     auto broadcast = builder::opset1::make_broadcast(fconst1, Shape{2, 5}, AxisSet{1});
-    auto prod_fconst1 = std::make_shared<op::Product>(broadcast, AxisSet{0, 1});
+    auto axes = op::Constant::create(element::i64, {2}, {0, 1});
+    auto prod_fconst1 = std::make_shared<op::v1::ReduceProd>(broadcast, axes);
 
     pass::Manager pass_manager;
     pass_manager.register_pass<pass::AlgebraicSimplification>();
@@ -23,7 +23,6 @@
 #include "ngraph/axis_vector.hpp"
 #include "ngraph/op/concat.hpp"
 #include "ngraph/op/constant.hpp"
-#include "ngraph/op/product.hpp"
 #include "ngraph/op/reduce_prod.hpp"
 #include "ngraph/op/reshape.hpp"
 #include "ngraph/op/shape_of.hpp"
@@ -132,7 +132,6 @@ NGRAPH_OP(Power, ngraph::op::v0, 0)
 NGRAPH_OP(Power, ngraph::op::v1, 1)
 NGRAPH_OP(PriorBox, ngraph::op::v0, 0)
 NGRAPH_OP(PriorBoxClustered, ngraph::op::v0, 0)
-NGRAPH_OP(Product, ngraph::op::v0, 0)
 NGRAPH_OP(Proposal, ngraph::op::v0, 0)
 NGRAPH_OP(Quantize, ngraph::op::v0, 0)
 NGRAPH_OP(QuantizedConvolution, ngraph::op::v0, 0)
@@ -1,68 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include "ngraph/op/util/arithmetic_reduction.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief Product reduction operation.
-            ///
-            /// Reduces the tensor, eliminating the specified reduction axes by taking the product.
-            class NGRAPH_DEPRECATED(
-                "This operation is deprecated and will be removed soon. "
-                "Use v1::ReduceProd instead of it.") NGRAPH_API Product
-                : public util::ArithmeticReduction
-            {
-                NGRAPH_SUPPRESS_DEPRECATED_START
-            public:
-                static constexpr NodeTypeInfo type_info{"Product", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                /// \brief Constructs a product reduction operation.
-                Product() = default;
-                /// \brief Constructs a product reduction operation.
-                ///
-                /// \param arg The tensor to be reduced.
-                /// \param reduction_axes The axis positions (0-based) to be eliminated.
-                Product(const Output<Node>& arg, const AxisSet& reduction_axes);
-                /// \brief Constructs a product reduction operation.
-                ///
-                /// \param arg The tensor to be reduced.
-                /// \param reduction_axes The axis positions (0-based) to be eliminated.
-                Product(const Output<Node>& arg, const Output<Node>& reduction_axes);
-
-                /// \return The default value for Product.
-                virtual std::shared_ptr<Node> get_default_value() const override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                bool evaluate(const HostTensorVector& outputs,
-                              const HostTensorVector& inputs) const override;
-                NGRAPH_SUPPRESS_DEPRECATED_END
-            };
-        }
-        // default opset version
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        using v0::Product;
-        NGRAPH_SUPPRESS_DEPRECATED_END
-    }
-}
@@ -109,7 +109,6 @@
 #include "ngraph/op/prelu.hpp"
 #include "ngraph/op/prior_box.hpp"
 #include "ngraph/op/prior_box_clustered.hpp"
-#include "ngraph/op/product.hpp"
 #include "ngraph/op/proposal.hpp"
 #include "ngraph/op/psroi_pooling.hpp"
 #include "ngraph/op/quantize.hpp"
@@ -1,99 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "ngraph/op/product.hpp"
-#include "itt.hpp"
-#include "ngraph/graph_util.hpp"
-#include "ngraph/runtime/host_tensor.hpp"
-#include "ngraph/runtime/reference/product.hpp"
-#include "ngraph/shape_util.hpp"
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-using namespace std;
-using namespace ngraph;
-
-constexpr NodeTypeInfo op::v0::Product::type_info;
-
-op::v0::Product::Product(const Output<Node>& arg, const AxisSet& reduction_axes)
-    : ArithmeticReduction(arg, reduction_axes)
-{
-    constructor_validate_and_infer_types();
-}
-
-op::v0::Product::Product(const Output<Node>& arg, const Output<Node>& reduction_axes)
-    : ArithmeticReduction(arg, reduction_axes)
-{
-    constructor_validate_and_infer_types();
-}
-
-shared_ptr<Node> op::v0::Product::clone_with_new_inputs(const OutputVector& new_args) const
-{
-    check_new_args_count(this, new_args);
-    return make_shared<op::v0::Product>(new_args.at(0), get_reduction_axes());
-}
-
-shared_ptr<Node> op::v0::Product::get_default_value() const
-{
-    return ngraph::make_constant_from_string("1", get_element_type(), get_shape());
-}
-
-namespace product
-{
-    template <element::Type_t ET>
-    bool evaluate(const HostTensorPtr& arg,
-                  const HostTensorPtr& out,
-                  const AxisSet& axes,
-                  bool keep_dims)
-    {
-        out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
-        runtime::reference::product(
-            arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes, keep_dims);
-        return true;
-    }
-
-    bool evaluate_product(const HostTensorPtr& arg,
-                          const HostTensorPtr& out,
-                          const AxisSet& axes,
-                          bool keep_dims)
-    {
-        bool rc = true;
-        switch (arg->get_element_type())
-        {
-            TYPE_CASE(i32)(arg, out, axes, keep_dims);
-            break;
-            TYPE_CASE(i64)(arg, out, axes, keep_dims);
-            break;
-            TYPE_CASE(u32)(arg, out, axes, keep_dims);
-            break;
-            TYPE_CASE(u64)(arg, out, axes, keep_dims);
-            break;
-            TYPE_CASE(f16)(arg, out, axes, keep_dims);
-            break;
-            TYPE_CASE(f32)(arg, out, axes, keep_dims);
-            break;
-        default: rc = false; break;
-        }
-        return rc;
-    }
-}
-
-bool op::v0::Product::evaluate(const HostTensorVector& outputs,
-                               const HostTensorVector& inputs) const
-{
-    OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Product::evaluate");
-    return product::evaluate_product(inputs[0], outputs[0], get_reduction_axes(), false);
-}
@@ -19,7 +19,6 @@
 #include "ngraph/op/constant.hpp"
 #include "ngraph/op/max.hpp"
 #include "ngraph/op/min.hpp"
-#include "ngraph/op/product.hpp"
 #include "ngraph/op/reduce_mean.hpp"
 #include "ngraph/op/reduce_prod.hpp"
 #include "ngraph/op/reduce_sum.hpp"
@@ -74,14 +73,6 @@ static shared_ptr<op::Constant>
                                    constant->get_output_shape(0),
                                    reduce_min->get_reduction_axes());
     }
-    else if (auto prod = as_type_ptr<op::Product>(reduction_node))
-    {
-        runtime::reference::product<T>(constant->get_data_ptr<T>(),
-                                       data_ptr,
-                                       constant->get_output_shape(0),
-                                       prod->get_reduction_axes(),
-                                       false);
-    }
     else if (auto reduce_prod = as_type_ptr<op::v1::ReduceProd>(reduction_node))
     {
         runtime::reference::product<T>(constant->get_data_ptr<T>(),
@@ -184,8 +175,7 @@ void pass::ConstantFolding::construct_constant_arithmetic_reduction()
         make_shared<pattern::op::Label>(element::i64, Shape{2}, pattern::has_class<op::Constant>());
     auto is_supported_reduction = [](std::shared_ptr<Node> n) {
         return (pattern::has_class<op::Max>()(n) || pattern::has_class<op::Min>()(n) ||
-                pattern::has_class<op::Product>()(n) || pattern::has_class<op::Sum>()(n) ||
-                pattern::has_class<op::v1::ReduceMax>()(n) ||
+                pattern::has_class<op::Sum>()(n) || pattern::has_class<op::v1::ReduceMax>()(n) ||
                pattern::has_class<op::v1::ReduceMin>()(n) ||
                pattern::has_class<op::v1::ReduceProd>()(n) ||
                pattern::has_class<op::v1::ReduceSum>()(n) ||
@@ -318,7 +318,6 @@ set(MULTI_TEST_SRC
     backend/pad.in.cpp
     backend/parameter_as_output.in.cpp
    backend/power.in.cpp
-    backend/product.in.cpp
    backend/quantize_dequantize.in.cpp
    backend/quantized_convolution.in.cpp
    backend/quantized_dot.in.cpp
@@ -182,7 +182,8 @@ static void to_vector_test(const PartialShape& input_pshape, const std::vector<S
     auto x = make_shared<op::Parameter>(element::f32, input_pshape);
 
     shared_ptr<Node> x_new_shape = make_shared<op::v0::ShapeOf>(x);
-    x_new_shape = make_shared<op::Product>(x_new_shape, AxisSet{0});
+    auto axes = op::Constant::create(element::i64, {}, {0});
+    x_new_shape = make_shared<op::v1::ReduceProd>(x_new_shape, axes);
     x_new_shape = make_shared<op::Reshape>(x_new_shape, AxisVector{}, Shape{1});
 
     auto x_reshaped = make_shared<op::v1::Reshape>(x, x_new_shape, true);
@@ -1,430 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "util/all_close.hpp"
-#include "util/all_close_f.hpp"
-#include "util/known_element_types.hpp"
-#include "util/ndarray.hpp"
-#include "util/test_control.hpp"
-#include "util/test_tools.hpp"
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-using namespace std;
-using namespace ngraph;
-
-static string s_manifest = "${MANIFEST}";
-
-// Trivial case with no reduced axes.
-NGRAPH_TEST(${BACKEND_NAME}, product_trivial)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, vector<float>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, shape);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4}), read_vector<float>(result)));
-}
-
-// Failure has been reported at 5D for some reason
-NGRAPH_TEST(${BACKEND_NAME}, product_trivial_5d)
-{
-    Shape shape{2, 2, 2, 2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, vector<float>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                               1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1});
-    auto result = backend->create_tensor(element::f32, shape);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                                 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}),
-                                  read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_to_scalar)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, vector<float>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, Shape{});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{24}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_matrix_columns)
-{
-    Shape shape_a{3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{2};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{15, 48}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_matrix_rows)
-{
-    Shape shape_a{3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{2, 12, 30}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_matrix_rows_zero)
-{
-    Shape shape_a{3, 0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-    copy_data(result, vector<float>({3, 3, 3}));
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_matrix_cols_zero)
-{
-    // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
-    Shape shape_a{0, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{2};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-    copy_data(result, vector<float>({3, 3}));
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 1}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_vector_zero)
-{
-    Shape shape_a{0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-    copy_data(result, vector<float>({3}));
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_matrix_to_scalar_zero_by_zero)
-{
-    Shape shape_a{0, 0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-    copy_data(result, vector<float>({3}));
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_3d_to_matrix_most_sig)
-{
-    Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3, 3};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1 * 10 * 19,
-                                                 2 * 11 * 20,
-                                                 3 * 12 * 21,
-                                                 4 * 13 * 22,
-                                                 5 * 14 * 23,
-                                                 6 * 15 * 24,
-                                                 7 * 16 * 25,
-                                                 8 * 17 * 26,
-                                                 9 * 18 * 27}),
-                                  read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_3d_to_matrix_least_sig)
-{
-    Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3, 3};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{2}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1 * 2 * 3,
-                                                 4 * 5 * 6,
-                                                 7 * 8 * 9,
-                                                 10 * 11 * 12,
-                                                 13 * 14 * 15,
-                                                 16 * 17 * 18,
-                                                 19 * 20 * 21,
-                                                 22 * 23 * 24,
-                                                 25 * 26 * 27}),
-                                  read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_3d_to_vector)
-{
-    Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{1.0f * 10.0f * 19.0f * 4.0f * 13.0f * 22.0f * 7.0f * 16.0f * 25.0f,
-                       2.0f * 11.0f * 20.0f * 5.0f * 14.0f * 23.0f * 8.0f * 17.0f * 26.0f,
-                       3.0f * 12.0f * 21.0f * 6.0f * 15.0f * 24.0f * 9.0f * 18.0f * 27.0f}),
-        read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_3d_to_scalar)
-{
-    Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{};
-    auto f =
-        make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1, 2}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                               13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f(vector<float>{1.0f * 10.0f * 9.0f * 4.0f * 13.0f * 6.0f * 7.0f *
-                                                12.0f * 3.0f * 2.0f * 11.0f * 8.0f * 5.0f * 14.0f *
-                                                5.0f * 8.0f * 11.0f * 2.0f * 3.0f * 12.0f * 7.0f *
-                                                6.0f * 13.0f * 4.0f * 9.0f * 10.0f * 1.0f},
-                                  read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_3d_eliminate_zero_dim)
-{
-    Shape shape_a{3, 0, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3, 2};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    // Overwrite the initial result vector to make sure we're not just coincidentally getting the
-    // right value.
-    copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1, 1, 1, 1}), read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_2d_to_scalar_int32)
-{
-    Shape shape_a{3, 3};
-    auto A = make_shared<op::Parameter>(element::i32, shape_a);
-    Shape shape_rt{};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
-    copy_data(a, vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9});
-    auto result = backend->create_tensor(element::i32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ(vector<int32_t>{1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9}, read_vector<int32_t>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_to_scalar_int32)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape);
-    copy_data(a, vector<int32_t>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i32, Shape{});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<int32_t>{24}), read_vector<int32_t>(result));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_EQ((vector<int32_t>{1, 2, 3, 4}), read_vector<int32_t>(a));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_to_scalar_int8)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::i8, shape);
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i8, shape);
-    copy_data(a, vector<int8_t>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i8, Shape{});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<int8_t>{24}), read_vector<int8_t>(result));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_EQ((vector<int8_t>{1, 2, 3, 4}), read_vector<int8_t>(a));
-}
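A note on the zero-size cases in the deleted tests above: the expected all-ones outputs follow from the convention that reducing over an axis of length zero yields the multiplicative identity, since the empty product satisfies ∏_{x ∈ ∅} x = 1. v1::ReduceProd follows the same convention, so the behavior these tests pinned down is unchanged by the migration.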
@@ -827,33 +827,6 @@ TEST(constant_folding, const_reverse)
     ASSERT_EQ(values_expected, values_out);
 }
 
-TEST(constant_folding, const_product)
-{
-    Shape input_shape{3, 3};
-
-    vector<int32_t> values_in{1, 2, 3, 4, 5, 6, 7, 8, 9};
-    auto constant = op::Constant::create(element::i32, input_shape, values_in);
-    auto convert = make_shared<op::Product>(constant, AxisSet{1});
-    convert->set_friendly_name("test");
-    auto f = make_shared<Function>(convert, ParameterVector{});
-
-    pass::Manager pass_manager;
-    pass_manager.register_pass<pass::ConstantFolding>();
-    pass_manager.run_passes(f);
-
-    ASSERT_EQ(count_ops_of_type<op::Product>(f), 0);
-    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
-
-    auto new_const =
-        as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
-    ASSERT_TRUE(new_const);
-    ASSERT_EQ(new_const->get_friendly_name(), "test");
-    auto values_out = new_const->get_vector<int32_t>();
-
-    vector<int32_t> values_expected{6, 120, 504};
-    ASSERT_EQ(values_expected, values_out);
-}
-
 TEST(constant_folding, const_reduceprod)
 {
     Shape input_shape{3, 3};
@@ -614,9 +614,9 @@ namespace
         EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
     }
 
-    void op_is_Product()
+    void op_is_ReduceProd()
     {
-        op::Product node;
+        op::v1::ReduceProd node;
         EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
         EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
         EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
@@ -1445,7 +1445,6 @@ protected:
         case OP_TYPEID::NotEqual:
         case OP_TYPEID::Or:
         case OP_TYPEID::Power:
-        case OP_TYPEID::Product:
        case OP_TYPEID::Range:
        case OP_TYPEID::Reshape:
        case OP_TYPEID::Result:
@@ -108,7 +108,6 @@ NGRAPH_OP(Parameter, ngraph::op)
 NGRAPH_OP(Power, ngraph::op)
 NGRAPH_OP(PRelu, ngraph::op)
 NGRAPH_OP(PriorBox, ngraph::op)
-NGRAPH_OP(Product, ngraph::op)
 NGRAPH_OP(Quantize, ngraph::op)
 NGRAPH_OP(QuantizedConvolution, ngraph::op)
 NGRAPH_OP(QuantizedDot, ngraph::op)
@@ -417,13 +417,6 @@ namespace opset0_downgrade
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::v1::ReduceProd> node)
-    {
-        auto replacement_node = op_cast_reduction_node<op::v0::Product, op::v1::ReduceProd>(node);
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::v1::ReduceSum> node)
     {
         auto replacement_node = op_cast_reduction_node<op::v0::Sum, op::v1::ReduceSum>(node);
@@ -333,15 +333,6 @@ namespace opset1_upgrade
         return op_cast_binary_elementwise_node<op::v0::Power, op::v1::Power>(node);
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Product> node)
-    {
-        bool keep_dims = false;
-        auto replacement_node =
-            make_shared<op::v1::ReduceProd>(node->input_value(0), node->input_value(1), keep_dims);
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::Reverse> node)
     {
         // creates a Constant node from the v0::Reverse reversed_axes attribute
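For reference, the upgrade conversion removed above mapped op::Product onto op::v1::ReduceProd with keep_dims = false, so the reduced axes are dropped from the output shape exactly as the v0 semantics require. A minimal sketch of the equivalent direct construction (the wrapper function, variable names, and shapes are illustrative, not from the diff):

    #include "ngraph/op/constant.hpp"
    #include "ngraph/op/parameter.hpp"
    #include "ngraph/op/reduce_prod.hpp"

    using namespace ngraph;

    // Reducing a [3, 3] tensor over axis 1 with keep_dims = false yields
    // shape [3], matching what op::Product(data, AxisSet{1}) used to produce.
    std::shared_ptr<Node> row_products()
    {
        auto data = std::make_shared<op::Parameter>(element::f32, Shape{3, 3});
        auto axes = op::Constant::create(element::i64, Shape{1}, {1});
        return std::make_shared<op::v1::ReduceProd>(data, axes, false);
    }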