Removed ArgMax ArgMin (#1181)

Ilya Churaev 2020-07-07 10:07:08 +03:00 committed by GitHub
parent 56916ace61
commit 0602a61a30
18 changed files with 1 addition and 1287 deletions

View File

@@ -127,10 +127,6 @@ set (SRC
op/and.hpp
op/any.cpp
op/any.hpp
op/argmax.cpp
op/argmax.hpp
op/argmin.cpp
op/argmin.hpp
op/asin.cpp
op/asin.hpp
op/asinh.cpp

View File

@@ -1,48 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/argmax.hpp"
#include "ngraph/graph_util.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::ArgMax::type_info;
op::ArgMax::ArgMax(const Output<Node>& arg, size_t axis, const element::Type& index_element_type)
: op::util::IndexReduction(arg, axis, index_element_type)
{
constructor_validate_and_infer_types();
}
bool op::ArgMax::visit_attributes(AttributeVisitor& visitor)
{
IndexReduction::visit_attributes(visitor);
return true;
}
shared_ptr<Node> op::ArgMax::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<ArgMax>(new_args.at(0), m_axis, this->get_element_type());
}
std::shared_ptr<Node> op::ArgMax::get_default_value() const
{
// Choice of value here is arbitrary, because validation should be rejecting cases where the
// axis of reduction has size zero.
return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
}

View File

@@ -1,53 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/util/index_reduction.hpp"
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Computes the index of the maximum value along a specified axis for a given tensor
class NGRAPH_API ArgMax : public op::util::IndexReduction
{
public:
static constexpr NodeTypeInfo type_info{"ArgMax", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an ArgMax operation.
ArgMax() = default;
/// \brief Constructs an ArgMax operation.
///
/// \param arg The input tensor
/// \param axis The axis along which to compute the index of the maximum value
/// \param index_element_type The element type of the produced indices. Currently, only
/// int64 or int32 is supported
ArgMax(const Output<Node>& arg,
size_t axis,
const element::Type& index_element_type);
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node> get_default_value() const override;
};
}
using v0::ArgMax;
}
}
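For orientation, the backend tests further down in this diff construct the op exactly as this header suggests. A minimal sketch of that usage, assuming a pre-removal checkout where the header still exists (the helper name make_argmax_graph is illustrative only):

#include "ngraph/ngraph.hpp"

using namespace ngraph;

// Build a tiny graph: 4x3 f32 input, ArgMax along axis 0 producing i32 indices of shape {3}.
std::shared_ptr<Function> make_argmax_graph()
{
    auto A = std::make_shared<op::Parameter>(element::f32, Shape{4, 3});
    auto argmax = std::make_shared<op::v0::ArgMax>(A, /*axis=*/0, element::i32);
    return std::make_shared<Function>(argmax, ParameterVector{A});
}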

View File

@@ -1,48 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/argmin.hpp"
#include "ngraph/graph_util.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::ArgMin::type_info;
op::ArgMin::ArgMin(const Output<Node>& arg, size_t axis, const element::Type& index_element_type)
: op::util::IndexReduction(arg, axis, index_element_type)
{
constructor_validate_and_infer_types();
}
bool op::ArgMin::visit_attributes(AttributeVisitor& visitor)
{
IndexReduction::visit_attributes(visitor);
return true;
}
shared_ptr<Node> op::ArgMin::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<ArgMin>(new_args.at(0), m_axis, this->get_element_type());
}
std::shared_ptr<Node> op::ArgMin::get_default_value() const
{
// Choice of value here is arbitrary, because validation should be rejecting cases where the
// axis of reduction has size zero.
return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
}

View File

@@ -1,54 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/util/index_reduction.hpp"
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Computes the index of the minimum value along a specified axis for a given tensor
class NGRAPH_API ArgMin : public op::util::IndexReduction
{
public:
static constexpr NodeTypeInfo type_info{"ArgMin", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an ArgMin operation.
ArgMin() = default;
/// \brief Constructs an ArgMin operation.
///
/// \param arg The input tensor
/// \param axis The axis along which to compute the index of the minimum value
/// \param index_element_type The element type of the produced indices. Currently, only
/// int64 or int32 is supported
ArgMin(const Output<Node>& arg,
size_t axis,
const element::Type& index_element_type);
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node> get_default_value() const override;
};
}
using v0::ArgMin;
}
}

View File

@@ -35,8 +35,6 @@ NGRAPH_OP(All, ngraph::op::v0, 0)
NGRAPH_OP(AllReduce, ngraph::op::v0, 0)
NGRAPH_OP(And, ngraph::op::v0, 0)
NGRAPH_OP(Any, ngraph::op::v0, 0)
NGRAPH_OP(ArgMax, ngraph::op::v0, 0)
NGRAPH_OP(ArgMin, ngraph::op::v0, 0)
NGRAPH_OP(Asin, ngraph::op::v0, 0)
NGRAPH_OP(Asinh, ngraph::op::v3, 3)
NGRAPH_OP(Atan, ngraph::op::v0, 0)

View File

@@ -26,8 +26,6 @@
#include "ngraph/op/allreduce.hpp"
#include "ngraph/op/and.hpp"
#include "ngraph/op/any.hpp"
#include "ngraph/op/argmax.hpp"
#include "ngraph/op/argmin.hpp"
#include "ngraph/op/asin.hpp"
#include "ngraph/op/asinh.hpp"
#include "ngraph/op/assign.hpp"

View File

@@ -1,58 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cmath>
#include <cstring>
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/shape_util.hpp"
namespace ngraph
{
namespace runtime
{
namespace reference
{
template <typename T, typename U>
void argmax(
const T* arg, U* out, const Shape& in_shape, const Shape& out_shape, size_t axis)
{
// Initialize out to 0: index 0 along the reduction axis is the initial running maximum for each output element.
memset(out, 0, shape_size(out_shape) * sizeof(U));
AxisVector av{axis};
CoordinateTransform input_transform(in_shape);
for (const Coordinate& input_coord : input_transform)
{
Coordinate output_coord = reduce(input_coord, av);
CoordinateTransform output_transform(out_shape);
auto max_index = static_cast<size_t>(out[output_transform.index(output_coord)]);
auto max_coord = input_coord;
max_coord[axis] = max_index;
if (arg[input_transform.index(input_coord)] >
arg[input_transform.index(max_coord)])
{
out[output_transform.index(output_coord)] =
static_cast<U>(input_coord[axis]);
}
}
}
}
}
}
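To make the kernel's behaviour concrete, here is a hypothetical standalone driver, assuming a pre-removal checkout so the header above is still available (the argmin kernel in the next file is symmetric):

#include <cstdint>
#include <iostream>
#include <vector>

#include "ngraph/runtime/reference/argmax.hpp"

int main()
{
    using namespace ngraph;
    // 2x3 input reduced along axis 0 -> per-column argmax, output shape {3}.
    std::vector<float> in{1.f, 9.f, 3.f,
                          7.f, 2.f, 8.f};
    Shape in_shape{2, 3};
    Shape out_shape{3};
    std::vector<int32_t> out(shape_size(out_shape));
    runtime::reference::argmax<float, int32_t>(
        in.data(), out.data(), in_shape, out_shape, /*axis=*/0);
    for (auto i : out)
        std::cout << i << ' '; // prints: 1 0 1
    std::cout << '\n';
    return 0;
}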

View File

@@ -1,58 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cmath>
#include <cstring>
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/shape_util.hpp"
namespace ngraph
{
namespace runtime
{
namespace reference
{
template <typename T, typename U>
void argmin(
const T* arg, U* out, const Shape& in_shape, const Shape& out_shape, size_t axis)
{
// Initialize out to 0: index 0 along the reduction axis is the initial running minimum for each output element.
memset(out, 0, shape_size(out_shape) * sizeof(U));
AxisVector av{axis};
CoordinateTransform input_transform(in_shape);
for (const Coordinate& input_coord : input_transform)
{
Coordinate output_coord = reduce(input_coord, av);
CoordinateTransform output_transform(out_shape);
auto min_index = static_cast<size_t>(out[output_transform.index(output_coord)]);
auto min_coord = input_coord;
min_coord[axis] = min_index;
if (arg[input_transform.index(input_coord)] <
arg[input_transform.index(min_coord)])
{
out[output_transform.index(output_coord)] =
static_cast<U>(input_coord[axis]);
}
}
}
}
}
}

View File

@@ -981,20 +981,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
node = make_shared<op::Any>(args[0], reduction_axes);
break;
}
case OP_TYPEID::ArgMin:
{
auto axis = node_js.at("axis").get<size_t>();
auto target_type = read_element_type(node_js.at("index_element_type"));
node = make_shared<op::ArgMin>(args[0], axis, target_type);
break;
}
case OP_TYPEID::ArgMax:
{
auto axis = node_js.at("axis").get<size_t>();
auto target_type = read_element_type(node_js.at("index_element_type"));
node = make_shared<op::ArgMax>(args[0], axis, target_type);
break;
}
case OP_TYPEID::Asin:
{
node = make_shared<op::Asin>(args[0]);
@@ -2408,20 +2394,6 @@ json JSONSerializer::serialize_node(const Node& n)
}
break;
}
case OP_TYPEID::ArgMin:
{
auto tmp = static_cast<const op::ArgMin*>(&n);
node["axis"] = tmp->get_reduction_axis();
node["index_element_type"] = write_element_type(tmp->get_element_type());
break;
}
case OP_TYPEID::ArgMax:
{
auto tmp = static_cast<const op::ArgMax*>(&n);
node["axis"] = tmp->get_reduction_axis();
node["index_element_type"] = write_element_type(tmp->get_element_type());
break;
}
case OP_TYPEID::All:
{
auto tmp = static_cast<const op::All*>(&n);

View File

@@ -149,7 +149,6 @@ set(SRC
type_prop/group_convolution_backprop_data.cpp
type_prop/gru_cell.cpp
type_prop/hard_sigmoid.cpp
type_prop/index_reduction.cpp
type_prop/layer_norm.cpp
type_prop/lrn.cpp
type_prop/lstm_cell.cpp
@@ -287,7 +286,6 @@ set(MULTI_TEST_SRC
backend/all.in.cpp
backend/any.in.cpp
backend/api.in.cpp
backend/arg_reduce.in.cpp
backend/asin.in.cpp
backend/asinh.in.cpp
backend/atan.in.cpp

View File

@@ -1,607 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// Trivial case.
NGRAPH_TEST(${BACKEND_NAME}, argmin_trivial)
{
Shape shape{4, 3};
Shape rshape{3};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMin>(A, 0, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int>{3, 2, 1}), read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmin_2D_i32)
{
Shape shape{4, 3};
Shape rshape{3};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMin>(A, 0, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a, vector<int>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int>{3, 2, 1}), read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmin_3D_i32)
{
Shape shape{3, 3, 4};
Shape rshape{3, 4};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMin>(A, 1, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a,
test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
{{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
{{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
.get_vector());
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int>{1, 0, 1, 2, 1, 2, 2, 2, 1, 0, 0, 1}), read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmin_3D_i64)
{
Shape shape{3, 3, 4};
Shape rshape{3, 4};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMin>(A, 1, element::i64), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a,
test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
{{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
{{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
.get_vector());
auto result = backend->create_tensor(element::i64, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int64_t>{1, 0, 1, 2, 1, 2, 2, 2, 1, 0, 0, 1}), read_vector<int64_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmin_4D_i64)
{
Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
Shape rshape{2, 2, 5};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMin>(A, 3, element::i64), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(
a,
test::NDArray<int, 4>(
{{{{3, 1, 1, 2, 105},
{0, 3, 2, 1, 2},
{2, 4, 2, 0, 1},
{2, 5, 1, 1, 22},
{5, 2, 1, 7, 5}},
{{3, 1, 2, 2, 1},
{1, 7, 3, 8, 1},
{2, 10, 1, 3, 2},
{3, 1, 0, 0, 6},
{2, 0, 0, 0, 0}}},
{{{0, 2, 1, 1, 0}, {0, 0, 0, 0, 1}, {0, 0, 1, 0, 3}, {2, 0, 0, 3, 0}, {0, 0, 0, 0, 1}},
{{2, 1, 0, 0, 1},
{0, 2, 0, 0, 0},
{1, 1, 2, 0, 2},
{1, 1, 1, 0, 1},
{1, 0, 0, 0, 2}}}})
.get_vector());
auto result = backend->create_tensor(element::i64, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int64_t>{1, 0, 3, 2, 2, 1, 0, 2, 2, 1, 0, 0, 0, 1, 0, 2, 0, 3, 3, 1}),
read_vector<int64_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmin_4D_axis_3_i64)
{
Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
Shape rshape{2, 2, 5};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMin>(A, 3, element::i64), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a,
test::NDArray<float, 4>({{{{0.5f, 1.5f, 0.8f, 2.9f, 1.05f}, // img 0 ch 0
{0.5f, 3.5f, 2.0f, 1.0f, 0.2f},
{2.0f, 0.0f, 2.2f, 0.2f, 1.4f},
{2.9f, 0.0f, 1.52f, 1.2f, 2.22f},
{5.0f, 2.0f, 1.0f, 0.5f, 0.85f}},
{{0.25f, 0.02f, 0.02f, 2.2f, 0.001f}, // img 0 ch 1
{1.0f, 0.2f, 3.0f, 0.25f, 1.14f},
{2.25f, 10.1f, 1.0f, 0.02f, 2.22f},
{3.2f, 1.002f, 0.001f, 0.2f, 6.0f},
{2.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
{{{0.0f, 2.2f, 1.2f, 1.6f, 0.2f}, // img 1 ch 0
{0.01f, 0.0f, 0.22f, 0.02f, 1.1f},
{0.01f, 0.5f, 1.6f, 0.2f, 3.2f},
{2.4f, 0.5f, 0.0f, 3.0f, 0.1f},
{0.0f, 0.5f, 0.4f, 0.8f, 1.0f}},
{{2.0f, 1.0f, 0.0f, 0.0f, 1.0f}, // img 1 ch 1
{0.0f, 2.0f, 0.0f, 0.0f, 0.0f},
{1.0f, 1.0f, 2.0f, 0.0f, 2.0f},
{1.0f, 1.0f, 1.0f, 0.0f, 1.0f},
{1.0f, 0.0f, 0.0f, 0.0f, 2.0f}}}})
.get_vector());
auto result = backend->create_tensor(element::i64, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((test::NDArray<int64_t, 3>({{{0, 4, 1, 1, 3}, // ch0
{4, 1, 3, 2, 1}}, //
{{0, 1, 0, 2, 0}, // ch1
{2, 0, 3, 3, 1}}}) //
.get_vector()),
read_vector<int64_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmin_4D_axis_3)
{
Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
Shape rshape{2, 2, 5};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMin>(A, 3, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a,
test::NDArray<float, 4>({{{{0.5f, 1.5f, 0.8f, 2.9f, 1.05f}, // img 0 ch 0
{0.5f, 3.5f, 2.0f, 1.0f, 0.2f},
{2.0f, 0.0f, 2.2f, 0.2f, 1.4f},
{2.9f, 0.0f, 1.52f, 1.2f, 2.22f},
{5.0f, 2.0f, 1.0f, 0.5f, 0.85f}},
{{0.25f, 0.02f, 0.02f, 2.2f, 0.001f}, // img 0 ch 1
{1.0f, 0.2f, 3.0f, 0.25f, 1.14f},
{2.25f, 10.1f, 1.0f, 0.02f, 2.22f},
{3.2f, 1.002f, 0.001f, 0.2f, 6.0f},
{2.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
{{{0.0f, 2.2f, 1.2f, 1.6f, 0.2f}, // img 1 ch 0
{0.01f, 0.0f, 0.22f, 0.02f, 1.1f},
{0.01f, 0.5f, 1.6f, 0.2f, 3.2f},
{2.4f, 0.5f, 0.0f, 3.0f, 0.1f},
{0.0f, 0.5f, 0.4f, 0.8f, 1.0f}},
{{2.0f, 1.0f, 0.0f, 0.0f, 1.0f}, // img 1 ch 1
{0.0f, 2.0f, 0.0f, 0.0f, 0.0f},
{1.0f, 1.0f, 2.0f, 0.0f, 2.0f},
{1.0f, 1.0f, 1.0f, 0.0f, 1.0f},
{1.0f, 0.0f, 0.0f, 0.0f, 2.0f}}}})
.get_vector());
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((test::NDArray<int, 3>({{{0, 4, 1, 1, 3}, // ch0
{4, 1, 3, 2, 1}}, //
{{0, 1, 0, 2, 0}, // ch1
{2, 0, 3, 3, 1}}}) //
.get_vector()),
read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmax_trivial)
{
Shape shape{4, 3}; // HW -> (0,1)
Shape rshape{3};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMax>(A, 0, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7});
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int>{1, 3, 0}), read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmax_2D_i32)
{
Shape shape{4, 3};
Shape rshape{3};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMax>(A, 0, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a, vector<int>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int>{0, 3, 0}), read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_i32)
{
Shape shape{3, 3, 4};
Shape rshape{3, 4};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMax>(A, 1, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a,
test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
{{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
{{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
.get_vector());
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int>{0, 2, 0, 0, 2, 1, 0, 0, 0, 2, 1, 0}), read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_i64)
{
Shape shape{3, 3, 4};
Shape rshape{3, 4};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMax>(A, 1, element::i64), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a,
test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
{{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
{{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
.get_vector());
auto result = backend->create_tensor(element::i64, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int64_t>{0, 2, 0, 0, 2, 1, 0, 0, 0, 2, 1, 0}), read_vector<int64_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmax_4D_i64)
{
Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
Shape rshape{2, 2, 5};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMax>(A, 3, element::i64), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(
a,
test::NDArray<int, 4>(
{{{{3, 1, 1, 2, 105},
{0, 3, 2, 1, 2},
{2, 4, 2, 0, 1},
{2, 5, 1, 1, 22},
{5, 2, 1, 7, 5}},
{{3, 1, 2, 2, 1},
{1, 7, 3, 8, 1},
{2, 10, 1, 3, 2},
{3, 1, 0, 0, 6},
{2, 0, 0, 0, 0}}},
{{{0, 2, 1, 1, 0}, {0, 0, 0, 0, 1}, {0, 0, 1, 0, 3}, {2, 0, 0, 3, 0}, {0, 0, 0, 0, 1}},
{{2, 1, 0, 0, 1},
{0, 2, 0, 0, 0},
{1, 1, 2, 0, 2},
{1, 1, 1, 0, 1},
{1, 0, 0, 0, 2}}}})
.get_vector());
auto result = backend->create_tensor(element::i64, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int64_t>{4, 1, 1, 4, 3, 0, 3, 1, 4, 0, 1, 4, 4, 3, 4, 0, 1, 2, 0, 4}),
read_vector<int64_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_axis_0) // Along Channels
{
Shape shape{3, 4, 2}; // CHW ->(0,1,2)
Shape rshape{4, 2};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMax>(A, 0, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a,
test::NDArray<float, 3>({{{8, 4}, // ch0
{12, 10},
{2, 9},
{1, 5}},
{{6, 7}, // ch1
{11, 3},
{9, 2},
{10, 12}},
{{8, 4}, // ch2
{6, 1},
{5, 3},
{11, 7}}})
.get_vector());
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((test::NDArray<int, 2>({{0, 1}, // r0
{0, 0}, // r1
{1, 0}, // r2
{2, 1}}) // r3
.get_vector()),
read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_axis_1) // Along Height
{
Shape shape{3, 4, 2}; // CHW ->(0,1,2)
Shape rshape{3, 2};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMax>(A, 1, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a,
test::NDArray<float, 3>({{{8, 4}, // ch0
{12, 10},
{2, 9},
{1, 5}},
{{6, 7}, // ch1
{11, 3},
{9, 2},
{10, 12}},
{{8, 4}, // ch2
{6, 1},
{5, 3},
{11, 7}}})
.get_vector());
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((test::NDArray<int, 2>({{1, 1}, //
{1, 3}, //
{3, 3}})
.get_vector()),
read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_axis_2) // Along Width
{
Shape shape{3, 4, 2}; // CHW ->(0,1,2)
Shape rshape{3, 4};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMax>(A, 2, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a,
test::NDArray<float, 3>({{{8, 4}, // ch0
{12, 10},
{2, 9},
{1, 5}},
{{6, 7}, // ch1
{11, 3},
{9, 2},
{10, 12}},
{{8, 4}, // ch2
{6, 1},
{5, 3},
{11, 7}}})
.get_vector());
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((test::NDArray<int, 2>({{0, 0, 1, 1}, //
{1, 0, 0, 1}, //
{0, 0, 0, 0}}) //
.get_vector()),
read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmax_4D_axis_3)
{
Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
Shape rshape{2, 2, 5};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMax>(A, 3, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a,
test::NDArray<float, 4>({{{{0, 1, 0, 2, 1}, // img 0 ch 0
{0, 3, 2, 0, 0},
{2, 0, 0, 0, 1},
{2, 0, 1, 1, 2},
{0, 2, 1, 0, 0}},
{{0, 0, 0, 2, 0}, // img 0 ch 1
{0, 2, 3, 0, 1},
{2, 0, 1, 0, 2},
{3, 1, 0, 0, 0},
{2, 0, 0, 0, 0}}},
{{{0, 2, 1, 1, 0}, // img 1 ch 0
{0, 0, 2, 0, 1},
{0, 0, 1, 2, 3},
{2, 0, 0, 3, 0},
{0, 0, 0, 0, 0}},
{{2, 1, 0, 0, 1}, // img 1 ch 1
{0, 2, 0, 0, 0},
{1, 1, 2, 0, 2},
{1, 1, 1, 0, 1},
{1, 0, 0, 0, 2}}}})
.get_vector());
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((test::NDArray<int, 3>({{{3, 1, 0, 0, 1}, {3, 2, 0, 0, 0}}, // ch0
{{1, 2, 4, 3, 0}, {0, 1, 2, 0, 4}}}) // ch1
.get_vector()),
read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmin_trivial_in_i32)
{
Shape shape{4, 3};
Shape rshape{3};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMin>(A, 0, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a, vector<int32_t>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int>{3, 2, 1}), read_vector<int>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmax_4D_axis_3_i64_in_i32)
{
Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
Shape rshape{2, 2, 5};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::ArgMax>(A, 3, element::i64), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a,
test::NDArray<int32_t, 4>({{{{0, 1, 0, 2, 1}, // img 0 ch 0
{0, 3, 2, 0, 0},
{2, 0, 0, 0, 1},
{2, 0, 1, 1, 2},
{0, 2, 1, 0, 0}},
{{0, 0, 0, 2, 0}, // img 0 ch 1
{0, 2, 3, 0, 1},
{2, 0, 1, 0, 2},
{3, 1, 0, 0, 0},
{2, 0, 0, 0, 0}}},
{{{0, 2, 1, 1, 0}, // img 1 ch 0
{0, 0, 2, 0, 1},
{0, 0, 1, 2, 3},
{2, 0, 0, 3, 0},
{0, 0, 0, 0, 0}},
{{2, 1, 0, 0, 1}, // img 1 ch 1
{0, 2, 0, 0, 0},
{1, 1, 2, 0, 2},
{1, 1, 1, 0, 1},
{1, 0, 0, 0, 2}}}})
.get_vector());
auto result = backend->create_tensor(element::i64, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((test::NDArray<int64_t, 3>({{{3, 1, 0, 0, 1}, {3, 2, 0, 0, 0}}, // ch0
{{1, 2, 4, 3, 0}, {0, 1, 2, 0, 4}}}) // ch1
.get_vector()),
read_vector<int64_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, argmin_trivial_in_double)
{
Shape shape{4, 3};
Shape rshape{3};
auto A = make_shared<op::Parameter>(element::f64, shape);
auto f = make_shared<Function>(make_shared<op::ArgMin>(A, 0, element::i32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f64, shape);
copy_data(a, vector<double>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
auto result = backend->create_tensor(element::i32, rshape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int32_t>{3, 2, 1}), read_vector<int32_t>(result));
}

View File

@@ -88,24 +88,6 @@ namespace
EXPECT_FALSE(node.is_binary_elementwise_logical());
}
void op_is_ArgMax()
{
op::ArgMax node;
EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
EXPECT_FALSE(node.is_binary_elementwise_comparison());
EXPECT_FALSE(node.is_binary_elementwise_logical());
}
void op_is_ArgMin()
{
op::ArgMin node;
EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
EXPECT_FALSE(node.is_binary_elementwise_comparison());
EXPECT_FALSE(node.is_binary_elementwise_logical());
}
void op_is_Asin()
{
op::Asin node;

View File

@@ -198,8 +198,7 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
// get op type
element::Type type;
if (is_type<op::Convert>(op) || is_type<op::Quantize>(op) || is_type<op::Dequantize>(op) ||
is_type<op::ArgMin>(op) || is_type<op::ArgMax>(op))
if (is_type<op::Convert>(op) || is_type<op::Quantize>(op) || is_type<op::Dequantize>(op))
{
type = op->get_input_element_type(0);
}

View File

@@ -33,8 +33,6 @@
#include "ngraph/runtime/reference/all.hpp"
#include "ngraph/runtime/reference/allreduce.hpp"
#include "ngraph/runtime/reference/any.hpp"
#include "ngraph/runtime/reference/argmax.hpp"
#include "ngraph/runtime/reference/argmin.hpp"
#include "ngraph/runtime/reference/asin.hpp"
#include "ngraph/runtime/reference/atan.hpp"
#include "ngraph/runtime/reference/atan2.hpp"
@@ -240,58 +238,6 @@ protected:
any->get_reduction_axes());
break;
}
case OP_TYPEID::ArgMin:
{
const op::ArgMin* argmin = static_cast<const op::ArgMin*>(&node);
auto element_type = node.get_output_element_type(0);
if (element_type == element::i64)
{
reference::argmin<T, int64_t>(args[0]->get_data_ptr<const T>(),
out[0]->get_data_ptr<int64_t>(),
node.get_input_shape(0),
node.get_output_shape(0),
argmin->get_reduction_axis());
}
else if (element_type == element::i32)
{
reference::argmin<T, int32_t>(args[0]->get_data_ptr<const T>(),
out[0]->get_data_ptr<int32_t>(),
node.get_input_shape(0),
node.get_output_shape(0),
argmin->get_reduction_axis());
}
else
{
throw ngraph_error("Unexpected type");
}
break;
}
case OP_TYPEID::ArgMax:
{
const op::ArgMax* argmax = static_cast<const op::ArgMax*>(&node);
auto element_type = node.get_output_element_type(0);
if (element_type == element::i64)
{
reference::argmax<T, int64_t>(args[0]->get_data_ptr<const T>(),
out[0]->get_data_ptr<int64_t>(),
node.get_input_shape(0),
node.get_output_shape(0),
argmax->get_reduction_axis());
}
else if (element_type == element::i32)
{
reference::argmax<T, int32_t>(args[0]->get_data_ptr<const T>(),
out[0]->get_data_ptr<int32_t>(),
node.get_input_shape(0),
node.get_output_shape(0),
argmax->get_reduction_axis());
}
else
{
throw ngraph_error("Unexpected type");
}
break;
}
case OP_TYPEID::Asin:
{
size_t element_count = shape_size(node.get_output_shape(0));

View File

@@ -57,8 +57,6 @@ NGRAPH_OP(All, ngraph::op)
NGRAPH_OP(AllReduce, ngraph::op)
NGRAPH_OP(And, ngraph::op)
NGRAPH_OP(Any, ngraph::op)
NGRAPH_OP(ArgMax, ngraph::op)
NGRAPH_OP(ArgMin, ngraph::op)
NGRAPH_OP(Asin, ngraph::op)
NGRAPH_OP(Atan, ngraph::op)
NGRAPH_OP(Atan2, ngraph::op::v0)

View File

@@ -1,219 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, index_reduction_scalar)
{
auto a = make_shared<op::Parameter>(element::f32, Shape{});
try
{
auto argmin = make_shared<op::ArgMin>(a, 0, element::i32);
FAIL() << "ArgMin c-tor should throw for scalar shapes";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Argument rank is zero");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, index_reduction_invalid_rank)
{
auto a = make_shared<op::Parameter>(element::f32, Shape{2, 2});
try
{
auto argmin = make_shared<op::ArgMin>(a, 2, element::i32);
FAIL() << "ArgMin c-tor should throw for axis out of bounds";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Reduction axis (2) is not less than argument rank (2)");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, argmin_invalid_zero_reduction_axis)
{
auto a = make_shared<op::Parameter>(element::f32, Shape{2, 0});
try
{
auto argmin = make_shared<op::ArgMin>(a, 1, element::i32);
FAIL() << "ArgMin c-tor should throw for zero-length reduction axis";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "reduction axis can not be empty");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, argmax_invalid_zero_reduction_axis)
{
auto a = make_shared<op::Parameter>(element::f32, Shape{2, 0});
try
{
auto argmax = make_shared<op::ArgMax>(a, 1, element::i32);
FAIL() << "ArgMax c-tor should throw for zero-length reduction axis";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "reduction axis can not be empty");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, index_reduction_invalid_index_type)
{
auto a = make_shared<op::Parameter>(element::f32, Shape{2, 2});
try
{
auto argmin = make_shared<op::ArgMin>(a, 1, element::f32);
FAIL() << "ArgMin c-tor should throw for invalid index type";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Index element is neither i64 or i32");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, index_reduction_partial_rank_dynamic_output_et_dynamic)
{
auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
size_t axis = 228;
auto output_et = element::dynamic;
try
{
auto argmax = make_shared<op::ArgMax>(a, axis, output_et);
FAIL() << "Invalid output type of element::dynamic not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Index element is neither i64 or i32");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, index_reduction_partial_rank_dynamic_output_et_invalid)
{
auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
size_t axis = 228;
auto output_et = element::f32;
try
{
auto argmax = make_shared<op::ArgMax>(a, axis, output_et);
FAIL() << "Invalid output type of element::f32 not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Index element is neither i64 or i32");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, index_reduction_partial_rank_dynamic_ok)
{
auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
size_t axis = 228;
auto output_et = element::i32;
auto argmax = make_shared<op::ArgMax>(a, axis, output_et);
ASSERT_EQ(argmax->get_output_element_type(0), element::i32);
ASSERT_TRUE(argmax->get_output_partial_shape(0).rank().is_dynamic());
}
TEST(type_prop, index_reduction_partial_rank_static_dynamic_axis_oob)
{
auto a = make_shared<op::Parameter>(element::f32, PartialShape{Dimension::dynamic(), 2, 3, 4});
size_t axis = 4;
auto output_et = element::i32;
try
{
auto argmax = make_shared<op::ArgMax>(a, axis, output_et);
FAIL() << "Out-of-bounds reduction axis not detected (rank-static dynamic argument)";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Reduction axis (4) is not less than argument rank (4)");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, index_reduction_partial_rank_static_dynamic_ok)
{
auto a = make_shared<op::Parameter>(element::f32, PartialShape{Dimension::dynamic(), 2, 3, 4});
size_t axis = 2;
auto output_et = element::i32;
auto argmax = make_shared<op::ArgMax>(a, axis, output_et);
ASSERT_EQ(argmax->get_output_element_type(0), element::i32);
ASSERT_TRUE(
argmax->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 2, 4}));
}
TEST(type_prop, index_reduction_partial_et_dynamic_rank_static_dynamic_ok)
{
auto a =
make_shared<op::Parameter>(element::dynamic, PartialShape{Dimension::dynamic(), 2, 3, 4});
size_t axis = 2;
auto output_et = element::i32;
auto argmax = make_shared<op::ArgMax>(a, axis, output_et);
ASSERT_EQ(argmax->get_output_element_type(0), element::i32);
ASSERT_TRUE(
argmax->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 2, 4}));
}

View File

@@ -158,34 +158,6 @@ TEST(zero_dim_tensor_elimination, zero_const_slice)
EXPECT_EQ(count_ops_of_type<op::Slice>(f), 0);
}
TEST(zero_dim_tensor_elimination, zero_argmax)
{
auto A = std::make_shared<op::Parameter>(element::f32, Shape{0, 2, 3});
auto argmax = make_shared<op::ArgMax>(A, 1, element::i32);
auto f = std::make_shared<Function>(NodeVector{argmax}, ParameterVector{A});
pass::Manager pass_manager;
pass_manager.register_pass<ngraph::pass::ZeroDimTensorElimination>();
EXPECT_EQ(count_ops_of_type<op::ArgMax>(f), 1);
pass_manager.run_passes(f);
EXPECT_EQ(count_ops_of_type<op::ArgMax>(f), 0);
EXPECT_EQ(f->get_results().at(0)->get_shape(), (Shape{0, 3}));
}
TEST(zero_dim_tensor_elimination, zero_argmin)
{
auto A = std::make_shared<op::Parameter>(element::f32, Shape{0, 2, 3});
auto argmin = make_shared<op::ArgMin>(A, 1, element::i32);
auto f = std::make_shared<Function>(NodeVector{argmin}, ParameterVector{A});
pass::Manager pass_manager;
pass_manager.register_pass<ngraph::pass::ZeroDimTensorElimination>();
EXPECT_EQ(count_ops_of_type<op::ArgMin>(f), 1);
pass_manager.run_passes(f);
EXPECT_EQ(count_ops_of_type<op::ArgMin>(f), 0);
EXPECT_EQ(f->get_results().at(0)->get_shape(), (Shape{0, 3}));
}
TEST(zero_dim_tensor_elimination, pass_property)
{
auto pass = std::make_shared<ngraph::pass::ZeroDimTensorElimination>();