Removed all and allreduce layers (#1182)
* Removed all and allreduce layers
* Removed reference implementations
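For downstream users, a minimal migration sketch (not part of this diff): a graph that used op::All can express the same logical-AND reduction with op::v1::ReduceLogicalAnd, which this commit keeps as a supported reduction in constant folding. The helper name is hypothetical and the exact v1 constructor signature is assumed here.

    // Hypothetical helper, for illustration only: v1 reductions take the
    // reduction axes as a Constant input rather than an AxisSet (assumed API).
    #include "ngraph/ngraph.hpp"

    using namespace ngraph;

    std::shared_ptr<Function> make_logical_all(const Shape& shape, const AxisSet& axes)
    {
        auto data = std::make_shared<op::Parameter>(element::boolean, shape);
        // Convert the AxisSet to an i64 constant, the form v1 reductions expect.
        auto axes_const = op::Constant::create(
            element::i64, Shape{axes.size()}, std::vector<int64_t>(axes.begin(), axes.end()));
        // keep_dims=false reproduces op::All's axis-eliminating output shape.
        auto reduced = std::make_shared<op::v1::ReduceLogicalAnd>(data, axes_const, false);
        return std::make_shared<Function>(reduced, ParameterVector{data});
    }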
parent bd3b6bfc5e
commit 71d41a992f
@@ -119,10 +119,6 @@ set (SRC
     op/acosh.hpp
     op/add.cpp
     op/add.hpp
-    op/all.cpp
-    op/all.hpp
-    op/allreduce.cpp
-    op/allreduce.hpp
     op/and.cpp
     op/and.hpp
     op/any.cpp
@@ -1,46 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "ngraph/op/all.hpp"
-#include "ngraph/graph_util.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-constexpr NodeTypeInfo op::All::type_info;
-
-op::All::All(const Output<Node>& arg, const AxisSet& reduction_axes)
-    : LogicalReduction(arg, reduction_axes)
-{
-    constructor_validate_and_infer_types();
-}
-
-op::All::All(const Output<Node>& arg, const Output<Node>& reduction_axes)
-    : LogicalReduction(arg, reduction_axes)
-{
-    constructor_validate_and_infer_types();
-}
-
-shared_ptr<Node> op::All::clone_with_new_inputs(const OutputVector& new_args) const
-{
-    check_new_args_count(this, new_args);
-    return make_shared<All>(new_args.at(0), new_args.at(1));
-}
-
-shared_ptr<Node> op::All::get_default_value() const
-{
-    return make_constant_from_string("1", get_element_type(), get_shape());
-}
@@ -1,55 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include "ngraph/op/util/logical_reduction.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief Logical "all" reduction operation.
-            class NGRAPH_API All : public util::LogicalReduction
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"All", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                /// \brief Constructs an "all" reduction operation.
-                All() = default;
-                /// \brief Constructs an "all" reduction operation.
-                ///
-                /// \param arg The tensor to be reduced.
-                /// \param reduction_axes The axis positions (0-based) to be eliminated.
-                All(const Output<Node>& arg, const AxisSet& reduction_axes);
-                /// \brief Constructs an "all" reduction operation.
-                ///
-                /// \param arg The tensor to be reduced.
-                /// \param reduction_axes The axis positions (0-based) to be eliminated.
-                All(const Output<Node>& arg, const Output<Node>& reduction_axes);
-                bool visit_attributes(AttributeVisitor& visitor) override { return true; }
-                std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                /// \return The default value for All.
-                virtual std::shared_ptr<Node> get_default_value() const override;
-            };
-        }
-        using v0::All;
-    }
-}
@@ -1,66 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "ngraph/op/allreduce.hpp"
-#include "ngraph/attribute_visitor.hpp"
-#include "ngraph/type.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-constexpr NodeTypeInfo op::AllReduce::type_info;
-
-op::AllReduce::AllReduce(const Output<Node>& arg, reduction::Type reduce_type)
-    : Op({arg})
-    , m_reduce_type(reduce_type)
-{
-    constructor_validate_and_infer_types();
-}
-
-void op::AllReduce::validate_and_infer_types()
-{
-    NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(0).is_dynamic() ||
-                              get_input_element_type(0) == element::f32 ||
-                              get_input_element_type(0) == element::f64,
-                          "Only element types f32 and f64 are supported (argument element type: ",
-                          get_input_element_type(0),
-                          ").");
-
-    set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
-}
-
-shared_ptr<Node> op::AllReduce::clone_with_new_inputs(const OutputVector& new_args) const
-{
-    check_new_args_count(this, new_args);
-    return make_shared<AllReduce>(new_args.at(0), get_reduce_type());
-}
-
-bool op::AllReduce::visit_attributes(AttributeVisitor& visitor)
-{
-    visitor.on_attribute("reduce_type", m_reduce_type);
-    return true;
-}
-
-reduction::Type op::AllReduce::get_reduce_type() const
-{
-    return m_reduce_type;
-}
-
-void op::AllReduce::set_reduce_type(reduction::Type reduce_type)
-{
-    m_reduce_type = reduce_type;
-}
@@ -1,52 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include <memory>
-#include "ngraph/distributed.hpp"
-#include "ngraph/op/op.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            class NGRAPH_API AllReduce : public Op
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"AllReduce", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                AllReduce() = default;
-                AllReduce(const Output<Node>& arg,
-                          reduction::Type reduce_type = reduction::Type::SUM);
-
-                void validate_and_infer_types() override;
-
-                std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-                reduction::Type get_reduce_type() const;
-                void set_reduce_type(reduction::Type reduce_type);
-                bool visit_attributes(AttributeVisitor& visitor) override;
-
-            private:
-                reduction::Type m_reduce_type{reduction::Type::SUM};
-            };
-        }
-        using v0::AllReduce;
-    }
-}
@@ -31,8 +31,6 @@ NGRAPH_OP(Acos, ngraph::op::v0, 0)
 NGRAPH_OP(Acosh, ngraph::op::v3, 3)
 NGRAPH_OP(Add, ngraph::op::v0, 0)
 NGRAPH_OP(Add, ngraph::op::v1, 1)
-NGRAPH_OP(All, ngraph::op::v0, 0)
-NGRAPH_OP(AllReduce, ngraph::op::v0, 0)
 NGRAPH_OP(Any, ngraph::op::v0, 0)
 NGRAPH_OP(Asin, ngraph::op::v0, 0)
 NGRAPH_OP(Asinh, ngraph::op::v3, 3)
@@ -22,8 +22,6 @@
 #include "ngraph/op/acos.hpp"
 #include "ngraph/op/acosh.hpp"
 #include "ngraph/op/add.hpp"
-#include "ngraph/op/all.hpp"
-#include "ngraph/op/allreduce.hpp"
 #include "ngraph/op/and.hpp"
 #include "ngraph/op/any.hpp"
 #include "ngraph/op/asin.hpp"
@@ -15,11 +15,9 @@
 //*****************************************************************************
 
 #include "constant_folding.hpp"
-#include "ngraph/op/all.hpp"
 #include "ngraph/op/any.hpp"
 #include "ngraph/op/reduce_logical_and.hpp"
 #include "ngraph/op/reduce_logical_or.hpp"
-#include "ngraph/runtime/reference/all.hpp"
 #include "ngraph/runtime/reference/any.hpp"
 
 using namespace std;
@@ -46,15 +44,7 @@ static shared_ptr<op::Constant> fold_constant_logical_reduction(shared_ptr<op::C
     runtime::AlignedBuffer buffer(shape_size(reduction_node->get_shape()) * sizeof(char));
     char* data_ptr = buffer.get_ptr<char>();
 
-    if (auto all = as_type_ptr<::ngraph::op::All>(reduction_node))
-    {
-        runtime::reference::all(constant->get_data_ptr<char>(),
-                                data_ptr,
-                                constant->get_output_shape(0),
-                                reduction_node->get_shape(),
-                                all->get_reduction_axes());
-    }
-    else if (auto any = as_type_ptr<::ngraph::op::Any>(reduction_node))
+    if (auto any = as_type_ptr<::ngraph::op::Any>(reduction_node))
     {
         runtime::reference::any(constant->get_data_ptr<char>(),
                                 data_ptr,
@@ -66,12 +56,23 @@ static shared_ptr<op::Constant> fold_constant_logical_reduction(shared_ptr<op::C
     {
         const auto reduction_axes = reduce_and->get_reduction_axes();
         const auto input_shape = reduce_and->get_input_shape(0);
+        const char* arg = constant->get_data_ptr<char>();
+        CoordinateTransform output_transform(get_shape_no_keep_dims(reduction_axes, input_shape));
 
-        runtime::reference::all(constant->get_data_ptr<char>(),
-                                data_ptr,
-                                constant->get_output_shape(0),
-                                get_shape_no_keep_dims(reduction_axes, input_shape),
-                                reduction_axes);
+        for (const Coordinate& output_coord : output_transform)
+        {
+            data_ptr[output_transform.index(output_coord)] = 1;
+        }
+
+        CoordinateTransform input_transform(constant->get_output_shape(0));
+
+        for (const Coordinate& input_coord : input_transform)
+        {
+            Coordinate output_coord = reduce(input_coord, reduction_axes);
+            data_ptr[output_transform.index(output_coord)] =
+                data_ptr[output_transform.index(output_coord)] &&
+                arg[input_transform.index(input_coord)];
+        }
     }
     else if (auto reduce_or = as_type_ptr<::ngraph::op::v1::ReduceLogicalOr>(reduction_node))
     {
@@ -103,8 +104,7 @@ void pass::ConstantFolding::construct_constant_logical_reduction()
     auto constant_axes_label =
         make_shared<pattern::op::Label>(element::i64, Shape{2}, pattern::has_class<op::Constant>());
     auto is_supported_reduction = [](std::shared_ptr<Node> n) {
-        return (pattern::has_class<::ngraph::op::All>()(n) ||
-                pattern::has_class<::ngraph::op::Any>()(n) ||
+        return (pattern::has_class<::ngraph::op::Any>()(n) ||
                 pattern::has_class<::ngraph::op::v1::ReduceLogicalAnd>()(n) ||
                 pattern::has_class<::ngraph::op::v1::ReduceLogicalOr>()(n));
     };
@@ -1,55 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include <cmath>
-
-#include "ngraph/coordinate_transform.hpp"
-#include "ngraph/shape_util.hpp"
-
-namespace ngraph
-{
-    namespace runtime
-    {
-        namespace reference
-        {
-            static inline void all(const char* arg,
-                                   char* out,
-                                   const Shape& in_shape,
-                                   const Shape& out_shape,
-                                   const AxisSet& reduction_axes)
-            {
-                CoordinateTransform output_transform(out_shape);
-
-                for (const Coordinate& output_coord : output_transform)
-                {
-                    out[output_transform.index(output_coord)] = 1;
-                }
-
-                CoordinateTransform input_transform(in_shape);
-
-                for (const Coordinate& input_coord : input_transform)
-                {
-                    Coordinate output_coord = reduce(input_coord, reduction_axes);
-                    out[output_transform.index(output_coord)] =
-                        out[output_transform.index(output_coord)] &&
-                        arg[input_transform.index(input_coord)];
-                }
-            }
-        }
-    }
-}
@@ -1,38 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include "ngraph/distributed.hpp"
-
-namespace ngraph
-{
-    namespace runtime
-    {
-        namespace reference
-        {
-            template <typename T>
-            void allreduce(T* arg,
-                           T* out,
-                           const element::Type_t element_type,
-                           const reduction::Type reduce_type,
-                           int count)
-            {
-                get_distributed_interface()->all_reduce(arg, out, element_type, reduce_type, count);
-            }
-        }
-    }
-}
@@ -958,17 +958,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
             args[0], args[1], read_auto_broadcast(node_js, "auto_broadcast"));
         break;
     }
-    case OP_TYPEID::All:
-    {
-        auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes"));
-        node = make_shared<op::All>(args[0], reduction_axes);
-        break;
-    }
-    case OP_TYPEID::AllReduce:
-    {
-        node = make_shared<op::AllReduce>(args[0]);
-        break;
-    }
     case OP_TYPEID::Any:
     {
         auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes"));
@@ -2388,14 +2377,6 @@ json JSONSerializer::serialize_node(const Node& n)
         }
         break;
     }
-    case OP_TYPEID::All:
-    {
-        auto tmp = static_cast<const op::All*>(&n);
-        node["reduction_axes"] = serialize_axis_set(tmp->get_reduction_axes());
-        break;
-    }
-    case OP_TYPEID::AllReduce: { break;
-    }
     case OP_TYPEID::Any:
     {
         auto tmp = static_cast<const op::Any*>(&n);
@@ -109,7 +109,6 @@ set(SRC
     shape.cpp
     specialize_function.cpp
     tensor.cpp
-    type_prop/all.cpp
     type_prop/any.cpp
     type_prop/assign.cpp
     type_prop/batch_norm.cpp
@@ -282,7 +281,6 @@ set(MULTI_TEST_SRC
     backend/acosh.in.cpp
     backend/add.in.cpp
     backend/aliased_output.in.cpp
-    backend/all.in.cpp
     backend/any.in.cpp
     backend/api.in.cpp
     backend/asin.in.cpp
@@ -1,370 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include <algorithm>
-#include <cinttypes>
-#include <cmath>
-#include <cstdlib>
-#include <string>
-
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "ngraph/runtime/tensor.hpp"
-#include "runtime/backend.hpp"
-#include "util/all_close.hpp"
-#include "util/all_close_f.hpp"
-#include "util/ndarray.hpp"
-#include "util/random.hpp"
-#include "util/test_control.hpp"
-#include "util/test_tools.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-static string s_manifest = "${MANIFEST}";
-
-// Trivial case with no reduced axes.
-NGRAPH_TEST(${BACKEND_NAME}, all_trivial)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(a, vector<char>{1, 0, 0, 1});
-    auto result = backend->create_tensor(element::boolean, shape);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{1, 0, 0, 1}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x2_to_scalar_false)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(a, vector<char>{1, 0, 0, 1});
-    auto result = backend->create_tensor(element::boolean, Shape{});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{0}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x2_to_scalar_true)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(a, vector<char>{1, 1, 1, 1});
-    auto result = backend->create_tensor(element::boolean, Shape{});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{1}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x0_to_scalar)
-{
-    Shape shape{2, 0};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    auto result = backend->create_tensor(element::boolean, Shape{});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{1}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x3_eliminate_col_dim)
-{
-    Shape shape{2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(a, test::NDArray<char, 2>({{1, 0, 1}, {1, 1, 1}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{2});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{0, 1}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x3_eliminate_row_dim)
-{
-    Shape shape{2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{0}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(a, test::NDArray<char, 2>({{1, 0, 1}, {1, 1, 0}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{3});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{1, 0, 0}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x2x3_eliminate_dim_0)
-{
-    Shape shape{2, 2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{0}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(
-        a, test::NDArray<char, 3>({{{1, 0, 1}, {1, 1, 0}}, {{0, 1, 0}, {1, 1, 1}}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{2, 3});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{0, 0, 0, 1, 1, 0}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x2x3_eliminate_dim_1)
-{
-    Shape shape{2, 2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(
-        a, test::NDArray<char, 3>({{{1, 0, 1}, {1, 1, 0}}, {{0, 1, 0}, {1, 1, 1}}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{2, 3});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{1, 0, 0, 0, 1, 0}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x2x3_eliminate_dim_2)
-{
-    Shape shape{2, 2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{2}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(
-        a, test::NDArray<char, 3>({{{1, 0, 1}, {1, 1, 0}}, {{0, 1, 0}, {1, 1, 1}}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{2, 2});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{0, 0, 0, 1}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x2x3_eliminate_dims_0_1)
-{
-    Shape shape{2, 2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(
-        a, test::NDArray<char, 3>({{{1, 0, 1}, {1, 1, 0}}, {{0, 1, 0}, {1, 1, 1}}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{3});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{0, 0, 0}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x2x3_eliminate_dims_0_2)
-{
-    Shape shape{2, 2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{0, 2}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(
-        a, test::NDArray<char, 3>({{{1, 0, 1}, {1, 1, 0}}, {{0, 1, 0}, {1, 1, 1}}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{2});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{0, 0}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x2x3_eliminate_dims_1_2)
-{
-    Shape shape{2, 2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{1, 2}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(
-        a, test::NDArray<char, 3>({{{1, 0, 1}, {1, 1, 0}}, {{0, 1, 0}, {1, 1, 1}}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{2});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{0, 0}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_2x2x3_eliminate_dims_0_1_2)
-{
-    Shape shape{2, 2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::All>(A, AxisSet{0, 1, 2}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(
-        a, test::NDArray<char, 3>({{{1, 0, 1}, {1, 1, 0}}, {{0, 1, 0}, {1, 1, 1}}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{0}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_dynamic_axis)
-{
-    Shape shape{2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto B = op::Constant::create(element::i64, Shape{1}, {1});
-    auto f = make_shared<Function>(make_shared<op::All>(A, B), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(a, test::NDArray<char, 2>({{1, 0, 1}, {1, 1, 1}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{2});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{0, 1}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_change_axis)
-{
-    Shape shape{2, 3};
-    auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto B = op::Constant::create(element::i64, Shape{1}, {1});
-    auto all = make_shared<op::All>(A, B);
-    ASSERT_EQ(all->get_reduction_axes(), AxisSet{1});
-    auto f = make_shared<Function>(all, ParameterVector{A});
-
-    all->set_reduction_axes(AxisSet{0});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::boolean, shape);
-    copy_data(a, test::NDArray<char, 2>({{1, 0, 1}, {1, 1, 1}}).get_vector());
-    auto result = backend->create_tensor(element::boolean, Shape{3});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{1, 0, 1}), read_vector<char>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, all_dynamic)
-{
-    // Create a graph for f(x,axes:int32) = All(x,Convert<int64>(axes)).
-    auto x = make_shared<op::Parameter>(element::boolean, PartialShape::dynamic());
-    auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
-    auto axes_i64 = make_shared<op::Convert>(axes, element::i64);
-
-    auto all = make_shared<op::All>(x, axes_i64);
-    ASSERT_TRUE(all->get_output_partial_shape(0).rank().is_dynamic());
-
-    auto f = make_shared<Function>(NodeVector{all}, ParameterVector{x, axes});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
-
-    auto ex = backend->compile(f);
-
-    auto t_r = backend->create_dynamic_tensor(element::boolean, PartialShape::dynamic());
-
-    std::vector<Shape> x_shapes{
-        Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}};
-    std::vector<std::vector<int32_t>> axeses{{}, {0}, {1}, {0, 1}, {}, {0}};
-    std::vector<std::vector<char>> inputs{{1, 0, 1, 0, 1, 0},
-                                          {1, 0, 1, 0, 0, 1},
-                                          {1, 0, 1, 1, 1, 1},
-                                          {1, 0, 1, 0, 1, 0},
-                                          {1, 0, 1, 0, 1},
-                                          {1, 0, 1, 0, 1}};
-    std::vector<Shape> expected_result_shapes{
-        Shape{2, 3}, Shape{3}, Shape{2}, Shape{}, Shape{5}, Shape{}};
-    std::vector<std::vector<char>> expected_results{
-        {1, 0, 1, 0, 1, 0}, {0, 0, 1}, {0, 1}, {0}, {1, 0, 1, 0, 1}, {0}};
-
-    for (size_t i = 0; i < x_shapes.size(); i++)
-    {
-        auto t_x = backend->create_tensor(element::boolean, x_shapes[i]);
-        auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});
-
-        copy_data(t_x, inputs[i]);
-        copy_data(t_axes, axeses[i]);
-
-        ex->call_with_validate({t_r}, {t_x, t_axes});
-
-        ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
-
-        auto results = read_vector<char>(t_r);
-
-        ASSERT_EQ(results, expected_results[i]);
-    }
-}
@@ -1253,31 +1253,6 @@ TEST(constant_folding, const_reducemean_keepdims)
     ASSERT_EQ(values_expected, values_out);
 }
 
-TEST(constant_folding, const_all)
-{
-    Shape input_shape{3, 3};
-
-    vector<char> values_in{0, 1, 1, 0, 1, 0, 1, 1, 1};
-    auto constant = op::Constant::create(element::boolean, input_shape, values_in);
-    auto convert = make_shared<op::All>(constant, AxisSet{1});
-    auto f = make_shared<Function>(convert, ParameterVector{});
-
-    pass::Manager pass_manager;
-    pass_manager.register_pass<pass::ConstantFolding>();
-    pass_manager.run_passes(f);
-
-    ASSERT_EQ(count_ops_of_type<op::All>(f), 0);
-    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
-
-    auto new_const = as_type_ptr<op::Constant>(f->get_results().at(0)->get_argument(0));
-    ASSERT_TRUE(new_const);
-    auto values_out = new_const->get_vector<char>();
-
-    vector<char> values_expected{0, 0, 1};
-
-    ASSERT_EQ(values_expected, values_out);
-}
-
 TEST(constant_folding, const_reduce_logical_and__no_keepdims)
 {
     const Shape input_shape{3, 3};
@@ -53,24 +53,6 @@ namespace
         EXPECT_FALSE(node.is_binary_elementwise_logical());
     }
 
-    void op_is_All()
-    {
-        op::All node;
-        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
-        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
-        EXPECT_FALSE(node.is_binary_elementwise_comparison());
-        EXPECT_FALSE(node.is_binary_elementwise_logical());
-    }
-
-    void op_is_AllReduce()
-    {
-        op::AllReduce node;
-        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
-        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
-        EXPECT_FALSE(node.is_binary_elementwise_comparison());
-        EXPECT_FALSE(node.is_binary_elementwise_logical());
-    }
-
     void op_is_And()
     {
         op::v0::And node;
@@ -30,8 +30,6 @@
 #include "ngraph/runtime/aligned_buffer.hpp"
 #include "ngraph/runtime/reference/abs.hpp"
 #include "ngraph/runtime/reference/acos.hpp"
-#include "ngraph/runtime/reference/all.hpp"
-#include "ngraph/runtime/reference/allreduce.hpp"
 #include "ngraph/runtime/reference/any.hpp"
 #include "ngraph/runtime/reference/asin.hpp"
 #include "ngraph/runtime/reference/atan.hpp"
@@ -207,27 +205,6 @@ protected:
                 args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<T>(), element_count);
             break;
         }
-        case OP_TYPEID::All:
-        {
-            const op::All* all = static_cast<const op::All*>(&node);
-            reference::all(args[0]->get_data_ptr<const char>(),
-                           out[0]->get_data_ptr<char>(),
-                           node.get_input_shape(0),
-                           node.get_output_shape(0),
-                           all->get_reduction_axes());
-            break;
-        }
-        case OP_TYPEID::AllReduce:
-        {
-            const ngraph::op::AllReduce* allreduce =
-                static_cast<const ngraph::op::AllReduce*>(&node);
-            reference::allreduce<T>(args[0]->get_data_ptr<T>(),
-                                    out[0]->get_data_ptr<T>(),
-                                    node.get_input_element_type(0),
-                                    allreduce->get_reduce_type(),
-                                    static_cast<int>(shape_size(node.get_input_shape(0))));
-            break;
-        }
         case OP_TYPEID::Any:
         {
             const op::Any* any = static_cast<const op::Any*>(&node);
@@ -53,8 +53,6 @@
 NGRAPH_OP(Abs, ngraph::op)
 NGRAPH_OP(Acos, ngraph::op)
 NGRAPH_OP(Add, ngraph::op)
-NGRAPH_OP(All, ngraph::op)
-NGRAPH_OP(AllReduce, ngraph::op)
 NGRAPH_OP(And, ngraph::op::v0)
 NGRAPH_OP(Any, ngraph::op)
 NGRAPH_OP(Asin, ngraph::op)
@@ -197,26 +197,6 @@ TEST(serialize, constant)
     EXPECT_TRUE(found);
 }
 
-TEST(benchmark, serialize)
-{
-    stopwatch timer;
-    string model = "mxnet/LSTM_backward.json";
-
-    const string json_path = file_util::path_join(SERIALIZED_ZOO, model);
-    timer.start();
-    const string json_string = file_util::read_file_to_string(json_path);
-    timer.stop();
-    cout << "file read took " << timer.get_milliseconds() << "ms\n";
-    timer.start();
-    shared_ptr<Function> f = ngraph::deserialize(json_string);
-    timer.stop();
-    cout << "deserialize took " << timer.get_milliseconds() << "ms\n";
-
-    WithSerializeOutputShapesEnabled serialize_outputs(true);
-    ofstream out("test.json");
-    out << serialize(f, 4);
-}
-
 MATCHER_P2(IsOutputShape, type, shape, "")
 {
     return std::get<0>(arg) == type && std::get<1>(arg).to_shape() == shape;
@@ -1,159 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "util/type_prop.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-TEST(type_prop, all_deduce)
-{
-    auto param_0 = make_shared<op::Parameter>(element::boolean, Shape{2, 4});
-
-    auto r0 = make_shared<op::All>(param_0, AxisSet{0});
-    ASSERT_EQ(r0->get_element_type(), element::boolean);
-    ASSERT_EQ(r0->get_shape(), (Shape{4}));
-
-    auto r1 = make_shared<op::All>(param_0, AxisSet{1});
-    ASSERT_EQ(r1->get_element_type(), element::boolean);
-    ASSERT_EQ(r1->get_shape(), (Shape{2}));
-
-    auto r01 = make_shared<op::All>(param_0, AxisSet{0, 1});
-    ASSERT_EQ(r01->get_element_type(), element::boolean);
-    ASSERT_EQ(r01->get_shape(), (Shape{}));
-
-    auto r_none = make_shared<op::All>(param_0, AxisSet{});
-    ASSERT_EQ(r_none->get_element_type(), element::boolean);
-    ASSERT_EQ(r_none->get_shape(), (Shape{2, 4}));
-}
-
-TEST(type_prop, all_deduce_et_dynamic)
-{
-    auto param_0 = make_shared<op::Parameter>(element::dynamic, Shape{2, 4});
-
-    auto r0 = make_shared<op::All>(param_0, AxisSet{0});
-    ASSERT_EQ(r0->get_element_type(), element::boolean);
-    ASSERT_EQ(r0->get_shape(), (Shape{4}));
-
-    auto r1 = make_shared<op::All>(param_0, AxisSet{1});
-    ASSERT_EQ(r1->get_element_type(), element::boolean);
-    ASSERT_EQ(r1->get_shape(), (Shape{2}));
-
-    auto r01 = make_shared<op::All>(param_0, AxisSet{0, 1});
-    ASSERT_EQ(r01->get_element_type(), element::boolean);
-    ASSERT_EQ(r01->get_shape(), (Shape{}));
-
-    auto r_none = make_shared<op::All>(param_0, AxisSet{});
-    ASSERT_EQ(r_none->get_element_type(), element::boolean);
-    ASSERT_EQ(r_none->get_shape(), (Shape{2, 4}));
-}
-
-TEST(type_prop, all_et_non_boolean)
-{
-    auto param_0 = make_shared<op::Parameter>(element::i32, Shape{2, 4});
-
-    try
-    {
-        auto r = make_shared<op::All>(param_0, AxisSet{0, 1});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Did not detect invalid element type for All";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("Input element type must be boolean"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, all_axis_oob)
-{
-    auto param_0 = make_shared<op::Parameter>(element::boolean, Shape{2, 4});
-
-    try
-    {
-        auto r = make_shared<op::All>(param_0, AxisSet{0, 2, 1});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Did not detect out-of-bound axis for All";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (2) is out of bounds"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, all_partial_rank_dynamic)
-{
-    auto param = make_shared<op::Parameter>(element::boolean, PartialShape::dynamic());
-    auto axes = AxisSet{2385, 0, 4404}; // arbitrary
-    auto all = make_shared<op::All>(param, axes);
-
-    EXPECT_EQ(all->get_output_element_type(0), element::boolean);
-    EXPECT_TRUE(all->get_output_partial_shape(0).is_dynamic());
-}
-
-TEST(type_prop, all_partial_rank_static_dynamic_ok_result_static)
-{
-    auto param = make_shared<op::Parameter>(element::boolean,
-                                            PartialShape{1, 2, Dimension::dynamic(), 4, 5});
-    auto axes = AxisSet{2, 3};
-    auto all = make_shared<op::All>(param, axes);
-
-    EXPECT_EQ(all->get_output_element_type(0), element::boolean);
-    EXPECT_EQ(all->get_shape(), (Shape{1, 2, 5}));
-}
-
-TEST(type_prop, all_partial_rank_static_dynamic_ok_result_dynamic)
-{
-    auto param = make_shared<op::Parameter>(
-        element::boolean, PartialShape{1, 2, Dimension::dynamic(), 4, Dimension::dynamic()});
-    auto axes = AxisSet{2, 3};
-    auto all = make_shared<op::All>(param, axes);
-
-    EXPECT_EQ(all->get_output_element_type(0), element::boolean);
-    EXPECT_TRUE(
-        all->get_output_partial_shape(0).same_scheme(PartialShape{1, 2, Dimension::dynamic()}));
-}
-
-TEST(type_prop, all_partial_rank_static_dynamic_axes_oob)
-{
-    auto param = make_shared<op::Parameter>(
-        element::boolean, PartialShape{1, 2, Dimension::dynamic(), 4, Dimension::dynamic()});
-    auto axes = AxisSet{2, 5, 1};
-
-    try
-    {
-        auto all = make_shared<op::All>(param, axes);
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Did not detect out-of-bound axis for All (rank-static dynamic input)";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (5) is out of bounds"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}