v1::Pad evaluator (#1771)

* v1::Pad reference implementation

* ut fix: pad_negative_exterior_1d

* ut fix: pad_negative_exterior_1d_check_limits & pad_edge_1d

* Code formatting

* ut fix: pad_edge_1d_top_neg & pad_edge_1d_top_neg_bigger_than_tensor

* More Pad UT fixes

* Pad UT fixes: REFLECT mode

* Fix all Pad UTs

* Switch Pad evaluation in INT backend

* Non-template solution to v1::Pad::evaluate

* Always create v1::Pad with 4 inputs

* VS compilation error fix

* Python test fix

* Remove the v0::Pad constant folding pass

* Some extra checks in v1::Pad evaluator

* Code formatting

* Remove an obsolete CF test
Tomasz Dołbniak 2020-08-18 12:43:54 +02:00 committed by GitHub
parent ea80bc3d6f
commit 1cb1f13eaa
11 changed files with 710 additions and 938 deletions
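
Taken together, the changes below drop the templated v0-era Pad constant folding and switch both the reference kernel and the INT backend to a single byte-wise v1::Pad evaluator. One visible API effect is that the three-input v1::Pad constructor now appends a scalar zero pad value, so the node always carries four inputs. A minimal construction sketch of that invariant (illustrative code assuming the ngraph public API of this period, not an excerpt from the diff):

#include "ngraph/ngraph.hpp"

using namespace ngraph;

void four_input_invariant_sketch()
{
    auto arg        = std::make_shared<op::Parameter>(element::f32, Shape{2});
    auto pads_begin = op::Constant::create(element::i64, Shape{1}, std::vector<int64_t>{1});
    auto pads_end   = op::Constant::create(element::i64, Shape{1}, std::vector<int64_t>{2});

    // No explicit pad value is passed; after this change the constructor appends
    // a scalar zero constant of the data's element type as the fourth input.
    auto pad = std::make_shared<op::v1::Pad>(arg, pads_begin, pads_end, op::PadMode::CONSTANT);
    NGRAPH_CHECK(pad->get_input_size() == 4);
}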

View File

@@ -148,6 +148,9 @@ namespace ngraph
/// \return The padding mode.
PadMode get_pad_mode() const { return m_pad_mode; }
void set_pad_mode(PadMode pad_mode) { m_pad_mode = pad_mode; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;
private:
PadMode m_pad_mode;
};

View File

@@ -38,7 +38,6 @@ public:
{
m_cfmap = cfmap;
m_enable_shape_inference = true;
construct_constant_pad();
construct_constant_quantize();
construct_constant_dequantize();
construct_constant_convert();
@@ -52,7 +51,6 @@ public:
}
private:
void construct_constant_pad();
void construct_constant_quantize();
void construct_constant_dequantize();
void construct_constant_convert();

View File

@@ -29,176 +29,15 @@ namespace ngraph
{
namespace reference
{
template <typename T>
void pad(const T* arg0,
const T* arg1,
T* out,
const Shape& arg0_shape,
void pad(const char* data,
const char* pad_value,
char* out,
const size_t elem_size,
const Shape& data_shape,
const Shape& out_shape,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
op::PadMode pad_mode)
{
Coordinate input_start(arg0_shape.size(), 0); // start at (0,0,...,0)
Coordinate input_end = out_shape; // end at (d'0,d'1,...,d'n), the outer corner of
// the post-padding shape
Strides input_strides(arg0_shape.size(), 1);
AxisVector input_axis_order(arg0_shape.size());
for (size_t i = 0; i < arg0_shape.size(); i++)
{
input_axis_order[i] = i;
}
CoordinateTransform input_transform(arg0_shape,
input_start,
input_end,
input_strides,
input_axis_order,
padding_below,
padding_above);
CoordinateTransform output_transform(out_shape);
CoordinateTransform::Iterator output_it = output_transform.begin();
NGRAPH_CHECK(shape_size(input_transform.get_target_shape()) ==
shape_size(output_transform.get_target_shape()));
for (const Coordinate& in_coord : input_transform)
{
const Coordinate& out_coord = *output_it;
T v(0);
switch (pad_mode)
{
case op::PadMode::CONSTANT:
// If the coordinate is out of bounds, substitute *arg1.
v = input_transform.has_source_coordinate(in_coord)
? arg0[input_transform.index(in_coord)]
: *arg1;
break;
case op::PadMode::EDGE:
{
Coordinate c = in_coord; // have to copy because in_coord is const
// Truncate each out-of-bound dimension.
for (size_t i = 0; i < c.size(); i++)
{
if (static_cast<ptrdiff_t>(c[i]) < padding_below[i])
{
c[i] = padding_below[i];
}
if (static_cast<ptrdiff_t>(c[i]) >=
(padding_below[i] + static_cast<ptrdiff_t>(arg0_shape[i])))
{
c[i] = static_cast<size_t>(
padding_below[i] + static_cast<ptrdiff_t>(arg0_shape[i]) - 1);
}
}
v = arg0[input_transform.index(c)];
break;
}
case op::PadMode::REFLECT:
{
// clang-format off
// The algorithm here is a bit complicated because if the padding is
// bigger than the tensor, we may reflect multiple times.
//
// Example:
//
// Input shape: [2]
// Padding: 6 below, 6 above
// Output shape: [14]
//
// Input: a b
// Expected output: a b a b a b a b a b a b a b
//
// Computation for coordinate 13 of output:
//
// . . . . . . a b . . . . .[.] -> (oob above by 6 spaces, so reflection is at top-6)
// .[.]. . . . a b . . . . . . -> (oob below by 5 spaces, so reflection is at bottom+5)
// . . . . . . a b . . .[.]. . -> (oob above by 4 spaces, so reflection is at top-4)
// . . .[.]. . a b . . . . . . -> (oob below by 3 spaces, so reflection is at bottom+3)
// . . . . . . a b .[.]. . . . -> (oob above by 2 spaces, so reflection is at top-2)
// . . . . .[.]a b . . . . . . -> (oob below by 1 space, so reflection is at bottom+1)
// . . . . . . a[b]. . . . . . -> (no longer oob, so copy from here)
//
// Note that this algorithm works because REFLECT padding only makes sense
// if each dim is >= 2.
// clang-format on
Coordinate c = in_coord; // have to copy because in_coord is const
for (size_t i = 0; i < c.size(); i++)
{
ptrdiff_t new_dim = c[i];
bool done_reflecting = false;
while (!done_reflecting)
{
if (new_dim < padding_below[i])
{
ptrdiff_t distance_oob = padding_below[i] - new_dim;
new_dim = padding_below[i] + distance_oob;
}
else if (new_dim >=
padding_below[i] + static_cast<ptrdiff_t>(arg0_shape[i]))
{
ptrdiff_t distance_oob =
new_dim - padding_below[i] -
(static_cast<ptrdiff_t>(arg0_shape[i]) - 1);
new_dim = padding_below[i] +
static_cast<ptrdiff_t>(arg0_shape[i]) - distance_oob -
1;
}
else
{
done_reflecting = true;
}
}
c[i] = static_cast<size_t>(new_dim);
}
v = arg0[input_transform.index(c)];
break;
}
case op::PadMode::SYMMETRIC:
{
Coordinate c = in_coord; // have to copy because in_coord is const
for (size_t i = 0; i < c.size(); i++)
{
ptrdiff_t pos = padding_below[i] - (c[i] + 1);
if (pos >= 0)
{
c[i] = static_cast<size_t>(pos + padding_below[i]);
}
else
{
pos = -(pos + 1);
ptrdiff_t src_dim = static_cast<ptrdiff_t>(arg0_shape[i]);
if (pos < src_dim)
{
c[i] = static_cast<size_t>(pos + padding_below[i]);
}
else
{
c[i] = static_cast<size_t>(padding_below[i] + src_dim +
padding_above[i] - pos);
}
}
}
v = arg0[input_transform.index(c)];
break;
}
}
out[output_transform.index(out_coord)] = v;
++output_it;
}
}
const op::PadMode pad_mode);
}
}
}
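
The hunk above replaces the templated reference::pad<T> with a single type-erased overload: the element type is conveyed only through elem_size and all tensors are passed as raw char buffers. A sketch of how a caller might use the declared signature for a float tensor (illustrative values; assumes Shape, CoordinateDiff and shape_size are available through the included headers):

#include <vector>
#include "ngraph/runtime/reference/pad.hpp"

using namespace ngraph;

void reference_pad_sketch()
{
    const std::vector<float> data{7.f, 8.f};
    const float pad_value = 0.f;
    const Shape data_shape{2};
    const Shape out_shape{5};                 // 1 below + 2 data + 2 above
    const CoordinateDiff padding_below{1};
    const CoordinateDiff padding_above{2};

    std::vector<float> out(shape_size(out_shape));

    // One compiled body serves every element type: the kernel copies
    // elem_size bytes per element instead of dereferencing a typed pointer.
    runtime::reference::pad(reinterpret_cast<const char*>(data.data()),
                            reinterpret_cast<const char*>(&pad_value),
                            reinterpret_cast<char*>(out.data()),
                            sizeof(float),
                            data_shape,
                            out_shape,
                            padding_below,
                            padding_above,
                            op::PadMode::CONSTANT);
    // out now holds {0, 7, 8, 0, 0}
}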

View File

@@ -20,6 +20,7 @@
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/runtime/reference/pad.hpp"
using namespace std;
using namespace ngraph;
@@ -155,7 +156,7 @@ op::v1::Pad::Pad(const Output<Node>& arg,
const Output<Node>& pads_begin,
const Output<Node>& pads_end,
PadMode pad_mode)
: Op({arg, pads_begin, pads_end})
: Op({arg, pads_begin, pads_end, op::Constant::create(arg.get_element_type(), Shape{}, {0})})
, m_pad_mode{pad_mode}
{
constructor_validate_and_infer_types();
@@ -197,8 +198,7 @@ void op::v1::Pad::validate_and_infer_types()
const auto& pads_begin_element_type = get_input_element_type(1);
const auto& pads_end_element_type = get_input_element_type(2);
const auto arg_pad_value_provided = get_input_size() == 4;
if (m_pad_mode == PadMode::CONSTANT && arg_pad_value_provided)
if (m_pad_mode == PadMode::CONSTANT && get_input_size() == 4)
{
const auto& arg_pad_element_type = get_input_element_type(3);
const auto& arg_pad_shape = get_input_partial_shape(3);
@@ -310,8 +310,7 @@ void op::v1::Pad::validate_and_infer_types()
shared_ptr<Node> op::v1::Pad::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
const auto arg_pad_value_provided = get_input_size() == 4;
if (arg_pad_value_provided)
if (get_input_size() == 4)
{
return make_shared<v1::Pad>(
new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), m_pad_mode);
@@ -321,3 +320,33 @@ shared_ptr<Node> op::v1::Pad::clone_with_new_inputs(const OutputVector& new_args
return make_shared<v1::Pad>(new_args.at(0), new_args.at(1), new_args.at(2), m_pad_mode);
}
}
bool op::v1::Pad::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
const auto& data = inputs[0];
const auto elem_size = data->get_element_type().size();
const char* pad_value = nullptr;
const std::vector<char> pad_zero_value(elem_size, 0);
if (get_input_size() == 4)
{
pad_value = inputs[3]->get_data_ptr<char>();
}
else
{
pad_value = pad_zero_value.data();
}
const auto& out = outputs[0];
ngraph::runtime::reference::pad(data->get_data_ptr<char>(),
pad_value,
out->get_data_ptr<char>(),
elem_size,
data->get_shape(),
out->get_shape(),
get_pads_begin(),
get_pads_end(),
get_pad_mode());
return true;
}
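
A sketch of how this evaluator might be driven directly, with one host tensor per node input (illustrative; assumes the runtime::HostTensor API of this period):

#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/host_tensor.hpp"

using namespace ngraph;

void evaluate_pad_sketch()
{
    auto arg        = std::make_shared<op::Parameter>(element::f32, Shape{2});
    auto pads_begin = op::Constant::create(element::i64, Shape{1}, std::vector<int64_t>{1});
    auto pads_end   = op::Constant::create(element::i64, Shape{1}, std::vector<int64_t>{2});
    auto pad = std::make_shared<op::v1::Pad>(arg, pads_begin, pads_end, op::PadMode::CONSTANT);

    // Host tensors for all four inputs (the constructor appended the scalar pad value).
    auto data_t  = std::make_shared<runtime::HostTensor>(element::f32, Shape{2});
    data_t->get_data_ptr<float>()[0] = 7.f;
    data_t->get_data_ptr<float>()[1] = 8.f;
    auto begin_t = std::make_shared<runtime::HostTensor>(element::i64, Shape{1});
    begin_t->get_data_ptr<int64_t>()[0] = 1;
    auto end_t   = std::make_shared<runtime::HostTensor>(element::i64, Shape{1});
    end_t->get_data_ptr<int64_t>()[0] = 2;
    auto value_t = std::make_shared<runtime::HostTensor>(element::f32, Shape{});
    value_t->get_data_ptr<float>()[0] = 0.f;
    auto out_t   = std::make_shared<runtime::HostTensor>(element::f32, Shape{5});

    // pads_begin/pads_end are read from the node's constant inputs; the pad
    // value comes from inputs[3]; everything else is copied byte-wise.
    pad->evaluate({out_t}, {data_t, begin_t, end_t, value_t});
    // out_t now holds {0, 7, 8, 0, 0}
}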

View File

@@ -1,158 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "constant_folding.hpp"
#include "ngraph/op/pad.hpp"
#include "ngraph/runtime/reference/pad.hpp"
using namespace std;
using namespace ngraph;
template <class T>
shared_ptr<op::Constant> fold_constant_pad(shared_ptr<op::Constant> constant,
shared_ptr<op::Pad> pad,
NodeExecutorTy func)
{
const Shape& out_shape = pad->get_shape();
runtime::AlignedBuffer buffer(shape_size(out_shape) * sizeof(T));
T* data_ptr = buffer.get_ptr<T>();
auto pad_value = std::static_pointer_cast<op::Constant>(pad->get_input_node_shared_ptr(1));
if (func != nullptr)
{
vector<void*> inputs;
inputs.push_back(const_cast<void*>(constant->get_data_ptr()));
inputs.push_back(const_cast<void*>(pad_value->get_data_ptr()));
vector<void*> outputs;
outputs.push_back(data_ptr);
func(inputs, outputs);
}
else
{
runtime::reference::pad<T>(constant->get_data_ptr<T>(),
pad_value->get_data_ptr<T>(),
data_ptr,
constant->get_shape(),
out_shape,
pad->get_padding_below(),
pad->get_padding_above(),
pad->get_pad_mode());
}
return make_shared<op::Constant>(constant->get_element_type(), out_shape, data_ptr);
}
void pass::ConstantFolding::construct_constant_pad()
{
auto is_constant = pattern::has_class<op::Constant>();
auto constant_label = make_shared<pattern::op::Label>(element::f32, Shape{6}, is_constant);
auto pad_value_label = make_shared<pattern::op::Label>(element::f32, Shape{}, is_constant);
CoordinateDiff padding_below{0};
CoordinateDiff padding_above{0};
op::PadMode pad_mode{op::PadMode::CONSTANT};
auto pad = make_shared<op::Pad>(
constant_label, pad_value_label, padding_below, padding_above, pad_mode);
auto constant_pad_callback = [&, constant_label](pattern::Matcher& m) {
NGRAPH_DEBUG << "In callback for constant_pad_callback against node = "
<< m.get_match_root()->get_name();
auto pattern_map = m.get_pattern_map();
auto constant_match = static_pointer_cast<op::Constant>(pattern_map[constant_label]);
auto pad_match = static_pointer_cast<op::Pad>(m.get_match_root());
if (cf_is_disabled(pad_match))
return false;
NGRAPH_CHECK(revalidate_and_ensure_static(pad_match));
NodeExecutorTy func = nullptr;
if (!m_cfmap.empty())
{
auto handler = m_cfmap.find(type_index(typeid(ngraph::op::Pad)));
NGRAPH_CHECK(handler != m_cfmap.end(), "constant folding map should have pad entry");
func = handler->second(pad_match.get());
}
std::shared_ptr<Node> replacement;
auto type = constant_match->get_element_type();
switch (type)
{
case element::Type_t::undefined:
NGRAPH_CHECK(false, "Encountered 'undefined' element type in constant_pad_callback");
break;
case element::Type_t::dynamic:
NGRAPH_CHECK(false, "Encountered 'dynamic' element type in constant_pad_callback");
break;
case element::Type_t::u1:
NGRAPH_CHECK(false, "Encountered 'u1' element type in constant_pad_callback");
break;
case element::Type_t::boolean:
replacement = fold_constant_pad<char>(constant_match, pad_match, func);
break;
case element::Type_t::bf16:
replacement = fold_constant_pad<bfloat16>(constant_match, pad_match, func);
break;
case element::Type_t::f16:
replacement = fold_constant_pad<float16>(constant_match, pad_match, func);
break;
case element::Type_t::f32:
replacement = fold_constant_pad<float>(constant_match, pad_match, func);
break;
case element::Type_t::f64:
replacement = fold_constant_pad<double>(constant_match, pad_match, func);
break;
case element::Type_t::i8:
replacement = fold_constant_pad<int8_t>(constant_match, pad_match, func);
break;
case element::Type_t::i16:
replacement = fold_constant_pad<int16_t>(constant_match, pad_match, func);
break;
case element::Type_t::i32:
replacement = fold_constant_pad<int32_t>(constant_match, pad_match, func);
break;
case element::Type_t::i64:
replacement = fold_constant_pad<int64_t>(constant_match, pad_match, func);
break;
case element::Type_t::u8:
replacement = fold_constant_pad<uint8_t>(constant_match, pad_match, func);
break;
case element::Type_t::u16:
replacement = fold_constant_pad<uint16_t>(constant_match, pad_match, func);
break;
case element::Type_t::u32:
replacement = fold_constant_pad<uint32_t>(constant_match, pad_match, func);
break;
case element::Type_t::u64:
replacement = fold_constant_pad<uint64_t>(constant_match, pad_match, func);
break;
}
replace_node(m.get_match_root(), replacement);
return true;
};
auto pad_matcher = make_shared<pattern::Matcher>(pad, "ConstantFolding.ConstantPad");
NGRAPH_SUPPRESS_DEPRECATED_START
this->add_matcher(pad_matcher, constant_pad_callback, PassProperty::CHANGE_DYNAMIC_STATE);
NGRAPH_SUPPRESS_DEPRECATED_END
}

View File

@@ -0,0 +1,211 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/reference/pad.hpp"
namespace ngraph
{
namespace runtime
{
namespace reference
{
void pad(const char* data,
const char* pad_value,
char* out,
const size_t elem_size,
const Shape& data_shape,
const Shape& out_shape,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const op::PadMode pad_mode)
{
Coordinate input_start(data_shape.size(), 0); // start at (0,0,...,0)
Coordinate input_end = out_shape; // end at (d'0,d'1,...,d'n), the outer corner of
// the post-padding shape
Strides input_strides(data_shape.size(), 1);
AxisVector input_axis_order(data_shape.size());
for (size_t i = 0; i < data_shape.size(); i++)
{
input_axis_order[i] = i;
}
CoordinateTransform input_transform(data_shape,
input_start,
input_end,
input_strides,
input_axis_order,
padding_below,
padding_above);
CoordinateTransform output_transform(out_shape);
CoordinateTransform::Iterator output_it = output_transform.begin();
NGRAPH_CHECK(shape_size(input_transform.get_target_shape()) ==
shape_size(output_transform.get_target_shape()));
// depending on the data tensor element type, allocate enough bytes to fit a
// single value of this type
std::vector<char> v(elem_size, 0);
for (const Coordinate& in_coord : input_transform)
{
const Coordinate& out_coord = *output_it;
std::fill(v.begin(), v.end(), 0);
switch (pad_mode)
{
case op::PadMode::CONSTANT:
// If the coordinate is out of bounds, substitute *pad_value.
if (input_transform.has_source_coordinate(in_coord))
{
const auto* offset = data + input_transform.index(in_coord) * elem_size;
std::copy(offset, offset + elem_size, v.begin());
}
else
{
std::copy(pad_value, pad_value + elem_size, v.begin());
}
break;
case op::PadMode::EDGE:
{
Coordinate c = in_coord; // have to copy because in_coord is const
// Truncate each out-of-bound dimension.
for (size_t i = 0; i < c.size(); i++)
{
if (static_cast<ptrdiff_t>(c[i]) < padding_below[i])
{
c[i] = padding_below[i];
}
if (static_cast<ptrdiff_t>(c[i]) >=
(padding_below[i] + static_cast<ptrdiff_t>(data_shape[i])))
{
c[i] = static_cast<size_t>(
padding_below[i] + static_cast<ptrdiff_t>(data_shape[i]) - 1);
}
}
const auto* offset = data + input_transform.index(c) * elem_size;
std::copy(offset, offset + elem_size, v.begin());
break;
}
case op::PadMode::REFLECT:
{
// clang-format off
// The algorithm here is a bit complicated because if the padding is
// bigger than the tensor, we may reflect multiple times.
//
// Example:
//
// Input shape: [2]
// Padding: 6 below, 6 above
// Output shape: [14]
//
// Input: a b
// Expected output: a b a b a b a b a b a b a b
//
// Computation for coordinate 13 of output:
//
// . . . . . . a b . . . . .[.] -> (oob above by 6 spaces, so reflection is at top-6)
// .[.]. . . . a b . . . . . . -> (oob below by 5 spaces, so reflection is at bottom+5)
// . . . . . . a b . . .[.]. . -> (oob above by 4 spaces, so reflection is at top-4)
// . . .[.]. . a b . . . . . . -> (oob below by 3 spaces, so reflection is at bottom+3)
// . . . . . . a b .[.]. . . . -> (oob above by 2 spaces, so reflection is at top-2)
// . . . . .[.]a b . . . . . . -> (oob below by 1 space, so reflection is at bottom+1)
// . . . . . . a[b]. . . . . . -> (no longer oob, so copy from here)
//
// Note that this algorithm works because REFLECT padding only makes sense
// if each dim is >= 2.
// clang-format on
Coordinate c = in_coord; // have to copy because in_coord is const
for (size_t i = 0; i < c.size(); i++)
{
ptrdiff_t new_dim = c[i];
bool done_reflecting = false;
while (!done_reflecting)
{
if (new_dim < padding_below[i])
{
ptrdiff_t distance_oob = padding_below[i] - new_dim;
new_dim = padding_below[i] + distance_oob;
}
else if (new_dim >=
padding_below[i] + static_cast<ptrdiff_t>(data_shape[i]))
{
ptrdiff_t distance_oob =
new_dim - padding_below[i] -
(static_cast<ptrdiff_t>(data_shape[i]) - 1);
new_dim = padding_below[i] +
static_cast<ptrdiff_t>(data_shape[i]) - distance_oob -
1;
}
else
{
done_reflecting = true;
}
}
c[i] = static_cast<size_t>(new_dim);
}
const auto* offset = data + input_transform.index(c) * elem_size;
std::copy(offset, offset + elem_size, v.begin());
break;
}
case op::PadMode::SYMMETRIC:
{
Coordinate c = in_coord; // have to copy because in_coord is const
for (size_t i = 0; i < c.size(); i++)
{
ptrdiff_t pos = padding_below[i] - (c[i] + 1);
if (pos >= 0)
{
c[i] = static_cast<size_t>(pos + padding_below[i]);
}
else
{
pos = -(pos + 1);
ptrdiff_t src_dim = static_cast<ptrdiff_t>(data_shape[i]);
if (pos < src_dim)
{
c[i] = static_cast<size_t>(pos + padding_below[i]);
}
else
{
c[i] = static_cast<size_t>(padding_below[i] + src_dim +
padding_above[i] - pos);
}
}
}
const auto* offset = data + input_transform.index(c) * elem_size;
std::copy(offset, offset + elem_size, v.begin());
break;
}
}
std::copy(
v.begin(), v.end(), out + output_transform.index(out_coord) * elem_size);
++output_it;
}
}
}
}
}
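
The REFLECT branch carries a worked example in its comment; the SYMMETRIC branch does not. The sketch below pads a three-character buffer through the new byte-wise kernel (elem_size of 1) to show the difference between the two modes. Illustrative code, not part of the diff:

#include <string>
#include "ngraph/runtime/reference/pad.hpp"

using namespace ngraph;

void symmetric_vs_reflect_sketch()
{
    const std::string data = "abc";
    const char pad_value = '#';      // only read in CONSTANT mode
    std::string out(7, '?');

    runtime::reference::pad(data.data(),
                            &pad_value,
                            &out[0],
                            sizeof(char),        // elem_size: one byte per element
                            Shape{3},
                            Shape{7},
                            CoordinateDiff{2},
                            CoordinateDiff{2},
                            op::PadMode::SYMMETRIC);
    // SYMMETRIC repeats the edge element:              out == "baabccb"
    // The same call with op::PadMode::REFLECT mirrors
    // without repeating the edge:                      out == "cbabcba"
}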

File diff suppressed because it is too large

View File

@@ -352,36 +352,6 @@ TEST(constant_folding, constant_broadcast_v1_numpy)
ASSERT_EQ(values_expected, values_out);
}
TEST(constant_folding, constant_pad_exterior)
{
Shape shape_in{2};
vector<int> values_in{777, 888};
auto constant = make_shared<op::Constant>(element::i32, shape_in, values_in);
auto pad_value = make_shared<op::Constant>(element::i32, Shape{}, vector<int>{111});
CoordinateDiff padding_below{1};
CoordinateDiff padding_above{2};
auto broadcast = make_shared<op::Pad>(constant, pad_value, padding_below, padding_above);
auto f = make_shared<Function>(broadcast, ParameterVector{});
pass::Manager pass_manager;
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
ASSERT_EQ(count_ops_of_type<op::Pad>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
auto new_const =
as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
ASSERT_TRUE(new_const);
auto values_out = new_const->get_vector<int>();
vector<int> padded_values{111, 777, 888, 111, 111};
ASSERT_EQ(padded_values, values_out);
}
TEST(constant_folding, constant_unary_binary)
{
vector<int> values_a{1, 2, 3, 4};
@@ -3209,4 +3179,4 @@ TEST(constant_folding, disable_constant_folding)
ASSERT_EQ(count_ops_of_type<op::v1::Reshape>(f), 1);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
}
}

View File

@@ -737,20 +737,6 @@ protected:
break;
}
case OP_TYPEID::Parameter: break;
case OP_TYPEID::Pad:
{
const op::Pad* pad = static_cast<const op::Pad*>(&node);
reference::pad(args[0]->get_data_ptr<const T>(),
args[1]->get_data_ptr<const T>(),
out[0]->get_data_ptr<T>(),
node.get_input_shape(0),
node.get_output_shape(0),
pad->get_padding_below(),
pad->get_padding_above(),
pad->get_pad_mode());
break;
}
case OP_TYPEID::Quantize:
{
const op::Quantize* quantize = static_cast<const op::Quantize*>(&node);
@@ -1302,6 +1288,7 @@ protected:
case OP_TYPEID::NonZero_v3:
case OP_TYPEID::NotEqual:
case OP_TYPEID::Or:
case OP_TYPEID::Pad:
case OP_TYPEID::Power:
case OP_TYPEID::Product:
case OP_TYPEID::Range:
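
With OP_TYPEID::Pad moved into the case list above, the interpreter no longer pads through the templated reference; it is expected to reach the op's own evaluate() instead. A purely illustrative sketch of that deferral pattern (not the actual INT backend code):

// Hypothetical fallback for ops without a dedicated case in the switch:
// let the node evaluate itself on the host tensors.
if (!node.evaluate(out, args))
{
    throw ngraph_error("Unsupported op in INT backend: " + node.description());
}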

View File

@@ -467,26 +467,6 @@ namespace
return replacement_node;
}
shared_ptr<Node> op_cast(shared_ptr<op::v1::Pad> node)
{
const auto pad_arg = node->input_value(0);
Output<Node> pad_value;
if (node->get_input_size() == 4)
{
pad_value = node->input_value(3);
}
else
{
pad_value =
make_shared<op::Constant>(pad_arg.get_element_type(), Shape{}, vector<float>{0.f});
}
auto replacement_node = make_shared<op::v0::Pad>(
pad_arg, pad_value, node->get_pads_begin(), node->get_pads_end(), node->get_pad_mode());
replace_node(node, replacement_node);
return replacement_node;
}
shared_ptr<Node> op_cast(shared_ptr<op::v1::Power> node)
{
return op_cast_binary_elementwise_node<op::v0::Power, op::v1::Power>(node);

View File

@@ -346,25 +346,6 @@ namespace
return op_cast_binary_elementwise_node<op::v0::Or, op::v1::LogicalOr>(node);
}
shared_ptr<Node> op_cast(shared_ptr<op::Pad> node)
{
auto padding_below = node->get_padding_below();
auto pads_begin_node =
make_shared<op::Constant>(element::i64, Shape{padding_below.size()}, padding_below);
auto padding_above = node->get_padding_above();
auto pads_end_node =
make_shared<op::Constant>(element::i64, Shape{padding_above.size()}, padding_above);
auto replacement_node = make_shared<op::v1::Pad>(node->input_value(0),
pads_begin_node,
pads_end_node,
node->input_value(1),
node->get_pad_mode());
replace_node(node, replacement_node);
return replacement_node;
}
shared_ptr<Node> op_cast(shared_ptr<op::Power> node)
{
return op_cast_binary_elementwise_node<op::v0::Power, op::v1::Power>(node);