Removed adjoints (#1269)

This commit is contained in:
Ilya Churaev 2020-07-10 13:49:43 +03:00 committed by GitHub
parent 8da662b2b8
commit 8d1e7a705d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
164 changed files with 3 additions and 5687 deletions

View File

@ -62,10 +62,6 @@ protected:
Mode m_mode;
SortType m_sort;
element::Type m_index_element_type{element::i32};
void generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) override {
throw ngraph_error("Forward-propagation-only operation");
}
};
} // namespace op

View File

@ -21,8 +21,6 @@ set (SRC
attribute_adapter.hpp
attribute_visitor.cpp
attribute_visitor.hpp
autodiff/adjoints.cpp
autodiff/adjoints.hpp
axis_set.cpp
axis_set.hpp
axis_vector.cpp

View File

@ -1,214 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <list>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/axis_set.hpp"
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/replace_slice.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/strides.hpp"
using namespace ngraph;
Output<Node> make_broadcast_zero(const Output<Node>& output)
{
    // Build a scalar zero matching `output`'s element type, then broadcast it
    // to `output`'s full shape (no extra broadcast axes needed).
    const auto scalar_zero = std::make_shared<op::ScalarConstantLike>(output, 0.0);
    return std::make_shared<op::BroadcastLike>(scalar_zero, output, AxisSet{});
}
OutputVector make_zeros(std::shared_ptr<Node> x)
{
    // One broadcast-zero value per output of `x`.
    OutputVector result;
    result.reserve(x->get_output_size());
    for (const auto& out : x->outputs())
    {
        result.push_back(make_broadcast_zero(out));
    }
    return result;
}
// Reverse-mode autodiff driver: seeds each y with its corresponding c, then
// propagates adjoints backward through the graph in reverse topological order.
autodiff::Adjoints::Adjoints(const OutputVector& ys, const OutputVector& cs)
{
    if (ys.size() != cs.size())
    {
        throw ngraph_error("ys and cs must be equal size");
    }

    // Pass 1 determines which nodes contribute to y as well as setting up a reverse
    // topological sort.

    // Number of nodes that use the node's value
    std::unordered_map<std::shared_ptr<Node>, size_t> parent_counts;

    // Nodes we should check
    std::list<std::shared_ptr<Node>> nodes_to_check;
    for (auto& y : ys)
    {
        nodes_to_check.push_back(y.get_node_shared_ptr());
    }
    while (nodes_to_check.size() > 0)
    {
        auto node = nodes_to_check.front();
        nodes_to_check.pop_front();
        // First visit: reserve an empty adjoint slot per output and enqueue the
        // node's arguments. Later visits only bump the argument's use count.
        if (m_adjoint_map.find(node.get()) == m_adjoint_map.end())
        {
            m_adjoint_map[node.get()] = OutputVector(node->get_output_size());
            for (auto value : node->input_values())
            {
                auto arg = value.get_node_shared_ptr();
                auto count_it = parent_counts.find(arg);
                if (count_it == parent_counts.end())
                {
                    parent_counts[arg] = 1;
                    nodes_to_check.push_front(arg);
                }
                else
                {
                    parent_counts[arg]++;
                }
            }
        }
    }

    // Second pass visits the nodes so that all users of a node's value are visited
    // before a node is visited.
    for (size_t i = 0; i < ys.size(); i++)
    {
        // Seed: the adjoint of y with respect to itself, evaluated at c, is c.
        add_delta(ys.at(i), cs.at(i));
    }
    for (auto& y : ys)
    {
        auto node = y.get_node_shared_ptr();
        if (find(nodes_to_check.begin(), nodes_to_check.end(), node) == nodes_to_check.end())
        {
            nodes_to_check.push_back(y.get_node_shared_ptr());
        }
    }
    while (nodes_to_check.size() > 0)
    {
        auto node = nodes_to_check.front();
        nodes_to_check.pop_front();
        // Look for nodes that will be available when this node is done
        for (auto value : node->input_values())
        {
            auto input_source_node = value.get_node_shared_ptr();
            auto count_it = parent_counts.find(input_source_node);
            count_it->second--;
            // All users visited: this argument's adjoint is complete, so it can
            // be processed next.
            if (0 == count_it->second)
            {
                nodes_to_check.push_front(input_source_node);
            }
        }
        OutputVector deltas = m_adjoint_map[node.get()];
        for (size_t i = 0; i < node->get_output_size(); ++i)
        {
            auto& delta = deltas[i];
            // Outputs that received no contribution get an explicit zero delta.
            if (delta == Output<Node>())
            {
                delta = make_broadcast_zero(node->output(i));
            }
        }
        // Let the node distribute its output deltas onto its inputs.
        node->generate_adjoints(*this, deltas);
    }
}
// \brief (dy/dx)(c) for the output x: returns x's accumulated adjoint,
//        materializing (and caching) a broadcast zero when no backprop
//        contribution has been recorded yet.
Output<Node> autodiff::Adjoints::backprop_output(const Output<Node>& x)
{
    auto node = x.get_node();
    auto adjoint_it = m_adjoint_map.find(node);
    if (m_adjoint_map.end() == adjoint_it)
    {
        // Node never seen: start with one empty adjoint slot per output.
        adjoint_it = m_adjoint_map.emplace(node, OutputVector(node->get_output_size())).first;
    }
    // FIX: operate on a reference into the map. The original copied the vector
    // (and declared an unused `result` local), so the zero delta created below
    // was never stored and a fresh broadcast-zero node was rebuilt on every
    // call — inconsistent with add_delta, which writes through a reference.
    auto& deltas = adjoint_it->second;
    if (deltas.at(x.get_index()) == Output<Node>())
    {
        deltas.at(x.get_index()) = make_broadcast_zero(x);
    }
    return deltas.at(x.get_index());
}
void autodiff::Adjoints::add_delta(const Output<Node>& x, const Output<Node>& delta)
{
auto adjoint_it = m_adjoint_map.find(x.get_node());
if (adjoint_it == m_adjoint_map.end())
{
m_adjoint_map[x.get_node()] = OutputVector(x.get_node()->get_output_size());
adjoint_it = m_adjoint_map.find(x.get_node());
}
auto& deltas = adjoint_it->second[x.get_index()];
if (deltas == Output<Node>())
{
deltas = delta;
}
else
{
deltas = std::make_shared<op::Add>(deltas, delta);
}
}
// This doesn't need an index since slice can only sit on top of GOE
void autodiff::Adjoints::add_delta_to_slice(const Output<Node>& x,
const Output<Node>& delta,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Strides& strides)
{
if (!(x.get_element_type().compatible(delta.get_element_type())) ||
!(x.get_partial_shape().rank().compatible(delta.get_partial_shape().rank())))
{
throw ngraph_error(
"Autodiff internal error: Mismatch on backprop and op in add_delta_to_slice.");
}
auto adjoint_it = m_adjoint_map.find(x.get_node());
auto& deltas = adjoint_it->second[x.get_index()];
if (deltas == Output<Node>())
{
auto zero = make_broadcast_zero(x);
deltas =
std::make_shared<op::ReplaceSlice>(zero, delta, lower_bounds, upper_bounds, strides);
}
else
{
deltas = std::make_shared<op::ReplaceSlice>(
deltas,
std::make_shared<op::Add>(
std::make_shared<op::Slice>(deltas, lower_bounds, upper_bounds, strides), delta),
lower_bounds,
upper_bounds,
strides);
}
}

View File

@ -1,78 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <map>
#include <memory>
#include <unordered_map>
#include "ngraph/coordinate.hpp"
#include "ngraph/output_vector.hpp"
#include "ngraph/strides.hpp"
namespace ngraph
{
    class Node;
    class Function;

    template <typename T>
    class Output;

    namespace autodiff
    {
        /// \brief Reverse-mode automatic differentiation: accumulates adjoint
        ///        (backprop) contributions for every value used to compute y.
        class NGRAPH_API Adjoints
        {
        public:
            /// \brief (dy/dx)(c) for all x used to compute y
            ///
            /// \param y The dependent value
            /// \param c An expression for where to evaluate the derivatives
            Adjoints(const OutputVector& y, const OutputVector& c);

            Adjoints(const Adjoints& adjoints) = default;
            Adjoints& operator=(const Adjoints& adjoints) = default;
            Adjoints() = default;

            /// \brief Add a backprop contribution to x's adjoint
            ///
            /// \param x The adjoint node
            /// \param delta A backprop contribution
            void add_delta(const Output<Node>& x, const Output<Node>& delta);

            /// \brief Add a backprop contribution to a slice of x's adjoint
            ///
            /// \param x The adjoint node
            /// \param delta A backprop contribution
            /// \param lower_bounds Lower bounds of slice to add to
            /// \param upper_bounds Upper bounds of slice to add to
            /// \param strides Strides of slice to add to
            void add_delta_to_slice(const Output<Node>& x,
                                    const Output<Node>& delta,
                                    const Coordinate& lower_bounds,
                                    const Coordinate& upper_bounds,
                                    const Strides& strides);

            /// \brief (dy/dx)(c)
            ///
            /// \param x The output whose adjoint is desired.
            Output<Node> backprop_output(const Output<Node>& x);

        protected:
            // Per-node adjoints: one OutputVector slot per node output.
            std::map<Node*, OutputVector> m_adjoint_map;
        };
    }
}

View File

@ -19,7 +19,6 @@
#include <typeindex>
#include <typeinfo>
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/graph_util.hpp"

View File

@ -20,6 +20,7 @@
#include <cstring>
#include <deque>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <string>
@ -30,7 +31,6 @@
#include <vector>
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/check.hpp"
#include "ngraph/coordinate.hpp"
#include "ngraph/deprecated.hpp"
@ -92,11 +92,6 @@ namespace ngraph
using ResultVector = std::vector<std::shared_ptr<op::v0::Result>>;
namespace autodiff
{
class Adjoints;
}
NGRAPH_API
std::string node_validation_failure_loc_string(const Node* node);
@ -141,9 +136,6 @@ namespace ngraph
/// or a (possibly empty) tuple of values.
class NGRAPH_API Node : public std::enable_shared_from_this<Node>
{
// For access to generate_adjoints.
friend class autodiff::Adjoints;
// For access to m_outputs.
friend class descriptor::Input;
@ -186,14 +178,6 @@ namespace ngraph
/// \param arguments Output i will connect to input i
/// \param output_size Number of outputs for this node
Node(const OutputVector& arguments, size_t output_size = 1);
// For back-compatibility
virtual void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
NGRAPH_DEPRECATED("use OutputVector version instead")
{
generate_adjoints(adjoints, as_output_vector(deltas));
}
virtual void generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) {}
/// \brief Moves nodes that would be deleted from inputs to nodes to avoid stack overflows
/// on deep networks.
void safe_delete(NodeVector& nodes, bool recurse);
@ -588,7 +572,6 @@ namespace ngraph
std::set<std::shared_ptr<Node>> m_provenance_group;
std::deque<descriptor::Input> m_inputs;
std::deque<descriptor::Output> m_outputs;
std::unordered_map<Node*, autodiff::Adjoints> m_adjoint_map;
Placement m_placement = Placement::DEFAULT;
std::shared_ptr<ngraph::op::util::OpAnnotations> m_op_annotations;
std::map<std::string, std::shared_ptr<Variant>> m_rt_info;

View File

@ -38,15 +38,6 @@ shared_ptr<Node> op::Abs::clone_with_new_inputs(const OutputVector& new_args) co
return make_shared<Abs>(new_args.at(0));
}
// d|x|/dx = sign(x): route delta scaled by the input's sign.
void op::Abs::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    adjoints.add_delta(x, delta * make_shared<op::Sign>(x));
}
namespace
{
template <element::Type_t ET>

View File

@ -50,10 +50,6 @@ namespace ngraph
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::Abs;

View File

@ -47,18 +47,6 @@ shared_ptr<Node> op::Acos::clone_with_new_inputs(const OutputVector& new_args) c
return make_shared<Acos>(new_args.at(0));
}
// d(acos x)/dx = -1 / sqrt(1 - x^2)
void op::Acos::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    // Broadcast a scalar 1 to x's shape so the elementwise expression lines up.
    auto one = make_shared<op::ScalarConstantLike>(x, 1.0);
    auto ones = make_shared<op::BroadcastLike>(one, x, AxisSet());
    adjoints.add_delta(x, -delta / make_shared<op::Sqrt>(ones - x * x));
}
namespace
{
template <element::Type_t ET>

View File

@ -48,10 +48,6 @@ namespace ngraph
clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::Acos;

View File

@ -45,22 +45,6 @@ bool op::v0::Add::visit_attributes(AttributeVisitor& visitor)
return true;
}
// d(x + y)/dx = d(x + y)/dy = 1: delta flows unchanged to both inputs.
void op::v0::Add::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    // Implicit broadcasting would change the shapes the input deltas must take.
    if (get_autob().m_type != op::AutoBroadcastType::NONE)
    {
        throw ngraph_error("Autodiff not supported with auto broadcasting");
    }
    auto delta = deltas.at(0);
    auto x = input_value(0);
    auto y = input_value(1);
    adjoints.add_delta(x, delta);
    adjoints.add_delta(y, delta);
}
shared_ptr<Node> ngraph::operator+(const Output<Node>& arg0, const Output<Node>& arg1)
{
return make_shared<op::Add>(arg0, arg1);
@ -149,22 +133,6 @@ shared_ptr<Node> op::v1::Add::clone_with_new_inputs(const OutputVector& new_args
return make_shared<op::v1::Add>(new_args.at(0), new_args.at(1), this->get_autob());
}
// d(x + y)/dx = d(x + y)/dy = 1: delta flows unchanged to both inputs.
void op::v1::Add::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    // Implicit broadcasting would change the shapes the input deltas must take.
    if (get_autob().m_type != op::AutoBroadcastType::NONE)
    {
        throw ngraph_error("Autodiff not supported with auto broadcasting");
    }
    auto delta = deltas.at(0);
    auto x = input_value(0);
    auto y = input_value(1);
    adjoints.add_delta(x, delta);
    adjoints.add_delta(y, delta);
}
bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
return evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());

View File

@ -60,10 +60,6 @@ namespace ngraph
virtual bool is_commutative() const override { return true; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
} // namespace v0
@ -105,10 +101,6 @@ namespace ngraph
size_t get_version() const override { return 1; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
} // namespace v1

View File

@ -48,22 +48,6 @@ shared_ptr<Node> op::Asin::clone_with_new_inputs(const OutputVector& new_args) c
return make_shared<Asin>(new_args.at(0));
}
// d(asin x)/dx = 1 / sqrt(1 - x^2)
void op::Asin::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    // Build a ones tensor of x's shape by broadcasting a scalar constant over
    // every axis.
    auto one = make_shared<op::Constant>(x.get_element_type(), Shape{}, vector<string>{"1"});
    AxisSet axes;
    for (size_t i = 0; i < x.get_shape().size(); i++)
        axes.insert(i);
    auto ones = make_shared<op::Broadcast>(one, x.get_shape(), axes);
    adjoints.add_delta(x, delta / make_shared<op::Sqrt>(ones - x * x));
}
namespace
{
template <element::Type_t ET>

View File

@ -49,10 +49,6 @@ namespace ngraph
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::Asin;

View File

@ -47,22 +47,6 @@ shared_ptr<Node> op::Atan::clone_with_new_inputs(const OutputVector& new_args) c
return make_shared<Atan>(new_args.at(0));
}
// d(atan x)/dx = 1 / (1 + x^2)
void op::Atan::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    // Build a ones tensor of x's shape by broadcasting a scalar constant over
    // every axis.
    auto one = make_shared<op::Constant>(x.get_element_type(), Shape{}, vector<string>{"1"});
    AxisSet axes;
    for (size_t i = 0; i < x.get_shape().size(); i++)
        axes.insert(i);
    auto ones = make_shared<op::Broadcast>(one, x.get_shape(), axes);
    adjoints.add_delta(x, delta / (ones + x * x));
}
namespace
{
template <element::Type_t ET>

View File

@ -50,10 +50,6 @@ namespace ngraph
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::Atan;

View File

@ -155,12 +155,6 @@ bool op::v1::BinaryConvolution::visit_attributes(AttributeVisitor& visitor)
return true;
}
// Backprop is deliberately unsupported for BinaryConvolution.
void op::v1::BinaryConvolution::generate_adjoints(autodiff::Adjoints& adjoints,
                                                  const OutputVector& deltas)
{
    throw ngraph_error("BinaryConvolution generate_adjoints not implemented");
}
namespace ngraph
{
template <>

View File

@ -78,8 +78,6 @@ namespace ngraph
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
/// \return The strides.
const Strides& get_strides() const { return m_strides; }

View File

@ -344,15 +344,6 @@ shared_ptr<Node> op::v0::Broadcast::clone_with_new_inputs(const OutputVector& ne
return make_shared<v0::Broadcast>(new_args.at(0), m_shape, m_broadcast_axes);
}
// Broadcast's adjoint sums delta over the broadcast axes back to x's shape.
void op::v0::Broadcast::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    adjoints.add_delta(x, make_shared<op::Sum>(delta, m_broadcast_axes));
}
namespace
{
#define TYPE_CASE_v0(a) \

View File

@ -189,9 +189,6 @@ namespace ngraph
const Shape& shape,
const AxisSet& broadcast_axes);
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
virtual void infer_shape() {}
Shape m_shape;
AxisSet m_broadcast_axes;

View File

@ -113,37 +113,6 @@ shared_ptr<Node> op::Concat::clone_with_new_inputs(const OutputVector& new_args)
return make_shared<Concat>(new_args, m_axis);
}
// Concat's adjoint slices delta along m_axis, giving each input the slab that
// corresponds to its position in the concatenated result.
void op::Concat::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto concat_result_shape = get_output_shape(0);
    // Full-extent slice bounds; only the m_axis entries vary per input below.
    Coordinate arg_delta_slice_lower = Coordinate(concat_result_shape.size(), 0);
    Coordinate arg_delta_slice_upper = concat_result_shape;
    Coordinate arg_delta_slice_strides = Coordinate(concat_result_shape.size(), 1);
    size_t pos = 0;
    for (auto value : input_values())
    {
        auto arg_shape = value.get_shape();
        auto slice_width = arg_shape[m_axis];
        size_t next_pos = pos + slice_width;
        arg_delta_slice_lower[m_axis] = pos;
        arg_delta_slice_upper[m_axis] = next_pos;
        adjoints.add_delta(
            value,
            make_shared<op::Slice>(
                delta, arg_delta_slice_lower, arg_delta_slice_upper, arg_delta_slice_strides));
        pos = next_pos;
    }
}
namespace
{
template <element::Type_t ET>

View File

@ -65,8 +65,6 @@ namespace ngraph
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
/// \ brief m_axis stores default value for all iterations
int64_t m_axis;
/// \brief m_concat_axis stores m_axis plus the number of rank for each iteration

View File

@ -634,29 +634,6 @@ bool op::v0::Constant::evaluate(const HostTensorVector& outputs, const HostTenso
return true;
}
constexpr NodeTypeInfo op::ScalarConstantLike::type_info;

// Materialize the scalar as an ordinary Constant once the element type is known.
shared_ptr<op::Constant> op::ScalarConstantLike::as_constant() const
{
    return std::make_shared<op::Constant>(m_element_type, m_shape, get_data_ptr());
}

std::shared_ptr<Node>
    op::ScalarConstantLike::clone_with_new_inputs(const OutputVector& new_args) const
{
    return std::make_shared<ScalarConstantLike>(new_args.at(0), m_value);
}

// Adopt the element type of the "like" input and lazily fill the one-element
// buffer with m_value on first use.
void op::ScalarConstantLike::infer_element_type()
{
    m_element_type = get_input_element_type(0);
    if (nullptr == m_data)
    {
        allocate_buffer();
        write_values(std::vector<double>(1, m_value));
    }
}
//
// We have to open up namespace blocks here to work around a problem with gcc:
//

View File

@ -560,41 +560,7 @@ namespace ngraph
bool m_all_elements_bitwise_identical;
bool are_all_data_elements_bitwise_identical() const;
};
/// \brief A scalar constant whose element type is the same as like.
class NGRAPH_API ScalarConstantLike : public Constant
{
public:
static constexpr NodeTypeInfo type_info{"ScalarConstantLike", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief A scalar constant whose element type is the same as like.
///
/// Once the element type is known, the dependency on like will be removed and
/// this node will be replaced with an equivalent constant.
///
/// \param like A tensor that will supply the element type.
/// \param value The value of the scalar.
template <typename T>
ScalarConstantLike(const Output<Node>& like, T value)
: Constant(OutputVector{like})
, m_value(static_cast<double>(value))
{
constructor_validate_and_infer_types();
}
ScalarConstantLike() = default;
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
std::shared_ptr<op::v0::Constant> as_constant() const;
protected:
void infer_element_type() override;
double m_value;
};
}
using v0::Constant;
using v0::ScalarConstantLike;
}
}

View File

@ -48,15 +48,6 @@ shared_ptr<Node> op::Convert::clone_with_new_inputs(const OutputVector& new_args
return make_shared<Convert>(new_args.at(0), m_destination_type);
}
// Convert's adjoint converts delta back to the input's element type.
void op::Convert::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    adjoints.add_delta(x, make_shared<op::Convert>(delta, x.get_element_type()));
}
namespace
{
template <element::Type_t INPUT_ET, element::Type_t OUTPUT_ET>

View File

@ -59,8 +59,6 @@ namespace ngraph
protected:
ngraph::element::Type m_destination_type;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::Convert;

View File

@ -44,11 +44,3 @@ shared_ptr<Node> op::v1::ConvertLike::clone_with_new_inputs(const OutputVector&
check_new_args_count(this, new_args);
return make_shared<ConvertLike>(new_args.at(0), new_args.at(1));
}
// ConvertLike's adjoint converts delta back using the same "like" reference
// input (input 1) as the forward op.
void op::v1::ConvertLike::generate_adjoints(autodiff::Adjoints& adjoints,
                                            const OutputVector& deltas)
{
    const auto delta = deltas.at(0);
    adjoints.add_delta(input_value(0), make_shared<op::v1::ConvertLike>(delta, input_value(1)));
}

View File

@ -42,10 +42,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
}

View File

@ -156,29 +156,6 @@ shared_ptr<Node> op::v1::Convolution::clone_with_new_inputs(const OutputVector&
m_auto_pad);
}
// Convolution's data adjoint is a ConvolutionBackpropData of delta with the
// same filters and geometry.
void op::v1::Convolution::generate_adjoints(autodiff::Adjoints& adjoints,
                                            const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    const auto x_shape = x.get_shape();
    auto f = input_value(1);
    const auto f_shape = f.get_shape();
    // NOTE(review): only the data adjoint is produced here; no delta is added
    // for the filters input f — confirm that is intentional.
    adjoints.add_delta(x,
                       make_shared<op::v1::ConvolutionBackpropData>(
                           delta,
                           f,
                           op::Constant::create(element::i64, Shape{x_shape.size()}, x_shape),
                           m_strides,
                           m_pads_begin,
                           m_pads_end,
                           m_dilations,
                           m_auto_pad));
}
constexpr NodeTypeInfo op::v1::ConvolutionBackpropData::type_info;
shared_ptr<Node> op::v1::Convolution::get_default_value() const
{
@ -453,71 +430,6 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types()
set_output_type(0, result_et, output_pshape);
}
// Adjoint of the data-backprop op: a forward Convolution recovers the data
// delta, and a transposed + spatially reversed Convolution recovers the
// filter delta.
void op::v1::ConvolutionBackpropData::generate_adjoints(autodiff::Adjoints& adjoints,
                                                        const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    const auto x_shape = x.get_shape();
    auto f = input_value(1);
    const auto f_shape = f.get_shape();
    // Data adjoint: convolving delta with the same filters undoes the backprop.
    auto data_conv = make_shared<op::v1::Convolution>(
        delta, f, m_strides, m_pads_begin, m_pads_end, m_dilations, m_auto_pad);
    adjoints.add_delta(x, data_conv);
    // Filter adjoint: derive the padding of the equivalent forward convolution
    // for each spatial dimension.
    Strides strides = m_dilations;
    CoordinateDiff pads_begin;
    CoordinateDiff pads_end;
    const Shape& filters_shape = get_input_shape(1);
    for (size_t i = 0; i < f_shape.size() - 2; i++)
    {
        ptrdiff_t pads_begin_backward =
            (static_cast<ptrdiff_t>(filters_shape[i + 2]) - 1) - m_pads_begin[i];
        pads_begin.push_back(pads_begin_backward);
        ptrdiff_t pads_end_backward =
            (static_cast<ptrdiff_t>(filters_shape[i + 2]) - 1) * m_dilations[i] +
            ((m_pads_begin[i] + (get_output_shape()[i].get_length() - 1) * m_strides[i] +
              m_pads_end[i] - (static_cast<ptrdiff_t>(filters_shape[i + 2]) - 1) * m_dilations[i]) %
             m_strides[i]) -
            m_pads_end[i];
        pads_end.push_back(pads_end_backward -
                           (pads_begin_backward + (x_shape[i + 2] - 1) * m_strides[i] +
                            pads_end_backward - (f_shape[i + 2] - 1) * m_dilations[i]) %
                               m_strides[i]);
    }
    // Swap batch and channel axes so delta and x line up as (filters, data)
    // operands of the transposed convolution.
    auto swap_NC = [](const Output<Node>& n) {
        AxisVector ax_order = ngraph::get_default_order(n.get_shape());
        ax_order[0] = 1;
        ax_order[1] = 0;
        auto new_shape = n.get_shape();
        new_shape[0] = n.get_shape()[1];
        new_shape[1] = n.get_shape()[0];
        return make_shared<op::Reshape>(n, ax_order, new_shape);
    };
    delta = swap_NC(delta);
    x = swap_NC(x);
    shared_ptr<Node> filter_deconv_bprop = make_shared<op::v1::Convolution>(
        x, delta, strides, pads_begin, pads_end, Strides(x.get_shape().size() - 2, 1), m_auto_pad);
    AxisSet axes;
    for (size_t i = 2; i < filter_deconv_bprop->get_shape().size(); ++i)
    {
        axes.insert(i);
    }
    // Spatially reverse the result to obtain the filter-gradient orientation.
    filter_deconv_bprop = make_shared<ngraph::op::Reverse>(filter_deconv_bprop, axes);
    adjoints.add_delta(f, filter_deconv_bprop);
}
shared_ptr<Node>
op::v1::ConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{
@ -714,28 +626,6 @@ shared_ptr<Node> op::v0::Convolution::clone_with_new_inputs(const OutputVector&
m_pad_type);
}
// v0 Convolution's data adjoint via v0::ConvolutionBackpropData.
void op::v0::Convolution::generate_adjoints(autodiff::Adjoints& adjoints,
                                            const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    const auto x_shape = x.get_shape();
    auto f = input_value(1);
    const auto f_shape = f.get_shape();
    // NOTE(review): only the data adjoint is produced here; no delta is added
    // for the filters input f — confirm that is intentional.
    adjoints.add_delta(x,
                       make_shared<op::v0::ConvolutionBackpropData>(x_shape,
                                                                    f,
                                                                    delta,
                                                                    m_window_movement_strides,
                                                                    m_window_dilation_strides,
                                                                    m_padding_below,
                                                                    m_padding_above,
                                                                    m_data_dilation_strides));
}
constexpr NodeTypeInfo op::v0::ConvolutionBackpropData::type_info;
shared_ptr<Node> op::v0::Convolution::get_default_value() const
{
@ -839,89 +729,6 @@ void op::v0::ConvolutionBackpropData::validate_and_infer_types()
set_output_type(0, forward_result_et, m_data_batch_shape);
}
// Adjoint of the v0 data-backprop op: a forward convolution recovers the data
// delta, and a transposed + spatially reversed convolution recovers the
// filter delta.
void op::v0::ConvolutionBackpropData::generate_adjoints(autodiff::Adjoints& adjoints,
                                                        const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    // Note the operand order on this op: input 0 is filters, input 1 is data.
    auto x = input_value(1);
    const auto x_shape = x.get_shape();
    auto f = input_value(0);
    const auto f_shape = f.get_shape();
    auto data_conv = make_shared<op::v0::Convolution>(delta,
                                                      f,
                                                      m_window_movement_strides_forward,
                                                      m_window_dilation_strides_forward,
                                                      m_padding_below_forward,
                                                      m_padding_above_forward,
                                                      m_data_dilation_strides_forward);
    adjoints.add_delta(x, data_conv);
    // Filter adjoint: swap the roles of the forward movement/dilation strides,
    // then derive the padding of the equivalent forward convolution.
    Strides window_movement_strides = m_window_dilation_strides_forward;
    Strides window_dilation_strides = m_data_dilation_strides_forward;
    Strides data_dilation_strides = m_window_movement_strides_forward;
    CoordinateDiff padding_below;
    CoordinateDiff padding_above;
    const Shape& filters_shape = get_input_shape(0);
    for (size_t i = 0; i < f_shape.size() - 2; i++)
    {
        ptrdiff_t padding_below_backward =
            (static_cast<ptrdiff_t>(filters_shape[i + 2]) - 1) * window_dilation_strides[i] -
            m_padding_below_forward[i];
        padding_below.push_back(padding_below_backward);
        ptrdiff_t padding_above_backward =
            (static_cast<ptrdiff_t>(filters_shape[i + 2]) - 1) *
                m_window_dilation_strides_forward[i] +
            ((m_padding_below_forward[i] +
              ((m_data_batch_shape[i + 2]) - 1) * m_data_dilation_strides_forward[i] +
              m_padding_above_forward[i] -
              (static_cast<ptrdiff_t>(filters_shape[i + 2]) - 1) *
                  m_window_dilation_strides_forward[i]) %
             m_window_movement_strides_forward[i]) -
            m_padding_above_forward[i];
        padding_above.push_back(
            padding_above_backward -
            (padding_below_backward + (x_shape[i + 2] - 1) * m_window_movement_strides_forward[i] +
             padding_above_backward - (f_shape[i + 2] - 1) * m_window_dilation_strides_forward[i]) %
                m_data_dilation_strides_forward[i]);
    }
    // Swap batch and channel axes so delta and x line up as (filters, data)
    // operands of the transposed convolution.
    auto swap_NC = [](const Output<Node>& n) {
        AxisVector ax_order = ngraph::get_default_order(n.get_shape());
        ax_order[0] = 1;
        ax_order[1] = 0;
        auto new_shape = n.get_shape();
        new_shape[0] = n.get_shape()[1];
        new_shape[1] = n.get_shape()[0];
        return make_shared<op::Reshape>(n, ax_order, new_shape);
    };
    delta = swap_NC(delta);
    x = swap_NC(x);
    shared_ptr<Node> filter_deconv_bprop = make_shared<op::v0::Convolution>(x,
                                                                            delta,
                                                                            window_movement_strides,
                                                                            window_dilation_strides,
                                                                            padding_below,
                                                                            padding_above,
                                                                            data_dilation_strides);
    AxisSet axes;
    for (size_t i = 2; i < filter_deconv_bprop->get_shape().size(); ++i)
    {
        axes.insert(i);
    }
    // Spatially reverse the result to obtain the filter-gradient orientation.
    filter_deconv_bprop = make_shared<ngraph::op::Reverse>(filter_deconv_bprop, axes);
    adjoints.add_delta(f, filter_deconv_bprop);
}
shared_ptr<Node>
op::v0::ConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{

View File

@ -67,8 +67,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
/// \return The strides.
const Strides& get_strides() const { return m_strides; }
@ -161,8 +159,6 @@ namespace ngraph
bool visit_attributes(AttributeVisitor& visitor) override;
virtual bool is_dynamic() const override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
@ -346,8 +342,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
/// \return The window movement strides.
const Strides& get_window_movement_strides() const
@ -439,8 +433,6 @@ namespace ngraph
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;

View File

@ -44,15 +44,6 @@ shared_ptr<Node> op::Cos::clone_with_new_inputs(const OutputVector& new_args) co
return make_shared<Cos>(new_args.at(0));
}
// d(cos x)/dx = -sin(x)
void op::Cos::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    adjoints.add_delta(x, -delta * (make_shared<op::Sin>(x)));
}
namespace
{
template <element::Type_t ET>

View File

@ -42,10 +42,6 @@ namespace ngraph
clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::Cos;

View File

@ -43,15 +43,6 @@ shared_ptr<Node> op::Cosh::clone_with_new_inputs(const OutputVector& new_args) c
return make_shared<Cosh>(new_args.at(0));
}
// d(cosh x)/dx = sinh(x)
void op::Cosh::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto x = input_value(0);
    adjoints.add_delta(x, delta * (make_shared<op::Sinh>(x)));
}
namespace
{
template <element::Type_t ET>

View File

@ -42,10 +42,6 @@ namespace ngraph
clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::Cosh;

View File

@ -77,13 +77,6 @@ shared_ptr<Node> op::v0::CumSum::clone_with_new_inputs(const OutputVector& new_a
return make_shared<op::CumSum>(new_args.at(0), new_args.at(1), m_exclusive, m_reverse);
}
// NOTE(review): passes delta straight through to the input; the exact CumSum
// adjoint is a (reversed) cumulative sum of delta respecting the exclusive and
// reverse flags — confirm this simplification is intentional.
void op::v0::CumSum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);
    auto input_tensor = input_value(0);
    adjoints.add_delta(input_tensor, delta);
}
shared_ptr<Node> op::v0::CumSum::get_default_value() const
{
return ngraph::make_constant_from_string("0", get_element_type(), get_shape());

View File

@ -106,10 +106,6 @@ namespace ngraph
virtual std::shared_ptr<Node> get_default_value() const override;
bool is_exclusive() const { return m_exclusive; }
bool is_reverse() const { return m_reverse; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
bool m_exclusive;
bool m_reverse;

View File

@ -156,9 +156,3 @@ shared_ptr<Node> op::Dequantize::clone_with_new_inputs(const OutputVector& new_a
check_new_args_count(this, new_args);
return make_shared<Dequantize>(new_args.at(0), new_args.at(1), new_args.at(2), m_type, m_axes);
}
void op::Dequantize::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw ngraph_error("Forward-propagation-only operation");
}

View File

@ -59,10 +59,6 @@ namespace ngraph
void set_axes(const AxisSet& axes) { m_axes = axes; }
const element::Type& get_type() const { return m_type; }
void set_type(const element::Type& type) { m_type = type; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
element::Type m_type;
AxisSet m_axes;

View File

@ -59,22 +59,6 @@ shared_ptr<Node> op::v0::Divide::clone_with_new_inputs(const OutputVector& new_a
new_args.at(0), new_args.at(1), this->is_pythondiv(), this->get_autob());
}
void op::v0::Divide::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
adjoints.add_delta(x, delta / y);
adjoints.add_delta(y, -delta * shared_from_this() / y);
}
shared_ptr<Node> ngraph::operator/(const Output<Node>& arg0, const Output<Node>& arg1)
{
return make_shared<op::v0::Divide>(arg0, arg1);
@ -168,22 +152,6 @@ shared_ptr<Node> op::v1::Divide::clone_with_new_inputs(const OutputVector& new_a
new_args.at(0), new_args.at(1), this->is_pythondiv(), this->get_autob());
}
void op::v1::Divide::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
adjoints.add_delta(x, delta / y);
adjoints.add_delta(y, -delta * shared_from_this() / y);
}
bool op::v1::Divide::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
return evaluate_divide(inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());

View File

@ -60,8 +60,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
@ -111,8 +109,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
size_t get_version() const override { return 1; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;

View File

@ -177,33 +177,6 @@ shared_ptr<op::Reshape> make_reshape_axes_to_front(const Output<Node>& n,
return make_shared<op::Reshape>(n, input_order, output_shape);
}
void op::Dot::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
auto x_shape = x.get_shape(); // shape IJ
auto y_shape = y.get_shape(); // shape JK
auto delta_shape = delta.get_shape(); // shape IK
Shape I_shape;
Shape J_shape;
Shape K_shape;
I_shape.insert(I_shape.begin(), x_shape.begin(), x_shape.end() - m_reduction_axes_count);
J_shape.insert(J_shape.begin(), y_shape.begin(), y_shape.begin() + m_reduction_axes_count);
K_shape.insert(K_shape.begin(), y_shape.begin() + J_shape.size(), y_shape.end());
auto y_reshaped = make_reshape_axes_to_front(y, J_shape, K_shape); // KJ
auto delta_dot_y_reshaped = make_shared<Dot>(delta, y_reshaped, K_shape.size()); // IK.KJ->IJ
adjoints.add_delta(x, delta_dot_y_reshaped);
auto x_reshaped = make_reshape_axes_to_front(x, I_shape, J_shape); // JI
auto x_reshaped_dot_delta = make_shared<Dot>(x_reshaped, delta, I_shape.size()); // JI.IK->JK
adjoints.add_delta(y, x_reshaped_dot_delta);
}
shared_ptr<Node> op::Dot::get_default_value() const
{
return ngraph::make_constant_from_string("0", get_element_type(), get_shape());

View File

@ -89,9 +89,6 @@ namespace ngraph
protected:
size_t m_reduction_axes_count;
bool m_has_reduction_axes_count;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::Dot;

View File

@ -49,12 +49,6 @@ namespace ngraph
}
void validate_and_infer_types() override;
void generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */) override
{
throw ngraph_error("Not yet implemented");
}
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
};

View File

@ -42,14 +42,6 @@ shared_ptr<Node> op::Exp::clone_with_new_inputs(const OutputVector& new_args) co
return make_shared<Exp>(new_args.at(0));
}
void op::Exp::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
auto delta = deltas.at(0);
auto x = input_value(0);
adjoints.add_delta(x, delta * shared_from_this());
}
namespace
{
template <element::Type_t ET>

View File

@ -41,8 +41,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
};

View File

@ -16,7 +16,6 @@
#pragma once
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/util/fused_op.hpp"

View File

@ -16,7 +16,6 @@
#pragma once
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/util/fused_op.hpp"

View File

@ -21,7 +21,6 @@
#include <string>
#include <vector>
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/util/activation_functions.hpp"
#include "ngraph/op/util/fused_op.hpp"

View File

@ -44,11 +44,6 @@ shared_ptr<Node> op::Stack::clone_with_new_inputs(const OutputVector& new_args)
return make_shared<Stack>(new_args, m_axis);
}
void op::Stack::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
ngraph_error("Not yet implemented");
}
void op::Stack::pre_validate_and_infer_types()
{
bool is_input_dynamic = false;

View File

@ -48,9 +48,6 @@ namespace ngraph
/// stacked.
Stack(const NodeVector& args, int64_t axis);
void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
virtual void pre_validate_and_infer_types() override;
virtual NodeVector decompose_op() const override;

View File

@ -94,12 +94,6 @@ void op::v0::Gather::validate_and_infer_types()
set_output_type(0, result_et, result_shape);
}
void op::v0::Gather::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw ngraph_error("Not yet implemented");
}
constexpr NodeTypeInfo op::v1::Gather::type_info;
const int64_t op::v1::Gather::AXIS_NOT_SET_VALUE;
@ -199,12 +193,6 @@ int64_t op::v1::Gather::get_axis() const
return axis;
}
void op::v1::Gather::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw ngraph_error("Not yet implemented");
}
shared_ptr<Node> op::v1::Gather::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);

View File

@ -38,9 +38,6 @@ namespace ngraph
void validate_and_infer_types() override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
size_t get_axis() const { return m_axis; }
void set_axis(size_t axis) { m_axis = axis; }
virtual std::shared_ptr<Node>
@ -75,9 +72,6 @@ namespace ngraph
void validate_and_infer_types() override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;

View File

@ -40,12 +40,6 @@ namespace ngraph
}
void validate_and_infer_types() override;
void generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */) override
{
throw ngraph_error("Not yet implemented");
}
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
};

View File

@ -81,9 +81,3 @@ void op::v1::GatherTree::validate_and_infer_types()
const auto& step_ids_et = get_input_element_type(0);
set_output_type(0, step_ids_et, step_ids_rank);
}
void op::v1::GatherTree::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw ngraph_error("generate_adjoints is not implemented for GatherTree");
}

View File

@ -49,10 +49,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
}

View File

@ -59,14 +59,6 @@ NodeVector op::GetOutputElement::get_arguments() const
return NodeVector{input_value(0).get_node_shared_ptr()};
}
void op::GetOutputElement::generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas)
{
auto delta = deltas.at(0);
adjoints.add_delta(input_value(0), delta);
}
NodeVector op::get_output_elements(const shared_ptr<Node>& mon)
{
NodeVector goes(mon->get_output_size());

View File

@ -52,8 +52,6 @@ namespace ngraph
NodeVector get_arguments() const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
size_t m_n;
};
}

View File

@ -156,12 +156,6 @@ shared_ptr<Node> op::v1::GroupConvolution::clone_with_new_inputs(const OutputVec
m_auto_pad);
}
void op::v1::GroupConvolution::generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas)
{
ngraph_error("Not Yet Implemented");
}
//------------------------------------------------------------------------------
// v1::GroupConvolutionBackpropData
//------------------------------------------------------------------------------
@ -527,12 +521,6 @@ NodeVector op::v1::GroupConvolutionBackpropData::decompose_op() const
return {std::make_shared<ngraph::op::Concat>(conv_groups, concatenation_axis)};
}
void op::v1::GroupConvolutionBackpropData::generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas)
{
ngraph_error("Not Yet Implemented");
}
shared_ptr<Node>
op::v1::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{
@ -758,12 +746,6 @@ NodeVector op::v0::GroupConvolution::decompose_op() const
return {std::make_shared<ngraph::op::Concat>(convolution_nodes, concatenation_axis)};
}
void op::GroupConvolution::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw ngraph_error("NYI");
}
//------------------------------------------------------------------------------
// v0::GroupConvolutionBackpropData
//------------------------------------------------------------------------------

View File

@ -67,9 +67,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
/// \return The strides.
const Strides& get_strides() const { return m_strides; }
void set_strides(const Strides& strides) { m_strides = strides; }
@ -214,8 +211,6 @@ namespace ngraph
virtual NodeVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
@ -306,9 +301,6 @@ namespace ngraph
virtual void pre_validate_and_infer_types() override;
virtual void post_validate_and_infer_types() override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
bool has_groups_in_filters() const { return m_groups_in_filters; }
protected:
Strides m_window_movement_strides;

View File

@ -42,15 +42,6 @@ shared_ptr<Node> op::Log::clone_with_new_inputs(const OutputVector& new_args) co
return make_shared<Log>(new_args.at(0));
}
void op::Log::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
auto delta = deltas.at(0);
auto x = input_value(0);
adjoints.add_delta(x, delta / x);
}
namespace
{
template <element::Type_t ET>

View File

@ -40,9 +40,6 @@ namespace ngraph
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
};

View File

@ -126,9 +126,3 @@ shared_ptr<Node> op::LRN::clone_with_new_inputs(const OutputVector& new_args) co
check_new_args_count(this, new_args);
return make_shared<op::LRN>(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size);
}
void op::LRN::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw ngraph_error("NYI");
}

View File

@ -74,9 +74,6 @@ namespace ngraph
AxisSet get_reduction_axes() const;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
double m_alpha;
double m_beta;
double m_bias;

View File

@ -45,25 +45,6 @@ shared_ptr<Node> op::v0::Maximum::clone_with_new_inputs(const OutputVector& new_
return make_shared<op::v0::Maximum>(new_args.at(0), new_args.at(1), this->get_autob());
}
void op::v0::Maximum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
adjoints.add_delta(
x,
delta * make_shared<op::Convert>(make_shared<op::v0::Greater>(x, y), x.get_element_type()));
adjoints.add_delta(
y,
delta * make_shared<op::Convert>(make_shared<op::v0::Greater>(y, x), y.get_element_type()));
}
namespace
{
template <element::Type_t ET>
@ -131,25 +112,6 @@ shared_ptr<Node> op::v1::Maximum::clone_with_new_inputs(const OutputVector& new_
return make_shared<op::v1::Maximum>(new_args.at(0), new_args.at(1), this->get_autob());
}
void op::v1::Maximum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
adjoints.add_delta(
x,
delta * make_shared<op::Convert>(make_shared<op::v1::Greater>(x, y), x.get_element_type()));
adjoints.add_delta(
y,
delta * make_shared<op::Convert>(make_shared<op::v1::Greater>(y, x), y.get_element_type()));
}
bool op::v1::Maximum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
return evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob());

View File

@ -50,10 +50,6 @@ namespace ngraph
virtual bool is_commutative() const override { return true; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
} // namespace v0
@ -87,10 +83,6 @@ namespace ngraph
virtual bool is_commutative() const override { return true; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
} // namespace v1

View File

@ -45,24 +45,6 @@ shared_ptr<Node> op::v0::Minimum::clone_with_new_inputs(const OutputVector& new_
return make_shared<op::v0::Minimum>(new_args.at(0), new_args.at(1), this->get_autob());
}
void op::v0::Minimum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
adjoints.add_delta(
x, delta * make_shared<op::Convert>(make_shared<op::v0::Less>(x, y), x.get_element_type()));
adjoints.add_delta(
y, delta * make_shared<op::Convert>(make_shared<op::v0::Less>(y, x), y.get_element_type()));
}
namespace
{
template <element::Type_t ET>
@ -130,24 +112,6 @@ shared_ptr<Node> op::v1::Minimum::clone_with_new_inputs(const OutputVector& new_
return make_shared<op::v1::Minimum>(new_args.at(0), new_args.at(1), this->get_autob());
}
void op::v1::Minimum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
adjoints.add_delta(
x, delta * make_shared<op::Convert>(make_shared<op::v1::Less>(x, y), x.get_element_type()));
adjoints.add_delta(
y, delta * make_shared<op::Convert>(make_shared<op::v1::Less>(y, x), y.get_element_type()));
}
bool op::v1::Minimum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
return evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());

View File

@ -50,10 +50,6 @@ namespace ngraph
virtual bool is_commutative() const override { return true; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
} // namespace v0
@ -87,10 +83,6 @@ namespace ngraph
virtual bool is_commutative() const override { return true; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
} // namespace v1

View File

@ -39,22 +39,6 @@ shared_ptr<Node> op::v0::Multiply::clone_with_new_inputs(const OutputVector& new
return make_shared<op::v0::Multiply>(new_args.at(0), new_args.at(1), this->get_autob());
}
void op::v0::Multiply::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
adjoints.add_delta(x, delta * y);
adjoints.add_delta(y, x * delta);
}
namespace
{
template <element::Type_t ET>
@ -122,22 +106,6 @@ shared_ptr<Node> op::v1::Multiply::clone_with_new_inputs(const OutputVector& new
return make_shared<op::v1::Multiply>(new_args.at(0), new_args.at(1), this->get_autob());
}
void op::v1::Multiply::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
adjoints.add_delta(x, delta * y);
adjoints.add_delta(y, x * delta);
}
bool op::v1::Multiply::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
return evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());

View File

@ -50,10 +50,6 @@ namespace ngraph
virtual bool is_commutative() const override { return true; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
} // namespace v0
@ -87,10 +83,6 @@ namespace ngraph
virtual bool is_commutative() const override { return true; }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
} // namespace v1

View File

@ -83,15 +83,6 @@ bool op::Negative::evaluate(const HostTensorVector& outputs, const HostTensorVec
return evaluate_negative(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
void op::Negative::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
auto delta = deltas.at(0);
auto x = input_value(0);
adjoints.add_delta(x, -delta);
}
shared_ptr<Node> ngraph::operator-(const Output<Node>& arg0)
{
return make_shared<op::Negative>(arg0);

View File

@ -42,10 +42,6 @@ namespace ngraph
clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::Negative;

View File

@ -180,7 +180,6 @@ NGRAPH_OP(Reverse, ngraph::op::v1, 1)
NGRAPH_OP(ReverseSequence, ngraph::op::v0, 0)
NGRAPH_OP(Round, ngraph::op::v0, 0)
NGRAPH_OP(ROIAlign, ngraph::op::v3, 3)
NGRAPH_OP(ScalarConstantLike, ngraph::op::v0, 0)
NGRAPH_OP(ScaleShift, ngraph::op::v0, 0)
NGRAPH_OP(ScatterAdd, ngraph::op::v0, 0)
NGRAPH_OP(ScatterAdd, ngraph::op::v3, 3)

View File

@ -126,47 +126,6 @@ shared_ptr<Node> op::v0::Pad::clone_with_new_inputs(const OutputVector& new_args
new_args.at(0), new_args.at(1), m_padding_below, m_padding_above, m_pad_mode);
}
/* The "y" half of this is going to be a bit tricky... best way to handle it, I think,
is to ReplaceSlice the non-padded values in the incoming delta tensor with a zero
broadcasted to x's shape; then sum that and backprop the result to y.
For example, let's say we are padding a 2x2 with 1 above and below, and the deltas
coming back are:
d00 d01 d02 d03
d10 d11 d12 d13
d20 d21 d22 d23
d30 d31 d32 d33
We know that everything but d11, d12, d21, and d22 on the forward prop is just "y".
So we mask that off (using the forward-prop padding values to determine start, end,
and slice stride):
d00 d01 d02 d03
d10 0 0 d13
d20 0 0 d23
d30 d31 d32 d33
Then sum that up:
d00 + d01 + d02 + d03 +
d10 + 0 + 0 + d13 +
d20 + 0 + 0 + d23 +
d30 + d31 + d32 + d33
For the "x" backprop it's sort of the opposite; just slice out:
d11 d12
d21 d22
and push that back.
*/
void op::v0::Pad::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw invalid_argument("Autodiff is not yet implemented for Pad");
}
std::shared_ptr<Node> op::Pad::get_default_value() const
{
AxisSet axes{};
@ -360,9 +319,3 @@ shared_ptr<Node> op::v1::Pad::clone_with_new_inputs(const OutputVector& new_args
return make_shared<v1::Pad>(new_args.at(0), new_args.at(1), new_args.at(2), m_pad_mode);
}
}
void op::v1::Pad::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw invalid_argument("Autodiff is not yet implemented for Pad:v1");
}

View File

@ -81,8 +81,6 @@ namespace ngraph
virtual std::shared_ptr<Node> get_default_value() const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Shape m_padding_interior_fake; // LEGACY: This is all zeros.
@ -150,10 +148,6 @@ namespace ngraph
/// \return The padding mode.
PadMode get_pad_mode() const { return m_pad_mode; }
void set_pad_mode(PadMode pad_mode) { m_pad_mode = pad_mode; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
PadMode m_pad_mode;
};

View File

@ -55,12 +55,6 @@ shared_ptr<Node> op::Parameter::clone_with_new_inputs(const OutputVector& new_ar
return make_shared<Parameter>(m_element_type, m_partial_shape);
}
void op::Parameter::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& deltas)
{
auto delta = deltas.at(0);
}
bool op::Parameter::is_relevant_to_shapes() const
{
return m_is_relevant_to_shapes;

View File

@ -32,10 +32,6 @@ namespace ngraph
/// Basic graph operations do not need parameters attached to a function.
class NGRAPH_API Parameter : public op::Op
{
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
public:
static constexpr NodeTypeInfo type_info{"Parameter", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }

View File

@ -42,24 +42,6 @@ shared_ptr<Node> op::v0::Power::clone_with_new_inputs(const OutputVector& new_ar
return make_shared<op::v0::Power>(new_args.at(0), new_args.at(1), this->get_autob());
}
void op::v0::Power::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
auto log_x = make_shared<op::Log>(x);
adjoints.add_delta(x, delta * y * shared_from_this() / x);
adjoints.add_delta(y, delta * shared_from_this() * log_x);
}
namespace
{
template <element::Type_t ET>
@ -127,24 +109,6 @@ shared_ptr<Node> op::v1::Power::clone_with_new_inputs(const OutputVector& new_ar
return make_shared<op::v1::Power>(new_args.at(0), new_args.at(1), this->get_autob());
}
void op::v1::Power::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
auto log_x = make_shared<op::Log>(x);
adjoints.add_delta(x, delta * y * shared_from_this() / x);
adjoints.add_delta(y, delta * shared_from_this() * log_x);
}
bool op::v1::Power::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
return evaluate_power(inputs[0], inputs[1], outputs[0], get_autob());

View File

@ -62,10 +62,6 @@ namespace ngraph
clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
} // namespace v0
@ -111,10 +107,6 @@ namespace ngraph
clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
} // namespace v1

View File

@ -159,9 +159,3 @@ shared_ptr<Node> op::Quantize::clone_with_new_inputs(const OutputVector& new_arg
return make_shared<Quantize>(
new_args.at(0), new_args.at(1), new_args.at(2), m_type, m_axes, m_round_mode);
}
void op::Quantize::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw ngraph_error("Forward-propagation-only operation");
}

View File

@ -103,10 +103,6 @@ namespace ngraph
const ngraph::AxisSet& get_axes() const { return m_axes; }
RoundMode get_round_mode() const { return m_round_mode; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
ngraph::element::Type m_type;
ngraph::AxisSet m_axes;

View File

@ -192,9 +192,3 @@ shared_ptr<Node> op::QuantizedConvolution::clone_with_new_inputs(const OutputVec
m_filter_axes,
m_output_axes));
}
void op::QuantizedConvolution::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw ngraph_error("Forward-propagation-only operation");
}

View File

@ -90,9 +90,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;

View File

@ -217,9 +217,3 @@ shared_ptr<Node> op::QuantizedDot::clone_with_new_inputs(const OutputVector& new
m_input1_axes,
m_output_axes));
}
void op::QuantizedDot::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw ngraph_error("Forward-propagation-only operation");
}

View File

@ -73,9 +73,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
protected:
size_t m_reduction_axes_count;
bool m_has_reduction_axes_count;

View File

@ -45,16 +45,6 @@ shared_ptr<Node> op::v1::ReduceSum::clone_with_new_inputs(const OutputVector& ne
return make_shared<ReduceSum>(new_args.at(0), new_args.at(1), get_keep_dims());
}
void op::v1::ReduceSum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
auto delta = deltas.at(0);
auto x = input_value(0);
auto& x_shape = x.get_shape();
adjoints.add_delta(x, make_shared<op::Broadcast>(delta, x_shape, get_reduction_axes()));
}
namespace
{
template <element::Type_t ET>

View File

@ -100,10 +100,6 @@ namespace ngraph
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
}

View File

@ -174,20 +174,3 @@ shared_ptr<Node> op::ReplaceSlice::clone_with_new_inputs(const OutputVector& new
return make_shared<ReplaceSlice>(
new_args.at(0), new_args.at(1), m_lower_bounds, m_upper_bounds, m_strides);
}
void op::ReplaceSlice::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
auto delta = deltas.at(0);
auto x = input_value(0);
auto y = input_value(1);
auto& y_element_type = y.get_element_type();
auto y_shape = y.get_shape();
auto zeros_shaped_like_y = op::Constant::create(y_element_type, y_shape, {0.0});
adjoints.add_delta(x,
make_shared<op::ReplaceSlice>(
delta, zeros_shaped_like_y, m_lower_bounds, m_upper_bounds, m_strides));
adjoints.add_delta(y, make_shared<op::Slice>(delta, m_lower_bounds, m_upper_bounds, m_strides));
}

View File

@ -106,9 +106,6 @@ namespace ngraph
const Strides& get_strides() const { return m_strides; }
void set_strides(const Strides& strides) { m_strides = strides; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
Coordinate m_lower_bounds;
Coordinate m_upper_bounds;
Strides m_strides;

View File

@ -186,39 +186,6 @@ bool op::Reshape::visit_attributes(AttributeVisitor& visitor)
return true;
}
// Backprop for (v0) Reshape: undo the reshape by reshaping the delta back to
// the (possibly permuted) input shape, then undo the axis permutation if the
// op applied one.
void op::Reshape::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    const auto delta = deltas.at(0);
    const auto in_shape = get_input_shape(0);
    const size_t rank = in_shape.size();

    // Input shape as seen after applying m_input_order, plus the inverse
    // permutation needed to undo it on the way back.
    Shape transposed_in_shape(rank);
    AxisVector inverse_order(rank);
    bool permutation_needed = false;
    for (size_t axis = 0; axis < rank; ++axis)
    {
        const size_t source_axis = m_input_order[axis];
        if (axis != source_axis)
        {
            permutation_needed = true;
        }
        transposed_in_shape[axis] = in_shape[source_axis];
        inverse_order[source_axis] = axis;
    }

    // Identity order over the output rank: first step only undoes the reshape.
    AxisVector identity_order(m_output_shape.size());
    for (size_t axis = 0; axis < m_output_shape.size(); ++axis)
    {
        identity_order[axis] = axis;
    }

    auto backprop = make_shared<op::Reshape>(delta, identity_order, transposed_in_shape);
    if (permutation_needed)
    {
        // Second step undoes the input-axis permutation.
        backprop = make_shared<op::Reshape>(backprop, inverse_order, in_shape);
    }
    adjoints.add_delta(input_value(0), backprop);
}
bool op::v0::Reshape::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
return evaluate_reshape(inputs[0], outputs[0], get_input_order());
@ -386,12 +353,6 @@ shared_ptr<Node> op::v1::Reshape::clone_with_new_inputs(const OutputVector& new_
return make_shared<v1::Reshape>(new_args.at(0), new_args.at(1), m_special_zero);
}
// Autodiff is not implemented for the dynamic (v1) Reshape; any attempt to
// backprop through it fails loudly rather than producing wrong gradients.
void op::v1::Reshape::generate_adjoints(autodiff::Adjoints& /* adjoints */,
                                        const OutputVector& /* deltas */)
{
    throw ngraph_error("generate_adjoints not implemented for Reshape");
}
bool op::v1::Reshape::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
// infer and set output shape if the output shape contain -1

View File

@ -105,9 +105,6 @@ namespace ngraph
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
AxisVector m_input_order;
Shape m_output_shape;
bool m_is_transpose{false};
@ -154,10 +151,6 @@ namespace ngraph
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
bool m_special_zero;
};

View File

@ -55,13 +55,6 @@ shared_ptr<Node> op::Result::clone_with_new_inputs(const OutputVector& new_args)
return std::move(res);
}
// Result is an identity pass-through, so its adjoint is forwarded unchanged
// to the node it wraps.
void op::Result::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    adjoints.add_delta(input_value(0), deltas.at(0));
}
bool op::Result::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
outputs[0]->set_unary(inputs[0]);

View File

@ -52,10 +52,6 @@ namespace ngraph
bool constant_fold(OutputVector& output_values,
const OutputVector& inputs_values) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
bool m_needs_default_layout{false};
};

View File

@ -63,15 +63,6 @@ shared_ptr<Node> op::v0::Reverse::clone_with_new_inputs(const OutputVector& new_
return make_shared<v0::Reverse>(new_args.at(0), m_reversed_axes);
}
// Reversal is an involution: reversing the incoming delta along the same axes
// maps it back onto the input's coordinate space.
void op::v0::Reverse::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    const auto incoming = deltas.at(0);
    adjoints.add_delta(input_value(0), make_shared<op::Reverse>(incoming, m_reversed_axes));
}
constexpr NodeTypeInfo op::v1::Reverse::type_info;
op::v1::Reverse::Reverse(const Output<Node>& data,
@ -182,16 +173,6 @@ shared_ptr<Node> op::v1::Reverse::clone_with_new_inputs(const OutputVector& new_
return make_shared<op::v1::Reverse>(new_args.at(0), new_args.at(1), m_mode);
}
void op::v1::Reverse::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    // Reverse is self-inverse: applying the same reversal (same axes input,
    // same mode) to the incoming delta yields the adjoint of the data input.
    adjoints.add_delta(input_value(0),
                       make_shared<op::v1::Reverse>(deltas.at(0), input_value(1), m_mode));
}
op::v1::Reverse::Mode op::v1::Reverse::mode_from_string(const std::string& mode) const
{
static const std::map<std::string, Mode> allowed_values = {{"index", Mode::INDEX},

View File

@ -73,9 +73,6 @@ namespace ngraph
}
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
AxisSet m_reversed_axes;
};
}
@ -119,9 +116,6 @@ namespace ngraph
void set_mode(const Mode mode) { m_mode = mode; }
virtual size_t get_version() const override { return 1; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
Mode mode_from_string(const std::string& mode) const;
/// \brief Indicates how the values from the second input should be interpreted.

View File

@ -98,12 +98,3 @@ shared_ptr<Node> op::ReverseSequence::clone_with_new_inputs(const OutputVector&
make_shared<ReverseSequence>(new_args.at(0), new_args.at(1), m_batch_axis, m_seq_axis);
return move(res);
}
// ReverseSequence undoes itself when applied twice with the same sequence
// lengths, so the adjoint of the data input is the delta passed back through
// an identical ReverseSequence.
void op::ReverseSequence::generate_adjoints(autodiff::Adjoints& adjoints,
                                            const OutputVector& deltas)
{
    const auto reversed_delta =
        make_shared<ReverseSequence>(deltas.at(0), input_value(1), m_batch_axis, m_seq_axis);
    adjoints.add_delta(input_value(0), reversed_delta);
}

View File

@ -54,10 +54,6 @@ namespace ngraph
size_t get_sequence_axis() const { return m_normalized_seq_axis; }
int64_t get_origin_sequence_axis() const { return m_seq_axis; }
void set_sequence_axis(int64_t sequence_axis) { m_seq_axis = sequence_axis; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
int64_t m_batch_axis;
int64_t m_seq_axis;

View File

@ -44,12 +44,6 @@ namespace ngraph
}
void validate_and_infer_types() override;
// Autodiff is not implemented for this op; any backprop attempt throws.
void generate_adjoints(autodiff::Adjoints& /* adjoints */,
                       const OutputVector& /* deltas */) override
{
    throw ngraph_error("Not yet implemented");
}
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
};

View File

@ -95,26 +95,6 @@ bool op::v1::Select::visit_attributes(AttributeVisitor& visitor)
return true;
}
// Backprop for (v1) Select: gate the incoming delta by the selector so each
// data input receives gradient only where it was the one selected.
void op::v1::Select::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    // Gradients are only defined when all inputs already share a single shape.
    if (get_auto_broadcast().m_type != op::AutoBroadcastType::NONE)
    {
        throw ngraph_error("Autodiff not supported with auto broadcasting");
    }
    const auto delta = deltas.at(0);
    const auto selector = input_value(0);
    const auto when_true = input_value(1);
    const auto when_false = input_value(2);

    // Convert the boolean selector (and its negation) into 0/1 masks in the
    // element types of the two data inputs, then multiply the delta by each.
    const auto true_mask = make_shared<op::Convert>(selector, when_true.get_element_type());
    const auto false_mask =
        make_shared<op::Convert>(make_shared<op::Not>(selector), when_false.get_element_type());
    adjoints.add_delta(when_true, delta * true_mask);
    adjoints.add_delta(when_false, delta * false_mask);
}
constexpr NodeTypeInfo op::v0::Select::type_info;
op::v0::Select::Select(const Output<Node>& arg0, const Output<Node>& arg1, const Output<Node>& arg2)
@ -156,18 +136,3 @@ shared_ptr<Node> op::v0::Select::clone_with_new_inputs(const OutputVector& new_a
check_new_args_count(this, new_args);
return make_shared<v0::Select>(new_args.at(0), new_args.at(1), new_args.at(2));
}
// Backprop for (v0) Select: each data input gets the delta masked by whether
// the condition selected it (1 where selected, 0 elsewhere).
void op::Select::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    const auto delta = deltas.at(0);
    const auto cond = input_value(0);
    const auto arg_true = input_value(1);
    const auto arg_false = input_value(2);

    // Numeric 0/1 masks built from the condition and its logical negation,
    // converted to the element types of the respective data inputs.
    const auto mask_true = make_shared<op::Convert>(cond, arg_true.get_element_type());
    const auto mask_false =
        make_shared<op::Convert>(make_shared<op::Not>(cond), arg_false.get_element_type());
    adjoints.add_delta(arg_true, delta * mask_true);
    adjoints.add_delta(arg_false, delta * mask_false);
}

View File

@ -60,10 +60,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
@ -124,10 +120,6 @@ namespace ngraph
bool supports_auto_broadcast() const override { return true; }
// TODO: Move all uses of get_autob to get_auto_broadcast() and remove this.
const AutoBroadcastSpec& get_autob() const override { return m_auto_broadcast; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
AutoBroadcastSpec m_auto_broadcast;
};

View File

@ -43,15 +43,6 @@ shared_ptr<Node> op::Sin::clone_with_new_inputs(const OutputVector& new_args) co
return make_shared<Sin>(new_args.at(0));
}
// d/dx sin(x) = cos(x); the chain rule multiplies the incoming delta by cos(x).
void op::Sin::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    const auto arg = input_value(0);
    adjoints.add_delta(arg, deltas.at(0) * make_shared<op::Cos>(arg));
}
namespace
{
template <element::Type_t ET>

Some files were not shown because too many files have changed in this diff Show More