remove nGraph deprecated methods (part 1) (#1314)

* Removed remove_goe

* Removed traverse_nodes

* Removed deprecated constructors

* Removed deprecated tensor methods

* Fixed IE build

* Fixed code style
Ilya Churaev authored on 2020-07-16 06:03:59 +03:00, committed by GitHub
parent 3b6cb0e0cd
commit 317a60545b
25 changed files with 14 additions and 388 deletions


@@ -88,7 +88,7 @@ inline ::ngraph::element::Type convertPrecision(const std::string& precision) {
 }
 inline Precision convertPrecision(const ::ngraph::element::Type& precision) {
-    switch (precision.get_type_enum()) {
+    switch (precision) {
     case ::ngraph::element::Type_t::undefined:
        return Precision(Precision::UNSPECIFIED);
     case ::ngraph::element::Type_t::f16:
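
Since `element::Type` now converts implicitly to `element::Type_t`, call sites simply drop the deprecated `get_type_enum()`, both in `switch` statements (above) and in equality tests (the V10Parser hunk below). A minimal sketch of the pattern, with an illustrative function name:

```cpp
#include <ngraph/type/element_type.hpp>

void dispatch_on_type(const ngraph::element::Type& t)
{
    switch (t) // was: switch (t.get_type_enum())
    {
    case ngraph::element::Type_t::f16: /* ... */ break;
    default: break;
    }
    if (ngraph::element::Type_t::undefined == t) // comparisons convert the same way
    {
        /* ... */
    }
}
```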


@@ -71,7 +71,7 @@ public:
                 genNode->doReshape(false);
                 genericOps.emplace_back(genNode);
             }
-        }, true, nParams);
+        }, nParams);
     }
 }
 };


@@ -381,7 +381,7 @@ std::shared_ptr<ngraph::Node> V10Parser::createNode(const std::vector<ngraph::Ou
     if (!inputs[i].get_node())
         THROW_IE_EXCEPTION << params.type << " layer " << params.name << " with id: " << params.layerId
                            << " has incorrect input with index " << i << "!";
-    if (inputs[i].get_element_type().get_type_enum() == ngraph::element::Type_t::undefined)
+    if (ngraph::element::Type_t::undefined == inputs[i].get_element_type())
         THROW_IE_EXCEPTION << params.type << " layer " << params.name << " with id: " << params.layerId
                            << " has undefined element type for input with index " << i << "!";
 }


@@ -63,8 +63,8 @@ void evaluateStaticShapeNonZero(const Shape& inputShape,
     const auto outShapeBuffer = outShape->get_data_ptr<OutType>();
     const auto totalInputSize = shape_size(inputShape);
-    const auto inputRank = static_cast<ngraph::Dimension::value_type>(nonZeroOutput->get_partial_shape()[0]);
-    const auto nonZeroCount = static_cast<ngraph::Dimension::value_type>(nonZeroOutput->get_partial_shape()[1]);
+    const auto inputRank = nonZeroOutput->get_partial_shape()[0].get_length();
+    const auto nonZeroCount = nonZeroOutput->get_partial_shape()[1].get_length();
     for (size_t i = 0; i < inputRank; ++i) {
         for (size_t j = 0; j < nonZeroCount; j++) {


@@ -62,16 +62,6 @@ namespace ngraph
     /// \brief Check whether this dimension is dynamic.
     /// \return `false` if the dimension is static, else `true`.
     bool is_dynamic() const { return m_dimension.size() != 1; }
-    /// \brief Convert this dimension to `value_type`. This dimension must be static.
-    /// \throws std::invalid_argument If this dimension is dynamic.
-    explicit operator value_type() const NGRAPH_DEPRECATED("use get_length() instead")
-    {
-        if (is_dynamic())
-        {
-            throw std::invalid_argument("Cannot convert dynamic dimension to value_type");
-        }
-        return m_dimension.get_min_val();
-    }
     /// \brief Convert this dimension to `value_type`. This dimension must be static and
     ///        non-negative.
     /// \throws std::invalid_argument If this dimension is dynamic or negative.
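
Callers of the removed conversion operator switch to `get_length()`, as the `replace_squeeze_unsqueeze` and `try_apply_auto_padding` hunks later in this commit show. A minimal sketch, with an illustrative helper name:

```cpp
#include <ngraph/dimension.hpp>

int64_t static_extent(const ngraph::Dimension& d)
{
    // was: static_cast<ngraph::Dimension::value_type>(d)
    // like the removed operator, get_length() throws std::invalid_argument
    // when the dimension is dynamic
    return d.get_length();
}
```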


@@ -96,14 +96,6 @@ void ngraph::traverse_nodes(const NodeVector& subgraph_results,
     }
 }
-void ngraph::traverse_nodes(const NodeVector& subgraph_results,
-                            std::function<void(std::shared_ptr<Node>)> f,
-                            bool,
-                            const NodeVector& subgraph_params)
-{
-    traverse_nodes(subgraph_results, f, subgraph_params);
-}
 NodeVector ngraph::find_common_args(std::shared_ptr<Node> node1, std::shared_ptr<Node> node2)
 {
     std::unordered_set<std::shared_ptr<Node>> node1_args;
@@ -877,12 +869,6 @@ bool ngraph::check_for_cycles(const ngraph::Function* func,
     return false;
 }
-void ngraph::traverse_functions(std::shared_ptr<Function> p,
-                                std::function<void(std::shared_ptr<Function>)> f)
-{
-    f(p);
-}
 bool ngraph::replace_output_update_name(Output<Node> output, const Output<Node>& replacement)
 {
     bool has_result_output = false;


@@ -71,18 +71,6 @@ namespace ngraph
                         std::function<void(std::shared_ptr<Node>)> f,
                         const NodeVector& subgraph_params = {});
-    NGRAPH_API
-    void traverse_nodes(const NodeVector& subgraph_results,
-                        std::function<void(std::shared_ptr<Node>)> f,
-                        bool,
-                        const NodeVector& subgraph_params = {})
-        NGRAPH_DEPRECATED("Use traverse_nodes without control-deps option");
-    NGRAPH_API
-    void traverse_functions(std::shared_ptr<Function> p,
-                            std::function<void(std::shared_ptr<Function>)> f)
-        NGRAPH_DEPRECATED("Replace with f(p)");
     /// \brief Replace the node `target` with the node `replacement`, i.e.,
     ///        redirect all users and control dependencies of `target` to
     ///        `replacement`.
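
The deprecation strings double as migration notes. A hypothetical caller updated for both removals could look like this (names are illustrative):

```cpp
#include <memory>
#include <ngraph/function.hpp>
#include <ngraph/graph_util.hpp>

void visit_all(const std::shared_ptr<ngraph::Function>& fn,
               const ngraph::NodeVector& results,
               const ngraph::NodeVector& params)
{
    auto on_function = [](std::shared_ptr<ngraph::Function> p) { /* ... */ };
    // was: ngraph::traverse_functions(fn, on_function);
    on_function(fn); // "Replace with f(p)"

    auto on_node = [](std::shared_ptr<ngraph::Node> n) { /* ... */ };
    // was: ngraph::traverse_nodes(results, on_node, true, params);
    ngraph::traverse_nodes(results, on_node, params); // control-deps flag dropped
}
```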


@@ -68,13 +68,6 @@ namespace ngraph
     using HostTensorPtr = std::shared_ptr<HostTensor>;
     using HostTensorVector = std::vector<HostTensorPtr>;
-    // Internal, controls whether GetOutputElement nodes are elided
-    // Defaults to being elided. Transformer should set to false if
-    // it has passes that depend on GetOutputElement.
-    NGRAPH_API void set_remove_goe(bool value)
-        NGRAPH_DEPRECATED("Remove dependencies on GetOrderedOutput");
-    NGRAPH_API bool get_remove_goe() NGRAPH_DEPRECATED("Remove dependencies on GetOrderedOutput");
     namespace op
     {
         struct AutoBroadcastSpec;


@@ -21,16 +21,6 @@
 namespace ngraph
 {
-    namespace
-    {
-        static bool remove_goe = true;
-    }
-    void set_remove_goe(bool value)
-    {
-        NGRAPH_DEBUG << "Remove GOE set: " << value;
-        remove_goe = value;
-    }
-    bool get_remove_goe() { return remove_goe; }
     Output<Node>::Output(Node* node, size_t index)
         : m_node(node->shared_from_this())
         , m_index(index)
@@ -128,12 +118,9 @@ namespace ngraph
     bool Output<Node>::operator>=(const Output& other) const { return !(*this < other); }
     void Output<Node>::eliminate_goe()
     {
-        if (remove_goe)
+        while (is_type<op::GetOutputElement>(m_node))
         {
-            while (is_type<op::GetOutputElement>(m_node))
-            {
-                *this = m_node->input_value(0);
-            }
+            *this = m_node->input_value(0);
         }
     }
@@ -212,14 +199,11 @@ namespace ngraph
     bool Output<const Node>::operator>=(const Output& other) const { return !(*this < other); }
     void Output<const Node>::eliminate_goe()
     {
-        if (remove_goe)
+        while (is_type<const op::GetOutputElement>(m_node))
         {
-            while (is_type<const op::GetOutputElement>(m_node))
-            {
-                auto value = m_node->input_value(0);
-                m_node = value.get_node_shared_ptr();
-                m_index = value.get_index();
-            }
+            auto value = m_node->input_value(0);
+            m_node = value.get_node_shared_ptr();
+            m_index = value.get_index();
         }
     }


@@ -38,19 +38,6 @@ op::BatchNormInference::BatchNormInference(const Output<Node>& input,
     constructor_validate_and_infer_types();
 }
-// DEPRECATED
-op::BatchNormInference::BatchNormInference(double eps,
-                                           const Output<Node>& gamma,
-                                           const Output<Node>& beta,
-                                           const Output<Node>& input,
-                                           const Output<Node>& mean,
-                                           const Output<Node>& variance)
-    : Op({gamma, beta, input, mean, variance})
-    , m_epsilon(eps)
-{
-    constructor_validate_and_infer_types();
-}
 bool op::BatchNormInference::visit_attributes(AttributeVisitor& visitor)
 {
     visitor.on_attribute("epsilon", m_epsilon);


@@ -49,33 +49,6 @@ namespace ngraph
     bool visit_attributes(AttributeVisitor& visitor) override;
-    NGRAPH_DEPRECATED_DOC
-    /// In this version of BatchNorm:
-    ///
-    /// MEAN AND VARIANCE: provided by the 'mean' and 'variance' parameters.
-    ///
-    /// OUTPUT VALUE: a single tensor with the normalized value of 'input'.
-    ///
-    /// AUTODIFF SUPPORT:
-    ///   - 'generate_adjoints(...)' may throw an exception.
-    ///
-    /// SHAPE DETAILS:
-    ///   gamma:    must have rank 1, with the same span as input's channel axis.
-    ///   beta:     must have rank 1, with the same span as input's channel axis.
-    ///   input:    must have rank >= 2. The second dimension represents the channel axis
-    ///             and must have a span of at least 1.
-    ///   mean:     must have rank 1, with the same span as input's channel axis.
-    ///   variance: must have rank 1, with the same span as input's channel axis.
-    ///   output:   shall have the same shape as 'input'.
-    NGRAPH_DEPRECATED("Use another constructor")
-    BatchNormInference(double eps,
-                       const Output<Node>& gamma,
-                       const Output<Node>& beta,
-                       const Output<Node>& input,
-                       const Output<Node>& mean,
-                       const Output<Node>& variance);
     void validate_and_infer_types() override;
     double get_eps_value() const { return m_epsilon; }


@@ -35,12 +35,6 @@ namespace ngraph
         {
         }
         Op(const OutputVector& arguments);
-        // To only be removed by OpenVINO
-        NGRAPH_DEPRECATED("Use OutputVector constructor instead")
-        Op(const NodeVector& nodes)
-            : Op(as_output_vector(nodes))
-        {
-        }
     };
 }
 }
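
Code that still holds a `NodeVector` can convert explicitly with `as_output_vector`, which is exactly what the removed constructor delegated to. A one-line sketch (assuming `as_output_vector` is declared in `ngraph/node.hpp` in this version):

```cpp
#include <ngraph/node.hpp>

ngraph::OutputVector to_outputs(const ngraph::NodeVector& nodes)
{
    // previously done implicitly by the removed Op(const NodeVector&) constructor
    return ngraph::as_output_vector(nodes);
}
```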


@@ -197,7 +197,7 @@ void pass::CoreFusion::construct_folded_batch_norm()
     auto beta = make_shared<pattern::op::Label>(element::f32, beta_shape);
     double eps = 0.001;
     auto shape_r = Shape{1, 2, 2, 2};
-    auto bn = make_shared<op::BatchNormInference>(eps, gamma, beta, pconv, mean, var);
+    auto bn = make_shared<op::BatchNormInference>(pconv, gamma, beta, mean, var, eps);
     auto callback = [input, filters, mean, var, gamma, beta](pattern::Matcher& m) {
         NGRAPH_DEBUG << "In callback for folded batch norm against node = "
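
The surviving `BatchNormInference` constructor takes the data input first and epsilon last, as the updated call above shows. A minimal sketch of the new call shape (the helper name is illustrative):

```cpp
#include <memory>
#include <ngraph/ngraph.hpp>

std::shared_ptr<ngraph::op::BatchNormInference>
make_bn(const ngraph::Output<ngraph::Node>& input,
        const ngraph::Output<ngraph::Node>& gamma,
        const ngraph::Output<ngraph::Node>& beta,
        const ngraph::Output<ngraph::Node>& mean,
        const ngraph::Output<ngraph::Node>& variance)
{
    // was: make_shared<op::BatchNormInference>(eps, gamma, beta, input, mean, variance)
    return std::make_shared<ngraph::op::BatchNormInference>(
        input, gamma, beta, mean, variance, /*epsilon=*/0.001);
}
```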


@@ -54,12 +54,6 @@
     void set_function(const std::shared_ptr<Function> function) { m_function = function; }
     std::shared_ptr<Function> get_function() const { return m_function; }
-    std::vector<std::shared_ptr<Function>> get_functions() const
-        NGRAPH_DEPRECATED("Use get_function()")
-    {
-        return {m_function};
-    }
 private:
     visualize_tree_ops_map_t m_visualize_tree_ops_map;
     std::shared_ptr<Function> m_function;


@@ -171,7 +171,7 @@ static bool replace_squeeze_unsqueeze(const std::shared_ptr<Node>& node)
     }
     else
     {
-        target_shape.emplace_back(shape_ps[i]);
+        target_shape.emplace_back(shape_ps[i].get_length());
     }
 }


@@ -77,21 +77,3 @@ void runtime::Tensor::set_stale(bool val)
 {
     m_stale = val;
 }
-void runtime::Tensor::copy_from(const ngraph::runtime::Tensor& source)
-{
-    if (get_element_count() != source.get_element_count())
-    {
-        throw invalid_argument("runtime::Tensor::copy_from element count must match");
-    }
-    if (get_element_type() != source.get_element_type())
-    {
-        throw invalid_argument("runtime::Tensor::copy_from element types must match");
-    }
-    // This is potentially inefficient but is supplied only to get things going
-    // This will be replaced with more optimal implementations in later PRs
-    auto size = get_size_in_bytes();
-    AlignedBuffer buffer{size, 64};
-    source.read(buffer.get_ptr(), size);
-    write(buffer.get_ptr(), size);
-}


@@ -103,12 +103,6 @@ namespace ngraph
     /// \brief notify tensor of new data, call may block.
     ///        backends may use this as indication of new data in tensor.
     virtual void wait_for_write_ready() {}
-    /// \brief copy bytes directly from source to this tensor
-    /// \param source The source tensor
-    virtual void copy_from(const ngraph::runtime::Tensor& source) NGRAPH_DEPRECATED(
-        "Allocate buf_ptr with size=get_size_in_bytes(), then use source.read(buf_ptr, "
-        "size) followed by this->write(buf_ptr, size)");
 protected:
     std::shared_ptr<ngraph::descriptor::Tensor> m_descriptor;
     bool m_stale;
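
The deprecation string spells out the replacement. A minimal helper that follows it (the function name is illustrative; it assumes, as `copy_from` did, that element type and count already match):

```cpp
#include <vector>
#include <ngraph/runtime/tensor.hpp>

void copy_tensor(ngraph::runtime::Tensor& dst, const ngraph::runtime::Tensor& src)
{
    std::vector<char> buf(src.get_size_in_bytes()); // buf_ptr with size=get_size_in_bytes()
    src.read(buf.data(), buf.size());               // source.read(buf_ptr, size)
    dst.write(buf.data(), buf.size());              // this->write(buf_ptr, size)
}
```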


@@ -76,10 +76,6 @@ namespace ngraph
                  const std::string& cname);
     ~Type() {}
     Type& operator=(const Type&) = default;
-    NGRAPH_DEPRECATED("Use operator Type_t()") Type_t get_type_enum() const
-    {
-        return m_type;
-    }
     const std::string& c_type_string() const;
     size_t size() const;
     size_t hash() const;


@@ -646,7 +646,7 @@ bool ngraph::try_apply_auto_padding(const PartialShape& image_shape,
     for (size_t i = 0; i < static_cast<size_t>(filter_shape.size()); i++)
     {
-        int64_t image_size = static_cast<int64_t>(image_dims[i + 2]);
+        int64_t image_size = static_cast<int64_t>(image_dims[i + 2].get_length());
         int64_t filter_size = (static_cast<int64_t>(filter_shape[i]) - 1) * filter_dilations[i] + 1;
         int64_t filter_stride = static_cast<int64_t>(filter_strides[i]);
         auto output_size = (image_size + filter_stride - 1) / filter_stride;


@@ -55,25 +55,6 @@ NGRAPH_TEST(${BACKEND_NAME}, create_tensor_1)
     EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
 }
-// This tests a backend's implementation of the copy_from for tensor
-NGRAPH_TEST(${BACKEND_NAME}, tensor_copy_from)
-{
-    Shape shape{2, 2};
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    // Create some tensors for input/output
-    vector<float> av = {1, 2, 3, 4};
-    vector<float> bv = {5, 6, 7, 8};
-    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
-    shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
-    copy_data(a, av);
-    copy_data(b, bv);
-    a->copy_from(*b);
-    EXPECT_TRUE(test::all_close_f(bv, read_vector<float>(a), MIN_FLOAT_TOLERANCE_BITS));
-}
 NGRAPH_TEST(${BACKEND_NAME}, get_parameters_and_results)
 {
     Shape shape{2, 2};


@@ -2053,69 +2053,6 @@ NGRAPH_TEST(${BACKEND_NAME}, split_var_len_parts)
     test_case.run();
 }
-NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_no_bias_no_peepholes)
-{
-    DisableRemoveGOE nogoe;
-    const size_t batch_size = 2;
-    const size_t input_size = 3;
-    const size_t hidden_size = 3;
-    const size_t gates_count = 4;
-    const auto X = make_shared<op::Parameter>(element::f32, Shape{batch_size, input_size});
-    const auto W =
-        make_shared<op::Parameter>(element::f32, Shape{gates_count * hidden_size, input_size});
-    const auto R =
-        make_shared<op::Parameter>(element::f32, Shape{gates_count * hidden_size, hidden_size});
-    const auto H_t = make_shared<op::Parameter>(element::f32, Shape{batch_size, hidden_size});
-    const auto C_t = make_shared<op::Parameter>(element::f32, Shape{batch_size, hidden_size});
-    const auto lstm_cell =
-        make_shared<op::LSTMCell>(X, H_t, C_t, W, R, hidden_size, op::LSTMWeightsFormat::IOFC);
-    auto ht_function = make_shared<Function>(make_shared<op::GetOutputElement>(lstm_cell, 0),
-                                             ParameterVector{X, H_t, C_t, W, R});
-    auto ht_test_case = test::TestCase<TestEngine>(ht_function);
-    // X
-    vector<float> in_X{0.81342685f, 0.84108883f, 0.8152282f, 0.46893653f, 0.0901856f, 0.37088776f};
-    // W
-    vector<float> in_W{3.3330739e-01f, 3.6229487e-04f, 4.6773660e-01f, 4.3046016e-01f,
-                       7.3950343e-02f, 3.8063636e-01f, 9.6921772e-01f, 9.6897459e-01f,
-                       6.2964785e-01f, 3.1134409e-01f, 8.4709978e-01f, 9.4928098e-01f,
-                       6.1676943e-01f, 6.6020679e-01f, 1.9072217e-01f, 8.8032126e-02f,
-                       4.0472135e-01f, 6.8342745e-01f, 8.3432144e-01f, 4.4928190e-01f,
-                       7.9524308e-01f, 5.3966165e-01f, 8.5936421e-01f, 8.3136767e-01f,
-                       5.5125546e-02f, 4.7791195e-01f, 3.5788772e-01f, 6.7507404e-01f,
-                       2.1716513e-01f, 2.7473119e-01f, 3.3999152e-02f, 9.6835363e-01f,
-                       3.7581277e-01f, 2.4026000e-01f, 6.7418844e-01f, 3.4199652e-01f};
-    // R
-    vector<float> in_R{
-        0.0987983f, 0.52032113f, 0.5848073f, 0.5356095f, 0.74497133f, 0.73260087f,
-        0.1700787f, 0.45684233f, 0.1495722f, 0.42734373f, 0.4433832f, 0.25906256f,
-        0.03854987f, 0.47480518f, 0.37215272f, 0.99890584f, 0.74019486f, 0.3518967f,
-        0.6881257f, 0.8170279f, 0.54088944f, 0.81225616f, 0.14619833f, 0.42941234f,
-        0.86843914f, 0.45967972f, 0.6237719f, 0.11074839f, 0.6029616f, 0.3149305f,
-        0.46504205f, 0.5843412f, 0.8733427f, 0.7687243f, 0.07074859f, 0.39188156f};
-    // Ht
-    vector<float> in_Ht{0.77956f, 0.5331557f, 0.04297554f, 0.7962175f, 0.7635707f, 0.11989366f};
-    // Ct
-    vector<float> in_Ct{0.8488452f, 0.18851636f, 0.5020695f, 0.29716516f, 0.06740791f, 0.45384037f};
-    ht_test_case.add_multiple_inputs(vector<vector<float>>{in_X, in_Ht, in_Ct, in_W, in_R});
-    ht_test_case.add_expected_output<float>(
-        Shape{batch_size, hidden_size},
-        {0.81457126f, 0.61109227f, 0.769522f, 0.52239674f, 0.4324641f, 0.63183f});
-    ht_test_case.run();
-    auto ct_function = make_shared<Function>(make_shared<op::GetOutputElement>(lstm_cell, 1),
-                                             ParameterVector{X, H_t, C_t, W, R});
-    auto ct_test_case = test::TestCase<TestEngine>(ct_function);
-    ct_test_case.add_multiple_inputs(vector<vector<float>>{in_X, in_Ht, in_Ct, in_W, in_R});
-    ct_test_case.add_expected_output<float>(
-        Shape{batch_size, hidden_size},
-        {1.4444952f, 0.9635685f, 1.2875274f, 0.8053419f, 0.7184521f, 0.95803297f});
-    ct_test_case.run();
-}
 NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_zero_bias_peepholes)
 {
     const size_t batch_size = 2;
@@ -2187,124 +2124,6 @@ NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_zero_bias_peepholes)
     ct_test_case.run();
 }
-NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_zero_bias_peepholes_constant)
-{
-    DisableRemoveGOE nogoe;
-    const size_t batch_size = 2;
-    const size_t input_size = 3;
-    const size_t hidden_size = 3;
-    const size_t gates_count = 4;
-    const auto X = make_shared<op::Parameter>(element::f32, Shape{batch_size, input_size});
-    const auto W =
-        make_shared<op::Parameter>(element::f32, Shape{gates_count * hidden_size, input_size});
-    const auto R =
-        make_shared<op::Parameter>(element::f32, Shape{gates_count * hidden_size, hidden_size});
-    const auto H_t = make_shared<op::Parameter>(element::f32, Shape{batch_size, hidden_size});
-    const auto C_t = make_shared<op::Parameter>(element::f32, Shape{batch_size, hidden_size});
-    const auto B = make_shared<op::Constant>(
-        element::f32, Shape{gates_count * hidden_size}, std::vector<float>{0.f});
-    const auto P =
-        make_shared<op::Constant>(element::f32, Shape{3 * hidden_size}, std::vector<float>{0.f});
-    const auto lstm_cell = make_shared<op::LSTMCell>(
-        X, H_t, C_t, W, R, B, P, hidden_size, op::LSTMWeightsFormat::IOFC);
-    auto ht_function = make_shared<Function>(make_shared<op::GetOutputElement>(lstm_cell, 0),
-                                             ParameterVector{X, H_t, C_t, W, R});
-    auto ht_test_case = test::TestCase<TestEngine>(ht_function);
-    // X
-    vector<float> in_X{0.81342685f, 0.84108883f, 0.8152282f, 0.46893653f, 0.0901856f, 0.37088776f};
-    // W
-    vector<float> in_W{3.3330739e-01f, 3.6229487e-04f, 4.6773660e-01f, 4.3046016e-01f,
-                       7.3950343e-02f, 3.8063636e-01f, 9.6921772e-01f, 9.6897459e-01f,
-                       6.2964785e-01f, 3.1134409e-01f, 8.4709978e-01f, 9.4928098e-01f,
-                       6.1676943e-01f, 6.6020679e-01f, 1.9072217e-01f, 8.8032126e-02f,
-                       4.0472135e-01f, 6.8342745e-01f, 8.3432144e-01f, 4.4928190e-01f,
-                       7.9524308e-01f, 5.3966165e-01f, 8.5936421e-01f, 8.3136767e-01f,
-                       5.5125546e-02f, 4.7791195e-01f, 3.5788772e-01f, 6.7507404e-01f,
-                       2.1716513e-01f, 2.7473119e-01f, 3.3999152e-02f, 9.6835363e-01f,
-                       3.7581277e-01f, 2.4026000e-01f, 6.7418844e-01f, 3.4199652e-01f};
-    // R
-    vector<float> in_R{
-        0.0987983f, 0.52032113f, 0.5848073f, 0.5356095f, 0.74497133f, 0.73260087f,
-        0.1700787f, 0.45684233f, 0.1495722f, 0.42734373f, 0.4433832f, 0.25906256f,
-        0.03854987f, 0.47480518f, 0.37215272f, 0.99890584f, 0.74019486f, 0.3518967f,
-        0.6881257f, 0.8170279f, 0.54088944f, 0.81225616f, 0.14619833f, 0.42941234f,
-        0.86843914f, 0.45967972f, 0.6237719f, 0.11074839f, 0.6029616f, 0.3149305f,
-        0.46504205f, 0.5843412f, 0.8733427f, 0.7687243f, 0.07074859f, 0.39188156f};
-    // Ht
-    vector<float> in_Ht{0.77956f, 0.5331557f, 0.04297554f, 0.7962175f, 0.7635707f, 0.11989366f};
-    // Ct
-    vector<float> in_Ct{0.8488452f, 0.18851636f, 0.5020695f, 0.29716516f, 0.06740791f, 0.45384037f};
-    ht_test_case.add_multiple_inputs(vector<vector<float>>{in_X, in_Ht, in_Ct, in_W, in_R});
-    ht_test_case.add_expected_output<float>(
-        Shape{batch_size, hidden_size},
-        {0.81457126f, 0.61109227f, 0.769522f, 0.52239674f, 0.4324641f, 0.63183f});
-    ht_test_case.run();
-    auto ct_function = make_shared<Function>(make_shared<op::GetOutputElement>(lstm_cell, 1),
-                                             ParameterVector{X, H_t, C_t, W, R});
-    auto ct_test_case = test::TestCase<TestEngine>(ct_function);
-    ct_test_case.add_multiple_inputs(vector<vector<float>>{in_X, in_Ht, in_Ct, in_W, in_R});
-    ct_test_case.add_expected_output<float>(
-        Shape{batch_size, hidden_size},
-        {1.4444952f, 0.9635685f, 1.2875274f, 0.8053419f, 0.7184521f, 0.95803297f});
-    ct_test_case.run();
-}
-NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_fixed_no_bias_no_peepholes)
-{
-    DisableRemoveGOE nogoe;
-    const size_t batch_size = 2;
-    const size_t input_size = 3;
-    const size_t hidden_size = 3;
-    const size_t gates_count = 4;
-    const auto X = make_shared<op::Parameter>(element::f32, Shape{batch_size, input_size});
-    const auto W =
-        make_shared<op::Parameter>(element::f32, Shape{gates_count * hidden_size, input_size});
-    const auto R =
-        make_shared<op::Parameter>(element::f32, Shape{gates_count * hidden_size, hidden_size});
-    const auto H_t = make_shared<op::Parameter>(element::f32, Shape{batch_size, hidden_size});
-    const auto C_t = make_shared<op::Parameter>(element::f32, Shape{batch_size, hidden_size});
-    const auto lstm_cell =
-        make_shared<op::LSTMCell>(X, H_t, C_t, W, R, hidden_size, op::LSTMWeightsFormat::IOFC);
-    auto ht_function = make_shared<Function>(make_shared<op::GetOutputElement>(lstm_cell, 0),
-                                             ParameterVector{X, H_t, C_t, W, R});
-    auto ht_test_case = test::TestCase<TestEngine>(ht_function);
-    // X
-    vector<float> in_X(batch_size * input_size, 0.5f);
-    // W
-    vector<float> in_W(gates_count * hidden_size * input_size, 0.25f);
-    // R
-    vector<float> in_R(gates_count * hidden_size * hidden_size, 0.25f);
-    // Ht
-    vector<float> in_Ht(batch_size * hidden_size, 0.75f);
-    // Ct
-    vector<float> in_Ct(batch_size * hidden_size, 0.75f);
-    ht_test_case.add_multiple_inputs(vector<vector<float>>{in_X, in_Ht, in_Ct, in_W, in_R});
-    ht_test_case.add_expected_output<float>(
-        Shape{batch_size, hidden_size},
-        {0.56633735f, 0.56633735f, 0.56633735f, 0.56633735f, 0.56633735f, 0.56633735f});
-    ht_test_case.run();
-    auto ct_function = make_shared<Function>(make_shared<op::GetOutputElement>(lstm_cell, 1),
-                                             ParameterVector{X, H_t, C_t, W, R});
-    auto ct_test_case = test::TestCase<TestEngine>(ct_function);
-    ct_test_case.add_multiple_inputs(vector<vector<float>>{in_X, in_Ht, in_Ct, in_W, in_R});
-    ct_test_case.add_expected_output<float>(
-        Shape{batch_size, hidden_size},
-        {1.0664454f, 1.0664454f, 1.0664454f, 1.0664454f, 1.0664454f, 1.0664454f});
-    ct_test_case.run();
-}
 NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_bias_peepholes)
 {
     const size_t batch_size = 2;
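
With GOE elision now unconditional, tests like these address a multi-output node through `Output<Node>` handles instead of building `op::GetOutputElement` wrappers. A sketch of how the removed tests' functions would be constructed today (assuming the `Function` constructor that takes an `OutputVector`, which this nGraph version provides):

```cpp
// was: make_shared<Function>(make_shared<op::GetOutputElement>(lstm_cell, 0), ...)
auto ht_function = make_shared<Function>(OutputVector{lstm_cell->output(0)},
                                         ParameterVector{X, H_t, C_t, W, R});
auto ct_function = make_shared<Function>(OutputVector{lstm_cell->output(1)},
                                         ParameterVector{X, H_t, C_t, W, R});
```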


@@ -383,13 +383,6 @@ void runtime::dynamic::DynamicTensor::read(void* p, size_t n) const
     m_wrapped_tensor->read(p, n);
 }
-void runtime::dynamic::DynamicTensor::copy_from(const ngraph::runtime::Tensor& source)
-{
-    NGRAPH_CHECK(m_wrapped_tensor != nullptr,
-                 "tried to copy_from to a dynamic tensor with no allocated storage");
-    m_wrapped_tensor->copy_from(source);
-}
 bool runtime::dynamic::DynamicTensor::has_storage() const
 {
     return m_wrapped_tensor != nullptr;


@@ -139,7 +139,6 @@
     virtual const ngraph::Shape& get_shape() const override;
     virtual void write(const void* p, size_t n) override;
     virtual void read(void* p, size_t n) const override;
-    virtual void copy_from(const ngraph::runtime::Tensor& source) override;
     bool has_storage() const;
     void release_storage();
     void make_storage(const element::Type& element_type, const Shape& shape);


@@ -165,20 +165,6 @@ TEST(util, all_close)
 }
 #endif
-TEST(util, traverse_functions)
-{
-    // First create "f(A,B,C) = (A+B)*C".
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
-    auto C = make_shared<op::Parameter>(element::f32, shape);
-    auto f = make_shared<Function>((A + B) * C, ParameterVector{A, B, C}, "f");
-    vector<Function*> functions;
-    traverse_functions(f, [&](shared_ptr<Function> fp) { functions.push_back(fp.get()); });
-    ASSERT_EQ(1, functions.size());
-}
 class CloneTest : public ::testing::Test
 {
 public:


@@ -68,19 +68,6 @@ namespace ngraph
     };
 }
-class DisableRemoveGOE
-{
-public:
-    DisableRemoveGOE()
-        : m_saved_remove_goe(ngraph::get_remove_goe())
-    {
-        ngraph::set_remove_goe(false);
-    }
-    ~DisableRemoveGOE() { ngraph::set_remove_goe(m_saved_remove_goe); }
-private:
-    bool m_saved_remove_goe;
-};
 bool validate_list(const std::vector<std::shared_ptr<ngraph::Node>>& nodes);
 std::shared_ptr<ngraph::Function> make_test_graph();
 #ifndef NGRAPH_JSON_DISABLE