Removed v0 builders (#1528)
@@ -6,6 +6,7 @@

#include <functional>
#include <memory>
#include <numeric>

#include <ngraph_ops/fully_connected.hpp>
#include <ngraph/builder/make_constant.hpp>

@@ -5,6 +5,7 @@
#include "transformations/convert_opset1_to_legacy/conv_bias_fusion.hpp"

#include <memory>
#include <numeric>
#include <vector>

#include <ngraph/opsets/opset1.hpp>
@@ -120,4 +121,4 @@ ngraph::pass::DeconvAddFusion::DeconvAddFusion() {

auto m = std::make_shared<ngraph::pattern::Matcher>(add, "DeconvAddFusion");
register_matcher(m, callback);
}
}

@@ -5,6 +5,7 @@
#include "transformations/convert_opset1_to_legacy/fc_bias_fusion.hpp"

#include <memory>
#include <numeric>
#include <vector>

#include <ngraph/opsets/opset1.hpp>
@@ -71,4 +72,4 @@ ngraph::pass::FullyConnectedBiasFusion::FullyConnectedBiasFusion() {

auto m = std::make_shared<ngraph::pattern::Matcher>(add, "FullyConnectedBiasFusion");
this->register_matcher(m, callback);
}
}

@@ -27,21 +27,11 @@ set (SRC
axis_vector.hpp
builder/autobroadcast.cpp
builder/autobroadcast.hpp
builder/dequantize_builder.cpp
builder/dequantize_builder.hpp
builder/make_constant.hpp
builder/matmul_factory.cpp
builder/matmul_factory.hpp
builder/norm.cpp
builder/norm.hpp
builder/numpy_transpose.cpp
builder/numpy_transpose.hpp
builder/quantize_builder.cpp
builder/quantize_builder.hpp
builder/quantized_concat_builder.cpp
builder/quantized_concat_builder.hpp
builder/quantized_dot_builder.cpp
builder/quantized_dot_builder.hpp
builder/quantization_utils.hpp
builder/quantization_utils.cpp
builder/reduce_ops.cpp
@@ -50,7 +40,6 @@ set (SRC
builder/reshape.hpp
builder/split.cpp
builder/split.hpp
builder/tensor_mask.hpp
check.hpp
chrome_trace.cpp
chrome_trace.hpp

@@ -319,65 +319,6 @@ namespace ngraph
numpy_broadcast_node(right, right_output_shape, right_full_shape)};
}

OutputVector legacy_broadcast_for_binary_operation(const Output<Node>& left,
const Output<Node>& right,
size_t start_match_axis)
{
const auto& left_shape = left.get_shape();
const auto& right_shape = right.get_shape();

bool dimensions_identical = (left_shape == right_shape);
if (dimensions_identical)
{
return {left, right};
}

// Prepare new shape of right operand for broadcasting
// Remove dimensions with length=1 from back
auto new_right_shape = right_shape;
for (int dimension = new_right_shape.size() - 1; dimension >= 0; --dimension)
{
if (new_right_shape[dimension] == 1)
{
new_right_shape.pop_back();
}
else
{
break;
}
}

// Find first dimensions at front with length different from 1
size_t num_ones = 0;
for (size_t dimension : new_right_shape)
{
if (dimension == 1)
{
++num_ones;
}
else
{
break;
}
}

// Remove dimensions with length=1 from front
new_right_shape.erase(begin(new_right_shape), next(begin(new_right_shape), num_ones));

auto reshape_right =
make_shared<op::Reshape>(right, get_default_order(right_shape), new_right_shape);

// Move broadcast start axis parameter to right
start_match_axis += num_ones;

auto broadcast_right = make_shared<op::Broadcast>(
reshape_right,
left_shape,
calculate_broadcast_axes(left_shape, new_right_shape, start_match_axis));

return {left, broadcast_right};
}

OutputVector pdpd_broadcast(const OutputVector& inputs, int64_t axis)
{
if (inputs.size() <= 1)

@@ -152,30 +152,6 @@ namespace ngraph
operand1, shaped_op2_op3.first, shaped_op2_op3.second);
}

///
/// \brief Cast shape of two outputs to make them compatible for an element-wise binary
/// operation.
///
/// \note If necessary the right-hand-side argument will be broadcast to match the
/// shape of left-hand-side argument. The starting of the mutually equal shape
/// is specified by the argument "start_match_axis", and if it is not set suffix
/// matching is assumed.
///
/// \note This style of broadcast was used in ONNX Op sets prior to version 7, where
/// it was replaced by NumPy style auto-broadcasting mechanism.
///
/// \param left Node which contain input of binary op.
/// \param right Node which contain input of binary op.
/// \param start_match_axis Position in shape denoting start of the mutually equal
/// shape
///
/// \return Left and right node after broadcasting.
///
NGRAPH_API
OutputVector legacy_broadcast_for_binary_operation(const Output<Node>& left,
const Output<Node>& right,
size_t start_match_axis);

/// \brief Broadcast shape of two nodes to make them compatible for a matrix
/// multiplication.
///

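For reference, a minimal usage sketch of the removed helper. The shapes follow the legacy_broadcast_2d test removed later in this diff; the parameter nodes and the implied using-directives are illustrative assumptions, not part of this commit.

    // Sketch only (assumes the usual ngraph headers and using namespace ngraph):
    // pre-opset-7 ONNX-style broadcast of a {4, 5} operand against a {2, 3, 4, 5}
    // operand, with shape matching starting at axis 2.
    auto lhs = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
    auto rhs = make_shared<op::Parameter>(element::f32, Shape{4, 5});
    OutputVector r = builder::legacy_broadcast_for_binary_operation(lhs, rhs, 2);
    // r.at(0) keeps shape {2, 3, 4, 5}; r.at(1) is rhs reshaped and broadcast to {2, 3, 4, 5}.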
@@ -1,58 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <memory>

#include "ngraph/builder/dequantize_builder.hpp"

using namespace std;
using namespace ngraph;

namespace ngraph
{
namespace builder
{
shared_ptr<Node> DequantizeBuilder(const Output<Node>& input,
const Output<Node>& min,
const Output<Node>& max,
const ngraph::element::Type& real_type,
const ngraph::AxisSet& axes)
{
auto quant_type = input.get_element_type();

if (min.get_element_type() != real_type)
{
throw ngraph_error("DequantizeBuilder: min must match input type");
}

if (max.get_element_type() != real_type)
{
throw ngraph_error("DequantizeBuilder: max must match input type");
}

auto shape = min.get_shape();
if (shape != max.get_shape())
{
throw ngraph_error("DequantizeBuilder: min and max must have same shape");
}

auto zero = make_constant(quant_type, shape, 0);
auto scale = quantization_utils::get_scale(min, max, quant_type);
return make_shared<op::Dequantize>(input, scale, zero, real_type, axes)
->add_provenance_group_members_above({input, min, max});
}
}
}
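For context, a hypothetical call to the removed DequantizeBuilder. The i8/f32 types, shapes, and constant values below are illustrative assumptions and are not taken from this commit.

    // Sketch only (assumes the usual ngraph headers and using namespace ngraph):
    // dequantize an i8 tensor back to f32 with a per-tensor min/max range.
    auto q = make_shared<op::Parameter>(element::i8, Shape{2, 2});
    auto min = op::Constant::create(element::f32, Shape{}, {-1.0f});
    auto max = op::Constant::create(element::f32, Shape{}, {1.0f});
    auto dq = builder::DequantizeBuilder(q, min, max, element::f32, AxisSet{});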
@@ -1,36 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/builder/make_constant.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/dequantize.hpp"
#include "quantization_utils.hpp"

namespace ngraph
{
namespace builder
{
NGRAPH_API
std::shared_ptr<Node> DequantizeBuilder(const Output<Node>& input,
const Output<Node>& min,
const Output<Node>& max,
const ngraph::element::Type& real_type,
const ngraph::AxisSet& axes);
}
}
@@ -1,82 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <sstream>

#include "ngraph/axis_vector.hpp"
#include "ngraph/builder/numpy_transpose.hpp"
#include "ngraph/except.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/util.hpp"

namespace ngraph
{
[[noreturn]] void numpy_transpose_error(const AxisVector& order, const Shape& in_shape)
{
std::ostringstream os;
os << "The axes order ";
os << "[ " << ngraph::join(order) << " ]";
os << " is incompatible with the input shape ";
os << "[ " << ngraph::join(in_shape) << " ]";
os << " during numpy_transpose.";
throw ngraph_error(os.str());
}

namespace builder
{
std::shared_ptr<Node> numpy_transpose(const Output<Node>& value, AxisVector order)
{
auto in_shape = value.get_shape();
// default, reverse the order of the axes
if (order.size() == 0)
{
auto n = in_shape.size();
order = AxisVector(n);
std::generate(order.begin(), order.end(), [&n]() { return --n; });
}
else if (order.size() == in_shape.size())
{
// validate that the axes order is valid, i.e., unique and the right size
std::unordered_set<ngraph::AxisVector::value_type> axes;
for (auto o : order)
{
if (o < in_shape.size() && !axes.count(o))
{
axes.insert(o);
}
else
{
numpy_transpose_error(order, in_shape);
}
}
}
else
{
numpy_transpose_error(order, in_shape);
}

// create output shape
Shape out_shape;
for (size_t i = 0; i < in_shape.size(); ++i)
out_shape.push_back(in_shape[order[i]]);

// do the reshaping with the order
return std::make_shared<ngraph::op::Reshape>(value, order, out_shape)
->add_provenance_group_members_above({value});
}

} // namespace builder
} // namespace ngraph
@@ -1,53 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/axis_vector.hpp"
#include "ngraph/node.hpp"

namespace ngraph
{
namespace builder
{
// clang-format off
/// \brief Implement's Numpy's multidimensional transpose op. Doubles as DimShuffle.
///
/// If `order` is empty, the vector is transposed by reversing it's axes, i.e.
///
/// shape [1,2,4] becomes shape [4,2,1]
///
/// If `order` is provided, it should be a vector of unique axis positions ranging
/// from 0 to N-1, when N is the length of the input shape. In this case, numpy_transpose
/// acts like dimshuffle, so
///
/// shape [1,2,4] with order [1,2,0] becomes shape [2,4,1]
///
/// | | Type | Description |
/// | ---------------- | ------------------------------------- | ------------------------------------------------------- |
/// | `node` | \f$E[d_0,\dots,d_{n-1}]~(n \geq 0)\f$ | An input tensor of any shape |
/// | `order` | AxisVector (empty default) | The axes to eliminate through reduction (0 indexed). |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ |
/// | \f$E[d_{n-1},\dots,d_0)]\textit{ or }E[d_{order[0]},\dots,d_{order[n-1]}]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the axes reordered via Numpy Transpose rules |
// clang-format on
NGRAPH_API
std::shared_ptr<Node> numpy_transpose(const Output<Node>& value, AxisVector order = {});
} // namespace builder
} // namespace ngraph
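A short usage sketch of the removed numpy_transpose, restating the doc comment above; the same cases appear in the builder.numpy_transpose test removed later in this diff.

    // Sketch only (assumes the usual ngraph headers and using namespace ngraph):
    auto p = make_shared<op::Parameter>(element::f32, Shape{2, 4, 8});
    auto reversed = builder::numpy_transpose(p);                      // output shape {8, 4, 2}
    auto shuffled = builder::numpy_transpose(p, AxisVector{2, 0, 1}); // output shape {8, 2, 4}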
@@ -1,59 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <memory>

#include "ngraph/builder/quantize_builder.hpp"

using namespace std;
using namespace ngraph;

namespace ngraph
{
namespace builder
{
shared_ptr<Node> QuantizeBuilder(const Output<Node>& input,
const Output<Node>& min,
const Output<Node>& max,
const ngraph::element::Type& quant_type,
const ngraph::AxisSet& axes,
op::Quantize::RoundMode round_mode)
{
auto real_type = input.get_element_type();

if (min.get_element_type() != real_type)
{
throw ngraph_error("QuantizeBuilder: min must match input type");
}

if (max.get_element_type() != real_type)
{
throw ngraph_error("QuantizeBuilder: max must match input type");
}

auto shape = min.get_shape();
if (shape != max.get_shape())
{
throw ngraph_error("QuantizeBuilder: min and max must have same shape");
}

auto zero = make_constant(quant_type, shape, 0);
auto scale = quantization_utils::get_scale(min, max, quant_type, true);
return make_shared<op::Quantize>(input, scale, zero, quant_type, axes, round_mode)
->add_provenance_group_members_above({input, min, max});
}
}
}
@@ -1,37 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/builder/make_constant.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/quantize.hpp"
#include "quantization_utils.hpp"

namespace ngraph
{
namespace builder
{
NGRAPH_API
std::shared_ptr<Node> QuantizeBuilder(const Output<Node>& input,
const Output<Node>& min,
const Output<Node>& max,
const ngraph::element::Type& quant_type,
const ngraph::AxisSet& axes,
op::Quantize::RoundMode round_mode);
}
}
@@ -1,69 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <memory>

#include "ngraph/builder/quantized_dot_builder.hpp"

using namespace std;
using namespace ngraph;

namespace ngraph
{
namespace builder
{
shared_ptr<Node> QuantizedDotBuilder(const Output<Node>& input0,
const Output<Node>& input1,
const size_t reduction_axes_count,
const Output<Node>& min_input0,
const Output<Node>& max_input0,
const Output<Node>& min_input1,
const Output<Node>& max_input1,
const Output<Node>& min_output,
const Output<Node>& max_output,
const ngraph::element::Type& output_type,
const ngraph::AxisSet& input0_axes,
const ngraph::AxisSet& input1_axes,
const ngraph::AxisSet& output_axes)
{
auto input0_scale =
quantization_utils::get_scale(min_input0, max_input0, input0.get_element_type());
auto input1_scale =
quantization_utils::get_scale(min_input1, max_input1, input1.get_element_type());
auto output_scale = quantization_utils::get_scale(min_output, max_output, output_type);

// For Builders the zero point is assumed to be zero (for now)
auto input0_zero_point = op::Constant::create(input0.get_element_type(), Shape{}, {0});
auto input1_zero_point = op::Constant::create(input1.get_element_type(), Shape{}, {0});
auto output_zero_point = op::Constant::create(output_type, Shape{}, {0});

return make_shared<op::QuantizedDot>(input0,
input1,
reduction_axes_count,
input0_scale,
input0_zero_point,
input1_scale,
input1_zero_point,
output_scale,
output_zero_point,
output_type,
input0_axes,
input1_axes,
output_axes)
->add_provenance_group_members_above({input0, input1});
}
}
}
@@ -1,45 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/coordinate_diff.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/quantize.hpp"
#include "ngraph/op/quantized_dot.hpp"
#include "quantization_utils.hpp"

namespace ngraph
{
namespace builder
{
NGRAPH_API
std::shared_ptr<Node> QuantizedDotBuilder(const Output<Node>& input0,
const Output<Node>& input1,
const size_t reduction_axes_count,
const Output<Node>& min_input0,
const Output<Node>& max_input0,
const Output<Node>& min_input1,
const Output<Node>& max_input1,
const Output<Node>& min_output,
const Output<Node>& max_output,
const ngraph::element::Type& output_type,
const ngraph::AxisSet& input0_axes,
const ngraph::AxisSet& input1_axes,
const ngraph::AxisSet& output_axes);
}
}
@@ -1,103 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include <numeric>

#include "ngraph/axis_set.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/op/less.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/util.hpp"

namespace ngraph
{
namespace builder
{
// batch_size = mask_shape on the batch_axis
// max_sequence_length = mask_shape on the sequence_axis
// sequence_lengths = list of lengths < max_sequence_length of shape batch_size
// a mask is created by...
// 1. creating a sequence starting at sequence_begin of shape max_sequence_length
// 2. broadcasting that sequence along all non-sequence axes to mask_shape
// 3. broadcasting sequence_lengths along all non-batch axes to mask_shape
// 4. returning the specified binary element-wise operation T #2 and #3
template <class T>
std::shared_ptr<Node> tensor_mask(const std::shared_ptr<Node>& sequence_lengths,
size_t sequence_axis,
size_t batch_axis,
ngraph::Shape mask_shape,
uint32_t sequence_begin)
{
if (sequence_axis >= mask_shape.size())
{
throw ngraph_error("Sequence axis must be in range 0..mask_shape rank");
}

if (batch_axis >= mask_shape.size())
{
throw ngraph_error("Sequence axis must be in range 0..mask_shape rank");
}

// all axes except the sequence axis
ngraph::AxisSet non_sequence_axes;
// all axes except the batch axis
ngraph::AxisSet non_batch_axes;

for (size_t axis = 0; axis < mask_shape.size(); ++axis)
{
if (axis != sequence_axis)
{
non_sequence_axes.insert(axis);
}
if (axis != batch_axis)
{
non_batch_axes.insert(axis);
}
}

// broadcast sequence lengths to mask shape along all non-batch axes
auto broadcast_sequence_lengths = std::make_shared<ngraph::op::Broadcast>(
sequence_lengths, mask_shape, non_batch_axes);

// create sequence data [0, ..., max_sequence_length]
auto max_sequence_length = mask_shape[sequence_axis];
std::vector<uint32_t> sequence_data(max_sequence_length);
std::iota(sequence_data.begin(), sequence_data.end(), sequence_begin);

// create sequence constant
auto sequence = std::make_shared<ngraph::op::Constant>(
element::u32, Shape{max_sequence_length}, sequence_data);

// convert sequence to input type
auto convert_sequence = std::make_shared<ngraph::op::Convert>(
sequence, sequence_lengths->get_element_type());

// broadcast sequence to mask shape along all non-sequence axes
auto broadcast_sequence = std::make_shared<ngraph::op::Broadcast>(
convert_sequence, mask_shape, non_sequence_axes);

// mask = sequence_length < sequence
return std::make_shared<T>(broadcast_sequence, broadcast_sequence_lengths)
->add_provenance_group_members_above({sequence_lengths});
}
}
}
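As a worked illustration of the comment block above, the shapes, axes, and lengths here mirror the builder.tensor_mask test removed later in this diff; the parameter node and implied using-directives are assumptions.

    // Sketch only (assumes the usual ngraph headers and using namespace ngraph):
    // mask_shape {3, 5}, sequence_axis 1, batch_axis 0, sequence_begin 0.
    // With sequence_lengths {1, 3, 2}, entry (b, s) evaluates (s < lengths[b]):
    //   row 0 (len 1): 1 0 0 0 0
    //   row 1 (len 3): 1 1 1 0 0
    //   row 2 (len 2): 1 1 0 0 0
    auto lengths = make_shared<op::Parameter>(element::u32, Shape{3});
    auto mask = builder::tensor_mask<op::Less>(lengths, 1, 0, Shape{3, 5}, 0);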
@@ -68,14 +68,8 @@ namespace ngraph
#include "ngraph/attribute_adapter.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/autobroadcast.hpp"
#include "ngraph/builder/dequantize_builder.hpp"
#include "ngraph/builder/numpy_transpose.hpp"
#include "ngraph/builder/quantize_builder.hpp"
#include "ngraph/builder/quantized_concat_builder.hpp"
#include "ngraph/builder/quantized_dot_builder.hpp"
#include "ngraph/builder/reduce_ops.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/builder/tensor_mask.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"

@@ -17,6 +17,7 @@
#pragma once

#include <cmath>
#include <numeric>
#include <utility>
#include <vector>

@@ -18,6 +18,7 @@
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <numeric>
#include <random>
#include <string>

@@ -14,6 +14,8 @@
// limitations under the License.
//*****************************************************************************

#include <numeric>

#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"

@@ -20,6 +20,7 @@
#include <cstdlib>
#include <iterator>
#include <limits>
#include <numeric>
#include <random>
#include <string>

@@ -18,6 +18,7 @@
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <numeric>
#include <random>
#include <string>

@@ -18,6 +18,7 @@
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <numeric>
#include <random>
#include <string>

@@ -18,6 +18,7 @@
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <numeric>
#include <random>
#include <string>

@@ -14,6 +14,8 @@
// limitations under the License.
//*****************************************************************************

#include <numeric>

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"

@@ -18,6 +18,7 @@
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <numeric>
#include <random>
#include <string>

@@ -106,52 +106,3 @@ TEST(builder, variance)
result = make_reduce_result_true(builder::variance);
ASSERT_TRUE(test::all_close((vector<float>{4, 4}), read_vector<float>(result)));
}

TEST(builder, numpy_transpose)
{
// 2D Transpose
Shape shape{2, 4};
auto param = make_shared<op::Parameter>(element::f32, shape);
auto transposed = as_type_ptr<op::Reshape>(builder::numpy_transpose(param));
EXPECT_EQ(Shape({4, 2}), transposed->get_output_shape(0));

// Multidimensional Transpose
shape = Shape{2, 4, 8};
param = make_shared<op::Parameter>(element::f32, shape);
transposed = as_type_ptr<op::Reshape>(builder::numpy_transpose(param));
EXPECT_EQ(Shape({8, 4, 2}), transposed->get_output_shape(0));

// Dimshuffle
shape = Shape{2, 4, 8};
param = make_shared<op::Parameter>(element::f32, shape);
transposed = as_type_ptr<op::Reshape>(builder::numpy_transpose(param, AxisVector{2, 0, 1}));
EXPECT_EQ(Shape({8, 2, 4}), transposed->get_output_shape(0));

// Bad Orders
EXPECT_ANY_THROW(as_type_ptr<op::Reshape>(builder::numpy_transpose(param, AxisVector{2})));
EXPECT_ANY_THROW(
as_type_ptr<op::Reshape>(builder::numpy_transpose(param, AxisVector{2, 2, 1})));
}

TEST(builder, tensor_mask)
{
Shape max_sequence_length{3};
auto sequence_lengths = make_shared<op::Parameter>(element::u32, max_sequence_length);

Shape mask_shape{3, 5};
auto f =
make_shared<Function>(builder::tensor_mask<op::Less>(sequence_lengths, 1, 0, mask_shape, 0),
ParameterVector{sequence_lengths});

auto backend = runtime::Backend::create("INTERPRETER");

auto sequence_lengths_data = backend->create_tensor(element::u32, max_sequence_length);
copy_data(sequence_lengths_data, vector<uint32_t>{1, 3, 2});
auto result = backend->create_tensor(element::boolean, mask_shape);

auto handle = backend->compile(f);
handle->call_with_validate({result}, {sequence_lengths_data});
vector<char> expected{1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0};

EXPECT_EQ(expected, read_vector<char>(result));
}

@@ -273,110 +273,6 @@ TEST(autobroadcast, numpy_broadcast_for_matmul_op_nop)
EXPECT_EQ(result.at(1).get_shape(), (Shape{6, 5}));
}

TEST(autobroadcast, legacy_broadcast_scalar)
{
const Shape lhs{2, 3, 4, 5};
const Shape rhs{};
size_t start_match_axis{3};
const auto lhs_node = make_shared<op::Parameter>(element::f32, lhs);
const auto rhs_node = make_shared<op::Parameter>(element::f32, rhs);

const OutputVector result =
builder::legacy_broadcast_for_binary_operation(lhs_node, rhs_node, start_match_axis);

EXPECT_EQ(result.at(0).get_shape(), lhs);
EXPECT_EQ(result.at(1).get_shape(), lhs);
}

TEST(autobroadcast, legacy_broadcast_1elem_tensor)
{
const Shape lhs{2, 3, 4, 5};
const Shape rhs{1, 1, 1};
size_t start_match_axis{1};
const auto lhs_node = make_shared<op::Parameter>(element::f32, lhs);
const auto rhs_node = make_shared<op::Parameter>(element::f32, rhs);

const OutputVector result =
builder::legacy_broadcast_for_binary_operation(lhs_node, rhs_node, start_match_axis);

EXPECT_EQ(result.at(0).get_shape(), lhs);
EXPECT_EQ(result.at(1).get_shape(), lhs);
}

TEST(autobroadcast, legacy_broadcast_1d)
{
const Shape lhs{2, 3, 4, 5};
const Shape rhs{5};
size_t start_match_axis{3};
const auto lhs_node = make_shared<op::Parameter>(element::f32, lhs);
const auto rhs_node = make_shared<op::Parameter>(element::f32, rhs);

const OutputVector result =
builder::legacy_broadcast_for_binary_operation(lhs_node, rhs_node, start_match_axis);

EXPECT_EQ(result.at(0).get_shape(), lhs);
EXPECT_EQ(result.at(1).get_shape(), lhs);
}

TEST(autobroadcast, legacy_broadcast_2d)
{
const Shape lhs{2, 3, 4, 5};
const Shape rhs{4, 5};
size_t start_match_axis{2};
const auto lhs_node = make_shared<op::Parameter>(element::f32, lhs);
const auto rhs_node = make_shared<op::Parameter>(element::f32, rhs);

const OutputVector result =
builder::legacy_broadcast_for_binary_operation(lhs_node, rhs_node, start_match_axis);

EXPECT_EQ(result.at(0).get_shape(), lhs);
EXPECT_EQ(result.at(1).get_shape(), lhs);
}

TEST(autobroadcast, legacy_broadcast_2d_inside)
{
const Shape lhs{2, 3, 4, 5};
const Shape rhs{3, 4};
size_t start_match_axis{1};
const auto lhs_node = make_shared<op::Parameter>(element::f32, lhs);
const auto rhs_node = make_shared<op::Parameter>(element::f32, rhs);

const OutputVector result =
builder::legacy_broadcast_for_binary_operation(lhs_node, rhs_node, start_match_axis);

EXPECT_EQ(result.at(0).get_shape(), lhs);
EXPECT_EQ(result.at(1).get_shape(), lhs);
}

TEST(autobroadcast, legacy_broadcast_1d_left)
{
const Shape lhs{2, 3, 4, 5};
const Shape rhs{2};
size_t start_match_axis{0};
const auto lhs_node = make_shared<op::Parameter>(element::f32, lhs);
const auto rhs_node = make_shared<op::Parameter>(element::f32, rhs);

const OutputVector result =
builder::legacy_broadcast_for_binary_operation(lhs_node, rhs_node, start_match_axis);

EXPECT_EQ(result.at(0).get_shape(), lhs);
EXPECT_EQ(result.at(1).get_shape(), lhs);
}

TEST(autobroadcast, legacy_broadcast_identical)
{
const Shape lhs{2, 3, 4, 5};
size_t start_match_axis{0};
const auto lhs_node = make_shared<op::Parameter>(element::f32, lhs);
const auto rhs_node = make_shared<op::Parameter>(element::f32, lhs);

const OutputVector result =
builder::legacy_broadcast_for_binary_operation(lhs_node, rhs_node, start_match_axis);

EXPECT_EQ(result.at(0).get_shape(), lhs);
EXPECT_EQ(result.at(1).get_shape(), lhs);
}

TEST(autobroadcast, opset1_legacy_broadcast_scalar)
{
const Shape lhs{2, 3, 4, 5};

@@ -15,6 +15,7 @@
//*****************************************************************************

#include <memory>
#include <numeric>
#include <string>

#include "gtest/gtest.h"

@@ -20,6 +20,7 @@
#include <fstream>
#include <iterator>
#include <limits>
#include <numeric>
#include <sstream>
#include <stdexcept>
#include <vector>

@@ -20,6 +20,7 @@
#include <fstream>
#include <iterator>
#include <limits>
#include <numeric>
#include <sstream>
#include <stdexcept>
#include <vector>

@@ -513,36 +513,6 @@ TEST(provenance, empty_group)
}
}

TEST(provenance, scaled_quantize_concat_unsigned)
{
ngraph::Shape shape_a{2, 2};
auto A = make_shared<ngraph::op::Parameter>(ngraph::element::u8, shape_a);
auto An = make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1});
auto Ax = make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1});
A->add_provenance_tag("in0");
An->add_provenance_tag("in1");
Ax->add_provenance_tag("in2");
ngraph::Shape shape_r{2, 2};
auto QConcat = ngraph::builder::QuantizedConcatBuilder({A}, 0, {An}, {Ax});
auto f = make_shared<ngraph::Function>(ngraph::OutputVector{QConcat},
ngraph::ParameterVector{A, An, Ax});
QConcat->add_provenance_tag("hello");
auto check_if_result = [](shared_ptr<Node> n) {
// Pointer will cast to nullptr if this node is not a Result
auto ng_node = dynamic_pointer_cast<op::Result>(n);
bool is_result = (ng_node != nullptr);
return is_result;
};

for (auto n : f->get_ordered_ops())
{
if (!check_if_result(n))
{
ASSERT_EQ(n->get_provenance_tags().size(), 1);
}
}
}

TEST(provenance, opset1_upgrade_pass_topk)
{
test::ProvenanceEnabler provenance_enabler;
