[IE TESTS] GatherTree single layer test has been created. (#2006)
* [IE TESTS] GatherTree op reference function has been created.
* [IE TESTS] Added GatherTree single layer test.
* [IE TESTS] Fixed code style.
* [IE TESTS] GatherTree test FP32 precision was enabled.
* [IE TESTS] Refactored the Builder::makeConstant procedure. The refactoring is aimed at managing the range of random data used to initialize constants.
* [IE TESTS] GatherTree test was extended with constant inputs.
* [IE TESTS] GatherTree reference rewritten as a non-templated function.
* [IE TESTS] GatherTree test input shape index enum removed.
* Revert "[IE TESTS] Refactoring of Builder::makeConstatn procedure". This reverts commit 2648172e00ccca266d39e8775b890b8a8395f57c.
* [IE TESTS] makeConstant was augmented with random data range parameters (see the usage sketch below).
* [IE TESTS] GatherTree test was rewritten using the makeConstant function.
* [IE TESTS] GatherTree test calls the templated makeConstant.
* [IE TESTS] GatherTree test code style fix.
This commit is contained in: parent 52ebe68cc7, commit 2c7f06e08f
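The snippet below is a minimal, hypothetical usage sketch (not part of the commit) of the augmented ngraph::builder::makeConstant shown later in the diff: with an empty data vector and random = true, the constant is filled with values drawn uniformly from [startFrom, upTo], which is how GatherTreeLayerTest::SetUp keeps its constant inputs inside the valid beam-index range. The helper name makeBeamIndexConstant and the concrete shape are illustrative assumptions.

#include <memory>
#include <vector>

#include "ngraph_functions/builders.hpp"

// Illustrative helper (hypothetical): builds a random constant whose values stay
// within the valid beam-index range, mirroring the CONSTANT branch of SetUp().
std::shared_ptr<ngraph::Node> makeBeamIndexConstant() {
    const ngraph::element::Type ngPrc = ngraph::element::f32;
    const std::vector<size_t> inputShape = {5, 1, 10};   // {max_time, batch_size, beam_width}
    const uint32_t maxBeamIndex = inputShape.at(2) - 1;  // largest valid beam index

    // data is empty and random == true, so the values are generated uniformly
    // in [startFrom, upTo] = [1, maxBeamIndex] by the generateVector helpers.
    return ngraph::builder::makeConstant<float>(ngPrc, inputShape, {}, true,
                                                maxBeamIndex /* upTo */, 1 /* startFrom */);
}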
@@ -0,0 +1,34 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "single_layer_tests/gather_tree.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
        InferenceEngine::Precision::I32
};

const std::vector<std::vector<size_t>> inputShapes = { {5, 1, 10}, {1, 1, 10}, {20, 1, 10}, {20, 20, 10} };

const std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
        ngraph::helpers::InputLayerType::CONSTANT,
        ngraph::helpers::InputLayerType::PARAMETER
};

INSTANTIATE_TEST_CASE_P(Basic_smoke, GatherTreeLayerTest,
        ::testing::Combine(
                ::testing::ValuesIn(inputShapes),
                ::testing::ValuesIn(secondaryInputTypes),
                ::testing::ValuesIn(netPrecisions),
                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        GatherTreeLayerTest::getTestCaseName);

}  // namespace
@@ -0,0 +1,34 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <tuple>
#include <string>
#include <vector>
#include <memory>

#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"

namespace LayerTestsDefinitions {

using GatherTreeParamsTuple = typename std::tuple<
        std::vector<size_t>,              // Input tensors shape
        ngraph::helpers::InputLayerType,  // Secondary input type
        InferenceEngine::Precision,       // Network precision
        std::string>;                     // Device name

class GatherTreeLayerTest : public testing::WithParamInterface<GatherTreeParamsTuple>,
                            virtual public LayerTestsUtils::LayerTestsCommon {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<GatherTreeParamsTuple> &obj);
    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;

protected:
    void SetUp() override;
};

}  // namespace LayerTestsDefinitions
@@ -0,0 +1,87 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <tuple>
#include <string>
#include <vector>
#include <memory>

#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/precision_utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"

#include "single_layer_tests/gather_tree.hpp"

namespace LayerTestsDefinitions {
std::string GatherTreeLayerTest::getTestCaseName(const testing::TestParamInfo<GatherTreeParamsTuple> &obj) {
    std::vector<size_t> inputShape;
    InferenceEngine::Precision netPrecision;
    ngraph::helpers::InputLayerType secondaryInputType;
    std::string targetName;

    std::tie(inputShape, secondaryInputType, netPrecision, targetName) = obj.param;

    std::ostringstream result;
    result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
    result << "secondaryInputType=" << secondaryInputType << "_";
    result << "netPRC=" << netPrecision.name() << "_";
    result << "targetDevice=" << targetName;
    return result.str();
}

void GatherTreeLayerTest::SetUp() {
    std::vector<size_t> inputShape;
    InferenceEngine::Precision netPrecision;
    ngraph::helpers::InputLayerType secondaryInputType;

    std::tie(inputShape, secondaryInputType, netPrecision, targetDevice) = GetParam();

    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

    std::shared_ptr<ngraph::Node> inp2;
    std::shared_ptr<ngraph::Node> inp3;
    std::shared_ptr<ngraph::Node> inp4;

    auto paramsIn = ngraph::builder::makeParams(ngPrc, { inputShape });
    if (ngraph::helpers::InputLayerType::PARAMETER == secondaryInputType) {
        auto paramsSecond = ngraph::builder::makeParams(ngPrc, { inputShape, {inputShape.at(1)}, {}});
        paramsIn.insert(paramsIn.end(), paramsSecond.begin(), paramsSecond.end());

        inp2 = paramsIn.at(1);
        inp3 = paramsIn.at(2);
        inp4 = paramsIn.at(3);
    } else if (ngraph::helpers::InputLayerType::CONSTANT == secondaryInputType) {
        auto maxBeamIndex = inputShape.at(2) - 1;

        inp2 = ngraph::builder::makeConstant<float>(ngPrc, inputShape, {}, true, maxBeamIndex);
        inp3 = ngraph::builder::makeConstant<float>(ngPrc, {inputShape.at(1)}, {}, true, maxBeamIndex);
        inp4 = ngraph::builder::makeConstant<float>(ngPrc, {}, {}, true, maxBeamIndex);
    } else {
        throw std::runtime_error("Unsupported inputType");
    }

    auto operationResult = std::make_shared<ngraph::opset4::GatherTree>(paramsIn.front(), inp2, inp3, inp4);

    ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(operationResult)};
    function = std::make_shared<ngraph::Function>(results, paramsIn, "GatherTree");
}

InferenceEngine::Blob::Ptr GatherTreeLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
    auto& shape = function->get_parameters()[0]->get_output_shape(0);
    auto& vecDims = info.getTensorDesc().getDims();

    auto maxBeamIndx = shape.at(2) - 1;

    if (vecDims.size() == 1 || vecDims.size() == 0) {  // max_seq_len vector || end_token
        return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), maxBeamIndx, maxBeamIndx / 2);
    }

    return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), maxBeamIndx);
}

TEST_P(GatherTreeLayerTest, CompareWithRefs) {
    Run();
};

}  // namespace LayerTestsDefinitions
@@ -24,27 +24,28 @@ makeParams(const element::Type &type, const std::vector<std::pair<std::string, s

template<typename T>
std::shared_ptr<Node> makeConstant(const element::Type &type, const std::vector<size_t> &shape,
-                                   const std::vector<T> &data, bool random = false) {
+                                   const std::vector<T> &data, bool random = false,
+                                   uint32_t upTo = 10, uint32_t startFrom = 1) {
    std::shared_ptr<ngraph::Node> weightsNode;

#define makeNode(TYPE) \
        case TYPE: \
            weightsNode = std::make_shared<ngraph::opset1::Constant>( \
                    type, shape, \
-                    random ? NGraphFunctions::Utils::generateVector<TYPE>(ngraph::shape_size(shape)) : \
+                    random ? NGraphFunctions::Utils::generateVector<TYPE>(ngraph::shape_size(shape), upTo, startFrom) : \
                    NGraphFunctions::Utils::castVector<T, ngraph::helpers::nGraphTypesTrait<TYPE>::value_type >(data)); \
            break;
    switch (type) {
        case ngraph::element::Type_t::bf16:
            weightsNode = std::make_shared<ngraph::opset1::Constant>(
                    type, shape,
-                    random ? NGraphFunctions::Utils::generateBF16Vector(ngraph::shape_size(shape)) :
+                    random ? NGraphFunctions::Utils::generateBF16Vector(ngraph::shape_size(shape), upTo, startFrom) :
                    NGraphFunctions::Utils::castVector<T, ngraph::bfloat16>(data));
            break;
        case ngraph::element::Type_t::f16:
            weightsNode = std::make_shared<ngraph::opset1::Constant>(
                    type, shape,
-                    random ? NGraphFunctions::Utils::generateF16Vector(ngraph::shape_size(shape)) :
+                    random ? NGraphFunctions::Utils::generateF16Vector(ngraph::shape_size(shape), upTo, startFrom) :
                    NGraphFunctions::Utils::castVector<T, ngraph::float16>(data));
            break;
        makeNode(ngraph::element::Type_t::f32);
@@ -17,13 +17,13 @@ namespace Utils {

template<ngraph::element::Type_t dType>
std::vector<typename ngraph::helpers::nGraphTypesTrait<dType>::value_type> inline
-generateVector(size_t vec_len) {
+generateVector(size_t vec_len, uint32_t upTo = 10, uint32_t startFrom = 1) {
    std::vector<typename ngraph::helpers::nGraphTypesTrait<dType>::value_type> res;

    std::mt19937 gen(
            static_cast<unsigned long>(std::chrono::high_resolution_clock::now().time_since_epoch().count()));
    // chose values between this range to avoid type overrun (e.g. in case of I8 precision)
-    std::uniform_int_distribution<unsigned long> dist(1, 10);
+    std::uniform_int_distribution<unsigned long> dist(startFrom, upTo);

    for (int i = 0; i < vec_len; i++) {
        res.push_back(
@@ -32,13 +32,13 @@ generateVector(size_t vec_len) {
    return res;
}

-std::vector<ngraph::float16> inline generateF16Vector(size_t vec_len) {
+std::vector<ngraph::float16> inline generateF16Vector(size_t vec_len, uint32_t upTo = 10, uint32_t startFrom = 1) {
    std::vector<ngraph::float16> res;

    std::mt19937 gen(
            static_cast<unsigned long>(std::chrono::high_resolution_clock::now().time_since_epoch().count()));
    // chose values between this range to avoid type overrun (e.g. in case of I8 precision)
-    std::uniform_int_distribution<unsigned long> dist(1, 10);
+    std::uniform_int_distribution<unsigned long> dist(startFrom, upTo);

    for (int i = 0; i < vec_len; i++) {
        res.emplace_back(ngraph::float16(static_cast<float>(dist(gen))));
@@ -46,13 +46,13 @@ std::vector<ngraph::float16> inline generateF16Vector(size_t vec_len) {
    return res;
}

-std::vector<ngraph::bfloat16> inline generateBF16Vector(size_t vec_len) {
+std::vector<ngraph::bfloat16> inline generateBF16Vector(size_t vec_len, uint32_t upTo = 10, uint32_t startFrom = 1) {
    std::vector<ngraph::bfloat16> res;

    std::mt19937 gen(
            static_cast<unsigned long>(std::chrono::high_resolution_clock::now().time_since_epoch().count()));
    // chose values between this range to avoid type overrun (e.g. in case of I8 precision)
-    std::uniform_int_distribution<unsigned long> dist(1, 10);
+    std::uniform_int_distribution<unsigned long> dist(startFrom, upTo);

    for (int i = 0; i < vec_len; i++) {
        res.emplace_back(ngraph::bfloat16(static_cast<float>(dist(gen))));
@@ -0,0 +1,39 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/type/element_type.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace reference
        {
            void gather_tree(const char* step_ids,
                             const char* parent_ids,
                             const char* max_seq_len,
                             const char* end_token,
                             char* out,
                             const Shape& step_ids_shape,
                             const Shape& parent_ids_shape,
                             const Shape& max_seq_len_shape,
                             const Shape& end_token_shape,
                             const element::Type& type);
        }
    }
}
ngraph/core/reference/src/runtime/reference/gather_tree.cpp (new file, 145 lines)
@@ -0,0 +1,145 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <cmath>
#include <numeric>
#include <stdio.h>

#include "ngraph/check.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/runtime/reference/gather_tree.hpp"

using namespace ngraph;

static size_t _asIndex(const char* source, const element::Type& element_type)
{
    // According to the GatherTree op specification only I32 and FP32 precisions are supported.
    switch (element_type)
    {
    case element::Type_t::f32:
    {
        float tmpBuff = 0.f;
        memcpy(&tmpBuff, source, sizeof(float));
        return tmpBuff;
    }
    case element::Type_t::i32:
    {
        int32_t tmpBuff = 0;
        memcpy(&tmpBuff, source, sizeof(int32_t));
        return tmpBuff;
    }
    default:
    {
        throw ngraph_error(std::string("Unsupported input data type: ") +
                           element_type.get_type_name());
    }
    }
}

// This is an implementation of the algorithm from the tensorflow 1.5 sources.
void runtime::reference::gather_tree(const char* step_ids,
                                     const char* parent_ids,
                                     const char* max_seq_len,
                                     const char* end_token,
                                     char* out,
                                     const Shape& step_ids_shape,
                                     const Shape& parent_ids_shape,
                                     const Shape& max_seq_len_shape,
                                     const Shape& end_token_shape,
                                     const element::Type& element_type)
{
    if (step_ids_shape != parent_ids_shape)
    {
        throw ngraph_error("step_ids shape and parent_ids shape must be the same");
    }
    if (step_ids_shape.size() != 3)
    {
        throw ngraph_error("step_ids must be a 3-tensor");
    }
    if (!is_vector(max_seq_len_shape))
    {
        throw ngraph_error("max_seq_len must be a vector");
    }
    if (!is_scalar(end_token_shape))
    {
        throw ngraph_error("end_token must be a scalar");
    }

    const size_t max_time = step_ids_shape.at(0);
    const size_t batch_size = step_ids_shape.at(1);
    const size_t beam_width = step_ids_shape.at(2);

    const size_t elem_size = element_type.size();

    if (max_seq_len_shape.front() != batch_size)
    {
        throw ngraph_error("max_seq_len must have size of BATCH_SIZE");
    }

    ngraph::CoordinateTransform cordinate_transform(step_ids_shape);

    for (const auto& coord : cordinate_transform)
    {
        memcpy(out + cordinate_transform.index(coord) * elem_size, end_token, elem_size);
    }

    for (size_t batch = 0; batch < batch_size; ++batch)
    {
        for (size_t beam = 0; beam < beam_width; ++beam)
        {
            const size_t max_seq_in_beam =
                std::min(max_time, _asIndex(max_seq_len + batch * elem_size, element_type));

            if (max_seq_in_beam == 0)
            {
                continue;
            }

            auto offset = cordinate_transform.index({max_seq_in_beam - 1, batch, beam}) * elem_size;

            memcpy(out + offset, step_ids + offset, elem_size);

            size_t parent = _asIndex(parent_ids + offset, element_type);

            for (size_t level = max_seq_in_beam - 1; level-- > 0;)
            {
                memcpy(out + cordinate_transform.index({level, batch, beam}) * elem_size,
                       step_ids + cordinate_transform.index({level, batch, parent}) * elem_size,
                       elem_size);

                parent = _asIndex(parent_ids +
                                      cordinate_transform.index({level, batch, parent}) * elem_size,
                                  element_type);
            }

            bool finished = false;
            for (size_t time = 0; time < max_seq_in_beam; ++time)
            {
                if (finished)
                {
                    memcpy(out + cordinate_transform.index({time, batch, beam}) * elem_size,
                           end_token,
                           elem_size);
                }
                else if (_asIndex(out + cordinate_transform.index({time, batch, beam}) * elem_size,
                                  element_type) == _asIndex(end_token, element_type))
                {
                    finished = true;
                }
            }
        }
    }
}
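Below is a minimal, hypothetical usage sketch (not part of the commit) showing how the byte-oriented gather_tree reference above can be driven with I32 buffers; the shapes and values are illustrative only, chosen to satisfy the shape checks in the implementation.

#include <cstdint>
#include <vector>

#include "ngraph/runtime/reference/gather_tree.hpp"
#include "ngraph/shape.hpp"

void gather_tree_i32_example()
{
    // step_ids/parent_ids are laid out as [max_time, batch_size, beam_width].
    const ngraph::Shape ids_shape{2, 1, 2};
    std::vector<int32_t> step_ids{1, 2, 3, 4};
    std::vector<int32_t> parent_ids{0, 0, 0, 1};  // beam indices, each < beam_width
    std::vector<int32_t> max_seq_len{2};          // one entry per batch element
    int32_t end_token = 9;
    std::vector<int32_t> out(step_ids.size());

    // The reference works on raw bytes and is told the element type explicitly.
    ngraph::runtime::reference::gather_tree(reinterpret_cast<const char*>(step_ids.data()),
                                            reinterpret_cast<const char*>(parent_ids.data()),
                                            reinterpret_cast<const char*>(max_seq_len.data()),
                                            reinterpret_cast<const char*>(&end_token),
                                            reinterpret_cast<char*>(out.data()),
                                            ids_shape,
                                            ids_shape,
                                            ngraph::Shape{1},
                                            ngraph::Shape{},
                                            ngraph::element::i32);
}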
@@ -59,6 +59,8 @@
#include "ngraph/runtime/reference/floor.hpp"
#include "ngraph/runtime/reference/gather.hpp"
#include "ngraph/runtime/reference/gather_nd.hpp"
+#include "ngraph/runtime/reference/gather_tree.hpp"
+#include "ngraph/runtime/reference/gather_tree.hpp"
#include "ngraph/runtime/reference/gru_cell.hpp"
#include "ngraph/runtime/reference/log.hpp"
#include "ngraph/runtime/reference/lrn.hpp"
@@ -1258,6 +1260,20 @@ protected:

                break;
            }
+            case OP_TYPEID::GatherTree_v1:
+            {
+                reference::gather_tree(args[0]->get_data_ptr<const char>(),
+                                       args[1]->get_data_ptr<const char>(),
+                                       args[2]->get_data_ptr<const char>(),
+                                       args[3]->get_data_ptr<const char>(),
+                                       out[0]->get_data_ptr<char>(),
+                                       node.get_input_shape(0),
+                                       node.get_input_shape(1),
+                                       node.get_input_shape(2),
+                                       node.get_input_shape(3),
+                                       args[1]->get_element_type());
+                break;
+            }

            // Fused Ops are not supported in interpreter. They need to be decomposed before execution
            case OP_TYPEID::DepthToSpace:
@@ -29,6 +29,7 @@ NGRAPH_OP(LogicalAnd, op::v1)
NGRAPH_OP(LogicalOr, op::v1)
NGRAPH_OP(LogicalXor, op::v1)
NGRAPH_OP(LogicalNot, op::v1)
+NGRAPH_OP(GatherTree, op::v1)
#undef ID_SUFFIX

#define ID_SUFFIX(NAME) NAME##_v3