[GNA] 4D concat align pass (#2970)
* [GNA] Fix RemovePermutationsNHWCToNCHWPass in cases where the permute input has multiple outData
* style
* [GNA] Linux test failure fix
parent 9070cb865d
commit 9f54989824
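The substance of the change, restated: a 4D concat whose dimensions before the concat axis are all 1 stores its inputs back to back in memory, exactly like a 2D concat over the flattened inputs, so such concats can be rewritten as 2D and spare the ConcatAlignFilter layers that unaligned inputs would otherwise require. Below is a minimal standalone sketch of that eligibility test, using a hypothetical helper name; the real logic lives in Concat4Dto2DPass further down in this diff.

#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

// Hypothetical helper (not plugin code) restating the eligibility test that
// Concat4Dto2DPass applies: every dimension before the concat axis must be 1,
// and the rewrite only pays off if some input size is not a multiple of 64.
bool convertibleTo2D(const std::vector<std::vector<std::size_t>>& inputDims, std::size_t axis) {
    bool anyUnaligned = false;
    for (const auto& dims : inputDims) {
        for (std::size_t i = 0; i < axis; i++)
            if (dims[i] != 1) return false;      // data before the axis would interleave
        std::size_t total = std::accumulate(dims.begin(), dims.end(),
                                            std::size_t(1), std::multiplies<std::size_t>());
        if (total % 64 != 0) anyUnaligned = true;  // would otherwise need a ConcatAlignFilter
    }
    return anyUnaligned;
}

// Example: inputs of shape {1, 1, 5, 3} concatenated on axis 0, 1, or 2 flatten
// to {1, 15} each (15 % 64 != 0), so the pass rewrites them as a 2D concat.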
@@ -6,7 +6,7 @@
 #include <legacy/graph_tools.hpp>
 #include "gna_plugin_log.hpp"
+#include "frontend/quantized_layer_params.hpp"
 #include <utility>
 #include <string>
 #include <vector>
@@ -441,7 +441,45 @@ inline void CNNNetSwapLayers(InferenceEngine::CNNLayerPtr lhs,
     lhs->outData.front()->setDims(rhs->outData.front()->getDims());
 }
 
+/**
+ * @brief Changes the tensor description of a data object by creating a new one with the correct
+ * description and replacing the original
+ */
+inline DataPtr CNNReplaceDataWithChangedTensorDescription(DataPtr old_data, TensorDesc& new_td) {
+    auto new_dataPtr = std::make_shared<Data>(old_data->getName() + "_reshaped", new_td);
+    getInputTo(new_dataPtr) = getInputTo(old_data);
+    auto creatorLayer = getCreatorLayer(old_data).lock();
+    getCreatorLayer(new_dataPtr) = creatorLayer;
+    size_t idx = -1;
+    for (size_t i = 0; i < creatorLayer->outData.size(); i++) {
+        if (areEqualDatas(old_data, creatorLayer->outData[i])) {
+            idx = i;
+            break;
+        }
+    }
+    if (idx == -1) THROW_GNA_EXCEPTION << "No idx for data was found";
+
+    creatorLayer->outData[idx] = new_dataPtr;
+    auto input_to = getInputTo(new_dataPtr);
+    for (auto& input : input_to) {
+        for (auto& input_idx : CNNLayerFindInsDataIdxes(old_data, input.second)) {
+            input.second->insData[input_idx] = new_dataPtr;
+        }
+    }
+    return new_dataPtr;
+}
+
+/**
+ * @brief Creates a Reshape layer with the given name and tensor description
+ */
+inline CNNLayerPtr CNNNetworkCreateReshape(TensorDesc td, std::string name, bool quantized) {
+    auto reshape = std::make_shared<ReshapeLayer>(LayerParams({name, "reshape", Precision::FP32}));
+    auto reshapeLayerWithQuant = quantized ? InferenceEngine::injectData<GNAPluginNS::QuantizedLayerParams>(reshape) : reshape;
+    auto dataPtr = std::make_shared<Data>(name + "_data", td);
+    getCreatorLayer(dataPtr) = reshapeLayerWithQuant;
+    reshapeLayerWithQuant->outData.push_back(dataPtr);
+
+    return reshapeLayerWithQuant;
+}
+
 /**
  * @brief insertLayer between given layers
@@ -594,6 +632,7 @@ std::vector<std::pair<CNNLayerPtr, int> > CNNNetGetPrevLayersSkip(CNNLayerPtr or
  * @brief remove given layer from topology, currently only layers with one input data and one output data supported
  */
 inline void CNNNetworkRemoveLayer(CNNLayerPtr layer, bool checkDims = true) {
+    gnalog() << "Removing " << layer->name << "layer";
     if (!layer) {
         THROW_IE_EXCEPTION << "Cannot remove layer pointed to NULL";
     }
@@ -408,6 +408,7 @@ void GNAPlugin::LoadNetwork(ICNNNetwork & _network) {
     passes->registerPass<EltwiseSplitOverChannelsPass>();
     passes->registerPass<InsertSplitAligningFilterPass>();
 
+    passes->registerPass<Concat4Dto2DPass>();
     passes->registerPass<InsertConcatAligningFilterPass>();
     passes->registerPass<ReorderConcatInputsPass>();
     if (policy.PermutePolicy != Policy::Permute::DISABLED) {
@@ -34,6 +34,11 @@ class Policy {
         AUTO_PERMUTE
     } PermutePolicy = Permute::DISABLED;
 
+    enum class Concat4Dto2DConversion {
+        DISABLED,
+        ENABLED
+    } ConcatConversionPolicy = Concat4Dto2DConversion::ENABLED;
+
     enum class ConcatAlignment {
         DISABLED,
         DISABLED_FOR_FP32,
@@ -634,6 +634,10 @@ void RemovePermutationsNHWCToNCHWPass::run() {
             continue;
         }
+
+        if (l->outData.size() != 1) {
+            continue;
+        }
         if (getInputTo(l->outData.front()).empty()) {
             continue;
         }
@@ -661,7 +665,18 @@ void RemovePermutationsNHWCToNCHWPass::run() {
         next->input()->setDims(toRemove->input()->getDims());
         next->input()->setLayout(Layout::NHWC);
         auto layerBeforePermute = CNNNetPrevLayer(toRemove);
-        layerBeforePermute->outData[0]->setLayout(Layout::NHWC);
+        DataPtr output = nullptr;
+        for (auto before_output : layerBeforePermute->outData) {
+            if (areEqualDatas(toRemove->input(), before_output)) {
+                output = before_output;
+                output->setLayout(Layout::NHWC);
+                break;
+            }
+        }
+        if (output == nullptr) {
+            THROW_GNA_EXCEPTION << "Could not find correct data link between " << toRemove->name << " and " << layerBeforePermute->name;
+        }
+
         auto* convolution = dynamic_cast<ConvolutionLayer*>(next.get());
         if (!convolution) {
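This hunk is the fix named in the commit message: when the layer before the Permute has several outData, outData[0] is not necessarily the data object feeding the Permute, so the pass now searches outData for the matching link instead. A standalone illustration of the failure mode, using plain structs rather than InferenceEngine types:

#include <cassert>
#include <string>
#include <vector>

// Plain-struct illustration (not IE types) of why indexing outData[0] is wrong
// when a producer has several outputs and only one of them feeds the permute.
struct Port { std::string consumer; std::string layout = "NCHW"; };

int main() {
    std::vector<Port> outData = { {"some_other_branch"}, {"permute"} };

    // Old behaviour: blindly relabel output 0 -- the wrong branch changes layout.
    // New behaviour: find the port actually connected to the permute first.
    Port* match = nullptr;
    for (auto& port : outData)
        if (port.consumer == "permute") { match = &port; break; }
    assert(match != nullptr);
    match->layout = "NHWC";  // only the permute's input is relabelled

    assert(outData[0].layout == "NCHW");  // the unrelated branch stays untouched
    return 0;
}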
@@ -808,6 +823,85 @@ void InsertCopyLayerPass::run() {
     }
 }
 
+void Concat4Dto2DPass::run() {
+    // Find 4D concat layers that would have to use ConcatAlignFilters and can be substituted by a 2D concat:
+    // a 4D concat with unaligned inputs needs ConcatAlignFilters, but if the sizes of all dimensions before
+    // the concat axis are 1, the concat can be changed to 2D instead. For example, if all inputs have the
+    // shape 1, 1, 5, 3, then for axis 0, 1, or 2 the change is made and the inputs are reshaped to 1, 15;
+    // but for the shape 2, 1, 5, 3 only axis 0 is valid, and the inputs are reshaped to 1, 30.
+    auto quantized = InferenceEngine::getInjectedData<QuantizedLayerParams>(pLayers->front());
+
+    if (getPassManager()->getPolicy().ConcatConversionPolicy == Policy::Concat4Dto2DConversion::DISABLED) return;
+    if (getPassManager()->getPolicy().ConcatAlignmentPolicy == Policy::ConcatAlignment::DISABLED) return;
+    if (getPassManager()->getPolicy().ConcatAlignmentPolicy == Policy::ConcatAlignment::DISABLED_FOR_FP32 && !quantized) return;
+
+    for (auto & l : *pLayers) {
+        LayerInfo info(l);
+        auto concatLayer = info.as<ConcatLayer*>();
+        if (!concatLayer) continue;
+        if (concatLayer->insData.size() < 1) continue;
+
+        auto dims_size = concatLayer->insData[0].lock()->getDims().size();
+        if (dims_size > 2) {
+            auto axis = concatLayer->_axis;
+            bool skip_layer = false;
+            for (int i = 0; i < axis; i++) {
+                if (concatLayer->insData[0].lock()->getDims()[i] != 1) skip_layer = true;
+            }
+            if (skip_layer) continue;
+            skip_layer = true;
+            std::vector<size_t> total_sizes;
+            for (auto& input : concatLayer->insData) {
+                auto input_dims = input.lock()->getDims();
+                total_sizes.push_back(std::accumulate(input_dims.begin(), input_dims.end(), size_t(1), std::multiplies<size_t>()));
+                if (total_sizes.back() % 64 != 0) skip_layer = false;
+            }
+            if (skip_layer) continue;
+
+            for (size_t input_idx = 0; input_idx != concatLayer->insData.size(); input_idx++) {
+                auto getLayerByIndex = [&concatLayer](int idx) {
+                    auto input = concatLayer->insData[idx];
+                    auto lockedInput = input.lock();
+                    if (!lockedInput) {
+                        THROW_GNA_EXCEPTION << "cannot get insdata : " << idx << " for layer: " << concatLayer->name;
+                    }
+                    return lockedInput;
+                };
+
+                auto concatInput = getLayerByIndex(input_idx);
+
+                auto tensor = InferenceEngine::TensorDesc(concatInput->getTensorDesc());
+                tensor.reshape(SizeVector({1, total_sizes[input_idx]}), Layout::NC);
+                auto reshapeName = l->name + "_input_" + std::to_string(input_idx) + "_reshape";
+                auto reshape = CNNNetworkCreateReshape(tensor, reshapeName, quantized);
+
+                CNNNetworkInsertLayer(getCreatorLayer(concatInput).lock(), l, reshape);
+                gnalog() << "\tInserted " << reshapeName << " between " << getCreatorLayer(concatInput).lock()->name << " and " << l->name << std::endl;
+            }
+
+            for (auto output_idx = 0; output_idx != concatLayer->outData.size(); output_idx++) {
+                auto output = concatLayer->outData[output_idx];
+                auto output_tensor_copy = TensorDesc(output->getTensorDesc());
+
+                auto dims = output_tensor_copy.getDims();
+                auto total_size = std::accumulate(dims.begin(), dims.end(), size_t(1), std::multiplies<size_t>());
+
+                auto new_tensor = output->getTensorDesc();
+                new_tensor.reshape(SizeVector({1, total_size}), Layout::NC);
+
+                auto new_output = CNNReplaceDataWithChangedTensorDescription(output, new_tensor);
+                gnalog() << "\tChanged " << output->getName() << " dims to 2D" << std::endl;
+
+                auto reshapeName = l->name + "_output_" + std::to_string(output_idx) + "_reshape";
+
+                auto reshape = CNNNetworkCreateReshape(output_tensor_copy, reshapeName, quantized);
+                CNNNetworkInsertLayer(l, nullptr, reshape, output_idx);
+                gnalog() << "\tInserted " << reshapeName << " after " << l->name << std::endl;
+            }
+        }
+    }
+}
+
 void InsertConcatAligningFilterPass::run() {
     auto quantized = InferenceEngine::getInjectedData<QuantizedLayerParams>(pLayers->front());
 
@@ -141,6 +141,11 @@ DECL_PASS(InsertCopyLayer);
  */
 DECL_PASS(InsertSplitAligningFilter);
 
+/**
+ * @brief Pass that changes a 4D concat to a 2D concat in cases that would otherwise have to use ConcatAlignFilter
+ */
+DECL_PASS(Concat4Dto2D);
+
 /**
  * @brief concat-aligning filter layer insertion required in cases when concat inputs size are not 64-aligned
  */
@@ -0,0 +1,34 @@
+// Copyright (C) 2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/concat_4D.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+std::vector<std::vector<size_t>> inShapes = {
+    {1, 1, 33, 16},
+    {1, 1, 65, 16},
+};
+
+std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                         InferenceEngine::Precision::FP16};
+
+std::map<std::string, std::string> additional_config = {
+    {"GNA_COMPACT_MODE", "NO"},
+    {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
+    {"GNA_SCALE_FACTOR_0", "2000.0"},
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_Concat4D_Basic, Concat4DLayerTest,
+    ::testing::Combine(
+        ::testing::ValuesIn(inShapes),
+        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(CommonTestUtils::DEVICE_GNA),
+        ::testing::Values(additional_config)),
+    Concat4DLayerTest::getTestCaseName);
+}  // namespace
@@ -0,0 +1,32 @@
+// Copyright (C) 2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+namespace LayerTestsDefinitions {
+using concat4DParamsTuple = typename std::tuple<
+    std::vector<size_t>,                // Inputs shape
+    InferenceEngine::Precision,         // Network precision
+    std::string,                        // Device name
+    std::map<std::string, std::string>  // Configuration
+>;
+
+class Concat4DLayerTest : public testing::WithParamInterface<concat4DParamsTuple>,
+                          virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<concat4DParamsTuple> &obj);
+protected:
+    void SetUp() override;
+};
+
+}  // namespace LayerTestsDefinitions
@@ -0,0 +1,70 @@
+// Copyright (C) 2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include <functional>
+
+#include "ie_core.hpp"
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "common_test_utils/data_utils.hpp"
+#include "functional_test_utils/precision_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+
+#include "single_layer_tests/concat_4D.hpp"
+
+namespace LayerTestsDefinitions {
+
+std::string Concat4DLayerTest::getTestCaseName(const testing::TestParamInfo<concat4DParamsTuple> &obj) {
+    int axis;
+    std::vector<size_t> inputShapes;
+    InferenceEngine::Precision netPrecision;
+    InferenceEngine::Precision inPrc, outPrc;
+    InferenceEngine::Layout inLayout, outLayout;
+    std::string targetName;
+    std::map<std::string, std::string> config;
+    std::tie(inputShapes, netPrecision, targetName, config) = obj.param;
+    std::ostringstream result;
+    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "trgDev=" << targetName << "_";
+    return result.str();
+}
+
+void Concat4DLayerTest::SetUp() {
+    int axis = 1;
+    InferenceEngine::SizeVector inputShape;
+    InferenceEngine::Precision netPrecision;
+    std::map<std::string, std::string> additional_config;
+    std::tie(inputShape, netPrecision, targetDevice, additional_config) = this->GetParam();
+    configuration.insert(additional_config.begin(), additional_config.end());
+
+    auto total_size = std::accumulate(inputShape.begin(), inputShape.end(), static_cast<size_t>(1), std::multiplies<size_t>());
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+    auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+    auto input = params[0];
+
+    auto constant_values = CommonTestUtils::generate_float_numbers(total_size, 11.0f, 12.0f);
+    auto constant = ngraph::builder::makeConstant(ngPrc, std::vector<size_t>({1, total_size}), constant_values);
+    auto first_reshape_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
+                                                                        ngraph::Shape{4}, std::vector<size_t>(inputShape));
+    auto first_reshape = std::make_shared<ngraph::op::v1::Reshape>(constant, first_reshape_pattern, false);
+    auto constant_2 = ngraph::builder::makeConstant(ngPrc, inputShape, constant_values);
+
+    auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector({first_reshape, input, constant_2}), axis);
+    auto act = ngraph::builder::makeActivation(concat, ngPrc, ngraph::helpers::ActivationTypes::Relu);
+    ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(act)};
+    function = std::make_shared<ngraph::Function>(results, params, "concat");
+}
+
+
+TEST_P(Concat4DLayerTest, CompareWithRefs) {
+    Run();
+};
+}  // namespace LayerTestsDefinitions
@@ -41,20 +41,7 @@ std::string MatmulSqueezeAddTest::getTestCaseName(testing::TestParamInfo<matmulS
 }
 
 void MatmulSqueezeAddTest::SetUp() {
-    auto generateFloatNumbers = [](float startFrom, float upTo, std::size_t vec_len) {
-        std::vector<float> res;
-
-        std::mt19937 gen(
-                static_cast<float>(std::chrono::high_resolution_clock::now().time_since_epoch().count()));
-
-        std::uniform_real_distribution<float> dist(startFrom, upTo);
-
-        for (int i = 0; i < vec_len; i++)
-            res.emplace_back(static_cast<float>(dist(gen)));
-
-        return res;
-    };
+    auto seed = std::chrono::high_resolution_clock::now().time_since_epoch().count();
     InferenceEngine::Precision netPrecision;
     std::map<std::string, std::string> tempConfig;
     std::vector<size_t> inputShape;
@@ -67,14 +54,14 @@ void MatmulSqueezeAddTest::SetUp() {
     auto params = ngraph::builder::makeParams(ngPrc, { inputShape });
 
     auto constant_0 = ngraph::builder::makeConstant<float>(ngPrc, { outputSize, inputShape[1] },
-        generateFloatNumbers(0, 1, outputSize * inputShape[1]), false);
+        CommonTestUtils::generate_float_numbers(outputSize * inputShape[1], 0, 1, seed), false);
     auto matmul_0 = std::make_shared<ngraph::op::MatMul>(params[0], constant_0, false, true);
 
     auto constant_1 = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 1 }, std::vector<size_t>{0});
     auto unsqueeze_0 = std::make_shared<ngraph::op::Unsqueeze>(matmul_0, constant_1);
 
     auto constant_2 = ngraph::builder::makeConstant<float>(ngPrc, { 1, inputShape[0], outputSize },
-        generateFloatNumbers(0, 1, inputShape[0] * outputSize), false);
+        CommonTestUtils::generate_float_numbers(inputShape[0] * outputSize, 0, 1, seed), false);
     auto add_0 = std::make_shared<ngraph::op::v1::Add>(unsqueeze_0, constant_2);
 
     auto constant_3 = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 1 }, std::vector<size_t>{0});
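One plausible reading of this refactor: previously each generateFloatNumbers call built its own clock-seeded std::mt19937, so the two constants came from unrelated streams, while now a single seed captured in SetUp is passed to both generate_float_numbers calls, and a failing run can be replayed from that one value. A standalone sketch of the capture-once pattern (hypothetical helper name, not test code):

#include <chrono>
#include <cstddef>
#include <random>
#include <vector>

// Standalone illustration: capture one seed up front and reuse it for every
// buffer, so a failure can be reproduced by logging just that seed.
std::vector<float> make_floats(std::size_t n, float lo, float hi, long long seed) {
    std::mt19937 gen(static_cast<std::mt19937::result_type>(seed));
    std::uniform_real_distribution<float> dist(lo, hi);
    std::vector<float> res;
    for (std::size_t i = 0; i < n; i++) res.push_back(dist(gen));
    return res;
}

int main() {
    auto seed = std::chrono::high_resolution_clock::now().time_since_epoch().count();
    auto weights = make_floats(16, 0.f, 1.f, seed);  // both buffers derive from
    auto biases  = make_floats(8, 0.f, 1.f, seed);   // the same recorded seed
    return (weights.size() == 16 && biases.size() == 8) ? 0 : 1;
}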
@@ -58,26 +58,13 @@ namespace SubgraphTestsDefinitions {
     std::vector<size_t> hidden_memory_dims {1, hiddenSize};
     std::vector<size_t> cell_memory_dims {1, hiddenSize};
 
-    const int seed = 0;
-    std::mt19937 gen(static_cast<float>(seed));
-
-    auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable {
-        std::vector<float> res;
-
-        std::uniform_real_distribution<float> dist(min, max);
-        for (int i = 0; i < vec_len; i++)
-            res.emplace_back(static_cast<float>(dist(gen)));
-
-        return res;
-    };
-
-    input_bias = generateFloatNumbers(inputSize, -0.25f, 0.0f);
-    input_weights = generateFloatNumbers(inputSize, 0.0f, 0.15f);
-    hidden_memory_init = generateFloatNumbers(hiddenSize, -0.2f, 0.2f);
-    cell_memory_init = generateFloatNumbers(hiddenSize, -0.2f, 0.2f);
-    weights_vals = generateFloatNumbers(4 * hiddenSize * inputSize, -0.1f, 0.1f);
-    reccurrenceWeights_vals = generateFloatNumbers(4 * hiddenSize * hiddenSize, -0.1f, 0.1f);
-    bias_vals = generateFloatNumbers(4 * hiddenSize, -0.25f, 0.15f);
+    input_bias = CommonTestUtils::generate_float_numbers(inputSize, -0.2f, 0.0f);
+    input_weights = CommonTestUtils::generate_float_numbers(inputSize, 0.0f, 0.1f);
+    hidden_memory_init = CommonTestUtils::generate_float_numbers(hiddenSize, -0.2f, 0.2f);
+    cell_memory_init = CommonTestUtils::generate_float_numbers(hiddenSize, -0.2f, 0.2f);
+    weights_vals = CommonTestUtils::generate_float_numbers(4 * hiddenSize * inputSize, -0.1f, 0.1f);
+    reccurrenceWeights_vals = CommonTestUtils::generate_float_numbers(4 * hiddenSize * hiddenSize, -0.1f, 0.1f);
+    bias_vals = CommonTestUtils::generate_float_numbers(4 * hiddenSize, -0.2f, 0.1f);
 
     auto input_parameter = ngraph::builder::makeParams(ngPrc, {input_dims});
@@ -55,27 +55,14 @@ void MultipleLSTMCellTest::SetUp() {
     std::vector<size_t> hidden_memory_dims {1, hiddenSize};
     std::vector<size_t> cell_memory_dims {1, hiddenSize};
 
-    const int seed = 0;
-    std::mt19937 gen(static_cast<float>(seed));
-
-    auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable {
-        std::vector<float> res;
-
-        std::uniform_real_distribution<float> dist(min, max);
-        for (int i = 0; i < vec_len; i++)
-            res.emplace_back(static_cast<float>(dist(gen)));
-
-        return res;
-    };
-
-    input_bias = generateFloatNumbers(inputSize, -0.25f, 0.0f);
-    input_weights = generateFloatNumbers(inputSize, 0.0f, 0.15f);
-    hidden_memory_init = generateFloatNumbers(hiddenSize, -0.2f, 0.2f);
-    cell_memory_init = generateFloatNumbers(hiddenSize, -0.2f, 0.2f);
-    weights_vals = generateFloatNumbers(4 * hiddenSize * inputSize, -0.1f, 0.1f);
-    weights_2_vals = generateFloatNumbers(4 * hiddenSize * hiddenSize, -0.1f, 0.1f);
-    reccurrenceWeights_vals = generateFloatNumbers(4 * hiddenSize * hiddenSize, -0.1f, 0.1f);
-    bias_vals = generateFloatNumbers(4 * hiddenSize, -0.25f, 0.15f);
+    input_bias = CommonTestUtils::generate_float_numbers(inputSize, -0.25f, 0.0f);
+    input_weights = CommonTestUtils::generate_float_numbers(inputSize, 0.0f, 0.15f);
+    hidden_memory_init = CommonTestUtils::generate_float_numbers(hiddenSize, -0.2f, 0.2f);
+    cell_memory_init = CommonTestUtils::generate_float_numbers(hiddenSize, -0.2f, 0.2f);
+    weights_vals = CommonTestUtils::generate_float_numbers(4 * hiddenSize * inputSize, -0.1f, 0.1f);
+    weights_2_vals = CommonTestUtils::generate_float_numbers(4 * hiddenSize * hiddenSize, -0.1f, 0.1f);
+    reccurrenceWeights_vals = CommonTestUtils::generate_float_numbers(4 * hiddenSize * hiddenSize, -0.1f, 0.1f);
+    bias_vals = CommonTestUtils::generate_float_numbers(4 * hiddenSize, -0.25f, 0.15f);
 
     auto input_parameter = ngraph::builder::makeParams(ngPrc, {input_dims});
@@ -49,21 +49,8 @@ void MultipleConcatTest::SetUp() {
     std::vector<size_t> input_dims { 1, inputSize };
     std::vector<size_t> constant_dims {1, constantSize};
 
-    const int seed = 0;
-    std::mt19937 gen(static_cast<float>(seed));
-
-    auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable {
-        std::vector<float> res;
-
-        std::uniform_real_distribution<float> dist(min, max);
-        for (int i = 0; i < vec_len; i++)
-            res.emplace_back(static_cast<float>(dist(gen)));
-
-        return res;
-    };
-
-    auto concat_1_vals = generateFloatNumbers(constantSize, -2.0f, 2.0f);
-    auto concat_2_vals = generateFloatNumbers(constantSize, -5.0f, 5.0f);
+    auto concat_1_vals = CommonTestUtils::generate_float_numbers(constantSize, -2.0f, 2.0f);
+    auto concat_2_vals = CommonTestUtils::generate_float_numbers(constantSize, -5.0f, 5.0f);
 
     auto input_parameter = ngraph::builder::makeParams(ngPrc, {input_dims});
@@ -52,19 +52,6 @@ void PermConvPermConcat::SetUp() {
     std::vector<size_t> permute_in_order = { 0, 3, 1, 2 };
    std::vector<size_t> permute_out_order = { 0, 2, 3, 1 };
 
-    const int seed = 0;
-    std::mt19937 gen(static_cast<float>(seed));
-
-    auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable {
-        std::vector<float> res;
-
-        std::uniform_real_distribution<float> dist(min, max);
-        for (int i = 0; i < vec_len; i++)
-            res.emplace_back(static_cast<float>(dist(gen)));
-
-        return res;
-    };
-
     auto input_parameter = ngraph::builder::makeParams(ngPrc, {input_dims});
 
     auto reshape_in_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
@@ -79,7 +66,7 @@ void PermConvPermConcat::SetUp() {
     auto conv_in_shape = permute_in->get_output_shape(0);
     auto conv_weights_size = output_channels * (conv_in_shape[1]) * kernel_shape[0] * kernel_shape[1];
     auto conv = ngraph::builder::makeConvolution(permute_in, ngPrc, {kernel_shape[0], kernel_shape[1]}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
-        ngraph::op::PadType::VALID, output_channels, false, generateFloatNumbers(conv_weights_size, -0.5f, 0.5f));
+        ngraph::op::PadType::VALID, output_channels, false, CommonTestUtils::generate_float_numbers(conv_weights_size, -0.5f, 0.5f));
 
     auto permute_out_params = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64,
                                                                          ngraph::Shape{4},
@@ -88,7 +75,8 @@ void PermConvPermConcat::SetUp() {
 
     auto permute_out_shape = permute_out->get_output_shape(0);
 
-    auto concat_const = ngraph::builder::makeConstant(ngPrc, {1, 1, 1, permute_out_shape[3]}, generateFloatNumbers(permute_out_shape[3], -10, 10));
+    auto concat_const = ngraph::builder::makeConstant(ngPrc, {1, 1, 1, permute_out_shape[3]},
+                                                      CommonTestUtils::generate_float_numbers(permute_out_shape[3], -10, 10));
 
     auto concat = ngraph::builder::makeConcat({permute_out, concat_const}, 2);
@@ -31,6 +31,21 @@ static void fill_data_sine(float *data, size_t size, float center, float ampl, f
     }
 }
 
+/**
+ * @brief Create vector of floats with length of vec_len, with values ranging from min to max,
+ * with initial seed equal to variable seed with default of 0
+ */
+static inline std::vector<float> generate_float_numbers(std::size_t vec_len, float min, float max, int seed = 0) {
+    std::vector<float> res;
+    std::mt19937 gen(static_cast<float>(seed));
+
+    std::uniform_real_distribution<float> dist(min, max);
+    for (int i = 0; i < vec_len; i++)
+        res.emplace_back(static_cast<float>(dist(gen)));
+
+    return res;
+}
+
 /**
  * Fill blob with value data blob. Broadcast semantic is included.
  * Broadcasting with alignment through last dimension.
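For context, a minimal usage sketch of the new shared helper, assuming it is declared in the CommonTestUtils namespace of common_test_utils/data_utils.hpp, as the call sites above suggest:

#include <cassert>
#include <vector>
#include "common_test_utils/data_utils.hpp"  // assumed location of the helper above

int main() {
    // Same seed (default 0) -> same sequence, so the generated test data is reproducible.
    auto v1 = CommonTestUtils::generate_float_numbers(8, -0.5f, 0.5f);
    auto v2 = CommonTestUtils::generate_float_numbers(8, -0.5f, 0.5f);
    assert(v1 == v2);

    // An explicit seed yields a different, but equally reproducible, sequence.
    auto v3 = CommonTestUtils::generate_float_numbers(8, -0.5f, 0.5f, 42);
    return v3.size() == 8 ? 0 : 1;
}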