[GNA] Fixes for concat with multiple const inputs (#3130)

Elizaveta Lobanova, 2020-11-27 15:52:44 +03:00 (committed by GitHub)
commit a889acec53, parent 49589855d8
5 changed files with 80 additions and 12 deletions
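
The bug concerns Concat layers fed mostly by Const inputs. As a minimal sketch of that pattern (illustrative only, built with the same nGraph test builders the new test below uses; the function name is not from this commit):

#include <memory>
#include <vector>

#include <ngraph/ngraph.hpp>
#include "ngraph_functions/builders.hpp"

// One functional (Parameter) input and one Const input feeding a single
// Concat: the pattern the GNA plugin previously mis-quantized.
std::shared_ptr<ngraph::Function> makeConcatWithConstInput() {
    auto ngPrc = ngraph::element::f32;
    ngraph::OutputVector concatInputs;
    auto params = ngraph::builder::makeParams(ngPrc, { {1, 8} });
    concatInputs.push_back(params[0]);
    auto constNode = ngraph::builder::makeConstant(ngPrc, {1, 8}, std::vector<float>(8, 0.5f));
    concatInputs.push_back(constNode);
    auto concat = ngraph::builder::makeConcat(concatInputs, 1);
    ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(concat) };
    return std::make_shared<ngraph::Function>(results, params, "ConcatWithConst");
}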


@@ -476,7 +476,8 @@ class DataQuantizer<Desc, InferenceEngine::CNNLayer *> : public DataQuantizerBas
         if (LayerInfo(*cnnLayer).isActivation() ||
             LayerInfo(*cnnLayer).isCopy() ||
             LayerInfo(*cnnLayer).isNonFunctional() ||
-            LayerInfo(*cnnLayer).isPermute()) {
+            LayerInfo(*cnnLayer).isPermute() ||
+            LayerInfo(*cnnLayer).isConst()) {
             // precision of activation layers is always equal input precision
             for (auto &&outData : cnnLayer->outData) {
                 outData->setPrecision(Desc::mandatory().getInputPrecision());
@@ -485,8 +486,12 @@ class DataQuantizer<Desc, InferenceEngine::CNNLayer *> : public DataQuantizerBas
             }
         cnnLayer->precision = Desc::mandatory().getInputPrecision();
-        if (cnnLayer->type == "Const") {
-            if (cnnLayer->blobs["custom"]->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) {
+        if (LayerInfo(*cnnLayer).isConst()) {
+            auto initial_precision = cnnLayer->blobs["custom"]->getTensorDesc().getPrecision();
+            // TODO I32 must be handled separately when it'll be supported
+            IE_ASSERT(initial_precision != InferenceEngine::Precision::I32);
+            if (initial_precision == InferenceEngine::Precision::FP16) {
                 cnnLayer->blobs["custom"] = make_fp32_blob(cnnLayer->blobs["custom"]);
             }
             auto const_scale_factor = InferenceEngine::getInjectedData<QuantizedLayerParams>(*cnnLayer)->_dst_quant.GetScale();
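
make_fp32_blob above is the plugin's existing helper. As a rough, hypothetical sketch of the FP16-to-FP32 widening it stands for, using only public InferenceEngine APIs (the name and details here are assumptions, not the plugin's code):

#include <ie_blob.h>
#include <precision_utils.h>

// Hypothetical stand-in for make_fp32_blob: widen an FP16 blob to FP32
// with the same dims and layout.
InferenceEngine::Blob::Ptr fp16_to_fp32_blob(const InferenceEngine::Blob::Ptr& src) {
    const auto& srcDesc = src->getTensorDesc();
    InferenceEngine::TensorDesc dstDesc(InferenceEngine::Precision::FP32,
                                        srcDesc.getDims(), srcDesc.getLayout());
    auto dst = InferenceEngine::make_shared_blob<float>(dstDesc);
    dst->allocate();
    // Element-wise half -> float conversion over the whole buffer.
    InferenceEngine::PrecisionUtils::f16tof32Arrays(dst->buffer().as<float*>(),
                                                    src->cbuffer().as<const short*>(),
                                                    src->size());
    return dst;
}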


@@ -1668,11 +1668,17 @@ void FuseMultipleIdentitiesPass::run() {
         };
         auto prevLayersReached = CNNNetGetPrevLayersSkip(l, isFunctional);
-        prevLayersReached.erase(std::remove_if(prevLayersReached.begin(),
-                                               prevLayersReached.end(),
-                                               [] (const std::pair<CNNLayerPtr, int> & candidate) {
-                                                   return LayerInfo(candidate.first).isLink();
-                                               }), prevLayersReached.end());
+        if (!prevLayersReached.empty()) {
+            prevLayersReached.erase(std::remove_if(prevLayersReached.begin(),
+                                                   prevLayersReached.end(),
+                                                   [] (const std::pair<CNNLayerPtr, int> & candidate) {
+                                                       return LayerInfo(candidate.first).isLink();
+                                                   }), prevLayersReached.end());
+            if (prevLayersReached.empty()) {
+                gnalog() << ", connected to link output only" << std::endl;
+                continue;
+            }
+        }
         if (prevLayersReached.size() != 1) {
             std::stringstream layers;
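
The guarded erase-remove idiom above may be easier to read standalone. A self-contained sketch, with std::string standing in for CNNLayerPtr and a prefix test standing in for LayerInfo::isLink():

#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
    using Candidate = std::pair<std::string, int>;
    // Two predecessors, both of them link layers.
    std::vector<Candidate> prevLayers = { {"link_0", 0}, {"link_1", 0} };

    if (!prevLayers.empty()) {
        // Erase-remove idiom: drop every link-layer candidate.
        prevLayers.erase(std::remove_if(prevLayers.begin(), prevLayers.end(),
                                        [](const Candidate& c) {
                                            return c.first.rfind("link", 0) == 0;  // stand-in for isLink()
                                        }),
                         prevLayers.end());
        // The new guard: every predecessor was a link output, so skip the layer.
        if (prevLayers.empty()) {
            std::cout << "connected to link output only" << std::endl;
            return 0;  // the pass does `continue;` here
        }
    }
    std::cout << prevLayers.size() << " functional predecessor(s)" << std::endl;
    return 0;
}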


@@ -43,6 +43,10 @@ std::vector<std::string> disabledTestPatterns() {
         // TODO: Issue 39358
         R"(.*unaligned.*MultipleConcatTest.*)",
         R"(.*ActivationConcatsEltwise.*CS=35.*)",
+        // TODO: Issue 38974
+        R"(.*ConcatMultiInput.CompareWithRefConstOnly.*IS=\(1.8\).*)",
+        R"(.*ConcatMultiInput.CompareWithRefConstOnly.*IS=\(1.16\).*)",
+        R"(.*ConcatMultiInput.CompareWithRefConstOnly.*IS=\(1.32\).*)",
         // TODO: Issue: 40960
         R"(.*(ConstantResultSubgraphTest).*)",
         // TODO: Issue: 29577
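
These entries are regular expressions matched against full test names; the actual skip check lives in the shared test utilities, outside this diff. A sketch of the assumed mechanics:

#include <regex>
#include <string>
#include <vector>

// Assumed mechanics: a test is skipped when its full name matches any pattern.
bool isDisabled(const std::string& fullTestName, const std::vector<std::string>& patterns) {
    for (const auto& p : patterns) {
        if (std::regex_match(fullTestName, std::regex(p))) {
            return true;
        }
    }
    return false;
}

// e.g. isDisabled("smoke/ConcatMultiInput.CompareWithRefConstOnly/IS=(1.8)_...", disabledTestPatterns())
// returns true, because ".*ConcatMultiInput.CompareWithRefConstOnly.*IS=\(1.8\).*" matches.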


@@ -24,7 +24,14 @@ namespace LayerTestsDefinitions {
 class ConcatMultiInput : public testing::WithParamInterface<concatMultiParams>,
                          virtual public LayerTestsUtils::LayerTestsCommon {
+private:
+    std::vector<size_t> paramSize;
+    ngraph::element::Type ngPrc;
+    std::vector<std::vector<size_t>> inputShapes;
+
 public:
+    void GenerateStridedSliceModel();
+    void GenerateConstOnlyModel();
     static std::string getTestCaseName(testing::TestParamInfo<concatMultiParams> obj);

 protected:


@@ -38,17 +38,19 @@ std::string ConcatMultiInput::getTestCaseName(testing::TestParamInfo<concatMulti
 }

 void ConcatMultiInput::SetUp() {
-    std::vector<std::vector<size_t>> inputShapes;
     InferenceEngine::Precision netPrecision;
     std::map<std::string, std::string> additional_config;
     std::tie(inputShapes, netPrecision, targetDevice, additional_config) = this->GetParam();
     configuration.insert(additional_config.begin(), additional_config.end());
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    std::vector<size_t> paramSize = { 1, 0 };
+    ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+    paramSize = { 1, 0 };
     for (const auto& val : inputShapes) {
         paramSize[1] += val[1];
     }
+}
+
+void ConcatMultiInput::GenerateStridedSliceModel() {
     auto params = ngraph::builder::makeParams(ngPrc, { paramSize });
     auto stride = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{ 2 }, std::vector<int64_t>{ 1, 1 });
@@ -80,9 +82,53 @@ void ConcatMultiInput::SetUp() {
     function = std::make_shared<ngraph::Function>(results, params, "ConcatMultiInput");
 }

-TEST_P(ConcatMultiInput, CompareWithRefImpl) {
+void ConcatMultiInput::GenerateConstOnlyModel() {
+    ngraph::OutputVector concatInputs;
+
+    const int seed = 0;
+    std::mt19937 gen(seed);
+    auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable {
+        std::vector<float> res;
+        std::uniform_real_distribution<float> dist(min, max);
+        for (std::size_t i = 0; i < vec_len; i++)
+            res.emplace_back(dist(gen));
+        return res;
+    };
+    ngraph::ParameterVector input_vector;
+    for (size_t i = 0; i < inputShapes.size(); ++i) {
+        size_t total_size = 1;
+        for (auto dim : inputShapes[i]) {
+            total_size *= dim;
+        }
+        if (i == 0) {
+            input_vector = ngraph::builder::makeParams(ngPrc, {{1, total_size}});
+            auto relu = ngraph::builder::makeActivation(input_vector[0], ngPrc, ngraph::helpers::ActivationTypes::Relu);
+            concatInputs.push_back(relu);
+        } else {
+            auto min_max = (i % 2 == 0) ? 2 : 30;
+            auto const_values = generateFloatNumbers(total_size, -min_max, min_max);
+            auto const_node = ngraph::builder::makeConstant(ngPrc, {1, total_size}, const_values);
+            concatInputs.push_back(const_node);
+        }
+    }
+    auto concat = ngraph::builder::makeConcat(concatInputs, 1);
+
+    ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(concat) };
+    function = std::make_shared<ngraph::Function>(results, input_vector, "ConcatConstOnly");
+}
+
+TEST_P(ConcatMultiInput, CompareWithRefStridedSlice) {
+    GenerateStridedSliceModel();
     Run();
 };
+
+TEST_P(ConcatMultiInput, CompareWithRefConstOnly) {
+    GenerateConstOnlyModel();
+    Run();
+};
+
 }  // namespace LayerTestsDefinitions
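
Both TEST_P cases are instantiated from a separate shared_tests_instances file that is not part of this diff. A hypothetical instantiation, with the tuple order taken from the std::tie(...) in SetUp and placeholder values:

#include <map>
#include <string>
#include <vector>

using namespace LayerTestsDefinitions;

// Placeholder values -- the real instantiation defines its own shape/precision lists.
INSTANTIATE_TEST_CASE_P(smoke_ConcatMultiInput, ConcatMultiInput,
    ::testing::Combine(
        ::testing::Values(std::vector<std::vector<size_t>>{ {1, 8}, {1, 8} }),  // inputShapes
        ::testing::Values(InferenceEngine::Precision::FP32),                    // netPrecision
        ::testing::Values(CommonTestUtils::DEVICE_GNA),                         // targetDevice
        ::testing::Values(std::map<std::string, std::string>{})),               // additional_config
    ConcatMultiInput::getTestCaseName);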