Removed unneeded deprecated test code (#16939)

This commit is contained in:
Ryszard Jezierski
2023-04-26 21:53:10 +02:00
committed by GitHub
parent 561bf6d478
commit 8005a3d0b0
11 changed files with 0 additions and 2120 deletions

View File

@@ -1,70 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ti_tests.hpp"
// Single parameterized case: GNA device, 8 (presumably the TI iteration/axis count
// — confirm against ti_test_params definition), FP32 network precision.
static const ti_test_params ti_test_cases[] = {{"GNA", 8, InferenceEngine::Precision(InferenceEngine::Precision::FP32)}};
// GNA plugin configurations used by the TensorIterator (TI) tests below.
// Software FP32 execution (no quantization).
static std::map<std::string, std::string> config_fp32 = {
    {"GNA_DEVICE_MODE", "GNA_SW_FP32"},
    {"GNA_COMPACT_MODE", "NO"}
};
// Bit-exact software emulation with 16-bit integer weights.
static std::map<std::string, std::string> config_I16 = {
    {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
    {"GNA_COMPACT_MODE", "NO"},
    {"GNA_PRECISION", "I16"}
};
// Bit-exact software emulation with 8-bit integer weights.
// Fix: GNA_PRECISION was "I16" (copy-paste from config_I16), which made every
// *_I8_ti_test an exact duplicate of the corresponding I16 run.
static std::map<std::string, std::string> config_I8 = {
    {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
    {"GNA_COMPACT_MODE", "NO"},
    {"GNA_PRECISION", "I8"}
};
// Input scale factors for networks with one input...
static const std::map<std::string, std::string> config_input_1 = {
    {"GNA_SCALE_FACTOR_0", "1024"}
};
// ...and with two inputs.
static const std::map<std::string, std::string> config_input_2 = {
    {"GNA_SCALE_FACTOR_0", "1024"},
    {"GNA_SCALE_FACTOR_1", "1024"}
};
// TI tests on TITestBase: each test runs the same TensorIterator network with a
// different GNA execution configuration, plus scale factors for two inputs.
TEST_P(TITestBase, GNA_sw_fp32_ti_test) {
    auto cfg = config_fp32;
    cfg.insert(config_input_2.begin(), config_input_2.end());
    RunTITest(cfg);
}
TEST_P(TITestBase, GNA_I16_ti_test) {
    auto cfg = config_I16;
    cfg.insert(config_input_2.begin(), config_input_2.end());
    RunTITest(cfg);
}
TEST_P(TITestBase, GNA_I8_ti_test) {
    auto cfg = config_I8;
    cfg.insert(config_input_2.begin(), config_input_2.end());
    RunTITest(cfg);
}
// Instantiate the cases above for every entry of ti_test_cases.
RUN_CASE_P_WITH_SUFFIX(GNA, _smoke, TITestBase, ti_test_cases);
// Same three execution modes on TITest2Base, whose network has a single input,
// hence only one scale factor (config_input_1).
TEST_P(TITest2Base, GNA_sw_fp32_ti_test) {
    auto cfg = config_fp32;
    cfg.insert(config_input_1.begin(), config_input_1.end());
    RunTITest(cfg);
}
TEST_P(TITest2Base, GNA_I16_ti_test) {
    auto cfg = config_I16;
    cfg.insert(config_input_1.begin(), config_input_1.end());
    RunTITest(cfg);
}
TEST_P(TITest2Base, GNA_I8_ti_test) {
    auto cfg = config_I8;
    cfg.insert(config_input_1.begin(), config_input_1.end());
    RunTITest(cfg);
}
// Instantiate for every ti_test_cases entry.
RUN_CASE_P_WITH_SUFFIX(GNA, _smoke, TITest2Base, ti_test_cases);

View File

@@ -1,283 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <legacy/ie_layers.h>
#include <precision_utils.h>
#include "conv_ref.hpp"
#include "common_layers_params.hpp"
using namespace InferenceEngine;
// Parses the string parameter map of a legacy Convolution layer into the typed
// fields of ConvolutionLayer (_kernel, _stride, _padding, _pads_end, _dilation,
// _out_depth, _group, _auto_pad). Supports both the old IR v2 attribute names
// (kernel-x/kernel-y, ...) and the newer vector attributes (kernel, strides, ...).
// Throws when `layer` is not actually a ConvolutionLayer.
void Convolution_parseParams(InferenceEngine::CNNLayer* layer) {
    auto convLayer = dynamic_cast<InferenceEngine::ConvolutionLayer*>(layer);
    if (!convLayer) {
        IE_THROW() << "Layer is not instance of ConvolutionLayer class";
    }
    convLayer->_out_depth = convLayer->GetParamAsUInt("output");
    // Reset every spatial property before re-filling it below.
    convLayer->_kernel.clear();
    convLayer->_stride.clear();
    convLayer->_padding.clear();
    convLayer->_pads_end.clear();
    convLayer->_dilation.clear();
    std::vector<unsigned int> kernels = convLayer->GetParamAsUInts("kernel", {});
    if (kernels.empty()) {
        // IR_v == 2
        convLayer->_kernel.insert(InferenceEngine::X_AXIS, convLayer->GetParamAsUInt("kernel-x"));
        convLayer->_kernel.insert(InferenceEngine::Y_AXIS, convLayer->GetParamAsUInt("kernel-y"));
        convLayer->_stride.insert(InferenceEngine::X_AXIS, convLayer->GetParamAsUInt("stride-x", 1u));
        convLayer->_stride.insert(InferenceEngine::Y_AXIS, convLayer->GetParamAsUInt("stride-y", 1u));
        // TODO: maybe just throw exception, why do we change IR?
        // A zero stride is silently coerced to 1 with only a printf warning.
        if (0 == convLayer->_stride[InferenceEngine::X_AXIS]) {
            convLayer->_stride[InferenceEngine::X_AXIS] = 1u;
            printf("Warning! in layer %s: Stride x is 0, setting to 1 ", convLayer->name.c_str());
        }
        if (0 == convLayer->_stride[InferenceEngine::Y_AXIS]) {
            convLayer->_stride[InferenceEngine::Y_AXIS] = 1u;
            printf("Warning! in layer %s: Stride y is 0, setting to 1", convLayer->name.c_str());
        }
        convLayer->_padding.insert(InferenceEngine::X_AXIS, convLayer->GetParamAsUInt("pad-x", 0u));
        convLayer->_padding.insert(InferenceEngine::Y_AXIS, convLayer->GetParamAsUInt("pad-y", 0u));
        // End paddings default to the corresponding begin paddings.
        convLayer->_pads_end.insert(InferenceEngine::X_AXIS, convLayer->GetParamAsUInt("pad-r", convLayer->_padding[InferenceEngine::X_AXIS]));
        convLayer->_pads_end.insert(InferenceEngine::Y_AXIS, convLayer->GetParamAsUInt("pad-b", convLayer->_padding[InferenceEngine::Y_AXIS]));
        convLayer->_dilation.insert(InferenceEngine::X_AXIS, convLayer->GetParamAsUInt("dilation-x", 1u));
        convLayer->_dilation.insert(InferenceEngine::Y_AXIS, convLayer->GetParamAsUInt("dilation-y", 1u));
    } else {
        // IR_v > 2
        // IR lists spatial attributes outermost-first; inserting element
        // [size - i] at position i-1 reverses them (presumably so the X axis
        // ends up at index 0, matching the kernel-x/kernel-y branch above).
        for (size_t i = 1; i <= kernels.size(); i++) {
            convLayer->_kernel.insert(i - 1, kernels[kernels.size() - i]);
        }
        std::vector<unsigned int> default_0 = std::vector<unsigned int> (convLayer->_kernel.size(), 0u);
        std::vector<unsigned int> default_1 = std::vector<unsigned int> (convLayer->_kernel.size(), 1u);
        std::vector<unsigned int> strides = convLayer->GetParamAsUInts("strides", default_1);
        for (size_t i = 1; i <= strides.size(); i++) {
            // Unlike the IR v2 branch, a zero stride is a hard error here.
            if (strides[strides.size() - i] == 0) {
                IE_THROW() << "Stride could not be 0.\nIn layer " << convLayer->name;
            }
            convLayer->_stride.insert(i - 1, strides[strides.size() - i]);
        }
        std::vector<unsigned int> pads_begin = convLayer->GetParamAsUInts("pads_begin", default_0);
        for (size_t i = 1; i <= pads_begin.size(); i++) {
            convLayer->_padding.insert(i - 1, pads_begin[pads_begin.size() - i]);
        }
        // pads_end defaults to pads_begin when absent.
        std::vector<unsigned int> pads_end = convLayer->GetParamAsUInts("pads_end", pads_begin);
        for (size_t i = 1; i <= pads_end.size(); i++) {
            convLayer->_pads_end.insert(i - 1, pads_end[pads_end.size() - i]);
        }
        std::vector<unsigned int> dilations = convLayer->GetParamAsUInts("dilations", default_1);
        for (size_t i = 1; i <= dilations.size(); i++) {
            convLayer->_dilation.insert(i - 1, dilations[dilations.size() - i]);
        }
    }
    convLayer->_auto_pad = convLayer->GetParamAsString("auto_pad", "");
    convLayer->_group = convLayer->GetParamAsUInt("group", 1u);
}
/**
 * Reference implementation of a grouped, dilated 2D/3D convolution with FP32
 * accumulation. Supports NCHW and NCDHW inputs (the depth axis degenerates to
 * 1 for the 2D case); source element type may be FP32, U8 or I8.
 *
 * @param srcs          input blobs; only srcs[0] (the data tensor) is read
 * @param dst           output blob, written as float, assumed zero-batch layout
 *                      matching the oidx computation below
 * @param weights_data  OIYX(-Z)-ordered weights, size KW*KH*KD*OC*IC/GC
 * @param weights_size  number of weight elements (validated by assert)
 * @param bias_data     optional per-output-channel bias; may be null
 * @param bias_size     number of bias elements (validated only when bias given)
 * @param prm           kernel/stride/dilation/padding/group/out_c parameters
 * @throws if srcs[0] is not NCHW/NCDHW
 */
template<typename wei_data_t, typename bias_data_t>
void ref_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                     Blob& dst,
                     const wei_data_t* weights_data,
                     size_t weights_size,
                     const bias_data_t* bias_data,
                     size_t bias_size,
                     const CommonTestUtils::conv_common_params& prm) {
    if (srcs[0]->getTensorDesc().getLayout() != Layout::NCHW &&
        srcs[0]->getTensorDesc().getLayout() != Layout::NCDHW)
        IE_THROW() << "Reference FP32 convolution supports NCHW and NCDHW layouts only";
    // Kernel/stride/dilation/padding; the Z axis falls back to degenerate
    // values when the parameters are 2D.
    size_t KW = prm.kernel[X_AXIS];
    size_t KH = prm.kernel[Y_AXIS];
    size_t KD = prm.kernel.size() > Z_AXIS ? prm.kernel[Z_AXIS] : 1lu;
    size_t SW = prm.stride[X_AXIS];
    size_t SH = prm.stride[Y_AXIS];
    size_t SD = prm.stride.size() > Z_AXIS ? prm.stride[Z_AXIS] : 0lu;
    size_t DW = prm.dilation[X_AXIS];
    size_t DH = prm.dilation[Y_AXIS];
    size_t DD = prm.dilation.size() > Z_AXIS ? prm.dilation[Z_AXIS] : 0lu;
    size_t PW = prm.pads_begin[X_AXIS];
    size_t PH = prm.pads_begin[Y_AXIS];
    size_t PD = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0lu;
    size_t GC = prm.group;
    auto src_dims = srcs[0]->getTensorDesc().getDims();
    size_t IC = src_dims[1];
    size_t ID = (src_dims.size() == 5lu) ? src_dims[2] : 1lu;
    size_t IH = src_dims.at(src_dims.size() - 2);
    size_t IW = src_dims.back();
    auto dst_dims = dst.getTensorDesc().getDims();
    size_t OW = dst_dims.back();
    size_t OH = dst_dims.at(dst_dims.size() - 2);
    size_t OD = (dst_dims.size() == 5lu) ? dst_dims[2] : 1lu;
    size_t OC = prm.out_c;
    const auto src_buffer = srcs[0]->cbuffer();
    auto* dst_data = dst.buffer().as<float*>();
    Precision src_precision = srcs[0]->getTensorDesc().getPrecision();
    IE_ASSERT(KW * KH * KD * OC * IC / GC == weights_size);
    // Fix: the bias size check used to fire even for the legal null-bias case.
    IE_ASSERT(bias_data == nullptr || OC == bias_size);
    for (uint32_t g = 0; g < GC; g++) {
        for (uint32_t oc = 0; oc < OC / GC; oc++) {
            for (uint32_t od = 0; od < OD; od++) {
                for (uint32_t oh = 0; oh < OH; oh++) {
                    for (uint32_t ow = 0; ow < OW; ow++) {
                        size_t oidx = g * OC / GC * OD * OH * OW
                                      + oc * OD * OH * OW
                                      + od * OH * OW
                                      + oh * OW
                                      + ow;
                        // Fix: always initialize the accumulator. Previously,
                        // when bias_data was null the element was left
                        // uninitialized and then accumulated with '+='.
                        dst_data[oidx] = bias_data ? static_cast<float>(bias_data[g * OC / GC + oc]) : 0.f;
                        for (size_t ic = 0; ic < IC / GC; ic++) {
                            for (size_t kd = 0; kd < KD; kd++) {
                                for (size_t kh = 0; kh < KH; kh++) {
                                    for (size_t kw = 0; kw < KW; kw++) {
                                        // Map output coordinate + kernel tap to
                                        // the input coordinate; skip taps that
                                        // fall into the padding area.
                                        int32_t iw = ow * SW - PW + kw * DW;
                                        int32_t ih = oh * SH - PH + kh * DH;
                                        int32_t id = od * SD - PD + kd * DD;
                                        if (iw < 0 || iw >= (int32_t) IW ||
                                            ih < 0 || ih >= (int32_t) IH ||
                                            id < 0 || id >= (int32_t) ID)
                                            continue;
                                        size_t iidx = g * IC / GC * ID * IH * IW
                                                      + ic * ID * IH * IW
                                                      + id * IH * IW
                                                      + ih * IW
                                                      + iw;
                                        size_t widx = g * OC / GC * IC / GC * KD * KH * KW
                                                      + oc * IC / GC * KD * KH * KW
                                                      + ic * KD * KH * KW
                                                      + kd * KH * KW
                                                      + kh * KW
                                                      + kw;
                                        // Read the source element in its native
                                        // precision; accumulate in float.
                                        if (src_precision == Precision::U8) {
                                            dst_data[oidx] += (src_buffer.as<const uint8_t*>())[iidx] * weights_data[widx];
                                        } else if (src_precision == Precision::I8) {
                                            dst_data[oidx] += (src_buffer.as<const int8_t*>())[iidx] * weights_data[widx];
                                        } else {
                                            dst_data[oidx] += (src_buffer.as<const float*>())[iidx] * weights_data[widx];
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
// Explicit instantiations for the weight/bias type combinations the tests use:
// FP32 weights + FP32 bias, and I8 weights + I32 bias. The ie_fp16 combination
// is handled by the full specialization below instead.
template void ref_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob& dst, const float* weights_data,
                              size_t, const float* bias_data, size_t, const CommonTestUtils::conv_common_params& prm);
template void ref_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob& dst, const int8_t* weights_data,
                              size_t, const int32_t* bias_data, size_t, const CommonTestUtils::conv_common_params& prm);
// Full specialization for FP16 (ie_fp16) data/weights/bias. Unlike the generic
// template, this version accumulates in float and converts back to FP16 per
// output element, and it indexes src/dst with channel-minor (interleaved)
// addressing: oidx = channel + x * C + y * C * W (+ batch offset) — i.e. NOT
// the planar NCHW addressing of the generic template. 2D only, with batching.
template<>
void ref_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                     Blob& dst,
                     const ie_fp16* weights_data,
                     size_t /*weights_size*/,
                     const ie_fp16* bias_data,
                     size_t /*bias_size*/,
                     const CommonTestUtils::conv_common_params& prm) {
    const auto* src_data = srcs[0]->cbuffer().as<const ie_fp16*>();
    auto* dst_data = dst.buffer().as<ie_fp16*>();
    IE_ASSERT(src_data != nullptr);
    IE_ASSERT(dst_data != nullptr);
    size_t KH = prm.kernel[Y_AXIS];
    size_t KW = prm.kernel[X_AXIS];
    size_t SH = prm.stride[Y_AXIS];
    size_t SW = prm.stride[X_AXIS];
    size_t DH = prm.dilation[Y_AXIS];
    size_t DW = prm.dilation[X_AXIS];
    size_t PH = prm.pads_begin[Y_AXIS];
    size_t PW = prm.pads_begin[X_AXIS];
    size_t GC = prm.group;
    int32_t IW = 0;
    int32_t IH = 0;
    int32_t IC = 0;
    int32_t I_N = 0;
    int32_t OW = 0;
    int32_t OH = 0;
    int32_t OC = 0;
    int32_t ON = 0;
    // get_common_dims extracts W/H/C/N from the blob descriptors.
    CommonTestUtils::get_common_dims(*srcs[0], IW, IH, IC, I_N);
    CommonTestUtils::get_common_dims(dst, OW, OH, OC, ON);
    // Input and output batch must match.
    IE_ASSERT(I_N == ON);
    size_t src_channels = IC / GC;
    size_t dst_channels = OC / GC;
    for (size_t n = 0; n < ON; ++n) {
        // Per-batch offsets into the interleaved buffers.
        size_t oShift = n * OC * OH * OW;
        size_t iShift = n * IC * IH * IW;
        for (size_t g = 0; g < GC; ++g) {
            for (size_t oc = 0; oc < dst_channels; ++oc) {
                size_t dst_channel = (g * dst_channels + oc);
                for (size_t oh = 0; oh < OH; oh++) {
                    for (size_t ow = 0; ow < OW; ow++) {
                        size_t oidx = dst_channel + ow * OC + oh * OC * OW + oShift;
                        IE_ASSERT(oidx < dst.size());
                        // Accumulate in float, starting from the (optional) bias.
                        float val = 0.0f;
                        if (bias_data)
                            val = PrecisionUtils::f16tof32(bias_data[dst_channel]);
                        for (size_t ic = 0; ic < src_channels; ++ic) {
                            size_t src_channel = (g * src_channels + ic);
                            for (size_t ky = 0; ky < KH; ++ky) {
                                for (size_t kx = 0; kx < KW; ++kx) {
                                    // Skip kernel taps landing in the padding.
                                    int32_t iw = ow * SW - PW + kx * DW;
                                    int32_t ih = oh * SH - PH + ky * DH;
                                    if (iw < 0 || iw >= (int32_t) IW || ih < 0 || ih >= (int32_t) IH) {
                                        continue;
                                    }
                                    size_t iidx = src_channel + iw * IC + ih * IC * IW + iShift;
                                    IE_ASSERT(iidx < srcs[0]->size());
                                    // Weights are laid out per destination channel,
                                    // then per source channel, then kernel y/x.
                                    size_t widx = (ky * KW + kx) + ic * KH * KW +
                                                  dst_channel * src_channels * KW * KH;
                                    IE_ASSERT(widx < KH * KW * (IC / GC) * OC);
                                    val += PrecisionUtils::f16tof32(src_data[iidx]) *
                                           PrecisionUtils::f16tof32(weights_data[widx]);
                                }
                            }
                        }
                        dst_data[oidx] = PrecisionUtils::f32tof16(val);
                    }
                }
            }
        }
    }
}

View File

@@ -1,58 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <legacy/ie_layers_property.hpp>
#include <ie_blob.h>
#include <precision_utils.h>
#include <legacy/ie_layers_internal.hpp>
#include "common_layers_params.hpp"
/// Reference convolution over NCHW/NCDHW input; defined in conv_ref.cpp for
/// float/int8 and fully specialized there for ie_fp16.
template<typename wei_data_t, typename bias_data_t>
void ref_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                     InferenceEngine::Blob& dst,
                     const wei_data_t* weights_data,
                     size_t weights_size,
                     const bias_data_t* bias_data,
                     size_t bias_size,
                     const CommonTestUtils::conv_common_params& prm);
/// Fills a ConvolutionLayer's typed fields from its string parameter map.
void Convolution_parseParams(InferenceEngine::CNNLayer* layer);
/// Convenience wrapper: builds a throwaway ConvolutionLayer from `params_map`,
/// parses it to recover the typed convolution parameters (including auto_pad
/// resolution via getPaddings), then delegates to ref_conv_common.
template<typename wei_data_t, typename bias_data_t>
void common_ref_convolution_wrap(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                                 InferenceEngine::Blob::Ptr& dst,
                                 const wei_data_t* weights_data,
                                 size_t weights_size,
                                 const bias_data_t* bias_data,
                                 size_t bias_size,
                                 const std::map<std::string, std::string>& params_map) {
    InferenceEngine::LayerParams lp{};
    InferenceEngine::ConvolutionLayer convLayer(lp);
    // The layer needs an input Data to let getPaddings resolve auto_pad.
    auto data = std::make_shared<InferenceEngine::Data>("insData", srcs[0]->getTensorDesc());
    convLayer.params = params_map;
    convLayer.insData.push_back(data);
    Convolution_parseParams(&convLayer);
    CommonTestUtils::conv_common_params params;
    params.kernel = convLayer._kernel;
    auto allPad = InferenceEngine::getPaddings(convLayer);
    params.pads_begin = allPad.begin;
    params.pads_end = allPad.end;
    params.stride = convLayer._stride;
    params.dilation = convLayer._dilation;
    params.out_c = convLayer._out_depth;
    params.group = convLayer._group;
    ref_conv_common<>(srcs,
                      *dst.get(),
                      weights_data,
                      weights_size,
                      bias_data,
                      bias_size,
                      params);
}

View File

@@ -1,187 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <legacy/ie_layers.h>
#include <precision_utils.h>
#include <gtest/gtest.h>
#include "deconv_ref.hpp"
#include "common_layers_params.hpp"
using namespace InferenceEngine;
// Reference FP32 deconvolution (transposed convolution) over NCHW input,
// 2D only, no grouping. For each output element it gathers the input positions
// that would have contributed to it in the forward convolution.
template<>
void ref_deconv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                       Blob &dst,
                       const float *weights_data,
                       size_t weights_size,
                       const float *bias_data,
                       size_t bias_size,
                       const CommonTestUtils::conv_common_params &prm) {
    if (srcs[0]->getTensorDesc().getLayout() != Layout::NCHW)
        IE_THROW() << "Reference FP32 convolution supports NCHW layout only";
    size_t KH = prm.kernel[Y_AXIS];
    size_t KW = prm.kernel[X_AXIS];
    size_t SH = prm.stride[Y_AXIS];
    size_t SW = prm.stride[X_AXIS];
    size_t PH = prm.pads_begin[Y_AXIS];
    size_t PW = prm.pads_begin[X_AXIS];
    auto src_dims = srcs[0]->getTensorDesc().getDims();
    size_t IW = src_dims.back();
    size_t IH = src_dims.at(src_dims.size() - 2);
    size_t IC = src_dims.at(1);
    size_t MB = src_dims.at(0);
    size_t OC = prm.out_c;
    auto dst_dims = dst.getTensorDesc().getDims();
    size_t OW = dst_dims.back();
    size_t OH = dst_dims.at(dst_dims.size() - 2);
    const auto *src_data = srcs[0]->cbuffer().as<float *>();
    auto *dst_data = dst.buffer().as<float *>();;
    for (int mb = 0; mb < MB; ++mb) {
        for (int oc = 0; oc < OC; ++oc) {
            for (int oh = 0; oh < OH; ++oh) {
                for (int ow = 0; ow < OW; ++ow) {
                    size_t didx = mb * OC * OH * OW
                                  + oc * OH * OW + oh * OW + ow;
                    // Output is zero-initialized, then biased if a bias exists.
                    dst_data[didx] = float(0);
                    if (bias_data) dst_data[didx] += bias_data[oc];
                    for (int ic = 0; ic < IC; ic++) {
                        for (int kh = 0; kh < KH; kh++) {
                            for (int kw = 0; kw < KW; kw++) {
                                // Invert the forward-conv mapping: input position
                                // exists only when (out + pad - k) is a non-negative
                                // multiple of the stride.
                                if (ow + PW < kw || oh + PH < kh)
                                    continue;
                                size_t iw = ow - kw + PW;
                                size_t ih = oh - kh + PH;
                                if (iw % SW != 0 || ih % SH != 0)
                                    continue;
                                iw /= SW;
                                ih /= SH;
                                if (ih < IH && iw < IW) {
                                    size_t sidx = mb * IC * IH * IW
                                                  + ic * IH * IW + ih * IW
                                                  + iw;
                                    // Deconv weights are laid out IC-major: [IC][OC][KH][KW].
                                    size_t widx = ic * OC * KH * KW
                                                  + oc * KH * KW + kh * KW
                                                  + kw;
                                    dst_data[didx] += src_data[sidx] * weights_data[widx];
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
// FP16 deconvolution reference with grouping support. Uses channel-minor
// (interleaved) addressing — idx = channel + x * C + y * C * W — unlike the
// planar NCHW addressing of the float specialization above. Accumulates in
// float and converts back to FP16 per output element.
template<>
void ref_deconv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                       Blob &dst,
                       const ie_fp16 *weights_data,
                       size_t /*weights_size*/,
                       const ie_fp16 *bias_data,
                       size_t /*bias_size*/,
                       const CommonTestUtils::conv_common_params &prm) {
    const auto *src_data = srcs[0]->cbuffer().as<ie_fp16 *>();
    auto *dst_data = dst.buffer().as<ie_fp16 *>();
    IE_ASSERT(src_data != nullptr);
    IE_ASSERT(dst_data != nullptr);
    size_t KH = prm.kernel[Y_AXIS];
    size_t KW = prm.kernel[X_AXIS];
    size_t SH = prm.stride[Y_AXIS];
    size_t SW = prm.stride[X_AXIS];
    size_t PH = prm.pads_begin[Y_AXIS];
    size_t PW = prm.pads_begin[X_AXIS];
    auto src_dims = srcs[0]->getTensorDesc().getDims();
    size_t IW = src_dims.back();
    size_t IH = src_dims.at(src_dims.size() - 2);
    size_t IC = src_dims.at(1);
    size_t IB = src_dims.at(0);
    auto dst_dims = dst.getTensorDesc().getDims();
    size_t OW = dst_dims.back();
    size_t OH = dst_dims.at(dst_dims.size() - 2);
    size_t OC = dst_dims.at(1);
    // NOTE(review): OB is read from src_dims, not dst_dims — this only works
    // because input and output batch sizes are assumed equal; confirm or use
    // dst_dims.at(0).
    size_t OB = src_dims.at(0);
    size_t GC = prm.group;
    size_t src_channels = IC / GC;
    size_t dst_channels = OC / GC;
    // Per-batch element counts for the interleaved buffers.
    size_t ib_size = srcs[0]->size() / IB;
    size_t ob_size = dst.size() / OB;
    for (size_t ob = 0; ob < OB; ++ob) {
        for (size_t g = 0; g < GC; ++g) {
            for (size_t oc = 0; oc < dst_channels; ++oc) {
                size_t dst_channel = (g * dst_channels + oc);
                for (size_t oy = 0; oy < OH; oy++) {
                    for (size_t ox = 0; ox < OW; ox++) {
                        size_t oidx = ob * ob_size + dst_channel + ox * OC + oy * OC * OW;
                        ASSERT_LT(oidx, dst.size());
                        // Accumulate in float, starting from the optional bias.
                        float val = bias_data != nullptr ? PrecisionUtils::f16tof32(bias_data[dst_channel]) : 0;
                        for (size_t ic = 0; ic < src_channels; ++ic) {
                            size_t src_channel = (g * src_channels + ic);
                            for (size_t ky = 0; ky < KH; ++ky) {
                                for (size_t kx = 0; kx < KW; ++kx) {
                                    // Invert the forward-conv mapping: a tap
                                    // contributes only when (out + pad - k) is a
                                    // non-negative multiple of the stride.
                                    if (ox + PW < kx || oy + PH < ky)
                                        continue;
                                    int32_t ix = ox - kx + PW;
                                    int32_t iy = oy - ky + PH;
                                    if (ix % SW != 0 || iy % SH != 0)
                                        continue;
                                    ix /= SW;
                                    iy /= SH;
                                    if (iy < IH && ix < IW) {
                                        size_t iidx = ob * ib_size + src_channel + ix * IC + iy * IC * IW;
                                        ASSERT_LT(iidx, srcs[0]->size());
                                        // Weights: [IC][OC][KH][KW] (IC-major deconv layout).
                                        size_t widx = ic * OC * KH * KW
                                                      + dst_channel * KH * KW
                                                      + ky * KW
                                                      + kx;
                                        ASSERT_LT(widx, KW * KH * (IC / GC) * OC);
                                        val += PrecisionUtils::f16tof32(src_data[iidx]) *
                                               PrecisionUtils::f16tof32(weights_data[widx]);
                                    }
                                }
                            }
                        }
                        dst_data[oidx] = PrecisionUtils::f32tof16(val);
                    }
                }
            }
        }
    }
}

View File

@@ -1,57 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <legacy/ie_layers_property.hpp>
#include <ie_blob.h>
#include <precision_utils.h>
#include <legacy/ie_layers_internal.hpp>
#include "common_layers_params.hpp"
/// Reference deconvolution (transposed convolution); specialized in
/// deconv_ref.cpp for float and ie_fp16.
template<typename data_t>
void ref_deconv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                       InferenceEngine::Blob& dst,
                       const data_t* weights_data,
                       size_t weights_size,
                       const data_t* bias_data,
                       size_t bias_size,
                       const CommonTestUtils::conv_common_params& prm);
/// Shared parser for convolution-style attributes (kernel/stride/pads/...).
void Convolution_parseParams(InferenceEngine::CNNLayer* layer);
/// Convenience wrapper: parses `params_map` through a throwaway layer object
/// to recover typed parameters, then delegates to ref_deconv_common.
/// NOTE(review): a ConvolutionLayer (not a DeconvolutionLayer) is used for the
/// parsing/padding resolution — presumably adequate since the attribute set is
/// identical; confirm getPaddings behaves the same for deconvolution.
template<typename data_t>
void common_ref_deconvolution_wrap(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                                   InferenceEngine::Blob::Ptr& dst,
                                   const data_t* weights_data,
                                   size_t weights_size,
                                   const data_t* bias_data,
                                   size_t bias_size,
                                   const std::map<std::string, std::string>& params_map) {
    InferenceEngine::LayerParams lp{};
    InferenceEngine::ConvolutionLayer deconvLayer(lp);
    // The layer needs an input Data so getPaddings can resolve auto_pad.
    auto data = std::make_shared<InferenceEngine::Data>("insData", srcs[0]->getTensorDesc());
    deconvLayer.params = params_map;
    deconvLayer.insData.push_back(data);
    Convolution_parseParams(&deconvLayer);
    CommonTestUtils::conv_common_params params;
    params.kernel = deconvLayer._kernel;
    auto allPad = InferenceEngine::getPaddings(deconvLayer);
    params.pads_begin = allPad.begin;
    params.pads_end = allPad.end;
    params.stride = deconvLayer._stride;
    params.dilation = deconvLayer._dilation;
    params.out_c = deconvLayer._out_depth;
    params.group = deconvLayer._group;
    ref_deconv_common<data_t>(srcs,
                              *dst.get(),
                              weights_data,
                              weights_size,
                              bias_data,
                              bias_size,
                              params);
}

View File

@@ -1,270 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <legacy/ie_layers.h>
#include <precision_utils.h>
#include <math.h>
#include <ie_parallel.hpp>
#include "def_conv_ref.hpp"
#include "common_layers_params.hpp"
using namespace InferenceEngine;
// Shared parser for the common convolution attributes (defined in conv_ref.cpp).
void Convolution_parseParams(InferenceEngine::CNNLayer* layer);
// Parses DeformableConvolution-specific attributes (deformable_group, default 1)
// and then reuses the regular convolution parser for everything else.
// Throws when `layer` is not a DeformableConvolutionLayer.
void DeformableConvolution_parseParams(InferenceEngine::CNNLayer* layer) {
    auto* defConvLayer = dynamic_cast<InferenceEngine::DeformableConvolutionLayer*>(layer);
    if (defConvLayer == nullptr) {
        IE_THROW() << "Layer is not instance of DeformableConvolutionLayer class";
    }
    defConvLayer->_deformable_group = defConvLayer->GetParamAsUInt("deformable_group", 1u);
    Convolution_parseParams(layer);
}
// Reference FP32 deformable convolution over NCHW/NCDHW input. srcs[0] is the
// data tensor, srcs[1] the per-position (y, x) offset ("trans") tensor; sampling
// positions are displaced by the offsets and the input is read with bilinear
// interpolation. Outer loops are parallelized with parallel_for5d.
template<>
void ref_def_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                         Blob& dst,
                         const float* weights_data,
                         size_t weights_size,
                         const float* bias_data,
                         size_t bias_size,
                         const CommonTestUtils::def_conv_common_params& prm) {
    if (srcs[0]->getTensorDesc().getLayout() != Layout::NCHW &&
        srcs[0]->getTensorDesc().getLayout() != Layout::NCDHW)
        IE_THROW() << "Reference FP32 deformable convolution supports NCHW and NCDHW layouts only";
    // Kernel/stride/dilation/padding; Z axis degenerates for the 2D case.
    size_t KW = prm.kernel[X_AXIS];
    size_t KH = prm.kernel[Y_AXIS];
    size_t KD = prm.kernel.size() > Z_AXIS ? prm.kernel[Z_AXIS] : 1lu;
    size_t SW = prm.stride[X_AXIS];
    size_t SH = prm.stride[Y_AXIS];
    size_t SD = prm.stride.size() > Z_AXIS ? prm.stride[Z_AXIS] : 0lu;
    size_t DW = prm.dilation[X_AXIS];
    size_t DH = prm.dilation[Y_AXIS];
    size_t DD = prm.dilation.size() > Z_AXIS ? prm.dilation[Z_AXIS] : 0lu;
    size_t PW = prm.pads_begin[X_AXIS];
    size_t PH = prm.pads_begin[Y_AXIS];
    size_t PD = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0lu;
    size_t GC = prm.group;
    auto src_dims = srcs[0]->getTensorDesc().getDims();
    size_t MB = src_dims[0];
    size_t IC = src_dims[1];
    size_t ID = (src_dims.size() == 5lu) ? src_dims[2] : 1lu;
    size_t IH = src_dims.at(src_dims.size() - 2);
    size_t IW = src_dims.back();
    auto dst_dims = dst.getTensorDesc().getDims();
    size_t OW = dst_dims.back();
    size_t OH = dst_dims.at(dst_dims.size() - 2);
    size_t OD = (dst_dims.size() == 5lu) ? dst_dims[2] : 1lu;
    size_t OC = prm.out_c;
    size_t DG = prm.deformable_group;
    const auto* src_data = srcs[0]->cbuffer().as<const float*>();
    const auto* trans_data = srcs[1]->cbuffer().as<const float*>();
    auto* dst_data = dst.buffer().as<float*>();
    IE_ASSERT(KW * KH * KD * OC * IC / GC == weights_size);
    IE_ASSERT(OC == bias_size);
    const int channel_per_deformable_group = IC / DG;
    parallel_for5d(MB, GC, OC / GC, OD, OH, [&](size_t mb, size_t g, size_t oc, size_t od, size_t oh) {
        for (size_t ow = 0; ow < OW; ow++) {
            size_t oidx = mb * OC * OD * OH * OW
                          + g * OC / GC * OD * OH * OW
                          + oc * OD * OH * OW
                          + od * OH * OW
                          + oh * OW
                          + ow;
            // NOTE(review): when bias_data is null, dst_data[oidx] is never
            // initialized before the '+=' accumulation below — likely a latent
            // bug (the conv reference has the same pattern).
            if (bias_data)
                dst_data[oidx] = bias_data[g * OC / GC + oc];
            for (size_t ic = 0; ic < IC / GC; ic++) {
                // Offsets are stored per deformable group as 2*KH*KW (y, x)
                // planes of size OH*OW.
                const int deformable_group_idx = ic / channel_per_deformable_group;
                const int trans_offset = mb * DG * 2 * KH * KW * OH * OW
                                         + deformable_group_idx * 2 * KH * KW * OH * OW;
                for (size_t kd = 0; kd < KD; kd++) {
                    for (size_t kh = 0; kh < KH; kh++) {
                        for (size_t kw = 0; kw < KW; kw++) {
                            // Regular sampling position, then displaced by the
                            // learned offsets (y plane 2k, x plane 2k+1).
                            int32_t iw = ow * SW - PW + kw * DW;
                            int32_t ih = oh * SH - PH + kh * DH;
                            int32_t id = od * SD - PD + kd * DD;
                            const int trans_y_idx = ((2 * (kh * KW + kw)) * OH + oh) * OW + ow;
                            float transformed_y = ih + trans_data[trans_offset + trans_y_idx];
                            const int trans_x_idx = ((2 * (kh * KW + kw) + 1) * OH + oh) * OW + ow;
                            float transformed_x = iw + trans_data[trans_offset + trans_x_idx];
                            if (transformed_x < 0 || transformed_x >= (int32_t) IW ||
                                transformed_y < 0 || transformed_y >= (int32_t) IH ||
                                id < 0 || id >= (int32_t) ID)
                                continue;
                            auto get_data_index = [&](int h, int w) -> int {
                                return mb * IC * ID * IH * IW
                                       + g * IC / GC * ID * IH * IW
                                       + ic * ID * IH * IW
                                       + id * IH * IW
                                       + h * IW
                                       + w;
                            };
                            size_t widx = g * OC / GC * IC / GC * KD * KH * KW
                                          + oc * IC / GC * KD * KH * KW
                                          + ic * KD * KH * KW
                                          + kd * KH * KW
                                          + kh * KW
                                          + kw;
                            // Bilinear interpolation of the four neighbours of
                            // the (fractional) displaced position.
                            const int top_y_index = floor(transformed_y);
                            const int bottom_y_index = fmin(ceil(transformed_y), IH - 1);
                            const int left_x_index = floor(transformed_x);
                            const int right_x_index = fmin(ceil(transformed_x), IW - 1);
                            const float top_left = src_data[get_data_index(top_y_index, left_x_index)];
                            const float top_right = src_data[get_data_index(top_y_index,
                                                                            right_x_index)];
                            const float bottom_left = src_data[get_data_index(bottom_y_index,
                                                                              left_x_index)];
                            const float bottom_right = src_data[get_data_index(bottom_y_index,
                                                                               right_x_index)];
                            const float top =
                                    top_left + (top_right - top_left) * (transformed_x - left_x_index);
                            const float bottom = bottom_left + (bottom_right - bottom_left) *
                                                               (transformed_x - left_x_index);
                            float val = top + (bottom - top) * (transformed_y - top_y_index);
                            dst_data[oidx] += val * weights_data[widx];
                        }
                    }
                }
            }
        }
    });
}
// FP16 deformable convolution reference (no batching). Mirrors the float
// specialization above but reads/writes ie_fp16 and accumulates in float.
// NOTE(review): several index expressions here diverge from the float version
// and look like typos — flagged inline; confirm before reusing this code.
template<>
void ref_def_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                         Blob& dst,
                         const ie_fp16* weights_data,
                         size_t /*weights_size*/,
                         const ie_fp16* bias_data,
                         size_t /*bias_size*/,
                         const CommonTestUtils::def_conv_common_params& prm) {
    if (srcs[0]->getTensorDesc().getLayout() != Layout::NCHW &&
        srcs[0]->getTensorDesc().getLayout() != Layout::NCDHW)
        IE_THROW() << "Reference FP16 deformable convolution supports NCHW and NCDHW layouts only";
    size_t KW = prm.kernel[X_AXIS];
    size_t KH = prm.kernel[Y_AXIS];
    size_t KD = prm.kernel.size() > Z_AXIS ? prm.kernel[Z_AXIS] : 1lu;
    size_t SW = prm.stride[X_AXIS];
    size_t SH = prm.stride[Y_AXIS];
    size_t SD = prm.stride.size() > Z_AXIS ? prm.stride[Z_AXIS] : 0lu;
    size_t DW = prm.dilation[X_AXIS];
    size_t DH = prm.dilation[Y_AXIS];
    size_t DD = prm.dilation.size() > Z_AXIS ? prm.dilation[Z_AXIS] : 0lu;
    size_t PW = prm.pads_begin[X_AXIS];
    size_t PH = prm.pads_begin[Y_AXIS];
    size_t PD = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0lu;
    size_t GC = prm.group;
    auto src_dims = srcs[0]->getTensorDesc().getDims();
    // NOTE(review): this dim extraction (IW from dims[0], IH from dims[1], IC
    // from dims[2]/[3]) contradicts the NCHW/NCDHW layout check above and the
    // float version's extraction — suspected bug; confirm the intended layout.
    size_t IW = src_dims[0];
    size_t IH = src_dims[1];
    size_t ID = src_dims.size() == 5lu ? src_dims[2] : 1lu;
    size_t IC = src_dims.size() == 5lu ? src_dims[3] : src_dims[2];
    auto dst_dims = dst.getTensorDesc().getDims();
    size_t OW = dst_dims[0];
    size_t OH = dst_dims[1];
    size_t OD = dst_dims.size() == 5lu ? dst_dims[2] : 1lu;
    size_t OC = prm.out_c;
    const auto* src_data = srcs[0]->cbuffer().as<const ie_fp16 *>();
    const auto* trans_data = srcs[1]->cbuffer().as<const ie_fp16 *>();
    auto* dst_data = dst.buffer().as<ie_fp16 *>();
    const int channel_per_deformable_group = IC / prm.deformable_group;
    parallel_for4d(GC, OC / GC, OD, OH, [&](size_t g, size_t oc, size_t od, size_t oh) {
        for (uint32_t ow = 0; ow < OW; ow++) {
            size_t oidx = g * OC / GC * OD * OH * OW
                          + oc * OD * OH * OW
                          + od * OH * OW
                          + oh * OW
                          + ow;
            // NOTE(review): same uninitialized-accumulator pattern as the float
            // version when bias_data is null.
            if (bias_data)
                dst_data[oidx] = bias_data[g * OC / GC + oc];
            for (size_t ic = 0; ic < IC / GC; ic++) {
                const int deformable_group_idx = ic / channel_per_deformable_group;
                // NOTE(review): 'OW * OW' — the float version uses OH * OW here;
                // suspected typo.
                const int trans_offset = deformable_group_idx * 2 * KH * KW * OW * OW;
                for (size_t kd = 0; kd < KD; kd++) {
                    for (size_t kh = 0; kh < KH; kh++) {
                        for (size_t kw = 0; kw < KW; kw++) {
                            int32_t iw = ow * SW - PW + kw * DW;
                            int32_t ih = oh * SH - PH + kh * DH;
                            int32_t id = od * SD - PD + kd * DD;
                            // NOTE(review): '* OW + oh' — the float version uses
                            // '* OH + oh'; suspected typo in both indices below.
                            const int trans_y_idx = ((2 * (kh * KW + kw)) * OW + oh) * OW + ow;
                            float transformed_y = ih + PrecisionUtils::f16tof32(trans_data[trans_offset + trans_y_idx]);
                            const int trans_x_idx = ((2 * (kh * KW + kw) + 1) * OW + oh) * OW + ow;
                            float transformed_x = iw + PrecisionUtils::f16tof32(trans_data[trans_offset + trans_x_idx]);
                            if (transformed_x < 0 || transformed_x >= (int32_t) IW ||
                                transformed_y < 0 || transformed_y >= (int32_t) IH ||
                                id < 0 || id >= (int32_t) ID)
                                continue;
                            auto get_data_index = [&](int h, int w) -> int {
                                return g * IC / GC * ID * IH * IW
                                       + ic * ID * IH * IW
                                       + id * IH * IW
                                       + h * IW
                                       + w;
                            };
                            size_t widx = g * OC / GC * IC / GC * KD * KH * KW
                                          + oc * IC / GC * KD * KH * KW
                                          + ic * KD * KH * KW
                                          + kd * KH * KW
                                          + kh * KW
                                          + kw;
                            // Bilinear interpolation at the displaced position.
                            const int top_y_index = floor(transformed_y);
                            const int bottom_y_index = fmin(ceil(transformed_y), IH - 1);
                            const int left_x_index = floor(transformed_x);
                            const int right_x_index = fmin(ceil(transformed_x), IW - 1);
                            const float top_left = PrecisionUtils::f16tof32(src_data[get_data_index(top_y_index, left_x_index)]);
                            const float top_right = PrecisionUtils::f16tof32(src_data[get_data_index(top_y_index, right_x_index)]);
                            const float bottom_left = PrecisionUtils::f16tof32(src_data[get_data_index(bottom_y_index, left_x_index)]);
                            const float bottom_right = PrecisionUtils::f16tof32(src_data[get_data_index(bottom_y_index, right_x_index)]);
                            const float top = top_left + (top_right - top_left) * (transformed_x - left_x_index);
                            const float bottom = bottom_left + (bottom_right - bottom_left) * (transformed_x - left_x_index);
                            float val = top + (bottom - top) * (transformed_y - top_y_index);
                            dst_data[oidx] += PrecisionUtils::f32tof16(val * PrecisionUtils::f16tof32(weights_data[widx]));
                        }
                    }
                }
            }
        }
    });
}

View File

@@ -1,57 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <legacy/ie_layers_property.hpp>
#include <ie_blob.h>
#include <precision_utils.h>
#include <legacy/ie_layers_internal.hpp>
#include "common_layers_params.hpp"
/// Reference deformable convolution; specialized in def_conv_ref.cpp for
/// float and ie_fp16. srcs[0] is the data tensor, srcs[1] the offsets.
template<typename data_t>
void ref_def_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                         InferenceEngine::Blob& dst,
                         const data_t* weights_data,
                         size_t weights_size,
                         const data_t* bias_data,
                         size_t bias_size,
                         const CommonTestUtils::def_conv_common_params& prm);
/// Parses deformable_group and the common convolution attributes.
void DeformableConvolution_parseParams(InferenceEngine::CNNLayer* layer);
/// Convenience wrapper: parses `params_map` into typed parameters and delegates
/// to ref_def_conv_common.
/// NOTE(review): `params` is declared as conv_common_params and the parsed
/// deformable_group is never copied into it before the call — presumably
/// def_conv_common_params converts from/extends conv_common_params with a
/// default deformable_group; confirm this is intended.
template<typename data_t>
void common_ref_def_convolution_wrap(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                                     InferenceEngine::Blob::Ptr& dst,
                                     const data_t* weights_data,
                                     size_t weights_size,
                                     const data_t* bias_data,
                                     size_t bias_size,
                                     const std::map<std::string, std::string>& params_map) {
    InferenceEngine::LayerParams lp{};
    InferenceEngine::ConvolutionLayer convLayer(lp);
    // The layer needs an input Data so getPaddings can resolve auto_pad.
    auto data = std::make_shared<InferenceEngine::Data>("insData", srcs[0]->getTensorDesc());
    convLayer.params = params_map;
    convLayer.insData.push_back(data);
    DeformableConvolution_parseParams(&convLayer);
    CommonTestUtils::conv_common_params params;
    params.kernel = convLayer._kernel;
    auto allPad = InferenceEngine::getPaddings(convLayer);
    params.pads_begin = allPad.begin;
    params.pads_end = allPad.end;
    params.stride = convLayer._stride;
    params.dilation = convLayer._dilation;
    params.out_c = convLayer._out_depth;
    params.group = convLayer._group;
    ref_def_conv_common<data_t>(srcs,
                                *dst.get(),
                                weights_data,
                                weights_size,
                                bias_data,
                                bias_size,
                                params);
}

View File

@@ -1,267 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <legacy/ie_layers.h>
#include <precision_utils.h>
#include "common_layers_params.hpp"
#include "pool_ref.hpp"
using namespace InferenceEngine;
// Parses raw string attributes of a pooling layer into the typed fields of
// InferenceEngine::PoolingLayer (_kernel/_stride/_padding/_pads_end/_type/
// _exclude_pad/_auto_pad). Three attribute layouts are supported:
//   1) vector form ("kernel", "strides", "pads_begin", "pads_end"),
//   2) legacy per-axis form ("kernel-x", "stride-y", "pad-r", ...),
//   3) custom-layer form ("kernel_size", "kernel_w", "stride", "pad", ...).
// Throws on a wrong layer type, a zero stride in the vector form, or an
// unknown pool method.
void Pool_parseParams(InferenceEngine::CNNLayer* layer) {
    auto poolLayer = dynamic_cast<InferenceEngine::PoolingLayer*>(layer);
    if (!poolLayer) {
        IE_THROW() << "Layer is not instance of PoolingLayer class";
    }
    // Reset previously parsed geometry before re-filling it.
    poolLayer->_kernel.clear();
    poolLayer->_stride.clear();
    poolLayer->_padding.clear();
    poolLayer->_pads_end.clear();
    poolLayer->_auto_pad = poolLayer->GetParamAsString("auto_pad", "");
    std::vector<unsigned int> kernels = poolLayer->GetParamAsUInts("kernel", {});
    if (kernels.empty()) {
        int kernel_x = poolLayer->GetParamAsInt("kernel-x", -1);
        /** Pooling as custom layer */
        if (kernel_x == -1) {
            try {
                // Scalar attribute is the fallback when the per-dimension value is absent (0 == "not set").
                unsigned int kernel_size = poolLayer->GetParamAsUInt("kernel_size");
                unsigned int kernel_w = poolLayer->GetParamAsUInt("kernel_w", 0u);
                unsigned int kernel_h = poolLayer->GetParamAsUInt("kernel_h", 0u);
                poolLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel_w == 0u ? kernel_size : kernel_w);
                poolLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel_h == 0u ? kernel_size : kernel_h);
                unsigned int stride = poolLayer->GetParamAsUInt("stride", 1u);
                unsigned int stride_w = poolLayer->GetParamAsUInt("stride_w", 0u);
                unsigned int stride_h = poolLayer->GetParamAsUInt("stride_h", 0u);
                poolLayer->_stride.insert(InferenceEngine::X_AXIS, stride_w == 0u ? stride : stride_w);
                poolLayer->_stride.insert(InferenceEngine::Y_AXIS, stride_h == 0u ? stride : stride_h);
                unsigned int pad = poolLayer->GetParamAsUInt("pad", 0u);
                unsigned int pad_w = poolLayer->GetParamAsUInt("pad_w", 0u);
                unsigned int pad_h = poolLayer->GetParamAsUInt("pad_h", 0u);
                poolLayer->_padding.insert(InferenceEngine::X_AXIS, pad_w == 0u ? pad : pad_w);
                poolLayer->_padding.insert(InferenceEngine::Y_AXIS, pad_h == 0u ? pad : pad_h);
                poolLayer->_pads_end.insert(InferenceEngine::X_AXIS, 0u);
                poolLayer->_pads_end.insert(InferenceEngine::Y_AXIS, 0u);
            } catch (...) {
                // Best effort: a custom layer is allowed to omit any of these attributes.
            }
            std::string alg = poolLayer->GetParamAsString("pool", "caffe.PoolingParameter.MAX");
            poolLayer->_type = alg == "caffe.PoolingParameter.MAX" ? InferenceEngine::PoolingLayer::MAX : InferenceEngine::PoolingLayer::AVG;
        } else /** Default behavior */ {
            poolLayer->_kernel.insert(InferenceEngine::X_AXIS, poolLayer->GetParamAsUInt("kernel-x"));
            poolLayer->_kernel.insert(InferenceEngine::Y_AXIS, poolLayer->GetParamAsUInt("kernel-y"));
            poolLayer->_stride.insert(InferenceEngine::X_AXIS, poolLayer->GetParamAsUInt("stride-x", 1u));
            poolLayer->_stride.insert(InferenceEngine::Y_AXIS, poolLayer->GetParamAsUInt("stride-y", 1u));
            // TODO: maybe just throw exception, why do we change IR?
            if (0 == poolLayer->_stride[InferenceEngine::X_AXIS]) {
                poolLayer->_stride[InferenceEngine::X_AXIS] = 1u;
                printf("Warning! in layer %s: Stride x is 0, setting to 1 ", poolLayer->name.c_str());
            }
            if (0 == poolLayer->_stride[InferenceEngine::Y_AXIS]) {
                poolLayer->_stride[InferenceEngine::Y_AXIS] = 1u;
                printf("Warning! in layer %s: Stride y is 0, setting to 1", poolLayer->name.c_str());
            }
            poolLayer->_padding.insert(InferenceEngine::X_AXIS, poolLayer->GetParamAsUInt("pad-x", 0u));
            poolLayer->_padding.insert(InferenceEngine::Y_AXIS, poolLayer->GetParamAsUInt("pad-y", 0u));
            // pads_end default to pads_begin when "pad-r"/"pad-b" are absent.
            poolLayer->_pads_end.insert(InferenceEngine::X_AXIS, poolLayer->GetParamAsUInt("pad-r", poolLayer->_padding[InferenceEngine::X_AXIS]));
            poolLayer->_pads_end.insert(InferenceEngine::Y_AXIS, poolLayer->GetParamAsUInt("pad-b", poolLayer->_padding[InferenceEngine::Y_AXIS]));
            // TODO: All kind of pool methods
            poolLayer->_exclude_pad = poolLayer->GetParamAsBool("exclude-pad", false);
            std::string alg = poolLayer->GetParamAsString("pool-method", "max");
            poolLayer->_type = alg == "avg" ? InferenceEngine::PoolingLayer::AVG : InferenceEngine::PoolingLayer::MAX;
            if (alg != "max" && alg != "avg") {
                IE_THROW() << "Layer has incorrect pool-type!";
            }
        }
    } else {
        // Vector form: attributes are stored in IR axis order; inserting
        // kernels[size - i] at index i-1 reverses them so index 0 is the innermost axis.
        for (size_t i = 1; i <= kernels.size(); i++) {
            poolLayer->_kernel.insert(i - 1, kernels[kernels.size() - i]);
        }
        std::vector<unsigned int> default_0 = std::vector<unsigned int> (poolLayer->_kernel.size(), 0u);
        std::vector<unsigned int> default_1 = std::vector<unsigned int> (poolLayer->_kernel.size(), 1u);
        std::vector<unsigned int> strides = poolLayer->GetParamAsUInts("strides", default_1);
        for (size_t i = 1; i <= strides.size(); i++) {
            if (strides[strides.size() - i] == 0) {
                IE_THROW() << "Stride could not be 0.\nIn layer " << poolLayer->name;
            }
            poolLayer->_stride.insert(i - 1, strides[strides.size() - i]);
        }
        std::vector<unsigned int> pads_begin = poolLayer->GetParamAsUInts("pads_begin", default_0);
        for (size_t i = 1; i <= pads_begin.size(); i++) {
            poolLayer->_padding.insert(i - 1, pads_begin[pads_begin.size() - i]);
        }
        // pads_end default to pads_begin when not specified.
        std::vector<unsigned int> pads_end = poolLayer->GetParamAsUInts("pads_end", pads_begin);
        for (size_t i = 1; i <= pads_end.size(); i++) {
            poolLayer->_pads_end.insert(i - 1, pads_end[pads_end.size() - i]);
        }
        poolLayer->_exclude_pad = poolLayer->GetParamAsBool("exclude-pad", false);
        std::string alg = poolLayer->GetParamAsString("pool-method", "max");
        poolLayer->_type = alg == "avg" ? InferenceEngine::PoolingLayer::AVG : InferenceEngine::PoolingLayer::MAX;
        if (alg != "max" && alg != "avg") {
            // Fixed copy-paste in the message: this validates pool-method ("pool-type"),
            // matching the identical check in the legacy scalar branch above.
            IE_THROW() << "Layer has incorrect pool-type!";
        }
    }
    // TODO: checks for presence of all required attributes, and that there's no extraneous parameters only.
}
// Reference 2D pooling (max or average) for FP32 blobs, NCHW layout only.
// For every output element, scans its kernel window over the input, skipping
// out-of-bounds taps; max-pooling keeps the running maximum, average-pooling
// accumulates a sum and divides by the effective window area afterwards
// (clipped to the input when exclude_pad is set, otherwise clipped to
// input + padding).
template<>
void ref_pool_common<float>(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
                            const CommonTestUtils::pool_common_params &p) {
    if (srcs[0]->getTensorDesc().getLayout() != Layout::NCHW)
        // Fixed copy-paste in the message: this is the pooling reference, not convolution.
        IE_THROW() << "Reference FP32 pooling supports NCHW layout only";
    size_t KW = p.kernel[X_AXIS];
    size_t KH = p.kernel[Y_AXIS];
    size_t SH = p.stride[Y_AXIS];
    size_t SW = p.stride[X_AXIS];
    // Pads stay signed: coordinates below zero denote "inside the padding".
    int PH = p.pads_begin[Y_AXIS];
    int PW = p.pads_begin[X_AXIS];
    int32_t IW, IH, IC, OW, OH, OC;
    CommonTestUtils::get_common_dims(*srcs[0], IW, IH, IC);
    CommonTestUtils::get_common_dims(dst, OW, OH, OC);
    const auto *src_data = srcs[0]->cbuffer().as<const float *>();
    auto *dst_data = dst.buffer().as<float *>();
    IE_ASSERT(Layout::NCHW == dst.getTensorDesc().getLayout());
    IE_ASSERT(4 == dst.getTensorDesc().getDims().size());
    IE_ASSERT(OC == dst.getTensorDesc().getDims()[1]);
    for (size_t c = 0; c < OC; c++) {
        for (size_t oh = 0; oh < OH; oh++) {
            for (size_t ow = 0; ow < OW; ow++) {
                size_t oidx = c * OH * OW + oh * OW + ow;
                // Average starts from 0 (sum); max starts from -FLT_MAX.
                float out_ref = p.avg ? float(0) : -FLT_MAX;
                for (uint32_t kh = 0; kh < KH; kh++) {
                    for (uint32_t kw = 0; kw < KW; kw++) {
                        int32_t iw = ow * SW - PW + kw;
                        int32_t ih = oh * SH - PH + kh;
                        // Taps falling into padding contribute nothing to the sum/max.
                        if (iw < 0 || iw >= IW || ih < 0
                            || ih >= IH)
                            continue;
                        uint32_t iidx = c * IH * IW + ih * IW + iw;
                        float d = src_data[iidx];
                        out_ref = p.avg ? out_ref + d : std::max(out_ref, d);
                    }
                }
                if (p.avg) {
                    // Effective window: clipped to the input when exclude_pad,
                    // otherwise allowed to extend into the padding region.
                    int w_beg = ow * SW - PW;
                    int w_end = w_beg + KW;
                    int h_beg = oh * SH - PH;
                    int h_end = h_beg + KH;
                    w_beg = p.exclude_pad ? std::max<int>(w_beg, 0) : std::max<int>(w_beg, -PW);
                    h_beg = p.exclude_pad ? std::max<int>(h_beg, 0) : std::max<int>(h_beg, -PH);
                    w_end = p.exclude_pad ? std::min<int>(w_end, IW) : std::min<int>(w_end, IW + PW);
                    h_end = p.exclude_pad ? std::min<int>(h_end, IH) : std::min<int>(h_end, IH + PH);
                    out_ref /= (h_end - h_beg) * (w_end - w_beg);
                }
                dst_data[oidx] = out_ref;
            }
        }
    }
}
// Reference 2D pooling (max or average) for FP16 blobs.
// Unlike the FP32 specialization this one is batch-aware (N dimension) and
// indexes channel-innermost: idx = c + C * (x + W * (y + H * n)).
// Arithmetic is done in float after f16->f32 conversion; the result is
// converted back with f32tof16.
template<>
void ref_pool_common<ie_fp16>(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                        Blob &dst,
                        const CommonTestUtils::pool_common_params &p) {
    const auto *src_data = srcs[0]->cbuffer().as<const ie_fp16 *>();
    auto *dst_data = dst.buffer().as<ie_fp16 *>();
    ASSERT_NE(src_data, nullptr);
    ASSERT_NE(dst_data, nullptr);
    int32_t IW = 0;
    int32_t IH = 0;
    int32_t IC = 0;
    int32_t I_N = 0;
    int32_t OW = 0;
    int32_t OH = 0;
    int32_t OC = 0;
    int32_t ON = 0;
    // from myriad_tests
    // Extracts up to 4 trailing dims as (x, y, z, n); missing leading dims
    // default to 1, missing trailing dims to 0.
    auto get_dims = [](const InferenceEngine::Blob &blob,
                       int32_t &dimx,
                       int32_t &dimy,
                       int32_t &dimz,
                       int32_t &dimn) {
        auto dims = blob.getTensorDesc().getDims();
        auto dims_size = dims.size();
        dimn = (dims_size >= 4) ? dims[dims_size - 4] : 1;
        dimz = (dims_size >= 3) ? dims[dims_size - 3] : 1;
        dimy = (dims_size >= 2) ? dims[dims_size - 2] : 0;
        dimx = (dims_size >= 1) ? dims[dims_size - 1] : 0;
    };
    get_dims(*srcs[0], IW, IH, IC, I_N);
    get_dims(dst, OW, OH, OC, ON);
    // Pooling preserves channel count and batch size.
    ASSERT_EQ(IC, OC);
    ASSERT_EQ(I_N, ON);
    /* to align with Caffe */
    for (int32_t n = 0; n < ON; n++) {
        for (int32_t c = 0; c < OC; c++) {
            for (int32_t oh = 0; oh < OH; oh++) {
                for (int32_t ow = 0; ow < OW; ow++) {
                    size_t oidx = c + OC * (ow + OW * (oh + OH * n));
                    float out_ref = 0.0f;
                    // Max-pooling seeds from the first in-bounds tap rather
                    // than from -inf, tracked via is_initialized.
                    bool is_initialized = false;
                    size_t count = 0;  // number of in-bounds taps (avg divisor)
                    for (uint32_t kh = 0; kh < p.kernel[Y_AXIS]; kh++) {
                        for (uint32_t kw = 0; kw < p.kernel[X_AXIS]; kw++) {
                            int32_t iw = ow * p.stride[X_AXIS] - p.pads_begin[X_AXIS] + kw;
                            int32_t ih = oh * p.stride[Y_AXIS] - p.pads_begin[Y_AXIS] + kh;
                            if (iw < 0 || iw >= IW || ih < 0 || ih >= IH)
                                continue;
                            size_t iidx = c + IC * (iw + IW * (ih + IH * n));
                            float d = PrecisionUtils::f16tof32(src_data[iidx]);
                            if (p.avg) {
                                out_ref += d;
                                count++;
                            } else {
                                if (!is_initialized) {
                                    out_ref = d;
                                    is_initialized = true;
                                } else {
                                    if (out_ref < d)
                                        out_ref = d;
                                }
                            }
                        }
                    }
                    if (p.avg) {
                        // With padding and !exclude_pad, Caffe divides by the
                        // full kernel area; otherwise by the in-bounds tap count.
                        if ((p.pads_begin[X_AXIS] || p.pads_begin[Y_AXIS]) && !p.exclude_pad) {
                            out_ref /= (p.kernel[Y_AXIS] * p.kernel[X_AXIS]);
                        } else
                            out_ref /= count;
                    }
                    dst_data[oidx] = PrecisionUtils::f32tof16(out_ref);
                }
            }
        }
    }
}

View File

@@ -1,40 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cfloat>
#include <ie_blob.h>
#include <gtest/gtest.h>
#include <legacy/ie_layers_internal.hpp>
#include "common_layers_params.hpp"
template<typename data_t>
void ref_pool_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
InferenceEngine::Blob &dst,
const CommonTestUtils::pool_common_params &p);
void Pool_parseParams(InferenceEngine::CNNLayer* layer);
// Reference-pooling entry point: builds a PoolingLayer from a raw string
// attribute map, resolves its normalized parameters (including auto_pad-aware
// paddings via getPaddings) and dispatches to the precision-specific
// ref_pool_common implementation.
template<typename data_t>
void common_ref_pool_wrap(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob::Ptr &dst,
                          const std::map<std::string, std::string> &params_map) {
    InferenceEngine::LayerParams layerParams{};
    InferenceEngine::PoolingLayer layer(layerParams);
    layer.params = params_map;
    // The layer needs its input descriptor attached before parsing/padding resolution.
    layer.insData.push_back(std::make_shared<InferenceEngine::Data>("insData", srcs[0]->getTensorDesc()));
    Pool_parseParams(&layer);
    const auto paddings = InferenceEngine::getPaddings(layer);
    CommonTestUtils::pool_common_params refParams;
    refParams.kernel = layer._kernel;
    refParams.pads_begin = paddings.begin;
    refParams.pads_end = paddings.end;
    refParams.stride = layer._stride;
    refParams.avg = layer._type == InferenceEngine::PoolingLayer::PoolType::AVG;
    refParams.exclude_pad = layer._exclude_pad;
    ref_pool_common<data_t>(srcs, *dst.get(), refParams);
}

View File

@@ -1,500 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gtest/gtest.h>
#include <tests_common.hpp>
#include <legacy/ie_layers_internal.hpp>
#include <legacy/details/ie_cnn_network_iterator.hpp>
#include <functional_test_utils/plugin_cache.hpp>
#include "single_layer_common.hpp"
#include "conv_ref.hpp"
#include "deconv_ref.hpp"
#include "def_conv_ref.hpp"
#include "pool_ref.hpp"
#include "single_layer_common.hpp"
#include "common_layers_params.hpp"
#include <xml_net_builder.hpp>
using namespace InferenceEngine;
// Per-plugin test configuration: which device runs the network, how blobs are
// laid out, at what precision, and the absolute tolerance for result comparison.
struct PluginDependentParam {
    std::string deviceName;                 // target device, e.g. "CPU"
    InferenceEngine::Layout layout;         // input/output blob layout
    InferenceEngine::Precision precision;   // network/blob precision
    float tolerance;                        // absolute comparison tolerance
};
// Abstract per-layer-type helper. A concrete helper knows how to serialize its
// layer attributes into IR params, how large the weight/bias payloads are, and
// how to compute a reference output in FP32 or FP16.
class LayerTestHelper {
protected:
    std::string type;  // IR layer type this helper describes (e.g. "Convolution")
public:
    using Ptr = std::shared_ptr<LayerTestHelper>;
    explicit LayerTestHelper(const std::string &_type) : type(_type) {}
    virtual ~LayerTestHelper() = default;
    LayerTestHelper() = default;
    // Re-reads (auto_pad-resolved) paddings from the possibly-reshaped network
    // back into the stored parameters.
    virtual void updatePaddingValues(const InferenceEngine::CNNNetwork &network) = 0;
    // Serializes the stored parameters into an IR attribute map.
    virtual std::map<std::string, std::string> getMapParams() const = 0;
    virtual size_t getWeightByteSize(size_t elementSize, size_t numChannels) const = 0;
    virtual size_t getBiasByteSize(size_t elementSize) const = 0;
    std::string getType() const { return type; }
    // FP32 reference computation; biases follow weights in the shared buffer.
    virtual void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                          InferenceEngine::Blob &dst,
                          const float *weights_data,
                          size_t weights_size,
                          const float *bias_data,
                          size_t bias_size) const = 0;
    // FP16 reference computation.
    virtual void ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs,
                          InferenceEngine::Blob &dst,
                          const InferenceEngine::ie_fp16 *weights_data,
                          size_t weights_size,
                          const InferenceEngine::ie_fp16 *bias_data,
                          size_t bias_size) const = 0;
    // Allocates a reference blob and fills it via ref_fp32/ref_fp16 depending
    // on the requested precision.
    InferenceEngine::Blob::Ptr getRefBlob(size_t weightSize, size_t biasSize,
                                          const InferenceEngine::TBlob<uint8_t>::Ptr &weights,
                                          const std::vector<InferenceEngine::Blob::Ptr> srcs,
                                          const InferenceEngine::TensorDesc &dstTensorDesc,
                                          const InferenceEngine::Precision &precision) const;
    // Serializes a PropertyVector as "v0,v1,..."; empty string for empty input.
    static std::string propertyToString(const InferenceEngine::PropertyVector<unsigned int> &propertyVector);
};
// Helper for "Convolution" layers: stores conv_common_params and delegates
// reference computation to ref_conv_common.
class ConvolutionTestHelper : public LayerTestHelper {
protected:
    CommonTestUtils::conv_common_params convParams;
public:
    explicit ConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams);
    void updatePaddingValues(const InferenceEngine::CNNNetwork &network) override;
    std::map<std::string, std::string> getMapParams() const override;
    size_t getWeightByteSize(size_t elementSize, size_t numChannels) const override;
    size_t getBiasByteSize(size_t elementSize) const override;
    void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data,
                  size_t weights_size, const float *bias_data, size_t bias_size) const override;
    void
    ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data,
             size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override;
};
// Helper for "Deconvolution" layers: reuses the convolution attribute handling
// and overrides only the reference computation (ref_deconv_common).
class DeconvolutionTestHelper : public ConvolutionTestHelper {
public:
    explicit DeconvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams);
    void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data,
                  size_t weights_size, const float *bias_data, size_t bias_size) const override;
    void
    ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data,
             size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override;
};
// Helper for "DeformableConvolution" layers: extends the convolution helper
// with a deformable_group attribute and a separate def_conv_common_params copy.
class DeformableConvolutionTestHelper : public ConvolutionTestHelper {
protected:
    CommonTestUtils::def_conv_common_params defConvParams;
public:
    explicit DeformableConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams, const int deformable_group);
    void updatePaddingValues(const InferenceEngine::CNNNetwork &network) override;
    std::map<std::string, std::string> getMapParams() const override;
    void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data,
                  size_t weights_size, const float *bias_data, size_t bias_size) const override;
    void
    ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data,
             size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override;
};
// Helper for "Pooling" layers: no weights/biases (both sizes are 0); reference
// output is computed by ref_pool_common.
class PoolingTestHelper : public LayerTestHelper {
protected:
    CommonTestUtils::pool_common_params poolParams;
public:
    explicit PoolingTestHelper(const CommonTestUtils::pool_common_params &_poolParams);
    void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data,
                  size_t weights_size, const float *bias_data, size_t bias_size) const override;
    void
    ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data,
             size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override;
    std::map<std::string, std::string> getMapParams() const override;
    void updatePaddingValues(const InferenceEngine::CNNNetwork &network) override;
    size_t getWeightByteSize(size_t elementSize, size_t numChannels) const override;
    size_t getBiasByteSize(size_t elementSize) const override;
};
PRETTY_PARAM(InitialShapes, CommonTestUtils::InOutShapes)
PRETTY_PARAM(NewShapes, CommonTestUtils::InOutShapes)
PRETTY_PARAM(ConvParams, CommonTestUtils::conv_common_params)
PRETTY_PARAM(PluginParams, PluginDependentParam)
PRETTY_PARAM(Helper, LayerTestHelper::Ptr)
// Allocates a destination blob of the requested precision and fills it with
// the reference result. The weights buffer holds weights followed immediately
// by biases, so the bias pointer is weights + weightSize (in elements).
Blob::Ptr LayerTestHelper::getRefBlob(size_t weightSize, size_t biasSize,
                                      const TBlob<uint8_t>::Ptr &weights,
                                      const std::vector<InferenceEngine::Blob::Ptr> srcs,
                                      const TensorDesc &dstTensorDesc,
                                      const Precision &precision) const {
    if (precision == Precision::FP32) {
        auto refBlob = make_shared_blob<float>(dstTensorDesc);
        refBlob->allocate();
        const auto *w = weights->buffer().as<const float *>();
        ref_fp32(srcs, *refBlob.get(), w, weightSize, w + weightSize, biasSize);
        return refBlob;
    }
    // Anything non-FP32 is treated as FP16.
    auto refBlob = make_shared_blob<ie_fp16>(dstTensorDesc);
    refBlob->allocate();
    const auto *w = weights->buffer().as<const ie_fp16 *>();
    ref_fp16(srcs, *refBlob.get(), w, weightSize, w + weightSize, biasSize);
    return refBlob;
}
// Serializes a PropertyVector as a comma-separated list ("v0,v1,...").
// Returns an empty string for an empty vector.
std::string LayerTestHelper::propertyToString(const PropertyVector<unsigned int> &propertyVector) {
    if (!propertyVector.size()) return "";
    std::string result = std::to_string(propertyVector[0]);
    // size_t index: the old `int i` compared against size() triggered a
    // signed/unsigned comparison warning.
    for (size_t i = 1; i < propertyVector.size(); i++) {
        result += "," + std::to_string(propertyVector[i]);
    }
    return result;
}
// Stores the convolution parameters and tags the helper with the "Convolution" IR type.
ConvolutionTestHelper::ConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams) : LayerTestHelper("Convolution"), convParams(_convParams) {}
// Locates the convolution layer in the network (matched by type) and copies
// its resolved paddings back into convParams so the reference computation
// matches what the plugin will execute after reshape.
void ConvolutionTestHelper::updatePaddingValues(const CNNNetwork &network) {
    details::CNNNetworkIterator i(network), end;
    auto found = std::find_if(i, end, [this](const CNNLayer::Ptr &layer) {
        return layer->type == type;
    });
    ASSERT_NE(found, end);
    auto castedLayer = std::dynamic_pointer_cast<ConvolutionLayer>(*found);
    auto allPad = getPaddings(*castedLayer.get());
    convParams.pads_end = allPad.end;
    convParams.pads_begin = allPad.begin;
}
std::map<std::string, std::string> ConvolutionTestHelper::getMapParams() const {
std::map<std::string, std::string> params;
if (!convParams.auto_pad.empty()) {
params["auto_pad"] = convParams.auto_pad;
}
params["group"] = std::to_string(convParams.group);
params["output"] = std::to_string(convParams.out_c);
auto propertyToString = [](const PropertyVector<unsigned int> &propertyVector) -> std::string {
if (!propertyVector.size()) return "";
std::string result = std::to_string(propertyVector[0]);
for (int i = 1; i < propertyVector.size(); i++) {
result += "," + std::to_string(propertyVector[i]);
}
return result;
};
params["kernel"] = propertyToString(convParams.kernel);
params["strides"] = propertyToString(convParams.stride);
params["pads_begin"] = propertyToString(convParams.pads_begin);
params["pads_end"] = propertyToString(convParams.pads_end);
params["dilations"] = propertyToString(convParams.dilation);
return params;
}
// Weight bytes = kernel area * output channels * input channels * element size,
// divided by the group count (grouped convolution shares no weights across groups).
size_t ConvolutionTestHelper::getWeightByteSize(size_t elementSize, size_t numChannels) const {
    return (convParams.kernel[X_AXIS] * convParams.kernel[Y_AXIS] * convParams.out_c * numChannels * elementSize)
           / convParams.group;
}
// One bias element per output channel.
size_t ConvolutionTestHelper::getBiasByteSize(size_t elementSize) const { return convParams.out_c * elementSize; }
// FP32 reference: forwards to the shared convolution reference implementation.
void
ConvolutionTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst, const float *weights_data,
                                size_t weights_size, const float *bias_data, size_t bias_size) const {
    ref_conv_common<>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams);
}
// FP16 reference: same shared implementation, fp16 instantiation.
void ConvolutionTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
                                     const ie_fp16 *weights_data, size_t weights_size,
                                     const ie_fp16 *bias_data, size_t bias_size) const {
    ref_conv_common<>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams);
}
// Reuses convolution attribute handling; only the IR type tag changes.
DeconvolutionTestHelper::DeconvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams) : ConvolutionTestHelper(
        _convParams) {
    type = "Deconvolution";
}
// FP32 reference: forwards to the shared deconvolution reference.
void
DeconvolutionTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
                                  const float *weights_data,
                                  size_t weights_size, const float *bias_data, size_t bias_size) const {
    ref_deconv_common<float>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams);
}
// FP16 reference.
void DeconvolutionTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
                                       const ie_fp16 *weights_data, size_t weights_size,
                                       const ie_fp16 *bias_data, size_t bias_size) const {
    ref_deconv_common<ie_fp16>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams);
}
// Copies the convolution parameters into the deformable-specific param struct
// and records the deformable group count.
// Fix: the base class is listed (and initialized) before the member — the old
// list order (member before base) was misleading and triggers -Wreorder; the
// member is now initialized directly from the argument instead of from the
// base's convParams copy (identical value, since the base ctor just copies it).
DeformableConvolutionTestHelper::DeformableConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams,
                                                                 const int deformable_group) :
        ConvolutionTestHelper(_convParams), defConvParams(_convParams) {
    defConvParams.deformable_group = deformable_group;
    type = "DeformableConvolution";
}
// FP32 reference: forwards to the shared deformable-convolution reference.
void DeformableConvolutionTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
                                     const float *weights_data,
                                     size_t weights_size, const float *bias_data, size_t bias_size) const {
    ref_def_conv_common<float>(srcs, dst, weights_data, weights_size, bias_data, bias_size, defConvParams);
}
// FP16 reference.
void DeformableConvolutionTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
                                     const ie_fp16 *weights_data, size_t weights_size,
                                     const ie_fp16 *bias_data, size_t bias_size) const {
    ref_def_conv_common<ie_fp16>(srcs, dst, weights_data, weights_size, bias_data, bias_size, defConvParams);
}
// Copies the resolved paddings of the DeformableConvolution layer back into
// defConvParams after a reshape.
// NOTE(review): the layer is downcast to ConvolutionLayer — assumes the legacy
// API models DeformableConvolution as a ConvolutionLayer subtype; confirm.
void DeformableConvolutionTestHelper::updatePaddingValues(const CNNNetwork &network) {
    details::CNNNetworkIterator i(network), end;
    auto found = std::find_if(i, end, [this](const CNNLayer::Ptr &layer) {
        return layer->type == type;
    });
    ASSERT_NE(found, end);
    auto castedLayer = std::dynamic_pointer_cast<ConvolutionLayer>(*found);
    auto allPad = getPaddings(*castedLayer.get());
    defConvParams.pads_end = allPad.end;
    defConvParams.pads_begin = allPad.begin;
}
std::map<std::string, std::string> DeformableConvolutionTestHelper::getMapParams() const {
std::map<std::string, std::string> params;
if (!defConvParams.auto_pad.empty()) {
params["auto_pad"] = defConvParams.auto_pad;
}
params["group"] = std::to_string(defConvParams.group);
params["output"] = std::to_string(defConvParams.out_c);
params["deformable_group"] = std::to_string(defConvParams.deformable_group);
auto propertyToString = [](const PropertyVector<unsigned int> &propertyVector) -> std::string {
if (!propertyVector.size()) return "";
std::string result = std::to_string(propertyVector[0]);
for (int i = 1; i < propertyVector.size(); i++) {
result += "," + std::to_string(propertyVector[i]);
}
return result;
};
params["kernel"] = propertyToString(defConvParams.kernel);
params["strides"] = propertyToString(defConvParams.stride);
params["pads_begin"] = propertyToString(defConvParams.pads_begin);
params["pads_end"] = propertyToString(defConvParams.pads_end);
params["dilations"] = propertyToString(defConvParams.dilation);
return params;
}
// Stores the pooling parameters and tags the helper with the "Pooling" IR type.
PoolingTestHelper::PoolingTestHelper(const CommonTestUtils::pool_common_params &_poolParams) : LayerTestHelper("Pooling"),
                                                                                              poolParams(_poolParams) {
}
std::map<std::string, std::string> PoolingTestHelper::getMapParams() const {
std::map<std::string, std::string> params;
if (!poolParams.auto_pad.empty()) {
params["auto_pad"] = poolParams.auto_pad;
}
params["kernel"] = propertyToString(poolParams.kernel);
params["strides"] = propertyToString(poolParams.stride);
auto padStr = propertyToString(poolParams.pads_begin);
if (!padStr.empty()) params["pads_begin"] = padStr;
padStr = propertyToString(poolParams.pads_end);
if (!padStr.empty()) params["pads_end"] = padStr;
params["exclude-pad"] = poolParams.exclude_pad ? "true" : "false";
params["pool-method"] = poolParams.avg ? "avg" : "max";
return params;
}
// FP32 reference: pooling has no weights/biases; those arguments are ignored.
void
PoolingTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
                            const float *weights_data, size_t weights_size,
                            const float *bias_data, size_t bias_size) const {
    ref_pool_common<float>(srcs, dst, poolParams);
}
// FP16 reference; weights/biases likewise ignored.
void PoolingTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
                                 const ie_fp16 *weights_data, size_t weights_size,
                                 const ie_fp16 *bias_data, size_t bias_size) const {
    ref_pool_common<ie_fp16>(srcs, dst, poolParams);
}
// Copies the resolved paddings of the pooling layer back into poolParams
// after a reshape, mirroring the convolution helpers.
void PoolingTestHelper::updatePaddingValues(const InferenceEngine::CNNNetwork &network) {
    details::CNNNetworkIterator i(network), end;
    auto found = std::find_if(i, end, [this](const CNNLayer::Ptr &layer) {
        return layer->type == type;
    });
    ASSERT_NE(found, end);
    auto castedLayer = std::dynamic_pointer_cast<PoolingLayer>(*found);
    auto allPad = getPaddings(*castedLayer.get());
    poolParams.pads_end = allPad.end;
    poolParams.pads_begin = allPad.begin;
}
// Pooling layers carry no weights.
size_t PoolingTestHelper::getWeightByteSize(size_t elementSize, size_t numChannels) const {
    return 0;
}
// Pooling layers carry no biases.
size_t PoolingTestHelper::getBiasByteSize(size_t elementSize) const {
    return 0;
}
// Parameterized fixture for single-layer reshape tests: builds a one-layer IR
// network (via the per-layer helper), reshapes it to new dimensions, then runs
// inference and compares against the helper's reference implementation.
class CommonSingleLayerTest
        : public testing::WithParamInterface<std::tuple<InitialShapes, NewShapes, PluginParams, Helper>>,
          public ::testing::Test {
protected:
    void SetUp() override {
        auto params = GetParam();
        initialShapes = std::get<0>(params);
        newShapes = std::get<1>(params);
        pluginParams = std::get<2>(params);
        layerHelper = std::get<3>(params);
        // Drop any cached plugin so each test case loads a fresh one.
        PluginCache::get().reset();
    }
    // Builds the reshape request: pairs each network input with the
    // corresponding new dimensions (counts must match).
    ICNNNetwork::InputShapes
    setInputShapes(CNNNetwork &network, const std::vector<SizeVector> &dims) {
        auto inputShapes = network.getInputShapes();
        int i = 0;
        IE_ASSERT(inputShapes.size() == dims.size());
        for (auto &pair : inputShapes) {
            pair.second = dims[i++];
        }
        return inputShapes;
    }
    // Allocates one U8 blob holding weights followed by biases and fills it
    // with random data interpreted at the test precision.
    TBlob<uint8_t>::Ptr createWeights(size_t elementSize, size_t weightByteSize, size_t biasByteSize) const {
        TBlob<uint8_t>::Ptr weights = make_shared_blob<uint8_t>({Precision::U8, {weightByteSize + biasByteSize}, Layout::C});
        weights->allocate();
        BufferWrapper wrappedWeights(weights, this->pluginParams.precision);
        fill_data_common(wrappedWeights, weights->size() / elementSize);
        return weights;
    }
    // Thin wrapper over the shared single-layer IR builder (IR version 3 by default).
    template<int Version = 3>
    static InferenceEngine::CNNNetwork
    buildSingleLayerNetwork(const std::string &layerType,
                            const CommonTestUtils::InOutShapes &inOutShapes,
                            std::map<std::string, std::string> *params,
                            const std::string &layerDataName = "data",
                            const Precision &precision = Precision::FP32,
                            size_t weightsSize = 0,
                            size_t biasesSize = 0,
                            const TBlob<uint8_t>::Ptr &weights = nullptr) {
        return buildSingleLayerNetworkCommon<Version>(layerType, inOutShapes, params, layerDataName, precision,
                                                      weightsSize, biasesSize, weights);
    }
protected:
    CommonTestUtils::InOutShapes initialShapes;  // shapes the network is built with
    CommonTestUtils::InOutShapes newShapes;      // shapes requested via reshape()
    PluginDependentParam pluginParams;
    LayerTestHelper::Ptr layerHelper;
    InputInfo::Ptr inputData;
    std::string inputName;
    InputInfo::Ptr transData;   // second input (DeformableConvolution offsets)
    std::string transName;
    DataPtr outputData;
    std::string outputName;
};
// End-to-end scenario: build a one-layer network with the initial shapes,
// reshape it to the new shapes, infer on the plugin, and compare the output
// against the helper's reference implementation within the plugin tolerance.
TEST_P(CommonSingleLayerTest, inferAfterReshape) {
    Core ie;
    auto params = layerHelper->getMapParams();
    size_t elementSize = Precision(pluginParams.precision).size();
    // Reshape must not change the channel count — weights are sized for it.
    ASSERT_EQ(initialShapes.inDims[0][1], newShapes.inDims[0][1]);
    size_t numChannels = initialShapes.inDims[0][1];
    size_t weightByteSize = layerHelper->getWeightByteSize(elementSize, numChannels);
    size_t biasByteSize = layerHelper->getBiasByteSize(elementSize);
    auto weights = createWeights(elementSize, weightByteSize, biasByteSize);
    auto network = buildSingleLayerNetwork<3>(layerHelper->getType(), initialShapes, &params, "data",
                                              pluginParams.precision, weightByteSize, biasByteSize, weights);
    std::tie(inputName, inputData) = (*network.getInputsInfo().begin());
    inputData->setPrecision(pluginParams.precision);
    inputData->setLayout(pluginParams.layout);
    std::tie(outputName, outputData) = (*network.getOutputsInfo().begin());
    outputData->setPrecision(pluginParams.precision);
    outputData->setLayout(pluginParams.layout);
    if (layerHelper->getType() == "DeformableConvolution") {
        // Deformable convolution takes a second input with transformation offsets.
        std::tie(transName, transData) = (*network.getInputsInfo().find("Input1"));
        transData->setPrecision(pluginParams.precision);
        transData->setLayout(pluginParams.layout);
    }
    auto inputShapes = setInputShapes(network, newShapes.inDims);
    network.reshape(inputShapes);
    // auto_pad-dependent paddings may change after reshape; resync the reference params.
    layerHelper->updatePaddingValues(network);
    auto exeNetwork = ie.LoadNetwork(network, pluginParams.deviceName);
    auto request = exeNetwork.CreateInferRequest();
    auto src = request.GetBlob(inputName);
    GenRandomDataCommon(src);
    size_t weights_size = weightByteSize / elementSize;
    size_t biases_size = biasByteSize / elementSize;
    if (layerHelper->getType() == "DeformableConvolution") {
        auto trans = request.GetBlob(transName);
        GenRandomDataCommon(trans);
        request.Infer();
        auto dst = request.GetBlob(outputName);
        Blob::Ptr dst_ref = layerHelper->getRefBlob(weights_size, biases_size, weights, { src, trans },
                                                    dst->getTensorDesc(), pluginParams.precision);
        CompareCommonAbsolute(dst, dst_ref, pluginParams.tolerance);
        // Removed three BufferWrapper locals (src_ptr/trans_ptr/dst_ptr) that were
        // constructed here and never read — leftover debugging scaffolding.
    } else {
        request.Infer();
        auto dst = request.GetBlob(outputName);
        Blob::Ptr dst_ref = layerHelper->getRefBlob(weights_size, biases_size, weights, { src },
                                                    dst->getTensorDesc(), pluginParams.precision);
        CompareCommonAbsolute(dst, dst_ref, pluginParams.tolerance);
    }
}

View File

@@ -1,331 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <string>
#include <gtest/gtest.h>
#include <cpp/ie_infer_request.hpp>
#include <blob_factory.hpp>
#include <ie_algorithm.hpp>
#include <precision_utils.h>
#include "plg_test.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
// Parameters of a TensorIterator test case.
struct ti_test_params {
    std::string device_name;               // target device name
    std::size_t tensorSize;                // chunk size processed per TI iteration (_CHUNK_SIZE_)
    InferenceEngine::Precision precision;  // network precision substituted for _PRC_
};
// Fills the entire blob with a single float value, allocating it first if its
// backing memory is not yet available.
static void setValuesInBlob(Blob::Ptr blob, float value) {
    auto dims = blob->getTensorDesc().getDims();
    auto output_size = details::product(std::begin(dims), std::end(dims));
    std::vector<float> values_vector(output_size, value);
    if (!blob->is<MemoryBlob>())
        IE_THROW() << "Only MemoryBlob is expected here";
    auto m_blob = blob->as<MemoryBlob>();
    // NOTE(review): assumes wmap() on an unallocated blob yields a null mapping
    // rather than throwing — confirm against the MemoryBlob contract.
    if (m_blob->wmap().as<void*>() == nullptr)
        blob->allocate();
    CopyVectorToBlob(blob, values_vector);
}
/*
______________main_ti__________________
in1 --|~~ iter -> add -> plus_one -> next1 |
in2 --|~~ prev1 -> add -> out_iter ~~~~~~~~~~~|-- out1
---------------------------------------
*/
// TensorIterator smoke test: an IR-v5 network whose TI body sums the sliced
// input chunk with a back-edge state, then adds one via a Power layer. The
// template placeholders (_PRC_, _IN_, _INPUT_SIZE_, _CHUNK_SIZE_) are filled
// in by getModel() from the test parameters.
class TITestBase: public PlgTest<ti_test_params> {
    std::string model_t = R"V0G0N(
<net batch="1" name="frozen" version="5">
    <layers>
        <layer id="0" name="in1" precision="_PRC_" type="Input">
            <output>
                <port id="0">
                    <dim>_IN_</dim>
                    <dim>_INPUT_SIZE_</dim>
                </port>
            </output>
        </layer>
        <layer id="1" name="in2" precision="_PRC_" type="Input">
            <output>
                <port id="0">
                    <dim>_IN_</dim>
                    <dim>_CHUNK_SIZE_</dim>
                </port>
            </output>
        </layer>
        <layer id="2" name="main_ti" type="TensorIterator" precision="_PRC_">
            <input>
                <port id="0">
                    <dim>_IN_</dim>
                    <dim>_INPUT_SIZE_</dim>
                </port>
                <port id="1">
                    <dim>_IN_</dim>
                    <dim>_CHUNK_SIZE_</dim>
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>_IN_</dim>
                    <dim>_INPUT_SIZE_</dim>
                </port>
            </output>
            <port_map>
                <input external_port_id="0" internal_layer_id="0" internal_port_id="0" axis="1" stride="_CHUNK_SIZE_"/>
                <input external_port_id="1" internal_layer_id="0" internal_port_id="1"/>
                <output external_port_id="2" internal_layer_id="0" internal_port_id="2" axis="1" stride="_CHUNK_SIZE_"/>
            </port_map>
            <back_edges>
                <edge from-layer="1" from-port="1" to-layer="0" to-port="1"/>
            </back_edges>
            <body>
                <layers>
                    <layer id="0" name="add" precision="_PRC_" type="Eltwise">
                        <data operation="sum"/>
                        <input>
                            <port id="0">
                                <dim>_IN_</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                            <port id="1">
                                <dim>_IN_</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                        </input>
                        <output>
                            <port id="2">
                                <dim>_IN_</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                        </output>
                    </layer>
                    <layer id="1" name="plus_one" precision="_PRC_" type="Power">
                        <data scale="1" shift="1" power="1"/>
                        <input>
                            <port id="0">
                                <dim>_IN_</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                        </input>
                        <output>
                            <port id="1">
                                <dim>_IN_</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                        </output>
                    </layer>
                </layers>
                <edges>
                    <edge from-layer="0" from-port="2" to-layer="1" to-port="0"/>
                </edges>
            </body>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
        <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
    </edges>
</net>
)V0G0N";
    // Substitutes the placeholders: the input is iteration_count chunks of
    // tensorSize elements, sliced along axis 1 by the TI.
    std::string getModel(const ti_test_params & p) {
        std::string model = model_t;
        std::size_t iteration_count = 3;
        REPLACE_WITH_NUM(model, "_IN_", 1);
        // NOTE(review): the template defines no _IC_ placeholder, so this
        // replacement appears to be a no-op — confirm before reuse.
        REPLACE_WITH_NUM(model, "_IC_", 3);
        REPLACE_WITH_NUM(model, "_INPUT_SIZE_", iteration_count * p.tensorSize);
        REPLACE_WITH_NUM(model, "_CHUNK_SIZE_", p.tensorSize);
        REPLACE_WITH_STR(model, "_PRC_", p.precision.name());
        return model;
    }
protected:
    // Builds the model, loads it onto the device under the given config, fills
    // both inputs with 1.0f and runs one inference; any IE exception fails the test.
    void RunTITest(const std::map<std::string, std::string> & config = {}) {
        try {
            ti_test_params p = param();
            std::string model = getModel(p);
            Core ie;
            auto net = ie.ReadNetwork(model, Blob::CPtr());
            auto exec = ie.LoadNetwork(net, device_name, config);
            auto req = exec.CreateInferRequest();
            setValuesInBlob(req.GetBlob("in1"), 1.0f);
            setValuesInBlob(req.GetBlob("in2"), 1.0f);
            req.Infer();
        } catch (const InferenceEngine::Exception &e) {
            FAIL() << e.what();
        }
    }
};
using TITest = TITestBase;
// Disabled: the legacy TensorIterator handling this exercised was superseded
// by ngraph transformations, so the scenario is no longer validated here.
TEST_P(TITest, DISABLED_TestsWitUnusedOut) { RunTITest(); }
/*
  The TI body contains a constant-data placeholder: a Const layer whose
  output feeds one input of the per-iteration "add" operation.
    ______________main_ti__________________
in1 --|~~ iter -> add -> plus_one ~~~~~~~~~~~|-- out1
      |   const1 -> add                      |
       ---------------------------------------
 */
// Plugin test fixture: single-input network whose TensorIterator body sums
// each input chunk with a Const layer's data, then applies a Power layer
// that adds one ("plus_one").  Placeholders (_PRC_, _IN_, _INPUT_SIZE_,
// _CHUNK_SIZE_, _SZ_) are substituted in getModel().
class TITest2Base: public PlgTest<ti_test_params> {
    // IR v5 model template; parsed at runtime by Core::ReadNetwork, so the
    // raw-string content must stay exactly as written.
    std::string model_t = R"V0G0N(
<net batch="1" name="frozen" version="5">
    <layers>
        <layer id="0" name="in1" precision="_PRC_" type="Input">
            <output>
                <port id="0">
                    <dim>_IN_</dim>
                    <dim>_INPUT_SIZE_</dim>
                </port>
            </output>
        </layer>
        <layer id="1" name="main_ti" type="TensorIterator" precision="_PRC_">
            <input>
                <port id="0">
                    <dim>_IN_</dim>
                    <dim>_INPUT_SIZE_</dim>
                </port>
            </input>
            <output>
                <port id="1">
                    <dim>_IN_</dim>
                    <dim>_INPUT_SIZE_</dim>
                </port>
            </output>
            <port_map>
                <input external_port_id="0" internal_layer_id="1" internal_port_id="0" axis="1" stride="_CHUNK_SIZE_"/>
                <output external_port_id="1" internal_layer_id="2" internal_port_id="1" axis="1" stride="_CHUNK_SIZE_"/>
            </port_map>
            <body>
                <layers>
                    <layer id="0" name="const" precision="_PRC_" type="Const">
                        <output>
                            <port id="1">
                                <dim>1</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                        </output>
                        <blobs>
                            <custom offset="0" size="_SZ_"/>
                        </blobs>
                    </layer>
                    <layer id="1" name="add" precision="_PRC_" type="Eltwise">
                        <data operation="sum"/>
                        <input>
                            <port id="0">
                                <dim>_IN_</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                            <port id="1">
                                <dim>1</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                        </input>
                        <output>
                            <port id="2">
                                <dim>_IN_</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                        </output>
                    </layer>
                    <layer id="2" name="plus_one" precision="_PRC_" type="Power">
                        <data scale="1" shift="1" power="1"/>
                        <input>
                            <port id="0">
                                <dim>_IN_</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                        </input>
                        <output>
                            <port id="1">
                                <dim>_IN_</dim>
                                <dim>_CHUNK_SIZE_</dim>
                            </port>
                        </output>
                    </layer>
                </layers>
                <edges>
                    <edge from-layer="0" from-port="1" to-layer="1" to-port="1"/>
                    <edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
                </edges>
            </body>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
    </edges>
</net>
)V0G0N";
    // Build a concrete IR from the template above.  The input is split into
    // iteration_count chunks of tensorSize elements; _SZ_ is the Const
    // blob size in bytes (element size * tensorSize).
    std::string getModel(const ti_test_params& p) {
        std::string model = model_t;
        std::size_t iteration_count = 3;
        REPLACE_WITH_NUM(model, "_IN_", 1);
        REPLACE_WITH_NUM(model, "_INPUT_SIZE_", iteration_count * p.tensorSize);
        REPLACE_WITH_NUM(model, "_CHUNK_SIZE_", p.tensorSize);
        REPLACE_WITH_STR(model, "_PRC_", p.precision.name());
        REPLACE_WITH_NUM(model, "_SZ_", p.precision.size() * p.tensorSize);
        return model;
    }
protected:
    // Create the Const layer's weights blob (all ones), load the network on
    // the target plugin with the given config, fill the single input with
    // ones and run one inference.  IE exceptions become gtest failures.
    virtual void RunTITest(const std::map<std::string, std::string> & config = {}) {
        try {
            ti_test_params p = param();
            std::string model = getModel(p);
            // U8 blob of p.precision.size() * p.tensorSize bytes, backing the
            // <custom offset="0" size="_SZ_"/> entry of the Const layer.
            auto weights = make_shared_blob<uint8_t>(TensorDesc {Precision::U8, {p.precision.size() * p.tensorSize}, C});
            weights->allocate();
            if (p.precision == Precision::FP32) {
                std::vector<float> weights_vector(p.tensorSize, 1.0f);
                ie_memcpy(weights->buffer().as<float *>(), p.tensorSize * sizeof(float),
                          &weights_vector[0], p.tensorSize * sizeof(float));
            } else if (p.precision == Precision::FP16) {
                // FP16 case: fill with 1.0 converted to half precision.
                std::vector<ie_fp16> weights_vector(p.tensorSize, PrecisionUtils::f32tof16(1.0f));
                ie_memcpy(weights->buffer().as<ie_fp16 *>(), p.tensorSize * sizeof(ie_fp16),
                          &weights_vector[0], p.tensorSize * sizeof(ie_fp16));
            } else {
                // Only FP32/FP16 test precisions are supported by this fixture.
                ASSERT_TRUE(false);
            }
            Core ie;
            auto net = ie.ReadNetwork(model, weights);
            auto exec = ie.LoadNetwork(net, device_name, config);
            auto req = exec.CreateInferRequest();
            setValuesInBlob(req.GetBlob("in1"), 1.0f);
            req.Infer();
        } catch (const InferenceEngine::Exception &e) {
            FAIL() << e.what();
        }
    }
};
using TITest2 = TITest2Base;
// disabled due to transition to ngraph transformations
TEST_P(TITest2, DISABLED_TestsWitCopy) { RunTITest(); }