Revert "[IE TESTS] dynavic batch for mvn layer (#1010)" (#2256)

This reverts commit 2e3378c50f.
Author:       Gorokhov Dmitriy
Date:         2020-09-16 14:11:34 +03:00
Committed by: GitHub
Commit:       ebf009d1a1 (parent d604a03ac0)

8 changed files with 18 additions and 54 deletions


@@ -97,7 +97,7 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network
     MKLDNNGraph::ApplyUnrollPasses(static_cast<ICNNNetwork&>(*_clonedNetwork));
-    if (_cfg.enableDynamicBatch) {
+    if (_cfg.batchLimit > 1) {
         // check topology for applicability
         if (!CanProcessDynBatch(*_clonedNetwork)) {
             THROW_IE_EXCEPTION << "MKLDNNGraph::CreateGraph: such topology cannot be compiled for dynamic batch!";
@@ -279,8 +279,7 @@ bool MKLDNNExecNetwork::CanProcessDynBatch(const InferenceEngine::ICNNNetwork &n
             type != Eltwise &&
             type != Crop &&
             type != BatchNormalization &&
-            type != Copy &&
-            type != MVN) {
+            type != Copy) {
             check_result = false;
         }
     }, false);
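For orientation (not part of this diff): the applicability check above only runs when the caller asks for dynamic batching through the plugin configuration. A minimal sketch, assuming the 2020-era Inference Engine API, with a placeholder model path and batch sizes:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder path
    network.setBatchSize(8);                       // upper bound for the dynamic batch

    // Requesting dynamic batch is what routes the CPU plugin through checks
    // such as CanProcessDynBatch() above.
    auto execNet = core.LoadNetwork(network, "CPU",
        {{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED,
          InferenceEngine::PluginConfigParams::YES}});

    auto request = execNet.CreateInferRequest();
    request.SetBatch(3);  // process only the first 3 instances on this run
    request.Infer();
    return 0;
}
```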


@@ -990,8 +990,7 @@ void MKLDNNMVNNode::mvn_blk(const in_data_t* src_data, out_data_t* dst_data, con
     std::vector<float> mean_buffer(aux_buffer_size * threads_num);
     std::vector<float> variance_buffer(aux_buffer_size * threads_num);
-    int actual_N = batchToProcess();
-    for (size_t b = 0lu; b < actual_N; b++) {
+    for (size_t b = 0lu; b < N; b++) {
         size_t ccb = is_nhwc ? b * C2 : b * C3;
         if (across_channels) {
             // mean for this instance in batch
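The loop bound removed above (batchToProcess()) is what let the blocked MVN kernel stop after the batch requested at run time; with the revert it always walks the full N. A rough, self-contained sketch of that per-instance loop shape, with assumed names and a simplified mean-only normalization:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch of the per-instance batch loop the hunk above changes. `requested_batch`
// plays the role of batchToProcess(); all names and the math here are assumptions.
void mvn_like_loop(const std::vector<float>& src, std::vector<float>& dst,
                   size_t N, size_t C, size_t spatial, size_t requested_batch) {
    const size_t instance_size = C * spatial;
    const size_t batch_to_process = std::min(N, requested_batch);  // reverted code uses N directly
    for (size_t b = 0; b < batch_to_process; b++) {
        const float* in = src.data() + b * instance_size;
        float* out = dst.data() + b * instance_size;
        // compute the mean for this instance and subtract it (variance step omitted)
        float mean = 0.f;
        for (size_t i = 0; i < instance_size; i++) mean += in[i];
        mean /= static_cast<float>(instance_size);
        for (size_t i = 0; i < instance_size; i++) out[i] = in[i] - mean;
    }
}
```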


@@ -38,30 +38,14 @@ const std::vector<double> epsilon = {
     0.000000001
 };
-const std::vector<std::map<std::string, std::string>> Configs = {
-        {}
-};
 const auto MvnCases = ::testing::Combine(
         ::testing::ValuesIn(inputShapes),
         ::testing::Values(InferenceEngine::Precision::FP32),
         ::testing::ValuesIn(acrossChannels),
         ::testing::ValuesIn(normalizeVariance),
         ::testing::ValuesIn(epsilon),
-        ::testing::Values(CommonTestUtils::DEVICE_CPU),
-        ::testing::ValuesIn(Configs)
+        ::testing::Values(CommonTestUtils::DEVICE_CPU)
 );
 INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_TestsMVN, MvnLayerTest, MvnCases, MvnLayerTest::getTestCaseName);
-INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_MvnLayerCheckDynBatch, MvnLayerTest,
-                        ::testing::Combine(
-                            ::testing::Values(std::vector<size_t>({5, 8, 3, 5})),
-                            ::testing::Values(InferenceEngine::Precision::FP32),
-                            ::testing::ValuesIn(acrossChannels),
-                            ::testing::ValuesIn(normalizeVariance),
-                            ::testing::ValuesIn(epsilon),
-                            ::testing::Values(CommonTestUtils::DEVICE_CPU),
-                            ::testing::Values(std::map<std::string, std::string>({{CONFIG_KEY(DYN_BATCH_ENABLED), CONFIG_VALUE(YES)}}))),
-                        MvnLayerTest::getTestCaseName);
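Both instantiations rely on GoogleTest's value-parameterized tests: ::testing::Combine() builds the cross product of the generators and each resulting tuple becomes one test case. A stripped-down sketch of the mechanism with placeholder names (not the OpenVINO test classes):

```cpp
#include <gtest/gtest.h>
#include <map>
#include <string>
#include <tuple>

// Placeholder types; only the Combine/INSTANTIATE mechanics mirror the suite above.
using MyParams = std::tuple<int, std::map<std::string, std::string>>;

class MyLayerTest : public ::testing::TestWithParam<MyParams> {};

TEST_P(MyLayerTest, Smoke) {
    int value;
    std::map<std::string, std::string> config;
    std::tie(value, config) = GetParam();
    EXPECT_GE(value, 0);  // stand-in for the real Run()/comparison logic
}

INSTANTIATE_TEST_CASE_P(Example, MyLayerTest,
    ::testing::Combine(
        ::testing::Values(1, 2),
        ::testing::Values(std::map<std::string, std::string>{},
                          std::map<std::string, std::string>{{"DYN_BATCH_ENABLED", "YES"}})));
```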


@@ -38,19 +38,13 @@ const std::vector<double> epsilon = {
     0.000000001
 };
-const std::vector<std::map<std::string, std::string>> Configs = {
-        {}
-};
 const auto MvnCases = ::testing::Combine(
         ::testing::ValuesIn(inputShapes),
         ::testing::Values(InferenceEngine::Precision::FP32),
         ::testing::ValuesIn(acrossChannels),
         ::testing::ValuesIn(normalizeVariance),
         ::testing::ValuesIn(epsilon),
-        ::testing::Values(CommonTestUtils::DEVICE_GPU),
-        ::testing::ValuesIn(Configs)
+        ::testing::Values(CommonTestUtils::DEVICE_GPU)
 );
 INSTANTIATE_TEST_CASE_P(smoke_CLDNN_TestsMVN, MvnLayerTest, MvnCases, MvnLayerTest::getTestCaseName);


@@ -13,18 +13,17 @@
 namespace LayerTestsDefinitions {
 typedef std::tuple<
-        InferenceEngine::SizeVector,       // Input shapes
-        InferenceEngine::Precision,        // Input precision
-        bool,                              // Across channels
-        bool,                              // Normalize variance
-        double,                            // Epsilon
-        std::string,                       // Device name
-        std::map<std::string, std::string> // Config
-        > mvnParams;
+        InferenceEngine::SizeVector, // Input shapes
+        InferenceEngine::Precision,  // Input precision
+        bool,                        // Across channels
+        bool,                        // Normalize variance
+        double,                      // Epsilon
+        std::string> mvnParams;      // Device name
 class MvnLayerTest : public testing::WithParamInterface<mvnParams>, virtual public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<mvnParams> obj);
 protected:
     void SetUp() override;
 };
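The only difference between the two layouts is the trailing per-test plugin config map. A small sketch of building and unpacking the seven-field variant; std::string stands in for InferenceEngine::Precision so the example stays self-contained:

```cpp
#include <map>
#include <string>
#include <tuple>
#include <vector>

using SizeVector = std::vector<size_t>;
// Mirrors the seven-field variant being removed; field order must match the typedef.
using mvnParamsWithConfig = std::tuple<SizeVector, std::string, bool, bool, double,
                                       std::string, std::map<std::string, std::string>>;

int main() {
    mvnParamsWithConfig p{{5, 8, 3, 5}, "FP32", true, true, 1e-9, "CPU",
                          {{"DYN_BATCH_ENABLED", "YES"}}};

    SizeVector shapes;
    std::string precision, device;
    bool acrossChannels, normalizeVariance;
    double eps;
    std::map<std::string, std::string> config;
    // Unpacking order mirrors getTestCaseName()/SetUp() in the shared test class.
    std::tie(shapes, precision, acrossChannels, normalizeVariance, eps, device, config) = p;
    return config.count("DYN_BATCH_ENABLED") ? 0 : 1;
}
```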


@@ -27,8 +27,7 @@ std::string MvnLayerTest::getTestCaseName(testing::TestParamInfo<mvnParams> obj)
     bool acrossChannels, normalizeVariance;
     double eps;
     std::string targetDevice;
-    std::map<std::string, std::string> configuration;
-    std::tie(inputShapes, inputPrecision, acrossChannels, normalizeVariance, eps, targetDevice, configuration) = obj.param;
+    std::tie(inputShapes, inputPrecision, acrossChannels, normalizeVariance, eps, targetDevice) = obj.param;
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
     result << "Precision=" << inputPrecision.name() << "_";
@@ -36,11 +35,6 @@ std::string MvnLayerTest::getTestCaseName(testing::TestParamInfo<mvnParams> obj)
     result << "NormalizeVariance=" << (normalizeVariance ? "TRUE" : "FALSE") << "_";
     result << "Epsilon=" << eps << "_";
     result << "TargetDevice=" << targetDevice;
-    if (!configuration.empty()) {
-        for (auto& configItem : configuration) {
-            result << "configItem=" << configItem.first << "_" << configItem.second << "_";
-        }
-    }
     return result.str();
 }
@@ -49,7 +43,7 @@ void MvnLayerTest::SetUp() {
     InferenceEngine::Precision inputPrecision;
     bool acrossChanels, normalizeVariance;
     double eps;
-    std::tie(inputShapes, inputPrecision, acrossChanels, normalizeVariance, eps, targetDevice, configuration) = this->GetParam();
+    std::tie(inputShapes, inputPrecision, acrossChanels, normalizeVariance, eps, targetDevice) = this->GetParam();
     auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
     auto param = ngraph::builder::makeParams(inType, {inputShapes});
     auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(param));
@@ -60,5 +54,6 @@ void MvnLayerTest::SetUp() {
 TEST_P(MvnLayerTest, CompareWithRefs) {
     Run();
-}
+};
 } // namespace LayerTestsDefinitions


@@ -42,14 +42,7 @@ void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const
     const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();
     const auto &precision = actual->getTensorDesc().getPrecision();
-    auto bufferSize = actual->size();
-    // With dynamic batch, you need to size
-    if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED)) {
-        auto batchSize = actual->getTensorDesc().getDims()[0];
-        auto halfBatchSize = batchSize > 1 ? batchSize / 2 : 1;
-        bufferSize = (actual->size() * halfBatchSize / batchSize);
-    }
-    const auto &size = bufferSize;
+    const auto &size = actual->size();
     switch (precision) {
         case InferenceEngine::Precision::FP32:
             Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer),
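The removed block shrank the comparison range when dynamic batch was enabled, presumably because the test infers only half of the batch. Worked through with the 5x8x3x5 shape used by the removed CheckDynBatch instantiation (illustration only, not test code):

```cpp
#include <cstddef>
#include <iostream>

int main() {
    const size_t fullSize = 5 * 8 * 3 * 5;                            // actual->size() == 600
    const size_t batchSize = 5;                                       // dims[0]
    const size_t halfBatchSize = batchSize > 1 ? batchSize / 2 : 1;   // 2
    const size_t comparedSize = fullSize * halfBatchSize / batchSize; // 240 elements compared
    std::cout << comparedSize << "\n";
    return 0;
}
```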


@@ -113,6 +113,7 @@ protected:
     float threshold;
     InferenceEngine::CNNNetwork cnnNetwork;
     std::shared_ptr<InferenceEngine::Core> core;
+    virtual void Validate();
     virtual std::vector<std::vector<std::uint8_t>> CalculateRefs();