diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
index de08a67e73e..36f2c0ec669 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
@@ -97,7 +97,7 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network
 
     MKLDNNGraph::ApplyUnrollPasses(static_cast<ICNNNetwork&>(*_clonedNetwork));
 
-    if (_cfg.enableDynamicBatch) {
+    if (_cfg.batchLimit > 1) {
         // check topology for applicability
         if (!CanProcessDynBatch(*_clonedNetwork)) {
             THROW_IE_EXCEPTION << "MKLDNNGraph::CreateGraph: such topology cannot be compiled for dynamic batch!";
@@ -279,8 +279,7 @@ bool MKLDNNExecNetwork::CanProcessDynBatch(const InferenceEngine::ICNNNetwork &n
                 type != Eltwise &&
                 type != Crop &&
                 type != BatchNormalization &&
-                type != Copy &&
-                type != MVN) {
+                type != Copy) {
                 check_result = false;
             }
         }, false);
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
index 99e89c0f459..0605e71a103 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
@@ -990,8 +990,7 @@ void MKLDNNMVNNode::mvn_blk(const in_data_t* src_data, out_data_t* dst_data, con
     std::vector<float> mean_buffer(aux_buffer_size * threads_num);
     std::vector<float> variance_buffer(aux_buffer_size * threads_num);
 
-    int actual_N = batchToProcess();
-    for (size_t b = 0lu; b < actual_N; b++) {
+    for (size_t b = 0lu; b < N; b++) {
         size_t ccb = is_nhwc ? b * C2 : b * C3;
         if (across_channels) {
             // mean for this instance in batch
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp
index 30548fb1c76..cd789208ea5 100644
--- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp
@@ -38,30 +38,14 @@
 const std::vector<double> epsilon = {
     0.000000001
 };
 
-const std::vector<std::map<std::string, std::string>> Configs = {
-    {}
-};
-
 const auto MvnCases = ::testing::Combine(
     ::testing::ValuesIn(inputShapes),
     ::testing::Values(InferenceEngine::Precision::FP32),
     ::testing::ValuesIn(acrossChannels),
     ::testing::ValuesIn(normalizeVariance),
     ::testing::ValuesIn(epsilon),
-    ::testing::Values(CommonTestUtils::DEVICE_CPU),
-    ::testing::ValuesIn(Configs)
+    ::testing::Values(CommonTestUtils::DEVICE_CPU)
 );
 
 INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_TestsMVN, MvnLayerTest, MvnCases, MvnLayerTest::getTestCaseName);
-
-INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_MvnLayerCheckDynBatch, MvnLayerTest,
-                        ::testing::Combine(
-                            ::testing::Values(std::vector<size_t>({5, 8, 3, 5})),
-                            ::testing::Values(InferenceEngine::Precision::FP32),
-                            ::testing::ValuesIn(acrossChannels),
-                            ::testing::ValuesIn(normalizeVariance),
-                            ::testing::ValuesIn(epsilon),
-                            ::testing::Values(CommonTestUtils::DEVICE_CPU),
-                            ::testing::Values(std::map<std::string, std::string>({{CONFIG_KEY(DYN_BATCH_ENABLED), CONFIG_VALUE(YES)}}))),
-                        MvnLayerTest::getTestCaseName);
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp
index d338554b24f..8cbd56f53d9 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp
@@ -38,19 +38,13 @@
 const std::vector<double> epsilon = {
     0.000000001
 };
-
-const std::vector<std::map<std::string, std::string>> Configs = {
-    {}
-};
-
 const auto MvnCases = ::testing::Combine(
     ::testing::ValuesIn(inputShapes),
     ::testing::Values(InferenceEngine::Precision::FP32),
     ::testing::ValuesIn(acrossChannels),
     ::testing::ValuesIn(normalizeVariance),
     ::testing::ValuesIn(epsilon),
-    ::testing::Values(CommonTestUtils::DEVICE_GPU),
-    ::testing::ValuesIn(Configs)
+    ::testing::Values(CommonTestUtils::DEVICE_GPU)
 );
 
 INSTANTIATE_TEST_CASE_P(smoke_CLDNN_TestsMVN, MvnLayerTest, MvnCases, MvnLayerTest::getTestCaseName);
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp
index e47aa7a6ba9..39870828a24 100644
--- a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp
+++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp
@@ -13,18 +13,17 @@
 namespace LayerTestsDefinitions {
 
 typedef std::tuple<
-        InferenceEngine::SizeVector,        // Input shapes
-        InferenceEngine::Precision,         // Input precision
-        bool,                               // Across channels
-        bool,                               // Normalize variance
-        double,                             // Epsilon
-        std::string,                        // Device name
-        std::map<std::string, std::string>  // Config
-        > mvnParams;
+        InferenceEngine::SizeVector, // Input shapes
+        InferenceEngine::Precision,  // Input precision
+        bool,                        // Across channels
+        bool,                        // Normalize variance
+        double,                      // Epsilon
+        std::string> mvnParams;      // Device name
 
 class MvnLayerTest : public testing::WithParamInterface<mvnParams>, virtual public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<mvnParams> obj);
+
 protected:
     void SetUp() override;
 };
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp
index 23fab214625..e21fd7732d6 100644
--- a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp
+++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp
@@ -27,8 +27,7 @@ std::string MvnLayerTest::getTestCaseName(testing::TestParamInfo<mvnParams> obj)
     bool acrossChannels, normalizeVariance;
     double eps;
     std::string targetDevice;
-    std::map<std::string, std::string> configuration;
-    std::tie(inputShapes, inputPrecision, acrossChannels, normalizeVariance, eps, targetDevice, configuration) = obj.param;
+    std::tie(inputShapes, inputPrecision, acrossChannels, normalizeVariance, eps, targetDevice) = obj.param;
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
     result << "Precision=" << inputPrecision.name() << "_";
@@ -36,11 +35,6 @@ std::string MvnLayerTest::getTestCaseName(testing::TestParamInfo<mvnParams> obj)
     result << "NormalizeVariance=" << (normalizeVariance ? "TRUE" : "FALSE") << "_";
     result << "Epsilon=" << eps << "_";
     result << "TargetDevice=" << targetDevice;
-    if (!configuration.empty()) {
-        for (auto& configItem : configuration) {
-            result << "configItem=" << configItem.first << "_" << configItem.second << "_";
-        }
-    }
     return result.str();
 }
 
@@ -49,7 +43,7 @@ void MvnLayerTest::SetUp() {
     InferenceEngine::Precision inputPrecision;
     bool acrossChanels, normalizeVariance;
     double eps;
-    std::tie(inputShapes, inputPrecision, acrossChanels, normalizeVariance, eps, targetDevice, configuration) = this->GetParam();
+    std::tie(inputShapes, inputPrecision, acrossChanels, normalizeVariance, eps, targetDevice) = this->GetParam();
     auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
     auto param = ngraph::builder::makeParams(inType, {inputShapes});
     auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(param));
@@ -60,5 +54,6 @@ void MvnLayerTest::SetUp() {
 
 TEST_P(MvnLayerTest, CompareWithRefs) {
     Run();
-}
+};
+
 }  // namespace LayerTestsDefinitions
\ No newline at end of file
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp
index b03361f7e5e..06152180953 100644
--- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp
@@ -42,14 +42,7 @@ void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const
     const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();
 
     const auto &precision = actual->getTensorDesc().getPrecision();
-    auto bufferSize = actual->size();
-    // With dynamic batch, you need to size
-    if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED)) {
-        auto batchSize = actual->getTensorDesc().getDims()[0];
-        auto halfBatchSize = batchSize > 1 ? batchSize/ 2 : 1;
-        bufferSize = (actual->size() * halfBatchSize / batchSize);
-    }
-    const auto &size = bufferSize;
+    const auto &size = actual->size();
     switch (precision) {
         case InferenceEngine::Precision::FP32:
             Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer),
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp
index ce86d00c5b9..7fdbc75f469 100644
--- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp
@@ -113,6 +113,7 @@ protected:
     float threshold;
     InferenceEngine::CNNNetwork cnnNetwork;
     std::shared_ptr<InferenceEngine::Core> core;
+
     virtual void Validate();
 
     virtual std::vector<std::vector<std::uint8_t>> CalculateRefs();