diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
index 902b8a1e90a..1f69e7bdbba 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
@@ -44,6 +44,7 @@
 #include
 #include
 #include
+#include <transformations/op_conversions/log_softmax_decomposition.hpp>
 #include
 #include
 #include
@@ -147,6 +148,7 @@ static void Transformation(ICNNNetwork::Ptr& clonedNetwork, const Config& conf)
     pass_config->disable();
     pass_config->disable();
     pass_config->disable();
+    pass_config->disable<ngraph::pass::LogSoftmaxDecomposition>();
 
     pass_config->enable();
diff --git a/inference-engine/src/mkldnn_plugin/nodes/log_softmax.cpp b/inference-engine/src/mkldnn_plugin/nodes/log_softmax.cpp
index b79109d946b..3c3a32e4862 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/log_softmax.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/log_softmax.cpp
@@ -60,37 +60,41 @@ public:
     StatusCode execute(std::vector<Blob::Ptr>& inputs, std::vector<Blob::Ptr>& outputs, ResponseDesc *resp) noexcept override {
         const float *src_data = inputs[0]->cbuffer().as<const float*>() +
             inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        float* dst_data = outputs[0]->cbuffer().as<float*>() +
+        float* dst_data = outputs[0]->buffer().as<float*>() +
             outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
 
         if (is_last_dim) {
             parallel_for(axis_step, [&](size_t i) {
-                float reduce_prod = 0.0f;
                 const float *src_dataPtr = &src_data[i * reduced_axis_size];
+                float *dst_dataPtr = &dst_data[i * reduced_axis_size];
+
+                float reduce_prod = 0.0f;
+                const float max = *std::max_element(src_dataPtr, src_dataPtr + reduced_axis_size);
                 for (size_t j = 0; j < reduced_axis_size; ++j)
-                    reduce_prod += expf(src_dataPtr[j]);
+                    reduce_prod += expf(src_dataPtr[j] - max);
+
                 reduce_prod = logf(reduce_prod);
 
-                float *dst_dataPtr = reinterpret_cast<float*>(&dst_data[i * reduced_axis_size]);
                 for (size_t j = 0; j < reduced_axis_size; ++j)
-                    dst_dataPtr[j] = src_dataPtr[j] - reduce_prod;
+                    dst_dataPtr[j] = src_dataPtr[j] - max - reduce_prod;
             });
         } else {
             parallel_for2d(axis_step, reduced_axis_stride, [&](size_t k, size_t i) {
-                float reduce_prod = 0.0f;
                 const float *src_dataPtr = &src_data[k * reduced_axis_stride * reduced_axis_size + i];
+                float *dst_dataPtr = &dst_data[k * reduced_axis_stride * reduced_axis_size + i];
+
+                float reduce_prod = 0.0f;
+                float max = std::numeric_limits<float>::min();
                 for (size_t j = 0; j < reduced_axis_size; ++j) {
-                    reduce_prod += expf((*src_dataPtr));
-                    src_dataPtr += reduced_axis_stride;
+                    if (src_dataPtr[j * reduced_axis_stride] > max)
+                        max = src_dataPtr[j * reduced_axis_stride];
                 }
+
+                for (size_t j = 0; j < reduced_axis_size; ++j)
+                    reduce_prod += expf(src_dataPtr[j * reduced_axis_stride] - max);
+
                 reduce_prod = logf(reduce_prod);
 
-                src_dataPtr = &src_data[k * reduced_axis_stride * reduced_axis_size + i];
-                float *dst_dataPtr = reinterpret_cast<float*>(&dst_data[k * reduced_axis_stride * reduced_axis_size + i]);
-                for (size_t j = 0; j < reduced_axis_size; ++j) {
-                    (*dst_dataPtr) = (*src_dataPtr) - reduce_prod;
-                    src_dataPtr += reduced_axis_stride;
-                    dst_dataPtr += reduced_axis_stride;
-                }
+                for (size_t j = 0; j < reduced_axis_size; ++j)
+                    dst_dataPtr[j * reduced_axis_stride] = src_dataPtr[j * reduced_axis_stride] - max - reduce_prod;
             });
         }
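For context, the kernel rewrite above switches to the shift-invariant form of log-softmax: `log_softmax(x)_i = (x_i - m) - log(sum_j exp(x_j - m))` with `m = max_j x_j`. Subtracting a constant from every input leaves softmax unchanged, and choosing the row maximum keeps every `expf()` argument at or below zero, so the reduction can no longer overflow (`expf` overflows `float` for arguments above roughly 88.7). A minimal standalone sketch of the contiguous last-axis case, with an illustrative name and signature rather than plugin code:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>

// Numerically stable log-softmax over a contiguous row of n elements.
// Sketch only; assumes n > 0. Not an OpenVINO symbol.
void log_softmax_stable(const float* src, float* dst, std::size_t n) {
    // Shift by the row maximum: every exponent is <= 0, and at least one
    // term is exp(0) == 1, so the sum lies in [1, n] and log() is safe.
    const float m = *std::max_element(src, src + n);

    float sum = 0.0f;
    for (std::size_t j = 0; j < n; ++j)
        sum += std::exp(src[j] - m);
    const float log_sum = std::log(sum);

    // log(exp(x_i - m) / sum) == x_i - m - log(sum)
    for (std::size_t j = 0; j < n; ++j)
        dst[j] = src[j] - m - log_sum;
}
```

One detail of the strided branch worth noting: it seeds `max` with `std::numeric_limits<float>::min()`, which is the smallest positive float (`lowest()` is the most negative value). For an all-negative row the shift therefore stays near zero, which forfeits the stabilization but not correctness for inputs of moderate magnitude.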
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/log_softmax.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/log_softmax.cpp
new file mode 100644
index 00000000000..8c87da149ec
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/log_softmax.cpp
@@ -0,0 +1,76 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/log_softmax.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+};
+
+const std::vector<InferenceEngine::SizeVector> inputShapes2D = {
+    InferenceEngine::SizeVector {1, 100},
+    InferenceEngine::SizeVector {100, 1},
+    InferenceEngine::SizeVector {10, 10},
+};
+
+const std::vector<int64_t> axis2D = {
+    -2, -1, 0, 1
+};
+
+const auto params2D = testing::Combine(
+    testing::ValuesIn(netPrecisions),
+    testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+    testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+    testing::Values(InferenceEngine::Layout::ANY),
+    testing::Values(InferenceEngine::Layout::ANY),
+    testing::ValuesIn(inputShapes2D),
+    testing::ValuesIn(axis2D),
+    testing::Values(CommonTestUtils::DEVICE_CPU),
+    testing::Values(std::map<std::string, std::string>())
+);
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_LogSoftmax2D,
+    LogSoftmaxLayerTest,
+    params2D,
+    LogSoftmaxLayerTest::getTestCaseName
+);
+
+const std::vector<InferenceEngine::SizeVector> inputShapes4D = {
+    InferenceEngine::SizeVector {1, 100, 1, 1},
+    InferenceEngine::SizeVector {1, 3, 4, 3},
+    InferenceEngine::SizeVector {2, 3, 4, 5},
+};
+
+const std::vector<int64_t> axis4D = {
+    -4, -3, -2, -1, 0, 1, 2, 3
+};
+
+const auto params4D = testing::Combine(
+    testing::ValuesIn(netPrecisions),
+    testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+    testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+    testing::Values(InferenceEngine::Layout::ANY),
+    testing::Values(InferenceEngine::Layout::ANY),
+    testing::ValuesIn(inputShapes4D),
+    testing::ValuesIn(axis4D),
+    testing::Values(CommonTestUtils::DEVICE_CPU),
+    testing::Values(std::map<std::string, std::string>())
+);
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_LogSoftmax4D,
+    LogSoftmaxLayerTest,
+    params4D,
+    LogSoftmaxLayerTest::getTestCaseName
+);
+
+} // namespace
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/log_softmax.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/log_softmax.hpp
new file mode 100644
index 00000000000..0c2231d098e
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/log_softmax.hpp
@@ -0,0 +1,41 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+namespace LayerTestsDefinitions {
+
+using logSoftmaxLayerTestParams = std::tuple<
+    InferenceEngine::Precision,          // netPrecision
+    InferenceEngine::Precision,          // Input precision
+    InferenceEngine::Precision,          // Output precision
+    InferenceEngine::Layout,             // Input layout
+    InferenceEngine::Layout,             // Output layout
+    InferenceEngine::SizeVector,         // inputShape
+    int64_t,                             // axis
+    std::string,                         // targetDevice
+    std::map<std::string, std::string>   // config
+>;
+
+class LogSoftmaxLayerTest : public testing::WithParamInterface<logSoftmaxLayerTestParams>,
+                            virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<logSoftmaxLayerTestParams> obj);
+
+protected:
+    void SetUp() override;
+};
+
+} // namespace LayerTestsDefinitions
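The axis lists above intentionally include negative values: nGraph follows the usual convention of normalizing a negative axis by adding the input rank, so for the 4D shapes axis -1 and axis 3 address the same innermost dimension. A quick sketch of that normalization (an illustrative helper, not a library function):

```cpp
#include <cstdint>
#include <stdexcept>

// Maps a possibly negative axis onto [0, rank); -1 means the last dimension.
// Illustrative only; nGraph performs the equivalent validation internally.
int64_t normalize_axis(int64_t axis, int64_t rank) {
    const int64_t normalized = axis < 0 ? axis + rank : axis;
    if (normalized < 0 || normalized >= rank)
        throw std::out_of_range("axis out of range");
    return normalized;
}
```

With rank 4, the tested values -4 through 3 normalize to 0 through 3, so every dimension is exercised by both its positive and its negative spelling.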
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/softmax.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/softmax.hpp
index 87dd39587e8..47dbd0f6375 100644
--- a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/softmax.hpp
+++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/softmax.hpp
@@ -19,10 +19,10 @@ namespace LayerTestsDefinitions {
 
 using softMaxLayerTestParams = std::tuple<
     InferenceEngine::Precision,          // netPrecision
-    InferenceEngine::Precision,  // Input precision
-    InferenceEngine::Precision,  // Output precision
-    InferenceEngine::Layout,     // Input layout
-    InferenceEngine::Layout,     // Output layout
+    InferenceEngine::Precision,          // Input precision
+    InferenceEngine::Precision,          // Output precision
+    InferenceEngine::Layout,             // Input layout
+    InferenceEngine::Layout,             // Output layout
     InferenceEngine::SizeVector,         // inputShape
     size_t,                              // axis
     std::string,                         // targetDevice
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/log_softmax.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/log_softmax.cpp
new file mode 100644
index 00000000000..998157158f3
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/log_softmax.cpp
@@ -0,0 +1,70 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/log_softmax.hpp"
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+
+#include "ie_core.hpp"
+
+#include
+#include
+#include
+#include
+
+namespace LayerTestsDefinitions {
+
+std::string LogSoftmaxLayerTest::getTestCaseName(testing::TestParamInfo<logSoftmaxLayerTestParams> obj) {
+    InferenceEngine::Precision netPrecision;
+    InferenceEngine::Precision inPrc, outPrc;
+    InferenceEngine::Layout inLayout, outLayout;
+    InferenceEngine::SizeVector inputShape;
+    int64_t axis;
+    std::string targetDevice;
+    std::map<std::string, std::string> config;
+    std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, axis, targetDevice, config) = obj.param;
+
+    std::ostringstream result;
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "inPRC=" << inPrc.name() << "_";
+    result << "outPRC=" << outPrc.name() << "_";
+    result << "inL=" << inLayout << "_";
+    result << "outL=" << outLayout << "_";
+    result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
+    result << "axis=" << axis << "_";
+    result << "trgDev=" << targetDevice;
+
+    return result.str();
+}
+
+void LogSoftmaxLayerTest::SetUp() {
+    InferenceEngine::SizeVector inputShape;
+    InferenceEngine::Precision netPrecision;
+    int64_t axis;
+
+    std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, axis, targetDevice, configuration) = GetParam();
+    outLayout = inLayout;
+
+    const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+    const auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+
+    const auto paramOuts =
+        ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+
+    const auto logSoftmax = std::make_shared<ngraph::opset5::LogSoftmax>(paramOuts.at(0), axis);
+
+    const ngraph::ResultVector results {std::make_shared<ngraph::opset1::Result>(logSoftmax)};
+
+    function = std::make_shared<ngraph::Function>(results, params, "logSoftmax");
+}
+
+TEST_P(LogSoftmaxLayerTest, CompareWithRefs) {
+    Run();
+}
+
+} // namespace LayerTestsDefinitions
diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest
index 559f6846f16..76e96fc9db5 100644
--- a/ngraph/test/runtime/ie/unit_test.manifest
+++ b/ngraph/test/runtime/ie/unit_test.manifest
@@ -1140,13 +1140,6 @@ IE_CPU.onnx_resize11_scales_nearest_asymmetric_floor_dynamic_sizes
 # Input data precision not supported. Expected float.
 ctc_greedy_decoder_f16
 
-# Wrong output when axis 0
-IE_CPU.log_softmax_1d_single_value
-IE_CPU.log_softmax_2d_axis0
-IE_CPU.log_softmax_2d_axis_neg2
-IE_CPU.log_softmax_3d_axis_0
-IE_CPU.log_softmax_3d_axis_neg3
-
 #-------------------------------------------------------------------------------
 #
 # Inference Engine GPU plugin excludes
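The manifest entries deleted above are the IE_CPU log_softmax unit tests that had been skipped as "Wrong output when axis 0"; with the indexing cleanup and the max shift in the CPU kernel they can be re-enabled. Independent of the axis-0 fix, the shift is what lets large inputs through at all. A standalone demonstration of the naive form's failure mode (a quick check, not part of the patch):

```cpp
#include <cmath>
#include <cstdio>

int main() {
    // Two equal logits: the exact log-softmax of each is log(1/2) = -0.693147.
    const float src[2] = {1000.0f, 1000.0f};
    const float m = 1000.0f;  // row maximum

    float naive_sum = 0.0f, shifted_sum = 0.0f;
    for (float v : src) {
        naive_sum   += std::exp(v);      // exp(1000) overflows float to +inf
        shifted_sum += std::exp(v - m);  // exp(0) == 1, exactly representable
    }

    std::printf("naive:   %f\n", src[0] - std::log(naive_sum));        // -inf
    std::printf("shifted: %f\n", src[0] - m - std::log(shifted_sum));  // -0.693147
    return 0;
}
```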