[CPU] SoftMax cache (#9480)

* [CPUCache] SoftMax cache

* [CpuCache] fix bf16 tests

* [CPUCache] apply review comments

* [CPUCache] fix compilation
Zhang Yi 2022-01-10 23:46:57 +08:00 committed by GitHub
parent af105b86f8
commit c1206ef447
2 changed files with 276 additions and 239 deletions
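
The change replaces ad-hoc primitive construction in prepareParams() with a lookup in the node's runtime cache, keyed by the input memory descriptor, the selected implementation type, and the softmax axis. Below is a minimal, standalone sketch of that key -> builder pattern, assuming the key provides hash() and operator== as SoftmaxKey does in the diff; RuntimeCacheSketch and its unbounded map storage are illustrative stand-ins, not the plugin's actual cache implementation.

#include <functional>
#include <memory>
#include <unordered_map>
#include <utility>

template <typename Key, typename Value>
class RuntimeCacheSketch {
public:
    using ValuePtr = std::shared_ptr<Value>;
    using Builder = std::function<ValuePtr(const Key&)>;

    // Returns the cached value for `key`, or invokes `builder` once and caches
    // the result. `second` reports whether this call hit the cache.
    std::pair<ValuePtr, bool> getOrCreate(const Key& key, const Builder& builder) {
        auto it = map_.find(key);
        if (it != map_.end())
            return {it->second, true};    // cache hit: reuse the built value
        ValuePtr created = builder(key);  // may return nullptr on failure
        if (created)
            map_.emplace(key, created);
        return {created, false};
    }

private:
    struct Hasher {
        size_t operator()(const Key& k) const { return k.hash(); }  // Key supplies hash()
    };
    std::unordered_map<Key, ValuePtr, Hasher> map_;  // Key supplies operator==
};

On a repeated key (for example, a dynamic-shape node seeing the same runtime shape twice), getOrCreate returns the previously built primitive instead of invoking the builder again.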

File 1 of 2: CPU plugin SoftMax node implementation

@@ -10,11 +10,45 @@
 #include <memory_desc/cpu_memory_desc_utils.h>
 #include <ngraph/opsets/opset1.hpp>
 #include "memory_desc/dnnl_blocked_memory_desc.h"
+#include <common/primitive_hashing_utils.hpp>
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
 using namespace InferenceEngine;
 
+namespace {
+struct SoftmaxKey {
+    DnnlMemoryDescCPtr inp0;
+    impl_desc_type implType;
+    size_t axis;
+
+    size_t hash() const;
+    bool operator==(const SoftmaxKey& rhs) const;
+};
+
+size_t SoftmaxKey::hash() const {
+    using namespace dnnl::impl;
+    using namespace dnnl::impl::primitive_hashing;
+
+    size_t seed = 0;
+    seed = hash_combine(seed, get_md_hash(inp0->getDnnlDesc().data));
+    seed = hash_combine(seed, implType);
+    seed = hash_combine(seed, axis);
+    return seed;
+}
+
+bool SoftmaxKey::operator==(const SoftmaxKey& rhs) const {
+    bool retVal = true;
+    if (inp0 != rhs.inp0) {
+        retVal = retVal && inp0 && rhs.inp0 && inp0->getDnnlDesc() == rhs.inp0->getDnnlDesc();
+    }
+    retVal = retVal && implType == rhs.implType && axis == rhs.axis;
+    return retVal;
+}
+} // namespace
+
 bool MKLDNNSoftMaxNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
     try {
         if (!std::dynamic_pointer_cast<const ngraph::opset1::Softmax>(op)) {

@@ -108,32 +142,44 @@ void MKLDNNSoftMaxNode::createDescriptor(const std::vector<MemoryDescPtr> &input
 void MKLDNNSoftMaxNode::prepareParams() {
     auto inpDesc = getParentEdgeAt(0)->getMemory().GetDescWithType<DnnlMemoryDesc>();
-    const auto& in_candidate = inpDesc->getDnnlDesc();
-
-    MKLDNNDescriptor desc(std::shared_ptr<softmax_forward::desc>(
-            new softmax_forward::desc(prop_kind::forward_scoring, in_candidate, axis)));
-
-    const NodeDesc *selected_pd = getSelectedPrimitiveDescriptor();
+    const NodeDesc* selected_pd = getSelectedPrimitiveDescriptor();
 
     if (selected_pd == nullptr)
         IE_THROW() << "Preferable primitive descriptor is not set for node " << getName() << ".";
 
-    softmax_forward::primitive_desc prim_desc;
-    primitive_desc_iterator itpd = desc.createPrimitiveDescriptorIterator(getEngine());
+    SoftmaxKey key = {inpDesc, selected_pd->getImplementationType(), axis};
+    auto engine = getEngine();
+    auto builder = [&engine](const SoftmaxKey& key) -> std::shared_ptr<mkldnn::primitive> {
+        softmax_forward::primitive_desc prim_desc;
+        MKLDNNDescriptor desc(std::shared_ptr<softmax_forward::desc>(
+            new softmax_forward::desc(prop_kind::forward_scoring, key.inp0->getDnnlDesc(), key.axis)));
+        primitive_desc_iterator itpd = desc.createPrimitiveDescriptorIterator(engine);
 
-    while (itpd) {
-        impl_desc_type impl_type = parse_impl_name(itpd.impl_info_str());
-        if (impl_type == selected_pd->getImplementationType() ||
-            // At least for oneDNN v2.4 the softmax primitive is optimized for the cases where the dimension of the softmax axis is physically dense.
-            // There could be situations where it is not possible to detect the optimized case in advance in case of dynamic shapes, but
-            // in runtime the shape could be suitable for the optimized implementation, so we have to select the optimized one.
-            (ref_any == selected_pd->getImplementationType() && (impl_type & jit))) {
-            prim_desc = itpd.get();
-            break;
+        while (itpd) {
+            impl_desc_type impl_type = parse_impl_name(itpd.impl_info_str());
+            if (impl_type == key.implType ||
+                // At least for oneDNN v2.4 the softmax primitive is optimized for the cases where the dimension of the
+                // softmax axis is physically dense. There could be situations where it is not possible to detect the
+                // optimized case in advance in case of dynamic shapes, but in runtime the shape could be suitable for
+                // the optimized implementation, so we have to select the optimized one.
+                (ref_any == key.implType && (impl_type & jit))) {
+                prim_desc = itpd.get();
+                break;
+            }
+            if (!itpd.next_impl())
+                return nullptr;
         }
-        if (!itpd.next_impl())
-            IE_THROW() << "Primitive descriptor was not found for node " << getName() << ".";
+        return std::make_shared<softmax_forward>(prim_desc);
+    };
+
+    auto cache = getRuntimeCache();
+    auto result = cache->getOrCreate(key, builder);
+
+    if (!result.first) {
+        IE_THROW() << "Primitive descriptor was not found for node " << getName() << ".";
     }
 
-    prim.reset(new softmax_forward(prim_desc));
+    prim = result.first;
 
     auto src = getParentEdgesAtPort(0)[0]->getMemoryPtr()->GetPrimitive();
     auto dst = getChildEdgesAtPort(0)[0]->getMemoryPtr()->GetPrimitive();
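
SoftmaxKey::hash() above folds each key field into a single seed via oneDNN's hash_combine. Here is a self-contained sketch of that seed-folding idiom, assuming the common boost-style mixing recipe; KeySketch and hash_combine_sketch are hypothetical names that use std::hash in place of oneDNN's get_md_hash.

#include <cstddef>
#include <functional>

template <typename T>
size_t hash_combine_sketch(size_t seed, const T& v) {
    // The 0x9e3779b9 constant and shifts spread bits so that the same fields
    // in a different order produce different hashes.
    return seed ^ (std::hash<T>{}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

struct KeySketch {
    size_t descHash;  // stands in for get_md_hash(inp0->getDnnlDesc().data)
    int implType;     // stands in for impl_desc_type
    size_t axis;

    size_t hash() const {
        size_t seed = 0;
        seed = hash_combine_sketch(seed, descHash);
        seed = hash_combine_sketch(seed, implType);
        seed = hash_combine_sketch(seed, axis);
        return seed;
    }
};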

File 2 of 2: SoftMax single-layer CPU test

@@ -1,222 +1,213 @@
(The previous revision of this file was entirely commented out; this commit deletes the commented-out version and lands the rewritten test below, ported to the ov::test SubgraphBaseTest infrastructure.)

// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <ngraph_functions/builders.hpp>

#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"

using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ov::test;

namespace CPULayerTestsDefinitions {

struct SoftMaxConfig {
    ov::test::InputShape inputShape;
    size_t axis;
};

typedef std::tuple<ElementType,    // netPrecision
                   SoftMaxConfig,  // softmaxTestConfig
                   std::string,    // targetDevice
                   CPUSpecificParams>
    softmaxCPUTestParams;

class SoftMaxLayerCPUTest : public testing::WithParamInterface<softmaxCPUTestParams>,
                            virtual public SubgraphBaseTest,
                            public CPUTestsBase {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<softmaxCPUTestParams>& obj) {
        CPUSpecificParams cpuParams;
        ElementType inType;
        SoftMaxConfig config;
        std::string targetDevice;
        std::tie(inType, config, targetDevice, cpuParams) = obj.param;

        std::ostringstream result;
        result << "netPRC=" << inType << "_";
        result << "IS=" << CommonTestUtils::partialShape2str({config.inputShape.first}) << "_";
        result << "TS=";
        for (const auto& shape : config.inputShape.second) {
            result << "(";
            result << CommonTestUtils::vec2str(shape);
            result << ")_";
        }
        result << "axis=" << config.axis << "_";
        result << "trgDev=" << targetDevice;
        result << CPUTestsBase::getTestCaseName(cpuParams);

        return result.str();
    }

protected:
    void SetUp() override {
        ElementType inType;
        SoftMaxConfig config;
        CPUSpecificParams cpuParams;
        std::tie(inType, config, targetDevice, cpuParams) = this->GetParam();

        std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
        if (selectedType.empty()) {
            selectedType = getPrimitiveType();
        }

        if (inType == ElementType::bf16) {
            rel_threshold = 1e-2f;
        }
        selectedType = makeSelectedTypeStr(selectedType, inType);
        init_input_shapes({config.inputShape});
        auto params = ngraph::builder::makeDynamicParams(inType, inputDynamicShapes);

        const auto paramOuts =
            ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));

        const auto softMax = std::make_shared<ngraph::opset1::Softmax>(paramOuts.at(0), config.axis);

        function = makeNgraphFunction(inType, params, softMax, "SoftMax");
    }
};

TEST_P(SoftMaxLayerCPUTest, CompareWithRefs) {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()
    run();
    CheckPluginRelatedResults(executableNetwork, "Softmax");
}

namespace {
// not optimized cpu spec
const auto notOptimizedCPUSpec = CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"};

const std::vector<SoftMaxConfig> optimizedConfigsFP32 = {
    // Static shapes
    {ov::test::InputShape{ov::PartialShape{1, 100}, {ov::Shape{1, 100}}}, 1},
    {ov::test::InputShape{ov::PartialShape{10, 10}, {ov::Shape{10, 10}}}, 1},
    {ov::test::InputShape{ov::PartialShape{100, 1}, {ov::Shape{100, 1}}}, 0},
    {ov::test::InputShape{ov::PartialShape{100, 1}, {ov::Shape{100, 1}}}, 1},
    {ov::test::InputShape{ov::PartialShape{5, 5, 1}, {ov::Shape{5, 5, 1}}}, 1},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5}, {ov::Shape{5, 5, 5}}}, 2},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5}}}, 0},
    {ov::test::InputShape{ov::PartialShape{5, 5, 1, 1}, {ov::Shape{5, 5, 1, 1}}}, 1},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5}}}, 1},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 1}, {ov::Shape{5, 5, 5, 1}}}, 2},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5}}}, 2},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5}}}, 3},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5}}}, 0},
    {ov::test::InputShape{ov::PartialShape{5, 5, 1, 1, 1}, {ov::Shape{5, 5, 1, 1, 1}}}, 1},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5}}}, 1},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 1, 1}, {ov::Shape{5, 5, 5, 1, 1}}}, 2},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5}}}, 2},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 1, 1}, {ov::Shape{5, 5, 5, 1, 1}}}, 3},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5}}}, 3},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 1}, {ov::Shape{5, 5, 5, 5, 1}}}, 4},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5}}}, 4},
    // Dynamic shapes
    {ov::test::InputShape{// dynamic shape
                          ov::PartialShape{-1, -1},
                          {// target static shapes
                           ov::Shape{10, 10},
                           ov::Shape{15, 15},
                           ov::Shape{10, 10},
                           ov::Shape{10, 5}}},
     1},
    {ov::test::InputShape{// dynamic shape
                          ov::PartialShape{-1, -1, 1, 1, 1},
                          {// target static shapes
                           ov::Shape{5, 5, 1, 1, 1},
                           ov::Shape{10, 7, 1, 1, 1},
                           ov::Shape{5, 5, 1, 1, 1}}},
     1},
};

const std::vector<SoftMaxConfig> notOptimizedConfigsFP32{
    // Static shapes
    {ov::test::InputShape{ov::PartialShape{1, 100}, {ov::Shape{1, 100}}}, 0},
    {ov::test::InputShape{ov::PartialShape{10, 10}, {ov::Shape{10, 10}}}, 0},
    {ov::test::InputShape{ov::PartialShape{10, 10, 10}, {ov::Shape{10, 10, 10}}}, 0},
    {ov::test::InputShape{ov::PartialShape{10, 10, 10}, {ov::Shape{10, 10, 10}}}, 1},
    // Dynamic shapes
    {ov::test::InputShape{// dynamic shape
                          ov::PartialShape{-1, -1},
                          {// target static shapes
                           ov::Shape{10, 1},
                           ov::Shape{15, 15},
                           ov::Shape{10, 5},
                           ov::Shape{15, 15}}},
     0},
    {ov::test::InputShape{// dynamic shape
                          ov::PartialShape{ov::Dimension{1, 100}, ov::Dimension{1, 100}, -1},
                          {// target static shapes
                           ov::Shape{10, 10, 10},
                           ov::Shape{10, 10, 1},
                           ov::Shape{10, 5, 10},
                           ov::Shape{10, 10, 1}}},
     1},
};

const std::vector<SoftMaxConfig> unsupportedConfigsFP32{
    // Static shapes
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5, 5}}}, 0},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5, 5}}}, 1},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5, 5}}}, 2},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5, 5}}}, 3},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5, 5}}}, 4},
    {ov::test::InputShape{ov::PartialShape{5, 5, 5, 5, 5, 5}, {ov::Shape{5, 5, 5, 5, 5, 5}}}, 5},
    // Dynamic shapes
    {ov::test::InputShape{// dynamic shape
                          ov::PartialShape{-1, -1, -1, -1, -1, -1},
                          {// target static shapes
                           ov::Shape{5, 5, 5, 5, 5, 5},
                           ov::Shape{7, 7, 7, 7, 7, 7},
                           ov::Shape{5, 5, 5, 5, 5, 5}}},
     4},
};

const auto avx512 = CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"};
const auto avx2 = CPUSpecificParams{{}, {}, {"jit_avx2"}, "jit_avx2"};
const auto sse42 = CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"};
const auto ref = CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"};

const std::vector<CPUSpecificParams> vecCpuConfigs = {ref, sse42, avx2, avx512};
const auto OptimizedParams = testing::Combine(testing::Values(ElementType::f32, ElementType::bf16),
                                              testing::ValuesIn(optimizedConfigsFP32),
                                              testing::Values(CommonTestUtils::DEVICE_CPU),
                                              testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)));

INSTANTIATE_TEST_SUITE_P(smoke_SoftMax_Optimized_CPU,
                         SoftMaxLayerCPUTest,
                         OptimizedParams,
                         SoftMaxLayerCPUTest::getTestCaseName);

const auto NotOptimizedParams = testing::Combine(testing::Values(ElementType::f32, ElementType::bf16),
                                                 testing::ValuesIn(notOptimizedConfigsFP32),
                                                 testing::Values(CommonTestUtils::DEVICE_CPU),
                                                 testing::Values(notOptimizedCPUSpec));

INSTANTIATE_TEST_SUITE_P(smoke_SoftMax_CPU,
                         SoftMaxLayerCPUTest,
                         NotOptimizedParams,
                         SoftMaxLayerCPUTest::getTestCaseName);

const auto UnsupportedParams = testing::Combine(testing::Values(ElementType::f32, ElementType::bf16),
                                                testing::ValuesIn(unsupportedConfigsFP32),
                                                testing::Values(CommonTestUtils::DEVICE_CPU),
                                                testing::Values(notOptimizedCPUSpec));

INSTANTIATE_TEST_SUITE_P(smoke_SoftMax_Unsupported_CPU,
                         SoftMaxLayerCPUTest,
                         UnsupportedParams,
                         SoftMaxLayerCPUTest::getTestCaseName);

}  // namespace
}  // namespace CPULayerTestsDefinitions