[CPU] Math dynamic done (#8290)

Egor Shulman 2021-11-11 10:42:19 +03:00 committed by GitHub
parent 700beb7d1f
commit a560ba58e6
3 changed files with 156 additions and 59 deletions


@@ -16,10 +16,6 @@ using namespace InferenceEngine;
 bool MKLDNNMathNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (isDynamicNgraphNode(op)) {
-            errorMessage = "Doesn't support op with dynamic shapes";
-            return false;
-        }
         if (initializers.find(op->get_type_info()) == initializers.end()) {
            errorMessage = "Unsupported Math layer type.";
            return false;
        }
@@ -63,6 +59,10 @@ void MKLDNNMathNode::initSupportedPrimitiveDescriptors() {
                          impl_desc_type::ref_any);
 }
 
+std::vector<VectorDims> MKLDNNMathNode::shapeInfer() const {
+    return std::vector<VectorDims>{getParentEdgesAtPort(0)[0]->getMemory().getStaticDims()};
+}
+
 void MKLDNNMathNode::execute(mkldnn::stream strm) {
     size_t dataSize = getChildEdgesAtPort(0)[0]->getMemory().GetShape().getElementsCount();
     const float *src_data = reinterpret_cast<const float *>(getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
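The new shapeInfer() above captures why a Math node can support dynamic shapes so cheaply: every operation it implements is element-wise, so the output shape is simply whatever shape input 0 currently has. A minimal standalone sketch of that idea in plain C++ (currentInputDims() is a hypothetical stand-in for querying the parent edge's resolved memory dims; this is not the plugin's real interface):

#include <cstddef>
#include <iostream>
#include <vector>

using VectorDims = std::vector<std::size_t>;  // same alias the plugin uses

// Hypothetical stand-in for getParentEdgesAtPort(0)[0]->getMemory().getStaticDims():
// by execution time the input's dims are already concrete.
VectorDims currentInputDims() { return {2, 17, 5, 4}; }

// Element-wise op: one output whose shape equals the input's shape,
// so "shape inference" is just forwarding the dims.
std::vector<VectorDims> shapeInferElementwise() {
    return {currentInputDims()};
}

int main() {
    for (std::size_t d : shapeInferElementwise().front())
        std::cout << d << ' ';  // prints: 2 17 5 4
    std::cout << '\n';
    return 0;
}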


@@ -19,6 +19,10 @@ public:
     void execute(mkldnn::stream strm) override;
     bool created() const override;
 
+    std::vector<VectorDims> shapeInfer() const override;
+    bool needPrepareParams() const override { return false; };
+    void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); };
+
     static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
 
 private:
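Taken together, the three overrides form the node's whole dynamic-shape contract: shapeInfer() supplies the output dims, needPrepareParams() returning false says there is no shape-specialized state to rebuild, and executeDynamicImpl() reuses the static execute() path unchanged. A simplified sketch of the dispatch pattern these hooks plug into (NodeSketch is a hypothetical base class for illustration; the real MKLDNNNode interface is much larger):

#include <iostream>

struct NodeSketch {
    virtual ~NodeSketch() = default;
    virtual bool needPrepareParams() const { return true; }
    virtual void prepareParams() { std::cout << "rebuilding shape-specific kernel\n"; }
    virtual void execute() = 0;
    virtual void executeDynamicImpl() = 0;
    // Driver path for dynamically shaped inputs: re-prepare only when the
    // node says its parameters depend on the concrete shape.
    void executeDynamic() {
        if (needPrepareParams())
            prepareParams();
        executeDynamicImpl();
    }
};

// A shape-agnostic math node: nothing to re-prepare between shapes, and the
// static execute() already reads the current dims, so it is reused as-is.
struct MathNodeSketch : NodeSketch {
    bool needPrepareParams() const override { return false; }
    void executeDynamicImpl() override { execute(); }
    void execute() override { std::cout << "element-wise kernel over current dims\n"; }
};

int main() {
    MathNodeSketch node;
    node.executeDynamic();  // prints only the kernel line; no re-preparation
    return 0;
}

Most nodes must rebuild their oneDNN primitives whenever shapes change; a purely element-wise reference kernel does not, which is why this node can skip the prepare step entirely.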


@@ -3,72 +3,118 @@
 //
 
 #include <shared_test_classes/single_layer/activation.hpp>
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "functional_test_utils/ov_tensor_utils.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 
 using namespace InferenceEngine;
 using namespace CPUTestUtils;
+using namespace ngraph::helpers;
+using namespace ov::test;
 
 namespace CPULayerTestsDefinitions {
 
-typedef std::tuple<
-        LayerTestsDefinitions::activationParams,
-        CPUSpecificParams>
-        ActivationLayerCPUTestParamSet;
+using ActivationLayerCPUTestParamSet = std::tuple<
+        std::vector<InputShape>,                                          // Input shapes
+        std::vector<size_t>,                                              // Activation shapes
+        std::pair<ngraph::helpers::ActivationTypes, std::vector<float>>,  // Activation type and constant value
+        InferenceEngine::Precision,                                       // Net precision
+        InferenceEngine::Precision,                                       // Input precision
+        InferenceEngine::Precision,                                       // Output precision
+        CPUSpecificParams>;
 
 class ActivationLayerCPUTest : public testing::WithParamInterface<ActivationLayerCPUTestParamSet>,
-                               virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase {
+                               virtual public SubgraphBaseTest, public CPUTestsBase {
 public:
     ActivationTypes activationType;
+
     static std::string getTestCaseName(const testing::TestParamInfo<ActivationLayerCPUTestParamSet> &obj) {
-        LayerTestsDefinitions::activationParams basicParamsSet;
+        std::vector<InputShape> inputShapes;
+        std::vector<size_t> activationShapes;
+        std::pair<ngraph::helpers::ActivationTypes, std::vector<float>> activationTypeAndConstValue;
+        Precision netPrecision, inPrecision, outPrecision;
         CPUSpecificParams cpuParams;
-        std::tie(basicParamsSet, cpuParams) = obj.param;
+        std::tie(inputShapes, activationShapes, activationTypeAndConstValue, netPrecision, inPrecision, outPrecision, cpuParams) = obj.param;
 
         std::ostringstream result;
-        result << LayerTestsDefinitions::ActivationLayerTest::getTestCaseName(testing::TestParamInfo<LayerTestsDefinitions::activationParams>(
-                basicParamsSet, 0));
+        result << LayerTestsDefinitions::activationNames[activationTypeAndConstValue.first] << "_";
+        if (inputShapes.front().first.size() != 0) {
+            result << "IS=(";
+            for (const auto &shape : inputShapes) {
+                result << CommonTestUtils::partialShape2str({shape.first}) << "_";
+            }
+            result.seekp(-1, result.cur);
+            result << ")_";
+        }
+        result << "TS=";
+        for (const auto& shape : inputShapes) {
+            for (const auto& item : shape.second) {
+                result << CommonTestUtils::vec2str(item) << "_";
+            }
+        }
+        result << "AS=" << CommonTestUtils::vec2str(activationShapes) << "_";
+        result << "ConstantsValue=" << CommonTestUtils::vec2str(activationTypeAndConstValue.second) << "_";
+        result << "netPRC=" << netPrecision.name() << "_";
+        result << "inPRC=" << inPrecision.name() << "_";
+        result << "outPRC=" << outPrecision.name() << "_";
         result << CPUTestsBase::getTestCaseName(cpuParams);
 
         return result.str();
     }
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override {
-        int32_t data_start_from;
-        uint32_t data_range;
+
+    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
+        int32_t startFrom;
+        uint32_t range;
         int32_t resolution;
+
         if (activationType == ActivationTypes::Exp && netPrecision == Precision::BF16) {
-            data_start_from = 0;
-            data_range = 2;
+            startFrom = 0;
+            range = 2;
             resolution = 32768;
+        } else if (activationType == ActivationTypes::Acosh) {
+            startFrom = 2;
+            range = 2;
+            resolution = 128;
         } else {
-            data_start_from = 0;
-            data_range = 15;
+            startFrom = 0;
+            range = 15;
             resolution = 32768;
         }
-        return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_range, data_start_from, resolution);
+
+        inputs.clear();
+        const auto& funcInputs = function->inputs();
+        for (int i = 0; i < funcInputs.size(); ++i) {
+            const auto& funcInput = funcInputs[i];
+            ov::runtime::Tensor tensor;
+            if (funcInput.get_element_type().is_real()) {
+                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i],
+                                                                 range, startFrom, resolution);
+            } else {
+                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
+            }
+            inputs.insert({funcInput.get_node_shared_ptr(), tensor});
+        }
     }
+
 protected:
     void SetUp() override {
-        LayerTestsDefinitions::activationParams basicParamsSet;
+        targetDevice = CommonTestUtils::DEVICE_CPU;
+
+        std::vector<InputShape> inputShapes;
+        std::vector<size_t> activationShapes;
+        std::pair<ngraph::helpers::ActivationTypes, std::vector<float>> activationTypeAndConstValue;
+        Precision inPrecision, outPrecision;
         CPUSpecificParams cpuParams;
-        std::tie(basicParamsSet, cpuParams) = this->GetParam();
+        std::tie(inputShapes, activationShapes, activationTypeAndConstValue, netPrecision, inPrecision, outPrecision, cpuParams) = this->GetParam();
         std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
+        activationType = activationTypeAndConstValue.first;
+        auto constantsValue = activationTypeAndConstValue.second;
 
-        std::pair<std::vector<size_t>, std::vector<size_t>> shapes;
-        std::pair<ActivationTypes, std::vector<float>> activationDecl;
-        std::tie(activationDecl, netPrecision, inPrc, outPrc, inLayout, outLayout, shapes, targetDevice) = basicParamsSet;
+        inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrecision);
+        outType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrecision);
         selectedType = getPrimitiveType() + "_" + netPrecision.name();
 
-        activationType = activationDecl.first;
-        auto constantsValue = activationDecl.second;
+        init_input_shapes(inputShapes);
 
         auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-        auto params = ngraph::builder::makeParams(ngPrc, {shapes.first});
-        auto activation = ngraph::builder::makeActivation(params[0], ngPrc, activationType, shapes.second, constantsValue);
+        auto params = ngraph::builder::makeDynamicParams(ngPrc, {inputDynamicShapes.front()});
+        auto activation = ngraph::builder::makeActivation(params[0], ngPrc, activationType, activationShapes, constantsValue);
        activation->get_rt_info() = getCPUInfo();
        function = std::make_shared<ngraph::Function>(ngraph::NodeVector{activation}, params, "Activation");
    }
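One detail worth noting in generate_inputs() above: the new Acosh branch starts the fill range at 2 with a width of 2 because acosh(x) is only defined for x >= 1, so every generated value stays inside [2, 4). A rough model of how the three parameters are assumed to interact (this approximates create_and_fill_tensor's value grid and is not its actual implementation):

#include <cstdint>
#include <cstdlib>
#include <iostream>

// Assumed semantics: values lie in [startFrom, startFrom + range)
// on a grid with step 1/resolution.
double sampleValue(int32_t startFrom, uint32_t range, int32_t resolution) {
    return startFrom + (std::rand() % (range * resolution)) / static_cast<double>(resolution);
}

int main() {
    std::srand(0);
    for (int i = 0; i < 4; ++i)
        std::cout << sampleValue(2, 2, 128) << '\n';  // every sample falls in [2, 4)
    return 0;
}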
@@ -79,13 +125,16 @@ protected:
 
 TEST_P(ActivationLayerCPUTest, CompareWithRefs) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    Run();
-    CheckPluginRelatedResults(executableNetwork, "Eltwise");
+    run();
+    // TODO: Should be uncommented after updating the CheckPluginRelatedResults() method
+    // CheckPluginRelatedResults(executableNetwork, "Eltwise");
 }
 
 namespace {
 // list only types supported by eltwise
+const std::vector<size_t> activationShapes = {};
+
 const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes = {
         {Sqrt, {{}}},
         {Sigmoid, {{}}},
@@ -108,23 +157,24 @@ std::vector<CPUSpecificParams> cpuParams_4D = {
         CPUSpecificParams({nchw}, {nchw}, {}, {})
 };
 
-std::map<std::vector<size_t>, std::vector<std::vector<size_t>>> basic4D = {
-        {{2, 4, 4, 1}, {{}}},
-        {{2, 17, 5, 4}, {{}}},
+std::vector<std::vector<ov::Shape>> basic4D = {
+        {{2, 4, 4, 1}},
+        {{2, 17, 5, 4}}
 };
 
-std::vector<Precision> netPrc = {Precision::BF16, Precision::FP32};
+std::vector<Precision> netPrc = {
+        // TODO: Should be uncommented after PR #8339 merge
+        // Precision::BF16
+        Precision::FP32
+};
 
 const auto basicCases4D = ::testing::Combine(
-        ::testing::Combine(
+        ::testing::ValuesIn(static_shapes_to_test_representation(basic4D)),
+        ::testing::Values(activationShapes),
         ::testing::ValuesIn(CommonTestUtils::combineParams(activationTypes)),
         ::testing::ValuesIn(netPrc),
         ::testing::Values(Precision::FP32),
         ::testing::Values(Precision::FP32),
-        ::testing::Values(InferenceEngine::Layout::ANY),
-        ::testing::Values(InferenceEngine::Layout::ANY),
-        ::testing::ValuesIn(CommonTestUtils::combineParams(basic4D)),
-        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
         ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D))
 );
@@ -136,24 +186,67 @@ std::vector<CPUSpecificParams> cpuParams_5D = {
         CPUSpecificParams({ncdhw}, {ncdhw}, {}, {})
 };
 
-std::map<std::vector<size_t>, std::vector<std::vector<size_t>>> basic5D = {
-        {{2, 4, 3, 4, 1}, {{}}},
-        {{2, 17, 7, 5, 4}, {{}}},
+std::vector<std::vector<ov::Shape>> basic5D = {
+        {{2, 4, 3, 4, 1}},
+        {{2, 17, 7, 5, 4}}
 };
 
 const auto basicCases5D = ::testing::Combine(
-        ::testing::Combine(
+        ::testing::ValuesIn(static_shapes_to_test_representation(basic5D)),
+        ::testing::Values(activationShapes),
         ::testing::ValuesIn(CommonTestUtils::combineParams(activationTypes)),
         ::testing::ValuesIn(netPrc),
         ::testing::Values(Precision::FP32),
         ::testing::Values(Precision::FP32),
-        ::testing::Values(InferenceEngine::Layout::ANY),
-        ::testing::Values(InferenceEngine::Layout::ANY),
-        ::testing::ValuesIn(CommonTestUtils::combineParams(basic5D)),
-        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
         ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D))
 );
 
 INSTANTIATE_TEST_SUITE_P(smoke_Activation5D_Eltwise_CPU_BF16, ActivationLayerCPUTest, basicCases5D, ActivationLayerCPUTest::getTestCaseName);
 
+const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypesDynamicMath = {
+        {Log, {{}}},
+        {Sign, {{}}},
+        {Acos, {{}}},
+        {Acosh, {{}}},
+        {Asin, {{}}},
+        {Asinh, {{}}},
+        {Atan, {{}}},
+        {Atanh, {{}}},
+        {Cos, {{}}},
+        {Cosh, {{}}},
+        {Tan, {{}}},
+        {HardSigmoid, {{0.2f, 0.5f}}},
+        {Selu, {{1.6732f, 1.0507f}}},
+        {Ceiling, {{}}}
+};
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+        InferenceEngine::Precision::FP32
+};
+
+std::vector<CPUSpecificParams> cpuParamsDynamicMath = {
+        CPUSpecificParams({}, {}, {}, {})
+};
+
+std::vector<std::vector<InputShape>> dynamicMathBasic = {
+        {
+            {{{-1, -1}, {{1, 50}, {5, 128}, {3, 64}}}},
+            {{{-1, -1, -1, -1, -1, -1, -1, -1}, {{2, 2, 2, 2, 2, 2, 2, 2}, {2, 3, 2, 3, 2, 3, 2, 3}, {3, 3, 3, 3, 3, 3, 3, 3}}}},
+            {{{{1, 5}, 128}, {{1, 128}, {3, 128}, {5, 128}}}}
+        }
+};
+
+const auto dynamicMathBasicCases = ::testing::Combine(
+        ::testing::ValuesIn(dynamicMathBasic),
+        ::testing::Values(activationShapes),
+        ::testing::ValuesIn(CommonTestUtils::combineParams(activationTypesDynamicMath)),
+        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(Precision::FP32),
+        ::testing::Values(Precision::FP32),
+        ::testing::ValuesIn(cpuParamsDynamicMath)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Activation5D_dynamicMath_CPU, ActivationLayerCPUTest, dynamicMathBasicCases, ActivationLayerCPUTest::getTestCaseName);
+
 } // namespace
 } // namespace CPULayerTestsDefinitions
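For reference, each entry in dynamicMathBasic pairs a partial shape (-1 marks a fully dynamic dimension, and an interval such as {1, 5} bounds one) with the list of concrete shapes the test actually infers on; the static suites are converted to the same representation by static_shapes_to_test_representation(). A small sketch of that convention with simplified types (InputShapeSketch is illustrative only and cannot express interval bounds; the real type is ov::test::InputShape):

#include <cstdint>
#include <iostream>
#include <vector>

// Simplified stand-in: -1 marks a dynamic dimension in the "compile-time"
// shape; targets are the concrete shapes each inference request runs with.
struct InputShapeSketch {
    std::vector<int64_t> dynamicShape;
    std::vector<std::vector<size_t>> targets;
};

int main() {
    // Mirrors {{-1, -1}, {{1, 50}, {5, 128}, {3, 64}}} above: compile once
    // against a rank-2 fully dynamic shape, then infer on three real shapes.
    InputShapeSketch s{{-1, -1}, {{1, 50}, {5, 128}, {3, 64}}};
    for (const auto& t : s.targets)
        std::cout << t[0] << " x " << t[1] << '\n';  // 1 x 50, 5 x 128, 3 x 64
    return 0;
}

Running several target shapes against one compiled model is exactly what exercises the node's new shapeInfer()/executeDynamicImpl() path between inference requests.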