[GNA] ActivationLayer tests enabled (#7920)

* [GNA] ActivationLayer tests in integer precision enabled

* Enabling some FP32 and FP16 ActivationLayerTests | Limiting Sign activation input values

* Refactoring activation tests
Andrey Sapozhnikov 2021-11-10 14:20:43 +03:00 committed by GitHub
parent aa5a220ea0
commit 286fefb956
5 changed files with 114 additions and 18 deletions


@@ -6,6 +6,66 @@
#include "single_layer_tests/activation.hpp"
#include "common_test_utils/test_constants.hpp"
namespace LayerTestsDefinitions {
class ActivationLayerGNATest : public ActivationLayerTest {
protected:
    void SetUp() override {
        ActivationLayerTest::SetUp();
        // TODO: remove after integer inference output support
        auto ngPrc = function->get_parameters()[0]->get_element_type().get_type_name();
        if (ngPrc == "u8" || ngPrc == "i16") {
            threshold = 1.0;
        }
    }

    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override {
        bool inPrcSigned = function->get_parameters()[0]->get_element_type().is_signed();
        int32_t data_start_from = -10;
        uint32_t data_range = 20;
        int32_t resolution = 32768;

        switch (activationType) {
        case ngraph::helpers::ActivationTypes::Log: {
            data_start_from = 1;
            data_range = 20;
            resolution = 32768;
            break;
        }
        case ngraph::helpers::ActivationTypes::Sign: {
            data_start_from = -10;
            data_range = 20;
            resolution = 3072;
            break;
        }
        case ngraph::helpers::ActivationTypes::Exp: {
            const double max_result_on_GNA = 15.9;
            const double exp_inverse = std::round(std::log(max_result_on_GNA));
            if (inPrcSigned) {
                data_range = exp_inverse * 2.0;
                data_start_from = -exp_inverse;
            } else {
                data_range = exp_inverse;
                data_start_from = 0;
            }
            break;
        }
        }

        if (!inPrcSigned) {
            data_range = 15;
            data_start_from = 0;
        }
        return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_range,
                                                data_start_from, resolution);
    }
};

TEST_P(ActivationLayerGNATest, CompareWithRefs) {
    Run();
}

} // namespace LayerTestsDefinitions
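
For reference, the Exp bound used above follows from the constant in the test: the exp output on GNA is capped near 15.9, so the largest safe input magnitude is round(ln(15.9)) = round(2.77) = 3. A minimal standalone sketch of that arithmetic (plain C++, not part of the diff):

#include <cmath>
#include <iostream>

int main() {
    const double max_result_on_GNA = 15.9;                               // constant taken from the test above
    const double exp_inverse = std::round(std::log(max_result_on_GNA));  // ln(15.9) ~= 2.77, rounds to 3
    std::cout << "signed Exp input range:   [" << -exp_inverse << ", " << exp_inverse << ")\n";
    std::cout << "unsigned Exp input range: [0, " << exp_inverse << ")\n";
    return 0;
}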
using namespace LayerTestsDefinitions;
using namespace ngraph::helpers;
namespace {
@@ -16,13 +76,12 @@ const std::vector<InferenceEngine::Precision> inputPrecisions = {
    InferenceEngine::Precision::U8
};

const std::vector<InferenceEngine::Precision> netPrecisions = {
    // TODO: Issue:27391
    // InferenceEngine::Precision::FP32,
    // TODO: Issue:28036
    // InferenceEngine::Precision::FP16,
    InferenceEngine::Precision::FP32,
    InferenceEngine::Precision::FP16,
    InferenceEngine::Precision::I16,
    InferenceEngine::Precision::U8
    InferenceEngine::Precision::U8,
};
const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes = {
@@ -62,6 +121,6 @@ const auto basicCases = ::testing::Combine(
);
INSTANTIATE_TEST_SUITE_P(smoke_Activation_Basic, ActivationLayerTest, basicCases, ActivationLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Activation_Basic, ActivationLayerGNATest, basicCases, ActivationLayerTest::getTestCaseName);
} // namespace
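
For context on the instantiation above: ActivationLayerGNATest is value-parameterized, and ::testing::Combine builds the cross product of all parameter lists, generating one test per tuple. A minimal self-contained GoogleTest sketch of the same mechanism (generic gtest usage with invented names, not OpenVINO code):

#include <gtest/gtest.h>
#include <tuple>

class SmallParamTest : public ::testing::TestWithParam<std::tuple<int, char>> {};

TEST_P(SmallParamTest, TupleIsConsistent) {
    auto [i, c] = GetParam();  // one concrete combination per test instance
    EXPECT_GE(i, 0);
    EXPECT_TRUE(c == 'a' || c == 'b');
}

// Expands into four tests: (0,'a'), (0,'b'), (1,'a'), (1,'b').
INSTANTIATE_TEST_SUITE_P(smoke_Small,
                         SmallParamTest,
                         ::testing::Combine(::testing::Values(0, 1),
                                            ::testing::Values('a', 'b')));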


@@ -17,7 +17,6 @@ std::vector<std::string> disabledTestPatterns() {
        // TODO: FIX BUG 59041
        ".*Behavior.*CallbackThrowException.*",
        // TODO: FIX BUG 32210
        R"(.*ActivationLayerTest.CompareWithRefs/(Sigmoid|Tanh|Exp|Log).*)",
        R"(.*ActivationFQSubgraph.*activation=(Exp|Log).*)",
        // TODO: Issue 68586
        R"(.*EltwiseActFqTest.*act=Log.*)",
@@ -76,5 +75,8 @@ std::vector<std::string> disabledTestPatterns() {
        // TODO: Issue: CVS-69639
        R"(.*EltwiseLayerTest.*OpType=Prod.*)",
        R"(.*EltwiseLayerTest.*OpType=Sum.*PARAMETER.*VECTOR.*)",
        // TODO: Issue:27391
        // TODO: Issue:28036
        R"(.*ActivationLayerGNATest.*(Log|Exp).*netPRC=(FP16|FP32).*)",
    };
}
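
The entries returned above are regular expressions matched against fully qualified test names; any match disables the test. A hypothetical illustration of the matching, assuming the harness applies the patterns with std::regex as sketched (the test name below is invented for the example):

#include <iostream>
#include <regex>
#include <string>

int main() {
    const std::regex pattern(R"(.*ActivationLayerGNATest.*(Log|Exp).*netPRC=(FP16|FP32).*)");
    const std::string testName =
        "smoke_Activation_Basic/ActivationLayerGNATest.CompareWithRefs/Log_netPRC=FP32_targetDevice=GNA";
    std::cout << (std::regex_match(testName, pattern) ? "disabled" : "enabled") << "\n";  // prints "disabled"
    return 0;
}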


@@ -137,17 +137,7 @@ InferenceEngine::Blob::Ptr ActivationLayerTest::GenerateInput(const InferenceEng
        data_range = 15;
        data_start_from = 0;
    }
    if (activationType == ngraph::helpers::ActivationTypes::Exp && targetDevice == CommonTestUtils::DEVICE_GNA) {
        const double max_result_on_GNA = 15.9;
        const double exp_inverse = std::round(std::log(max_result_on_GNA));
        if (inPrcSigned) {
            data_range = exp_inverse * 2.0;
            data_start_from = -exp_inverse;
        } else {
            data_range = exp_inverse;
            data_start_from = 0;
        }
    }
    return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_range,
                                            data_start_from,
                                            resolution);


@@ -27,6 +27,7 @@
#include <ngraph/runtime/reference/embedding_bag_offsets_sum.hpp>
#include <ngraph/runtime/reference/embedding_bag_packed_sum.hpp>
#include <ngraph/runtime/reference/embedding_segments_sum.hpp>
#include <ngraph/runtime/reference/exp.hpp>
#include <ngraph/runtime/reference/experimental_detectron_detection_output.hpp>
#include <ngraph/runtime/reference/experimental_detectron_prior_grid_generator.hpp>
#include <ngraph/runtime/reference/experimental_detectron_proposal_single_image.hpp>
@@ -45,6 +46,7 @@
#include <ngraph/runtime/reference/gru_cell.hpp>
#include <ngraph/runtime/reference/hard_sigmoid.hpp>
#include <ngraph/runtime/reference/if.hpp>
#include <ngraph/runtime/reference/log.hpp>
#include <ngraph/runtime/reference/log_softmax.hpp>
#include <ngraph/runtime/reference/lrn.hpp>
#include <ngraph/runtime/reference/lstm_cell.hpp>
@@ -67,9 +69,11 @@
#include <ngraph/runtime/reference/scatter_nd_update.hpp>
#include <ngraph/runtime/reference/selu.hpp>
#include <ngraph/runtime/reference/sequences.hpp>
#include <ngraph/runtime/reference/sigmoid.hpp>
#include <ngraph/runtime/reference/sign.hpp>
#include <ngraph/runtime/reference/slice.hpp>
#include <ngraph/runtime/reference/squared_difference.hpp>
#include <ngraph/runtime/reference/tanh.hpp>
#include <ngraph/runtime/reference/tensor_iterator.hpp>
#include <ngraph/runtime/reference/utils/nms_common.hpp>
@@ -1684,6 +1688,42 @@ bool evaluate(const shared_ptr<op::v0::Abs>& op, const HostTensorVector& outputs
    return true;
}

template <element::Type_t ET>
bool evaluate(const shared_ptr<op::v0::Sigmoid>& op, const HostTensorVector& outputs, const HostTensorVector& inputs) {
    using T = typename element_type_traits<ET>::value_type;
    runtime::reference::sigmoid<T>(inputs[0]->get_data_ptr<T>(),
                                   outputs[0]->get_data_ptr<T>(),
                                   shape_size(inputs[0]->get_shape()));
    return true;
}

template <element::Type_t ET>
bool evaluate(const shared_ptr<op::v0::Exp>& op, const HostTensorVector& outputs, const HostTensorVector& inputs) {
    using T = typename element_type_traits<ET>::value_type;
    runtime::reference::exp<T>(inputs[0]->get_data_ptr<T>(),
                               outputs[0]->get_data_ptr<T>(),
                               shape_size(inputs[0]->get_shape()));
    return true;
}

template <element::Type_t ET>
bool evaluate(const shared_ptr<op::v0::Tanh>& op, const HostTensorVector& outputs, const HostTensorVector& inputs) {
    using T = typename element_type_traits<ET>::value_type;
    runtime::reference::tanh<T>(inputs[0]->get_data_ptr<T>(),
                                outputs[0]->get_data_ptr<T>(),
                                shape_size(inputs[0]->get_shape()));
    return true;
}

template <element::Type_t ET>
bool evaluate(const shared_ptr<op::v0::Log>& op, const HostTensorVector& outputs, const HostTensorVector& inputs) {
    using T = typename element_type_traits<ET>::value_type;
    runtime::reference::log<T>(inputs[0]->get_data_ptr<T>(),
                               outputs[0]->get_data_ptr<T>(),
                               shape_size(inputs[0]->get_shape()));
    return true;
}
namespace ctc_loss_v4 {
template <element::Type_t t1, element::Type_t t2>
inline void evaluate(const shared_ptr<op::v4::CTCLoss>& op,

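The four evaluators added above share one dispatch idiom: a compile-time element-type tag is mapped to a concrete C++ type through element_type_traits, and a typed reference loop runs over the flattened tensor. A minimal standalone sketch of that idiom with simplified stand-ins (not the real ngraph types):

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iostream>

enum class Et { f32, i16 };  // stand-in for element::Type_t

template <Et ET> struct element_type_traits;  // maps tag -> value type
template <> struct element_type_traits<Et::f32> { using value_type = float; };
template <> struct element_type_traits<Et::i16> { using value_type = int16_t; };

// Analogue of evaluate<ET>(): derive T from the tag, then apply the
// element-wise reference function to every element.
template <Et ET>
void evaluate_exp(const void* in, void* out, std::size_t count) {
    using T = typename element_type_traits<ET>::value_type;
    auto src = static_cast<const T*>(in);
    auto dst = static_cast<T*>(out);
    for (std::size_t i = 0; i < count; ++i)
        dst[i] = static_cast<T>(std::exp(static_cast<double>(src[i])));
}

int main() {
    float in[] = {0.0f, 1.0f};
    float out[2];
    evaluate_exp<Et::f32>(in, out, 2);
    std::cout << out[0] << " " << out[1] << "\n";  // 1 2.71828
}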

@@ -106,3 +106,8 @@ NGRAPH_OP(MulticlassNms, op::v8)
NGRAPH_OP(Slice, op::v8)
NGRAPH_OP(DeformableConvolution, ngraph::op::v8)
NGRAPH_OP(If, ngraph::op::v8)
NGRAPH_OP(Sigmoid, op::v0)
NGRAPH_OP(Tanh, op::v0)
NGRAPH_OP(Exp, op::v0)
NGRAPH_OP(Log, op::v0)
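
Registering the four ops in this table is what makes the new evaluators reachable. The file reads as an X-macro table: each includer defines NGRAPH_OP to expand every entry into whatever it needs, such as a map insertion or a case label. A hypothetical, minimal illustration of the pattern with invented names (not the real opset table):

#include <cstdio>

// Inline stand-in for an opset table; the real one lives in its own
// header and lists NGRAPH_OP(Name, namespace) entries.
#define OP_TABLE(X) \
    X(Sigmoid)      \
    X(Tanh)         \
    X(Exp)          \
    X(Log)

#define AS_STRING(name) #name,
static const char* registered_ops[] = {OP_TABLE(AS_STRING)};  // {"Sigmoid", "Tanh", "Exp", "Log"}
#undef AS_STRING

int main() {
    for (const char* name : registered_ops)
        std::printf("registered: %s\n", name);
    return 0;
}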