[IE CLDNN] Added HSigmoid operation (#2700)

Roman Lyamin 2020-10-18 20:47:22 +03:00 committed by GitHub
parent cc2bfcf1d7
commit cc569d2254
12 changed files with 82 additions and 9 deletions
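
For reference, HSigmoid computes min(max(x + 3, 0), 6) / 6; that is the expression used in the activation_func comment, the generated JIT constant, and the test expectations below. A minimal standalone sketch of the reference math (not code from this commit):

    #include <cmath>

    // Reference HSigmoid: min(max(x + 3, 0), 6) / 6.
    // Saturates to 0 for x <= -3 and to 1 for x >= 3.
    inline float hsigmoid_ref(float x) {
        return std::fmin(std::fmax(x + 3.0f, 0.0f), 6.0f) / 6.0f;
    }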

View File

@@ -830,6 +830,7 @@ Program::LayerType Program::LayerTypeFromStr(const std::string &str) {
{ "Ceiling" , Ceiling },
{ "Erf" , Erf },
{ "HardSigmoid" , HardSigmoid },
{ "HSigmoid", HSigmoid },
{ "Log" , Log },
{ "Neg" , Neg },
{ "Reciprocal" , Reciprocal },
@@ -1399,6 +1400,7 @@ void Program::CreateSingleLayerPrimitive(cldnn::topology& topology, InferenceEng
case Ceiling:
case Erf:
case HardSigmoid:
case HSigmoid:
case Log:
case Neg:
case Reciprocal:
@@ -3078,6 +3080,8 @@ void Program::CreateActivationPrimitive(cldnn::topology& topology, InferenceEngi
activationType = Exp;
} else if (activation_type == "not") {
activationType = Not;
} else if (activation_type == "hsigmoid") {
activationType = HSigmoid;
} else {
THROW_CLDNN_EXCEPTION("Unsupported activation type (" + activation_type +
") in layer " + layer->name);
@@ -3199,6 +3203,11 @@ void Program::CreateActivationPrimitive(cldnn::topology& topology, InferenceEngi
params.b = layer->GetParamAsFloat("beta", 0.5f);
break;
}
case HSigmoid:
{
func = cldnn::activation_func::hsigmoid;
break;
}
case Log:
{
func = cldnn::activation_func::log;

View File

@@ -195,6 +195,7 @@ public:
Ceiling,
Erf,
HardSigmoid,
HSigmoid,
Log,
Neg,
Reciprocal,

View File

@@ -44,7 +44,8 @@ const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes
{Ceiling, {}},
{Mish, {}},
{HSwish, {}},
{SoftPlus, {}}
{SoftPlus, {}},
{HSigmoid, {}}
};
std::map<std::vector<size_t>, std::vector<std::vector<size_t>>> basic = {

View File

@@ -71,6 +71,7 @@ static std::map<ngraph::helpers::ActivationTypes, std::string> activationNames =
{ngraph::helpers::ActivationTypes::HSwish, "HSwish"},
{ngraph::helpers::ActivationTypes::SoftPlus, "SoftPlus"},
{ngraph::helpers::ActivationTypes::Swish, "Swish"},
{ngraph::helpers::ActivationTypes::HSigmoid, "HSigmoid"},
};
typedef std::tuple<

View File

@@ -111,7 +111,8 @@ enum ActivationTypes {
Mish,
HSwish,
SoftPlus,
Swish
Swish,
HSigmoid
};
enum EltwiseTypes {

View File

@@ -102,6 +102,8 @@ std::shared_ptr<ngraph::Node> makeActivation(const ngraph::Output<Node> &in,
auto beta = std::make_shared<ngraph::op::Constant>(type, inShape, constantsValue[0]);
return std::make_shared<ngraph::op::v4::Swish>(in, beta);
}
case ngraph::helpers::ActivationTypes::HSigmoid:
return std::make_shared<ngraph::op::v5::HSigmoid>(in);
default:
throw std::runtime_error("Can't create layer for this activation type");
}
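
For context, op::v5::HSigmoid created above is a single-input activation with no attributes. A hedged sketch of wrapping that node into a standalone nGraph function (the function name and shape below are illustrative, not taken from this commit):

    #include <memory>
    #include <ngraph/ngraph.hpp>

    // Minimal sketch: a one-node function that applies opset5 HSigmoid to an
    // f16 parameter, wrapping the node that makeActivation now creates for
    // ActivationTypes::HSigmoid.
    std::shared_ptr<ngraph::Function> makeHSigmoidFunction() {
        auto param = std::make_shared<ngraph::op::Parameter>(
            ngraph::element::f16, ngraph::Shape{1, 2, 5, 2});
        auto hsig = std::make_shared<ngraph::op::v5::HSigmoid>(param);
        return std::make_shared<ngraph::Function>(ngraph::NodeVector{hsig},
                                                  ngraph::ParameterVector{param});
    }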

View File

@@ -63,6 +63,7 @@ enum class activation_func {
reciprocal, // (1/val)
erf, // Gauss error function
hard_sigmoid, // max(0, min(1, a * val + b)) (a,b are additional params)
hsigmoid, // min(max(val + 3, 0), 6) / 6
selu, // for val <= 0: b * (a * e^val - a); for val > 0: b * val (a,b are additional params)
sign, // val > 0: 1; val < 0: -1; val == 0: 0
softplus, // ln(exp(val) + 1)

View File

@@ -145,6 +145,7 @@ enum class ActivationFunction {
POW,
ERF,
HARD_SIGMOID,
HSIGMOID,
RECIPROCAL,
SELU,
SIGN,

View File

@@ -584,7 +584,7 @@ class WeightTensorJitConstant : public TensorBaseTJitConstant<WeightsType, Weigh
} else if (l == WeightsLayout::g_os_zyx_is_osv16_isv16 || l == WeightsLayout::g_os_zyx_is_osv16_isv32 ||
l == WeightsLayout::g_os_zyx_is_osv32_isv16 || l == WeightsLayout::g_os_zyx_is_osv32_isv32) {
args macroNameArgs = {"prefix", "g", "o", "i", "z", "y", "x"};
args funcArgs = {"g", "o", "i", "z", "y", "x", "g_size", "o_size", "i_size", "z_size", "y_size", "x_size", "osv", "isv"};
const auto name = toString(l);
const auto body = R"V0G0N( \
uint is_size = (i_size + isv - 1) / isv; \
@@ -615,7 +615,7 @@ class WeightTensorJitConstant : public TensorBaseTJitConstant<WeightsType, Weigh
this->macroName = MacroName(name, macroNameArgs);
this->calcFunction = FuncBody(name, funcArgs, body);
std::string osv = "16", isv = "16";
if (l == WeightsLayout::g_os_zyx_is_osv16_isv16) {
osv = "16"; isv = "16";
} else if (l == WeightsLayout::g_os_zyx_is_osv16_isv32) {
osv = "16"; isv = "32";
@@ -741,7 +741,7 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const {
if (is_grouped_4d_layout) {
index_macro_name = _name + "_GET_INDEX(g, o, i, y, x)";
auto layout_str = toString(layout);
if (layout == WeightsLayout::goiyx)
index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, 0, y, x)";
else if (layout == WeightsLayout::g_os_is_yx_isv16_osv16)
index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, 0, y, x, 16)";
@@ -765,7 +765,7 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const {
if (is_grouped_5d_layout) {
index_macro_name = _name + "_GET_INDEX(g, o, i, z, y, x)";
auto layout_str = toString(layout);
if (layout == WeightsLayout::goizyx)
index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, z, y, x)";
else if (layout == WeightsLayout::g_os_is_zyx_isv16_osv16)
index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, z, y, x, 16)";
@@ -787,7 +787,7 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const {
if (is_common_4d_layout) {
index_macro_name = _name + "_GET_INDEX(o, i, y, x)";
auto layout_str = toString(layout);
if (layout == WeightsLayout::oiyx)
index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, 0, y, x)";
else if (layout == WeightsLayout::os_is_yx_isv16_osv16)
index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, 0, y, x, 16)";
@@ -814,7 +814,7 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const {
if (is_common_5d_layout) {
index_macro_name = _name + "_GET_INDEX(o, i, z, y, x)";
auto layout_str = toString(layout);
if (layout == WeightsLayout::oizyx)
index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, z, y, x)";
else if (layout == WeightsLayout::os_is_zyx_isv16_osv16)
index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, z, y, x, 16)";
@@ -1022,6 +1022,15 @@ JitConstants MakeActivationJitConstants(ActivationFunction activation_function,
max_func(zero, min_func(one, (JitTerm)((alpha * input + beta).str()))).str()));
break;
}
case ActivationFunction::HSIGMOID: {
std::string type_suffix = out_dt == Datatype::F32 ? "f" : "h";
const JitTerm three("3." + type_suffix);
const JitTerm six("6." + type_suffix);
jitConstants.AddConstant(MakeJitConstant(
macro_def,
(min_func(max_func(zero, input + three), six) / six).str()));
break;
}
case ActivationFunction::SIGN:
jitConstants.AddConstant(MakeJitConstant(
macro_def,

View File

@@ -76,6 +76,7 @@ std::string toString(ActivationFunction activation) {
case ActivationFunction::NEGATIVE: method = "NEGATIVE"; break;
case ActivationFunction::ERF: method = "ERF"; break;
case ActivationFunction::HARD_SIGMOID: method = "HARD_SIGMOID"; break;
case ActivationFunction::HSIGMOID: method = "HSIGMOID"; break;
case ActivationFunction::RECIPROCAL: method = "RECIPROCAL"; break;
case ActivationFunction::SELU: method = "SELU"; break;
case ActivationFunction::SIGN: method = "SIGN"; break;

View File

@@ -693,6 +693,8 @@ kernel_selector::activation_function get_kernel_selector_activation_param(activa
return kernel_selector::activation_function::SOFTSIGN;
case cldnn::activation_func::hard_sigmoid:
return kernel_selector::activation_function::HARD_SIGMOID;
case cldnn::activation_func::hsigmoid:
return kernel_selector::activation_function::HSIGMOID;
case cldnn::activation_func::swish:
return kernel_selector::activation_function::SWISH;
case cldnn::activation_func::hswish:

View File

@@ -735,6 +735,46 @@ TEST(activation_f16_fw_gpu, basic_yxfb_hswish) {
}
}
TEST(activation_f16_fw_gpu, basic_yxfb_hsigmoid) {
const auto& engine = get_test_engine();
auto input = memory::allocate(engine, { data_types::f16, format::yxfb, { 1, 2, 5, 2 } });
set_values(input,
{ FLOAT16(0.0f), FLOAT16(-2.0f), FLOAT16(-3.0f), FLOAT16(4.0f), FLOAT16(5.0f),
FLOAT16(2.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(-6.0f),
FLOAT16(3.0f), FLOAT16(-3.0f), FLOAT16(3.0f), FLOAT16(5.0f), FLOAT16(1.0f),
FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(-1.0f), FLOAT16(1.0f) });
topology topology(
input_layout("input", input.get_layout()),
activation("hsigmoid", "input", activation_func::hsigmoid));
network network(engine, topology);
network.set_input_data("input", input);
auto outputs = network.execute();
EXPECT_EQ(outputs.size(), size_t(1));
EXPECT_EQ(outputs.begin()->first, "hsigmoid");
auto output_memory = outputs.at("hsigmoid").get_memory();
auto output_layout = output_memory.get_layout();
auto output_ptr = output_memory.pointer<FLOAT16>();
auto input_ptr = input.pointer<FLOAT16>();
int y_size = output_layout.size.spatial[1];
int x_size = output_layout.size.spatial[0];
int f_size = output_layout.size.feature[0];
int b_size = output_layout.size.batch[0];
EXPECT_EQ(output_layout.format, format::yxfb);
EXPECT_EQ(y_size, 2);
EXPECT_EQ(x_size, 5);
EXPECT_EQ(f_size, 2);
EXPECT_EQ(b_size, 1);
for (size_t i = 0; i < output_layout.get_linear_size(); ++i) {
EXPECT_NEAR((FLOAT16)(std::fmin(std::fmax(0.f, (float)input_ptr[i] + 3.f), 6.f) / 6.f),
output_ptr[i], 1e-3f);
}
}
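
As a quick sanity check of the expectation above: the reference expression saturates to 0 for inputs at or below -3 and to 1 for inputs at or above 3, so for example an input of 5.0 maps to min(8, 6) / 6 = 1. A small hedged sketch of those worked values (not part of the test suite):

    #include <cassert>
    #include <cmath>

    // Worked values of min(max(0, x + 3), 6) / 6, the expression the
    // EXPECT_NEAR above compares against.
    int main() {
        auto hsig = [](float x) { return std::fmin(std::fmax(0.f, x + 3.f), 6.f) / 6.f; };
        assert(hsig(-3.f) == 0.0f);  // clamps at the lower bound
        assert(hsig(0.f) == 0.5f);   // midpoint
        assert(hsig(5.f) == 1.0f);   // min(8, 6) / 6 saturates to 1
        return 0;
    }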
TEST(activation_f32_fw_gpu, basic_yxfb_all_functions)
{
// Input:
@@ -782,7 +822,8 @@ TEST(activation_f32_fw_gpu, basic_yxfb_all_functions)
activation_func::swish,
activation_func::hswish,
activation_func::mish,
activation_func::gelu
activation_func::gelu,
activation_func::hsigmoid
};
activation_additional_params params = { 0.5f, 2.5f };
@@ -910,6 +951,9 @@ TEST(activation_f32_fw_gpu, basic_yxfb_all_functions)
EXPECT_NEAR(0.5f * (float)input_ptr[i] * (1.f + std::erf((float)(input_ptr[i]) / std::sqrt(2.0f))),
output_ptr[i], 1e-5f);
break;
case activation_func::hsigmoid:
EXPECT_FLOAT_EQ(std::fmin(std::fmax(0.f, (float)input_ptr[i] + 3.f), 6.f) / 6.f, output_ptr[i]);
break;
default:
break;
}