Port the substitute SoftSign pass to ngraph (#8167)

* add base ngraph::SoftSign operation wrapper; add substitute SoftSign pass draft code

* add SoftSign converter from ngraph to CNNNetwork

* add unit tests; write DoTransformation

* cpplint fixes

* add pass description

* cleanups; always use add layer

* remove bf16 since it is CPU specific

* add softsign evaluate unit test; code review fixes

* add unit tests on checking const values

* inherit SoftSign from UnaryElementwiseArithmetic; code review fixes

* remove unneeded visit_attributes

* remove NGRAPH_TYPE_CASE macro

* use ngraph::op::util::get_single_value in pattern checking

* use legacy SubstituteSoftSignPass if there are FQ layers; build fix

* remove deprecated SoftSign tests

* fix deprecated error

* add draft unit tests on softsign operation with new api

* use new test API for softsign

* use another evaluate virtual method
Evgeny Kotov 2021-11-16 11:27:37 +03:00 committed by GitHub
parent 95e1a423c6
commit 6860fd3ef4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 456 additions and 245 deletions


@@ -70,6 +70,7 @@
#include "transformations/op_conversions/lstm_cell_decomposition.hpp"
#include "transformations/remove_single_input_concat.hpp"
#include "transformations/broadcast_const.hpp"
#include "transformations/substitute_softsign.hpp"
#include <ngraph/opsets/opset7.hpp>
@@ -713,6 +714,7 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
manager.register_pass<InsertTransposeAfterConvOrPool>();
manager.register_pass<ReorderActivationAndPooling>();
manager.register_pass<RemoveSingleInputConcat>();
manager.register_pass<SubstituteSoftsign>();
manager.register_pass<ngraph::pass::ConvertOpSet3ToOpSet2>();
manager.register_pass<ngraph::pass::ConvertOpSet2ToOpSet1>();
manager.register_pass<ngraph::pass::ConvertOpSet1ToLegacy>();
@@ -781,6 +783,9 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
passes->registerPass<SubstituteScaleShiftBroadCastPass>();
}
if (fake_quantized)
passes->registerPass<SubstituteSoftSignPass>();
// fake quantisation aware passes
passes->registerPass<FuseFQIntoWeightsPass>();
passes->registerPass<MoveFakeQuantizeLayerIntoQuantParamsPass>();
@@ -788,7 +793,6 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
passes->registerPass<TransposeWeightsFromNCHWToNHWCPass>();
passes->registerPass<SubstitutePReluPass>();
passes->registerPass<SubstituteSoftSignPass>();
passes->registerPass<ReorderMaxPoolPass>();
passes->registerPass<EltwiseSplitOverChannelsPass>();


@@ -0,0 +1,79 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "softsign.hpp"
#include <ngraph/validation_util.hpp>
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include <cmath>
#include <cstddef>
NGRAPH_RTTI_DEFINITION(GNAPluginNS::SoftSign, "SoftSign", 0);
namespace GNAPluginNS {
template <typename T>
void softsign(const T* arg, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = arg[i] / (1 + std::abs(arg[i]));
}
}
SoftSign::SoftSign(const ngraph::Output<ngraph::Node>& arg) : ov::op::util::UnaryElementwiseArithmetic(arg) {
constructor_validate_and_infer_types();
}
std::shared_ptr<ngraph::Node> SoftSign::clone_with_new_inputs(const ngraph::OutputVector& new_args) const {
check_new_args_count(this, new_args);
return std::make_shared<SoftSign>(new_args.at(0));
}
template <ngraph::element::Type_t ET>
inline bool evaluate(const ov::runtime::Tensor& arg, ov::runtime::Tensor& out, const size_t count) {
using T = typename ngraph::element_type_traits<ET>::value_type;
softsign<T>(arg.data<T>(), out.data<T>(), count);
return true;
}
namespace {
bool evaluate_softsign(const ov::runtime::Tensor& arg, ov::runtime::Tensor& out) {
bool rc = true;
size_t count = shape_size(arg.get_shape());
switch (arg.get_element_type()) {
case ov::element::Type_t::f16:
rc = evaluate<ov::element::Type_t::f16>(arg, out, count);
break;
case ov::element::Type_t::f32:
rc = evaluate<ov::element::Type_t::f32>(arg, out, count);
break;
default:
rc = false;
break;
}
return rc;
}
} // namespace
bool SoftSign::evaluate(ov::runtime::TensorVector& outputs,
const ov::runtime::TensorVector& inputs,
const ov::EvaluationContext& evaluation_context) const {
return evaluate_softsign(inputs[0], outputs[0]);
}
bool SoftSign::has_evaluate() const {
switch (get_input_element_type(0)) {
case ngraph::element::f16:
case ngraph::element::f32:
return true;
default:
break;
}
return false;
}
} // namespace GNAPluginNS


@@ -0,0 +1,30 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/op.hpp"
#include "ngraph/node.hpp"
#include "openvino/op/util/unary_elementwise_arithmetic.hpp"
namespace GNAPluginNS {
/// \brief Neural Activation Function
/// f(x) = x/(1.0 + |x|)
///
class SoftSign : public ov::op::util::UnaryElementwiseArithmetic {
public:
NGRAPH_RTTI_DECLARATION;
SoftSign() = default;
/// \brief Constructs a SoftSign operation.
///
/// \param data Input tensor
SoftSign(const ngraph::Output<ngraph::Node>& arg);
std::shared_ptr<Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override;
bool evaluate(ov::runtime::TensorVector& output_values,
const ov::runtime::TensorVector& input_values,
const ov::EvaluationContext & evaluation_context) const override;
bool has_evaluate() const override;
};
} // namespace GNAPluginNS
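For context on the formula documented in this header, here is a minimal standalone sketch (plain C++, not part of the commit; softsign_ref is a hypothetical helper used only for illustration) of the values the activation produces; the output always stays within (-1, 1):
#include <cmath>
#include <cstdio>
// Reference SoftSign matching the formula above: f(x) = x / (1 + |x|).
static float softsign_ref(float x) {
    return x / (1.0f + std::fabs(x));
}
int main() {
    // A few sample points; e.g. softsign_ref(1) = 0.5, softsign_ref(20) ~= 0.952381.
    const float samples[] = {-20.0f, -1.0f, 0.0f, 1.0f, 20.0f};
    for (float x : samples)
        std::printf("softsign(%g) = %g\n", x, softsign_ref(x));
    return 0;
}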


@@ -0,0 +1,89 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <openvino/cc/ngraph/itt.hpp>
#include "transformations/substitute_softsign.hpp"
#include "transformations/utils/transformation_helper.hpp"
#include "transformations/utils/utils.hpp"
#include <ngraph/opsets/opset8.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/pattern/op/or.hpp>
#include <ngraph/rt_info.hpp>
#include <ops/softsign.hpp>
using namespace GNAPluginNS;
NGRAPH_RTTI_DEFINITION(SubstituteSoftsign, "SubstituteSoftsign", 0);
using Node = std::shared_ptr<ngraph::Node>;
namespace {
void DoTransformation(Node start_node, Node last_node) {
auto activation = std::make_shared<SoftSign>(start_node);
activation->set_friendly_name(last_node->get_friendly_name());
ngraph::copy_runtime_info(last_node, activation);
ngraph::replace_node(last_node, activation);
}
class IsConstValueAcceptable {
public:
IsConstValueAcceptable(double expected_value) :
m_expected_value(expected_value) {}
bool operator()(const ngraph::Output<ngraph::Node>& output) const {
auto node = std::dynamic_pointer_cast<ngraph::opset8::Constant>(output.get_node_shared_ptr());
if (!node)
return false;
float value;
if (!ngraph::op::util::get_single_value(node, value)) {
return false;
}
return (value == m_expected_value);
}
private:
const double m_expected_value;
};
} // namespace
SubstituteSoftsign::SubstituteSoftsign() {
MATCHER_SCOPE(SubstituteSoftsign);
auto root = ngraph::pattern::any_input();
auto abs = ngraph::pattern::wrap_type<ngraph::opset8::Abs>({root});
auto add_const = ngraph::pattern::wrap_type<ngraph::opset8::Constant>(IsConstValueAcceptable(1.0));
auto add = ngraph::pattern::wrap_type<ngraph::opset8::Add>({abs, add_const});
auto power_const = ngraph::pattern::wrap_type<ngraph::opset8::Constant>(IsConstValueAcceptable(-1.0));
auto power = ngraph::pattern::wrap_type<ngraph::opset8::Power>({add, power_const});
auto multiply = ngraph::pattern::wrap_type<ngraph::opset8::Multiply>({root, power});
auto divide = ngraph::pattern::wrap_type<ngraph::opset8::Divide>({root, add});
auto last = std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{multiply, divide});
ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
auto root_node = pattern_map.at(root).get_node_shared_ptr();
auto last_node_it = pattern_map.find(multiply);
if (last_node_it == pattern_map.end())
last_node_it = pattern_map.find(divide);
auto last_node = last_node_it->second.get_node_shared_ptr();
DoTransformation(root_node, last_node);
return true;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(last, matcher_name);
this->register_matcher(m, callback);
}


@@ -0,0 +1,45 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ngraph/pass/graph_rewrite.hpp>
namespace GNAPluginNS {
/**
* @brief The current version of ModelOptimizer substitutes the SoftSign activation
* function with the following subgraph
* a layer
* | \
* abs \
* | |
* add |
* | |
* power |
* \ /
* Divide
*
* See model-optimizer/extensions/front/softsign_replacer.py
*
* The ConvertDivide transformation from CommonOptimizations
* substitutes Divide with a Power layer with a {-1} exponent, while the Add keeps its {1} constant
* - GNA supports Power only with exponents in the range [0, 2.8]
* - Add, Power and Divide layers are more performance-expensive on GNA
* than the SoftSign PWL
*
* The legacy SubstituteSoftSignPass supports IR v7 models, where the SoftSign subgraph
* could come without the Add layer. The current ModelOptimizer always generates
* the SoftSign subgraph with that layer.
*
* The SubstituteSoftsign transformation performs the backward substitution to SoftSign.
* TODO: remove this pass as soon as ModelOptimizer stops substituting the SoftSign activation
*/
class SubstituteSoftsign : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
SubstituteSoftsign();
};
} // namespace GNAPluginNS
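As a sanity check of the substitution described above: both matched forms — the Multiply with a Power({-1}) branch that ConvertDivide leaves behind, and the plain Divide branch — compute exactly the SoftSign values, which is what makes the backward substitution safe. A minimal standalone sketch (plain C++, not part of the commit) verifying the equivalence numerically:
#include <cassert>
#include <cmath>
int main() {
    for (float x = -5.0f; x <= 5.0f; x += 0.25f) {
        const float softsign  = x / (1.0f + std::fabs(x));                 // target SoftSign op
        const float mul_power = x * std::pow(std::fabs(x) + 1.0f, -1.0f);  // Abs -> Add(1) -> Power(-1) -> Multiply
        const float divide    = x / (std::fabs(x) + 1.0f);                 // Abs -> Add(1) -> Divide
        assert(std::fabs(softsign - mul_power) < 1e-6f);
        assert(std::fabs(softsign - divide) < 1e-6f);
    }
    return 0;
}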


@@ -13,6 +13,7 @@ addIeTargetTest(
gmock
commonTestUtils_s
GNAPlugin_test_static
engines_test_util
ADD_CPPLINT
LABELS
GNA


@@ -0,0 +1,40 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ops/softsign.hpp"
#include <string>
#include <vector>
#include "execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/validation_util.hpp"
#include "ngraph/opsets/opset8.hpp"
using namespace GNAPluginNS;
TEST(op_eval, softsign) {
auto p = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{4});
auto softsign = std::make_shared<SoftSign>(p);
auto fun = std::make_shared<ngraph::Function>(ngraph::OutputVector{softsign}, ngraph::ParameterVector{p});
float inputs[] = {-1.0, 0.0, 1.0, 20.0};
std::vector<float> expected_result{-0.5, 0.0, 0.5, 0.952381};
ov::runtime::TensorVector result(1);
ov::runtime::Tensor input{ov::element::f32, ov::Shape{4}, inputs};
ASSERT_TRUE(fun->evaluate(result, ov::runtime::TensorVector{input}));
EXPECT_EQ(result.size(), 1);
EXPECT_EQ(result[0].get_element_type(), ngraph::element::f32);
EXPECT_EQ(result[0].get_shape(), ngraph::Shape{4});
EXPECT_EQ(result[0].get_size(), 4);
const float * result_data = result[0].data<float>();
for (size_t i = 0; i < result[0].get_size(); ++i)
EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
}


@@ -0,0 +1,167 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "transformations/substitute_softsign.hpp"
#include "common_test_utils/ngraph_test_utils.hpp"
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset8.hpp>
#include <ngraph/pass/manager.hpp>
#include <transformations/init_node_info.hpp>
#include <ops/softsign.hpp>
namespace testing {
namespace {
std::shared_ptr<ngraph::Function> createSoftSignFunction() {
auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32,
ngraph::Shape{ 1, 1, 1, 64 });
auto softsign = std::make_shared<GNAPluginNS::SoftSign>(input_params);
ngraph::ResultVector results{ std::make_shared<ngraph::op::Result>(softsign) };
return std::make_shared<ngraph::Function>(ngraph::ResultVector{results},
ngraph::ParameterVector{input_params});
}
} // namespace
TEST(TransformationTests, SubstituteSoftSignMulPower) {
std::shared_ptr<ngraph::Function> func(nullptr), reference_func(nullptr);
{
auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32,
ngraph::Shape{ 1, 1, 1, 64 });
auto abs = std::make_shared<ngraph::op::Abs>(input_params);
auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1});
auto const_neg_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1});
auto add = std::make_shared<ngraph::opset8::Add>(abs, const_1);
auto power = std::make_shared<ngraph::opset8::Power>(add, const_neg_1);
auto mul = std::make_shared<ngraph::opset8::Multiply>(power, input_params);
ngraph::ResultVector results{ std::make_shared<ngraph::op::Result>(mul) };
func = std::make_shared<ngraph::Function>(ngraph::ResultVector{results},
ngraph::ParameterVector{input_params});
ngraph::pass::Manager m;
m.register_pass<ngraph::pass::InitNodeInfo>();
m.register_pass<GNAPluginNS::SubstituteSoftsign>();
m.run_passes(func);
ASSERT_NO_THROW(check_rt_info(func));
}
reference_func = createSoftSignFunction();
const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES);
const FunctionsComparator::Result result = func_comparator(func, reference_func);
ASSERT_TRUE(result.valid);
}
TEST(TransformationTests, SubstituteSoftSignDivide) {
std::shared_ptr<ngraph::Function> func(nullptr), reference_func(nullptr);
{
auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32,
ngraph::Shape{ 1, 1, 1, 64 });
auto abs = std::make_shared<ngraph::opset8::Abs>(input_params);
auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1});
auto add = std::make_shared<ngraph::opset8::Add>(abs, const_1);
auto divide = std::make_shared<ngraph::opset8::Divide>(input_params, add);
ngraph::ResultVector results{ std::make_shared<ngraph::opset8::Result>(divide) };
func = std::make_shared<ngraph::Function>(ngraph::ResultVector{results},
ngraph::ParameterVector{input_params});
ngraph::pass::Manager m;
m.register_pass<ngraph::pass::InitNodeInfo>();
m.register_pass<GNAPluginNS::SubstituteSoftsign>();
m.run_passes(func);
ASSERT_NO_THROW(check_rt_info(func));
}
reference_func = createSoftSignFunction();
const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES);
const FunctionsComparator::Result result = func_comparator(func, reference_func);
ASSERT_TRUE(result.valid);
}
TEST(TransformationTests, SubstituteSoftSignMulPowerInvalidAddConst) {
std::shared_ptr<ngraph::Function> func(nullptr), reference_func(nullptr);
{
auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32,
ngraph::Shape{ 1, 1, 1, 64 });
auto abs = std::make_shared<ngraph::op::Abs>(input_params);
auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1.1});
auto const_neg_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1});
auto add = std::make_shared<ngraph::opset8::Add>(abs, const_1);
auto power = std::make_shared<ngraph::opset8::Power>(add, const_neg_1);
auto mul = std::make_shared<ngraph::opset8::Multiply>(power, input_params);
ngraph::ResultVector results{ std::make_shared<ngraph::op::Result>(mul) };
func = std::make_shared<ngraph::Function>(ngraph::ResultVector{results},
ngraph::ParameterVector{input_params});
ngraph::pass::Manager m;
m.register_pass<ngraph::pass::InitNodeInfo>();
m.register_pass<GNAPluginNS::SubstituteSoftsign>();
m.run_passes(func);
ASSERT_NO_THROW(check_rt_info(func));
}
reference_func = ngraph::clone_function(*func);
const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES);
const FunctionsComparator::Result result = func_comparator(func, reference_func);
ASSERT_TRUE(result.valid);
}
TEST(TransformationTests, SubstituteSoftSignMulPowerInvalidPowerConst) {
std::shared_ptr<ngraph::Function> func(nullptr), reference_func(nullptr);
{
auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32,
ngraph::Shape{ 1, 1, 1, 64 });
auto abs = std::make_shared<ngraph::op::Abs>(input_params);
auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1});
auto const_neg_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1.1});
auto add = std::make_shared<ngraph::opset8::Add>(abs, const_1);
auto power = std::make_shared<ngraph::opset8::Power>(add, const_neg_1);
auto mul = std::make_shared<ngraph::opset8::Multiply>(power, input_params);
ngraph::ResultVector results{ std::make_shared<ngraph::op::Result>(mul) };
func = std::make_shared<ngraph::Function>(ngraph::ResultVector{results},
ngraph::ParameterVector{input_params});
ngraph::pass::Manager m;
m.register_pass<ngraph::pass::InitNodeInfo>();
m.register_pass<GNAPluginNS::SubstituteSoftsign>();
m.run_passes(func);
ASSERT_NO_THROW(check_rt_info(func));
}
reference_func = ngraph::clone_function(*func);
const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES);
const FunctionsComparator::Result result = func_comparator(func, reference_func);
ASSERT_TRUE(result.valid);
}
} // namespace testing


@@ -218,12 +218,6 @@ TEST_F(I16QuantisationTest, canDetectLeakyRelu) {
.gna().propagate_forward().called_with().pwl_inserted_into_nnet();
}
TEST_F(I16QuantisationTest, canDetectSoftWSignSubgraph) {
assert_that().onInferModel(TFSoftsignUnfoldedModel())
.inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.gna().propagate_forward().called_with().pwls_inserted_into_nnet({kActSigmoid});
}
TEST_F(I16QuantisationTest, MaxPool_followedAfterActivation) {
assert_that().onInferModel(maxpoolAfterRelu())
.inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)


@@ -1,97 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <gtest/gtest.h>
#include <single_layer_common.hpp>
#include <ngraph/op/parameter.hpp>
#include <ngraph/ops.hpp>
#include <ie_precision.hpp>
#include <legacy/ngraph_ops/power.hpp>
#include <debug.h>
#include "../gna_matcher.hpp"
typedef struct {
std::string activationType;
size_t input_shape;
std::pair<float, float> range;
} ActivationCaseParam;
using ActivationCaseParam2 = std::tuple<InferenceEngine::Precision, ActivationCaseParam>;
class GNAActivationTest : public GNATest<>,
public testing::WithParamInterface<ActivationCaseParam2> {
public:
static std::string getTestName(const testing::TestParamInfo<ActivationCaseParam2>& params) {
std::string test_name = std::string(std::get<0>(params.param).name()) + "_";
test_name += std::get<1>(params.param).activationType + "_";
test_name += std::to_string(std::get<1>(params.param).input_shape) + "_";
test_name += std::to_string(std::get<1>(params.param).range.first) + "_";
test_name += std::to_string(std::get<1>(params.param).range.second);
return test_name;
}
std::shared_ptr<ngraph::Function> buildNgraphFunction(const ActivationCaseParam& param) {
auto shape = ngraph::Shape{1, param.input_shape};
auto inputN = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, shape);
auto absN = std::make_shared<ngraph::op::v0::Abs>(inputN);
auto powerN = std::make_shared<ngraph::op::PowerIE>(absN, -1, 1, 1.0);
auto eltwiseN = std::make_shared<ngraph::op::v1::Multiply>(powerN, inputN);
auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{eltwiseN}, ngraph::ParameterVector{inputN});
return function;
}
};
TEST_P(GNAActivationTest, ActivationTest) {
const auto precision = std::get<0>(GetParam());
const auto param = std::get<1>(GetParam());
if (precision == InferenceEngine::Precision::FP32) {
auto input_data = generate_random_1d<float>(param.input_shape, param.range.first, param.range.second);
std::vector<float> expected_result(param.input_shape);
for (std::size_t i = 0; i < expected_result.size(); i++) {
auto & x = input_data[i];
if (param.activationType == "softsign") {
expected_result[i] = x / (1 + fabs(x));
} else {
FAIL() << "Unsupported activation type: " << param.activationType;
}
}
assert_that().onInferNgraphModel(buildNgraphFunction(param))
.inNotCompactMode()
.gna()
.propagate_forward()
.onCPU()
.called_with_input(input_data)
.equals_to(expected_result);
} else {
assert_that().onInferNgraphModel(buildNgraphFunction(param))
.inNotCompactMode()
.gna()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.withGNAConfig(GNA_CONFIG_KEY(PRECISION), precision.name())
.propagate_forward()
.called_with().pwls_inserted_into_nnet({kActSigmoid});
}
}
static const ActivationCaseParam gna_activation_test_params[] = {
{"softsign", 200, {-10, 10}},
};
INSTANTIATE_TEST_SUITE_P(
GNALayerTests, GNAActivationTest,
::testing::Combine(
::testing::Values(InferenceEngine::Precision::FP32, InferenceEngine::Precision::I16, InferenceEngine::Precision::I8),
::testing::ValuesIn(gna_activation_test_params)),
GNAActivationTest::getTestName);


@@ -2164,146 +2164,6 @@ std::string TFLeakyReluModel() {
)V0G0N";
}
std::string TFSoftsignUnfoldedModel() {
return R"V0G0N(
<?xml version="1.0" ?>
<net name="LSTM_statics_1000_1frame_test_unpack" version="7">
<layers>
<layer id="0" name="input_batch" type="Input">
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="42" name="FC_layer0/packed/ExpandDims_/Dims/Output_0/Data__const" type="Const">
<output>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</output>
<blobs>
<custom offset="0" precision="I32" size="4"/>
</blobs>
</layer>
<layer id="43" name="FC_layer0/packed/ExpandDims_131" type="Unsqueeze">
<input>
<port id="0">
<dim>1</dim>
<dim>64</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="44" name="abs_FC_layer0/Softsign" type="Abs">
<input>
<port id="0">
<dim>1</dim>
<dim>1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="45" name="FC_layer0/Softsign_plus_1/fused_power" type="Power">
<data power="-1.0" scale="1" shift="1.0"/>
<input>
<port id="0">
<dim>1</dim>
<dim>1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="46" name="div_FC_layer0/Softsign/mul_" type="Eltwise">
<data operation="mul"/>
<input>
<port id="0">
<dim>1</dim>
<dim>1</dim>
<dim>64</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="47" name="unstack_6/Squeeze_/value/Output_0/Data__const" type="Const">
<output>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</output>
<blobs>
<custom offset="4" precision="I32" size="4"/>
</blobs>
</layer>
<layer id="48" name="unstack_6/Squeeze_" type="Squeeze">
<input>
<port id="0">
<dim>1</dim>
<dim>1</dim>
<dim>64</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>64</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="43" to-port="0"/>
<edge from-layer="42" from-port="1" to-layer="43" to-port="1"/>
<edge from-layer="43" from-port="2" to-layer="44" to-port="0"/>
<edge from-layer="44" from-port="1" to-layer="45" to-port="0"/>
<edge from-layer="45" from-port="1" to-layer="46" to-port="0"/>
<edge from-layer="43" from-port="2" to-layer="46" to-port="1"/>
<edge from-layer="46" from-port="2" to-layer="48" to-port="0"/>
<edge from-layer="47" from-port="1" to-layer="48" to-port="1"/>
</edges>
</net>
)V0G0N";
}
std::string maxpoolAfterRelu() {
return R"V0G0N(
<?xml version="1.0" ?>


@@ -39,7 +39,6 @@ std::string ClampActivationModel();
std::string IdentityActivationModel();
std::string maxpoolAfterRelu();
std::string TFLeakyReluModel();
std::string TFSoftsignUnfoldedModel();
std::string cropWithoutOffsetModel();
std::string cropWithAlignedOffsetModel();
std::string cropWithOffsetModel();