Minimized a number of static variables in static build (#8131)
* Minimal RTTI for cpuFuncTests
* Fixed build
* Fixed compilation
* clang-format fix
* Fixed compilation for GPU plugin
* Fixed CPU
* Fixed comments
* Fixes
* Fix
* Reverted some ngraph changes
* Fixed cpuUnitTests compilation
parent 77799a2a56
commit 479081ff32
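The change this commit applies everywhere is mechanical: each op's hand-written static type_info member (plus its out-of-line constexpr definition, one more static object per class in the static build) is replaced by the OPENVINO_OP macro, and call sites compare against get_type_info_static() instead of reading the removed data member. A minimal sketch of the pattern, assuming a toolchain where OPENVINO_OP is available; MyOp and my_opset are placeholder names, not identifiers from this diff:

    #include <ngraph/op/op.hpp>

    namespace example {

    // Before: a static data member plus a virtual accessor per op class.
    //     static constexpr ngraph::NodeTypeInfo type_info{"MyOp", 0};
    //     const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
    // ...and in the .cpp: constexpr ngraph::NodeTypeInfo MyOp::type_info;

    // After: OPENVINO_OP generates get_type_info_static()/get_type_info(), so the
    // out-of-line static definition disappears.
    class MyOp : public ngraph::op::Op {
    public:
        OPENVINO_OP("MyOp", "my_opset");

        MyOp() = default;
        explicit MyOp(const ngraph::Output<ngraph::Node>& arg) : Op({arg}) {
            constructor_validate_and_infer_types();
        }
        void validate_and_infer_types() override {
            set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
        }
        std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override {
            return std::make_shared<MyOp>(new_args.at(0));
        }
    };

    // Call sites switch from MyOp::type_info to the static accessor:
    inline bool is_my_op(const std::shared_ptr<ngraph::Node>& node) {
        return node->get_type_info() == MyOp::get_type_info_static();
    }

    }  // namespace example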
@@ -25,7 +25,7 @@ using namespace TemplateExtension;
 //! [extension:ctor]
 Extension::Extension() {
 #ifdef OPENVINO_ONNX_FRONTEND_ENABLED
-    ngraph::onnx_import::register_operator(Operation::type_info.name,
+    ngraph::onnx_import::register_operator(Operation::get_type_info_static().name,
                                            1,
                                            "custom_domain",
                                            [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
@@ -34,7 +34,7 @@ Extension::Extension() {
                                                return {std::make_shared<Operation>(ng_inputs.at(0), add)};
                                            });
 # ifdef OPENCV_IMPORT_ENABLED
-    ngraph::onnx_import::register_operator(FFTOp::type_info.name,
+    ngraph::onnx_import::register_operator(FFTOp::get_type_info_static().name,
                                            1,
                                            "custom_domain",
                                            [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
@@ -50,9 +50,9 @@ Extension::Extension() {
 //! [extension:dtor]
 Extension::~Extension() {
 #ifdef OPENVINO_ONNX_FRONTEND_ENABLED
-    ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain");
+    ngraph::onnx_import::unregister_operator(Operation::get_type_info_static().name, 1, "custom_domain");
 # ifdef OPENCV_IMPORT_ENABLED
-    ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain");
+    ngraph::onnx_import::unregister_operator(FFTOp::get_type_info_static().name, 1, "custom_domain");
 # endif // OPENCV_IMPORT_ENABLED
 #endif // OPENVINO_ONNX_FRONTEND_ENABLED
 }
@@ -7,8 +7,6 @@
 
 using namespace TemplateExtension;
 
-constexpr ngraph::NodeTypeInfo FFTOp::type_info;
-
 FFTOp::FFTOp(const ngraph::Output<ngraph::Node>& inp, bool _inverse) : Op({inp}) {
     constructor_validate_and_infer_types();
     inverse = _inverse;
@@ -11,10 +11,7 @@ namespace TemplateExtension {
 
 class FFTOp : public ngraph::op::Op {
 public:
-    static constexpr ngraph::NodeTypeInfo type_info{"FFT", 0};
-    const ngraph::NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
+    OPENVINO_OP("FFT", "custom_opset");
 
     FFTOp() = default;
     FFTOp(const ngraph::Output<ngraph::Node>& inp, bool inverse);
@@ -7,8 +7,6 @@
 using namespace TemplateExtension;
 
 //! [op:ctor]
-NGRAPH_RTTI_DEFINITION(TemplateExtension::Operation, "Template", 0);
-
 Operation::Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add) : Op({arg}), add(add) {
     constructor_validate_and_infer_types();
 }
@@ -11,7 +11,7 @@ namespace TemplateExtension {
 
 class Operation : public ngraph::op::Op {
 public:
-    NGRAPH_RTTI_DECLARATION;
+    OPENVINO_OP("Template", "custom_opset");
 
     Operation() = default;
     Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add);
@@ -82,7 +82,7 @@ int main(int argc, char* argv[]) {
         // that simplifies output filtering, try to find it.
         if (auto ngraphFunction = network.getFunction()) {
             for (const auto& op : ngraphFunction->get_ops()) {
-                if (op->get_type_info() == ngraph::op::DetectionOutput::type_info) {
+                if (op->get_type_info() == ngraph::op::DetectionOutput::get_type_info_static()) {
                     if (output_info->getName() != op->get_friendly_name()) {
                         throw std::logic_error("Detection output op does not produce a network output");
                     }
@@ -84,10 +84,7 @@ private:
 
 class CustomReluOp : public ngraph::op::Op {
 public:
-    static constexpr ngraph::NodeTypeInfo type_info{CUSTOM_RELU_TYPE, 0};
-    const ngraph::NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
+    OPENVINO_OP("CustomReluOp", "experimental");
 
     CustomReluOp() = default;
     explicit CustomReluOp(const ngraph::Output<ngraph::Node>& arg) : Op({arg}) {
@@ -118,8 +115,6 @@ public:
     }
 };
 
-constexpr ngraph::NodeTypeInfo CustomReluOp::type_info;
-
 class InPlaceExtension : public InferenceEngine::IExtension {
 public:
     InPlaceExtension() {
@@ -182,7 +182,7 @@ int main(int argc, char* argv[]) {
         if (auto ngraphFunction = network.getFunction()) {
             for (const auto& out : outputsInfo) {
                 for (const auto& op : ngraphFunction->get_ops()) {
-                    if (op->get_type_info() == ngraph::op::DetectionOutput::type_info &&
+                    if (op->get_type_info() == ngraph::op::DetectionOutput::get_type_info_static() &&
                         op->get_friendly_name() == out.second->getName()) {
                         outputName = out.first;
                         outputInfo = out.second;
@@ -122,19 +122,7 @@ public:
     using factory_t = std::function<void(Program&, const std::shared_ptr<ngraph::Node>&)>;
     using factories_map_t = std::map<ngraph::DiscreteTypeInfo, factory_t>;
 
-    template<typename OpType,
-             typename std::enable_if<std::is_base_of<ngraph::Node, OpType>::value && ngraph::HasTypeInfoMember<OpType>::value, int>::type = 0>
-    static void RegisterFactory(factory_t func) {
-        static std::mutex m;
-        std::lock_guard<std::mutex> lock(m);
-        OPENVINO_SUPPRESS_DEPRECATED_START
-        if (Program::factories_map.find(OpType::type_info) == Program::factories_map.end())
-            Program::factories_map.insert({OpType::type_info, func});
-        OPENVINO_SUPPRESS_DEPRECATED_END
-    }
-
-    template<typename OpType,
-             typename std::enable_if<std::is_base_of<ngraph::Node, OpType>::value && !ngraph::HasTypeInfoMember<OpType>::value, int>::type = 0>
+    template<typename OpType>
     static void RegisterFactory(factory_t func) {
         static std::mutex m;
         std::lock_guard<std::mutex> lock(m);
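With every op now exposing get_type_info_static(), the HasTypeInfoMember SFINAE split above becomes unnecessary and a single unconstrained template serves in-tree and custom ops alike. A hypothetical call site (CreateAddFactory is an illustrative name, not taken from this diff):

    Program::RegisterFactory<ngraph::op::v1::Add>(CreateAddFactory);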
@@ -25,10 +25,18 @@
  * @def INFERENCE_EXTENSION_API(TYPE)
  * @brief Defines Inference Engine Extension API method
  */
-#if defined(_WIN32) && defined(IMPLEMENT_INFERENCE_EXTENSION_API)
-# define INFERENCE_EXTENSION_API(TYPE) extern "C" __declspec(dllexport) TYPE
-#else
-# define INFERENCE_EXTENSION_API(TYPE) INFERENCE_ENGINE_API(TYPE)
+#if defined(_WIN32)
+# ifdef IMPLEMENT_INFERENCE_EXTENSION_API
+#  define INFERENCE_EXTENSION_API(type) extern "C" __declspec(dllexport) type
+# else
+#  define INFERENCE_EXTENSION_API(type) extern "C" type
+# endif
+#elif defined(__GNUC__) && (__GNUC__ >= 4)
+# ifdef IMPLEMENT_INFERENCE_EXTENSION_API
+#  define INFERENCE_EXTENSION_API(type) extern "C" __attribute__((visibility("default"))) type
+# else
+#  define INFERENCE_EXTENSION_API(type) extern "C" type
+# endif
 #endif
 
 namespace InferenceEngine {
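The reworked macro removes the dependency on INFERENCE_ENGINE_API for extension symbols: an extension entry point is now always plain extern "C", exported (__declspec(dllexport) on Windows, default visibility on GCC 4+) only while the extension itself is built with IMPLEMENT_INFERENCE_EXTENSION_API. A hedged sketch of a declaration site; CreateExtension is the conventional Inference Engine extension factory, shown only as an example:

    INFERENCE_EXTENSION_API(InferenceEngine::StatusCode)
    CreateExtension(InferenceEngine::IExtension*& ext, InferenceEngine::ResponseDesc* resp) noexcept;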
@@ -77,13 +77,14 @@ target_include_directories(${TARGET_NAME}_obj PRIVATE $<TARGET_PROPERTY:inferenc
            $<TARGET_PROPERTY:ov_shape_inference,INTERFACE_INCLUDE_DIRECTORIES>
     PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}
            $<TARGET_PROPERTY:openvino::conditional_compilation,INTERFACE_INCLUDE_DIRECTORIES>)
 
 target_include_directories(${TARGET_NAME}_obj SYSTEM PUBLIC $<TARGET_PROPERTY:mkldnn,INCLUDE_DIRECTORIES>)
 
 set_ie_threading_interface_for(${TARGET_NAME}_obj)
 
-target_compile_definitions(${TARGET_NAME}_obj
-    PRIVATE USE_STATIC_IE IMPLEMENT_INFERENCE_ENGINE_PLUGIN IMPLEMENT_INFERENCE_EXTENSION_API
+target_compile_definitions(${TARGET_NAME}_obj PRIVATE
+    USE_STATIC_IE IMPLEMENT_INFERENCE_ENGINE_PLUGIN IMPLEMENT_INFERENCE_EXTENSION_API
+    $<TARGET_PROPERTY:ngraph,INTERFACE_COMPILE_DEFINITIONS>
 )
 
 set_target_properties(${TARGET_NAME}_obj PROPERTIES EXCLUDE_FROM_ALL ON)
@@ -116,11 +116,11 @@ void MKLDNNGraph::Replicate(const std::shared_ptr<const ngraph::Function> &subgr
 
         graphNodes.push_back(node);
 
-        if (op->get_type_info() == ngraph::op::v0::Parameter::type_info) {
+        if (op->get_type_info() == ngraph::op::v0::Parameter::get_type_info_static()) {
             inputNodesMap[node->getName()] = node;
         }
 
-        if (op->get_type_info() == ngraph::op::v0::Result::type_info) {
+        if (op->get_type_info() == ngraph::op::v0::Result::get_type_info_static()) {
             auto prev = op->get_input_node_shared_ptr(0);
             std::string inputID;
             inputID = prev->get_friendly_name();
@@ -143,9 +143,9 @@ void MKLDNNGraph::Replicate(const std::shared_ptr<const ngraph::Function> &subgr
         }
 
         if (!MKLDNNPlugin::one_of(op->get_type_info(),
-                ngraph::op::v0::Result::type_info,
-                ngraph::op::v3::Assign::type_info,
-                ngraph::op::v6::Assign::type_info)) {
+                ngraph::op::v0::Result::get_type_info_static(),
+                ngraph::op::v3::Assign::get_type_info_static(),
+                ngraph::op::v6::Assign::get_type_info_static())) {
             int outPortIdx = 0;
             for (int oi = 0; oi < op->get_output_size(); oi++) {
                 op2node[op->output(oi).get_node_shared_ptr()] = {node, outPortIdx++};
@@ -215,7 +215,7 @@ void MKLDNNGraph::Replicate(const CNNNetwork &network, const MKLDNNExtensionMana
         }
         graphNodes.push_back(node);
 
-        if (op->get_type_info() == ngraph::op::v0::Parameter::type_info) {
+        if (op->get_type_info() == ngraph::op::v0::Parameter::get_type_info_static()) {
             const auto inInfo = inputsInfo.find(node->getName());
             if (inInfo != inputsInfo.end()) {
                 inputNodesMap[node->getName()] = node;
@@ -226,7 +226,7 @@ void MKLDNNGraph::Replicate(const CNNNetwork &network, const MKLDNNExtensionMana
             }
         }
 
-        if (op->get_type_info() == ngraph::op::v0::Result::type_info) {
+        if (op->get_type_info() == ngraph::op::v0::Result::get_type_info_static()) {
             const auto &input = op->input_value(0);
             auto name = ngraph::op::util::get_ie_output_name(input);
 
@@ -247,9 +247,9 @@ void MKLDNNGraph::Replicate(const CNNNetwork &network, const MKLDNNExtensionMana
         }
 
         if (!MKLDNNPlugin::one_of(op->get_type_info(),
-                ngraph::op::v0::Result::type_info,
-                ngraph::op::v3::Assign::type_info,
-                ngraph::op::v6::Assign::type_info)) {
+                ngraph::op::v0::Result::get_type_info_static(),
+                ngraph::op::v3::Assign::get_type_info_static(),
+                ngraph::op::v6::Assign::get_type_info_static())) {
             for (int oi = 0; oi < op->get_output_size(); oi++) {
                 if (op->get_output_target_inputs(oi).empty()) {
                     unusedOutputs.push_back(op->output(oi));
@@ -304,7 +304,7 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr<ngraph::Function>
             for (size_t i = 0; i < node->get_output_size(); i++) {
                 const auto outputs = node->get_output_target_inputs(i);
                 for (const auto &out : outputs) {
-                    if (out.get_node()->get_type_info() != ngraph::op::v0::Result::type_info) {
+                    if (out.get_node()->get_type_info() != ngraph::op::v0::Result::get_type_info_static()) {
                         return false;
                     }
                 }
@@ -41,15 +41,16 @@ bool isConvertableToPowerStatic(const std::shared_ptr<BaseOp> &node) {
     auto const_shape = node->get_input_shape(constPort);
     return ngraph::shape_size(const_shape) == 1 &&
            input_rank.get_length() >= const_shape.size() &&
-           !MKLDNNPlugin::one_of(node->get_input_node_shared_ptr(nonConstPort)->get_type_info(), ngraph::opset1::NormalizeL2::type_info,
-                                 ngraph::opset4::Interpolate::type_info,
-                                 ngraph::opset1::Convolution::type_info,
-                                 ngraph::opset1::GroupConvolution::type_info,
-                                 ngraph::opset1::ConvolutionBackpropData::type_info,
-                                 ngraph::opset1::GroupConvolutionBackpropData::type_info,
-                                 MKLDNNPlugin::FullyConnectedNode::type_info,
-                                 ngraph::op::v0::MVN::type_info,
-                                 ngraph::opset6::MVN::type_info);
+           !MKLDNNPlugin::one_of(node->get_input_node_shared_ptr(nonConstPort)->get_type_info(),
+                                 ngraph::opset1::NormalizeL2::get_type_info_static(),
+                                 ngraph::opset4::Interpolate::get_type_info_static(),
+                                 ngraph::opset1::Convolution::get_type_info_static(),
+                                 ngraph::opset1::GroupConvolution::get_type_info_static(),
+                                 ngraph::opset1::ConvolutionBackpropData::get_type_info_static(),
+                                 ngraph::opset1::GroupConvolutionBackpropData::get_type_info_static(),
+                                 MKLDNNPlugin::FullyConnectedNode::get_type_info_static(),
+                                 ngraph::op::v0::MVN::get_type_info_static(),
+                                 ngraph::opset6::MVN::get_type_info_static());
 }
 
 template <>
@@ -4,8 +4,6 @@
 
 #include "fully_connected.hpp"
 
-constexpr ngraph::NodeTypeInfo MKLDNNPlugin::FullyConnectedNode::type_info;
-
 MKLDNNPlugin::FullyConnectedNode::FullyConnectedNode(const ngraph::Output<Node>& A,
                                                      const ngraph::Output<Node>& B,
                                                      const ngraph::Rank& output_rank,
@@ -11,9 +11,7 @@ namespace MKLDNNPlugin {
 
 class FullyConnectedNode : public ngraph::op::Op {
 public:
-    static constexpr ngraph::NodeTypeInfo type_info{"FullyConnected", 0};
-    static constexpr const ::ngraph::Node::type_info_t& get_type_info_static() { return type_info; }
-    const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
+    OPENVINO_OP("FullyConnected", "cpu_plugin_opset");
 
     FullyConnectedNode() = default;
 
@@ -4,8 +4,6 @@
 
 #include "leaky_relu.hpp"
 
-constexpr ngraph::NodeTypeInfo MKLDNNPlugin::LeakyReluNode::type_info;
-
 MKLDNNPlugin::LeakyReluNode::LeakyReluNode(const ngraph::Output<ngraph::Node> &data,
                                            const float &negative_slope,
                                            const ngraph::element::Type output_type)
@@ -10,9 +10,7 @@ namespace MKLDNNPlugin {
 
 class LeakyReluNode : public ngraph::op::Op {
 public:
-    static constexpr ngraph::NodeTypeInfo type_info{"LeakyRelu", 0};
-    static constexpr const ::ngraph::Node::type_info_t& get_type_info_static() { return type_info; }
-    const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
+    OPENVINO_OP("LeakyRelu", "cpu_plugin_opset");
 
     LeakyReluNode() = default;
 
@@ -4,8 +4,6 @@
 
 #include "power_static.hpp"
 
-constexpr ngraph::NodeTypeInfo MKLDNNPlugin::PowerStaticNode::type_info;
-
 MKLDNNPlugin::PowerStaticNode::PowerStaticNode(const ngraph::Output<Node> &data,
                                                const float &power,
                                                const float &scale,
@@ -10,9 +10,7 @@ namespace MKLDNNPlugin {
 
 class PowerStaticNode : public ngraph::op::Op {
 public:
-    static constexpr ngraph::NodeTypeInfo type_info{"PowerStatic", 0};
-    static constexpr const ::ngraph::Node::type_info_t& get_type_info_static() { return type_info; }
-    const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
+    OPENVINO_OP("PowerStatic", "cpu_plugin_opset");
 
     PowerStaticNode() = default;
 
@@ -4,8 +4,6 @@
 
 #include "swish_cpu.hpp"
 
-constexpr ngraph::NodeTypeInfo MKLDNNPlugin::SwishNode::type_info;
-
 MKLDNNPlugin::SwishNode::SwishNode(const ngraph::Output<ngraph::Node> & input, const float alpha)
         : Op({input}), m_alpha(alpha) {
     validate_and_infer_types();
@@ -10,9 +10,7 @@ namespace MKLDNNPlugin {
 
 class SwishNode : public ngraph::op::Op {
 public:
-    static constexpr ngraph::NodeTypeInfo type_info{"SwishCPU", 0};
-    static constexpr const ::ngraph::Node::type_info_t& get_type_info_static() { return type_info; }
-    const ngraph::NodeTypeInfo &get_type_info() const override { return type_info; }
+    OPENVINO_OP("SwishCPU", "cpu_plugin_opset");
 
     SwishNode() = default;
 
@@ -583,8 +583,8 @@ bool MKLDNNDeformableConvolutionNode::isSupportedOperation(const std::shared_ptr
         return false;
     }
     if (!one_of(op->get_type_info(),
-            ngraph::op::v1::DeformableConvolution::type_info,
-            ngraph::op::v8::DeformableConvolution::type_info)) {
+            ngraph::op::v1::DeformableConvolution::get_type_info_static(),
+            ngraph::op::v8::DeformableConvolution::get_type_info_static())) {
         errorMessage = "Node is not an instance of DeformableConvolution form the operation set v1 or v8.";
         return false;
     }
@@ -619,7 +619,7 @@ MKLDNNDeformableConvolutionNode::MKLDNNDeformableConvolutionNode(const std::shar
 
     paddingL = defConvNodeBase->get_pads_begin();
 
-    if (op->get_type_info() == ngraph::op::v8::DeformableConvolution::type_info) {
+    if (op->get_type_info() == ngraph::op::v8::DeformableConvolution::get_type_info_static()) {
         auto defConvNode = std::dynamic_pointer_cast<ngraph::op::v8::DeformableConvolution>(op);
         if (defConvNode == nullptr)
             IE_THROW() << "Operation with name '" << op->get_friendly_name() <<
@@ -787,89 +787,89 @@ private:
 };
 
 std::map<const ngraph::DiscreteTypeInfo, std::function<void(const std::shared_ptr<ngraph::Node>&, MKLDNNEltwiseNode& node)>> MKLDNNEltwiseNode::initializers = {
-    {ngraph::op::v1::Add::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Add::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseAdd;
     }},
-    {ngraph::op::v1::Subtract::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Subtract::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseSubtract;
     }},
-    {ngraph::op::v1::Multiply::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Multiply::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseMultiply;
     }},
-    {ngraph::op::v1::Divide::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Divide::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseDivide;
     }},
-    {ngraph::op::v0::SquaredDifference::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::SquaredDifference::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseSquaredDifference;
     }},
-    {ngraph::op::v1::Maximum::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Maximum::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseMaximum;
     }},
-    {ngraph::op::v1::Minimum::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Minimum::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseMinimum;
     }},
-    {ngraph::op::v1::Mod::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Mod::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseMod;
     }},
-    {ngraph::op::v1::FloorMod::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::FloorMod::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseFloorMod;
     }},
-    {ngraph::op::v1::Power::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Power::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwisePowerDynamic;
     }},
-    {PowerStaticNode::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {PowerStaticNode::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         auto powerStatic = getNgraphOpAs<PowerStaticNode>(op);
         node.algorithm = EltwisePowerStatic;
         node.alpha = powerStatic->get_power();
         node.beta = powerStatic->get_scale();
         node.gamma = powerStatic->get_shift();
     }},
-    {ngraph::op::v1::Equal::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Equal::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseEqual;
     }},
-    {ngraph::op::v1::NotEqual::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::NotEqual::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseNotEqual;
     }},
-    {ngraph::op::v1::Greater::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Greater::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseGreater;
     }},
-    {ngraph::op::v1::GreaterEqual::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::GreaterEqual::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseGreaterEqual;
     }},
-    {ngraph::op::v1::Less::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::Less::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseLess;
     }},
-    {ngraph::op::v1::LessEqual::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::LessEqual::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseLessEqual;
     }},
-    {ngraph::op::v1::LogicalAnd::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::LogicalAnd::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseLogicalAnd;
     }},
-    {ngraph::op::v1::LogicalOr::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::LogicalOr::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseLogicalOr;
     }},
-    {ngraph::op::v1::LogicalXor::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::LogicalXor::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseLogicalXor;
     }},
-    {ngraph::op::v1::LogicalNot::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v1::LogicalNot::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseLogicalNot;
     }},
-    {ngraph::op::v0::Relu::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::Relu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseRelu;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_relu;
     }},
-    {LeakyReluNode::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {LeakyReluNode::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         auto leakyRelu = getNgraphOpAs<LeakyReluNode>(op);
         node.algorithm = EltwiseRelu;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_relu;
         node.alpha = leakyRelu->get_slope();
         node.beta = 0.0f;
     }},
-    {ngraph::op::v0::Gelu::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::Gelu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseGelu;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_gelu_erf;
     }},
-    {ngraph::op::v7::Gelu::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v7::Gelu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         auto gelu = getNgraphOpAs<ngraph::op::v7::Gelu>(op);
         node.algorithm = EltwiseGelu;
         ngraph::op::GeluApproximationMode approximationMode = gelu->get_approximation_mode();
@@ -880,30 +880,30 @@ std::map<const ngraph::DiscreteTypeInfo, std::function<void(const std::shared_pt
         else
             IE_THROW(NotImplemented) << "CPU Eltwise node doesn't support ngraph operation Gelu with approximation mode: " << approximationMode;
     }},
-    {ngraph::op::v0::Elu::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::Elu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         auto eluOp = getNgraphOpAs<ngraph::op::v0::Elu>(op);
 
         node.alpha = static_cast<float>(eluOp->get_alpha());
         node.algorithm = EltwiseElu;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_elu;
     }},
-    {ngraph::op::v0::Tanh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::Tanh::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseTanh;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_tanh;
     }},
-    {ngraph::op::v0::Sigmoid::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::Sigmoid::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseSigmoid;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_logistic;
     }},
-    {ngraph::op::v0::Abs::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::Abs::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseAbs;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_abs;
     }},
-    {ngraph::op::v0::Sqrt::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::Sqrt::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseSqrt;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_sqrt;
     }},
-    {ngraph::op::v0::Clamp::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::Clamp::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         auto clampOp = getNgraphOpAs<ngraph::op::v0::Clamp>(op);
 
         node.alpha = static_cast<float>(clampOp->get_min());
@@ -911,29 +911,29 @@ std::map<const ngraph::DiscreteTypeInfo, std::function<void(const std::shared_pt
         node.algorithm = EltwiseClamp;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_clip;
     }},
-    {ngraph::op::v0::Exp::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::Exp::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseExp;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_exp;
     }},
-    {SwishNode::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {SwishNode::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         auto swishOp = getNgraphOpAs<SwishNode>(op);
         node.algorithm = EltwiseSwish;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_swish;
         node.alpha = swishOp->get_alpha();
     }},
-    {ngraph::op::v4::HSwish::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v4::HSwish::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseHswish;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_hardswish;
     }},
-    {ngraph::op::v4::Mish::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v4::Mish::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseMish;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_mish;
     }},
-    {ngraph::op::v5::HSigmoid::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v5::HSigmoid::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseHsigmoid;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_hsigmoid;
     }},
-    {ngraph::op::v5::Round::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v5::Round::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         auto roundOp = getNgraphOpAs<ngraph::op::v5::Round>(op);
 
         switch (roundOp->get_mode()) {
@@ -947,13 +947,13 @@ std::map<const ngraph::DiscreteTypeInfo, std::function<void(const std::shared_pt
                 break;
         }
     }},
-    {ngraph::op::v0::PRelu::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::PRelu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwisePrelu;
     }},
-    {ngraph::op::v0::Erf::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v0::Erf::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseErf;
     }},
-    {ngraph::op::v4::SoftPlus::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
+    {ngraph::op::v4::SoftPlus::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNEltwiseNode& node) {
         node.algorithm = EltwiseSoftRelu;
         node.mkldnnAlgorithm = mkldnn::algorithm::eltwise_soft_relu;
     }},
@@ -30,7 +30,7 @@ bool MKLDNNIfNode::isSupportedOperation(const std::shared_ptr<const ov::Node>& o
         return false;
     }
     if (!one_of(op->get_type_info(),
-            ov::op::v8::If::type_info)) {
+            ov::op::v8::If::get_type_info_static())) {
         errorMessage = "Not supported If operation version " + std::to_string(op->get_type_info().version) +
             " with name '" + op->get_friendly_name() + "'. Node If supports only opset8 version.";
         return false;
@@ -230,11 +230,11 @@ jit_has_subnormals_base::fn_t jit_has_subnormals_function() {
 MKLDNNInputNode::MKLDNNInputNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
         : MKLDNNNode(op, eng, cache) {
     if (!one_of(op->get_type_info(),
-            v0::Parameter::type_info,
-            v0::Constant::type_info,
-            v0::Result::type_info,
-            v3::ReadValue::type_info,
-            v6::ReadValue::type_info))
+            v0::Parameter::get_type_info_static(),
+            v0::Constant::get_type_info_static(),
+            v0::Result::get_type_info_static(),
+            v3::ReadValue::get_type_info_static(),
+            v6::ReadValue::get_type_info_static()))
         IE_THROW(NotImplemented) << "CPU Input node doesn't support ngraph operation " << op->get_type_name() << " with name " << op->get_friendly_name();
 
     constant = ConstantType::NoConst;
@@ -25,7 +25,7 @@ bool MKLDNNMathNode::isSupportedOperation(const std::shared_ptr<const ngraph::No
         return false;
     }
 
-    if (MKLDNNPlugin::one_of(op->get_type_info(), ngraph::op::v0::HardSigmoid::type_info, ngraph::op::v0::Selu::type_info)) {
+    if (MKLDNNPlugin::one_of(op->get_type_info(), ngraph::op::v0::HardSigmoid::get_type_info_static(), ngraph::op::v0::Selu::get_type_info_static())) {
         auto firstConst = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
         auto secondConst = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
         if (!firstConst || !secondConst) {
@@ -200,68 +200,68 @@ bool MKLDNNMathNode::created() const {
 }
 
 std::map<const ngraph::DiscreteTypeInfo, std::function<void(const std::shared_ptr<ngraph::Node>&, MKLDNNMathNode& node)>> MKLDNNMathNode::initializers {
-    {ngraph::op::v0::Abs::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Abs::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathAbs;
     }},
-    {ngraph::op::v0::Acos::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Acos::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathAcos;
     }},
-    {ngraph::op::v3::Acosh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v3::Acosh::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathAcosh;
     }},
-    {ngraph::op::v0::Asin::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Asin::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathAsin;
     }},
-    {ngraph::op::v3::Asinh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v3::Asinh::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathAsinh;
     }},
-    {ngraph::op::v0::Atan::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Atan::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathAtan;
     }},
-    {ngraph::op::v0::Ceiling::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Ceiling::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathCeiling;
     }},
-    {ngraph::op::v0::Cos::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Cos::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathCos;
     }},
-    {ngraph::op::v0::Cosh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Cosh::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathCosh;
     }},
-    {ngraph::op::v0::Floor::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Floor::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathFloor;
     }},
-    {ngraph::op::v0::HardSigmoid::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::HardSigmoid::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathHardSigmoid;
         node.alpha = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1))->cast_vector<float>()[0];
         node.beta = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2))->cast_vector<float>()[0];
     }},
-    {ngraph::op::v0::Log::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Log::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathLog;
     }},
-    {ngraph::op::v0::Negative::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Negative::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathNegative;
     }},
-    {ngraph::op::v0::Selu::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Selu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathSelu;
         node.alpha = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1))->cast_vector<float>()[0];
         node.gamma = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2))->cast_vector<float>()[0];
     }},
-    {ngraph::op::v0::Sign::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Sign::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathSign;
     }},
-    {ngraph::op::v0::Sin::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Sin::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathSin;
     }},
-    {ngraph::op::v0::Sinh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Sinh::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathSinh;
     }},
-    {ngraph::op::v4::SoftPlus::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v4::SoftPlus::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathSoftPlus;
     }},
-    {ngraph::op::v0::Tan::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v0::Tan::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathTan;
     }},
-    {ngraph::op::v3::Atanh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
+    {ngraph::op::v3::Atanh::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
         node.algorithm = MKLDNNPlugin::MathAtanh;
     }}
 };
@@ -33,8 +33,8 @@ bool MKLDNNMemoryOutputNode::isSupportedOperation(const std::shared_ptr<const ng
     }
 
     if (!MKLDNNPlugin::one_of(op->get_type_info(),
-            ngraph::op::v3::Assign::type_info,
-            ngraph::op::v6::Assign::type_info)) {
+            ngraph::op::v3::Assign::get_type_info_static(),
+            ngraph::op::v6::Assign::get_type_info_static())) {
         errorMessage = "Node is not an instance of Assign from the operation set v3 or v6.";
         return false;
     }
@@ -91,8 +91,8 @@ bool MKLDNNMemoryInputNode::isSupportedOperation(const std::shared_ptr<const ngr
     }
 
     if (!MKLDNNPlugin::one_of(op->get_type_info(),
-            ngraph::op::v3::ReadValue::type_info,
-            ngraph::op::v6::ReadValue::type_info)) {
+            ngraph::op::v3::ReadValue::get_type_info_static(),
+            ngraph::op::v6::ReadValue::get_type_info_static())) {
         errorMessage = "Node is not an instance of ReadValue from the operation set v3 or v6.";
         return false;
     }
@@ -18,7 +18,7 @@ bool MKLDNNRangeNode::isSupportedOperation(const std::shared_ptr<const ngraph::N
         errorMessage = "Doesn't support op with dynamic shapes";
         return false;
     }
-    if (!MKLDNNPlugin::one_of(op->get_type_info(), ngraph::op::v0::Range::type_info, ngraph::op::v4::Range::type_info)) {
+    if (!MKLDNNPlugin::one_of(op->get_type_info(), ngraph::op::v0::Range::get_type_info_static(), ngraph::op::v4::Range::get_type_info_static())) {
         errorMessage = "Only opset1 and opset4 Range operation is supported";
         return false;
     }
@@ -1330,31 +1330,31 @@
 };
 
 std::map<const ngraph::DiscreteTypeInfo, std::function<void(const std::shared_ptr<ngraph::Node>&, MKLDNNReduceNode&)>> MKLDNNReduceNode::initializers = {
-    {ngraph::opset4::ReduceL1::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
+    {ngraph::opset4::ReduceL1::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
         node.algorithm = ReduceL1;
     }},
-    {ngraph::opset4::ReduceL2::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
+    {ngraph::opset4::ReduceL2::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
         node.algorithm = ReduceL2;
     }},
-    {ngraph::opset1::ReduceLogicalAnd::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
+    {ngraph::opset1::ReduceLogicalAnd::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
         node.algorithm = ReduceAnd;
     }},
-    {ngraph::opset1::ReduceLogicalOr::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
+    {ngraph::opset1::ReduceLogicalOr::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
         node.algorithm = ReduceOr;
     }},
-    {ngraph::opset1::ReduceMax::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
+    {ngraph::opset1::ReduceMax::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
         node.algorithm = ReduceMax;
     }},
-    {ngraph::opset1::ReduceMean::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
+    {ngraph::opset1::ReduceMean::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
         node.algorithm = ReduceMean;
     }},
-    {ngraph::opset1::ReduceMin::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
+    {ngraph::opset1::ReduceMin::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
         node.algorithm = ReduceMin;
     }},
-    {ngraph::opset1::ReduceProd::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
+    {ngraph::opset1::ReduceProd::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
         node.algorithm = ReduceProd;
     }},
-    {ngraph::opset1::ReduceSum::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
+    {ngraph::opset1::ReduceSum::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, MKLDNNReduceNode& node) {
         node.algorithm = ReduceSum;
     }}
 };
@@ -23,13 +23,13 @@ namespace MKLDNNPlugin {
 
 static rnn_direction ieDirection2dnnl(const std::shared_ptr<const ngraph::Node>& op) {
     ngraph::op::RecurrentSequenceDirection direction = ngraph::op::RecurrentSequenceDirection::FORWARD;
-    if (op->get_type_info() == ngraph::op::v5::GRUSequence::type_info) {
+    if (op->get_type_info() == ngraph::op::v5::GRUSequence::get_type_info_static()) {
         direction = ngraph::as_type_ptr<const ngraph::op::v5::GRUSequence>(op)->get_direction();
-    } else if (op->get_type_info() == ngraph::op::v0::LSTMSequence::type_info) {
+    } else if (op->get_type_info() == ngraph::op::v0::LSTMSequence::get_type_info_static()) {
         direction = ngraph::as_type_ptr<const ngraph::op::v0::LSTMSequence>(op)->get_direction();
-    } else if (op->get_type_info() == ngraph::op::v5::LSTMSequence::type_info) {
+    } else if (op->get_type_info() == ngraph::op::v5::LSTMSequence::get_type_info_static()) {
         direction = ngraph::as_type_ptr<const ngraph::op::v5::LSTMSequence>(op)->get_direction();
-    } else if (op->get_type_info() == ngraph::op::v5::RNNSequence::type_info) {
+    } else if (op->get_type_info() == ngraph::op::v5::RNNSequence::get_type_info_static()) {
         direction = ngraph::as_type_ptr<const ngraph::op::v5::RNNSequence>(op)->get_direction();
     }
     return direction == ngraph::op::RecurrentSequenceDirection::FORWARD ? rnn_direction::unidirectional_left2right
@@ -47,8 +47,8 @@ static mkldnn::algorithm ie2dnnl(std::string act_type) {
 
 static mkldnn::algorithm ie2dnnl(const std::shared_ptr<const ngraph::Node>& op) {
     if (one_of(op->get_type_info(),
-            ngraph::op::v3::GRUCell::type_info,
-            ngraph::op::v5::GRUSequence::type_info)) {
+            ngraph::op::v3::GRUCell::get_type_info_static(),
+            ngraph::op::v5::GRUSequence::get_type_info_static())) {
         auto gruCellOp = ngraph::as_type_ptr<const ngraph::op::v3::GRUCell>(op);
         auto gruSeqOp = ngraph::as_type_ptr<const ngraph::op::v5::GRUSequence>(op);
         if ((gruCellOp && gruCellOp->get_linear_before_reset()) ||
@@ -57,14 +57,14 @@ static mkldnn::algorithm ie2dnnl(const std::shared_ptr<const ngraph::Node>& op)
         else
             return mkldnn::algorithm::vanilla_gru;
     } else if (one_of(op->get_type_info(),
-            ngraph::op::v0::LSTMCell::type_info,
-            ngraph::op::v4::LSTMCell::type_info,
-            ngraph::op::v0::LSTMSequence::type_info,
-            ngraph::op::v5::LSTMSequence::type_info)) {
+            ngraph::op::v0::LSTMCell::get_type_info_static(),
+            ngraph::op::v4::LSTMCell::get_type_info_static(),
+            ngraph::op::v0::LSTMSequence::get_type_info_static(),
+            ngraph::op::v5::LSTMSequence::get_type_info_static())) {
         return mkldnn::algorithm::vanilla_lstm;
     } else if (one_of(op->get_type_info(),
-            ngraph::op::v0::RNNCell::type_info,
-            ngraph::op::v5::RNNSequence::type_info)) {
+            ngraph::op::v0::RNNCell::get_type_info_static(),
+            ngraph::op::v5::RNNSequence::get_type_info_static())) {
         return mkldnn::algorithm::vanilla_rnn;
     } else {
         IE_THROW() << "Unsupported cell type";
@@ -116,54 +116,54 @@ bool MKLDNNRNN::isSupportedOperation(const std::shared_ptr<const ngraph::Node>&
     }
 
     if (!one_of(op->get_type_info(),
-            ngraph::op::v3::GRUCell::type_info,
-            ngraph::op::v0::LSTMCell::type_info,
-            ngraph::op::v4::LSTMCell::type_info,
-            ngraph::op::v0::RNNCell::type_info,
-            ngraph::op::v5::GRUSequence::type_info,
-            ngraph::op::v0::LSTMSequence::type_info,
-            ngraph::op::v5::LSTMSequence::type_info,
-            ngraph::op::v5::RNNSequence::type_info)) {
+            ngraph::op::v3::GRUCell::get_type_info_static(),
+            ngraph::op::v0::LSTMCell::get_type_info_static(),
+            ngraph::op::v4::LSTMCell::get_type_info_static(),
+            ngraph::op::v0::RNNCell::get_type_info_static(),
+            ngraph::op::v5::GRUSequence::get_type_info_static(),
+            ngraph::op::v0::LSTMSequence::get_type_info_static(),
+            ngraph::op::v5::LSTMSequence::get_type_info_static(),
+            ngraph::op::v5::RNNSequence::get_type_info_static())) {
         errorMessage = "Unsupported RNN operation.";
         return false;
     }
 
-    if (one_of(op->get_type_info(), ngraph::op::v0::RNNCell::type_info, ngraph::op::v3::GRUCell::type_info)) {
+    if (one_of(op->get_type_info(), ngraph::op::v0::RNNCell::get_type_info_static(), ngraph::op::v3::GRUCell::get_type_info_static())) {
         if (op->get_input_size() != 5) {
             errorMessage = "Node expects 5 inputs. Actual: " + std::to_string(op->get_input_size());
             return false;
         }
-        if (op->get_input_node_ptr(2)->get_type_info() != ngraph::op::v0::Constant::type_info ||
-                op->get_input_node_ptr(3)->get_type_info() != ngraph::op::v0::Constant::type_info ||
-                op->get_input_node_ptr(4)->get_type_info() != ngraph::op::v0::Constant::type_info) {
+        if (op->get_input_node_ptr(2)->get_type_info() != ngraph::op::v0::Constant::get_type_info_static() ||
+                op->get_input_node_ptr(3)->get_type_info() != ngraph::op::v0::Constant::get_type_info_static() ||
+                op->get_input_node_ptr(4)->get_type_info() != ngraph::op::v0::Constant::get_type_info_static()) {
             errorMessage = "Node expects constants as W, R, B inputs.";
             return false;
         }
     } else if (one_of(op->get_type_info(),
-            ngraph::op::v0::LSTMCell::type_info,
-            ngraph::op::v4::LSTMCell::type_info,
-            ngraph::op::v5::GRUSequence::type_info,
-            ngraph::op::v5::RNNSequence::type_info)) {
+            ngraph::op::v0::LSTMCell::get_type_info_static(),
+            ngraph::op::v4::LSTMCell::get_type_info_static(),
+            ngraph::op::v5::GRUSequence::get_type_info_static(),
+            ngraph::op::v5::RNNSequence::get_type_info_static())) {
         if (op->get_input_size() != 6) {
             errorMessage = "Node expects 6 inputs. Actual: " + std::to_string(op->get_input_size());
             return false;
         }
-        if (op->get_input_node_ptr(3)->get_type_info() != ngraph::op::v0::Constant::type_info ||
-                op->get_input_node_ptr(4)->get_type_info() != ngraph::op::v0::Constant::type_info ||
-                op->get_input_node_ptr(5)->get_type_info() != ngraph::op::v0::Constant::type_info) {
+        if (op->get_input_node_ptr(3)->get_type_info() != ngraph::op::v0::Constant::get_type_info_static() ||
+                op->get_input_node_ptr(4)->get_type_info() != ngraph::op::v0::Constant::get_type_info_static() ||
+                op->get_input_node_ptr(5)->get_type_info() != ngraph::op::v0::Constant::get_type_info_static()) {
             errorMessage = "Node expects constants as W, R, B inputs.";
             return false;
         }
     } else if (one_of(op->get_type_info(),
-            ngraph::op::v0::LSTMSequence::type_info,
-            ngraph::op::v5::LSTMSequence::type_info)) {
+            ngraph::op::v0::LSTMSequence::get_type_info_static(),
+            ngraph::op::v5::LSTMSequence::get_type_info_static())) {
         if (op->get_input_size() != 7) {
             errorMessage = "Node expects 7 inputs. Actual: " + std::to_string(op->get_input_size());
             return false;
         }
-        if (op->get_input_node_ptr(4)->get_type_info() != ngraph::op::v0::Constant::type_info ||
-                op->get_input_node_ptr(5)->get_type_info() != ngraph::op::v0::Constant::type_info ||
-                op->get_input_node_ptr(6)->get_type_info() != ngraph::op::v0::Constant::type_info) {
+        if (op->get_input_node_ptr(4)->get_type_info() != ngraph::op::v0::Constant::get_type_info_static() ||
+                op->get_input_node_ptr(5)->get_type_info() != ngraph::op::v0::Constant::get_type_info_static() ||
+                op->get_input_node_ptr(6)->get_type_info() != ngraph::op::v0::Constant::get_type_info_static()) {
             errorMessage = "Node expects constants as W, R, B inputs.";
             return false;
         }
@@ -176,13 +176,13 @@ bool MKLDNNRNN::isSupportedOperation(const std::shared_ptr<const ngraph::Node>&
     }
 
     ngraph::op::RecurrentSequenceDirection direction = ngraph::op::RecurrentSequenceDirection::FORWARD;
-    if (op->get_type_info() == ngraph::op::v5::GRUSequence::type_info) {
+    if (op->get_type_info() == ngraph::op::v5::GRUSequence::get_type_info_static()) {
         direction = ngraph::as_type_ptr<const ngraph::op::v5::GRUSequence>(op)->get_direction();
-    } else if (op->get_type_info() == ngraph::op::v0::LSTMSequence::type_info) {
+    } else if (op->get_type_info() == ngraph::op::v0::LSTMSequence::get_type_info_static()) {
         direction = ngraph::as_type_ptr<const ngraph::op::v0::LSTMSequence>(op)->get_direction();
-    } else if (op->get_type_info() == ngraph::op::v5::LSTMSequence::type_info) {
+    } else if (op->get_type_info() == ngraph::op::v5::LSTMSequence::get_type_info_static()) {
         direction = ngraph::as_type_ptr<const ngraph::op::v5::LSTMSequence>(op)->get_direction();
-    } else if (op->get_type_info() == ngraph::op::v5::RNNSequence::type_info) {
+    } else if (op->get_type_info() == ngraph::op::v5::RNNSequence::get_type_info_static()) {
         direction = ngraph::as_type_ptr<const ngraph::op::v5::RNNSequence>(op)->get_direction();
     }
     if (!one_of(direction, ngraph::op::RecurrentSequenceDirection::FORWARD, ngraph::op::RecurrentSequenceDirection::REVERSE)) {
@@ -213,24 +213,24 @@ MKLDNNRNN::MKLDNNRNN(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engi
     }
 
     is_cell = one_of(op->get_type_info(),
-            ngraph::op::v0::RNNCell::type_info,
-            ngraph::op::v3::GRUCell::type_info,
-            ngraph::op::v0::LSTMCell::type_info,
-            ngraph::op::v4::LSTMCell::type_info);
+            ngraph::op::v0::RNNCell::get_type_info_static(),
+            ngraph::op::v3::GRUCell::get_type_info_static(),
+            ngraph::op::v0::LSTMCell::get_type_info_static(),
+            ngraph::op::v4::LSTMCell::get_type_info_static());
 
     if (one_of(op->get_type_info(),
-            ngraph::op::v0::RNNCell::type_info,
-            ngraph::op::v3::GRUCell::type_info)) {
+            ngraph::op::v0::RNNCell::get_type_info_static(),
+            ngraph::op::v3::GRUCell::get_type_info_static())) {
         wIdx = 2; rIdx = 3; bIdx = 4;
     } else if (one_of(op->get_type_info(),
-            ngraph::op::v5::RNNSequence::type_info,
-            ngraph::op::v0::LSTMCell::type_info,
-            ngraph::op::v4::LSTMCell::type_info,
-            ngraph::op::v5::GRUSequence::type_info)) {
+            ngraph::op::v5::RNNSequence::get_type_info_static(),
+            ngraph::op::v0::LSTMCell::get_type_info_static(),
+            ngraph::op::v4::LSTMCell::get_type_info_static(),
+            ngraph::op::v5::GRUSequence::get_type_info_static())) {
         wIdx = 3; rIdx = 4; bIdx = 5;
     } else if (one_of(op->get_type_info(),
-            ngraph::op::v0::LSTMSequence::type_info,
-            ngraph::op::v5::LSTMSequence::type_info)) {
+            ngraph::op::v0::LSTMSequence::get_type_info_static(),
+            ngraph::op::v5::LSTMSequence::get_type_info_static())) {
         wIdx = 4; rIdx = 5; bIdx = 6;
     }
 
@@ -26,7 +26,7 @@ bool MKLDNNSplitNode::isSupportedOperation(const std::shared_ptr<const ngraph::N
         return false;
     }
 
-    if (!MKLDNNPlugin::one_of(op->get_type_info(), ngraph::op::v1::Split::type_info, ngraph::op::v1::VariadicSplit::type_info)) {
+    if (!MKLDNNPlugin::one_of(op->get_type_info(), ngraph::op::v1::Split::get_type_info_static(), ngraph::op::v1::VariadicSplit::get_type_info_static())) {
         errorMessage = "Only opset1 Split and VariadicSplit operations are supported";
         return false;
     }
@@ -283,8 +283,8 @@ bool MKLDNNTensorIteratorNode::isSupportedOperation(const std::shared_ptr<const
     }
 
     if (!one_of(op->get_type_info(),
-            ngraph::op::v0::TensorIterator::type_info,
-            ngraph::op::v5::Loop::type_info)) {
+            ngraph::op::v0::TensorIterator::get_type_info_static(),
+            ngraph::op::v5::Loop::get_type_info_static())) {
         errorMessage = "Only opset1 TensorIterator or opset5 Loop operations are supported.";
         return false;
     }
@ -16,12 +16,12 @@ using namespace InferenceEngine;
bool MKLDNNTransposeNode::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
if (!one_of(op->get_type_info(),
ov::op::v1::Transpose::type_info)) {
ov::op::v1::Transpose::get_type_info_static())) {
errorMessage = "Node is not an instance of the Transpose operation from opset1.";
return false;
}

if (!isDynamicNgraphNode(op) && op->get_input_node_ptr(INPUT_ORDER_IDX)->get_type_info() != ov::op::v0::Constant::type_info) {
if (!isDynamicNgraphNode(op) && op->get_input_node_ptr(INPUT_ORDER_IDX)->get_type_info() != ov::op::v0::Constant::get_type_info_static()) {
errorMessage = "Constant expected as the second input for static shapes.";
return false;
}

@ -38,7 +38,7 @@ MKLDNNTransposeNode::MKLDNNTransposeNode(const std::shared_ptr<ov::Node>& op, co
IE_THROW(NotImplemented) << errorMessage;
}

if (op->get_input_node_ptr(INPUT_ORDER_IDX)->get_type_info() == ov::op::v0::Constant::type_info) {
if (op->get_input_node_ptr(INPUT_ORDER_IDX)->get_type_info() == ov::op::v0::Constant::get_type_info_static()) {
constMap[INPUT_ORDER_IDX] = true;
order = ov::as_type<ov::op::v0::Constant>(op->get_input_node_ptr(INPUT_ORDER_IDX))->cast_vector<size_t>();
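These `isSupportedOperation` checks all funnel through a variadic `one_of` helper that compares the node's type info against an allow-list. A minimal sketch of such a helper (hypothetical and simplified; the plugin's actual helper may differ):

#include <iostream>
#include <string>

// Hypothetical variadic helper: true if `val` equals any of `items...`.
template <typename T, typename... Ts>
bool one_of(const T& val, const Ts&... items) {
    bool match = false;
    // Pack expansion inside a braced initializer, so the sketch also builds as C++11.
    int dummy[] = {0, (match = match || (val == items), 0)...};
    (void)dummy;
    return match;
}

int main() {
    std::string type = "Split";
    std::cout << one_of(type, std::string("Split"), std::string("VariadicSplit")) << "\n";  // 1
    std::cout << one_of(type, std::string("Transpose")) << "\n";                            // 0
}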
@ -12,14 +12,12 @@ using namespace ngraph;
using namespace ov;

template class ov::MLKDNNMemoryFormatsHelper<MLKDNNInputMemoryFormats>;
constexpr VariantTypeInfo VariantWrapper<MLKDNNInputMemoryFormats>::type_info;

std::string ngraph::getMLKDNNInputMemoryFormats(const std::shared_ptr<ngraph::Node>& node) {
return MLKDNNMemoryFormatsHelper<MLKDNNInputMemoryFormats>::getMemoryFormats(node);
}

template class ov::MLKDNNMemoryFormatsHelper<MLKDNNOutputMemoryFormats>;
constexpr VariantTypeInfo VariantWrapper<MLKDNNOutputMemoryFormats>::type_info;

std::string ngraph::getMLKDNNOutputMemoryFormats(const std::shared_ptr<ngraph::Node>& node) {
return MLKDNNMemoryFormatsHelper<MLKDNNOutputMemoryFormats>::getMemoryFormats(node);

@ -52,8 +52,8 @@ public:
static std::string getMemoryFormats(const std::shared_ptr<ngraph::Node>& node) {
const auto &rtInfo = node->get_rt_info();
using MemoryFormatsWrapper = VariantWrapper<MemoryFormatsType>;
if (!rtInfo.count(MemoryFormatsWrapper::type_info.name)) return "";
const auto &attr = rtInfo.at(MemoryFormatsWrapper::type_info.name);
if (!rtInfo.count(MemoryFormatsWrapper::get_type_info_static().name)) return "";
const auto &attr = rtInfo.at(MemoryFormatsWrapper::get_type_info_static().name);
MemoryFormatsType mem_format = ngraph::as_type_ptr<MemoryFormatsWrapper>(attr)->get();
return mem_format.getMemoryFormats();
}

@ -67,7 +67,7 @@ public:
}

if (unique_mem_format.size() > 1) {
throw ngraph::ngraph_error(std::string(VariantWrapper<MemoryFormatsType>::type_info.name) + " no rule defined for multiple values.");
throw ngraph::ngraph_error(std::string(VariantWrapper<MemoryFormatsType>::get_type_info_static().name) + " no rule defined for multiple values.");
}

std::string final_mem_format;

@ -78,7 +78,7 @@ public:
}

std::shared_ptr<ngraph::Variant> init(const std::shared_ptr<ngraph::Node> & node) override {
throw ngraph::ngraph_error(std::string(VariantWrapper<MemoryFormatsType>::type_info.name) + " has no default initialization.");
throw ngraph::ngraph_error(std::string(VariantWrapper<MemoryFormatsType>::get_type_info_static().name) + " has no default initialization.");
}
};
extern template class MLKDNNMemoryFormatsHelper<ngraph::MLKDNNInputMemoryFormats>;

@ -86,8 +86,7 @@ extern template class MLKDNNMemoryFormatsHelper<ngraph::MLKDNNInputMemoryFormats
template<>
class VariantWrapper<ngraph::MLKDNNInputMemoryFormats> : public MLKDNNMemoryFormatsHelper<ngraph::MLKDNNInputMemoryFormats> {
public:
static constexpr VariantTypeInfo type_info{ngraph::MLKDNNInputMemoryFormatsAttr, 0};
const VariantTypeInfo &get_type_info() const override { return type_info; }
OPENVINO_RTTI(ngraph::MLKDNNInputMemoryFormatsAttr);

VariantWrapper(const ngraph::MLKDNNInputMemoryFormats &value) : MLKDNNMemoryFormatsHelper<ngraph::MLKDNNInputMemoryFormats>(value) {}
};

@ -97,8 +96,7 @@ extern template class MLKDNNMemoryFormatsHelper<ngraph::MLKDNNOutputMemoryFormat
template<>
class VariantWrapper<ngraph::MLKDNNOutputMemoryFormats> : public MLKDNNMemoryFormatsHelper<ngraph::MLKDNNOutputMemoryFormats> {
public:
static constexpr VariantTypeInfo type_info{ngraph::MLKDNNOutputMemoryFormatsAttr, 0};
const VariantTypeInfo &get_type_info() const override { return type_info; }
OPENVINO_RTTI(ngraph::MLKDNNOutputMemoryFormatsAttr);

VariantWrapper(const ngraph::MLKDNNOutputMemoryFormats &value) : MLKDNNMemoryFormatsHelper<ngraph::MLKDNNOutputMemoryFormats>(value) {}
};
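The `rt_info` lookups above keep working after the macro switch because the wrapper's type-info name doubles as the map key. A self-contained sketch of that keying scheme (plain standard-library stand-ins, not the actual ngraph `Variant` types):

#include <iostream>
#include <map>
#include <memory>
#include <string>

struct TypeInfo { const char* name; };

// Stand-in for a VariantWrapper-style attribute: the key is the type-info name.
struct MemoryFormatsAttr {
    static const TypeInfo& get_type_info_static() {
        static const TypeInfo info{"InputMemoryFormats"};
        return info;
    }
    std::string formats;
};

int main() {
    std::map<std::string, std::shared_ptr<MemoryFormatsAttr>> rtInfo;
    rtInfo[MemoryFormatsAttr::get_type_info_static().name] =
        std::make_shared<MemoryFormatsAttr>(MemoryFormatsAttr{"nChw16c"});

    // Same lookup shape as getMemoryFormats(): key by the static type-info name.
    const auto key = MemoryFormatsAttr::get_type_info_static().name;
    std::cout << (rtInfo.count(key) ? rtInfo.at(key)->formats : "") << "\n";
}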
@ -20,7 +20,7 @@ namespace op {
*/
class TRANSFORMATIONS_API BlockedLoad : public Load {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("BlockedLoad", "SnippetsOpset", ngraph::snippets::op::Load);

BlockedLoad(const Output<Node>& x);
BlockedLoad() = default;

@ -20,7 +20,7 @@ namespace op {
*/
class TRANSFORMATIONS_API BlockedParameter : public ngraph::op::Parameter {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("BlockedParameter", "SnippetsOpset", ngraph::op::Parameter);

BlockedParameter() = default;
BlockedParameter(const ngraph::element::Type& element_type, const PartialShape& pshape)

@ -20,7 +20,7 @@ namespace op {
*/
class TRANSFORMATIONS_API BroadcastLoad : public BroadcastMove {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("BroadcastLoad", "SnippetsOpset", ngraph::snippets::op::BroadcastMove);

BroadcastLoad(const Output<Node>& x, Shape output_shape);
BroadcastLoad() = default;

@ -19,7 +19,7 @@ namespace op {
*/
class TRANSFORMATIONS_API BroadcastMove : public ngraph::op::Op {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("BroadcastMove", "SnippetsOpset");

BroadcastMove(const Output<Node>& x, Shape output_shape);
BroadcastMove() = default;

@ -20,7 +20,7 @@ namespace op {
*/
class TRANSFORMATIONS_API Kernel : public ngraph::op::Op {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("Kernel", "SnippetsOpset");

Kernel(const std::vector<std::pair<std::shared_ptr<ngraph::snippets::Emitter>, ngraph::snippets::RegInfo>>& region);
Kernel() = default;

@ -23,7 +23,7 @@ namespace op {
*/
class TRANSFORMATIONS_API Load : public ngraph::op::Op {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("Load", "SnippetsOpset");

Load(const Output<Node>& x);
Load() = default;

@ -19,7 +19,7 @@ namespace op {
*/
class TRANSFORMATIONS_API Nop : public ngraph::op::Op {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("Nop", "SnippetsOpset");

Nop(const OutputVector& arguments, const OutputVector& results);
Nop() = default;

@ -20,7 +20,7 @@ namespace op {
*/
class TRANSFORMATIONS_API Scalar : public ngraph::op::Constant {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("Scalar", "SnippetsOpset", ngraph::op::Constant);

Scalar() = default;
Scalar(const std::shared_ptr<runtime::Tensor>& tensor) : Constant(tensor) {}

@ -20,7 +20,7 @@ namespace op {
*/
class TRANSFORMATIONS_API ScalarLoad : public Load {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("ScalarLoad", "SnippetsOpset", ngraph::snippets::op::Load);

ScalarLoad(const Output<Node>& x);
ScalarLoad() = default;

@ -20,7 +20,7 @@ namespace op {
*/
class TRANSFORMATIONS_API ScalarStore : public Store {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("ScalarStore", "SnippetsOpset", ngraph::snippets::op::Store);

ScalarStore(const Output<Node>& x);
ScalarStore() = default;

@ -21,7 +21,7 @@ namespace op {
*/
class TRANSFORMATIONS_API PowerStatic : public ngraph::op::v1::Power {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("PowerStatic", "SnippetsOpset", ngraph::op::v1::Power);

PowerStatic() : Power() {
}

@ -19,7 +19,7 @@ namespace op {
*/
class TRANSFORMATIONS_API Store : public ngraph::op::Op {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("Store", "SnippetsOpset");

Store(const Output<Node>& x);
Store() = default;

@ -25,6 +25,8 @@ namespace op {
*/
class TRANSFORMATIONS_API Subgraph : public ngraph::op::Op {
public:
OPENVINO_OP("Subgraph", "SnippetsOpset");

// < 1, 42, 17, 15, 16> < 0, 1, 2, 3, 1>
// should be:
// A = < 1, 42, 17, 15> -> < 1, 3, 17, 15, 16> < 0, 1, 2, 3, 1>

@ -69,8 +71,6 @@ public:
using BlockedShape = std::tuple<ngraph::Shape, ngraph::AxisVector, ngraph::element::Type>;
using BlockedShapeVector = std::vector<BlockedShape>;

NGRAPH_RTTI_DECLARATION;

Subgraph(const OutputVector& args, std::shared_ptr<Function> body);

Subgraph(const NodeVector& args, std::shared_ptr<Function> body);

@ -20,7 +20,7 @@ namespace op {
*/
class TRANSFORMATIONS_API Tile : public ngraph::op::Op {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("Tile", "SnippetsOpset");

Tile(const std::vector<std::pair<std::shared_ptr<ngraph::snippets::Emitter>, ngraph::snippets::RegInfo>>& region);
Tile() = default;

@ -20,7 +20,7 @@ namespace op {
*/
class TRANSFORMATIONS_API VectorLoad : public Load {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("VectorLoad", "SnippetsOpset", ngraph::snippets::op::Load);

VectorLoad(const Output<Node>& x);
VectorLoad() = default;

@ -20,7 +20,7 @@ namespace op {
*/
class TRANSFORMATIONS_API VectorStore : public Store {
public:
NGRAPH_RTTI_DECLARATION;
OPENVINO_OP("VectorStore", "SnippetsOpset", ngraph::snippets::op::Store);

VectorStore(const Output<Node>& x);
VectorStore() = default;
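Every snippets op above trades the NGRAPH_RTTI_DECLARATION / NGRAPH_RTTI_DEFINITION pair for a single OPENVINO_OP("Name", "OpsetName"[, Base]) macro, so the .cpp no longer has to define a type-info object. A sketch of the same migration on a standalone custom op (assumes the OpenVINO/nGraph dev headers of this era; not code from this commit):

#include <ngraph/op/op.hpp>

class MyRelu : public ngraph::op::Op {
public:
    // Before: NGRAPH_RTTI_DECLARATION here, plus
    //   NGRAPH_RTTI_DEFINITION(MyRelu, "MyRelu", 0); in the .cpp.
    // After: one macro, no out-of-line definition, no extra global static.
    OPENVINO_OP("MyRelu", "custom_opset");

    MyRelu() = default;
    explicit MyRelu(const ngraph::Output<ngraph::Node>& arg) : Op({arg}) {
        constructor_validate_and_infer_types();
    }

    void validate_and_infer_types() override {
        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
    }

    std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override {
        return std::make_shared<MyRelu>(new_args.at(0));
    }
};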
@ -69,13 +69,15 @@ ngraph::snippets::code ngraph::snippets::Generator::generate(std::shared_ptr<ngr

// wrapping into tiles
std::vector<std::pair<std::shared_ptr<Emitter>, RegInfo>> tiles;
tiles.push_back(std::make_pair(target->get(ngraph::snippets::op::Tile::type_info)(std::make_shared<ngraph::snippets::op::Tile>(lowered)),
std::make_pair(std::vector<size_t>({target->get_lanes(), nptrs}), std::vector<size_t>{})));
tiles.push_back(std::make_pair(target->get(ngraph::snippets::op::Tile::type_info)(std::make_shared<ngraph::snippets::op::Tile>(scalar_lowered)),
tiles.push_back(std::make_pair(target->get(ngraph::snippets::op::Tile::get_type_info_static())(
std::make_shared<ngraph::snippets::op::Tile>(lowered)),
std::make_pair(std::vector<size_t>({target->get_lanes(), nptrs}), std::vector<size_t>{})));
tiles.push_back(std::make_pair(target->get(ngraph::snippets::op::Tile::get_type_info_static())(
std::make_shared<ngraph::snippets::op::Tile>(scalar_lowered)),
std::make_pair(std::vector<size_t>{{1, nptrs}}, std::vector<size_t>{})));

// emission
std::shared_ptr<Emitter> kernel = target->get(ngraph::snippets::op::Kernel::type_info)(std::make_shared<ngraph::snippets::op::Kernel>(tiles));
std::shared_ptr<Emitter> kernel = target->get(ngraph::snippets::op::Kernel::get_type_info_static())(std::make_shared<ngraph::snippets::op::Kernel>(tiles));
kernel->emit_code({params.size(), results.size()}, {});

lowered.insert(lowered.end(), scalar_lowered.begin(), scalar_lowered.end());
@ -6,7 +6,5 @@

using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::BlockedLoad, "BlockedLoad", 0);

snippets::op::BlockedLoad::BlockedLoad(const Output<Node>& x) : Load(x) {
}

@ -1,9 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "snippets/op/blockedparameter.hpp"

using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::BlockedParameter, "BlockedParameter", 0);

@ -11,8 +11,6 @@
using namespace std;
using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::BroadcastLoad, "BroadcastLoad", 0);

snippets::op::BroadcastLoad::BroadcastLoad(const Output<Node>& x, Shape shape)
: BroadcastMove(x, shape), broadcast_info(x.get_shape().size(), 0) {
constructor_validate_and_infer_types();

@ -12,8 +12,6 @@
using namespace std;
using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::BroadcastMove, "BroadcastMove", 0);

snippets::op::BroadcastMove::BroadcastMove(const Output<Node>& x, Shape shape) : Op({x}), output_shape(shape) {
constructor_validate_and_infer_types();
}

@ -8,7 +8,5 @@
using namespace std;
using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::Kernel, "Kernel", 0);

snippets::op::Kernel::Kernel(const std::vector<std::pair<std::shared_ptr<snippets::Emitter>, snippets::RegInfo>>& nested) : Op(), region(nested) {
}

@ -11,8 +11,6 @@
using namespace std;
using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::Load, "Load", 0);

snippets::op::Load::Load(const Output<Node>& x) : Op({x}) {
constructor_validate_and_infer_types();
}

@ -7,8 +7,6 @@
using namespace std;
using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::Nop, "Nop", 0);

snippets::op::Nop::Nop(const OutputVector& arguments, const OutputVector& results) : Op([arguments, results]() -> OutputVector {
OutputVector x;
x.insert(x.end(), arguments.begin(), arguments.end());

@ -1,9 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "snippets/op/scalar.hpp"

using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::Scalar, "Scalar", 0);

@ -6,7 +6,5 @@

using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::ScalarLoad, "ScalarLoad", 0);

snippets::op::ScalarLoad::ScalarLoad(const Output<Node>& x) : Load(x) {
}

@ -6,7 +6,5 @@

using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::ScalarStore, "ScalarStore", 0);

snippets::op::ScalarStore::ScalarStore(const Output<Node>& x) : Store(x) {
}

@ -1,9 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "snippets/op/staticpower.hpp"

using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::PowerStatic, "PowerStatic", 0);

@ -11,8 +11,6 @@
using namespace std;
using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::Store, "Store", 0);

snippets::op::Store::Store(const Output<Node>& x) : Op({x}) {
constructor_validate_and_infer_types();
}

@ -21,8 +21,6 @@
using namespace std;
using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::Subgraph, "Subgraph", 0);

void snippets::op::Subgraph::set_generator(std::shared_ptr<ngraph::snippets::Generator> generator) {
m_generator = generator;
}

@ -8,7 +8,5 @@
using namespace std;
using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::Tile, "Tile", 0);

snippets::op::Tile::Tile(const std::vector<std::pair<std::shared_ptr<snippets::Emitter>, snippets::RegInfo>>& nested) : Op(), region(nested) {
}

@ -6,7 +6,5 @@

using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::VectorLoad, "VectorLoad", 0);

snippets::op::VectorLoad::VectorLoad(const Output<Node>& x) : Load(x) {
}

@ -6,7 +6,5 @@

using namespace ngraph;

NGRAPH_RTTI_DEFINITION(snippets::op::VectorStore, "VectorStore", 0);

snippets::op::VectorStore::VectorStore(const Output<Node>& x) : Store(x) {
}
@ -113,8 +113,10 @@ const ::ngraph::Node::type_info_t& NmsStaticShapeIE<BaseNmsOp>::get_type_info_st
return type_info_static;
}

#ifndef OPENVINO_STATIC_LIBRARY
template <typename BaseNmsOp>
const ::ngraph::Node::type_info_t NmsStaticShapeIE<BaseNmsOp>::type_info = NmsStaticShapeIE<BaseNmsOp>::get_type_info_static();
#endif

#ifdef __clang__
extern template class TRANSFORMATIONS_API op::internal::NmsStaticShapeIE<ov::op::v8::MulticlassNms>;

@ -335,8 +335,10 @@ const ::ngraph::Node::type_info_t& TypeRelaxed<BaseOp>::get_type_info_static() {
return type_info_static;
}

#ifndef OPENVINO_STATIC_LIBRARY
template <typename BaseOp>
const ::ngraph::Node::type_info_t TypeRelaxed<BaseOp>::type_info = TypeRelaxed<BaseOp>::get_type_info_static();
#endif

NGRAPH_SUPPRESS_DEPRECATED_END
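For the two templated helpers the deprecated `type_info` member cannot simply be deleted, so its definition is compiled out of static builds instead: `get_type_info_static()` carries a function-local static, and the legacy member is only materialized for shared-library builds. A reduced, runnable sketch of that pattern (illustrative names, not the real headers):

#include <iostream>

struct TypeInfo { const char* name; };

template <typename Base>
struct Wrapped {
    // Function-local static: safe in both static and shared builds.
    static const TypeInfo& get_type_info_static() {
        static const TypeInfo info{"Wrapped"};
        return info;
    }
    static const TypeInfo type_info;  // legacy member, kept for API compatibility
};

// Compiled out of static builds so every consumer does not drag in
// one more global static per template instantiation.
#ifndef OPENVINO_STATIC_LIBRARY
template <typename Base>
const TypeInfo Wrapped<Base>::type_info = Wrapped<Base>::get_type_info_static();
#endif

int main() {
    std::cout << Wrapped<int>::get_type_info_static().name << "\n";
}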
@ -18,9 +18,7 @@ enum class DynamicShapeResolverMode {

class DynamicShapeResolver : public ngraph::op::Op {
public:
static constexpr NodeTypeInfo type_info{"DynamicShapeResolver", 0};

const NodeTypeInfo& get_type_info() const override { return type_info; }
OPENVINO_OP("DynamicShapeResolver", "VPUOpset");

DynamicShapeResolver(const Output<Node>& tensorWithData,
const Output<Node>& tensorWithDims,

@ -19,14 +19,13 @@ namespace ngraph { namespace vpu { namespace op {

class ExpGatherElements : public ngraph::op::Op {
public:
static constexpr NodeTypeInfo type_info{"ExpGatherElements", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
OPENVINO_OP("ExpGatherElements", "VPUOpset");

explicit ExpGatherElements(const Output<Node>& data,
const Output<Node>& indices,
const Output<Node>& lookupIndices,
const int64_t axis,
const int64_t lookupAxis);
ExpGatherElements(const Output<Node>& data,
const Output<Node>& indices,
const Output<Node>& lookupIndices,
const int64_t axis,
const int64_t lookupAxis);

void validate_and_infer_types() override;

@ -12,8 +12,7 @@ namespace ngraph { namespace vpu { namespace op {

class OutShapeOfReshape : public ngraph::op::Op {
public:
static constexpr NodeTypeInfo type_info{"OutShapeOfReshape", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
OPENVINO_OP("OutShapeOfReshape", "VPUOpset");

OutShapeOfReshape(
const Output<Node>& inDataShape,

@ -17,9 +17,7 @@ namespace ngraph { namespace vpu { namespace op {

class StaticShapeBroadcast : public ::ngraph::op::v3::Broadcast {
public:
static constexpr NodeTypeInfo type_info{"StaticShapeBroadcast", 0};

const NodeTypeInfo& get_type_info() const override { return type_info; }
OPENVINO_OP("StaticShapeBroadcast", "VPUOpset");

StaticShapeBroadcast(const Output<Node>& arg,
const Output<Node>& targetShape,

@ -10,8 +10,7 @@ namespace ngraph { namespace vpu { namespace op {

class StaticShapeLoop : public ngraph::opset6::Loop {
public:
static constexpr NodeTypeInfo type_info{"StaticShapeLoop", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
OPENVINO_OP("StaticShapeLoop", "VPUOpset");

explicit StaticShapeLoop(const Loop& loop);
void validate_and_infer_types() override;

@ -15,8 +15,7 @@ namespace ngraph { namespace vpu { namespace op {

class StaticShapeNonMaxSuppression : public ngraph::op::NonMaxSuppressionIE3 {
public:
static constexpr NodeTypeInfo type_info{"StaticShapeNonMaxSuppression", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
OPENVINO_OP("StaticShapeNonMaxSuppression", "VPUOpset", ngraph::op::NonMaxSuppressionIE3);

explicit StaticShapeNonMaxSuppression(const ngraph::opset5::NonMaxSuppression& nms);

@ -14,9 +14,7 @@ namespace ngraph { namespace vpu { namespace op {

class StaticShapeNonZero : public ngraph::op::Op {
public:
static constexpr NodeTypeInfo type_info{"StaticShapeNonZero", 0};

const NodeTypeInfo& get_type_info() const override { return type_info; }
OPENVINO_OP("StaticShapeNonZero", "VPUOpset");

explicit StaticShapeNonZero(const Output<ngraph::Node>& input, const element::Type& output_type = element::i64);

@ -15,12 +15,11 @@ namespace ngraph { namespace vpu { namespace op {

class StaticShapeReshape : public ngraph::opset3::Reshape {
public:
OPENVINO_OP("StaticShapeReshape", "VPUOpset");

StaticShapeReshape(const Output<Node>& arg, const Output<Node>& pattern, bool special_zero);
explicit StaticShapeReshape(const std::shared_ptr<ngraph::opset3::Reshape>& reshape);

static constexpr NodeTypeInfo type_info{"StaticShapeReshape", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }

void validate_and_infer_types() override;

protected:

@ -15,8 +15,7 @@ namespace ngraph { namespace vpu { namespace op {

class StaticShapeTopK : public ngraph::op::v3::TopK {
public:
static constexpr NodeTypeInfo type_info{"StaticShapeTopK", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
OPENVINO_OP("StaticShapeTopK", "VPUOpset");

StaticShapeTopK(const Output<Node>& data,
const Output<Node>& k,

@ -27,10 +27,10 @@ bool fuseTypeToStaticShapeTopK(const std::shared_ptr<ngraph::Node>& node, ngraph
bool fuseTypeToOutShapeOfReshape(const std::shared_ptr<ngraph::Node>& node, ngraph::element::Type to, size_t idx);

static typeToFuseMap myriadTypeToFuseMap {
{ngraph::vpu::op::StaticShapeNonMaxSuppression::type_info, fuseTypeToStaticShapeNonMaxSuppression},
{ngraph::vpu::op::StaticShapeNonZero::type_info, fuseTypeToStaticShapeNonZero},
{ngraph::vpu::op::StaticShapeTopK::type_info, fuseTypeToStaticShapeTopK},
{ngraph::vpu::op::OutShapeOfReshape::type_info, fuseTypeToOutShapeOfReshape},
{ngraph::vpu::op::StaticShapeNonMaxSuppression::get_type_info_static(), fuseTypeToStaticShapeNonMaxSuppression},
{ngraph::vpu::op::StaticShapeNonZero::get_type_info_static(), fuseTypeToStaticShapeNonZero},
{ngraph::vpu::op::StaticShapeTopK::get_type_info_static(), fuseTypeToStaticShapeTopK},
{ngraph::vpu::op::OutShapeOfReshape::get_type_info_static(), fuseTypeToOutShapeOfReshape},
};

std::shared_ptr<ngraph::Node> shapeToConstant(const ngraph::element::Type& type, const ngraph::Shape& shape);
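myriadTypeToFuseMap is a type-info-to-handler dispatch table; keying it by `get_type_info_static()` keeps the keys valid once the out-of-line `type_info` objects are gone. A self-contained sketch of the idea (standard-library stand-ins; the real `typeToFuseMap` signature may differ):

#include <functional>
#include <iostream>
#include <map>
#include <string>

// Stand-in for DiscreteTypeInfo: comparable so it can key a std::map.
struct TypeInfo {
    std::string name;
    bool operator<(const TypeInfo& other) const { return name < other.name; }
};

struct TopK {
    static const TypeInfo& get_type_info_static() {
        static const TypeInfo info{"StaticShapeTopK"};
        return info;
    }
};

using Handler = std::function<bool(int /*stand-in argument*/)>;

int main() {
    // Dispatch table keyed by the node's static type info, as in myriadTypeToFuseMap.
    std::map<TypeInfo, Handler> typeToFuse{
        {TopK::get_type_info_static(), [](int to) { return to > 0; }},
    };

    auto it = typeToFuse.find(TopK::get_type_info_static());
    std::cout << (it != typeToFuse.end() ? it->second(1) : false) << "\n";  // 1
}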
@ -8,8 +8,6 @@

namespace ngraph { namespace vpu { namespace op {

constexpr NodeTypeInfo DynamicShapeResolver::type_info;

DynamicShapeResolver::DynamicShapeResolver(
const Output<Node>& tensorWithData,
const Output<Node>& tensorWithDims,

@ -8,8 +8,6 @@

namespace ngraph { namespace vpu { namespace op {

constexpr NodeTypeInfo ExpGatherElements::type_info;

ExpGatherElements::ExpGatherElements(const Output<Node>& data,
const Output<Node>& indices,
const Output<Node>& lookupIndices,

@ -7,8 +7,6 @@

namespace ngraph { namespace vpu { namespace op {

constexpr NodeTypeInfo OutShapeOfReshape::type_info;

OutShapeOfReshape::OutShapeOfReshape(
const Output<Node>& inDataShape,
const Output<Node>& outShapeDescriptor,

@ -12,8 +12,6 @@

namespace ngraph { namespace vpu { namespace op {

constexpr NodeTypeInfo StaticShapeBroadcast::type_info;

StaticShapeBroadcast::StaticShapeBroadcast(const Output<Node>& arg,
const Output<Node>& targetShape,
const Output<Node>& axesMapping,

@ -7,8 +7,6 @@

namespace ngraph { namespace vpu { namespace op {

constexpr NodeTypeInfo StaticShapeLoop::type_info;

StaticShapeLoop::StaticShapeLoop(const Loop& loop) : Loop(loop), m_evaluatedIterationsCount{ngraph::PartialShape::dynamic()} {}

void StaticShapeLoop::validate_and_infer_types() {

@ -10,8 +10,6 @@

namespace ngraph { namespace vpu { namespace op {

constexpr NodeTypeInfo StaticShapeNonMaxSuppression::type_info;

StaticShapeNonMaxSuppression::StaticShapeNonMaxSuppression(const ngraph::opset5::NonMaxSuppression& nms)
: StaticShapeNonMaxSuppression(
nms.input_value(0),

@ -9,8 +9,6 @@

namespace ngraph { namespace vpu { namespace op {

constexpr NodeTypeInfo StaticShapeNonZero::type_info;

StaticShapeNonZero::StaticShapeNonZero(const Output<Node>& input, const element::Type& output_type)
: Op({input}), m_output_type(output_type) {
constructor_validate_and_infer_types();

@ -9,8 +9,6 @@

namespace ngraph { namespace vpu { namespace op {

constexpr NodeTypeInfo StaticShapeReshape::type_info;

StaticShapeReshape::StaticShapeReshape(const Output<Node>& arg, const Output<Node>& pattern, bool special_zero)
: ::ngraph::opset3::Reshape(arg, pattern, special_zero),
m_evaluatedOutputShape{PartialShape::dynamic()} {

@ -6,8 +6,6 @@
#include <ngraph/validation_util.hpp>
namespace ngraph { namespace vpu { namespace op {

constexpr NodeTypeInfo StaticShapeTopK::type_info;

ngraph::vpu::op::StaticShapeTopK::StaticShapeTopK(
const Output<Node>& data,
const Output<Node>& k,
@ -140,7 +140,7 @@ const Transformations& getDefaultTransformations() {
{ngraph::opset5::Split::get_type_info_static(), dynamicToStaticShapeSplit},
{ngraph::opset5::GatherND::get_type_info_static(), dynamicToStaticShapeGatherND},
{ngraph::opset6::GatherElements::get_type_info_static(), dynamicToStaticShapeGatherElements},
{ngraph::vpu::op::ExpGatherElements::type_info, dynamicToStaticShapeGatherElements},
{ngraph::vpu::op::ExpGatherElements::get_type_info_static(), dynamicToStaticShapeGatherElements},

// reduction
{ngraph::opset3::ReduceLogicalAnd::get_type_info_static(), dynamicToStaticShapeReduce},

@ -34,7 +34,7 @@ void dynamicToStaticShapeConcat(std::shared_ptr<ngraph::Node> target) {
VPU_THROW_UNLESS(!dsrInputs.empty(),
"DynamicToStaticShape transformation for {} of type {} expects at least "
"one {} as input, actual types: {}", target->get_friendly_name(),
target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info,
target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::get_type_info_static(),
std::accumulate(inputs.begin(), inputs.end(), std::string(), [](
const std::string& typesStr, const ngraph::Output<ngraph::Node>& input) {
return typesStr + input.get_node_shared_ptr()->get_type_info().name + ", ";

@ -18,7 +18,8 @@ void dynamicToStaticShapeGatherElements(std::shared_ptr<ngraph::Node> target) {
const auto dsr = target->input_value(1).get_node_shared_ptr();
VPU_THROW_UNLESS(ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(dsr),
"DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
target->get_friendly_name(), target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info, 1);
target->get_friendly_name(), target->get_type_info(),
ngraph::vpu::op::DynamicShapeResolver::get_type_info_static(), 1);

const auto shape = dsr->input(1).get_source_output();
const auto copied = target->clone_with_new_inputs(target->input_values());

@ -125,7 +125,8 @@ void dynamicToStaticShapeStridedSlice(std::shared_ptr<ngraph::Node> target) {
const auto dsr = target->input_value(0).get_node_shared_ptr();
VPU_THROW_UNLESS(ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(dsr),
"DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
target->get_friendly_name(), target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info, 0);
target->get_friendly_name(), target->get_type_info(),
ngraph::vpu::op::DynamicShapeResolver::get_type_info_static(), 0);

const auto stridedSlice = ngraph::as_type_ptr<ngraph::opset3::StridedSlice>(target);
VPU_THROW_UNLESS(stridedSlice, "dynamicToStaticShapeStridedSlice transformation is not applicable for {}", target);

@ -20,7 +20,8 @@ namespace vpu {
void dynamicToStaticShapeTopK(std::shared_ptr<ngraph::Node> target) {
const auto dsr = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(target->input_value(0).get_node_shared_ptr());
VPU_THROW_UNLESS(dsr, "DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
target->get_friendly_name(), target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info, 0);
target->get_friendly_name(), target->get_type_info(),
ngraph::vpu::op::DynamicShapeResolver::get_type_info_static(), 0);

const auto topk = ngraph::as_type_ptr<ngraph::opset3::TopK>(target);

@ -19,7 +19,8 @@ void dynamicToStaticUnaryElementwise(std::shared_ptr<ngraph::Node> target) {
const auto dsr = target->input_value(0).get_node_shared_ptr();
VPU_THROW_UNLESS(ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(dsr),
"DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
target->get_friendly_name(), target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info, 0);
target->get_friendly_name(), target->get_type_info(),
ngraph::vpu::op::DynamicShapeResolver::get_type_info_static(), 0);

const auto shape = dsr->input(1).get_source_output();
const auto copied = target->clone_with_new_inputs(target->input_values());

@ -20,7 +20,8 @@ namespace vpu {
void dynamicToStaticShapeVariadicSplit(std::shared_ptr<ngraph::Node> target) {
const auto dsr = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(target->input_value(0).get_node_shared_ptr());
VPU_THROW_UNLESS(dsr, "DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
target->get_friendly_name(), target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info, 0);
target->get_friendly_name(), target->get_type_info(),
ngraph::vpu::op::DynamicShapeResolver::get_type_info_static(), 0);

const auto axis_node = ngraph::as_type_ptr<ngraph::opset3::Constant>(target->input_value(1).get_node_shared_ptr());
VPU_THROW_UNLESS(axis_node, "dynamicToStaticShapeVariadic transformation is not applicable for {}, dynamic axis is not supported", target);

@ -296,7 +296,7 @@ void FrontEnd::parseConcat(
auto inferRequirement = ConcatInferRequirement::CanBeReplaced;
if (auto concatOp = std::dynamic_pointer_cast<ngraph::opset3::Concat>(layer->getNode())) {
inferRequirement = concatOp->get_input_source_output(0).get_node_shared_ptr()->get_type_info() ==
ngraph::vpu::op::DynamicShapeResolver::type_info
ngraph::vpu::op::DynamicShapeResolver::get_type_info_static()
? ConcatInferRequirement::NeedToInfer
: ConcatInferRequirement::CanBeReplaced;
}
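Two idioms recur in these VPU checks: an exact comparison against a type's static `DiscreteTypeInfo`, and an `as_type_ptr` downcast that also yields the typed pointer. A fragment illustrating the difference, assuming the nGraph/VPU headers are available (the include path shown is illustrative, not verified):

#include <ngraph/ngraph.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>  // illustrative path

void classify(const std::shared_ptr<ngraph::Node>& n) {
    // 1) Exact type check: cheap, but yields only a yes/no answer.
    bool isDsr = n->get_type_info() ==
                 ngraph::vpu::op::DynamicShapeResolver::get_type_info_static();

    // 2) Downcast: also gives the typed pointer when it matches.
    if (auto dsr = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(n)) {
        (void)dsr;  // use the typed API here
    }
    (void)isDsr;
}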
@ -10,8 +10,7 @@

class FakeAbs : public ngraph::op::Op {
public:
static constexpr ngraph::NodeTypeInfo type_info{"Abs", 100500};
const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
OPENVINO_OP("Abs", "experimental");

FakeAbs() = default;
FakeAbs(const ngraph::Output<ngraph::Node>& arg): ngraph::op::Op({arg}) {

@ -27,7 +26,6 @@ public:
return true;
}
};
constexpr ngraph::NodeTypeInfo FakeAbs::type_info;

class AbsFakeExtension: public InferenceEngine::IExtension {
public:

@ -38,7 +36,7 @@ public:
std::map<std::string, ngraph::OpSet> opsets;
ngraph::OpSet opset;
opset.insert<FakeAbs>();
opsets["experimental"] = opset;
opsets[FakeAbs::get_type_info_static().version_id] = opset;
return opsets;
}
};

@ -103,7 +101,7 @@ TEST_F(NGraphReaderTests, ReadAbsFromCustomOpsetNetwork) {
bool genericNodeExists = false;
const std::string type = "Abs";
for (auto op : nGraph->get_ops()) {
if (type == op->get_type_info().name && 100500 == op->get_type_info().version)
if (type == op->get_type_info().name)
genericNodeExists = true;
}
ASSERT_TRUE(genericNodeExists);

@ -9,8 +9,8 @@

class CustomAddConst : public ngraph::op::Op {
public:
static constexpr ngraph::NodeTypeInfo type_info{"CustomAddConst", 100600};
const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
OPENVINO_OP("CustomAddConst", "custom_opset");

CustomAddConst() = default;
CustomAddConst(const ngraph::Output<ngraph::Node>& arg, const ngraph::element::Type element_type,
const ngraph::Shape shape, const std::shared_ptr<ngraph::runtime::AlignedBuffer> data):

@ -45,8 +45,6 @@ private:
std::shared_ptr<ngraph::runtime::AlignedBuffer> m_data;
};

constexpr ngraph::NodeTypeInfo CustomAddConst::type_info;

class CustomAddConstExtension : public InferenceEngine::IExtension {
public:
void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override {}

@ -57,7 +55,7 @@ public:
std::map<std::string, ngraph::OpSet> opsets;
ngraph::OpSet opset;
opset.insert<CustomAddConst>();
opsets["custom_opset"] = opset;
opsets[CustomAddConst::get_type_info_static().version_id] = opset;
return opsets;
}
};
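In both test extensions the opset map key now comes from the op itself: `get_type_info_static().version_id` is the opset string that OPENVINO_OP stores, so registration can no longer drift from the declaration, and the tests drop the old numeric-version comparison accordingly. A fragment showing the resulting idiom (assumes the FakeAbs declaration above):

// The whole-type_info comparison replaces the old name + numeric-version pair.
bool isFakeAbs(const std::shared_ptr<ngraph::Node>& op) {
    return op->get_type_info() == FakeAbs::get_type_info_static();
}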