[CPU][Snippets] Dynamism via recompilation and cache (#15430)

This commit is contained in:
Chenhu Wang 2023-08-25 01:31:42 +08:00 committed by GitHub
parent 8df85badf8
commit 28a5bf7b04
52 changed files with 1932 additions and 747 deletions
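For context, the idea of the change: the subgraph body is hashed once at node construction, runtime shapes/precisions/layouts complete a lookup key, and compiled executors are cached under that key, so dynamic shapes trigger recompilation only the first time a shape combination is seen. A minimal, self-contained sketch of that pattern (hypothetical Key, KeyHash, Executor and ExecutorCache names — not the plugin's actual classes):

#include <cstdint>
#include <functional>
#include <memory>
#include <unordered_map>
#include <vector>

struct Key {
    uint64_t body_hash;                       // hash of the subgraph body (shape-independent)
    std::vector<std::vector<size_t>> shapes;  // current input shapes

    bool operator==(const Key& rhs) const {
        return body_hash == rhs.body_hash && shapes == rhs.shapes;
    }
};

struct KeyHash {
    size_t operator()(const Key& k) const {
        // boost-style hash_combine, the same formula the commit's hashing code uses
        size_t seed = std::hash<uint64_t>{}(k.body_hash);
        for (const auto& shape : k.shapes)
            for (size_t d : shape)
                seed ^= std::hash<size_t>{}(d) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
        return seed;
    }
};

struct Executor { /* holds a compiled kernel */ };

class ExecutorCache {
public:
    using Builder = std::function<std::shared_ptr<Executor>(const Key&)>;
    std::shared_ptr<Executor> getOrCreate(const Key& key, const Builder& build) {
        auto it = cache_.find(key);
        if (it != cache_.end())
            return it->second;  // cache hit: no recompilation
        auto exec = build(key); // cache miss: compile for these shapes
        cache_.emplace(key, exec);
        return exec;
    }
private:
    std::unordered_map<Key, std::shared_ptr<Executor>, KeyHash> cache_;
};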

View File

@@ -116,6 +116,7 @@ public:
                                 const void* compile_params = nullptr);
    snippets::Schedule generate(const void* compile_params = nullptr);
    ov::PartialShape canonicalize(const BlockedShapeVector& output_shapes, const BlockedShapeVector& input_shapes);
+   ov::PartialShape canonicalized_body_shape_infer(const BlockedShapeVector& input_shapes);
    std::vector<PartialShape> reshape_body(const std::vector<PartialShape>& input_shapes);
    std::vector<Shape> reshape_body(const std::vector<Shape>& input_shapes);
@@ -161,6 +162,8 @@ private:
    ov::PartialShape master_shape;
    size_t tileRank = 0; // set by plugin to specify the number of dimensions processed in a single kernel call
+   size_t maxInputRank = 0;
+   std::vector<size_t> appendOnesForCanonical;
    /**
    * @interface SubgraphConfig

View File

@@ -0,0 +1,39 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include "openvino/core/model.hpp"
#include <openvino/pass/pass.hpp>
namespace ov {
namespace snippets {
namespace pass {
/**
* @brief Hash transformation calculates a hash value for the snippets body. Node names are ignored, since they make no difference from the execution perspective
*/
class Hash : public ov::pass::ModelPass {
public:
OPENVINO_RTTI("HashPass", "0");
bool run_on_model(const std::shared_ptr<ov::Model>& f) override;
/**
* @brief Hash pass constructor
*
* @param output_hash_value Reference to output value. By applying hash pass on function, resulting hash value
* will be set to this variable
*/
Hash(uint64_t& output_hash_value);
private:
uint64_t& m_hash;
};
} // namespace pass
} // namespace snippets
} // namespace ov
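A minimal usage sketch (assuming `body` is a subgraph's `ov::Model`, mirroring how the CPU plugin calls the pass later in this commit):

uint64_t body_hash = 0;
ov::snippets::pass::Hash hash_pass(body_hash);  // the pass writes the digest into body_hash
hash_pass.run_on_model(body);                   // returns false: the model is not modified
// body_hash now identifies the body structure, independent of node names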

View File

@@ -325,7 +325,8 @@ ov::PartialShape snippets::op::Subgraph::canonicalize(const BlockedShapeVector&
    PartialShape baseShape;
    AxisVector baseOrder;
    std::tie(baseShape, baseOrder, std::ignore) = getMaxRankBlockedShape(inputShapes);
-   const auto baseRank = baseShape.size();
+   maxInputRank = baseShape.size();
+   appendOnesForCanonical.resize(inputShapes.size(), 0);
    const bool baseIsBlocked = baseOrder.size() != std::set<size_t>(baseOrder.begin(), baseOrder.end()).size();
    for (size_t i = 0; i < inputShapes.size(); i++) {
        const auto& blockedShape = inputShapes[i];
@@ -334,14 +335,16 @@ ov::PartialShape snippets::op::Subgraph::canonicalize(const BlockedShapeVector&
        element::Type inType;
        std::tie(inShape, inOrder, inType) = blockedShape;
        const auto inRank = inShape.size();
-       NODE_VALIDATION_CHECK(this, inRank <= baseRank, "Input rank can't be larger than output rank in snippets.");
-       if (inRank < baseRank) {
-           PartialShape newShape(ov::Shape(baseRank, 1));
+       NODE_VALIDATION_CHECK(this, inRank <= maxInputRank, "Input rank can't be larger than output rank in snippets.");
+       if (inRank < maxInputRank) {
+           appendOnesForCanonical[i] = maxInputRank - inRank;
+           PartialShape newShape(ov::Shape(maxInputRank, 1));
            // todo: more complicated logic is needed if we want to merge smth else than blocked and planar
            if (baseIsBlocked) {
                const bool inIsNotBlocked = inOrder.size() == std::set<size_t>(inOrder.begin(), inOrder.end()).size();
                NODE_VALIDATION_CHECK(this, inIsNotBlocked, "Snippets don't support conversion between blocked layouts of different ranks");
                inShape.insert(inShape.end(), ov::Dimension(1));
+               appendOnesForCanonical[i]--;
            }
            NODE_VALIDATION_CHECK(this, PartialShape::broadcast_merge_into(newShape, inShape, ov::op::AutoBroadcastType::NUMPY),
                                  "Failed to broadcast_merge inputs in snippets canonicalization");
@@ -364,6 +367,7 @@ ov::PartialShape snippets::op::Subgraph::canonicalize(const BlockedShapeVector&
        body_ptr()->replace_parameter(i, std::make_shared<ov::op::v0::Parameter>(paramType, inShape));
    }
    body_ptr()->validate_nodes_and_infer_types();
    auto skipStartEndOnes = [](const PartialShape& shape) {
        auto begin = shape.begin();
        auto end = shape.end();
@@ -415,6 +419,43 @@ ov::PartialShape snippets::op::Subgraph::canonicalize(const BlockedShapeVector&
    return master_shape;
}
ov::PartialShape snippets::op::Subgraph::canonicalized_body_shape_infer(const BlockedShapeVector& inputShapes) {
std::vector<Shape> normInputShapes;
for (size_t i = 0; i < inputShapes.size(); i++) {
PartialShape inShape = std::get<0>(inputShapes[i]);
const auto inRank = inShape.size();
if (inRank < maxInputRank) {
PartialShape newShape(ov::Shape(maxInputRank, 1));
for (size_t ir = 0; ir < inRank; ir++) {
newShape[appendOnesForCanonical[i] + ir] = inShape[ir];
}
normInputShapes.push_back(newShape.get_shape());
} else {
normInputShapes.push_back(inShape.get_shape());
}
}
reshape_body(normInputShapes);
const auto& body_results = body_ptr()->get_results();
PartialShape outPShape = body_results[0]->get_input_partial_shape(0);
const auto& result_parent = body_results[0]->get_input_node_shared_ptr(0);
if (body_results.size() == 1 &&
ov::is_type<ov::op::v1::Transpose>(result_parent) &&
ov::is_type<ov::op::v0::MatMul>(result_parent->get_input_node_shared_ptr(0))) {
outPShape = result_parent->get_input_partial_shape(0);
} else {
for (size_t i = 0; i < body_results.size(); i++) {
auto shape_i = body_results[i]->get_input_partial_shape(0);
bool compatibleWithOtherOutputs = PartialShape::broadcast_merge_into(outPShape, shape_i,
::ov::op::AutoBroadcastType::NUMPY);
NODE_VALIDATION_CHECK(this, compatibleWithOtherOutputs,
"Snippets output shapes must be numpy broadcastable");
}
}
master_shape = outPShape;
return master_shape;
}
bool snippets::op::Subgraph::check_broadcast(const std::shared_ptr<const ov::Node>& node) noexcept {
    const auto elementwise = std::dynamic_pointer_cast<const ov::op::util::BinaryElementwiseArithmetic>(node);
    return
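For readability, a worked example of the rank alignment that canonicalized_body_shape_infer replays from appendOnesForCanonical (hypothetical values, not from the commit):

// A rank-2 input was aligned to maxInputRank == 4 during canonicalize(),
// so appendOnesForCanonical[i] == 2; later shape updates reuse that offset:
std::vector<size_t> inShape{8, 16};
const size_t maxInputRank = 4, appendOnes = 2;
std::vector<size_t> newShape(maxInputRank, 1);  // {1, 1, 1, 1}
for (size_t ir = 0; ir < inShape.size(); ir++)
    newShape[appendOnes + ir] = inShape[ir];    // {1, 1, 8, 16}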

View File

@@ -188,11 +188,10 @@ auto has_supported_in_out(const std::shared_ptr<const Node> &n) -> bool {
    auto supported = [&n](descriptor::Tensor& t) -> bool {
        // Todo: int32 isn't supported in general because i32 emitters are required for bit-exact i32 calculations in some cases
        // So i32 is supported exclusively for transposes and broadcast
-       return t.get_partial_shape().is_static() &&
-              (TokenizeSnippets::supported_element_types.count(t.get_element_type()) != 0 ||
-               (t.get_element_type() == ov::element::i32 &&
-                (ov::is_type<const opset1::Transpose>(n) ||
-                 ov::is_type<const opset1::Broadcast>(n))));
+       return TokenizeSnippets::supported_element_types.count(t.get_element_type()) != 0 ||
+              (t.get_element_type() == ov::element::i32 &&
+               (ov::is_type<const opset1::Transpose>(n) ||
+                ov::is_type<const opset1::Broadcast>(n)));
    };
    const auto& inputs = n->inputs();
    const auto& outputs = n->outputs();

View File

@@ -0,0 +1,411 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "snippets/pass/hash.hpp"
#include <array>
#include <cassert>
#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include "ngraph/ops.hpp"
#include "ngraph/opsets/opset.hpp"
#include "openvino/core/except.hpp"
#include "openvino/core/meta_data.hpp"
#include "openvino/core/model.hpp"
#include "openvino/op/util/framework_node.hpp"
#include "openvino/opsets/opset1.hpp"
#include "transformations/rt_info/primitives_priority_attribute.hpp"
namespace ov {
namespace snippets {
namespace pass {
OPENVINO_SUPPRESS_DEPRECATED_START
// helper
namespace {
template <typename Container>
std::string join(const Container& c, const char* glue = ", ") {
std::stringstream oss;
const char* s = "";
for (const auto& v : c) {
oss << s << v;
s = glue;
}
return oss.str();
}
struct Edge {
int from_layer = 0;
int from_port = 0;
int to_layer = 0;
int to_port = 0;
};
enum class AttrType {layers, layer, id, type, data, rt_info, attribute, name, version, input, port, precision, dimension, output, value,
edges, edge, from_layer, from_port, to_layer, to_port, constant, size};
template <typename T, typename std::enable_if<!std::is_enum<T>::value , int>::type = 0>
static uint64_t hash_combine(uint64_t seed, const T &v) {
// Hash combine formula from boost
return seed ^= std::hash<T> {}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
template <typename T, typename std::enable_if<std::is_enum<T>::value , int>::type = 0>
static uint64_t hash_combine(uint64_t seed, const T &v) {
using underlying_t = typename std::underlying_type<T>::type;
return hash_combine(seed, static_cast<underlying_t>(v));
}
namespace rt_info {
// Some node attributes are not of type ov::RuntimeAttribute and need a dedicated visitor.
const std::vector<std::string> list_of_names{
"PrimitivesPriority",
"alt_width",
};
class NodeAuxRTInfoHasher {
public:
explicit NodeAuxRTInfoHasher(uint64_t& hash) : m_hash(hash) {}
void serialize(const ov::Node::RTMap& rt_info) {
for (const auto& rt_info_name : list_of_names) {
const auto& found_rt_info = rt_info.find(rt_info_name);
if (found_rt_info != rt_info.end()) {
std::stringstream strm;
found_rt_info->second.print(strm);
m_hash = hash_combine(m_hash, rt_info_name);
m_hash = hash_combine(m_hash, strm.str());
}
}
}
private:
uint64_t& m_hash;
};
class RTInfoHasher : public ov::AttributeVisitor {
uint64_t& m_rt_hash;
public:
RTInfoHasher(uint64_t& rt_hash) : m_rt_hash(rt_hash) {}
void on_adapter(const std::string& name, ov::ValueAccessor<void>& adapter) override {
if (auto a = ov::as_type<ov::AttributeAdapter<std::set<std::string>>>(&adapter)) {
const auto& value = join(a->get());
m_rt_hash = hash_combine(hash_combine(m_rt_hash, name), value);
} else {
OPENVINO_THROW("Unsupported attribute type for snippets hash generation: ", name);
}
}
void on_adapter(const std::string& name, ov::ValueAccessor<bool>& adapter) override {
m_rt_hash = hash_combine(hash_combine(m_rt_hash, name), adapter.get());
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::string>& adapter) override {
m_rt_hash = hash_combine(hash_combine(m_rt_hash, name), adapter.get());
}
void on_adapter(const std::string& name, ov::ValueAccessor<int64_t>& adapter) override {
m_rt_hash = hash_combine(hash_combine(m_rt_hash, name), adapter.get());
}
void on_adapter(const std::string& name, ov::ValueAccessor<double>& adapter) override {
m_rt_hash = hash_combine(hash_combine(m_rt_hash, name), adapter.get());
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<int>>& adapter) override {
const auto& value = join(adapter.get());
m_rt_hash = hash_combine(hash_combine(m_rt_hash, name), value);
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<int64_t>>& adapter) override {
const auto& value = join(adapter.get());
m_rt_hash = hash_combine(hash_combine(m_rt_hash, name), value);
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<uint64_t>>& adapter) override {
const auto& value = join(adapter.get());
m_rt_hash = hash_combine(hash_combine(m_rt_hash, name), value);
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<float>>& adapter) override {
const auto& value = join(adapter.get());
m_rt_hash = hash_combine(hash_combine(m_rt_hash, name), value);
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<std::string>>& adapter) override {
const auto& value = join(adapter.get());
m_rt_hash = hash_combine(hash_combine(m_rt_hash, name), value);
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::shared_ptr<ov::Model>>& adapter) override {
OPENVINO_THROW("Model type is unsupported for snippets rt info hash generation");
}
};
} // namespace rt_info
void ovfunction_2_hash(uint64_t& hash, const ov::Model& model);
OPENVINO_SUPPRESS_DEPRECATED_START
class SnippetsHasher : public ov::AttributeVisitor {
uint64_t& m_hash;
const std::string& m_node_type_name;
template <typename T>
std::string create_attribute_list(ov::ValueAccessor<std::vector<T>>& adapter) {
return join(adapter.get());
}
public:
SnippetsHasher(uint64_t& hash,
const std::string& node_type_name)
: m_hash(hash),
m_node_type_name(node_type_name) {}
void on_adapter(const std::string& name, ov::ValueAccessor<void>& adapter) override {
if (const auto& a = ov::as_type<ov::AttributeAdapter<std::shared_ptr<ngraph::Variable>>>(&adapter)) {
m_hash = hash_combine(hash_combine(m_hash, name), a->get()->get_info().variable_id);
} else if (const auto& a =
ov::as_type<ov::AttributeAdapter<std::shared_ptr<ngraph::runtime::AlignedBuffer>>>(&adapter)) {
if (name == "value" && m_node_type_name == "Constant") {
m_hash = hash_combine(m_hash, AttrType::constant);
const int64_t size = a->get()->size();
m_hash = hash_combine(hash_combine(m_hash, AttrType::size), size);
auto data = static_cast<const char*>(a->get()->get_ptr());
for (int64_t i = 0; i < size; i++) {
m_hash = hash_combine(m_hash, data[i]);
}
}
} else if (const auto& a = ov::as_type<ov::AttributeAdapter<ov::op::util::FrameworkNodeAttrs>>(&adapter)) {
const auto& attrs = a->get();
// Update node attributes in data field
for (const auto& attr : attrs) {
m_hash = hash_combine(hash_combine(m_hash, attr.first), attr.second);
}
} else if (const auto& a = ov::as_type<ov::AttributeAdapter<ov::element::TypeVector>>(&adapter)) {
const auto& attrs = a->get();
m_hash = hash_combine(hash_combine(m_hash, name), join(attrs));
} else if (const auto& a = ov::as_type<ov::AttributeAdapter<ov::PartialShape>>(&adapter)) {
const auto& attrs = a->get();
auto shape_str = attrs.to_string();
m_hash = hash_combine(hash_combine(m_hash, name), shape_str);
} else if (const auto& a = ov::as_type<ov::AttributeAdapter<ov::Dimension>>(&adapter)) {
const auto& attrs = a->get();
std::stringstream dim_str_stream;
dim_str_stream << attrs;
auto dim_str = dim_str_stream.str();
m_hash = hash_combine(hash_combine(m_hash, name), dim_str);
} else {
OPENVINO_THROW("Unsupported attribute type for snippets hash generation: ", name);
}
}
void on_adapter(const std::string& name, ov::ValueAccessor<bool>& adapter) override {
m_hash = hash_combine(hash_combine(m_hash, name), adapter.get());
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::string>& adapter) override {
m_hash = hash_combine(hash_combine(m_hash, name), adapter.get());
}
void on_adapter(const std::string& name, ov::ValueAccessor<int64_t>& adapter) override {
m_hash = hash_combine(hash_combine(m_hash, name), static_cast<long long>(adapter.get()));
}
void on_adapter(const std::string& name, ov::ValueAccessor<double>& adapter) override {
m_hash = hash_combine(hash_combine(m_hash, name), adapter.get());
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<int>>& adapter) override {
m_hash = hash_combine(hash_combine(m_hash, name), create_attribute_list(adapter));
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<int64_t>>& adapter) override {
m_hash = hash_combine(hash_combine(m_hash, name), create_attribute_list(adapter));
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<uint64_t>>& adapter) override {
m_hash = hash_combine(hash_combine(m_hash, name), create_attribute_list(adapter));
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<float>>& adapter) override {
m_hash = hash_combine(hash_combine(m_hash, name), create_attribute_list(adapter));
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<std::string>>& adapter) override {
m_hash = hash_combine(hash_combine(m_hash, name), create_attribute_list(adapter));
}
void on_adapter(const std::string& name, ov::ValueAccessor<std::shared_ptr<ov::Model>>& adapter) override {
ovfunction_2_hash(m_hash, *adapter.get());
}
};
OPENVINO_SUPPRESS_DEPRECATED_END
std::unordered_map<ov::Node*, int> create_layer_ids(const ov::Model& model) {
std::unordered_map<ov::Node*, int> layer_ids;
int id = 0;
for (const auto& node : model.get_ordered_ops()) {
layer_ids[node.get()] = id++;
}
return layer_ids;
}
std::vector<Edge> create_edge_mapping(const std::unordered_map<ov::Node*, int>& layer_ids,
const ov::Model& model) {
std::vector<Edge> edges;
for (const auto& node : model.get_ordered_ops()) {
if (ov::op::util::is_parameter(node)) {
continue;
}
for (const auto& i : node->inputs()) {
auto source_output = i.get_source_output();
auto source_node = source_output.get_node();
auto current_node = i.get_node();
if (layer_ids.find(source_node) == layer_ids.end() || layer_ids.find(current_node) == layer_ids.end()) {
OPENVINO_THROW("Failed creat edge map in snippets hash generation");
}
Edge e{};
e.from_layer = layer_ids.find(source_node)->second;
e.from_port = static_cast<int>(source_node->get_input_size() + source_output.get_index());
e.to_layer = layer_ids.find(current_node)->second;
e.to_port = static_cast<int>(i.get_index());
edges.push_back(e);
}
}
std::sort(begin(edges), end(edges), [](const Edge& a, const Edge& b) -> bool {
return a.from_layer < b.from_layer;
});
return edges;
}
void hash_rt_info(uint64_t& hash, const std::string& name, const ov::Any& data) {
if (data.is<std::shared_ptr<ov::Meta>>()) {
std::shared_ptr<ov::Meta> meta = data.as<std::shared_ptr<ov::Meta>>();
ov::AnyMap& map = *meta;
for (const auto& it : map) {
hash_rt_info(hash, it.first, it.second);
}
} else if (data.is<ov::AnyMap>()) {
const ov::AnyMap& any_map = data.as<ov::AnyMap>();
for (const auto& it : any_map) {
hash_rt_info(hash, it.first, it.second);
}
} else {
std::string value = data.as<std::string>();
hash = hash_combine(hash_combine(hash, AttrType::value), value);
}
}
void ovfunction_2_hash(uint64_t& hash,
const ov::Model& model) {
hash = hash_combine(hash, AttrType::layers);
const std::unordered_map<ov::Node*, int> layer_ids = create_layer_ids(model);
std::unordered_set<std::string> unique_names;
auto sorted_ops = model.get_ordered_ops();
for (const auto& n : sorted_ops) {
ov::Node* node = n.get();
const std::string& node_type_name{node->get_type_name()};
if (layer_ids.find(node) == layer_ids.end())
OPENVINO_THROW("Failed to find layer's id in snippets hash generation.");
// <layers>
hash = hash_combine(hash, AttrType::layer);
hash = hash_combine(hash_combine(hash, AttrType::id), layer_ids.find(node)->second);
hash = hash_combine(hash_combine(hash, AttrType::type), node_type_name);
// <layers/data> general attributes
hash = hash_combine(hash, AttrType::data);
auto append_runtime_info = [&](uint64_t& hash, ov::RTMap& attributes) {
hash = hash_combine(hash, AttrType::rt_info);
for (auto& item : attributes) {
if (item.second.is<ov::RuntimeAttribute>()) {
auto& rt_attribute = item.second.as<ov::RuntimeAttribute>();
const auto& type_info = rt_attribute.get_type_info();
if (!strcmp(type_info.name, "fused_names")) {
continue;
}
hash = hash_combine(hash, AttrType::attribute);
hash = hash_combine(hash_combine(hash, AttrType::name), type_info.name);
hash = hash_combine(hash_combine(hash, AttrType::version), type_info.get_version());
rt_info::RTInfoHasher rt_info_visitor(hash);
rt_attribute.visit_attributes(rt_info_visitor);
}
}
};
append_runtime_info(hash, node->get_rt_info());
int port_id = 0;
// <layers/input>
if (node->get_input_size() > 0) {
hash = hash_combine(hash, AttrType::input);
for (auto& i : node->inputs()) {
hash = hash_combine(hash, AttrType::port);
hash = hash_combine(hash_combine(hash, AttrType::id), port_id++);
hash = hash_combine(hash_combine(hash, AttrType::precision), i.get_element_type().hash());
hash = hash_combine(hash_combine(hash, AttrType::dimension), i.get_partial_shape().to_string());
append_runtime_info(hash, i.get_rt_info());
}
}
// <layers/output>
if ((node->get_output_size() > 0) && !ov::op::util::is_output(node)) {
hash = hash_combine(hash, AttrType::output);
for (auto& o : node->outputs()) {
hash = hash_combine(hash, AttrType::port);
hash = hash_combine(hash_combine(hash, AttrType::id), port_id++);
hash = hash_combine(hash_combine(hash, AttrType::precision), o.get_element_type().hash());
hash = hash_combine(hash_combine(hash, AttrType::dimension), o.get_partial_shape().to_string());
append_runtime_info(hash, o.get_rt_info());
}
}
// fill <data> general attributes
{
SnippetsHasher visitor(hash, node_type_name);
if (!node->visit_attributes(visitor)) {
OPENVINO_THROW("Visitor API is not supported in " + node_type_name + " in snippets hash generation");
}
}
rt_info::NodeAuxRTInfoHasher{hash}.serialize(node->get_rt_info());
}
// <edges>
const std::vector<Edge> edge_mapping = create_edge_mapping(layer_ids, model);
hash = hash_combine(hash, AttrType::edges);
for (auto& e : edge_mapping) {
hash = hash_combine(hash, AttrType::edge);
hash = hash_combine(hash_combine(hash, AttrType::from_layer), e.from_layer);
hash = hash_combine(hash_combine(hash, AttrType::from_port), e.from_port);
hash = hash_combine(hash_combine(hash, AttrType::to_layer), e.to_layer);
hash = hash_combine(hash_combine(hash, AttrType::to_port), e.to_port);
}
// Serialize rt info
hash = hash_combine(hash, AttrType::rt_info);
for (const auto& it : model.get_rt_info()) {
hash_rt_info(hash, it.first, it.second);
}
}
} // namespace
bool Hash::run_on_model(const std::shared_ptr<ov::Model>& f) {
uint64_t seed = 0;
std::string name = "net";
SnippetsHasher visitor(seed, name);
std::shared_ptr<ov::Model> m(f);  // workaround for a compilation error: on_attribute() does not accept f directly
visitor.on_attribute(name, m);
m_hash = seed;
// Return false because we didn't change OpenVINO Model
return false;
}
Hash::Hash(uint64_t& output_hash_value) : m_hash(output_hash_value) {}
} // namespace pass
} // namespace snippets
} // namespace ov
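A small illustration (not from the commit) of why the boost-style combiner suits this pass: it is order-sensitive, so mixing the same attribute values in a different graph order yields a different digest:

#include <cstdint>
#include <functional>
#include <iostream>

// Same boost-style formula as used above.
static uint64_t hash_combine(uint64_t seed, uint64_t v) {
    return seed ^= std::hash<uint64_t>{}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

int main() {
    const uint64_t a = hash_combine(hash_combine(0, 1), 2);
    const uint64_t b = hash_combine(hash_combine(0, 2), 1);
    std::cout << (a != b) << "\n";  // expected 1: the order of combination matters
}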

View File

@@ -34,6 +34,8 @@
#include "transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp"
#include "transformations/defs.hpp"
#include "shape_inference/custom/subgraph.hpp"
+#include <common/primitive_hashing_utils.hpp>
+#include "snippets/pass/hash.hpp"
using namespace InferenceEngine;
using namespace dnnl::impl::utils;
@@ -44,35 +46,138 @@ using namespace Xbyak;
namespace ov {
namespace intel_cpu {
namespace node {
namespace {
struct SnippetKey {
Snippet::SnippetAttrs attrs;
size_t hash() const;
bool operator==(const SnippetKey& rhs) const;
};
size_t SnippetKey::hash() const {
using namespace dnnl::impl;
using namespace dnnl::impl::primitive_hashing;
size_t seed = 0;
for (const auto& blockedDim : attrs.inMemBlockedDims)
seed = get_vector_hash(seed, blockedDim);
for (const auto& order : attrs.inMemOrders)
seed = get_vector_hash(seed, order);
for (const auto& prec : attrs.inMemPrecs)
seed = hash_combine(seed, prec.getPrecVal());
for (const auto& blockedDim : attrs.outMemBlockedDims)
seed = get_vector_hash(seed, blockedDim);
for (const auto& order : attrs.outMemOrders)
seed = get_vector_hash(seed, order);
for (const auto& prec : attrs.outMemPrecs)
seed = hash_combine(seed, prec.getPrecVal());
seed = hash_combine(seed, attrs.bodyHash);
return seed;
}
bool SnippetKey::operator==(const SnippetKey& rhs) const {
if (attrs.bodyHash != rhs.attrs.bodyHash)
return false;
if (attrs.inMemBlockedDims.size() != rhs.attrs.inMemBlockedDims.size() ||
attrs.inMemOrders.size() != rhs.attrs.inMemOrders.size() ||
attrs.inMemPrecs.size() != rhs.attrs.inMemPrecs.size())
return false;
if (attrs.outMemBlockedDims.size() != rhs.attrs.outMemBlockedDims.size() ||
attrs.outMemOrders.size() != rhs.attrs.outMemOrders.size() ||
attrs.outMemPrecs.size() != rhs.attrs.outMemPrecs.size())
return false;
for (size_t i = 0; i < attrs.inMemBlockedDims.size(); i++) {
if (!(attrs.inMemBlockedDims[i] == rhs.attrs.inMemBlockedDims[i]))
return false;
}
for (size_t i = 0; i < attrs.outMemBlockedDims.size(); i++) {
if (!(attrs.outMemBlockedDims[i] == rhs.attrs.outMemBlockedDims[i]))
return false;
}
for (size_t i = 0; i < attrs.inMemOrders.size(); i++) {
if (!(attrs.inMemOrders[i] == rhs.attrs.inMemOrders[i]))
return false;
}
for (size_t i = 0; i < attrs.outMemOrders.size(); i++) {
if (!(attrs.outMemOrders[i] == rhs.attrs.outMemOrders[i]))
return false;
}
for (size_t i = 0; i < attrs.inMemPrecs.size(); i++) {
if (!(attrs.inMemPrecs[i] == rhs.attrs.inMemPrecs[i]))
return false;
}
for (size_t i = 0; i < attrs.outMemPrecs.size(); i++) {
if (!(attrs.outMemPrecs[i] == rhs.attrs.outMemPrecs[i]))
return false;
}
return true;
}
snippets::op::Subgraph::BlockedShapeVector getBlockedShapes(const std::vector<std::vector<size_t>>& memBlockedDims,
const std::vector<std::vector<size_t>>& memOrders, const std::vector<InferenceEngine::Precision>& memPrecs) {
size_t numShapes = memBlockedDims.size();
if (memOrders.size() != numShapes || memPrecs.size() != numShapes)
IE_THROW(Unexpected) << "Number of shapes is mismatched for dimensions, orders and precisions";
snippets::op::Subgraph::BlockedShapeVector blockedShapes(numShapes);
for (size_t i = 0; i < numShapes; i++) {
size_t dimSize = memBlockedDims[i].size();
std::vector<Dimension> dims(dimSize);
for (size_t j = 0; j < dimSize; j++) {
dims[j] = memBlockedDims[i][j];
}
ov::PartialShape shape(dims);
ov::AxisVector order(memOrders[i]);
ov::element::Type precision = InferenceEngine::details::convertPrecision(memPrecs[i]);
blockedShapes[i] = snippets::op::Subgraph::BlockedShape{shape, order, precision};
}
return blockedShapes;
}
} // namespace
Snippet::Snippet(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
-   : Node(op, context, SnippetShapeInferFactory(this)) {
+   : Node(op, context, SnippetShapeInferFactory(op)) {
    host_isa = dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) ?
        dnnl::impl::cpu::x64::avx512_core : dnnl::impl::cpu::x64::avx2;
    original_snippet = ov::as_type_ptr<snippets::op::Subgraph>(op);
    if (!original_snippet) {
        IE_THROW(NotImplemented) << "Node is not an instance of snippets::op::Subgraph";
    }
+   init_body_hash();
+   is_dynamic = isDynamicNgraphNode(op);
}

-void Snippet::copy_snippet() {
+void Snippet::copy_snippet() const {
    ov::OutputVector subgraph_node_inputs;
    for (const auto &input : original_snippet->input_values()) {
        auto new_input = std::make_shared<ov::opset1::Parameter>(input.get_element_type(), input.get_partial_shape());
        subgraph_node_inputs.push_back(new_input);
    }
    std::shared_ptr<ov::Model> new_body = original_snippet->body_ptr()->clone();
-   snippet = std::make_shared<snippets::op::Subgraph>(subgraph_node_inputs, new_body);
-   ov::copy_runtime_info(original_snippet, snippet);
-   snippet->set_friendly_name(original_snippet->get_friendly_name());
+   snippetAttrs.snippet = std::make_shared<snippets::op::Subgraph>(subgraph_node_inputs, new_body);
+   ov::copy_runtime_info(original_snippet, snippetAttrs.snippet);
+   snippetAttrs.snippet->set_friendly_name(original_snippet->get_friendly_name());
#if defined(OPENVINO_ARCH_X86_64)
-   snippet->set_generator(std::make_shared<CPUGenerator>(host_isa));
-   isa_num_lanes = snippet->get_generator()->get_target_machine()->get_lanes();
+   snippetAttrs.snippet->set_generator(std::make_shared<CPUGenerator>(host_isa));
#else
    IE_THROW(NotImplemented) << "CPU plugin: code-generation is not supported on non-x64 platforms";
#endif // OPENVINO_ARCH_X86_64
}

+void Snippet::init_body_hash() {
+   uint64_t seed = 0;
+   ov::snippets::pass::Hash hash_function(seed);
+   hash_function.run_on_model(original_snippet->body_ptr());
+   snippetAttrs.bodyHash = seed;
+}
void Snippet::initSupportedPrimitiveDescriptors() {
    copy_snippet();
    if (!supportedPrimitiveDescriptors.empty())
@@ -90,12 +195,12 @@ void Snippet::initSupportedPrimitiveDescriptors() {
    const size_t ndims = outputShapes[0].getRank();
    // Domain sensitive operations support only Planar layout
-   const bool isOnlyPlanarApplicable = snippet->has_domain_sensitive_ops();
+   const bool isOnlyPlanarApplicable = snippetAttrs.snippet->has_domain_sensitive_ops();
    const bool isChannelsFirstApplicable = dnnl::impl::utils::one_of(ndims, 1u, 2u, 3u, 4u, 5u) && dimRanksAreEqual && !isOnlyPlanarApplicable;
    // Todo: Snippets currently don't support per-channel broadcasting of Blocked descriptors because
    // canonicalization can't distinguish between <N, C, H, W, c> and <N, C, D, H, W> cases.
    // See snippets::op::Subgraph::canonicalize for details.
-   bool isBlockedApplicable = dnnl::impl::utils::one_of(ndims, 4u, 5u) && dimRanksAreEqual && !isOnlyPlanarApplicable;
+   bool isBlockedApplicable = dnnl::impl::utils::one_of(ndims, 3u, 4u, 5u) && dimRanksAreEqual && !isOnlyPlanarApplicable;
    for (const auto& inShape : inputShapes) {
        if (isDynamic && inShape.getRank() != 1)
@@ -153,7 +258,7 @@ void Snippet::initSupportedPrimitiveDescriptors() {
        const auto originalInputPrecision = getOriginalInputPrecisionAtPort(i);
        const auto precision = ((originalInputPrecision == InferenceEngine::Precision::FP32) &&
                context->getConfig().inferencePrecision == ov::element::bf16 &&
-               snippet->has_domain_sensitive_ops()) ?
+               snippetAttrs.snippet->has_domain_sensitive_ops()) ?
            static_cast<InferenceEngine::Precision>(InferenceEngine::Precision::BF16) :
            originalInputPrecision;
        if (supportedPrecisions.count(precision) == 0)
@@ -208,6 +313,33 @@
void Snippet::selectOptimalPrimitiveDescriptor() {
    selectPreferPrimitiveDescriptor(getImplPriority(), true);
}
void Snippet::initOptimalPrimitiveDescriptor() {
Node::initOptimalPrimitiveDescriptor();
    // memory order and precision are determined now; there is no need to prepare them again for each dynamic shape.
const auto config = getSelectedPrimitiveDescriptor()->getConfig();
inputNum = config.inConfs.size();
snippetAttrs.inMemPrecs.resize(inputNum);
snippetAttrs.inMemOrders.resize(inputNum);
for (size_t i = 0; i < inputNum; i++) {
const auto& memDesc = config.inConfs[i].getMemDesc();
snippetAttrs.inMemPrecs[i] = memDesc->getPrecision();
snippetAttrs.inMemOrders[i] = memDesc->as<BlockedMemoryDesc>()->getOrder();
}
outputNum = config.outConfs.size();
snippetAttrs.outMemPrecs.resize(outputNum);
snippetAttrs.outMemOrders.resize(outputNum);
for (size_t i = 0; i < outputNum; i++) {
snippetAttrs.outMemPrecs[i] = config.outConfs[i].getMemDesc()->getPrecision();
snippetAttrs.outMemOrders[i] = config.outConfs[i].getMemDesc()->as<BlockedMemoryDesc>()->getOrder();
}
// reserve fixed size.
snippetAttrs.inMemBlockedDims.resize(inputNum);
snippetAttrs.outMemBlockedDims.resize(outputNum);
srcMemPtrs.resize(inputNum);
dstMemPtrs.resize(outputNum);
}
InferenceEngine::Precision Snippet::getRuntimePrecision() const {
    std::vector<InferenceEngine::Precision> inputPrecisions;
    for (size_t i = 0; i < getParentEdges().size(); i++) {
@@ -220,7 +352,280 @@ InferenceEngine::Precision Snippet::getRuntimePrecision() const {
    return getMaxPrecision(inputPrecisions);
}

void Snippet::prepareParams() {
for (size_t i = 0; i < inputNum; i++)
snippetAttrs.inMemBlockedDims[i] = getParentEdgesAtPort(i)[0]->getMemory().getDescWithType<BlockedMemoryDesc>()->getBlockDims();
for (size_t i = 0; i < outputNum; i++)
snippetAttrs.outMemBlockedDims[i] = getChildEdgesAtPort(i)[0]->getMemory().getDescWithType<BlockedMemoryDesc>()->getBlockDims();
SnippetKey key = {snippetAttrs};
auto builder = [this](const SnippetKey& key) -> std::shared_ptr<SnippetExecutor> {
std::shared_ptr<SnippetExecutor> executor = std::make_shared<SnippetJitExecutor>(key.attrs, is_canonicalized,
is_dynamic, context->getConfig().inferencePrecision == ov::element::bf16);
is_canonicalized = true;
return executor;
};
auto cache = context->getParamsCache();
auto result = cache->getOrCreate(key, builder);
execPtr = result.first;
if (!execPtr) {
IE_THROW() << "Executor is not created for node " << getName() << ".";
}
}
bool Snippet::needPrepareParams() const {
auto jit_executor = dynamic_cast<SnippetJitExecutor*>(execPtr.get());
return inputShapesModified() || (jit_executor && !jit_executor->schedule_created());
}
bool Snippet::canBeInPlace() const {
if (isDynamic || getParentEdgesAtPort(0)[0]->getParent()->getType() == Type::Input) {
return false;
}
if (getChildEdges().size() != 1) {
return false;
}
for (auto& parentEdge : getParentEdges()) {
auto parent = parentEdge.lock()->getParent();
if (parent->getChildEdges().size() != 1)
return false;
// WA to prevent memory corruption caused by inplace feature
if (parent->getType() == Type::Concatenation) {
for (auto& parentParentEdge : parent->getParentEdges()) {
auto parentParent = parentParentEdge.lock()->getParent();
if (parentParent->getChildEdges().size() != 1)
return false;
}
}
}
return getInputShapeAtPort(0) == getOutputShapeAtPort(0);
}
bool Snippet::created() const {
return getType() == Type::Subgraph;
}
void Snippet::execute(dnnl::stream strm) {
if (!execPtr) {
IE_THROW() << "Can't execute Subgraph node. Primitive didn't created";
}
for (size_t i = 0; i < inputNum; i++)
srcMemPtrs[i] = getParentEdgeAt(i)->getMemoryPtr();
for (size_t i = 0; i < outputNum; i++)
dstMemPtrs[i] = getChildEdgeAt(i)->getMemoryPtr();
execPtr->exec(srcMemPtrs, dstMemPtrs);
}
void Snippet::executeDynamicImpl(dnnl::stream strm) {
execute(strm);
}
void Snippet::SnippetJitExecutor::exec(const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs) {
if (schedule.ptr == nullptr) {
IE_THROW() << "Snippet can't use Optimized implementation and can't fallback to reference";
}
auto initStartMemoryOffsets = [this, &inMemPtrs, &outMemPtrs]() {
for (size_t i = 0; i < numInput; i++) {
start_offset_in[i] = inMemPtrs[i]->getDescWithType<BlockedMemoryDesc>()->getOffsetPadding() * dataSize[i];
}
for (size_t i = 0; i < numOutput; i++) {
start_offset_out[i] = outMemPtrs[i]->getDescWithType<BlockedMemoryDesc>()->getOffsetPadding() * dataSize[i + numInput];
}
};
// initialize start offsets to src and dst memory
    // Needs to be done for every inference, as data memory ptrs could have been updated
initStartMemoryOffsets();
if (tensorRank == rank6D) {
schedule_6d(inMemPtrs, outMemPtrs);
} else {
schedule_nt(inMemPtrs, outMemPtrs);
}
}
void Snippet::SnippetJitExecutor::update_ptrs(jit_snippets_call_args& call_args,
const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs) {
for (size_t i = 0; i < inMemPtrs.size(); i++)
call_args.src_ptrs[i] = reinterpret_cast<const uint8_t*>(inMemPtrs[i]->getData()) + start_offset_in[i];
for (size_t i = 0; i < outMemPtrs.size(); i++)
call_args.dst_ptrs[i] = reinterpret_cast<uint8_t*>(outMemPtrs[i]->getData()) + start_offset_out[i];
if (buffer_scratchpad_size > 0) {
call_args.buffer_scratchpad_ptr =
reinterpret_cast<uint8_t*>(buffer_scratchpad.data()) + parallel_get_thread_num() * buffer_scratchpad_size;
}
}
void Snippet::SnippetJitExecutor::schedule_6d(const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs) {
const auto& dom = exec_domain;
// < N, C, H, W > < 1, 1, N, C*H*W>
parallel_for5d(dom[0], dom[1], dom[2], dom[3], dom[4],
[&](int64_t d0, int64_t d1, int64_t d2, int64_t d3, int64_t d4) {
int64_t indexes[] = {d0, d1, d2, d3, d4};
jit_snippets_call_args call_args;
update_ptrs(call_args, inMemPtrs, outMemPtrs);
schedule.get_callable<kernel>()(indexes, &call_args);
});
}
void Snippet::SnippetJitExecutor::schedule_nt(const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs) {
const auto& work_size = exec_domain;
parallel_nt(0, [&](const int ithr, const int nthr) {
jit_snippets_call_args call_args;
update_ptrs(call_args, inMemPtrs, outMemPtrs);
size_t start = 0, end = 0;
splitter(harnessWorkAmount, nthr, ithr, start, end);
std::vector<int64_t> indexes(work_size.size() - 1, 0);
for (size_t iwork = start; iwork < end; ++iwork) {
size_t tmp = iwork;
for (ptrdiff_t j = work_size.size() - 2; j >= 0; j--) {
indexes[j] = tmp % work_size[j];
tmp /= work_size[j];
}
schedule.get_callable<kernel>()(indexes.data(), &call_args);
}
});
}
Snippet::SnippetExecutor::SnippetExecutor(const SnippetAttrs& attrs, bool is_canonicalized, bool is_dynamic, bool enforceBF16)
: snippetAttrs(attrs), is_canonicalized(is_canonicalized), is_dynamic(is_dynamic), enforceBF16(enforceBF16) {}
Snippet::SnippetJitExecutor::SnippetJitExecutor(const SnippetAttrs& attrs, bool is_canonicalized, bool is_dynamic, bool enforceBF16) :
SnippetExecutor(attrs, is_canonicalized, is_dynamic, enforceBF16) {
numInput = snippetAttrs.inMemBlockedDims.size();
numOutput = snippetAttrs.outMemBlockedDims.size();
start_offset_in.resize(numInput);
start_offset_out.resize(numOutput);
auto local_copy = [this]() {
ov::OutputVector subgraph_node_inputs;
for (size_t i = 0; i < numInput; i++) {
const auto paramShape = snippetAttrs.snippet->body_ptr()->get_parameters()[i]->get_shape();
const auto paramType = snippetAttrs.snippet->body_ptr()->get_parameters()[i]->get_element_type();
auto new_input = std::make_shared<ov::opset1::Parameter>(paramType, paramShape);
subgraph_node_inputs.push_back(new_input);
}
std::shared_ptr<ov::Model> new_body = snippetAttrs.snippet->body_ptr()->clone();
snippet_for_generation = std::make_shared<ov::snippets::op::Subgraph>(subgraph_node_inputs, new_body);
ov::copy_runtime_info(snippetAttrs.snippet, snippet_for_generation);
snippet_for_generation->set_friendly_name(snippetAttrs.snippet->get_friendly_name());
auto host_isa = dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) ?
dnnl::impl::cpu::x64::avx512_core : dnnl::impl::cpu::x64::avx2;
snippet_for_generation->set_generator(std::make_shared<CPUGenerator>(host_isa));
};
    // is_canonicalized == true means we just reshape the already-canonicalized body with the new input shapes and get the updated master shape;
    // false means full canonicalization, determining master_shape on snippetAttrs.snippet.
ov::PartialShape canonicalShape = canonicalizeBody(is_canonicalized);
if (is_dynamic) {
        // We need a local copy of the snippet for generation, since generation may adjust it based on the current input shapes.
        // Such adjustments (e.g. an inserted BroadcastMove) may be incompatible with new input shapes of a dynamic node.
local_copy();
} else {
snippet_for_generation = snippetAttrs.snippet;
}
// initialize by maximum output dimension. Dimensions of outputs should be broadcastable
tensorRank = std::max(static_cast<size_t>(rank6D), canonicalShape.size());
auto initDataSizes = [this]() {
dataSize.resize(numInput + numOutput);
for (size_t i = 0; i < numInput; i++)
dataSize[i] = snippetAttrs.inMemPrecs[i].size();
for (size_t i = 0; i < numOutput; i++)
dataSize[i + numInput] = snippetAttrs.outMemPrecs[i].size();
};
initDataSizes();
if (canonicalShape.is_dynamic())
IE_THROW() << "Snippets: Canonicalization returned dynamic shape in static pipeline";
masterShape = canonicalShape.get_shape();
const auto &body = snippet_for_generation->body_ptr();
normInputShapes.clear();
for (const auto& p : body->get_parameters())
normInputShapes.emplace_back(p->get_output_shape(0));
normOutputShapes.clear();
for (const auto& r : body->get_results())
normOutputShapes.emplace_back(r->get_input_shape(0));
// prepare
masterShape = getNormalizedDimsBySize(masterShape, tensorRank);
std::vector<size_t> original_input_shape_ranks;
for (auto& pshape : normInputShapes) {
original_input_shape_ranks.push_back(pshape.size());
pshape = getNormalizedDimsBySize(pshape, tensorRank);
}
for (auto& pshape : normOutputShapes)
pshape = getNormalizedDimsBySize(pshape, tensorRank);
tileRank = 1;
bool dims_collapsed = false;
fullWorkAmount = std::accumulate(masterShape.begin(), masterShape.end(), 1, std::multiplies<size_t>());
if (snippet_for_generation->has_domain_sensitive_ops()) {
tileRank = 2;
} else {
dims_collapsed = optimizeExecDomain(normInputShapes, normOutputShapes, masterShape, tileRank);
}
exec_domain = masterShape;
std::vector<size_t> scheduler_work_amounts;
// rename schedulerWorkAmount to harnessWorkAmount?
harnessWorkAmount = fullWorkAmount;
const auto rank = exec_domain.size();
for (auto i = rank - tileRank; i < rank; i++) {
auto& dim = exec_domain[i];
harnessWorkAmount /= dim;
scheduler_work_amounts.push_back(dim);
dim = 1;
}
if (dims_collapsed) {
std::vector<ov::Shape> new_shapes;
for (size_t i = 0; i < normInputShapes.size(); i++) {
const auto norm_shape = normInputShapes[i];
size_t ndims_to_skip = norm_shape.size() - original_input_shape_ranks[i];
new_shapes.emplace_back(norm_shape.begin() + ndims_to_skip, norm_shape.end());
}
snippet_for_generation->reshape_body(new_shapes);
}
snippet_for_generation->set_master_shape(ov::PartialShape(masterShape));
snippet_for_generation->set_tile_rank(tileRank);
// generate
jit_snippets_compile_args jcp;
jcp.master_shape = masterShape;
jcp.tile_rank = tileRank;
generate(&jcp);
buffer_scratchpad_size = snippet_for_generation->get_buffer_scratchpad_size();
buffer_scratchpad.resize(buffer_scratchpad_size * parallel_get_max_threads(), 0);
}
ov::PartialShape Snippet::SnippetJitExecutor::canonicalizeBody(bool reshape) {
ov::snippets::op::Subgraph::BlockedShapeVector input_blocked_shapes = getBlockedShapes(
snippetAttrs.inMemBlockedDims, snippetAttrs.inMemOrders, snippetAttrs.inMemPrecs);
if (reshape) {
const auto& canonicalShape = snippetAttrs.snippet->canonicalized_body_shape_infer(input_blocked_shapes);
return canonicalShape;
} else {
ov::snippets::op::Subgraph::BlockedShapeVector output_blocked_shapes = getBlockedShapes(
snippetAttrs.outMemBlockedDims, snippetAttrs.outMemOrders, snippetAttrs.outMemPrecs);
const auto& canonicalShape = snippetAttrs.snippet->canonicalize(output_blocked_shapes, input_blocked_shapes);
return canonicalShape;
}
}
-bool Snippet::optimizeExecDomain(std::vector<VectorDims>& inputShapes, std::vector<VectorDims>& outputShapes,
+bool Snippet::SnippetJitExecutor::optimizeExecDomain(std::vector<VectorDims>& inputShapes, std::vector<VectorDims>& outputShapes,
        VectorDims &domain, size_t& TileRank) const {
    const size_t minimalConcurrency = parallel_get_max_threads();
    const size_t minimalJitWorkAmount = 256;
@@ -287,240 +692,11 @@ bool Snippet::optimizeExecDomain(std::vector<VectorDims>& inputShapes, std::vector<VectorDims>& outputShapes,
    };
    return findDimsToCollapse();
}
ov::PartialShape Snippet::canonicalizeBody() {
auto edgeToBlockedShape = [](const EdgePtr& edge) {
const auto blockedDesc = edge->getMemory().getDescWithType<BlockedMemoryDesc>();
std::vector<Dimension> dims;
// if blockDim == Shape::UNDEFINED_DIM, then it's a dynamic dimension, and we need to recreate a proper dynamic Dim
for (const auto& d : blockedDesc->getBlockDims())
dims.emplace_back(d == Shape::UNDEFINED_DIM ? -1 : d);
ov::PartialShape shape(dims);
ov::AxisVector blocking(blockedDesc->getOrder());
ov::element::Type precision = InferenceEngine::details::convertPrecision(blockedDesc->getPrecision());
return snippets::op::Subgraph::BlockedShape{shape, blocking, precision};
};
inputShapeIsBlocked.resize(inputShapes.size(), false);
masterShapeIsBlocked = false;
snippets::op::Subgraph::BlockedShapeVector input_blocked_shapes;
for (size_t i = 0; i < inputShapes.size(); i++) {
auto blockedShape = edgeToBlockedShape(getParentEdgesAtPort(i)[0]);
inputShapeIsBlocked[i] = std::get<0>(blockedShape).size() != std::get<1>(blockedShape).size();
masterShapeIsBlocked = masterShapeIsBlocked || inputShapeIsBlocked[i];
input_blocked_shapes.push_back(blockedShape);
}
    outputShapeIsBlocked.resize(outputShapes.size(), false);
ov::snippets::op::Subgraph::BlockedShapeVector output_blocked_shapes;
for (size_t i = 0; i < outputShapes.size(); i++) {
auto blockedShape = edgeToBlockedShape(getChildEdgesAtPort(i)[0]);
outputShapeIsBlocked[i] = std::get<0>(blockedShape).size() != std::get<1>(blockedShape).size();
output_blocked_shapes.push_back(blockedShape);
}
const auto& canonicalShape = snippet->canonicalize(output_blocked_shapes, input_blocked_shapes);
return canonicalShape;
}
void Snippet::createPrimitive() {
// determine canonicalize, determine master_shape and prepend up to 6D
// NB! normInputShapes are updated, so body reshape might be needed
const auto& canonicalShape = canonicalizeBody();
// initialize by maximum output dimension. Dimensions of outputs should be broadcastable
tensorRank = std::max(static_cast<size_t>(rank6D), canonicalShape.size());
const auto config = getSelectedPrimitiveDescriptor()->getConfig();
auto initDataSizes = [this, config]() {
const size_t numInputs = inputShapes.size();
const size_t numOutputs = outputShapes.size();
dataSize.resize(numInputs + numOutputs);
for (size_t i = 0; i < numInputs; i++)
dataSize[i] = config.inConfs[i].getMemDesc()->getPrecision().size();
for (size_t i = 0; i < numOutputs; i++)
dataSize[i + numInputs] = config.outConfs[i].getMemDesc()->getPrecision().size();
};
initDataSizes();
jit_snippets_compile_args jcp;
if (canonicalShape.is_dynamic())
IE_THROW() << "Snippets: Canonicalization returned dynamic shape in static pipeline";
masterShape = canonicalShape.get_shape();
const auto &body = snippet->body_ptr();
for (const auto& p : body->get_parameters())
normInputShapes.emplace_back(p->get_output_shape(0));
for (const auto& r : body->get_results())
normOutputShapes.emplace_back(r->get_input_shape(0));
prepareParams();
jcp.master_shape = masterShape;
jcp.tile_rank = tileRank;
generate(&jcp);
buffer_scratchpad_size = snippet->get_buffer_scratchpad_size();
buffer_scratchpad.resize(buffer_scratchpad_size * parallel_get_max_threads(), 0);
}
std::vector<VectorDims> Snippet::shapeInfer() {
// todo: it's very strange that we don't have broadcast_merge_into for cpu shapes
auto broadcast_merge = [](VectorDims& dst, const VectorDims& src){
// Ranks are both static.
auto dst_rank = dst.size();
auto src_rank = src.size();
const auto new_rank = std::max(dst_rank, src_rank);
dst.insert(dst.begin(), new_rank - dst_rank, 1);
std::vector<Dimension> dims(new_rank);
bool success = true;
for (size_t i = 0; i < new_rank; i++) {
auto dsti = i < (new_rank - dst_rank) ? 1 : dst[i - (new_rank - dst_rank)];
auto srci = i < (new_rank - src_rank) ? 1 : src[i - (new_rank - src_rank)];
if (dsti != srci && srci != Shape::UNDEFINED_DIM) {
if (dsti == 1 || dsti == Shape::UNDEFINED_DIM) {
dsti = srci;
} else {
success = false;
}
}
}
return success;
};
for (size_t i = 0; i < getParentEdges().size(); i++) {
VectorDims inDims {getParentEdgesAtPort(i)[0]->getMemory().getShape().getDims()};
if (masterShapeIsBlocked && !inputShapeIsBlocked[i])
inDims.insert(inDims.end(), 1);
// todo: this is a simple master_shape inference for shape-agnostic operations,
// we'll need to account for body operations semantics in the future
if (i == 0)
masterShape = inDims;
else
broadcast_merge(masterShape, inDims);
normInputShapes[i] = std::move(inDims);
}
if (std::any_of(masterShape.begin(), masterShape.end(), [](const Dim& d){ return d == Shape::UNDEFINED_DIM;})) {
std::ostringstream errorMessage;
errorMessage << "Can't compute static master shape for Snippet node with name: " << getName();
errorMessage << ". Input shapes = ( ";
for (size_t i = 0; i < getParentEdges().size(); i++) {
errorMessage << i << " port = " << getParentEdgesAtPort(i)[0]->getMemory().getShape().toString() << ", ";
}
errorMessage << "). Master shape = ( " << Shape(masterShape).toString() << " )";
IE_THROW() << errorMessage.str();
}
if (normOutputShapes.size() == 1) {
normOutputShapes[0] = masterShape;
return {masterShape};
}
std::vector<VectorDims> outputDims;
std::vector<ov::Shape> new_shapes;
for (const auto& s : normInputShapes)
new_shapes.emplace_back(s);
const auto& outputShapes = snippet->reshape_body(new_shapes);
for (size_t i = 0; i < outputShapes.size(); i++)
normOutputShapes[i] = outputShapes[i];
return normOutputShapes;
}
void Snippet::prepareParams() {
masterShape = getNormalizedDimsBySize(masterShape, tensorRank);
std::vector<size_t> original_input_shape_ranks;
for (auto& pshape : normInputShapes) {
original_input_shape_ranks.push_back(pshape.size());
pshape = getNormalizedDimsBySize(pshape, tensorRank);
}
for (auto& pshape : normOutputShapes)
pshape = getNormalizedDimsBySize(pshape, tensorRank);
tileRank = 1;
bool dims_collapsed = false;
fullWorkAmount = std::accumulate(masterShape.begin(), masterShape.end(), 1, std::multiplies<size_t>());
if (snippet->has_domain_sensitive_ops()) {
tileRank = 2;
} else {
dims_collapsed = optimizeExecDomain(normInputShapes, normOutputShapes, masterShape, tileRank);
}
exec_domain = masterShape;
auto initStartMemoryOffsets = [this]() {
const auto config = getSelectedPrimitiveDescriptor()->getConfig();
const size_t numInputs = inputShapes.size();
start_offset_in.resize(numInputs);
srcMemPtrs.resize(numInputs);
for (size_t i = 0; i < numInputs; i++) {
const auto memPtr = getParentEdgeAt(i)->getMemoryPtr();
srcMemPtrs[i] = memPtr;
start_offset_in[i] = memPtr->getDescWithType<BlockedMemoryDesc>()->getOffsetPadding() * dataSize[i];
}
const size_t numOutputs = outputShapes.size();
start_offset_out.resize(numOutputs);
dstMemPtrs.resize(numOutputs);
for (size_t i = 0; i < numOutputs; i++) {
const auto memPtr = getChildEdgeAt(i)->getMemoryPtr();
dstMemPtrs[i] = memPtr;
start_offset_out[i] = memPtr->getDescWithType<BlockedMemoryDesc>()->getOffsetPadding() * dataSize[i + numInputs];
}
};
// initialize start offsets to src and dst memory
    // Needs to be done for every set of input shapes since memory ptrs could have been updated
initStartMemoryOffsets();
std::vector<size_t> scheduler_work_amounts;
// rename schedulerWorkAmount to harnessWorkAmount?
harnessWorkAmount = fullWorkAmount;
const auto rank = exec_domain.size();
for (auto i = rank - tileRank; i < rank; i++) {
auto& dim = exec_domain[i];
harnessWorkAmount /= dim;
scheduler_work_amounts.push_back(dim);
dim = 1;
}
if (dims_collapsed) {
std::vector<ov::Shape> new_shapes;
for (size_t i = 0; i < normInputShapes.size(); i++) {
const auto norm_shape = normInputShapes[i];
size_t ndims_to_skip = norm_shape.size() - original_input_shape_ranks[i];
new_shapes.emplace_back(norm_shape.begin() + ndims_to_skip, norm_shape.end());
}
snippet->reshape_body(new_shapes);
}
snippet->set_master_shape(ov::PartialShape(masterShape));
snippet->set_tile_rank(tileRank);
}
bool Snippet::needPrepareParams() const {
return inputShapesModified() || !schedule.ptr;
}
bool Snippet::canBeInPlace() const {
if (isDynamic || getParentEdgesAtPort(0)[0]->getParent()->getType() == Type::Input) {
return false;
}
if (getChildEdges().size() != 1) {
return false;
}
for (auto& parentEdge : getParentEdges()) {
auto parent = parentEdge.lock()->getParent();
if (parent->getChildEdges().size() != 1)
return false;
// WA to prevent memory corruption caused by inplace feature
if (parent->getType() == Type::Concatenation) {
for (auto& parentParentEdge : parent->getParentEdges()) {
auto parentParent = parentParentEdge.lock()->getParent();
if (parentParent->getChildEdges().size() != 1)
return false;
}
}
}
return getInputShapeAtPort(0) == getOutputShapeAtPort(0);
}
bool Snippet::created() const {
return getType() == Type::Subgraph;
}
void Snippet::generate(const jit_snippets_compile_args* jcp) {
ov::pass::Manager pre_dialect; ov::pass::Manager pre_dialect;
pre_dialect.register_pass<ConvertToSwishCPU>(); pre_dialect.register_pass<ConvertToSwishCPU>();
if (context->getConfig().inferencePrecision == ov::element::bf16 && snippet->has_domain_sensitive_ops()) { if (enforceBF16 && snippet_for_generation->has_domain_sensitive_ops()) {
// enforce BF16 precisions to supported operations // enforce BF16 precisions to supported operations
// MatMul has to be decomposed to Brgemm operations before enforcement // MatMul has to be decomposed to Brgemm operations before enforcement
// Note, MatMul decomposition will be ran later again for case if BF16 enforcement is not happened // Note, MatMul decomposition will be ran later again for case if BF16 enforcement is not happened
@ -542,7 +718,7 @@ void Snippet::generate(const jit_snippets_compile_args* jcp) {
ov::snippets::lowered::pass::PassPipeline control_flow_pipeline; ov::snippets::lowered::pass::PassPipeline control_flow_pipeline;
CPU_REGISTER_PASS_X64(control_flow_pipeline, ov::intel_cpu::pass::FuseLoadStoreConvert); CPU_REGISTER_PASS_X64(control_flow_pipeline, ov::intel_cpu::pass::FuseLoadStoreConvert);
schedule = snippet->generate( schedule = snippet_for_generation->generate(
pre_dialect, pre_dialect,
post_dialect, post_dialect,
post_precision, post_precision,
@ -551,63 +727,8 @@ void Snippet::generate(const jit_snippets_compile_args* jcp) {
reinterpret_cast<const void*>(jcp));
}
void Snippet::update_ptrs(jit_snippets_call_args& call_args) {
for (size_t i = 0; i < srcMemPtrs.size(); i++)
call_args.src_ptrs[i] = reinterpret_cast<const uint8_t*>(srcMemPtrs[i]->getData()) + start_offset_in[i];
for (size_t i = 0; i < dstMemPtrs.size(); i++)
call_args.dst_ptrs[i] = reinterpret_cast<uint8_t*>(dstMemPtrs[i]->getData()) + start_offset_out[i];
if (buffer_scratchpad_size > 0) {
call_args.buffer_scratchpad_ptr =
reinterpret_cast<uint8_t*>(buffer_scratchpad.data()) + parallel_get_thread_num() * buffer_scratchpad_size;
}
}
void Snippet::execute(dnnl::stream strm) {
if (schedule.ptr == nullptr) {
IE_THROW() << "Snippet can't use Optimized implementation and can't fallback to reference";
}
if (tensorRank == rank6D) {
schedule_6d();
} else {
schedule_nt();
}
}
void Snippet::schedule_6d() {
const auto& dom = exec_domain;
// < N, C, H, W > < 1, 1, N, C*H*W>
parallel_for5d(dom[0], dom[1], dom[2], dom[3], dom[4],
[&](int64_t d0, int64_t d1, int64_t d2, int64_t d3, int64_t d4) {
int64_t indexes[] = {d0, d1, d2, d3, d4};
jit_snippets_call_args call_args;
update_ptrs(call_args);
schedule.get_callable<kernel>()(indexes, &call_args);
});
}
void Snippet::schedule_nt() {
const auto& work_size = exec_domain;
parallel_nt(0, [&](const int ithr, const int nthr) {
jit_snippets_call_args call_args;
update_ptrs(call_args);
size_t start = 0, end = 0;
splitter(harnessWorkAmount, nthr, ithr, start, end);
std::vector<int64_t> indexes(work_size.size() - 1, 0);
for (size_t iwork = start; iwork < end; ++iwork) {
size_t tmp = iwork;
for (ptrdiff_t j = work_size.size() - 2; j >= 0; j--) {
indexes[j] = tmp % work_size[j];
tmp /= work_size[j];
}
schedule.get_callable<kernel>()(indexes.data(), &call_args);
}
});
}
bool Snippet::SnippetJitExecutor::schedule_created() {
return schedule.ptr != nullptr;
}
} // namespace node
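The inner loop of schedule_nt decomposes a flat work-item id into per-dimension indexes (the innermost dimension is excluded because the kernel iterates it itself). A minimal standalone sketch of that decomposition under an assumed work size:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const std::vector<size_t> work_size = {2, 3, 4, 16};  // last dim is handled by the kernel
    std::vector<int64_t> indexes(work_size.size() - 1, 0);
    const size_t harness_work_amount = 2 * 3 * 4;         // product of all but the last dim
    for (size_t iwork = 0; iwork < harness_work_amount; ++iwork) {
        size_t tmp = iwork;
        for (ptrdiff_t j = work_size.size() - 2; j >= 0; j--) {
            indexes[j] = tmp % work_size[j];              // row-major decomposition
            tmp /= work_size[j];
        }
        std::printf("%zu -> [%lld, %lld, %lld]\n", iwork,
                    (long long)indexes[0], (long long)indexes[1], (long long)indexes[2]);
    }
    return 0;
}
```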

View File

@ -30,6 +30,7 @@ public:
void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
void selectOptimalPrimitiveDescriptor() override;
void initOptimalPrimitiveDescriptor() override;
InferenceEngine::Precision getRuntimePrecision() const override;
// to avoid collisions in throughput mode with copy of TypeRelaxed nodes
@ -37,9 +38,7 @@ public:
void setSharedMutex(const std::shared_ptr<std::mutex>& mutex);
// Here we convert to canonical form & jit everything
void prepareParams() override;
bool needPrepareParams() const override;
bool canBeInPlace() const override;
@ -47,6 +46,19 @@ public:
// if generator is set, it would execute generated code otherwise it would fallback to nGraph reference
void execute(dnnl::stream strm) override;
void executeDynamicImpl(dnnl::stream strm) override;
struct SnippetAttrs {
// Local copy of subgraph node for canonization & code generation
std::shared_ptr<snippets::op::Subgraph> snippet;
uint64_t bodyHash;
std::vector<std::vector<size_t>> inMemBlockedDims;
std::vector<std::vector<size_t>> inMemOrders;
std::vector<InferenceEngine::Precision> inMemPrecs;
std::vector<std::vector<size_t>> outMemBlockedDims;
std::vector<std::vector<size_t>> outMemOrders;
std::vector<InferenceEngine::Precision> outMemPrecs;
};
private:
static const size_t rank6D {6};
@ -55,31 +67,71 @@ private:
// Create a deep local copy of the input snippet to perform canonicalization & code generation
// TODO: Probably better to implement a proper copy constructor
void copy_snippet() const;
void init_body_hash();
size_t inputNum = 0;
size_t outputNum = 0;
// Original subgraph node
std::shared_ptr<snippets::op::Subgraph> original_snippet;
mutable std::shared_ptr<snippets::op::Subgraph> local_snippet;
// Holds ISA version used in codeGeneration target
dnnl::impl::cpu::x64::cpu_isa_t host_isa;
std::vector<MemoryPtr> srcMemPtrs = {};
std::vector<MemoryPtr> dstMemPtrs = {};
mutable SnippetAttrs snippetAttrs;
mutable bool is_canonicalized = false;
bool is_dynamic = false;
class SnippetExecutor {
public:
SnippetExecutor(const SnippetAttrs& attrs, bool is_canonicalized, bool is_dynamic, bool enforceBF16);
virtual void exec(const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs) = 0;
virtual ~SnippetExecutor() = default;
protected:
SnippetAttrs snippetAttrs;
bool is_canonicalized = false;
bool is_dynamic = false;
bool enforceBF16 = false;
};
std::shared_ptr<SnippetExecutor> execPtr = nullptr;
class SnippetJitExecutor : public SnippetExecutor {
public:
SnippetJitExecutor(const SnippetAttrs& attrs, bool is_canonicalized, bool is_dynamic, bool enforceBF16);
void exec(const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs) override;
bool schedule_created();
private:
static const size_t rank6D {6};
typedef void (*kernel)(const void *, const void *);
size_t numInput = 0;
size_t numOutput = 0;
ov::PartialShape canonicalizeBody(bool reshape);
// returns true if exec domain was modified
bool optimizeExecDomain(std::vector<VectorDims>&, std::vector<VectorDims>&, VectorDims&, size_t&) const;
void generate(const jit_snippets_compile_args*);
inline void update_ptrs(jit_snippets_call_args&, const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs);
// Evaluates generated snippet using parallel backend
void schedule_6d(const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs);
void schedule_nt(const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs);
std::shared_ptr<snippets::op::Subgraph> snippet_for_generation;
// Holds generated snippet with information about how to schedule it
snippets::Schedule schedule;
// Holds ISA version used in codeGeneration target
dnnl::impl::cpu::x64::cpu_isa_t host_isa;
size_t isa_num_lanes = 0; // number of elements that fit in vector size
// Holds index of output used as execution domain
// it should be compatible with a schedule's work size
std::vector<size_t> exec_domain = {};
@ -91,18 +143,12 @@ private:
size_t harnessWorkAmount = 0;
const size_t maxTileRank = 2;
std::vector<size_t> dataSize = {};
// master shape is mutable since we need to modify it inside const shapeInfer method
mutable VectorDims masterShape = {};
mutable std::vector<VectorDims> normInputShapes = {};
mutable std::vector<VectorDims> normOutputShapes = {};
std::vector<ptrdiff_t> start_offset_in = {};
std::vector<ptrdiff_t> start_offset_out = {};
@ -111,6 +157,7 @@ private:
std::vector<uint8_t> buffer_scratchpad = {};
size_t buffer_scratchpad_size = 0;
};
};
} // namespace node
} // namespace intel_cpu

View File

@ -10,15 +10,69 @@ namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
/* This class implementation is a temporary WA
   TODO: revise the implementation to remove the node reference*/
class SnippetShapeInfer : public ShapeInferEmptyPads {
public:
SnippetShapeInfer(std::shared_ptr<ov::Model> body) : m_body(body) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
auto broadcast_merge = [](VectorDims& dst, const VectorDims& src) {
// Ranks are both static.
auto dst_rank = dst.size();
auto src_rank = src.size();
const auto new_rank = std::max(dst_rank, src_rank);
dst.insert(dst.begin(), new_rank - dst_rank, 1);
for (size_t i = 0; i < new_rank; i++) {
auto srci = i < (new_rank - src_rank) ? 1 : src[i - (new_rank - src_rank)];
if (dst[i] != srci && srci != Shape::UNDEFINED_DIM) {
if (dst[i] == 1 || dst[i] == Shape::UNDEFINED_DIM) {
dst[i] = srci;
} else {
if (srci != 1) {
IE_THROW() << "Got incompatible input shapes in snippets shape infer";
}
}
}
}
};
const size_t out_size = m_body->get_output_size();
if (out_size == 1) {
VectorDims masterShape;
for (size_t i = 0; i < input_shapes.size(); i++) {
if (i == 0)
masterShape = input_shapes[i];
else
broadcast_merge(masterShape, input_shapes[i]);
}
size_t output_rank = m_body->get_output_partial_shape(0).rank().get_length();
if (output_rank > masterShape.size()) {
masterShape.insert(masterShape.begin(), output_rank - masterShape.size(), 1);
}
return {{masterShape}, ShapeInferStatus::success};
} else {
std::vector<VectorDims> outputDims;
std::vector<ov::Shape> new_shapes;
for (const auto& s : input_shapes)
new_shapes.emplace_back(s);
auto& params = m_body->get_parameters();
if (params.size() != input_shapes.size()) {
IE_THROW() << "Got invalid number of input shapes to reshape subgraph body";
}
for (size_t i = 0; i < params.size(); ++i) {
params[i]->set_partial_shape(new_shapes[i]);
}
m_body->validate_nodes_and_infer_types();
for (const auto& res : m_body->get_results()) {
auto& pshape = res->get_input_partial_shape(0);
if (!pshape.is_static()) {
IE_THROW() << "Subgraph inferred dynamic output shape during reshape with static inputs";
}
outputDims.emplace_back(pshape.get_shape());
}
return {outputDims, ShapeInferStatus::success};
}
}
port_mask_t get_port_mask() const override {
@ -26,18 +80,21 @@ public:
}
private:
std::shared_ptr<ov::Model> m_body;
};
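For reference, a standalone sketch of the broadcasting rule that broadcast_merge implements, simplified by omitting the UNDEFINED_DIM handling (the shapes in main are illustrative):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <stdexcept>
#include <vector>

using VectorDims = std::vector<size_t>;

VectorDims broadcast_merge(VectorDims dst, const VectorDims& src) {
    const auto src_rank = src.size();
    const auto new_rank = std::max(dst.size(), src_rank);
    dst.insert(dst.begin(), new_rank - dst.size(), 1);   // align ranks by prepending 1s
    for (size_t i = 0; i < new_rank; i++) {
        const size_t srci = i < (new_rank - src_rank) ? 1 : src[i - (new_rank - src_rank)];
        if (dst[i] == 1)
            dst[i] = srci;                               // 1 broadcasts to anything
        else if (srci != 1 && srci != dst[i])
            throw std::runtime_error("incompatible input shapes");
    }
    return dst;
}

int main() {
    const auto master = broadcast_merge({16, 35}, {1, 5, 1, 35});
    for (auto d : master)
        std::printf("%zu ", d);                          // prints: 1 5 16 35
    return 0;
}
```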
class SnippetShapeInferFactory : public ShapeInferFactory {
public:
SnippetShapeInferFactory(const std::shared_ptr<ov::Node>& op) {
auto subgraph = ov::as_type_ptr<snippets::op::Subgraph>(op);
snippet_body = subgraph->body_ptr()->clone();
}
ShapeInferPtr makeShapeInfer() const override {
return std::make_shared<SnippetShapeInfer>(snippet_body);
}
private:
std::shared_ptr<ov::Model> snippet_body = nullptr;
};
} // namespace node
} // namespace intel_cpu

View File

@ -667,10 +667,14 @@ void Transformations::MainSnippets(void) {
}, snippets::pass::ExtractReshapesFromMHA);
CPU_SET_CALLBACK_X64(snippetsManager,
[](const std::shared_ptr<const ov::Node>& n) -> bool {
if (n->is_dynamic())
return true;
// CPU Plugin supports Swish in Subgraph via conversion to SwishCPU which assumes second input to be constant
const bool is_unsupported_swish =
ov::is_type<const ov::op::v4::Swish>(n) && n->inputs().size() > 1 &&
!ov::is_type<const ov::op::v0::Constant>(n->get_input_node_shared_ptr(1));
if (is_unsupported_swish)
return true;
// todo: general tokenization flow is not currently supported for these operations.
// they can be tokenized only as a part of complex patterns
const bool is_disabled_tokenization = (ov::is_type<const ov::op::v1::Softmax>(n) ||
@ -679,6 +683,8 @@ void Transformations::MainSnippets(void) {
ov::is_type<const ov::op::v1::Transpose>(n) ||
ov::is_type<const ov::op::v1::Broadcast>(n) ||
ov::is_type<const ov::op::v3::Broadcast>(n));
if (is_disabled_tokenization)
return true;
const auto& inputs = n->inputs();
// todo: clarify whether we can evaluate snippets on const paths
const bool has_only_const_inputs = std::all_of(inputs.begin(), inputs.end(),
@ -686,6 +692,8 @@ void Transformations::MainSnippets(void) {
return ov::is_type<ov::op::v0::Constant>(
in.get_source_output().get_node_shared_ptr());
});
if (has_only_const_inputs)
return true;
// todo: clarify whether we can evaluate snippets on inputs with larger ranks
auto rank_is_too_large = [](const ov::descriptor::Tensor& t) {
// callback is called after has_supported_in_out(), so it's safe to assume that the shapes are static
@ -695,13 +703,17 @@ void Transformations::MainSnippets(void) {
[&](const ov::Input<const ov::Node>& in) {
return rank_is_too_large(in.get_tensor());
});
if (bad_input_rank)
return true;
const auto& outputs = n->outputs();
const bool bad_output_rank = std::any_of(outputs.begin(), outputs.end(),
[&](const ov::Output<const ov::Node>& out) {
return rank_is_too_large(out.get_tensor());
});
if (bad_output_rank)
return true;
return false;
},
snippets::pass::TokenizeSnippets);
} }
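The callback rewrite above is behavior-preserving: the single combined boolean was unrolled into early returns, so the remaining (potentially more expensive) checks are no longer evaluated once one veto fires, and the new n->is_dynamic() veto runs first. A minimal sketch of the pattern with placeholder checks (returning true vetoes tokenization of the node):

```cpp
// before: one combined expression, every check is computed up front
bool reject_old(bool is_dynamic, bool unsupported_swish, bool disabled_tokenization) {
    return is_dynamic || unsupported_swish || disabled_tokenization;
}

// after: equivalent result, but later checks never run once one fires
bool reject_new(bool is_dynamic, bool unsupported_swish, bool disabled_tokenization) {
    if (is_dynamic)
        return true;
    if (unsupported_swish)
        return true;
    if (disabled_tokenization)
        return true;
    return false;
}
```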

View File

@ -180,7 +180,7 @@ std::vector<std::string> disabledTestPatterns() {
// Issue: 111412
R"(.*smoke_Proposal_(Static|Dynamic)_Test_Case1/ProposalLayerCPUTest.*)",
// Issue: 111418
R"(.*smoke_Snippets_ConvertStub/ConvertStub\.CompareWithRefImpl/IS.*_OT=\(bf16\)_#N=2_#S=2_targetDevice=CPU.*)",
// Issue: 111944
R"(.*smoke_DefConvLayoutTest6.*)",
// Issue: 106939

View File

@ -11,11 +11,17 @@ namespace snippets {
namespace {
// ===================================Add=========================================================//
// These inputs are needed to test static Loop optimizations (emit the whole tile, body with increments, set WA etc)
std::vector<ov::test::InputShape> inShapesStatic1{{{}, {{1, 16, 29, 1}}},
{{}, {{1, 16, 29, 7}}},
{{}, {{1, 16, 29, 8}}},
{{}, {{1, 16, 29, 15}}},
{{}, {{1, 16, 29, 16}}},
{{}, {{1, 16, 29, 31}}}};
std::vector<ov::test::InputShape> inShapesStatic2{{{}, {{1, 16, 29, 1}}},
{{}, {{1, 16, 1, 1}}},
{{}, {{1, 1, 1, 1}}}};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, Add,
::testing::Combine(
@ -26,41 +32,79 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, Add,
::testing::Values(1), // Subgraph is created, since the inputs are followed by converts
::testing::Values(ov::test::utils::DEVICE_CPU)),
Add::getTestCaseName);
// DS
std::vector<InputShape> inShapesDynamic1{
{
{{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()},
{{1, 3, 1, 10}, {1, 3, 10, 10}, {1, 3, 1, 10}}},
}
};
std::vector<InputShape> inShapesDynamic2{
{
{{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()},
{{1, 3, 10, 1}, {1, 3, 1, 1}, {1, 3, 10, 1}}},
}
};
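For readers unfamiliar with the convention used throughout these suites: each ov::test::InputShape pairs a dynamic PartialShape (the bounds; {} means fully static) with the concrete shapes fed on successive inferences, and repeating the first shape last is meant to hit the kernel cache rather than trigger another recompilation. A hedged sketch, assuming InputShape is the pair-like helper from the shared test utils:

```cpp
#include "shared_test_classes/base/ov_subgraph.hpp"  // assumption: defines ov::test::InputShape

// dim 2 is unbounded, dim 3 is constrained to [1, 10];
// re-running the first target shape last should be a cache hit,
// not a third compilation.
ov::test::InputShape example{
    {1, 3, -1, {1, 10}},                            // bounds
    {{1, 3, 5, 10}, {1, 3, 8, 1}, {1, 3, 5, 10}}    // target shapes, one per inference
};
```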
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise_Add, Add,
::testing::Combine(
::testing::ValuesIn(inShapesDynamic1),
::testing::ValuesIn(inShapesDynamic2),
::testing::Values(ov::element::f32),
::testing::Values(1),
::testing::Values(1), // Subgraph is created, since the inputs are followed by converts
::testing::Values(ov::test::utils::DEVICE_CPU)),
Add::getTestCaseName);
// ===================================AddPair=========================================================//
// test cross-tile (vector vs scalar) optimizations in the absence of vector tile
std::vector<std::vector<InputShape>> inShapesAddPair {
{{{}, {{1, 128, 1, 1}}}, {{}, {{1, 128, 1, 1}}}},
{{{}, {{1, 128, 1, 9}}}, {{}, {{1, 128, 1, 9}}}},
{{{}, {{1, 128, 1, 16}}}, {{}, {{1, 128, 1, 16}}}},
{{{}, {{1, 128, 1, 17}}}, {{}, {{1, 128, 1, 17}}}},
{{{}, {{1, 128, 1, 29}}}, {{}, {{1, 128, 1, 29}}}},
{{{}, {{1, 128, 1, 33}}}, {{}, {{1, 128, 1, 33}}}},
{{{}, {{1, 128, 9, 30}}}, {{}, {{1, 128, 1, 30}}}},
{{{}, {{1, 128, 9, 1}}}, {{}, {{1, 128, 1, 30}}}},
{{{}, {{1, 128, 9, 16}}}, {{}, {{1, 128, 9, 1}}}},
// DS
{{{1, -1, {1, 10}, {1, 33}}, {{1, 128, 1, 1}, {1, 128, 1, 9}, {1, 128, 1, 17}, {1, 128, 1, 29}, {1, 128, 9, 1}, {1, 128, 1, 1}}},
{{{1, 1}, {128, 128}, {1, 10}, {1, 33}}, {{1, 128, 1, 1}, {1, 128, 1, 9}, {1, 128, 1, 17}, {1, 128, 1, 29}, {1, 128, 1, 30}, {1, 128, 1, 1}}}},
{{{1, -1, 1, {1, 32}}, {{1, 16, 1, 32}, {1, 16, 1, 32}, {1, 16, 1, 32}, {1, 16, 1, 32}}},
{{1, -1, 1, {1, 32}}, {{1, 16, 1, 32}, {1, 16, 1, 32}, {1, 16, 1, 32}, {1, 16, 1, 32}}}},
};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, AddPair,
::testing::Combine(
::testing::ValuesIn(inShapesAddPair),
::testing::Values(ov::element::f32),
::testing::Values(1),
::testing::Values(1), // Subgraph is created, since the inputs are followed by converts
::testing::Values(ov::test::utils::DEVICE_CPU)),
AddPair::getTestCaseName);
// ===================================AddConst, AddRollConst=========================================================//
std::vector<ov::test::InputShape> inShapesAddConst{{{}, {{1, 2, 3, 32}}},
{{}, {{1, 3, 17, 33}}},
{{-1, -1, -1, -1}, {{1, 3, 17, 33}, {1, 2, 1, 65}, {1, 3, 17, 33}}},
{{1, {1, 10}, {1, 8}, {1, 4}}, {{1, 2, 8, 4}, {1, 8, 1, 1}, {1, 2, 8, 4}}}};
std::vector<PartialShape> inShapesConstAddConst{{1, 1, 1, 1}};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, AddConst,
::testing::Combine(
::testing::ValuesIn(inShapesAddConst),
::testing::ValuesIn(inShapesConstAddConst),
::testing::Values(ov::element::f32),
::testing::Values(1), // Add
::testing::Values(1), // Subgraph is created, since the inputs are followed by converts
::testing::Values(ov::test::utils::DEVICE_CPU)),
AddConst::getTestCaseName);
// ===================================AddRollConst=========================================================//
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, AddRollConst,
::testing::Combine(
::testing::ValuesIn(inShapesAddConst),
::testing::ValuesIn(inShapesConstAddConst),
::testing::Values(ov::element::f32),
::testing::Values(2), // Add + roll after inputs
::testing::Values(1), // Subgraph is created, since the inputs are followed by converts
@ -69,12 +113,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, AddRollConst,
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise_BF16, AddRollConst,
::testing::Combine(
::testing::ValuesIn(inShapesAddConst),
::testing::ValuesIn(inShapesConstAddConst),
::testing::Values(ov::element::bf16),
::testing::Values(3), // Add + reorder + roll after inputs
::testing::Values(1), // Subgraph is created, since the inputs are followed by converts
::testing::Values(ov::test::utils::DEVICE_CPU)),
AddRollConst::getTestCaseName);
} // namespace
} // namespace snippets
} // namespace test

View File

@ -21,13 +21,26 @@ const std::vector<ov::element::Type> input_types = {
const std::vector<CheckBroadcastTestCaseParams> test_cases = {
// broadcast is necessary
{
{{{}, {{1, 3, 4, 4}}}, {{}, {{4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, -1),
1,
0
},
{
{{{}, {{1, 3, 4, 4}}}, {{}, {{4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 2),
1,
0
},
// DS
{
{{{1, 3, -1, {1, 4}}, {{1, 3, 4, 4}, {1, 3, 1, 3}, {1, 3, 4, 4}}}, {{-1, {1, 4}}, {{4, 4}, {1, 3}, {4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, -1),
1,
0
},
{
{{{1, 3, -1, {1, 4}}, {{1, 3, 4, 4}, {1, 3, 1, 3}, {1, 3, 4, 4}}}, {{-1, {1, 4}}, {{4, 4}, {1, 3}, {4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 2),
1,
0
@ -35,13 +48,26 @@ const std::vector<CheckBroadcastTestCaseParams> test_cases = {
// broadcast is not necessary
{
{{{}, {{1, 3, 4, 4}}}, {{}, {{1, 3, 4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, -1),
1,
1
},
{
{{{}, {{1, 3, 4, 4}}}, {{}, {{1, 3, 4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 0),
1,
1
},
// DS
{
{{{1, 3, -1, {1, 4}}, {{1, 3, 4, 4}, {1, 3, 1, 3}, {1, 3, 4, 4}}}, {{-1, 3, {1, 4}, {1, 4}}, {{1, 3, 4, 4}, {1, 3, 1, 3}, {1, 3, 4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, -1),
1,
1
},
{
{{{1, 3, -1, {1, 4}}, {{1, 3, 4, 4}, {1, 3, 1, 3}, {1, 3, 4, 4}}}, {{-1, 3, {1, 4}, {1, 4}}, {{1, 3, 4, 4}, {1, 3, 1, 3}, {1, 3, 4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 0),
1,
1
@ -49,19 +75,38 @@ const std::vector<CheckBroadcastTestCaseParams> test_cases = {
// any other PDPD
{
{{{}, {{1, 3, 4, 4}}}, {{}, {{4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY, -1),
1,
1
},
{
{{{}, {{1, 3, 4, 4}}}, {{}, {{4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY, 0),
1,
1
},
{
{{{}, {{1, 3, 4, 4}}}, {{}, {{4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY, 2),
1,
1
},
// DS
{
{{{1, 3, -1, {1, 4}}, {{1, 3, 4, 4}, {1, 3, 1, 3}, {1, 3, 4, 4}}}, {{-1, {1, 4}}, {{4, 4}, {1, 3}, {4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY, -1),
1,
1
},
{
{{{1, 3, -1, {1, 4}}, {{1, 3, 4, 4}, {1, 3, 1, 3}, {1, 3, 4, 4}}}, {{-1, {1, 4}}, {{4, 4}, {1, 3}, {4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY, 0),
1,
1
},
{
{{{1, 3, -1, {1, 4}}, {{1, 3, 4, 4}, {1, 3, 1, 3}, {1, 3, 4, 4}}}, {{-1, {1, 4}}, {{4, 4}, {1, 3}, {4, 4}}}},
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY, 2),
1,
1

View File

@ -13,14 +13,41 @@ namespace ov {
namespace test {
namespace snippets {
namespace {
std::vector<InputShape> inShapes0{
{{}, {{1, 38, 130}}},
{{}, {{1, 1, 130}}},
};
std::vector<InputShape> inShapes1{
{{}, {{1, 38, 130}}},
{{}, {{1, 38, 1}}},
};
INSTANTIATE_TEST_SUITE_P(NoReshapeAndReshape, CodegenGelu,
::testing::Combine(
::testing::Values(ov::element::f32),
::testing::ValuesIn(inShapes0),
::testing::ValuesIn(inShapes1),
::testing::Values(true, false),
::testing::Values(ov::test::utils::DEVICE_CPU)),
CodegenGelu::getTestCaseName);
// DS
std::vector<InputShape> inShapesDynamic0{
{{-1, -1, -1}, {{1, 12, 128}, {1, 12, 1}, {1, 12, 128}}},
};
std::vector<InputShape> inShapesDynamic1{
{{-1, -1, -1}, {{1, 12, 128}, {1, 1, 128}, {1, 12, 128}}},
};
INSTANTIATE_TEST_SUITE_P(NoReshapeAndReshapeDynamic, CodegenGelu,
::testing::Combine(
::testing::Values(ov::element::f32),
::testing::ValuesIn(inShapesDynamic0),
::testing::ValuesIn(inShapesDynamic1),
::testing::Values(true, false),
::testing::Values(ov::test::utils::DEVICE_CPU)),
CodegenGelu::getTestCaseName);
} // namespace
} // namespace snippets
} // namespace test

View File

@ -30,10 +30,14 @@ const std::vector<std::pair<std::vector<ov::element::Type>, std::vector<ov::elem
{ { ov::element::u8 }, { ov::element::i8 } },
};
const std::vector<std::vector<ov::test::InputShape>> inputShapes_Convert = {
{ {{}, {{2, 16}}} },
{ {{}, {{5, 5}}} },
{ {{}, {{2, 12, 1}}} },
// DS(dynamic shape)
{ {{-1, -1}, {{2, 16}, {2, 8}, {2, 16}}} },
{ {{{1, 5}, 5}, {{5, 5}, {1, 5}, {5, 5}}} },
{ {{{1, 10}, {4, 12}, {1, 2}}, {{2, 12, 1}, {4, 4, 2}, {2, 12, 1}}} }
};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Convert, Convert,
@ -57,10 +61,14 @@ const std::vector<std::pair<std::vector<ov::element::Type>, std::vector<ov::elem
{ { ov::element::u8 }, { ov::element::bf16 } },
};
const std::vector<std::vector<ov::test::InputShape>> inputShapes_ConvertInput = {
{ {{}, {{2, 16}}}, {{}, {{1, 16}}} },
{ {{}, {{5, 18}}}, {{}, {{5, 1}}} },
{ {{}, {{3, 1}}}, {{}, {{3, 21}}} },
// DS
{ {{-1, -1}, {{2, 16}, {1, 16}, {2, 16}}}, {{-1, -1}, {{1, 16}, {2, 16}, {1, 16}}} },
{ {{5, -1}, {{5, 18}, {5, 1}, {5, 18}}}, {{-1, 1}, {{5, 1}, {1, 1}, {5, 1}}} },
{ {{{1, 4}, {1, 8}}, {{3, 1}, {4, 8}, {3, 1}}}, {{{1, 4}, {8, 21}}, {{3, 21}, {1, 8}, {3, 21}}} }
};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertInput, ConvertInput,
@ -94,10 +102,12 @@ const std::vector<std::pair<std::vector<ov::element::Type>, std::vector<ov::elem
{ { ov::element::i8, ov::element::i8, ov::element::f32 }, { ov::element::f32, ov::element::i8 } },
};
const std::vector<std::vector<ov::test::InputShape>> inputShapes_ConvertPartialInputsAndResults = {
{ {{}, {{2, 16}}}, {{}, {{1, 16}}}, {{}, {{1, 1}}} },
{ {{}, {{5, 18}}}, {{}, {{5, 1}}}, {{}, {{1, 18}}} },
{ {{}, {{3, 1}}}, {{}, {{3, 21}}}, {{}, {{3, 1}}} },
// DS
{ {{-1, -1}, {{3, 1}, {2, 4}, {3, 1}}}, {{{1, 3}, -1}, {{3, 21}, {2, 1}, {3, 21}}}, {{{1, 3}, {1, 2}}, {{3, 1}, {1, 1}, {3, 1}}} },
};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertPartialInputsAndResults, ConvertPartialInputsAndResults,
@ -115,9 +125,15 @@ const std::vector<std::pair<std::vector<ov::element::Type>, std::vector<ov::elem
{ { ov::element::f32, ov::element::f32, ov::element::i8, ov::element::i8 }, {} },
};
const std::vector<std::vector<ov::test::InputShape>> inputShapes_ConvertManyOnInputs = {
{ {{}, {{5, 5, 5, 5}}} },
// DS
{ {{-1, -1, -1, -1}, {{5, 5, 5, 5}, {3, 3, 3, 3}, {5, 5, 5, 5}}} }
};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertManyOnInputs, ConvertManyOnInputs,
::testing::Combine(
::testing::ValuesIn(inputShapes_ConvertManyOnInputs),
::testing::ValuesIn(types_ConvertMany),
::testing::Values(1),
::testing::Values(1),
@ -126,7 +142,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertManyOnInputs, ConvertManyOnInputs
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertManyOnOutputs, ConvertManyOnOutputs,
::testing::Combine(
::testing::ValuesIn(inputShapes_ConvertManyOnInputs),
::testing::ValuesIn(types_ConvertMany),
::testing::Values(1),
::testing::Values(1),
@ -140,7 +156,7 @@ const std::vector<std::pair<std::vector<ov::element::Type>, std::vector<ov::elem
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertManyOnInputOutput, ConvertManyOnInputOutput,
::testing::Combine(
::testing::ValuesIn(inputShapes_ConvertManyOnInputs),
::testing::ValuesIn(types_ConvertManyIO),
::testing::Values(1),
::testing::Values(1),

View File

@ -10,10 +10,19 @@ namespace test {
namespace snippets {
namespace {
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise_TwoResults, EltwiseTwoResults,
::testing::Combine(
::testing::Values(InputShape {{}, {{1, 64, 10, 10}}}),
::testing::Values(InputShape {{}, {{1, 64, 10, 1}}}),
::testing::Values(2),
::testing::Values(2),
::testing::Values(ov::test::utils::DEVICE_CPU)),
EltwiseTwoResults::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise_TwoResults_Dynamic, EltwiseTwoResults,
::testing::Combine(
::testing::Values(InputShape {{-1, -1, -1, -1}, {{1, 64, 10, 10}, {2, 8, 2, 1}, {1, 64, 10, 10}}}),
::testing::Values(InputShape {{{1, 2}, {1, 64}, {1, 10}, 1}, {{1, 64, 10, 1}, {2, 1, 1, 1}, {1, 64, 10, 1}}}),
::testing::Values(2),
::testing::Values(2),
::testing::Values(ov::test::utils::DEVICE_CPU)),

View File

@ -10,8 +10,19 @@ namespace test {
namespace snippets {
namespace {
// Note that we need these shapes to cover all cases of code emission (none/one/multiple of scalar/vector tiles)
std::vector<InputShape> input_shapes {{{}, {{1, 64, 10, 10}}},
{{}, {{1, 1, 17, 37}}},
{{}, {{1, 1, 1, 1}}},
{{}, {{1, 1, 1, 7}}},
{{}, {{1, 1, 1, 128}}},
{{}, {{1, 1, 1, 14}}},
{{}, {{1, 1, 1, 16}}},
{{}, {{1, 1, 1, 30}}},
// DS
{{-1, -1, -1, -1}, {{1, 64, 10, 10}, {1, 1, 17, 37}, {1, 64, 10, 10}}},
{{1, {1, 64}, {10, 20}, -1}, {{1, 64, 10, 10}, {1, 1, 17, 37}, {1, 64, 10, 10}}},
{{1, 1, 1, {1, 128}}, {{1, 1, 1, 1}, {1, 1, 1, 7}, {1, 1, 1, 128}, {1, 1, 1, 14}, {1, 1, 1, 16}, {1, 1, 1, 1}}}};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, MaxNumParamsEltwise,
::testing::Combine(
::testing::ValuesIn(input_shapes),

View File

@ -13,8 +13,11 @@ namespace snippets {
namespace {
const std::vector<std::vector<ov::test::InputShape>> input_shapes = {
{ {{}, {{1, 3, 16, 16}}}, {{}, {{1, 1, 1, 16}}} },
// DS
{ {{-1, -1, -1, -1}, {{1, 3, 16, 16}, {1, 1, 1, 16}, {1, 3, 16, 16}}}, {{-1, -1, -1, -1}, {{1, 3, 16, 16}, {1, 1, 1, 16}, {1, 3, 16, 16}}} },
{ {{1, 16, -1, {1, 16}}, {{1, 16, 32, 1}, {1, 16, 1, 16}, {1, 16, 32, 1}}}, {{1, 1, -1, {1, 20}}, {{1, 1, 1, 16}, {1, 1, 8, 16}, {1, 1, 1, 16}}} }
};
const std::vector<std::vector<float>> fake_quantize_intervals = {

View File

@ -12,29 +12,69 @@ namespace snippets {
namespace {
//============================Select=======================================//
std::vector<ov::test::InputShape> inShapes_a{{{}, {{1, 5, 5, 35}}}};
std::vector<ov::test::InputShape> inShapes_b{{{}, {{1}}}};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Select, Select,
::testing::Combine(
::testing::ValuesIn(inShapes_a),
::testing::ValuesIn(inShapes_a),
::testing::ValuesIn(inShapes_b),
::testing::ValuesIn({ov::element::f32, ov::element::i8}),
::testing::Values(1),
::testing::Values(1),
::testing::Values(ov::test::utils::DEVICE_CPU)),
Select::getTestCaseName);
// DS
std::vector<ov::test::InputShape> inShapesDynamic_a{{{1, {1, 5}, -1, 35}, {{1, 5, 5, 35}, {1, 1, 1, 35}, {1, 5, 5, 35}}}};
std::vector<ov::test::InputShape> inShapesDynamic_b{{{-1}, {{1}, {1}, {1}}}};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Select_Dynamic, Select,
::testing::Combine(
::testing::ValuesIn(inShapesDynamic_a),
::testing::ValuesIn(inShapesDynamic_a),
::testing::ValuesIn(inShapesDynamic_b),
::testing::ValuesIn({ov::element::f32, ov::element::i8}),
::testing::Values(1),
::testing::Values(1),
::testing::Values(ov::test::utils::DEVICE_CPU)),
Select::getTestCaseName);
//============================BroadcastSelect=======================================//
std::vector<ov::test::InputShape> inShapes0{{{}, {{1, 8, 2, 1}}}, {{}, {{1, 1, 1, 1}}}};
std::vector<ov::test::InputShape> inShapes1{{{}, {{1, 8, 2, 10}}}, {{}, {{1, 8, 2, 1}}}};
std::vector<ov::test::InputShape> inShapes2{{{}, {{1, 8, 2, 10}}}, {{}, {{1, 1, 1, 1}}}};
std::vector<ov::PartialShape> inShapes3{{1, 8, 2, 1}, {1, 8, 2, 10}};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BroadcastSelect, BroadcastSelect,
::testing::Combine(
::testing::ValuesIn(inShapes0),
::testing::ValuesIn(inShapes1),
::testing::ValuesIn(inShapes2),
::testing::ValuesIn(inShapes3),
::testing::ValuesIn({ov::element::f32, ov::element::i8}),
::testing::Values(1),
::testing::Values(1),
::testing::Values(ov::test::utils::DEVICE_CPU)),
BroadcastSelect::getTestCaseName);
// DS
std::vector<ov::test::InputShape> inShapes0_d{{{-1, -1, -1, -1}, {{1, 8, 2, 1}, {1, 1, 1, 1}, {1, 8, 2, 1}}}};
std::vector<ov::test::InputShape> inShapes1_d{{{1, -1, -1, -1}, {{1, 8, 2, 10}, {1, 8, 2, 10}, {1, 8, 2, 10}}}};
std::vector<ov::test::InputShape> inShapes2_d{{{1, {1, 8}, {1, 2}, {1, 10}}, {{1, 8, 2, 10}, {1, 1, 2, 1}, {1, 8, 2, 10}}}};
std::vector<ov::PartialShape> inShapes3_d{{1, 8, 2, 1}, {1, 8, 2, 10}};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BroadcastSelect_Dynamic, BroadcastSelect,
::testing::Combine(
::testing::ValuesIn(inShapes0_d),
::testing::ValuesIn(inShapes1_d),
::testing::ValuesIn(inShapes2_d),
::testing::ValuesIn(inShapes3_d),
::testing::ValuesIn({ov::element::f32, ov::element::i8}),
::testing::Values(1),
::testing::Values(1),
::testing::Values(ov::test::utils::DEVICE_CPU)),
BroadcastSelect::getTestCaseName);
} // namespace
} // namespace snippets

View File

@ -12,31 +12,34 @@ namespace snippets {
namespace {
const std::vector<InputShape> inputShape = {
{{}, {{1, 16}}},
{{}, {{1, 32}}},
{{}, {{1, 1}}},
{{}, {{1, 9}}},
{{}, {{1, 17}}},
{{}, {{1, 19}}},
{{}, {{1, 49}}},
{{}, {{1, 50}}},
{{}, {{5, 16}}},
{{}, {{5, 32}}},
{{}, {{5, 1}}},
{{}, {{5, 9}}},
{{}, {{5, 17}}},
{{}, {{5, 19}}},
{{}, {{5, 49}}},
{{}, {{5, 50}}},
{{}, {{1, 3, 128, 128}}},
{{}, {{1, 3, 128, 129}}},
{{}, {{1, 3, 128, 130}}},
{{}, {{1, 3, 128, 1}}},
{{}, {{1, 3, 128, 9}}},
{{}, {{1, 3, 128, 16}}},
{{}, {{1, 3, 128, 17}}},
{{}, {{1, 3, 128, 20}}},
// DS
{{-1, -1}, {{1, 16}, {1, 32}, {1, 1}, {1, 9}, {1, 17}, {1, 19}, {1, 49}, {1, 50}, {5, 16}, {1, 16}, {1, 9}}},
{{-1, -1, -1, -1}, {{1, 3, 128, 128}, {1, 3, 128, 129}, {1, 3, 128, 130}, {1, 3, 128, 1}, {1, 3, 128, 16}, {1, 3, 128, 1}}}
};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Softmax, Softmax,
@ -48,13 +51,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Softmax, Softmax,
::testing::Values(ov::test::utils::DEVICE_CPU)),
Softmax::getTestCaseName);
const std::vector<std::pair<InputShape, InputShape>> inputShapesPair = {
{{{}, {{1, 5, 16, 35}}}, {{}, {{1, 5, 16, 35}}}},
{{{}, {{1, 5, 16, 1}}}, {{}, {{1, 5, 16, 35}}}},
{{{}, {{1, 5, 16, 35}}}, {{}, {{1, 5, 1, 1}}}},
{{{}, {{1, 5, 16, 1}}}, {{}, {{1, 5, 16, 1}}}},
{{{}, {{1, 5, 16, 35}}}, {{}, {{1, 5, 1, 35}}}},
{{{}, {{1, 5, 1, 35}}}, {{}, {{1, 5, 1, 35}}}},
// DS
{{{-1, -1, -1, -1}, {{1, 5, 16, 35}, {1, 5, 16, 1}, {1, 5, 16, 35}}}, {{-1, -1, -1, -1}, {{1, 5, 16, 35}, {1, 5, 16, 35}, {1, 5, 16, 35}}}},
{{{-1, {1, 8}, {1, 16}, {1, 16}}, {{1, 3, 1, 8}, {1, 8, 16, 16}, {1, 3, 1, 8}}}, {{-1, {1, 8}, -1, {1, 8}}, {{1, 3, 2, 8}, {2, 1, 1, 1}, {1, 3, 2, 8}}}}
}; };
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_AddSoftmax, AddSoftmax,

View File

@ -12,14 +12,24 @@ namespace {
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, ThreeInputsEltwise,
::testing::Combine(
::testing::Values(InputShape {{}, {{1, 64, 10, 10}}}),
::testing::Values(InputShape {{}, {{1, 64, 10, 1}}}),
::testing::Values(InputShape {{}, {{1, 1, 1, 10}}}),
::testing::Values(1), // eltwises fuse only for non-broadcasted shapes
::testing::Values(1),
::testing::Values(ov::test::utils::DEVICE_CPU)),
ThreeInputsEltwise::getTestCaseName);
// DS
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise_Dynamic, ThreeInputsEltwise,
::testing::Combine(
::testing::Values(InputShape {{-1, -1, -1, -1}, {{1, 64, 10, 10}, {2, 3, 1, 8}, {1, 64, 10, 10}}}),
::testing::Values(InputShape {{1, -1, {1, 10}, 1}, {{1, 64, 10, 1}, {1, 3, 2, 1}, {1, 64, 10, 1}}}),
::testing::Values(InputShape {{1, 1, 1, {1, 10}}, {{1, 1, 1, 10}, {1, 1, 1, 8}, {1, 1, 1, 10}}}),
::testing::Values(1), // eltwises fuse only for non-broadcasted shapes
::testing::Values(1),
::testing::Values(ov::test::utils::DEVICE_CPU)),
ThreeInputsEltwise::getTestCaseName);
} // namespace
} // namespace snippets
} // namespace test

View File

@ -10,24 +10,32 @@ namespace test {
namespace snippets {
namespace {
const std::vector<std::vector<InputShape>> input_shapes = {
{ {{}, {{5, 5, 256, 1}}}, {{}, {{5, 5, 256, 1}}} },
{ {{}, {{5, 5, 16, 35}}}, {{}, {{5, 5, 16, 35}}} },
{ {{}, {{5, 5, 256, 1}}}, {{}, {{5, 5, 256, 35}}} },
{ {{}, {{5, 5, 256, 1}}}, {{}, {{5, 5, 1, 1}}} },
{ {{}, {{5, 5, 16, 35}}}, {{}, {{5, 5, 1, 1}}} },
{ {{}, {{5, 5, 16, 35}}}, {{}, {{5, 5, 16, 1}}} },
{ {{}, {{5, 5, 5, 35}}}, {{}, {{5, 5, 1, 35}}} },
{ {{}, {{5, 5, 16, 1}}}, {{}, {{5, 5, 1, 35}}} },
{ {{}, {{5, 5, 35, 16}}}, {{}, {{5, 5, 35, 16}}} },
{ {{}, {{5, 5, 35, 16}}}, {{}, {{5, 5, 1, 16}}} },
{ {{}, {{5, 5, 35, 17}}}, {{}, {{5, 5, 35, 17}}} },
{ {{}, {{5, 5, 35, 17}}}, {{}, {{5, 5, 1, 17}}} },
{ {{}, {{5, 5, 35, 18}}}, {{}, {{5, 5, 35, 18}}} },
{ {{}, {{5, 5, 35, 18}}}, {{}, {{5, 5, 1, 18}}} },
// DS
{ {{-1, -1, -1, -1}, {{5, 5, 256, 1}, {3, 3, 8, 15}, {5, 5, 256, 1}}},
{{-1, -1, -1, -1}, {{5, 5, 256, 1}, {1, 1, 8, 1}, {5, 5, 256, 1}}} },
{ {{{1, 5}, {2, 6}, {3, 7}, {4, 8}}, {{1, 2, 3, 4}, {5, 6, 7, 8}, {1, 2, 3, 4}}},
{{1, 1, 1, -1}, {{1, 1, 1, 4}, {1, 1, 1, 8}, {1, 1, 1, 4}}} },
{ {{1, -1, {1, 10}, {4, 12}}, {{1, 5, 3, 4}, {1, 10, 8, 12}, {1, 5, 3, 4}}},
{{1, 1, -1, {1, 12}}, {{1, 1, 3, 1}, {1, 1, 8, 12}, {1, 1, 3, 1}}} }
};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, TwoInputsAndOutputs,

View File

@ -7,6 +7,7 @@
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/properties.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
@ -19,13 +20,15 @@ std::string EltwiseLayerCPUTest::getTestCaseName(testing::TestParamInfo<EltwiseL
subgraph::EltwiseTestParams basicParamsSet;
CPUSpecificParams cpuParams;
fusingSpecificParams fusingParams;
bool enforceSnippets;
std::tie(basicParamsSet, cpuParams, fusingParams, enforceSnippets) = obj.param;
std::ostringstream result;
result << subgraph::EltwiseLayerTest::getTestCaseName(testing::TestParamInfo<subgraph::EltwiseTestParams>(
basicParamsSet, 0));
result << CPUTestsBase::getTestCaseName(cpuParams);
result << CpuTestWithFusing::getTestCaseName(fusingParams);
result << "_enforceSnippets=" << enforceSnippets;
return result.str();
}
@ -78,7 +81,8 @@ void EltwiseLayerCPUTest::SetUp() {
subgraph::EltwiseTestParams basicParamsSet;
CPUSpecificParams cpuParams;
fusingSpecificParams fusingParams;
bool enforceSnippets;
std::tie(basicParamsSet, cpuParams, fusingParams, enforceSnippets) = this->GetParam();
std::vector<InputShape> shapes;
ElementType netType;
ngraph::helpers::InputLayerType secondaryInputType;
@ -126,6 +130,13 @@ void EltwiseLayerCPUTest::SetUp() {
}
#endif
if (enforceSnippets) {
configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
} else {
configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
InferenceEngine::PluginConfigInternalParams::DISABLE});
}
ov::ParameterVector parameters{std::make_shared<ov::op::v0::Parameter>(netType, inputDynamicShapes.front())};
std::shared_ptr<ngraph::Node> secondaryInput;
if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) {
@ -426,5 +437,10 @@ const std::vector<CPUSpecificParams>& cpuParams_5D_1D_constant() {
return cpuParams_5D_1D_constant;
}
const std::vector<bool>& enforceSnippets() {
static const std::vector<bool> enforceSnippets = { false, true };
return enforceSnippets;
}
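For context, the branches added to SetUp() above map the new boolean parameter onto the internal snippets-mode config key. A standalone sketch of that wiring (the free function and the plain std::map are stand-ins for the test base-class member):

```cpp
#include <map>
#include <string>
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"

void set_snippets_mode(std::map<std::string, std::string>& configuration, bool enforceSnippets) {
    if (enforceSnippets) {
        // IGNORE_CALLBACK: tokenize even nodes the CPU callback vetoes (e.g. dynamic ones)
        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
    } else {
        // DISABLE: keep snippets off so the regular eltwise path is exercised
        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
                              InferenceEngine::PluginConfigInternalParams::DISABLE});
    }
}
```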
} // namespace Eltwise
} // namespace CPULayerTestsDefinitions

View File

@ -20,7 +20,8 @@ namespace CPULayerTestsDefinitions {
typedef std::tuple<
subgraph::EltwiseTestParams,
CPUSpecificParams,
fusingSpecificParams,
bool> EltwiseLayerCPUTestParamsSet;
class EltwiseLayerCPUTest : public testing::WithParamInterface<EltwiseLayerCPUTestParamsSet>,
virtual public SubgraphBaseTest, public CPUTestUtils::CpuTestWithFusing {
@ -67,5 +68,7 @@ const std::vector<CPUSpecificParams>& cpuParams_5D_1D_parameter();
const std::vector<ngraph::helpers::EltwiseTypes>& eltwiseOpTypesI32();
const std::vector<bool>& enforceSnippets();
} // namespace Eltwise
} // namespace CPULayerTestsDefinitions

View File

@ -27,7 +27,8 @@ const auto params_4D = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())),
::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder, EltwiseLayerCPUTest, params_4D, EltwiseLayerCPUTest::getTestCaseName);
@ -43,7 +44,8 @@ const auto params_4D_emptyCPUSpec = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())),
::testing::Values(emptyCPUSpec),
::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_emptyCPUSpec, EltwiseLayerCPUTest, params_4D_emptyCPUSpec, EltwiseLayerCPUTest::getTestCaseName);
@ -59,7 +61,8 @@ const auto params_5D = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())),
::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder, EltwiseLayerCPUTest, params_5D, EltwiseLayerCPUTest::getTestCaseName);
@ -75,7 +78,8 @@ const auto params_5D_emptyCPUSpec = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())),
::testing::Values(emptyCPUSpec),
::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D, EltwiseLayerCPUTest, params_5D_emptyCPUSpec, EltwiseLayerCPUTest::getTestCaseName);
@ -91,7 +95,8 @@ const auto params_4D_1D_constant_mode = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Constant_mode())),
::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Constant, EltwiseLayerCPUTest, params_4D_1D_constant_mode, EltwiseLayerCPUTest::getTestCaseName);
@ -107,7 +112,8 @@ const auto params_4D_1D_parameter_mode = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())), ::testing::ValuesIn(additional_config())),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Parameter_mode())), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Parameter_mode())),
::testing::Values(emptyFusingSpec)); ::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Parameter, EltwiseLayerCPUTest, params_4D_1D_parameter_mode, EltwiseLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Parameter, EltwiseLayerCPUTest, params_4D_1D_parameter_mode, EltwiseLayerCPUTest::getTestCaseName);
@ -123,7 +129,8 @@ const auto params_5D_1D_constant = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())), ::testing::ValuesIn(additional_config())),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_constant())), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_constant())),
::testing::Values(emptyFusingSpec)); ::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Constant, EltwiseLayerCPUTest, params_5D_1D_constant, EltwiseLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Constant, EltwiseLayerCPUTest, params_5D_1D_constant, EltwiseLayerCPUTest::getTestCaseName);
@ -139,7 +146,8 @@ const auto params_5D_1D_parameter = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())), ::testing::ValuesIn(additional_config())),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_parameter())), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_parameter())),
::testing::Values(emptyFusingSpec)); ::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Parameter, EltwiseLayerCPUTest, params_5D_1D_parameter, EltwiseLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Parameter, EltwiseLayerCPUTest, params_5D_1D_parameter, EltwiseLayerCPUTest::getTestCaseName);
@ -155,7 +163,8 @@ const auto params_4D_dyn_const = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())), ::testing::ValuesIn(additional_config())),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())),
::testing::Values(emptyFusingSpec)); ::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_dyn_const, EltwiseLayerCPUTest, params_4D_dyn_const, EltwiseLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_dyn_const, EltwiseLayerCPUTest, params_4D_dyn_const, EltwiseLayerCPUTest::getTestCaseName);
@ -171,7 +180,8 @@ const auto params_4D_dyn_param = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())), ::testing::ValuesIn(additional_config())),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())),
::testing::Values(emptyFusingSpec)); ::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_dyn_param, EltwiseLayerCPUTest, params_4D_dyn_param, EltwiseLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_dyn_param, EltwiseLayerCPUTest, params_4D_dyn_param, EltwiseLayerCPUTest::getTestCaseName);
@ -187,7 +197,8 @@ const auto params_5D_dyn_const = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())), ::testing::ValuesIn(additional_config())),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())),
::testing::Values(emptyFusingSpec)); ::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_const, EltwiseLayerCPUTest, params_5D_dyn_const, EltwiseLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_const, EltwiseLayerCPUTest, params_5D_dyn_const, EltwiseLayerCPUTest::getTestCaseName);
@ -203,7 +214,8 @@ const auto params_5D_dyn_param = ::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(additional_config())), ::testing::ValuesIn(additional_config())),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())),
::testing::Values(emptyFusingSpec)); ::testing::Values(emptyFusingSpec),
::testing::Values(false));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_param, EltwiseLayerCPUTest, params_5D_dyn_param, EltwiseLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_param, EltwiseLayerCPUTest, params_5D_dyn_param, EltwiseLayerCPUTest::getTestCaseName);

View File

@@ -167,7 +167,8 @@ const auto params_4D_Blocked_Blocked = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_Blocked_Blocked, EltwiseLayerCPUTest, params_4D_Blocked_Blocked,
                          EltwiseLayerCPUTest::getTestCaseName);
@@ -184,7 +185,8 @@ const auto params_4D_fusing = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())),
-    ::testing::ValuesIn(fusingParamsSet_x64));
+    ::testing::ValuesIn(fusingParamsSet_x64),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Fusing, EltwiseLayerCPUTest, params_4D_fusing, EltwiseLayerCPUTest::getTestCaseName);
@@ -200,7 +202,8 @@ const auto params_4D_fusing_blocked_blocked = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())),
-    ::testing::ValuesIn(fusingParamsSet_x64));
+    ::testing::ValuesIn(fusingParamsSet_x64),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Fusing_Blocked_Blocked, EltwiseLayerCPUTest, params_4D_fusing_blocked_blocked,
                          EltwiseLayerCPUTest::getTestCaseName);
@@ -217,7 +220,8 @@ const auto params_4D_blocked_blocked_fusing = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())),
-    ::testing::ValuesIn(fusingParamsSet_x64));
+    ::testing::ValuesIn(fusingParamsSet_x64),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Blocked_Blocked_Fusing, EltwiseLayerCPUTest, params_4D_blocked_blocked_fusing,
                          EltwiseLayerCPUTest::getTestCaseName);
@@ -234,7 +238,8 @@ const auto params_4D_emptyCPUSpec = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::Values(emptyCPUSpec),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_emptyCPUSpec_x64, EltwiseLayerCPUTest, params_4D_emptyCPUSpec, EltwiseLayerCPUTest::getTestCaseName);
@@ -250,7 +255,8 @@ const auto params_5D_Blocked_Blocked = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Blocked())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_Blocked_Blocked, EltwiseLayerCPUTest, params_5D_Blocked_Blocked,
                          EltwiseLayerCPUTest::getTestCaseName);
@@ -271,7 +277,8 @@ const auto params_5D_emptyCPUSpec_I32 = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::Values(emptyCPUSpec),
-    ::testing::ValuesIn(fusingParamsSet_I32));
+    ::testing::ValuesIn(fusingParamsSet_I32),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_I32, EltwiseLayerCPUTest, params_5D_emptyCPUSpec_I32, EltwiseLayerCPUTest::getTestCaseName);
@@ -287,7 +294,8 @@ const auto params_4D_Blocked_Planar = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Planar())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Blocked_Planar, EltwiseLayerCPUTest, params_4D_Blocked_Planar, EltwiseLayerCPUTest::getTestCaseName);
@@ -303,7 +311,8 @@ const auto params_4D_Planar_Blocked = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar_Blocked())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Planar_Blocked, EltwiseLayerCPUTest, params_4D_Planar_Blocked, EltwiseLayerCPUTest::getTestCaseName);
@@ -319,7 +328,8 @@ const auto params_5D_Blocked_Planar = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Planar())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_Blocked_Planar, EltwiseLayerCPUTest, params_5D_Blocked_Planar, EltwiseLayerCPUTest::getTestCaseName);
@@ -335,7 +345,8 @@ const auto params_5D_Planar_Blocked = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Planar_Blocked())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_Planar_Blocked_x64, EltwiseLayerCPUTest, params_5D_Planar_Blocked, EltwiseLayerCPUTest::getTestCaseName);
@@ -351,7 +362,8 @@ const auto params_4D_1D_constant_mode = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Constant_mode_x64())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Constant_x64, EltwiseLayerCPUTest, params_4D_1D_constant_mode, EltwiseLayerCPUTest::getTestCaseName);
@@ -367,7 +379,8 @@ const auto params_4D_1D_parameter_mode = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Parameter_mode())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Parameter_x64, EltwiseLayerCPUTest, params_4D_1D_parameter_mode, EltwiseLayerCPUTest::getTestCaseName);
@@ -383,7 +396,8 @@ const auto params_5D_1D_constant = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_constant())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Constant_x64, EltwiseLayerCPUTest, params_5D_1D_constant, EltwiseLayerCPUTest::getTestCaseName);
@@ -399,7 +413,8 @@ const auto params_5D_1D_parameter = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_parameter())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Parameter_x64, EltwiseLayerCPUTest, params_5D_1D_parameter, EltwiseLayerCPUTest::getTestCaseName);
@@ -417,7 +432,8 @@ const auto params_4D_dyn_const = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_dyn_const_x64, EltwiseLayerCPUTest, params_4D_dyn_const, EltwiseLayerCPUTest::getTestCaseName);
@@ -433,7 +449,8 @@ const auto params_4D_blocked_blocked_dyn_const = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Blocked_Blocked_MemOrder_dyn_const_x64, EltwiseLayerCPUTest, params_4D_blocked_blocked_dyn_const,
                          EltwiseLayerCPUTest::getTestCaseName);
@@ -450,7 +467,8 @@ const auto params_4D_dyn_param = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_dyn_param_x64, EltwiseLayerCPUTest, params_4D_dyn_param, EltwiseLayerCPUTest::getTestCaseName);
@@ -466,7 +484,8 @@ const auto params_4D_blocked_blocked_dyn_param = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Blocked_Blocked_MemOrder_dyn_param_x64, EltwiseLayerCPUTest, params_4D_blocked_blocked_dyn_param,
                          EltwiseLayerCPUTest::getTestCaseName);
@@ -483,7 +502,8 @@ const auto params_4D_dyn_param_fusing = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())),
-    ::testing::ValuesIn(fusingParamsSet_x64));
+    ::testing::ValuesIn(fusingParamsSet_x64),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_dyn_param_fusing, EltwiseLayerCPUTest, params_4D_dyn_param_fusing, EltwiseLayerCPUTest::getTestCaseName);
@@ -499,7 +519,8 @@ const auto params_4D_dyn_param_fusing_Blocked_Blocked = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())),
-    ::testing::ValuesIn(fusingParamsSet_x64));
+    ::testing::ValuesIn(fusingParamsSet_x64),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_dyn_param_fusing_Blocked_Blocked, EltwiseLayerCPUTest, params_4D_dyn_param_fusing_Blocked_Blocked,
                          EltwiseLayerCPUTest::getTestCaseName);
@@ -516,7 +537,8 @@ const auto params_4D_blocked_blocked_dyn_param_fusing = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())),
-    ::testing::ValuesIn(fusingParamsSet_x64));
+    ::testing::ValuesIn(fusingParamsSet_x64),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_blocked_blocked_dyn_param_fusing, EltwiseLayerCPUTest, params_4D_blocked_blocked_dyn_param_fusing,
                          EltwiseLayerCPUTest::getTestCaseName);
@@ -535,7 +557,8 @@ const auto params_5D_dyn_const_Blocked_Blocked = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Blocked())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_const_Blocked_Blocked, EltwiseLayerCPUTest, params_5D_dyn_const_Blocked_Blocked,
                          EltwiseLayerCPUTest::getTestCaseName);
@@ -552,7 +575,8 @@ const auto params_5D_dyn_param_Blocked_Blocked = ::testing::Combine(
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(additional_config())),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Blocked())),
-    ::testing::Values(emptyFusingSpec));
+    ::testing::Values(emptyFusingSpec),
+    ::testing::ValuesIn(enforceSnippets()));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_param_Blocked_Blocked, EltwiseLayerCPUTest, params_5D_dyn_param_Blocked_Blocked,
                          EltwiseLayerCPUTest::getTestCaseName);

View File

@@ -86,7 +86,7 @@ protected:
 TEST_P(SelectLayerCPUTest, CompareWithRefs) {
     run();
-    CheckPluginRelatedResults(compiledModel, "Eltwise");
+    CheckPluginRelatedResults(compiledModel, std::set<std::string>{"Eltwise", "Subgraph"});
 }
 const std::vector<ElementType> precisions = {
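Since a dynamic eltwise pattern may now execute either as a legacy Eltwise node or as a snippets Subgraph, the expected primitive type above becomes a set rather than a single string. A minimal self-contained sketch of that membership check (the helper is hypothetical, not the CPUTestsBase API):

    // Sketch: accept any of several primitive types for one test pattern.
    #include <cassert>
    #include <set>
    #include <string>

    static bool typeMatches(const std::string& executedType,
                            const std::set<std::string>& acceptedTypes) {
        return acceptedTypes.count(executedType) != 0;
    }

    int main() {
        const std::set<std::string> accepted{"Eltwise", "Subgraph"};
        assert(typeMatches("Eltwise", accepted));   // legacy eltwise path
        assert(typeMatches("Subgraph", accepted));  // snippets path
        assert(!typeMatches("Convert", accepted));  // anything else fails
        return 0;
    }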

View File

@@ -8,6 +8,7 @@
 #include "shared_test_classes/base/ov_subgraph.hpp"
 #include "ngraph_functions/utils/ngraph_helpers.hpp"
 #include "ngraph_functions/builders.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
 using namespace CPUTestUtils;
 using namespace InferenceEngine;
@@ -127,6 +128,11 @@ public:
         function = makeNgraphFunction(getNetType(), inputParams, sum, "ConvolutionSumBroadcast");
         targetDevice = ov::test::utils::DEVICE_CPU;
+        if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+            configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                                  InferenceEngine::PluginConfigInternalParams::DISABLE});
+        }
     }
 protected:
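The guard above applies a per-test default without clobbering an externally supplied value. Because std::map::insert never overwrites an existing key, the count() check is technically redundant but makes the intent explicit. A self-contained sketch with placeholder key and value strings (not the real plugin constants):

    // Sketch: insert-if-absent configuration default.
    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        std::map<std::string, std::string> configuration;
        const std::string key = "SNIPPETS_MODE";    // stands in for KEY_SNIPPETS_MODE

        if (!configuration.count(key)) {
            configuration.insert({key, "DISABLE"}); // default for this test only
        }
        configuration.insert({key, "ENABLE"});      // map::insert never overwrites
        std::cout << key << "=" << configuration[key] << "\n";  // prints DISABLE
        return 0;
    }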

View File

@@ -13,6 +13,7 @@
 #include <common_test_utils/ov_tensor_utils.hpp>
 #include "test_utils/cpu_test_utils.hpp"
 #include <openvino/opsets/opset1.hpp>
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
 using namespace CPUTestUtils;
 using namespace ov::test;
@@ -205,6 +206,11 @@ protected:
         std::tie(inputShapes, data_et, idces_et, k) = this->GetParam();
         init_input_shapes(inputShapes);
         function = initNgram(inputDynamicShapes, data_et, idces_et, k);
+        if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+            configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                                  InferenceEngine::PluginConfigInternalParams::DISABLE});
+        }
     }
 };

View File

@@ -11,8 +11,8 @@ namespace test {
 namespace snippets {
 typedef std::tuple<
-        ov::Shape,               // Input 0 Shape
-        ov::Shape,               // Input 1 Shape
+        InputShape,              // Input 0 Shape
+        InputShape,              // Input 1 Shape
         ov::element::Type,       // Element type
         size_t,                  // Expected num nodes
         size_t,                  // Expected num subgraphs
@@ -20,21 +20,22 @@ typedef std::tuple<
 > AddParams;
 typedef std::tuple<
-        std::vector<ov::Shape>,  // Input 0, Input 1 Shape
-        ov::element::Type,       // Element type
-        size_t,                  // Expected num nodes
-        size_t,                  // Expected num subgraphs
-        std::string              // Target Device
-> AddParamsPair;
-
-typedef std::tuple<
-        ov::Shape,               // Input 0 Shape
+        InputShape,              // Input 0 Shape
+        PartialShape,            // const shape
         ov::element::Type,       // Element type
         size_t,                  // Expected num nodes
         size_t,                  // Expected num subgraphs
         std::string              // Target Device
 > AddConstParams;
+
+typedef std::tuple<
+        std::vector<InputShape>, // Input 0, Input 1 Shape
+        ov::element::Type,       // Element type
+        size_t,                  // Expected num nodes
+        size_t,                  // Expected num subgraphs
+        std::string              // Target Device
+> AddParamsPair;
 class Add : public testing::WithParamInterface<ov::test::snippets::AddParams>,
             virtual public ov::test::SnippetsTestsCommon {
 public:
@@ -57,7 +58,6 @@ protected:
     void SetUp() override;
 };
-// repack AddPair input shapes into shape vector to cover some cases easier
 class AddPair : public testing::WithParamInterface<ov::test::snippets::AddParamsPair>,
                 virtual public ov::test::SnippetsTestsCommon {
 public:
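The InputShape type these headers switch to is, judging by the .first/.second usage in the test sources later in this diff, a pair of one dynamic shape and the list of static target shapes inferred against it. A minimal self-contained sketch of that layout and of the TS=(...)_ printing convention used in the test names (all type names below are local stand-ins, not the OpenVINO definitions):

    // Sketch: {dynamic bounds, {target shapes...}} per network input.
    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    using Dim = int64_t;                   // -1 models a dynamic dimension
    using StaticShape = std::vector<Dim>;
    using InputShapeSketch = std::pair<StaticShape, std::vector<StaticShape>>;

    int main() {
        // Rank-4 input with dynamic batch and spatial dims, exercised
        // with two concrete target shapes during inference.
        InputShapeSketch shape{{-1, 3, -1, -1}, {{1, 3, 16, 16}, {2, 3, 8, 8}}};

        for (const auto& target : shape.second) {
            std::cout << "(";
            for (size_t i = 0; i < target.size(); ++i)
                std::cout << target[i] << (i + 1 < target.size() ? "," : "");
            std::cout << ")_";
        }
        std::cout << "\n";  // prints (1,3,16,16)_(2,3,8,8)_
        return 0;
    }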

View File

@@ -12,7 +12,7 @@ namespace snippets {
 class CheckBroadcastTestCaseParams {
 public:
-    std::pair<ov::PartialShape, ov::PartialShape> input_shapes;
+    std::pair<InputShape, InputShape> input_shapes;
     ov::op::AutoBroadcastSpec broadcast;
     size_t num_nodes;
     size_t num_subgraphs;

View File

@@ -4,15 +4,7 @@
 #pragma once
-#include <tuple>
-#include <vector>
-#include <string>
-#include <memory>
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ngraph_functions/utils/ngraph_helpers.hpp"
-#include "ngraph_functions/builders.hpp"
-
-// todo: Rewrite this test using Snippets test infrastructure. See add_convert or conv_eltwise for example
+#include "shared_test_classes/base/snippets_test_utils.hpp"
 namespace ov {
 namespace test {
@@ -20,13 +12,14 @@ namespace snippets {
 typedef std::tuple<
         ov::element::Type_t,     // Network Precision
-        ov::Shape,               // Input Shape,
+        InputShape,              // Input1 Shape,
+        InputShape,              // Input2 Shape,
         bool,
         std::string              // Target Device
 > CodegenGeluParams;
 class CodegenGelu : public testing::WithParamInterface<ov::test::snippets::CodegenGeluParams>,
-                    virtual public LayerTestsUtils::LayerTestsCommon {
+                    virtual public ov::test::SnippetsTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<ov::test::snippets::CodegenGeluParams> obj);

View File

@@ -11,7 +11,7 @@ namespace test {
 namespace snippets {
 typedef std::tuple<
-        std::vector<ov::PartialShape>,  // InputShapes
+        std::vector<InputShape>,        // InputShapes
         std::pair<std::vector<ov::element::Type>, std::vector<ov::element::Type>>, // Input and Output data types for Converts
         size_t,                         // Expected num nodes
         size_t,                         // Expected num subgraphs

View File

@@ -11,8 +11,8 @@ namespace test {
 namespace snippets {
 typedef std::tuple<
-        ov::Shape,               // Input 0 Shape
-        ov::Shape,               // Input 1 Shape
+        InputShape,              // Input 0 Shape
+        InputShape,              // Input 1 Shape
         size_t,                  // Expected num nodes
         size_t,                  // Expected num subgraphs
         std::string              // Target Device

View File

@@ -11,7 +11,7 @@ namespace test {
 namespace snippets {
 typedef std::tuple<
-        ov::Shape,               // Input Shape All shapes are replicated
+        InputShape,              // Input Shape All shapes are replicated
         size_t,                  // Expected num nodes
         size_t,                  // Expected num subgraphs
         std::string              // Target Device

View File

@@ -11,7 +11,7 @@ namespace test {
 namespace snippets {
 typedef std::tuple<
-        std::vector<ov::PartialShape>,  // Input shapes
+        std::vector<InputShape>,        // Input shapes
         std::vector<float>,             // FakeQuantize intervals
         size_t,                         // Expected num nodes
         size_t,                         // Expected num subgraphs

View File

@@ -11,9 +11,9 @@ namespace test {
 namespace snippets {
 typedef std::tuple<
-        ov::Shape,               // Input 0 Shape
-        ov::Shape,               // Input 1 Shape
-        ov::Shape,               // Input 2 Shape
+        InputShape,              // Input 0 Shape
+        InputShape,              // Input 1 Shape
+        InputShape,              // Input 2 Shape
         ov::element::Type,       // Element type
         size_t,                  // Expected num nodes
         size_t,                  // Expected num subgraphs
@@ -21,10 +21,10 @@ typedef std::tuple<
 > SelectParams;
 typedef std::tuple<
-        ov::Shape,               // Input 0 Shape
-        ov::Shape,               // Input 1 Shape
-        ov::Shape,               // Input 2 Shape
-        ov::Shape,               // Input 3 Shape
+        InputShape,              // Input 0 Shape
+        InputShape,              // Input 1 Shape
+        InputShape,              // Input 2 Shape
+        ov::PartialShape,        // Input 3 Shape
         ov::element::Type,       // Element type
         size_t,                  // Expected num nodes
         size_t,                  // Expected num subgraphs
@@ -53,7 +53,6 @@ protected:
     void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override;
 };
-
 } // namespace snippets
 } // namespace test
 } // namespace ov

View File

@@ -11,7 +11,7 @@ namespace test {
 namespace snippets {
 typedef std::tuple<
-        ov::Shape,               // Input 0 Shape
+        InputShape,              // Input 0 Shape
         int,                     // Axis
         size_t,                  // Expected num nodes
         size_t,                  // Expected num subgraphs
@@ -19,7 +19,7 @@ typedef std::tuple<
 > SoftmaxParams;
 typedef std::tuple<
-        std::pair<ov::Shape, ov::Shape>,   // Input Shapes
+        std::pair<InputShape, InputShape>, // Input Shapes
         int,                               // Axis
         size_t,                            // Expected num nodes
         size_t,                            // Expected num subgraphs

View File

@@ -10,15 +10,6 @@ namespace ov {
 namespace test {
 namespace snippets {
-typedef std::tuple<
-        ov::Shape,               // Input 0 Shape
-        ov::Shape,               // Input 1 Shape
-        ov::Shape,               // Input 2 Shape
-        size_t,                  // Expected num nodes
-        size_t,                  // Expected num subgraphs
-        std::string              // Target Device
-> ThreeInputsEltwiseParams;
-
 typedef std::tuple<
         InputShape,              // Input 0 Shape
         InputShape,              // Input 1 Shape
@@ -26,7 +17,7 @@ typedef std::tuple<
         size_t,                  // Expected num nodes
         size_t,                  // Expected num subgraphs
         std::string              // Target Device
-> ThreeInputsEltwiseDynamicParams;
+> ThreeInputsEltwiseParams;
 class ThreeInputsEltwise : public testing::WithParamInterface<ov::test::snippets::ThreeInputsEltwiseParams>,
                            virtual public ov::test::SnippetsTestsCommon {
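Collapsing the separate static and dynamic parameter typedefs into one ThreeInputsEltwiseParams works because a static case is representable as an InputShape with an empty dynamic part and a single target shape, matching the init_input_shapes({{}, {shape}}) pattern removed elsewhere in this commit. A small sketch under that assumption (local stand-in types, not the OpenVINO ones):

    // Sketch: static and dynamic cases expressed through one shape type.
    #include <cstdint>
    #include <utility>
    #include <vector>

    using StaticShape = std::vector<int64_t>;
    using InputShapeSketch = std::pair<StaticShape, std::vector<StaticShape>>;

    int main() {
        StaticShape s{1, 64, 10, 10};
        InputShapeSketch staticCase{{}, {s}};  // static: one target, no bounds
        InputShapeSketch dynamicCase{{-1, 64, -1, -1}, {s, {2, 64, 8, 8}}};
        return staticCase.second.size() == 1 ? 0 : 1;
    }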

View File

@@ -11,7 +11,7 @@ namespace test {
 namespace snippets {
 typedef std::tuple<
-        std::vector<ov::PartialShape>,  // Input Shape All shapes
+        std::vector<InputShape>,        // Input Shape All shapes
         size_t,                         // Expected num nodes
         size_t,                         // Expected num subgraphs
         std::string                     // Target Device

View File

@@ -7,21 +7,30 @@
 #include "subgraph_simple.hpp"
 #include "ngraph_functions/builders.hpp"
 #include "functional_test_utils/skip_tests_config.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
 namespace ov {
 namespace test {
 namespace snippets {
 std::string Add::getTestCaseName(testing::TestParamInfo<ov::test::snippets::AddParams> obj) {
-    ov::Shape inputShapes0, inputShapes1, newInputShapes;
+    ov::test::InputShape inputShapes0, inputShapes1;
     ov::element::Type type;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(inputShapes0, inputShapes1, type, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS[0]=" << ov::test::utils::vec2str(inputShapes0) << "_";
-    result << "IS[1]=" << ov::test::utils::vec2str(inputShapes1) << "_";
+    result << "IS[0]=" << ov::test::utils::partialShape2str({inputShapes0.first}) << "_";
+    result << "TS[0]=";
+    for (const auto& shape : inputShapes0.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[1]=" << ov::test::utils::partialShape2str({inputShapes1.first}) << "_";
+    result << "TS[1]=";
+    for (const auto& shape : inputShapes1.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
     result << "T=" << type << "_";
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
@@ -30,25 +39,34 @@ std::string Add::getTestCaseName(testing::TestParamInfo<ov::test::snippets::AddP
 }
 void Add::SetUp() {
-    ov::Shape inputShape0, inputShape1;
+    ov::test::InputShape inputShape0, inputShape1;
     ov::element::Type type;
     std::tie(inputShape0, inputShape1, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes({{{}, {inputShape0, }}, {{}, {inputShape1, }}});
-    auto f = ov::test::snippets::AddFunction({inputShape0, inputShape1});
+    init_input_shapes({inputShape0, inputShape1});
+    auto f = ov::test::snippets::AddFunction(inputDynamicShapes);
     function = f.getOriginal();
     setInferenceType(type);
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }
 std::string AddConst::getTestCaseName(testing::TestParamInfo<ov::test::snippets::AddConstParams> obj) {
-    ov::Shape inputShapes, newInputShapes;
+    InputShape inputShapes;
+    PartialShape constShape;
     ov::element::Type type;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
-    std::tie(inputShapes, type, num_nodes, num_subgraphs, targetDevice) = obj.param;
+    std::tie(inputShapes, constShape, type, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS[0]=" << ov::test::utils::vec2str(inputShapes) << "_";
+    result << "IS[0]=" << ov::test::utils::partialShape2str({inputShapes.first}) << "_";
+    result << "TS[0]=";
+    for (const auto& shape : inputShapes.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS_ConstShape=" << ov::test::utils::partialShape2str({constShape}) << "_";
     result << "T=" << type << "_";
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
@@ -57,29 +75,37 @@ std::string AddConst::getTestCaseName(testing::TestParamInfo<ov::test::snippets:
 }
 void AddConst::SetUp() {
-    ov::Shape inputShape;
+    InputShape inputShape;
+    PartialShape constShape;
     ov::element::Type type;
-    std::tie(inputShape, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes({{{}, {inputShape, }}});
-    auto f = ov::test::snippets::AddConstFunction({inputShape});
+    std::tie(inputShape, constShape, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
+    init_input_shapes({{inputShape}});
+    auto f = ov::test::snippets::AddConstFunction({inputDynamicShapes}, constShape);
     function = f.getOriginal();
     setInferenceType(type);
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }
 void AddRollConst::SetUp() {
-    ov::Shape inputShape;
+    InputShape inputShape;
+    PartialShape constShape;
     ov::element::Type type;
-    std::tie(inputShape, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes({{{}, {inputShape, }}});
-    auto f = ov::test::snippets::AddRollConstFunction({inputShape});
+    std::tie(inputShape, constShape, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
+    init_input_shapes({inputShape});
+    auto f = ov::test::snippets::AddRollConstFunction({inputDynamicShapes}, constShape);
     function = f.getOriginal();
     setInferenceType(type);
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }
 std::string AddPair::getTestCaseName(testing::TestParamInfo<ov::test::snippets::AddParamsPair> obj) {
-    std::vector<ov::Shape> input_shapes;
+    std::vector<InputShape> input_shapes;
     ov::element::Type type;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
@@ -87,8 +113,16 @@ std::string AddPair::getTestCaseName(testing::TestParamInfo<ov::test::snippets::
     if (input_shapes.size() != 2)
         IE_THROW() << "Invalid input shapes vector size";
     std::ostringstream result;
-    result << "IS[0]=" << ov::test::utils::vec2str(input_shapes[0]) << "_";
-    result << "IS[1]=" << ov::test::utils::vec2str(input_shapes[1]) << "_";
+    result << "IS[0]=" << ov::test::utils::partialShape2str({input_shapes[0].first}) << "_";
+    result << "TS[0]=";
+    for (const auto& shape : input_shapes[0].second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[1]=" << ov::test::utils::partialShape2str({input_shapes[1].first}) << "_";
+    result << "TS[1]=";
+    for (const auto& shape : input_shapes[1].second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
     result << "T=" << type << "_";
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
@@ -97,17 +131,17 @@ std::string AddPair::getTestCaseName(testing::TestParamInfo<ov::test::snippets::
 }
 void AddPair::SetUp() {
-    std::vector<ov::Shape> input_shapes;
+    std::vector<InputShape> input_shapes;
     ov::element::Type type;
     std::tie(input_shapes, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    std::vector<InputShape> is;
-    for (const auto& s : input_shapes) {
-        is.emplace_back(InputShape {{}, {s, }});
-    }
-    init_input_shapes(is);
-    auto f = ov::test::snippets::AddFunction({input_shapes[0], input_shapes[1]});
+    init_input_shapes(input_shapes);
+    auto f = ov::test::snippets::AddFunction(inputDynamicShapes);
     function = f.getOriginal();
     setInferenceType(type);
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }
 TEST_P(Add, CompareWithRefImpl) {
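The snippets tests above pick IGNORE_CALLBACK, while the unrelated CPU tests earlier in this diff pick DISABLE. Assuming KEY_SNIPPETS_MODE carries the three values this commit suggests (a default ENABLE plus the two set explicitly here), the intended semantics can be sketched as a three-state switch:

    // Sketch (mode names assumed from the config values above): ENABLE obeys
    // the plugin heuristics, IGNORE_CALLBACK forces tokenization so the
    // snippets path is always tested, DISABLE turns snippets off entirely.
    #include <iostream>

    enum class SnippetsMode { Enable, IgnoreCallback, Disable };

    static bool shouldTokenize(SnippetsMode mode, bool heuristicApproves) {
        switch (mode) {
            case SnippetsMode::IgnoreCallback: return true;   // tests targeting snippets
            case SnippetsMode::Disable:        return false;  // tests avoiding snippets
            case SnippetsMode::Enable:         return heuristicApproves;
        }
        return false;
    }

    int main() {
        std::cout << shouldTokenize(SnippetsMode::IgnoreCallback, false)  // 1
                  << shouldTokenize(SnippetsMode::Disable, true)          // 0
                  << shouldTokenize(SnippetsMode::Enable, true) << "\n";  // 1
        return 0;
    }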

View File

@@ -7,6 +7,7 @@
 #include "common_test_utils/common_utils.hpp"
 #include "subgraph_converts.hpp"
 #include "common_test_utils/ov_tensor_utils.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
 namespace ov {
 namespace test {
@@ -49,8 +50,16 @@ std::string CheckBroadcast::getTestCaseName(testing::TestParamInfo<CheckBroadcas
     std::tie(input_type, test_case_params, target_device) = obj.param;
     std::ostringstream result;
-    result << "IS=" << test_case_params.input_shapes.first.get_shape() << "_" <<
-        test_case_params.input_shapes.second.get_shape() << "_";
+    result << "IS[0]=" << ov::test::utils::partialShape2str({test_case_params.input_shapes.first.first}) << "_";
+    result << "TS[0]=";
+    for (const auto& shape : test_case_params.input_shapes.first.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[1]=" << ov::test::utils::partialShape2str({test_case_params.input_shapes.second.first}) << "_";
+    result << "TS[1]=";
+    for (const auto& shape : test_case_params.input_shapes.second.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
     result << "IT=" << input_type << "_";
     result << "BCT=" << test_case_params.broadcast.m_type << "_";
     result << "BCA=" << test_case_params.broadcast.m_axis << "_";
@@ -68,15 +77,17 @@ void CheckBroadcast::SetUp() {
     ref_num_nodes = test_case_params.num_nodes;
     ref_num_subgraphs = test_case_params.num_subgraphs;
-    init_input_shapes(static_partial_shapes_to_test_representation({
-        test_case_params.input_shapes.first,
-        test_case_params.input_shapes.second}));
+    init_input_shapes({test_case_params.input_shapes.first, test_case_params.input_shapes.second});
     function = CheckBroadcastFunction::get(
-        test_case_params.input_shapes.first,
-        test_case_params.input_shapes.second,
+        inputDynamicShapes[0],
+        inputDynamicShapes[1],
         input_type,
         test_case_params.broadcast);
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }
 TEST_P(CheckBroadcast, CompareWithRefImpl) {

View File

@@ -3,41 +3,36 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <memory>
-#include <tuple>
-#include <vector>
-#include <string>
-#include <ie_core.hpp>
-#include "common_test_utils/common_utils.hpp"
-#include "functional_test_utils/plugin_cache.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "functional_test_utils/blob_utils.hpp"
-#include "ngraph_functions/pass/convert_prc.hpp"
-#include "snippets/codegen_gelu.hpp"
 #include <ngraph/pass/constant_folding.hpp>
-#include <ngraph/pass/visualize_tree.hpp>
+#include "common_test_utils/common_utils.hpp"
+#include "snippets/codegen_gelu.hpp"
+#include "subgraph_simple.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
 #include <transformations/init_node_info.hpp>
 #include <transformations/utils/utils.hpp>
-
-// todo: Rewrite this test using Snippets test infrastructure. See add_convert or conv_eltwise for example
 namespace ov {
 namespace test {
 namespace snippets {
 std::string CodegenGelu::getTestCaseName(testing::TestParamInfo<ov::test::snippets::CodegenGeluParams> obj) {
     ov::element::Type_t netPrecision;
-    ov::Shape inputShapes0, newInputShapes;
+    InputShape inputShapes0, inputShapes1;
     bool useSubgraph;
     std::string targetDevice;
-    std::tie(netPrecision, inputShapes0, useSubgraph, targetDevice) = obj.param;
+    std::tie(netPrecision, inputShapes0, inputShapes1, useSubgraph, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS[0]=" << ov::test::utils::vec2str(inputShapes0) << "_";
+    result << "IS[0]=" << ov::test::utils::partialShape2str({inputShapes0.first}) << "_";
+    result << "TS[0]=";
+    for (const auto& shape : inputShapes0.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[1]=" << ov::test::utils::partialShape2str({inputShapes1.first}) << "_";
+    result << "TS[1]=";
+    for (const auto& shape : inputShapes1.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
     result << "netPRC=" << netPrecision << "_";
     result << "overSnippet=" << (useSubgraph ? "yes" : "no") << "_";
     result << "targetDevice=" << targetDevice;
@@ -46,13 +41,15 @@ namespace snippets {
     // Gelu from bert-large-uncased-whole-word-masking-squad-fp32-onnx-0001
     void CodegenGelu::SetUp() {
-        ov::Shape inputShape0;
+        InputShape inputShape0, inputShapes1;
         ov::element::Type_t netPrecision;
         bool useSubgraph;
-        std::tie(netPrecision, inputShape0, useSubgraph, targetDevice) = this->GetParam();
-        auto input0 = std::make_shared<ngraph::opset1::Parameter>(netPrecision, ngraph::Shape{inputShape0});
-        auto input1 = std::make_shared<ngraph::opset1::Parameter>(netPrecision, ngraph::Shape{inputShape0});
+        std::tie(netPrecision, inputShape0, inputShapes1, useSubgraph, targetDevice) = this->GetParam();
+        init_input_shapes({inputShape0, inputShapes1});
+
+        auto input0 = std::make_shared<ngraph::opset1::Parameter>(netPrecision, inputDynamicShapes[0]);
+        auto input1 = std::make_shared<ngraph::opset1::Parameter>(netPrecision, inputDynamicShapes[1]);
         auto add = std::make_shared<ngraph::opset1::Add>(input0, input1);
         auto gelu = std::make_shared<ngraph::opset2::Gelu>(add);
@@ -67,10 +64,14 @@ namespace snippets {
             ov::pass::InitNodeInfo().run_on_model(function);
             ngraph::pass::ConstantFolding().run_on_model(function);
         }
+        if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+            configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                                  InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+        }
     }
     TEST_P(CodegenGelu, CompareWithRefImpl) {
-        Run();
+        run();
     };

View File

@@ -6,22 +6,27 @@
 #include "snippets/convert.hpp"
 #include "subgraph_converts.hpp"
 #include "common_test_utils/ov_tensor_utils.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"

 namespace ov {
 namespace test {
 namespace snippets {

 std::string Convert::getTestCaseName(testing::TestParamInfo<ov::test::snippets::ConvertParams> obj) {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::pair<std::vector<ov::element::Type>, std::vector<ov::element::Type>> types;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(inputShape, types, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS=";
-    for (const auto& sh : inputShape)
-        result << ov::test::utils::vec2str(sh.get_shape()) << "_";
+    for (size_t i = 0; i < inputShape.size(); ++i) {
+        result << "IS[" << i << "]=" << ov::test::utils::partialShape2str({inputShape[i].first}) << "_";
+        result << "TS[" << i << "]=";
+        for (const auto& shape : inputShape[i].second) {
+            result << ov::test::utils::vec2str(shape) << "_";
+        }
+    }
     result << "IT=" << ov::test::utils::vec2str(types.first) << "_";
     result << "OT=" << ov::test::utils::vec2str(types.second) << "_";
     result << "#N=" << num_nodes << "_";
@@ -31,13 +36,17 @@ std::string Convert::getTestCaseName(testing::TestParamInfo<ov::test::snippets::
 }

 void Convert::SetUp() {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::pair<std::vector<ov::element::Type>, std::vector<ov::element::Type>> types;
     std::tie(inputShape, types, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(inputShape));
-    auto f = ov::test::snippets::ConvertFunction(inputShape, types.first[0], types.second[0]);
+    init_input_shapes(inputShape);
+    auto f = ov::test::snippets::ConvertFunction(inputDynamicShapes, types.first[0], types.second[0]);
     function = f.getOriginal();
     output_type = types.second.front();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 parameters Convert::generate_params_random() const {
@@ -84,12 +93,16 @@ void Convert::generate_inputs(const std::vector<ov::Shape>& targetInputStaticSha
 }

 void ConvertInput::SetUp() {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::pair<std::vector<ov::element::Type>, std::vector<ov::element::Type>> types;
     std::tie(inputShape, types, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(inputShape));
-    auto f = ov::test::snippets::ConvertInputFunction(inputShape, types.first[0], types.second[0]);
+    init_input_shapes(inputShape);
+    auto f = ov::test::snippets::ConvertInputFunction(inputDynamicShapes, types.first[0], types.second[0]);
     function = f.getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 parameters ConvertInput::generate_params_random() const {
@@ -123,65 +136,89 @@ parameters ConvertInput::generate_params_random() const {
 }

 void ConvertOutput::SetUp() {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::pair<std::vector<ov::element::Type>, std::vector<ov::element::Type>> types;
     std::tie(inputShape, types, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(inputShape));
-    auto f = ov::test::snippets::ConvertOutputFunction(inputShape, types.first[0], types.second[0]);
+    init_input_shapes(inputShape);
+    auto f = ov::test::snippets::ConvertOutputFunction(inputDynamicShapes, types.first[0], types.second[0]);
     function = f.getOriginal();
     output_type = types.second.front();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 void ConvertStub::SetUp() {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::pair<std::vector<ov::element::Type>, std::vector<ov::element::Type>> types;
     std::tie(inputShape, types, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(inputShape));
-    auto f = ov::test::snippets::ConvertStubFunction(inputShape, types.first[0], types.second[0]);
+    init_input_shapes(inputShape);
+    auto f = ov::test::snippets::ConvertStubFunction(inputDynamicShapes, types.first[0], types.second[0]);
     function = f.getOriginal();
     output_type = types.second.front();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 void ConvertPartialInputsAndResults::SetUp() {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::pair<std::vector<ov::element::Type>, std::vector<ov::element::Type>> types;
     std::tie(inputShape, types, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(inputShape));
-    auto f = ov::test::snippets::ConvertPartialInputsAndResultsFunction(inputShape, types.first, types.second);
+    init_input_shapes(inputShape);
+    auto f = ov::test::snippets::ConvertPartialInputsAndResultsFunction(inputDynamicShapes, types.first, types.second);
     function = f.getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 void ConvertManyOnInputs::SetUp() {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::pair<std::vector<ov::element::Type>, std::vector<ov::element::Type>> types;
     std::tie(inputShape, types, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(inputShape));
-    auto f = ov::test::snippets::ConvertManyOnInputsFunction(inputShape, types.first);
+    init_input_shapes(inputShape);
+    auto f = ov::test::snippets::ConvertManyOnInputsFunction(inputDynamicShapes, types.first);
     function = f.getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 void ConvertManyOnOutputs::SetUp() {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::pair<std::vector<ov::element::Type>, std::vector<ov::element::Type>> types;
     std::tie(inputShape, types, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(inputShape));
-    auto f = ov::test::snippets::ConvertManyOnOutputsFunction(inputShape, types.first);
+    init_input_shapes(inputShape);
+    auto f = ov::test::snippets::ConvertManyOnOutputsFunction(inputDynamicShapes, types.first);
     function = f.getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 void ConvertManyOnInputOutput::SetUp() {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::pair<std::vector<ov::element::Type>, std::vector<ov::element::Type>> types;
     std::tie(inputShape, types, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(inputShape));
-    auto f = ov::test::snippets::ConvertManyOnInputOutputFunction(inputShape, types.first, types.second);
+    init_input_shapes(inputShape);
+    auto f = ov::test::snippets::ConvertManyOnInputOutputFunction(inputDynamicShapes, types.first, types.second);
     function = f.getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 TEST_P(Convert, CompareWithRefImpl) {
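For reference, the ov::test::InputShape type these tests now take is a pair of a (possibly dynamic) ov::PartialShape and the list of static shapes to run inference on. A minimal sketch with illustrative dimension values:

// Sketch (illustrative values): one dynamic input covering three target shapes.
// InputShape is std::pair<ov::PartialShape, std::vector<ov::Shape>> in the
// subgraph test infrastructure; -1 marks a dynamic dimension.
ov::test::InputShape dyn_shape{
    ov::PartialShape{-1, -1, -1, -1},                                       // compiled once, dynamic
    {ov::Shape{1, 3, 2, 8}, ov::Shape{1, 3, 4, 16}, ov::Shape{1, 3, 2, 8}}  // one static shape per inference
};
// init_input_shapes({dyn_shape}) exposes the dynamic part as
// inputDynamicShapes[0] and iterates the static shapes per inference request,
// which is what lets one compiled subgraph be exercised on several shapes.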


@@ -5,20 +5,30 @@
 #include "common_test_utils/common_utils.hpp"
 #include "snippets/eltwise_two_results.hpp"
 #include "subgraph_simple.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"

 namespace ov {
 namespace test {
 namespace snippets {

 std::string EltwiseTwoResults::getTestCaseName(testing::TestParamInfo<ov::test::snippets::EltwiseTwoResultsParams> obj) {
-    ov::Shape inputShapes0, inputShapes1;
+    InputShape inputShapes0, inputShapes1;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(inputShapes0, inputShapes1, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS[0]=" << ov::test::utils::vec2str(inputShapes0) << "_";
-    result << "IS[1]=" << ov::test::utils::vec2str(inputShapes1) << "_";
+    result << "IS=" << ov::test::utils::partialShape2str({inputShapes0.first}) << "_"
+           << ov::test::utils::partialShape2str({inputShapes1.first}) << "_";
+    result << "TS[0]=";
+    for (const auto& item : inputShapes0.second) {
+        result << ov::test::utils::vec2str(item) << "_";
+    }
+    result << "TS[1]=";
+    for (const auto& item : inputShapes1.second) {
+        result << ov::test::utils::vec2str(item) << "_";
+    }
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
     result << "targetDevice=" << targetDevice;
@@ -26,12 +36,16 @@ std::string EltwiseTwoResults::getTestCaseName(testing::TestParamInfo<ov::test::
 }

 void EltwiseTwoResults::SetUp() {
-    ov::Shape inputShape0, inputShape1;
+    InputShape inputShape0, inputShape1;
     std::tie(inputShape0, inputShape1, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes({{{}, {inputShape0, }}, {{}, {inputShape1, }}});
-    auto f = ov::test::snippets::EltwiseTwoResultsFunction({inputShape0, inputShape1});
+    init_input_shapes({inputShape0, inputShape1});
+    auto f = ov::test::snippets::EltwiseTwoResultsFunction({inputDynamicShapes[0], inputDynamicShapes[1]});
     function = f.getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 TEST_P(EltwiseTwoResults, CompareWithRefImpl) {
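A sketch of how a parameter set feeds the serialization above (values illustrative; the exact separators come from partialShape2str and vec2str in the test utils):

// Sketch: building one EltwiseTwoResults parameter with two dynamic inputs,
// each exercised on two static shapes per test run.
ov::test::InputShape in0{{-1, -1}, {{2, 3}, {4, 3}}};
ov::test::InputShape in1{{-1, -1}, {{2, 3}, {4, 3}}};
auto params = std::make_tuple(in0, in1, size_t{2}, size_t{1}, std::string{"CPU"});
// getTestCaseName(params) then encodes both the dynamic shapes (IS=) and the
// per-inference target shapes (TS[0]=, TS[1]=) into the test name, so cache
// hits and recompilations across target shapes remain distinguishable in logs.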


@@ -5,19 +5,25 @@
 #include "common_test_utils/common_utils.hpp"
 #include "snippets/max_num_params_eltwise.hpp"
 #include "subgraph_simple.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"

 namespace ov {
 namespace test {
 namespace snippets {

 std::string MaxNumParamsEltwise::getTestCaseName(testing::TestParamInfo<ov::test::snippets::MaxNumParamsEltwiseParams> obj) {
-    ov::Shape inputShapes;
+    ov::test::InputShape inputShapes;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(inputShapes, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS[0]=" << ov::test::utils::vec2str(inputShapes) << "_";
+    result << "IS[0]=" << ov::test::utils::partialShape2str({inputShapes.first}) << "_";
+    result << "TS[0]=";
+    for (const auto& item : inputShapes.second) {
+        result << ov::test::utils::vec2str(item) << "_";
+    }
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
     result << "targetDevice=" << targetDevice;
@@ -25,18 +31,17 @@ std::string MaxNumParamsEltwise::getTestCaseName(testing::TestParamInfo<ov::test
 }

 void MaxNumParamsEltwise::SetUp() {
-    ov::Shape inputShape;
+    ov::test::InputShape inputShape;
     std::tie(inputShape, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    std::vector<ov::PartialShape> expandedShapes(10, inputShape);
-    std::vector<InputShape> input_shapes;
-    for (const auto& s : expandedShapes) {
-        input_shapes.emplace_back(InputShape {{}, {s.get_shape(), }});
-    }
-    init_input_shapes(input_shapes);
-    auto f = ov::test::snippets::EltwiseMaxNumParamsFunction(expandedShapes);
+    std::vector<ov::test::InputShape> expandedShapes(10, inputShape);
+    init_input_shapes(expandedShapes);
+    auto f = ov::test::snippets::EltwiseMaxNumParamsFunction(inputDynamicShapes);
     function = f.getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 TEST_P(MaxNumParamsEltwise, CompareWithRefImpl) {
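The replication now happens in one line via the std::vector fill constructor; since InputShape already carries both the dynamic shape and the target shapes, no per-element conversion loop is needed. A sketch with illustrative values:

// Sketch: one InputShape replicated for all ten model inputs, so every
// Parameter shares the same dynamic shape and per-inference static shapes.
ov::test::InputShape one{{-1, -1, -1, -1}, {{1, 64, 10, 10}, {1, 64, 16, 16}}};
std::vector<ov::test::InputShape> expanded(10, one);  // ten identical entries
// After init_input_shapes(expanded), inputDynamicShapes.size() == 10 and the
// subgraph builder receives ten dynamic parameter shapes.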


@@ -6,21 +6,27 @@
 #include "common_test_utils/common_utils.hpp"
 #include "precision_propagation_convertion_function.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"

 namespace ov {
 namespace test {
 namespace snippets {

 std::string PrecisionPropagationConvertion::getTestCaseName(testing::TestParamInfo<PrecisionPropagationParams> obj) {
-    std::vector<ov::PartialShape> input_shapes;
+    std::vector<InputShape> input_shapes;
     std::vector<float> fake_quantize_intervals;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(input_shapes, fake_quantize_intervals, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    for (size_t i = 0; i < input_shapes.size(); ++i)
-        result << "IS[" << i << "]=" << input_shapes[i] << "_";
+    for (size_t i = 0; i < input_shapes.size(); ++i) {
+        result << "IS[" << i << "]=" << ov::test::utils::partialShape2str({input_shapes[i].first}) << "_";
+        result << "TS[" << i << "]=";
+        for (const auto& shape : input_shapes[i].second) {
+            result << "(" << ov::test::utils::vec2str(shape) << ")_";
+        }
+    }
     for (size_t i = 0; i < fake_quantize_intervals.size(); ++i)
         result << "FQ[" << i << "]=" << fake_quantize_intervals[i] << "_";
     result << "#N=" << num_nodes << "_";
@@ -30,12 +36,16 @@ std::string PrecisionPropagationConvertion::getTestCaseName(testing::TestParamIn
 }

 void PrecisionPropagationConvertion::SetUp() {
-    std::vector<ov::PartialShape> input_shapes;
+    std::vector<InputShape> input_shapes;
     std::vector<float> fake_quantize_intervals;
     std::tie(input_shapes, fake_quantize_intervals, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(input_shapes));
-    function = PrecisionPropagationConvertionFunction(input_shapes, ov::element::f32, fake_quantize_intervals).getOriginal();
+    init_input_shapes(input_shapes);
+    function = PrecisionPropagationConvertionFunction(inputDynamicShapes, ov::element::f32, fake_quantize_intervals).getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 TEST_P(PrecisionPropagationConvertion, CompareWithRefImpl) {


@@ -13,11 +13,12 @@ namespace test {
 namespace snippets {

 namespace {
-void generate_data(std::map<std::shared_ptr<ov::Node>, ov::Tensor>& data_inputs, const std::vector<ov::Output<ov::Node>>& model_inputs) {
+void generate_data(std::map<std::shared_ptr<ov::Node>, ov::Tensor>& data_inputs, const std::vector<ov::Output<ov::Node>>& model_inputs,
+                   const std::vector<ngraph::Shape>& targetInputStaticShapes) {
     data_inputs.clear();
-    auto tensor_bool = ov::test::utils::create_and_fill_tensor(model_inputs[0].get_element_type(), model_inputs[0].get_shape(), 3, -1, 2);
-    auto tensor0 = ov::test::utils::create_and_fill_tensor(model_inputs[1].get_element_type(), model_inputs[1].get_shape(), 10, -10, 2);
-    auto tensor1 = ov::test::utils::create_and_fill_tensor(model_inputs[2].get_element_type(), model_inputs[2].get_shape(), 10, 0, 2);
+    auto tensor_bool = ov::test::utils::create_and_fill_tensor(model_inputs[0].get_element_type(), targetInputStaticShapes[0], 3, -1, 2);
+    auto tensor0 = ov::test::utils::create_and_fill_tensor(model_inputs[1].get_element_type(), targetInputStaticShapes[1], 10, -10, 2);
+    auto tensor1 = ov::test::utils::create_and_fill_tensor(model_inputs[2].get_element_type(), targetInputStaticShapes[2], 10, 0, 2);
     data_inputs.insert({model_inputs[0].get_node_shared_ptr(), tensor_bool});
     data_inputs.insert({model_inputs[1].get_node_shared_ptr(), tensor0});
     data_inputs.insert({model_inputs[2].get_node_shared_ptr(), tensor1});
@@ -25,16 +26,28 @@ void generate_data(std::map<std::shared_ptr<ov::Node>, ov::Tensor>& data_inputs,
 } // namespace

 std::string Select::getTestCaseName(testing::TestParamInfo<ov::test::snippets::SelectParams> obj) {
-    ov::Shape inputShapes0, inputShapes1, inputShapes2;
+    InputShape inputShapes0, inputShapes1, inputShapes2;
     ov::element::Type type;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(inputShapes0, inputShapes1, inputShapes2, type, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS[0]=" << ov::test::utils::vec2str(inputShapes0) << "_";
-    result << "IS[1]=" << ov::test::utils::vec2str(inputShapes1) << "_";
-    result << "IS[2]=" << ov::test::utils::vec2str(inputShapes2) << "_";
+    result << "IS[0]=" << ov::test::utils::partialShape2str({inputShapes0.first}) << "_";
+    result << "TS[0]=";
+    for (const auto& shape : inputShapes0.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[1]=" << ov::test::utils::partialShape2str({inputShapes1.first}) << "_";
+    result << "TS[1]=";
+    for (const auto& shape : inputShapes1.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[2]=" << ov::test::utils::partialShape2str({inputShapes2.first}) << "_";
+    result << "TS[2]=";
+    for (const auto& shape : inputShapes2.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
     result << "T=" << type << "_";
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
@@ -43,12 +56,12 @@ std::string Select::getTestCaseName(testing::TestParamInfo<ov::test::snippets::S
 }

 void Select::SetUp() {
-    ov::Shape inputShape0, inputShape1, inputShape2;
+    InputShape inputShape0, inputShape1, inputShape2;
     ov::element::Type type;
     std::tie(inputShape0, inputShape1, inputShape2, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_shapes_to_test_representation({inputShape0, inputShape1, inputShape2}));
-    auto f = ov::test::snippets::SelectFunction({inputShape0, inputShape1, inputShape2});
+    init_input_shapes({{inputShape0}, {inputShape1}, {inputShape2}});
+    auto f = ov::test::snippets::SelectFunction(inputDynamicShapes);
     function = f.getOriginal();
     if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
@@ -58,21 +71,34 @@ void Select::SetUp() {
 }

 void Select::generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) {
-    generate_data(inputs, function->inputs());
+    generate_data(inputs, function->inputs(), targetInputStaticShapes);
 }

 std::string BroadcastSelect::getTestCaseName(testing::TestParamInfo<ov::test::snippets::BroadcastSelectParams> obj) {
-    ov::Shape inputShapes0, inputShapes1, inputShapes2, broadcastShape;
+    InputShape inputShapes0, inputShapes1, inputShapes2;
+    ov::PartialShape broadcastShape;
     ov::element::Type type;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(inputShapes0, inputShapes1, inputShapes2, broadcastShape, type, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS[0]=" << ov::test::utils::vec2str(inputShapes0) << "_";
-    result << "IS[1]=" << ov::test::utils::vec2str(inputShapes1) << "_";
-    result << "IS[2]=" << ov::test::utils::vec2str(inputShapes2) << "_";
-    result << "BS=" << ov::test::utils::vec2str(broadcastShape) << "_";
+    result << "IS[0]=" << ov::test::utils::partialShape2str({inputShapes0.first}) << "_";
+    result << "TS[0]=";
+    for (const auto& shape : inputShapes0.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[1]=" << ov::test::utils::partialShape2str({inputShapes1.first}) << "_";
+    result << "TS[1]=";
+    for (const auto& shape : inputShapes1.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[2]=" << ov::test::utils::partialShape2str({inputShapes2.first}) << "_";
+    result << "TS[2]=";
+    for (const auto& shape : inputShapes2.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS_Broadcast=" << ov::test::utils::partialShape2str({broadcastShape}) << "_";
     result << "T=" << type << "_";
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
@@ -81,12 +107,13 @@ std::string BroadcastSelect::getTestCaseName(testing::TestParamInfo<ov::test::sn
 }

 void BroadcastSelect::SetUp() {
-    ov::Shape inputShape0, inputShape1, inputShape2, broadcastShape;
+    InputShape inputShape0, inputShape1, inputShape2;
+    ov::PartialShape broadcastShape;
     ov::element::Type type;
     std::tie(inputShape0, inputShape1, inputShape2, broadcastShape, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_shapes_to_test_representation({inputShape0, inputShape1, inputShape2}));
-    auto f = ov::test::snippets::BroadcastSelectFunction({inputShape0, inputShape1, inputShape2}, broadcastShape);
+    init_input_shapes({inputShape0, inputShape1, inputShape2});
+    auto f = ov::test::snippets::BroadcastSelectFunction({inputDynamicShapes[0], inputDynamicShapes[1], inputDynamicShapes[2]}, broadcastShape);
     function = f.getOriginal();
     if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
@@ -96,7 +123,7 @@ void BroadcastSelect::SetUp() {
 }

 void BroadcastSelect::generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) {
-    generate_data(inputs, function->inputs(), targetInputStaticShapes);
+    generate_data(inputs, function->inputs(), targetInputStaticShapes);
 }

 TEST_P(Select, CompareWithRefImpl) {
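Threading targetInputStaticShapes through to generate_data matters once inputs are dynamic: Output::get_shape() throws on a Parameter with a dynamic shape, while the target shapes handed to generate_inputs are always static. A minimal sketch of the same pattern, generalized to any input count (the helper name is illustrative; the fill arguments match the range/start/resolution convention used above):

// Sketch: fill one tensor per input from the *static* target shape rather
// than from the (possibly dynamic) Parameter shape.
void fill_from_targets(std::map<std::shared_ptr<ov::Node>, ov::Tensor>& data,
                       const std::vector<ov::Output<ov::Node>>& model_inputs,
                       const std::vector<ov::Shape>& target_shapes) {
    data.clear();
    for (size_t i = 0; i < model_inputs.size(); ++i) {
        auto tensor = ov::test::utils::create_and_fill_tensor(
            model_inputs[i].get_element_type(), target_shapes[i], 10, -10, 2);
        data.insert({model_inputs[i].get_node_shared_ptr(), tensor});
    }
}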


@@ -14,14 +14,18 @@ namespace test {
 namespace snippets {

 std::string Softmax::getTestCaseName(testing::TestParamInfo<ov::test::snippets::SoftmaxParams> obj) {
-    ov::Shape inputShapes;
+    InputShape inputShapes;
     int axis;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(inputShapes, axis, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_";
+    result << "IS=" << ov::test::utils::partialShape2str({inputShapes.first}) << "_";
+    result << "TS=";
+    for (const auto& shape : inputShapes.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
     result << "Axis=" << axis << "_";
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
@@ -30,12 +34,12 @@ std::string Softmax::getTestCaseName(testing::TestParamInfo<ov::test::snippets::
 }

 void Softmax::SetUp() {
-    ov::Shape inputShape;
+    InputShape inputShape;
     int axis;
     std::tie(inputShape, axis, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes({{{}, {inputShape, }}});
-    auto f = ov::test::snippets::SoftmaxFunction({inputShape}, axis);
+    init_input_shapes({inputShape});
+    auto f = ov::test::snippets::SoftmaxFunction(inputDynamicShapes, axis);
     function = f.getOriginal();
     if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
@@ -45,15 +49,23 @@ void Softmax::SetUp() {
 }

 std::string AddSoftmax::getTestCaseName(testing::TestParamInfo<ov::test::snippets::AddSoftmaxParams> obj) {
-    std::pair<ov::Shape, ov::Shape> inputShapes;
+    std::pair<InputShape, InputShape> inputShapes;
     int axis;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(inputShapes, axis, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS[0]=" << ov::test::utils::vec2str(inputShapes.first) << "_";
-    result << "IS[1]=" << ov::test::utils::vec2str(inputShapes.second) << "_";
+    result << "IS[0]=" << ov::test::utils::partialShape2str({inputShapes.first.first}) << "_";
+    result << "TS[0]=";
+    for (const auto& shape : inputShapes.first.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[1]=" << ov::test::utils::partialShape2str({inputShapes.second.first}) << "_";
+    result << "TS[1]=";
+    for (const auto& shape : inputShapes.second.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
     result << "Axis=" << axis << "_";
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
@@ -62,12 +74,12 @@ std::string AddSoftmax::getTestCaseName(testing::TestParamInfo<ov::test::snippet
 }

 void AddSoftmax::SetUp() {
-    std::pair<ov::Shape, ov::Shape> inputShapes;
+    std::pair<InputShape, InputShape> inputShapes;
     int axis;
     std::tie(inputShapes, axis, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes({{{}, {inputShapes.first, }}, {{}, {inputShapes.second, }}});
-    auto f = ov::test::snippets::AddSoftmaxFunction({inputShapes.first, inputShapes.second}, axis);
+    init_input_shapes({inputShapes.first, inputShapes.second});
+    auto f = ov::test::snippets::AddSoftmaxFunction(inputDynamicShapes, axis);
     function = f.getOriginal();
     if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
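With InputShape parameters, one softmax case can now exercise several static shapes against a single dynamically-shaped model, which is exactly the recompile-and-cache path this commit adds. A sketch of such a parameter (dimension and axis values illustrative):

// Sketch: one dynamic rank-3 softmax input run over two target shapes;
// the kernel is recompiled (or fetched from the cache) per static shape.
ov::test::InputShape shape{{-1, -1, -1}, {{1, 16, 64}, {2, 8, 128}}};
int axis = 2;  // reduce over the innermost of the three dimensions
auto params = std::make_tuple(shape, axis, size_t{1}, size_t{1}, std::string{"CPU"});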


@@ -6,22 +6,35 @@
 #include "snippets/three_inputs_eltwise.hpp"
 #include "subgraph_simple.hpp"
 #include "functional_test_utils/skip_tests_config.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"

 namespace ov {
 namespace test {
 namespace snippets {

 std::string ThreeInputsEltwise::getTestCaseName(testing::TestParamInfo<ov::test::snippets::ThreeInputsEltwiseParams> obj) {
-    ov::Shape inputShapes0, inputShapes1, inputShapes2;
+    InputShape inputShapes0, inputShapes1, inputShapes2;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(inputShapes0, inputShapes1, inputShapes2,
             num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    result << "IS[0]=" << ov::test::utils::vec2str(inputShapes0) << "_";
-    result << "IS[1]=" << ov::test::utils::vec2str(inputShapes1) << "_";
-    result << "IS[2]=" << ov::test::utils::vec2str(inputShapes2) << "_";
+    result << "IS[0]=" << ov::test::utils::partialShape2str({inputShapes0.first}) << "_";
+    result << "TS[0]=";
+    for (const auto& shape : inputShapes0.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[1]=" << ov::test::utils::partialShape2str({inputShapes1.first}) << "_";
+    result << "TS[1]=";
+    for (const auto& shape : inputShapes1.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
+    result << "IS[2]=" << ov::test::utils::partialShape2str({inputShapes2.first}) << "_";
+    result << "TS[2]=";
+    for (const auto& shape : inputShapes2.second) {
+        result << "(" << ov::test::utils::vec2str(shape) << ")_";
+    }
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
     result << "targetDevice=" << targetDevice;
@@ -29,13 +42,17 @@ std::string ThreeInputsEltwise::getTestCaseName(testing::TestParamInfo<ov::test:
 }

 void ThreeInputsEltwise::SetUp() {
-    ov::Shape inputShape0, inputShape1, inputShape2;
+    InputShape inputShape0, inputShape1, inputShape2;
     std::tie(inputShape0, inputShape1, inputShape2,
             ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes({{{}, {inputShape0, }}, {{}, {inputShape1, }}, {{}, {inputShape2, }}});
-    auto f = ov::test::snippets::EltwiseThreeInputsFunction({inputShape0, inputShape1, inputShape2});
+    init_input_shapes({inputShape0, inputShape1, inputShape2});
+    auto f = ov::test::snippets::EltwiseThreeInputsFunction(inputDynamicShapes);
    function = f.getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 TEST_P(ThreeInputsEltwise, CompareWithRefImpl) {


@@ -5,20 +5,26 @@
 #include "common_test_utils/common_utils.hpp"
 #include "snippets/two_inputs_and_outputs.hpp"
 #include "subgraph_simple.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"

 namespace ov {
 namespace test {
 namespace snippets {

 std::string TwoInputsAndOutputs::getTestCaseName(testing::TestParamInfo<ov::test::snippets::TwoInputsAndOutputsParams> obj) {
-    std::vector<ov::PartialShape> inputShapes;
+    std::vector<InputShape> inputShapes;
     std::string targetDevice;
     size_t num_nodes, num_subgraphs;
     std::tie(inputShapes, num_nodes, num_subgraphs, targetDevice) = obj.param;
     std::ostringstream result;
-    for (auto i = 0; i < inputShapes.size(); i++)
-        result << "IS[" << i << "]=" << ov::test::utils::vec2str(inputShapes[i].get_shape()) << "_";
+    for (size_t i = 0; i < inputShapes.size(); ++i) {
+        result << "IS[" << i << "]=" << ov::test::utils::partialShape2str({inputShapes[i].first}) << "_";
+        result << "TS[" << i << "]=";
+        for (const auto& shape : inputShapes[i].second) {
+            result << "(" << ov::test::utils::vec2str(shape) << ")_";
+        }
+    }
     result << "#N=" << num_nodes << "_";
     result << "#S=" << num_subgraphs << "_";
     result << "targetDevice=" << targetDevice;
@@ -26,19 +32,27 @@ std::string TwoInputsAndOutputs::getTestCaseName(testing::TestParamInfo<ov::test
 }

 void TwoInputsAndOutputs::SetUp() {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::tie(inputShape, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(inputShape));
-    auto f = ov::test::snippets::TwoInputsAndOutputsFunction(inputShape);
+    init_input_shapes(inputShape);
+    auto f = ov::test::snippets::TwoInputsAndOutputsFunction(inputDynamicShapes);
     function = f.getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 void TwoInputsAndOutputsWithReversedOutputs::SetUp() {
-    std::vector<ov::PartialShape> inputShape;
+    std::vector<InputShape> inputShape;
     std::tie(inputShape, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
-    init_input_shapes(static_partial_shapes_to_test_representation(inputShape));
-    auto f = ov::test::snippets::TwoInputsAndOutputsWithReversedOutputsFunction(inputShape);
+    init_input_shapes(inputShape);
+    auto f = ov::test::snippets::TwoInputsAndOutputsWithReversedOutputsFunction(inputDynamicShapes);
     function = f.getOriginal();
+    if (!configuration.count(InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE)) {
+        configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE,
+                              InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK});
+    }
 }

 TEST_P(TwoInputsAndOutputs, CompareWithRefImpl) {


@@ -36,12 +36,14 @@ protected:
 // todo: remove Sinh once "no subgraph after input" limitation is relaxed
 class AddConstFunction : public SnippetsFunctionBase {
 public:
-    explicit AddConstFunction(const std::vector<PartialShape>& inputShapes) : SnippetsFunctionBase(inputShapes) {
+    explicit AddConstFunction(const std::vector<PartialShape>& inputShapes, const PartialShape& constShape) :
+        SnippetsFunctionBase(inputShapes), m_const_shape(constShape) {
         NGRAPH_CHECK(input_shapes.size() == 1, "Got invalid number of input shapes");
-        NGRAPH_CHECK(input_shapes[0].is_static(), "This test supports only static shapes");
+        NGRAPH_CHECK(m_const_shape.is_static(), "Const shape must be static shape");
     }
 protected:
     std::shared_ptr<ov::Model> initOriginal() const override;
+    PartialShape m_const_shape;
     // std::shared_ptr<ov::Model> initReference() const override;
 };

 // Function is to check for different model precision
@@ -52,11 +54,12 @@ protected:
 //        Add
 //      Result
 // The function is needed to check different input element types (model precision change)
-class AddRollConstFunction : public SnippetsFunctionBase {
+class AddRollConstFunction : public AddConstFunction {
 public:
-    explicit AddRollConstFunction(const std::vector<PartialShape>& inputShapes) : SnippetsFunctionBase(inputShapes) {
+    explicit AddRollConstFunction(const std::vector<PartialShape>& inputShapes, const PartialShape& constShape) :
+        AddConstFunction(inputShapes, constShape) {
         NGRAPH_CHECK(input_shapes.size() == 1, "Got invalid number of input shapes");
-        NGRAPH_CHECK(input_shapes[0].is_static(), "Only static shapes are supported");
+        NGRAPH_CHECK(m_const_shape[0].is_static(), "Const shape must be static shape");
     }
 protected:
     std::shared_ptr<ov::Model> initOriginal() const override;
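Splitting constShape off from inputShapes is what unlocks dynamism in these builders: the Parameter may now carry a dynamic shape, while the Constant must still be materialized with a concrete one, which the NGRAPH_CHECKs above enforce. A hypothetical instantiation (values illustrative):

// Sketch: dynamic data input, static broadcastable constant.
std::vector<ov::PartialShape> input_shapes{{-1, -1, -1, -1}};  // dynamic input
ov::PartialShape const_shape{1, 1, 1, 8};                      // must be static
auto fn = ov::test::snippets::AddConstFunction(input_shapes, const_shape);
auto model = fn.getOriginal();  // Parameter -> Add(Constant) -> Result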


@@ -28,16 +28,15 @@ std::shared_ptr<ov::Model> AddFunction::initReference() const {
 }

 std::shared_ptr<ov::Model> AddConstFunction::initOriginal() const {
     auto data0 = std::make_shared<op::v0::Parameter>(precision, input_shapes[0]);
-    const std::vector<float> const_values = ov::test::utils::generate_float_numbers(shape_size(input_shapes[0].get_shape()), -10., 10.);
-    auto const_data1 = std::make_shared<op::v0::Constant>(precision, input_shapes[0].get_shape(), const_values);
+    const std::vector<float> const_values = ov::test::utils::generate_float_numbers(shape_size(m_const_shape.get_shape()), -10., 10.);
+    auto const_data1 = std::make_shared<op::v0::Constant>(precision, m_const_shape.get_shape(), const_values);
     auto add = std::make_shared<op::v1::Add>(data0, const_data1);
     return std::make_shared<ov::Model>(NodeVector{add}, ParameterVector{data0});
 }

 std::shared_ptr<ov::Model> AddRollConstFunction::initOriginal() const {
-    const auto input_shape = input_shapes[0].get_shape();
-    auto data0 = std::make_shared<op::v0::Parameter>(precision, input_shape);
-    const std::vector<float> const_values = ov::test::utils::generate_float_numbers(shape_size(input_shape), -10., 10.);
-    auto const_data1 = std::make_shared<op::v0::Constant>(precision, input_shape, const_values);
+    auto data0 = std::make_shared<op::v0::Parameter>(precision, input_shapes[0]);
+    const std::vector<float> const_values = ov::test::utils::generate_float_numbers(shape_size(m_const_shape.get_shape()), -10., 10.);
+    auto const_data1 = std::make_shared<op::v0::Constant>(precision, m_const_shape.get_shape(), const_values);
     auto shift = std::make_shared<op::v0::Constant>(ov::element::i32, ov::Shape{1}, std::vector<float>{1});
     auto axes = std::make_shared<op::v0::Constant>(ov::element::i32, ov::Shape{1}, std::vector<float>{0});
     auto roll0 = std::make_shared<ov::op::v7::Roll>(data0, shift, axes);
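The excerpt ends here, before the function returns. Following the pattern AddConstFunction establishes just above, the rolled input would presumably be added to the constant and wrapped into a single-output model, roughly (a reconstruction for readability, not verbatim from the commit):

// Likely continuation (reconstructed, not verbatim): combine the rolled data
// with the constant and return the model, mirroring AddConstFunction.
auto add = std::make_shared<op::v1::Add>(roll0, const_data1);
return std::make_shared<ov::Model>(NodeVector{add}, ParameterVector{data0});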