[CONFORMANCE][TOOLS] New SubgraphsDumper (#17836)

* New Subgraph Dumper -> Cache + Meta

* iCache tests

* tests_meta

* OpCache tests

* Move & refactor matchers

* fix all tests and refactor meta

* Prepare for review

* Add caching types

* Model testing + improvement

* Add ignored ports. Add tests for node utils

* fix build

* Fix compilation error

* fix arm build

* fix win?

* revert changes in file util

* Update file_util.hpp

* Update constant.hpp
Irina Efode 2023-06-24 00:12:47 +04:00 committed by GitHub
parent 92fbf96300
commit aefaf74a08
34 changed files with 2339 additions and 0 deletions

View File

@ -7,3 +7,5 @@ add_subdirectory(test_runner/api_conformance_runner)
add_subdirectory(test_runner/op_conformance_runner)
add_subdirectory(subgraphs_dumper)
add_subdirectory(subgraphs_dumper/tests)
add_subdirectory(subgraphs_dumper_new)
add_subdirectory(subgraphs_dumper_new/tests)

View File

@ -0,0 +1,57 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME subgraphs_dumper)
list(APPEND LIBRARIES
gflags
inference_engine
funcTestUtils
openvino::pugixml
)
if(ENABLE_OV_IR_FRONTEND)
list(APPEND DEPENDENCIES openvino_ir_frontend)
endif()
if(ENABLE_OV_ONNX_FRONTEND)
list(APPEND DEPENDENCIES openvino_onnx_frontend)
endif()
if(ENABLE_OV_PADDLE_FRONTEND)
list(APPEND DEPENDENCIES openvino_paddle_frontend)
endif()
# TODO: use convert_model and save to IR instead of this
if(ENABLE_OV_PYTORCH_FRONTEND)
list(APPEND DEPENDENCIES openvino_pytorch_frontend)
endif()
if(ENABLE_OV_TF_FRONTEND)
list(APPEND DEPENDENCIES openvino_tensorflow_frontend)
endif()
if(ENABLE_OV_TF_LITE_FRONTEND)
list(APPEND DEPENDENCIES openvino_tensorflow_lite_frontend)
endif()
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}/src
INCLUDES
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/include
LINK_LIBRARIES
PRIVATE
${LIBRARIES}
DEPENDENCIES
${DEPENDENCIES}
ADD_CPPLINT
)
ie_faster_build(${TARGET_NAME} UNITY)

View File

@ -0,0 +1,39 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <memory>
#include "openvino/openvino.hpp"
#include "cache/meta/meta_info.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
class ICache {
public:
virtual void update_cache(const std::shared_ptr<ov::Model>& model,
const std::string& source_model, bool extract_body = true) {};
virtual void serialize_cache() {};
void set_serialization_dir(const std::string& serialization_dir) {
m_serialization_dir = serialization_dir;
}
protected:
size_t m_serialization_timeout = 60;
// NOLINT Static/global string variables are not permitted
std::string m_serialization_dir = ".";
ICache() = default;
bool serialize_model(const std::pair<std::shared_ptr<ov::Model>, MetaInfo>& graph_info,
const std::string& rel_serialization_path);
};
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
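For orientation, a minimal usage sketch (not part of the diff) of how any ICache implementation is meant to be driven; the directory name and model paths below are placeholders:

#include <map>
#include <memory>
#include <string>
#include <vector>
#include "cache/cache.hpp"
// Hedged sketch: feed already-read models into an arbitrary set of caches,
// then serialize everything into one output directory.
inline void drive_caches(std::vector<std::shared_ptr<ov::tools::subgraph_dumper::ICache>>& caches,
                         const std::map<std::string, std::shared_ptr<ov::Model>>& models_by_path) {
    for (auto& cache : caches) {
        for (const auto& item : models_by_path) {
            // the second argument is the source model path, recorded in the meta info
            cache->update_cache(item.second, item.first, /*extract_body=*/true);
        }
        cache->set_serialization_dir("serialized_cache");  // placeholder; defaults to "."
        cache->serialize_cache();
    }
}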

View File

@ -0,0 +1,39 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "cache/cache.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
class GraphCache final : public virtual ICache {
public:
void update_cache(const std::shared_ptr<ov::Model>& model, const std::string& model_meta_data,
bool extract_body = true) override;
void serialize_cache() override;
static std::shared_ptr<GraphCache>& get() {
if (m_cache_instance == nullptr) {
m_cache_instance = std::shared_ptr<GraphCache>(new GraphCache);
}
return m_cache_instance;
}
static void reset() {
m_cache_instance.reset();
m_cache_instance = nullptr;
}
private:
std::map<std::shared_ptr<ov::Model>, MetaInfo> m_graph_cache;
static std::shared_ptr<GraphCache> m_cache_instance;
GraphCache() = default;
};
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov

View File

@ -0,0 +1,56 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
namespace ov {
namespace tools {
namespace subgraph_dumper {
constexpr double DEFAULT_MIN_VALUE = std::numeric_limits<double>::min();
constexpr double DEFAULT_MAX_VALUE = std::numeric_limits<double>::max();
constexpr double DEFAULT_EPSILON = std::numeric_limits<float>::epsilon();
struct InputInfo {
struct Range {
double min, max;
Range(double in_min, double in_max) : min(in_min), max(in_max) {}
Range& operator=(const Range& ranges) {
if (ranges.max != DEFAULT_MAX_VALUE) {
this->max = this->max != DEFAULT_MAX_VALUE ? std::max(this->max, ranges.max) : ranges.max;
}
if (ranges.min != DEFAULT_MIN_VALUE) {
this->min = this->min != DEFAULT_MIN_VALUE ? std::min(this->min, ranges.min) : ranges.min;
}
return *this;
}
bool operator==(const Range& ranges) const {
double max_delta = (this->max - ranges.max) > 0 ? this->max - ranges.max : ranges.max - this->max;
double min_delta = (this->min - ranges.min) > 0 ? this->min - ranges.min : ranges.min - this->min;
return max_delta <= DEFAULT_EPSILON && min_delta <= DEFAULT_EPSILON;
}
};
Range ranges;
bool is_const;
InputInfo(double in_min = DEFAULT_MIN_VALUE,
double in_max = DEFAULT_MAX_VALUE,
bool in_is_const = false) :
is_const(in_is_const),
ranges(Range(in_min, in_max)) {}
bool operator==(const InputInfo& input_info_ref) const {
return this->is_const == input_info_ref.is_const && this->ranges == input_info_ref.ranges;
}
};
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
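A short worked example (illustrative only) of the merge semantics above: Range::operator= widens the stored range instead of overwriting it, and defaulted bounds are ignored:

#include <cassert>
#include "cache/meta/input_info.hpp"
inline void range_merge_example() {
    using ov::tools::subgraph_dumper::InputInfo;
    InputInfo::Range a(0, 3), b(-1, 5);
    a = b;                                   // min(0, -1) = -1, max(3, 5) = 5
    assert(a.min == -1 && a.max == 5);
    assert(a == InputInfo::Range(-1, 5));    // equality allows DEFAULT_EPSILON tolerance
}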

View File

@ -0,0 +1,43 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "cache/meta/input_info.hpp"
#include "cache/meta/model_info.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
class MetaInfo {
public:
MetaInfo(const std::string& model_path = "", const std::map<std::string, InputInfo>& _input_info = {}, size_t total_op_cnt = 1, size_t model_priority = 1);
void serialize(const std::string& serialization_path);
void update(const std::string& model_path, const std::map<std::string, InputInfo>& _input_info,
size_t _total_op_cnt = 1, const std::vector<std::string>& ignored_inputs = {});
std::map<std::string, InputInfo> get_input_info();
std::map<std::string, ModelInfo> get_model_info();
protected:
// { input_node_name: input_info }
std::map<std::string, InputInfo> input_info;
// { model_name: model_paths, this_op/graph_cnt, total_op_cnt, model_priority}
std::map<std::string, ModelInfo> model_info;
// store model priority ranges to normalize graph_priority
static unsigned long MAX_MODEL_PRIORITY;
static unsigned long MIN_MODEL_PRIORITY;
double get_graph_priority();
std::string get_model_name_by_path(const std::string& model_path);
// get absolute graph priority before normalization
unsigned long get_abs_graph_priority();
};
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
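A hedged sketch (names and paths are illustrative) of the intended MetaInfo lifecycle: construct it for the first occurrence of a cached entry, update() it for every further occurrence, and serialize() it next to the dumped IR:

#include "cache/meta/meta_info.hpp"
inline void meta_info_example() {
    using namespace ov::tools::subgraph_dumper;
    std::map<std::string, InputInfo> in_info = {{ "Add-1_0", InputInfo(0, 10, false) }};
    // first occurrence: found in /path/a.xml, which contains 100 operations in total
    MetaInfo meta("/path/a.xml", in_info, /*total_op_cnt=*/100);
    // the same entry found again in another model: paths, counters and ranges are merged
    meta.update("/path/b.xml", in_info, /*_total_op_cnt=*/40);
    meta.serialize("Add-1.meta");   // placeholder file name
}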

View File

@ -0,0 +1,38 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <set>
#include <string>
namespace ov {
namespace tools {
namespace subgraph_dumper {
struct ModelInfo {
std::set<std::string> model_paths;
size_t this_op_cnt, total_op_cnt, model_priority;
ModelInfo(const std::string& model_path = "", size_t total_ops_in_model = 1, size_t _model_priority = 1) :
total_op_cnt(total_ops_in_model), model_paths({model_path}),
this_op_cnt(1), model_priority(_model_priority) {};
bool operator==(const ModelInfo& model_info_ref) const {
if (this->model_priority != model_info_ref.model_priority || this->this_op_cnt != model_info_ref.this_op_cnt ||
this->total_op_cnt != model_info_ref.total_op_cnt || this->model_paths.size() != model_info_ref.model_paths.size()) {
return false;
}
for (const auto& model_path : this->model_paths) {
if (std::find(model_info_ref.model_paths.begin(), model_info_ref.model_paths.end(), model_path) == model_info_ref.model_paths.end()) {
return false;
}
}
return true;
}
};
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov

View File

@ -0,0 +1,55 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "cache/cache.hpp"
#include "single_op_matchers/manager.hpp"
#include "single_op_matchers/base.hpp"
#include "single_op_matchers/convolutions.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
class OpCache : public ICache {
public:
void update_cache(const std::shared_ptr<ov::Model>& model,
const std::string& model_path, bool extract_body) override;
void serialize_cache() override;
static std::shared_ptr<OpCache> get() {
if (m_cache_instance == nullptr) {
m_cache_instance = std::shared_ptr<OpCache>(new OpCache);
}
return std::shared_ptr<OpCache>(m_cache_instance);
}
static void reset() {
m_cache_instance.reset();
m_cache_instance = nullptr;
}
protected:
std::map<std::shared_ptr<ov::Node>, MetaInfo> m_ops_cache;
static std::shared_ptr<OpCache> m_cache_instance;
MatchersManager m_manager = MatchersManager();
OpCache() {
MatchersManager::MatchersMap matchers = {
{ "generic_single_op", BaseMatcher::Ptr(new BaseMatcher) },
{ "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) },
};
m_manager.set_matchers(matchers);
}
void update_cache(const std::shared_ptr<ov::Node>& node, const std::string& model_path, size_t model_op_cnt = 1);
bool serialize_op(const std::pair<std::shared_ptr<ov::Node>, MetaInfo>& op_info);
std::string get_rel_serilization_dir(const std::shared_ptr<ov::Node>& node);
};
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
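A small sketch (not part of the change) of the singleton access pattern; reset() mainly exists to isolate state between unit-test runs. The source path and output directory are placeholders:

#include "cache/op_cache.hpp"
inline void op_cache_singleton_example(const std::shared_ptr<ov::Model>& model) {
    using ov::tools::subgraph_dumper::OpCache;
    auto cache = OpCache::get();                       // lazily creates the shared instance
    cache->update_cache(model, "model.xml", true);     // placeholder source path
    cache->set_serialization_dir("op_cache_out");      // placeholder output directory
    cache->serialize_cache();                          // one IR + .meta per cached operation
    OpCache::reset();                                  // drop the instance (e.g. between tests)
}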

View File

@ -0,0 +1,43 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gflags/gflags.h>
#include <iostream>
static const char help_message[] = "Print a usage message.";
static const char input_folders_message[] = "Required. Comma separated paths to the input folders with IRs";
static const char local_cache_message[] = "Optional. Comma separated paths to the local cache folders with IRs";
static const char output_folder_message[] = "Required. Path to the output folders where to serialize IRs";
static const char path_regex_message[] = "Optional. regular expression to be applied in input "
"folders recursive discovery";
static const char extract_body_message[] = "Optional. Allow to extract operation bodies to operation cache.";
static const char cache_type_message[] = "Optional. Specify caching type: OP, GRAPH. The default value is both";
DEFINE_bool(h, false, help_message);
DEFINE_string(input_folders, "", input_folders_message);
DEFINE_string(local_cache, "", local_cache_message);
DEFINE_string(output_folder, "output", output_folder_message);
DEFINE_string(path_regex, ".*", path_regex_message);
DEFINE_bool(extract_body, true, extract_body_message);
DEFINE_string(cache_type, "", cache_type_message);
/**
* @brief This function shows a help message
*/
static void showUsage() {
std::cout << "\n";
std::cout << "Subgraph Dumper [OPTION]\n";
std::cout << "Options:\n";
std::cout << "\n";
std::cout << " -h " << help_message << "\n";
std::cout << " --input_folders \"<path>\" " << input_folders_message << "\n";
std::cout << " --local_cache \"<path>\" " << input_folders_message << "\n";
std::cout << " --output_folder \"<path>\" " << output_folder_message << "\n";
std::cout << " --path_regex \"<path>\" " << path_regex_message << "\n";
std::cout << " --extract_body \"<value>\" " << extract_body_message << "\n";
std::cout << " --cache_type \"<value>\" " << extract_body_message << "\n";
std::cout << std::flush;
}

View File

@ -0,0 +1,44 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <utility>
#include "pugixml.hpp"
#include "single_op_matchers/config.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
class BaseMatcher {
public:
using Ptr = std::shared_ptr<BaseMatcher>;
BaseMatcher();
virtual bool match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const;
iMatcherConfig::Ptr get_config(const std::shared_ptr<ov::Node> &node) const;
protected:
virtual void configure(const pugi::xml_document &cfg) {};
virtual bool match_only_configured_ops() const { return false; };
virtual bool match_inputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const;
virtual bool same_op_type(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const;
virtual bool match_outputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const;
virtual bool match_attrs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const;
std::vector<iMatcherConfig::Ptr> default_configs;
};
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov

View File

@ -0,0 +1,59 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <utility>
#include "openvino/openvino.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
class iMatcherConfig {
public:
using Ptr = std::shared_ptr<iMatcherConfig>;
explicit iMatcherConfig(bool is_fallback_config) : is_fallback_config(is_fallback_config) {}
iMatcherConfig(
std::vector<std::string> ignored_attributes,
std::vector<size_t> ignored_ports,
bool is_fallback_config,
bool ignore_matching = false)
: ignored_attributes(std::move(ignored_attributes)),
ignored_ports(std::move(ignored_ports)),
is_fallback_config(is_fallback_config),
ignore_matching(ignore_matching) {}
// Empty vectors stand for any possible value
std::vector<std::string> ignored_attributes;
std::vector<size_t> ignored_ports;
bool is_fallback_config;
bool ignore_matching = false;
virtual bool op_in_config(const std::shared_ptr<ov::Node> &node) = 0;
};
template <typename... OPTypes>
struct MatcherConfig : public iMatcherConfig {
public:
MatcherConfig() : iMatcherConfig(sizeof...(OPTypes) == 0) {}
MatcherConfig(std::vector<std::string> ignored_attributes, std::vector<size_t> ignored_ports, bool ignore_matching = false) :
iMatcherConfig(std::move(ignored_attributes), std::move(ignored_ports), sizeof...(OPTypes) == 0, ignore_matching) {}
MatcherConfig(bool ignore_matching) : iMatcherConfig({}, {}, sizeof...(OPTypes) == 0, ignore_matching) {}
bool op_in_config(const std::shared_ptr<ov::Node> &node) override {
std::initializer_list<bool> vals{(ov::is_type<OPTypes>(node))...};
return std::any_of(vals.begin(), vals.end(), [](bool i) { return i; });
};
};
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
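A brief illustration of how op_in_config behaves for a typed configuration versus the empty fallback configuration (the example node types are only for demonstration):

#include "single_op_matchers/config.hpp"
#include "openvino/op/ops.hpp"
inline void matcher_config_example() {
    using namespace ov::tools::subgraph_dumper;
    // typed config: applies to Convolution only; input port 1 (weights) is ignored
    iMatcherConfig::Ptr conv_cfg = std::make_shared<MatcherConfig<ov::op::v1::Convolution>>(
        std::vector<std::string>{}, std::vector<size_t>{1});
    // empty type list: fallback config used when no typed config claims the node
    iMatcherConfig::Ptr fallback_cfg = std::make_shared<MatcherConfig<>>();
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1});
    bool in_conv_cfg = conv_cfg->op_in_config(param);      // false: a Parameter is not a Convolution
    bool is_fallback = fallback_cfg->is_fallback_config;   // true: used as the catch-all config
    (void)in_conv_cfg; (void)is_fallback;
}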

View File

@ -0,0 +1,27 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "single_op_matchers/base.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
class ConvolutionsMatcher : public BaseMatcher {
public:
ConvolutionsMatcher();
bool match_inputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const override;
bool match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const override;
protected:
bool match_only_configured_ops() const override { return true; }
};
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov

View File

@ -0,0 +1,42 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
// #include "pugixml.hpp"
// #include "ngraph/node.hpp"
// #include "single_op_matchers/single_op.hpp"
// #include "single_op_matchers/convolutions.hpp"
#include "single_op_matchers/base.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
class Matcher;
class MatchersManager {
public:
using MatchersMap = std::map<std::string, BaseMatcher::Ptr>;
explicit MatchersManager(const MatchersMap& matchers = {}) : m_matchers(matchers) {}
bool match_all(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref);
bool match_any(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref);
void set_matchers(const MatchersMap& matchers = {}) { m_matchers = matchers; }
iMatcherConfig::Ptr get_config(const std::shared_ptr<ov::Node> &node) const;
private:
std::vector<bool> run_matchers(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref);
MatchersMap m_matchers = {};
};
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
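An illustrative sketch of how matchers are registered with the manager: match_any() succeeds if at least one registered matcher accepts the pair of nodes, while match_all() requires every matcher to accept it:

#include "single_op_matchers/manager.hpp"
#include "single_op_matchers/convolutions.hpp"
inline void matchers_manager_example(const std::shared_ptr<ov::Node>& node,
                                     const std::shared_ptr<ov::Node>& ref) {
    using namespace ov::tools::subgraph_dumper;
    MatchersManager manager({
        { "generic_single_op", BaseMatcher::Ptr(new BaseMatcher) },
        { "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) },
    });
    bool any = manager.match_any(node, ref);   // at least one matcher accepted the pair
    bool all = manager.match_all(node, ref);   // every matcher accepted the pair
    (void)any; (void)all;
}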

View File

@ -0,0 +1,135 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <map>
#include <regex>
#include "openvino/util/file_util.hpp"
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/test_constants.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "cache/cache.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
static std::vector<std::regex> FRONTEND_REGEXP = {
#ifdef ENABLE_OV_ONNX_FRONTEND
std::regex(R"(.*\.onnx)"),
#endif
#ifdef ENABLE_OV_PADDLE_FRONTEND
std::regex(R"(.*\.pdmodel)"),
std::regex(R"(.*__model__)"),
#endif
#ifdef ENABLE_OV_TF_FRONTEND
std::regex(R"(.*\.pb)"),
#endif
#ifdef ENABLE_OV_IR_FRONTEND
std::regex(R"(.*\.xml)"),
#endif
#ifdef ENABLE_OV_TF_LITE_FRONTEND
std::regex(R"(.*\.tflite)"),
#endif
#ifdef ENABLE_OV_PYTORCH_FRONTEND
std::regex(R"(.*\.pt)"),
#endif
};
enum ModelCacheStatus {
SUCCEED = 0,
NOT_FULLY_CACHED = 1,
NOT_READ = 2
};
static std::map<ModelCacheStatus, std::string> model_cache_status_to_str = {
{ ModelCacheStatus::SUCCEED, "successful_models" },
{ ModelCacheStatus::NOT_FULLY_CACHED, "not_fully_cached_models" },
{ ModelCacheStatus::NOT_READ, "not_read_models" },
};
inline std::vector<std::string> find_models(const std::vector<std::string> &dirs, const std::string& regexp = ".*") {
std::vector<std::string> models, full_content;
for (const auto& dir : dirs) {
std::vector<std::string> dir_content;
if (ov::util::directory_exists(dir)) {
dir_content = CommonTestUtils::getFileListByPatternRecursive({dir}, FRONTEND_REGEXP);
} else if (ov::util::file_exists(dir) && std::regex_match(dir, std::regex(".*" + std::string(CommonTestUtils::LST_EXTENSION)))) {
dir_content = CommonTestUtils::readListFiles({dir});
} else {
std::string msg = "Input directory (" + dir + ") doesn't not exist!";
throw std::runtime_error(msg);
}
if (!dir_content.empty()) {
full_content.insert(full_content.end(), dir_content.begin(), dir_content.end());
}
}
auto in_regex = std::regex(regexp);
for (const auto& file : full_content) {
if (std::regex_match(file, in_regex)) {
try {
models.emplace_back(file);
} catch (std::exception& e) {
std::cout << "Impossible to read model: " << file << std::endl << "Exception: " << e.what();
}
}
}
return models;
}
// model_cache_status: model_list
inline std::map<ModelCacheStatus, std::vector<std::string>> cache_models(
std::vector<std::shared_ptr<ICache>>& caches,
const std::vector<std::string>& models,
bool extract_body) {
std::map<ModelCacheStatus, std::vector<std::string>> cache_status = {
{ ModelCacheStatus::SUCCEED, {} },
{ ModelCacheStatus::NOT_FULLY_CACHED, {} },
{ ModelCacheStatus::NOT_READ, {} }
};
auto core = ov::test::utils::PluginCache::get().core();
for (const auto& model : models) {
if (ov::util::file_exists(model)) {
std::cout << "Processing model: " << model << std::endl;
ModelCacheStatus model_status = ModelCacheStatus::SUCCEED;
try {
std::shared_ptr<ov::Model> function = core->read_model(model);
try {
for (auto& cache : caches) {
cache->update_cache(function, model, extract_body);
}
} catch (std::exception &e) {
std::cout << "Model processing failed with exception:" << std::endl << e.what() << std::endl;
model_status = ModelCacheStatus::NOT_FULLY_CACHED;
}
} catch (std::exception &e) {
model_status = ModelCacheStatus::NOT_READ;
std::cout << "Model reading failed with exception:" << std::endl << e.what() << std::endl;
}
cache_status[model_status].push_back(model);
}
}
return cache_status;
}
inline void save_model_status_to_file(const std::map<ModelCacheStatus, std::vector<std::string>>& caching_status, const std::string& output_dir) {
std::string cache_status_path = ov::util::path_join({output_dir, "model_caching_status"});
if (!ov::util::directory_exists(cache_status_path)) {
ov::util::create_directory_recursive(cache_status_path);
}
for (const auto& status_info : caching_status) {
std::string output_file_path = ov::util::path_join({ cache_status_path, model_cache_status_to_str[status_info.first] + CommonTestUtils::LST_EXTENSION});
CommonTestUtils::vec2File(status_info.second, output_file_path);
}
}
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
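A short sketch of how these helpers compose (paths and the regex are placeholders); find_models() accepts directories, which are scanned recursively against the frontend-specific patterns, as well as *.lst files that list model paths directly:

#include "utils/model.hpp"
#include "cache/op_cache.hpp"
inline void dump_models_example() {
    using namespace ov::tools::subgraph_dumper;
    // directories are scanned recursively, *.lst files are read as plain path lists
    auto models = find_models({ "/data/ir_models", "/data/extra_models.lst" }, R"(.*resnet.*)");
    std::vector<std::shared_ptr<ICache>> caches = { OpCache::get() };
    auto status = cache_models(caches, models, /*extract_body=*/true);
    // writes one .lst report per ModelCacheStatus into the output directory
    save_model_status_to_file(status, "/tmp/subgraphs_dump_out");
}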

View File

@ -0,0 +1,172 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <memory>
#include "cache/meta/input_info.hpp"
#include "functional_test_utils/summary/op_info.hpp"
#include "openvino/openvino.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
template <typename dType>
inline InputInfo::Range get_const_ranges(const std::shared_ptr<ov::op::v0::Constant>& node) {
size_t elements_count = ov::shape_size(node->get_shape());
const auto& const_values = node->cast_vector<dType>();
auto max = *std::max_element(const_values.begin(), const_values.end());
auto min = *std::min_element(const_values.begin(), const_values.end());
return InputInfo::Range(static_cast<double>(min), static_cast<double>(max));
}
inline std::map<std::string, InputInfo> get_input_info_by_node(const std::shared_ptr<ov::Node>& node) {
std::map<std::string, InputInfo> input_info;
for (size_t port_id = 0; port_id < node->get_input_size(); ++port_id) {
InputInfo in_info;
std::shared_ptr<ov::Node> input_node = node->input_value(port_id).get_node_shared_ptr();
std::string input_name = input_node->get_friendly_name();
if (std::dynamic_pointer_cast<ov::op::v0::Constant>(input_node)) {
auto const_node =
std::dynamic_pointer_cast<ov::op::v0::Constant>(input_node);
in_info.is_const = true;
switch (node->get_output_element_type(0)) {
case ov::element::Type_t::boolean: {
in_info.ranges = get_const_ranges<bool>(const_node);
break;
}
case ov::element::Type_t::bf16: {
in_info.ranges = get_const_ranges<ov::bfloat16>(const_node);
break;
}
case ov::element::Type_t::f16: {
in_info.ranges = get_const_ranges<ov::float16>(const_node);
break;
}
case ov::element::Type_t::f32: {
in_info.ranges = get_const_ranges<float>(const_node);
break;
}
case ov::element::Type_t::f64: {
in_info.ranges = get_const_ranges<double>(const_node);
break;
}
case ov::element::Type_t::i8: {
in_info.ranges = get_const_ranges<int8_t>(const_node);
break;
}
case ov::element::Type_t::i16: {
in_info.ranges = get_const_ranges<int16_t>(const_node);
break;
}
case ov::element::Type_t::i32: {
in_info.ranges = get_const_ranges<int32_t>(const_node);
break;
}
case ov::element::Type_t::i64: {
in_info.ranges = get_const_ranges<int64_t>(const_node);
break;
}
// TODO cast_vector doesn't support u1 now
// case ov::element::Type_t::u1:
// return get_const_ranges<char>(const_node);
case ov::element::Type_t::u8: {
in_info.ranges = get_const_ranges<uint8_t>(const_node);
break;
}
case ov::element::Type_t::u16: {
in_info.ranges = get_const_ranges<uint16_t>(const_node);
break;
}
case ov::element::Type_t::u32: {
in_info.ranges = get_const_ranges<uint32_t>(const_node);
break;
}
case ov::element::Type_t::u64: {
in_info.ranges = get_const_ranges<uint64_t>(const_node);
break;
}
default: {
std::cout << "Can't get ranges.. Unsupported data type" << std::endl;
break;
}
}
}
input_info.insert({ input_name, in_info });
}
return input_info;
}
// Replace all input nodes with Parameters (and Constants, when enabled) instead of the original non-input node types
// if `is_save_const` is false, replace inputs with Parameters only
// if `is_copy_const_node` is false, do not create a new node whose inputs are Constants only
inline std::shared_ptr<ov::Node> clone_node(std::shared_ptr<ov::Node> node,
bool is_save_const = false,
bool is_copy_const_node = false) {
bool has_parameters = false;
ov::OutputVector inputs;
inputs.resize(node->get_input_size());
std::string in_name_base = ov::test::functional::get_node_version(node);
for (size_t i = 0; i < node->get_input_size(); ++i) {
std::string node_name = in_name_base + "_" + std::to_string(i);
if (is_save_const) {
// todo: replace deprecated code
OPENVINO_SUPPRESS_DEPRECATED_START
const auto constant_input = ov::get_constant_from_source(node->input(i).get_source_output());
OPENVINO_SUPPRESS_DEPRECATED_END
if (constant_input) {
auto in_const = std::make_shared<ov::op::v0::Constant>(constant_input->get_element_type(),
constant_input->get_shape(),
constant_input->get_data_ptr());
in_const->set_friendly_name(node_name);
inputs[i] = in_const;
continue;
}
}
has_parameters = true;
auto param =
std::make_shared<ov::op::v0::Parameter>(node->get_input_element_type(i), node->get_input_partial_shape(i));
param->set_friendly_name(node_name);
inputs[i] = param;
}
if (!has_parameters && !is_copy_const_node) {
std::cout << "The operation: " + node->get_friendly_name() + " does not have parameters!" << std::endl;
return nullptr;
}
std::shared_ptr<ov::Node> cloned_node = node->clone_with_new_inputs(inputs);
cloned_node->set_friendly_name(in_name_base);
return cloned_node;
}
// all inputs are defined as Parameters; detailed input info is stored in the meta
inline std::shared_ptr<ov::Model> generate_model_by_node(const std::shared_ptr<ov::Node>& node) {
static size_t model_cnt = 0;
auto cloned_node = clone_node(node);
ov::OutputVector results;
for (auto& out : cloned_node->outputs()) {
results.push_back(std::make_shared<ov::op::v0::Result>(out));
}
auto model = std::make_shared<ov::Model>(results);
model->set_friendly_name(cloned_node->get_friendly_name() + "_" + std::to_string(model_cnt++));
return model;
}
inline std::string get_node_type(const std::shared_ptr<ov::Node>& node) {
for (size_t i = 0; i < node->get_input_size(); ++i) {
if (node->get_input_partial_shape(i).is_dynamic()) {
return "dynamic";
}
}
for (size_t i = 0; i < node->get_output_size(); ++i) {
if (node->get_output_partial_shape(i).is_dynamic()) {
return "dynamic";
}
}
return "static";
}
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
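An illustrative walk-through (node and friendly names are examples only) of the helpers above applied to an Add operation with one Parameter and one Constant input:

#include "utils/node.hpp"
#include "openvino/op/ops.hpp"
inline void node_utils_example() {
    using namespace ov::tools::subgraph_dumper;
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2});
    auto constant = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{2}, {-1.f, 4.f});
    auto add = std::make_shared<ov::op::v1::Add>(param, constant);
    // the Constant input is reported with is_const == true and range [-1, 4]
    auto in_info = get_input_info_by_node(add);
    // clone with Parameter/Constant inputs only; constants are kept because is_save_const is true
    auto cloned = clone_node(add, /*is_save_const=*/true);
    // wrap the operation into a standalone model where every input is a Parameter
    auto model = generate_model_by_node(add);
    (void)in_info; (void)cloned; (void)model;
}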

View File

@ -0,0 +1,55 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include "openvino/util/file_util.hpp"
#include "openvino/pass/manager.hpp"
#include "common_test_utils/file_utils.hpp"
#include "cache/cache.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
bool ICache::serialize_model(const std::pair<std::shared_ptr<ov::Model>, MetaInfo>& graph_info,
const std::string& rel_serialization_dir) {
std::shared_ptr<ov::Model> model = graph_info.first;
MetaInfo meta = graph_info.second;
std::string model_name = model->get_friendly_name();
std::string abs_serialization_dir = ov::util::path_join({ m_serialization_dir, rel_serialization_dir });
std::string xml_path = ov::util::path_join({ abs_serialization_dir, model_name + ".xml" });
std::string bin_path = ov::util::path_join({ abs_serialization_dir, model_name + ".bin" });
std::string meta_path = ov::util::path_join({ abs_serialization_dir, model_name + ".meta" });
if (!ov::util::directory_exists(abs_serialization_dir)) {
ov::util::create_directory_recursive(abs_serialization_dir);
}
auto exit_time = std::chrono::system_clock::now() + std::chrono::seconds(m_serialization_timeout);
do {
try {
ov::pass::Manager manager;
manager.register_pass<ov::pass::Serialize>(xml_path, bin_path);
manager.run_passes(model);
model->validate_nodes_and_infer_types();
meta.serialize(meta_path);
return true;
} catch (std::exception &e) {
std::cout << "Failed to serialize model: " << model_name
<< ". Exception: " << e.what() << std::endl;
CommonTestUtils::removeIRFiles(xml_path, bin_path);
CommonTestUtils::removeFile(meta_path);
if (std::string(e.what()).find("Can't open") == std::string::npos) {
return false;
}
}
} while (std::chrono::system_clock::now() < exit_time);
return false;
}
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov

View File

@ -0,0 +1,18 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "cache/graph_cache.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
std::shared_ptr<GraphCache> GraphCache::m_cache_instance = nullptr;
void GraphCache::update_cache(const std::shared_ptr<ov::Model>& model, const std::string& model_meta_data, bool extract_body) {}
void GraphCache::serialize_cache() {}
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov

View File

@ -0,0 +1,133 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "pugixml.hpp"
#include "common_test_utils/file_utils.hpp"
#include "cache/meta/meta_info.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
unsigned long MetaInfo::MIN_MODEL_PRIORITY = std::numeric_limits<unsigned long>::max();
unsigned long MetaInfo::MAX_MODEL_PRIORITY = std::numeric_limits<unsigned long>::min();
MetaInfo::MetaInfo(const std::string& _model_path, const std::map<std::string, InputInfo>& _input_info, size_t _total_op_cnt, size_t model_priority) {
unsigned long tmp_graph_priority = _total_op_cnt * model_priority;
if (tmp_graph_priority < MIN_MODEL_PRIORITY) MIN_MODEL_PRIORITY = tmp_graph_priority;
if (tmp_graph_priority > MAX_MODEL_PRIORITY) MAX_MODEL_PRIORITY = tmp_graph_priority;
if (_model_path != "") {
model_info.insert({ get_model_name_by_path(_model_path), ModelInfo(_model_path, _total_op_cnt) });
}
if (!_input_info.empty()) {
input_info = _input_info;
}
}
unsigned long MetaInfo::get_abs_graph_priority() {
unsigned long res = 0;
for (const auto& model : model_info) {
res += model.second.total_op_cnt * model.second.this_op_cnt * model.second.model_priority;
}
return res;
}
double MetaInfo::get_graph_priority() {
auto delta = MAX_MODEL_PRIORITY - MIN_MODEL_PRIORITY == 0 ? 1 : MAX_MODEL_PRIORITY - MIN_MODEL_PRIORITY;
// return normalized graph priority in the range [0, 1]
double diff = get_abs_graph_priority() - MIN_MODEL_PRIORITY;
return diff / delta;
}
void MetaInfo::serialize(const std::string& serialization_path) {
pugi::xml_document doc;
pugi::xml_node root = doc.append_child("meta_info");
pugi::xml_node models = root.append_child("models");
// todo: iefode: update to prioritize_latest opset
for (const auto& model : model_info) {
pugi::xml_node model_node = models.append_child("model");
model_node.append_attribute("name").set_value(model.first.c_str());
model_node.append_attribute("this_op_count").set_value(static_cast<unsigned long long>(model.second.this_op_cnt));
model_node.append_attribute("total_op_count").set_value(static_cast<unsigned long long>(model.second.total_op_cnt));
for (const auto& model_path : model.second.model_paths) {
model_node.append_child("path").append_child(model_path.c_str());
}
}
double graph_priority = get_graph_priority();
root.append_child("graph_priority").append_attribute("value").set_value(graph_priority);
auto ports_info = root.append_child("input_info");
for (const auto& input : input_info) {
auto input_node = ports_info.append_child("input");
input_node.append_attribute("id").set_value(input.first.c_str());
if (input.second.ranges.min == DEFAULT_MIN_VALUE) {
input_node.append_attribute("min").set_value("undefined");
} else {
input_node.append_attribute("min").set_value(input.second.ranges.min);
}
if (input.second.ranges.max == DEFAULT_MAX_VALUE) {
input_node.append_attribute("max").set_value("undefined");
} else {
input_node.append_attribute("max").set_value(input.second.ranges.max);
}
input_node.append_attribute("convert_to_const").set_value(input.second.is_const);
}
doc.save_file(serialization_path.c_str());
}
void MetaInfo::update(const std::string& _model_path,
const std::map<std::string, InputInfo>& _input_info,
size_t _total_op_cnt,
const std::vector<std::string>& ignored_inputs) {
if (input_info.size() != _input_info.size()) {
throw std::runtime_error("Uncompatible input info!");
}
std::string model_name = get_model_name_by_path(_model_path);
if (model_info.find(model_name) != model_info.end()) {
if (model_info.at(model_name).model_paths.find(_model_path) == model_info.at(model_name).model_paths.end()) {
model_info.at(model_name).model_paths.insert(_model_path);
model_info.at(model_name).total_op_cnt += _total_op_cnt;
}
model_info.at(model_name).this_op_cnt++;
} else {
model_info.insert({ model_name, ModelInfo(_model_path, _total_op_cnt) });
}
for (const auto& in : _input_info) {
if (std::find(ignored_inputs.begin(), ignored_inputs.end(), in.first) != ignored_inputs.end()) {
continue;
}
if (input_info.find(in.first) == input_info.end()) {
throw std::runtime_error("Incorrect Input Info!");
} else if (input_info[in.first].is_const != in.second.is_const) {
throw std::runtime_error("Try to cast parameter ro constant!");
} else {
input_info[in.first] = in.second;
}
}
// update max and min abs priority to normalize priorities at serialization time
{
auto abs_graph_priority = get_abs_graph_priority();
if (abs_graph_priority > MAX_MODEL_PRIORITY) MAX_MODEL_PRIORITY = abs_graph_priority;
if (abs_graph_priority < MIN_MODEL_PRIORITY) MIN_MODEL_PRIORITY = abs_graph_priority;
}
}
std::map<std::string, InputInfo> MetaInfo::get_input_info() {
return input_info;
}
std::map<std::string, ModelInfo> MetaInfo::get_model_info() {
return model_info;
}
std::string MetaInfo::get_model_name_by_path(const std::string& model_path) {
auto pos = model_path.rfind(CommonTestUtils::FileSeparator);
auto model_name = CommonTestUtils::replaceExt(model_path.substr(pos + 1), "");
return model_name;
}
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
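A worked numeric example (values are invented for illustration) of the priority bookkeeping above: the absolute priority sums total_op_cnt * this_op_cnt * model_priority over all contributing models, and get_graph_priority() maps it linearly onto [0, 1] using the globally tracked MIN/MAX values:

// Suppose one cached entry has been seen in two models:
//   model A: total_op_cnt = 100, this_op_cnt = 2, model_priority = 1  ->  200
//   model B: total_op_cnt =  40, this_op_cnt = 1, model_priority = 1  ->   40
// get_abs_graph_priority() = 200 + 40 = 240
// If the extremes observed so far are MIN_MODEL_PRIORITY = 40 and MAX_MODEL_PRIORITY = 440:
// get_graph_priority() = (240 - 40) / (440 - 40) = 0.5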

View File

@ -0,0 +1,121 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <utility>
#include "openvino/core/core.hpp"
#include "openvino/op/ops.hpp"
#include "openvino/util/file_util.hpp"
#include "cache/op_cache.hpp"
#include "utils/node.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
std::shared_ptr<OpCache> OpCache::m_cache_instance = nullptr;
void OpCache::update_cache(const std::shared_ptr<ov::Model>& model,
const std::string& model_path,
bool extract_body) {
size_t model_op_cnt = model->get_ops().size() - model->get_output_size() - model->inputs().size();
for (const auto& op : model->get_ordered_ops()) {
if (std::dynamic_pointer_cast<ov::op::v0::Parameter>(op) ||
std::dynamic_pointer_cast<ov::op::v0::Constant>(op) ||
std::dynamic_pointer_cast<ov::op::v0::Result>(op) ||
// ReadValue and Assign have to be handled in pair
// Will be handled as part of 48838
std::dynamic_pointer_cast<ov::op::util::AssignBase>(op) ||
std::dynamic_pointer_cast<ov::op::util::ReadValueBase>(op)) {
continue;
}
if (extract_body) {
if (std::dynamic_pointer_cast<ov::op::v8::If>(op)) {
auto if_op = std::dynamic_pointer_cast<ov::op::v8::If>(op);
for (size_t i = 0; i < if_op->get_internal_subgraphs_size(); i++) {
auto if_body = if_op->get_function(i);
update_cache(if_body, model_path, extract_body);
}
} else if (std::dynamic_pointer_cast<ov::op::v5::Loop>(op)) {
auto loop = std::dynamic_pointer_cast<ov::op::v5::Loop>(op);
auto loop_body = loop->get_function();
update_cache(loop_body, model_path, extract_body);
} else if (std::dynamic_pointer_cast<ov::op::v0::TensorIterator>(op)) {
auto ti = std::dynamic_pointer_cast<ov::op::v0::TensorIterator>(op);
auto ti_body = ti->get_function();
update_cache(ti_body, model_path, extract_body);
}
}
update_cache(op, model_path, model_op_cnt);
}
}
void OpCache::update_cache(const std::shared_ptr<ov::Node>& node,
const std::string& model_path,
size_t model_op_cnt) {
std::shared_ptr<ov::Node> find_op_in_cache = nullptr;
// Clone the node to get a node with only Parameter/Constant inputs
auto cloned_node = clone_node(node, true);
if (cloned_node == nullptr)
return;
cloned_node->set_friendly_name(ov::test::functional::get_node_version(cloned_node));
for (auto &&it : m_ops_cache) {
if (m_manager.match_any(it.first, cloned_node)) {
std::cout << "Match " << cloned_node->get_type_info().name << " " << cloned_node->get_friendly_name() <<
" with " << it.first->get_friendly_name() << std::endl;
find_op_in_cache = it.first;
break;
}
}
// to identify ignored inputs
std::vector<std::string> ignored_input_names = {};
{
auto matching_config = m_manager.get_config(find_op_in_cache);
if (matching_config) {
for (const auto& ignored_port : matching_config->ignored_ports) {
ignored_input_names.push_back(find_op_in_cache->get_friendly_name() + "_" + std::to_string(ignored_port));
}
}
}
auto meta = MetaInfo(model_path, get_input_info_by_node(cloned_node), model_op_cnt);
if (find_op_in_cache != nullptr) {
m_ops_cache[find_op_in_cache].update(model_path, get_input_info_by_node(cloned_node), model_op_cnt, ignored_input_names);
}
if (find_op_in_cache > cloned_node) {
std::cout << "Update cache node: " << cloned_node->get_type_info().name << " " << find_op_in_cache->get_friendly_name() << std::endl;
meta = m_ops_cache[find_op_in_cache];
m_ops_cache.erase(find_op_in_cache);
find_op_in_cache = nullptr;
}
if (find_op_in_cache == nullptr) {
std::cout << "Insert node: " << cloned_node->get_type_info().name << " " << cloned_node->get_friendly_name() << " to Cache" << std::endl;
m_ops_cache.insert({ cloned_node, meta });
}
}
void OpCache::serialize_cache() {
for (const auto& cache_item : m_ops_cache) {
serialize_op(cache_item);
}
}
bool OpCache::serialize_op(const std::pair<std::shared_ptr<ov::Node>, MetaInfo> &op_info) {
std::string serialization_dir = get_rel_serilization_dir(op_info.first);
std::shared_ptr<ov::Model> model = generate_model_by_node(op_info.first);
return serialize_model(make_pair(model, op_info.second), serialization_dir);
}
std::string OpCache::get_rel_serilization_dir(const std::shared_ptr<ov::Node>& node) {
std::string op_folder_name = ov::test::functional::get_node_version(node);
auto op_el_type = node->get_output_element_type(0).get_type_name();
return ov::util::path_join({"operation", get_node_type(node), op_folder_name, op_el_type});
}
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
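For illustration, the relative layout produced by get_rel_serilization_dir(); the folder name comes from get_node_version(), so the "Add-1" form here is an assumption about its output rather than something stated in this change:

// Hypothetical example of the on-disk layout (operation folder name format is assumed):
//   node: ov::op::v1::Add with static shapes and f32 output
//   get_rel_serilization_dir(node) -> "operation" / "static" / "Add-1" / "f32"
// serialize_model() joins this with m_serialization_dir and writes
//   <output_dir>/operation/static/Add-1/f32/<model_name>.xml, .bin and .meta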

View File

@ -0,0 +1,60 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gflag_config.hpp"
#include "cache/op_cache.hpp"
#include "cache/graph_cache.hpp"
#include "utils/model.hpp"
using namespace ov::tools::subgraph_dumper;
int main(int argc, char *argv[]) {
gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
if (FLAGS_h) {
showUsage();
return 0;
}
// SubgraphsDumper::ClonersMap::constant_size_threshold_mb = FLAGS_constants_size_threshold;
std::vector<std::string> local_cache_dirs = CommonTestUtils::splitStringByDelimiter(FLAGS_local_cache);
std::vector<std::string> dirs = CommonTestUtils::splitStringByDelimiter(FLAGS_input_folders);
std::vector<std::string> models;
if (!CommonTestUtils::directoryExists(FLAGS_output_folder)) {
std::string msg = "Output directory (" + FLAGS_output_folder + ") doesn't not exist! The directory will be created.";
std::cout << msg << std::endl;
CommonTestUtils::createDirectoryRecursive(FLAGS_output_folder);
}
try {
models = find_models(dirs, FLAGS_path_regex);
} catch (std::runtime_error& e) {
std::cout << "Try 'subgraphdumper -h' for more information. \nException: " << e.what() << std::endl;
return 1;
}
std::vector<std::shared_ptr<ICache>> caches;
if (FLAGS_cache_type == "OP" || FLAGS_cache_type.empty()) {
caches.push_back(OpCache::get());
} else if (FLAGS_cache_type == "GRAPH" || FLAGS_cache_type.empty()) {
caches.push_back(GraphCache::get());
}
std::map<ModelCacheStatus, std::vector<std::string>> cache_model_status;
// Upload previously cached graphs to cache
if (!FLAGS_local_cache.empty()) {
auto cachedOps = find_models(local_cache_dirs);
cache_model_status = cache_models(caches, cachedOps, FLAGS_extract_body);
}
{
auto tmp_cache_model_status = cache_models(caches, models, FLAGS_extract_body);
cache_model_status.insert(tmp_cache_model_status.begin(), tmp_cache_model_status.end());
}
for (auto& cache : caches) {
cache->set_serialization_dir(FLAGS_output_folder);
cache->serialize_cache();
}
save_model_status_to_file(cache_model_status, FLAGS_output_folder);
return (cache_model_status[ModelCacheStatus::NOT_FULLY_CACHED].empty() && cache_model_status[ModelCacheStatus::NOT_READ].empty()) ? 0 : 1;
}

View File

@ -0,0 +1,125 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// #include "matchers/single_op.hpp"
// #include "ngraph/ops.hpp"
// #include "ngraph/validation_util.hpp"
// #include <cstdlib>
#include "single_op_matchers/base.hpp"
#include "common_test_utils/graph_comparator.hpp"
using namespace ov::tools::subgraph_dumper;
iMatcherConfig::Ptr BaseMatcher::get_config(const std::shared_ptr<ov::Node> &node) const {
for (const auto &cfg : default_configs) {
if (cfg->op_in_config(node)) {
return cfg;
}
}
for (const auto &cfg : default_configs) {
if (cfg->is_fallback_config) {
return cfg;
}
}
return std::make_shared<MatcherConfig<>>();
}
bool BaseMatcher::match_inputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const {
if (node->get_input_size() != ref->get_input_size()) {
return false;
}
const std::vector<size_t> &ignored_ports = get_config(node)->ignored_ports;
for (size_t port_id = 0; port_id < node->get_input_size(); ++port_id) {
if (std::find(ignored_ports.begin(), ignored_ports.end(), port_id) != ignored_ports.end()) {
continue;
}
const auto &cur_node_input_type = node->input_value(port_id).get_node_shared_ptr()->get_type_info();
const auto &ref_node_input_type = ref->input_value(port_id).get_node_shared_ptr()->get_type_info();
if (cur_node_input_type != ref_node_input_type) {
return false;
}
if (node->get_input_tensor(port_id).get_partial_shape().rank() != ref->get_input_tensor(port_id).get_partial_shape().rank()) {
return false;
}
if (node->get_input_tensor(port_id).get_element_type() != ref->get_input_tensor(port_id).get_element_type()) {
return false;
}
if (node->get_input_partial_shape(port_id).is_dynamic() != ref->get_input_partial_shape(port_id).is_dynamic()) {
return false;
}
}
return true;
}
bool
BaseMatcher::match_outputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const {
if (node->get_output_size() != ref->get_output_size()) {
return false;
}
for (size_t port_id = 0; port_id < node->get_output_size(); ++port_id) {
if (node->get_output_tensor(port_id).get_element_type() != ref->get_output_tensor(port_id).get_element_type()) {
return false;
}
if (node->get_output_tensor(port_id).get_partial_shape().is_dynamic() != ref->get_output_tensor(port_id).get_partial_shape().is_dynamic()) {
return false;
}
if (node->get_output_tensor(port_id).get_partial_shape().rank()!= ref->get_output_tensor(port_id).get_partial_shape().rank()) {
return false;
}
}
return true;
}
bool BaseMatcher::match_attrs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const {
// todo: iefode: provide correct comparison with ignored attributes
return attributes::compare(node.get(), ref.get(), Comparator::CmpValues::ATTRIBUTES).valid;
}
bool BaseMatcher::match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const {
const auto &cfg = get_config(node);
if (match_only_configured_ops() && cfg->is_fallback_config) {
return false;
}
if (cfg->ignore_matching) {
return false;
}
if (!same_op_type(node, ref)) {
return false;
}
if (!match_inputs(node, ref)) {
return false;
}
if (!match_attrs(node, ref)) {
return false;
}
if (!match_outputs(node, ref)) {
return false;
}
return true;
}
bool BaseMatcher::same_op_type(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const {
return node->get_type_info() == ref->get_type_info();
}
BaseMatcher::BaseMatcher() {
default_configs = {
// std::make_shared<MatcherConfig<>>(std::vector<std::string>{}, std::vector<size_t>{0}),
// std::make_shared<MatcherConfig<ov::opset8::FakeQuantize>>(std::vector<std::string>{},
// std::vector<size_t>{0, 1, 2, 3, 4}),
std::make_shared<MatcherConfig<
ov::op::v1::Convolution,
ov::op::v1::ConvolutionBackpropData,
ov::op::v1::GroupConvolution,
ov::op::v1::GroupConvolutionBackpropData>>(true)
};
}

View File

@ -0,0 +1,64 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/ops.hpp"
#include "single_op_matchers/convolutions.hpp"
using namespace ov::tools::subgraph_dumper;
ConvolutionsMatcher::ConvolutionsMatcher() {
default_configs = {
std::make_shared<MatcherConfig<
ov::op::v1::Convolution,
ov::op::v1::ConvolutionBackpropData,
ov::op::v1::GroupConvolution,
ov::op::v1::GroupConvolutionBackpropData>>(std::vector<std::string>{}, std::vector<size_t>{1})
};
}
bool ConvolutionsMatcher::match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const {
const auto &cfg = get_config(node);
if (match_only_configured_ops() && cfg->is_fallback_config) {
return false;
}
if (cfg->ignore_matching) {
return false;
}
if (!same_op_type(node, ref)) {
return false;
}
if (!match_inputs(node, ref)) {
return false;
}
if (!match_attrs(node, ref)) {
return false;
}
if (!match_outputs(node, ref)) {
return false;
}
return true;
}
bool ConvolutionsMatcher::match_inputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) const {
if (!BaseMatcher::match_inputs(node, ref)) {
return false;
}
bool has_groups = std::dynamic_pointer_cast<ov::op::v1::GroupConvolution>(node) ||
std::dynamic_pointer_cast<ov::op::v1::GroupConvolutionBackpropData>(node);
size_t kernel_size_offset = has_groups ? 3 : 2;
auto ref_weights_shape = ref->get_input_tensor(1).get_shape();
auto cur_weights_shape = node->get_input_tensor(1).get_shape();
const auto ref_kernel_size = std::vector<size_t>(ref_weights_shape.begin() + kernel_size_offset,
ref_weights_shape.end());
const auto cur_kernel_size = std::vector<size_t>(cur_weights_shape.begin() + kernel_size_offset,
cur_weights_shape.end());
if (ref_kernel_size != cur_kernel_size) {
return false;
}
return true;
}
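A brief worked example of the kernel_size_offset logic: a regular Convolution stores weights as [OUT_C, IN_C, kH, kW], so the spatial kernel starts at index 2, while GroupConvolution prepends a GROUPS dimension, shifting it to index 3:

// Convolution weights      {64, 3, 3, 3}     -> kernel taken from index 2: {3, 3}
// GroupConvolution weights {2, 32, 16, 5, 5} -> kernel taken from index 3: {5, 5}
// Two nodes are considered matching on inputs only if these extracted kernel shapes are equal.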

View File

@ -0,0 +1,41 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "single_op_matchers/manager.hpp"
using namespace ov::tools::subgraph_dumper;
iMatcherConfig::Ptr MatchersManager::get_config(const std::shared_ptr<ov::Node> &node) const {
if (node == nullptr) return nullptr;
for (const auto &it : m_matchers) {
auto default_config = it.second->get_config(node);
if (default_config->op_in_config(node)) {
return default_config;
}
}
return nullptr;
}
bool MatchersManager::match_any(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) {
for (const auto &it : m_matchers) {
if (it.second->match(node, ref)) return true;
}
return false;
}
bool MatchersManager::match_all(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) {
const auto matches = this->run_matchers(node, ref);
return std::all_of(matches.begin(), matches.end(), [](bool i) { return i; });
}
std::vector<bool> MatchersManager::run_matchers(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) {
std::vector<bool> matches;
for (const auto &it : m_matchers) {
matches.push_back(it.second->match(node, ref));
}
return matches;
}

View File

@ -0,0 +1,24 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME subgraphs_dumper_tests)
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
ADDITIONAL_SOURCE_DIRS
${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src
EXCLUDED_SOURCE_PATHS
${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/main.cpp
INCLUDES
${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include
LINK_LIBRARIES
PRIVATE
funcTestUtils
openvino::runtime
openvino::pugixml
ADD_CPPLINT
)
ie_faster_build(${TARGET_NAME} UNITY)

View File

@ -0,0 +1,95 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include "gtest/gtest.h"
#include "openvino/op/ops.hpp"
#include "openvino/util/file_util.hpp"
#include "openvino/openvino.hpp"
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/graph_comparator.hpp"
#include "cache/cache.hpp"
#include "cache/meta/meta_info.hpp"
namespace {
class ICacheUnitTest : public ::testing::Test,
public virtual ov::tools::subgraph_dumper::ICache {
protected:
std::shared_ptr<ov::Model> test_model;
ov::tools::subgraph_dumper::MetaInfo test_meta;
std::string test_model_path, model_name;
std::string test_artifacts_dir;
void SetUp() override {
model_name = "test_model";
test_artifacts_dir = ov::util::path_join({CommonTestUtils::getCurrentWorkingDir(), "test_artifacts"});
test_model_path = ov::util::path_join({ test_artifacts_dir, model_name + ".xml" });
ov::util::create_directory_recursive(test_artifacts_dir);
{
auto params = ov::ParameterVector {
std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::PartialShape{1, 1, 1, 1}),
};
// params->begin()->set_friendly_name("in_0");
auto convert = std::make_shared<ov::op::v0::Convert>(params.front(), ov::element::f16);
convert->set_friendly_name("convert_0");
test_model = std::make_shared<ov::Model>(convert, params);
test_model->set_friendly_name(model_name);
}
test_meta = ov::tools::subgraph_dumper::MetaInfo(test_model_path, {{"in_0", ov::tools::subgraph_dumper::InputInfo(0, 1, true)}});
}
void TearDown() override {
CommonTestUtils::removeDir(test_artifacts_dir);
}
};
TEST_F(ICacheUnitTest, set_serialization_dir) {
ASSERT_NO_THROW(this->set_serialization_dir(test_artifacts_dir));
ASSERT_EQ(test_artifacts_dir, this->m_serialization_dir);
}
TEST_F(ICacheUnitTest, update_cache) {
ASSERT_NO_THROW(this->update_cache(test_model, test_model_path));
ASSERT_NO_THROW(this->update_cache(test_model, test_model_path, true));
ASSERT_NO_THROW(this->update_cache(test_model, test_model_path, false));
}
TEST_F(ICacheUnitTest, serialize_cache) {
ASSERT_NO_THROW(this->serialize_cache());
}
TEST_F(ICacheUnitTest, serialize_model) {
std::pair<std::shared_ptr<ov::Model>, ov::tools::subgraph_dumper::MetaInfo> graph_info({ test_model, test_meta });
ASSERT_TRUE(this->serialize_model(graph_info, test_artifacts_dir));
auto xml_path = test_model_path;
auto bin_path = CommonTestUtils::replaceExt(test_model_path, "bin");
auto meta_path = CommonTestUtils::replaceExt(test_model_path, "meta");
try {
if (!ov::util::file_exists(xml_path) ||
!ov::util::file_exists(bin_path)) {
throw std::runtime_error("Model was not serilized!");
}
if (!ov::util::file_exists(meta_path)) {
throw std::runtime_error("Meta was not serilized!");
}
auto core = ov::Core();
auto serialized_model = core.read_model(xml_path, bin_path);
auto res = compare_functions(test_model, serialized_model, true, true, true, true, true, true);
if (!res.first) {
throw std::runtime_error("Serialized and runtime model are not equal!");
}
} catch(std::exception& e) {
CommonTestUtils::removeFile(xml_path);
CommonTestUtils::removeFile(bin_path);
CommonTestUtils::removeFile(meta_path);
GTEST_FAIL() << e.what() << std::endl;
}
}
} // namespace

View File

@ -0,0 +1,205 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include "gtest/gtest.h"
#include "pugixml.hpp"
#include "openvino/openvino.hpp"
#include "openvino/util/file_util.hpp"
#include "common_test_utils/file_utils.hpp"
#include "cache/meta/meta_info.hpp"
namespace {
using namespace ov::tools::subgraph_dumper;
// ======================== Input Info Unit tests =============================================
class InputInfoUnitTest : public ::testing::Test {};
TEST_F(InputInfoUnitTest, constructor) {
ASSERT_NO_THROW(auto in_info = InputInfo());
ASSERT_NO_THROW(auto in_info = InputInfo(0));
ASSERT_NO_THROW(auto in_info = InputInfo(0, 1));
ASSERT_NO_THROW(auto in_info = InputInfo(0, 1, true));
}
TEST_F(InputInfoUnitTest, update_ranges) {
auto in_info_0 = InputInfo();
auto in_info_1 = InputInfo(0);
in_info_0 = in_info_1;
ASSERT_EQ(in_info_0.ranges.min, in_info_1.ranges.min);
ASSERT_EQ(in_info_0.ranges.max, in_info_1.ranges.max);
ASSERT_EQ(in_info_0.is_const, in_info_1.is_const);
auto in_info_2 = InputInfo(1, 2);
auto ref_in_info = InputInfo(0, 2);
in_info_0 = in_info_2;
ASSERT_EQ(in_info_0.ranges.min, ref_in_info.ranges.min);
ASSERT_EQ(in_info_0.ranges.max, ref_in_info.ranges.max);
ASSERT_EQ(in_info_0.is_const, ref_in_info.is_const);
}
// ======================== Model Info Func tests =============================================
class ModelInfoFuncTest : public ::testing::Test {};
TEST_F(ModelInfoFuncTest, constructor) {
ASSERT_NO_THROW(auto model_info = ModelInfo());
ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml"));
ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1));
ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1, 2));
}
// ======================== Meta Info Functional tests =============================================
class MetaInfoFuncTest : public ::testing::Test{
protected:
std::string test_model_path, test_model_name;
std::map<std::string, InputInfo> test_in_info;
std::map<std::string, ModelInfo> test_model_info;
std::string test_artifacts_dir;
void SetUp() override {
test_model_path = "test_model_path.xml";
test_model_name = CommonTestUtils::replaceExt(test_model_path, "");
test_in_info = {{ "test_in_0", InputInfo(DEFAULT_MIN_VALUE, 1, true) }};
test_model_info = {{ test_model_name, ModelInfo(test_model_path, 5) }};
test_artifacts_dir = ov::util::path_join({CommonTestUtils::getCurrentWorkingDir(), "test_artifacts"});
ov::util::create_directory_recursive(test_artifacts_dir);
}
void TearDown() override {
CommonTestUtils::removeDir(test_artifacts_dir);
}
};
TEST_F(MetaInfoFuncTest, constructor) {
ASSERT_NO_THROW(auto meta = MetaInfo());
ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name));
ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info));
ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info, 2));
ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info, 3));
}
TEST_F(MetaInfoFuncTest, get_input_info) {
auto test_meta = MetaInfo(test_model_name, test_in_info);
ASSERT_NO_THROW(test_meta.get_input_info());
ASSERT_EQ(test_meta.get_input_info(), test_in_info);
}
TEST_F(MetaInfoFuncTest, get_model_info) {
auto test_meta = MetaInfo(test_model_path, test_in_info, 5);
ASSERT_NO_THROW(test_meta.get_model_info());
ASSERT_EQ(test_meta.get_model_info(), test_model_info);
}
TEST_F(MetaInfoFuncTest, update) {
std::map<std::string, InputInfo> test_in_info = {{ "test_in_0", InputInfo(DEFAULT_MIN_VALUE, 1, true) }};
auto test_meta = MetaInfo(test_model_name, test_in_info);
std::map<std::string, InputInfo> test_input_info_1 = {{ "test_in_0", InputInfo(0, 1, true) }};
std::string test_model_1 = "test_model_1";
std::string test_model_path_1 = ov::util::path_join({ "path", "to", test_model_1 + ".xml"});
ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {}));
ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_1", InputInfo() }}));
ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_0", InputInfo(0, 1, false) }}));
ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1));
ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 2));
}
TEST_F(MetaInfoFuncTest, serialize) {
auto test_meta = MetaInfo(test_model_name, test_in_info);
std::string seriliazation_path(ov::util::path_join({test_artifacts_dir, "test_meta.meta"}));
test_meta.serialize(seriliazation_path);
ASSERT_TRUE(ov::util::file_exists(seriliazation_path));
}
// ======================== Meta Info Unit tests =============================================
class MetaInfoUnitTest : public MetaInfoFuncTest,
public virtual MetaInfo {
protected:
void SetUp() override {
MetaInfoFuncTest::SetUp();
this->input_info = test_in_info;
this->model_info = test_model_info;
}
};
TEST_F(MetaInfoUnitTest, serialize) {
std::string seriliazation_path(ov::util::path_join({test_artifacts_dir, "test_meta.meta"}));
this->serialize(seriliazation_path);
ASSERT_TRUE(ov::util::file_exists(seriliazation_path));
pugi::xml_document doc;
doc.load_file(seriliazation_path.c_str());
{
auto models_xml = doc.child("meta_info").child("models");
for (const auto model_xml : models_xml.children()) {
auto model_name_xml = std::string(model_xml.attribute("name").value());
ASSERT_NE(model_info.find(model_name_xml), model_info.end());
ASSERT_EQ(model_info[model_name_xml].this_op_cnt, model_xml.attribute("this_op_count").as_uint());
ASSERT_EQ(model_info[model_name_xml].total_op_cnt, model_xml.attribute("total_op_count").as_uint());
auto paths = model_info[model_name_xml].model_paths;
for (const auto& path_xml : model_xml.child("path")) {
auto path_xml_value = std::string(path_xml.name());
ASSERT_NE(std::find(paths.begin(), paths.end(), path_xml_value), paths.end());
}
}
}
{
auto graph_priority_xml = doc.child("meta_info").child("graph_priority").attribute("value").as_double();
ASSERT_EQ(graph_priority_xml, this->get_graph_priority());
}
{
auto input_info_xml = doc.child("meta_info").child("input_info");
for (const auto& in_info_xml : input_info_xml.children()) {
auto in_xml = std::string(in_info_xml.attribute("id").value());
ASSERT_NE(input_info.find(in_xml), input_info.end());
ASSERT_EQ(input_info[in_xml].is_const, in_info_xml.attribute("convert_to_const").as_bool());
auto min_xml = std::string(in_info_xml.attribute("min").value()) == "undefined" ? DEFAULT_MIN_VALUE : in_info_xml.attribute("min").as_double();
ASSERT_EQ(input_info[in_xml].ranges.min, min_xml);
auto max_xml = std::string(in_info_xml.attribute("max").value()) == "undefined" ? DEFAULT_MAX_VALUE : in_info_xml.attribute("max").as_double();
ASSERT_EQ(input_info[in_xml].ranges.max, max_xml);
}
}
}
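// Illustrative sketch (not part of the original sources): judging only by the assertions above, a
// serialized *.meta file is assumed to have roughly the following layout; element names other than
// "meta_info", "models", "path", "graph_priority", "input_info" and the checked attributes are
// assumptions and may differ from the actual serializer output:
//
// <meta_info>
//   <models>
//     <model name="..." this_op_count="..." total_op_count="...">
//       <path>
//         <!-- one child element per recorded model path -->
//       </path>
//     </model>
//   </models>
//   <graph_priority value="..."/>
//   <input_info>
//     <input id="..." min="undefined" max="undefined" convert_to_const="false"/>
//   </input_info>
// </meta_info>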
TEST_F(MetaInfoUnitTest, update) {
auto test_meta = MetaInfo(test_model_name, test_in_info);
std::map<std::string, InputInfo> test_meta_1 = {{ "test_in_0", InputInfo(0, 1, true) }};
std::string test_model_1 = "test_model_1";
std::string test_model_path_1 = ov::util::path_join({ "path", "to", test_model_1 + ".xml"});
this->update(test_model_path_1, test_meta_1);
ASSERT_NE(this->model_info.find(test_model_1), this->model_info.end());
ASSERT_EQ(*this->model_info[test_model_1].model_paths.begin(), test_model_path_1);
ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 1);
this->update(test_model_path_1, test_meta_1);
ASSERT_EQ(this->model_info[test_model_1].model_paths.size(), 1);
ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 2);
test_model_path_1 = ov::util::path_join({ "path", "to", "test", test_model_1 + ".xml"});
this->update(test_model_path_1, test_meta_1);
ASSERT_EQ(this->model_info[test_model_1].model_paths.size(), 2);
ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 3);
}
TEST_F(MetaInfoUnitTest, get_model_name_by_path) {
ASSERT_NO_THROW(this->get_model_name_by_path(test_model_path));
auto name = this->get_model_name_by_path(test_model_path);
ASSERT_EQ(name, test_model_name);
}
TEST_F(MetaInfoUnitTest, get_graph_priority) {
ASSERT_NO_THROW(this->get_graph_priority());
ASSERT_TRUE(this->get_graph_priority() >= 0 && this->get_graph_priority() <= 1);
ASSERT_NO_THROW(this->get_abs_graph_priority());
ASSERT_EQ(this->get_abs_graph_priority(), 5);
}
} // namespace

View File

@ -0,0 +1,180 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include "gtest/gtest.h"
#include "openvino/op/ops.hpp"
#include "openvino/util/file_util.hpp"
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/graph_comparator.hpp"
#include "cache/op_cache.hpp"
#include "utils/node.hpp"
namespace {
using namespace ov::tools::subgraph_dumper;
// ====================== Operation Cache Functional tests ==============================
class OpCacheFuncTest : public ::testing::Test {
protected:
std::shared_ptr<ov::Model> test_model;
std::string test_artifacts_dir, test_model_name, test_model_path;
void SetUp() override {
test_model_name = "test_model_name";
test_artifacts_dir = ov::util::path_join({CommonTestUtils::getCurrentWorkingDir(), "test_artifacts"});
test_model_path = ov::util::path_join({test_artifacts_dir, test_model_name + ".xml"});
ov::util::create_directory_recursive(test_artifacts_dir);
{
auto params = ov::ParameterVector {
std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::PartialShape{1, 1, 1, 1}),
};
auto convert = std::make_shared<ov::op::v0::Convert>(params.front(), ov::element::f16);
convert->set_friendly_name("convert_0");
test_model = std::make_shared<ov::Model>(convert, params);
test_model->set_friendly_name(test_model_name);
}
}
void TearDown() override {
CommonTestUtils::removeDir(test_artifacts_dir);
OpCache::reset();
}
};
TEST_F(OpCacheFuncTest, get_op_cache) {
std::shared_ptr<ov::tools::subgraph_dumper::OpCache> op_cache = nullptr;
EXPECT_NO_THROW(op_cache = ov::tools::subgraph_dumper::OpCache::get());
ASSERT_NE(op_cache, nullptr);
}
TEST_F(OpCacheFuncTest, get_op_cache_twice) {
std::shared_ptr<ov::tools::subgraph_dumper::OpCache> op_cache_0 = nullptr, op_cache_1 = nullptr;
op_cache_0 = ov::tools::subgraph_dumper::OpCache::get();
op_cache_1 = ov::tools::subgraph_dumper::OpCache::get();
ASSERT_EQ(op_cache_0, op_cache_1);
}
TEST_F(OpCacheFuncTest, update_cache) {
auto op_cache = ov::tools::subgraph_dumper::OpCache::get();
ASSERT_NO_THROW(op_cache->update_cache(test_model, test_model_path, true));
ASSERT_NO_THROW(op_cache->update_cache(test_model, test_model_path, true));
}
TEST_F(OpCacheFuncTest, serialize_cache) {
auto op_cache = ov::tools::subgraph_dumper::OpCache::get();
op_cache->set_serialization_dir(test_artifacts_dir);
ASSERT_NO_THROW(op_cache->serialize_cache());
}
// ====================== Operation Cache Unit tests ==============================
class OpCacheUnitTest : public OpCacheFuncTest,
public virtual OpCache {
protected:
std::shared_ptr<ov::op::v0::Convert> convert_node;
MetaInfo test_meta;
void SetUp() override {
OpCacheFuncTest::SetUp();
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::PartialShape{1, 1, 1, 1});
convert_node = std::make_shared<ov::op::v0::Convert>(param, ov::element::f16);
convert_node->set_friendly_name("convert_0");
test_meta = MetaInfo(test_model_path, {{"in_0", InputInfo()}});
}
};
TEST_F(OpCacheUnitTest, update_cache_by_op) {
this->update_cache(convert_node, test_model_path);
ASSERT_EQ(m_ops_cache.size(), 1);
}
TEST_F(OpCacheUnitTest, update_cache_by_model) {
this->update_cache(convert_node, test_model_path, 1);
ASSERT_EQ(m_ops_cache.size(), 1);
std::shared_ptr<ov::Model> test_model_1;
std::string test_model_path_1 = ov::util::path_join({test_artifacts_dir, "model_1", test_model_name + ".xml"});
{
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::PartialShape{1, 1, 1, 1});
param->set_friendly_name("in_0");
auto convert = std::make_shared<ov::op::v0::Convert>(param, ov::element::f16);
convert->set_friendly_name("convert_0");
auto erf = std::make_shared<ov::op::v0::Erf>(convert);
erf->set_friendly_name("erf_0");
test_model_1 = std::make_shared<ov::Model>(erf, ov::ParameterVector{param});
test_model_1->set_friendly_name(test_model_name);
}
this->update_cache(test_model_1, test_model_path_1, false);
// check cache
ASSERT_EQ(m_ops_cache.size(), 2);
for (const auto& cached_node : this->m_ops_cache) {
ASSERT_TRUE(std::dynamic_pointer_cast<ov::op::v0::Convert>(cached_node.first) ||
std::dynamic_pointer_cast<ov::op::v0::Erf>(cached_node.first));
auto meta = cached_node.second;
if (std::dynamic_pointer_cast<ov::op::v0::Convert>(cached_node.first)) {
// check model_path
ASSERT_EQ(meta.get_model_info().size(), 1);
ASSERT_EQ(meta.get_model_info().begin()->first, test_model_name);
ASSERT_EQ(meta.get_model_info().begin()->second.model_paths.size(), 2);
ASSERT_EQ(*meta.get_model_info().begin()->second.model_paths.begin(), test_model_path_1);
ASSERT_EQ(*meta.get_model_info().begin()->second.model_paths.rbegin(), test_model_path);
// check occurence
ASSERT_EQ(meta.get_model_info().begin()->second.this_op_cnt, 2);
ASSERT_EQ(meta.get_model_info().begin()->second.total_op_cnt, 3);
// check input_info
ASSERT_EQ(meta.get_input_info().size(), 1);
ASSERT_EQ(meta.get_input_info().begin()->first, "Convert-1_0");
ASSERT_EQ(meta.get_input_info().begin()->second.ranges.max, DEFAULT_MAX_VALUE);
ASSERT_EQ(meta.get_input_info().begin()->second.ranges.min, DEFAULT_MIN_VALUE);
ASSERT_EQ(meta.get_input_info().begin()->second.is_const, false);
} else {
// check model_path
ASSERT_EQ(meta.get_model_info().size(), 1);
ASSERT_EQ(meta.get_model_info().begin()->first, test_model_name);
ASSERT_EQ(meta.get_model_info().begin()->second.model_paths.size(), 1);
ASSERT_EQ(*meta.get_model_info().begin()->second.model_paths.begin(), test_model_path_1);
// check occurence
ASSERT_EQ(meta.get_model_info().begin()->second.this_op_cnt, 1);
ASSERT_EQ(meta.get_model_info().begin()->second.total_op_cnt, 2);
// check input_info
ASSERT_EQ(meta.get_input_info().size(), 1);
ASSERT_EQ(meta.get_input_info().begin()->first, "Erf-1_0");
ASSERT_EQ(meta.get_input_info().begin()->second.ranges.max, DEFAULT_MAX_VALUE);
ASSERT_EQ(meta.get_input_info().begin()->second.ranges.min, DEFAULT_MIN_VALUE);
ASSERT_EQ(meta.get_input_info().begin()->second.is_const, false);
}
}
}
TEST_F(OpCacheUnitTest, serialize_op) {
this->set_serialization_dir(test_artifacts_dir);
ASSERT_TRUE(this->serialize_op({convert_node, test_meta}));
ASSERT_TRUE(ov::util::directory_exists(test_artifacts_dir));
auto serialized_model_path = ov::util::path_join({test_artifacts_dir,
"operation", "static", "Convert-1", "f16", "Convert-1_0.xml"});
ASSERT_TRUE(ov::util::file_exists(serialized_model_path));
auto core = ov::Core();
auto serialized_model = core.read_model(serialized_model_path);
auto res = compare_functions(test_model, serialized_model, true, false, true, true, true, false);
ASSERT_TRUE(res.first);
}
TEST_F(OpCacheUnitTest, get_rel_serilization_dir) {
auto ref_path = ov::util::path_join({"operation", "static", "Convert-1", "f16"});
auto original_path = this->get_rel_serilization_dir(convert_node);
ASSERT_EQ(ref_path, original_path);
}
TEST_F(OpCacheUnitTest, generate_model_by_node) {
auto generated_graph = generate_model_by_node(convert_node);
auto res = compare_functions(test_model, generated_graph, true, false, true, true, true, false);
ASSERT_TRUE(res.first);
}
} // namespace

View File

@ -0,0 +1,80 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "single_op_matchers/convolutions.hpp"
#include "openvino/op/ops.hpp"
namespace {
using namespace ov::tools::subgraph_dumper;
class ConvolutionMatcherTest : public ::testing::Test {
protected:
void SetUp() override {
matcher = ConvolutionsMatcher();
}
ConvolutionsMatcher matcher;
};
// Check that two convolutions with different input ov::Shapes but the same kernel size match each other
TEST_F(ConvolutionMatcherTest, ConvsSameKernelSize) {
const auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 3, 10, 10}));
const auto weights = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({10, 3, 3, 3}), 1);
const auto op1 = std::make_shared<ov::op::v1::Convolution>(param, weights, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 5, 20, 20}));
const auto weights2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({10, 5, 3, 3}), 1);
const auto op2 = std::make_shared<ov::op::v1::Convolution>(param2, weights2, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
ASSERT_TRUE(matcher.match(op1, op2));
}
// Check that two convolutions with different kernel sizes do not match each other
TEST_F(ConvolutionMatcherTest, ConvsDifferentKernelSize) {
const auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 3, 10, 10}));
const auto weights = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({10, 3, 3, 5}), 1);
const auto op1 = std::make_shared<ov::op::v1::Convolution>(param, weights, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 5, 20, 20}));
const auto weights2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({10, 5, 3, 3}), 1);
const auto op2 = std::make_shared<ov::op::v1::Convolution>(param2, weights2, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
ASSERT_FALSE(matcher.match(op1, op2));
}
// Check that two group convolutions with different input ov::Shapes but the same kernel size match each other
TEST_F(ConvolutionMatcherTest, GroupConvsSameKernelSize) {
const auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 4, 10, 10}));
const auto weights = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 10, 2, 3, 3}), 1);
const auto op1 = std::make_shared<ov::op::v1::GroupConvolution>(param, weights, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 6, 20, 20}));
const auto weights2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 10, 3, 3, 3}), 1);
const auto op2 = std::make_shared<ov::op::v1::GroupConvolution>(param2, weights2, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
ASSERT_TRUE(matcher.match(op1, op2));
}
// Check that two group convolutions with different kernel sizes do not match each other
TEST_F(ConvolutionMatcherTest, GroupConvsDifferentKernelSize) {
const auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 4, 10, 10}));
const auto weights = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 10, 2, 3, 5}), 1);
const auto op1 = std::make_shared<ov::op::v1::GroupConvolution>(param, weights, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 6, 20, 20}));
const auto weights2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 10, 3, 3, 3}), 1);
const auto op2 = std::make_shared<ov::op::v1::GroupConvolution>(param2, weights2, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
ASSERT_FALSE(matcher.match(op1, op2));
}
} // namespace

View File

@ -0,0 +1,93 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "openvino/op/ops.hpp"
#include "single_op_matchers/base.hpp"
namespace {
using namespace ov::tools::subgraph_dumper;
class SingleOpMatcherTest : public ::testing::Test {
protected:
void SetUp() override {
matcher = BaseMatcher();
}
BaseMatcher matcher;
};
// Check that different values of constant nodes on port 0 (default value) are ignored in match()
TEST_F(SingleOpMatcherTest, AllPortsAreConsts_IgnoreConstPortVals) {
const auto const1 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 1);
const auto shape_pattern = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape({2}), std::vector<int>{1, 25});
const auto op1 = std::make_shared<ov::op::v1::Reshape>(const1, shape_pattern, false);
const auto const2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 2);
const auto op2 = std::make_shared<ov::op::v1::Reshape>(const2, shape_pattern, false);
ASSERT_TRUE(matcher.match(op1, op2));
}
// Check match of equal nodes
TEST_F(SingleOpMatcherTest, AllPortsAreParams_NodesEqual) {
const auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10}));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 20}));
const auto op1 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param1, param2}), 1);
const auto op2 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param1, param2}), 1);
ASSERT_TRUE(matcher.match(op1, op2));
}
// Check that nodes don't match - different input ranks
TEST_F(SingleOpMatcherTest, AllPortsAreParams_RanksNotEqual) {
const auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10}));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 20}));
const auto op1 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param1, param2}), 1);
const auto param3 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 40, 10}));
const auto param4 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 40, 10}));
const auto op2 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param3, param4}), 1);
ASSERT_FALSE(matcher.match(op1, op2));
}
// Check that nodes don't match - different input element types
TEST_F(SingleOpMatcherTest, AllPortsAreParams_TypesNotEqual) {
const auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10}));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 20}));
const auto op1 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param1, param2}), 1);
const auto param3 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f16, ov::Shape({10, 10}));
const auto param4 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f16, ov::Shape({10, 20}));
const auto op2 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param3, param4}), 1);
ASSERT_FALSE(matcher.match(op1, op2));
}
// Check that nodes don't match - different attributes (concat axis)
TEST_F(SingleOpMatcherTest, AllPortsAreParams_AttrsNotEqual) {
const auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10, 10}));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10, 10}));
const auto op1 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param1, param2}), 1);
const auto param3 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10, 10}));
const auto param4 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10, 10}));
const auto op2 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param3, param4}), 2);
ASSERT_FALSE(matcher.match(op1, op2));
}
// Check that Add ops match even when different constants are connected to their ports
TEST_F(SingleOpMatcherTest, CheckAddOpConfiguration) {
const auto const1 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 1);
const auto const2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 2);
const auto op1 = std::make_shared<ov::op::v1::Add>(const1, const2);
const auto const3 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 3);
const auto const4 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 4);
const auto op2 = std::make_shared<ov::op::v1::Add>(const3, const4);
ASSERT_TRUE(matcher.match(op1, op2));
}
} // namespace

View File

@ -0,0 +1,61 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "single_op_matchers/base.hpp"
#include "openvino/op/ops.hpp"
namespace {
using namespace ov::op;
using namespace ngraph;
using ov::element::Type_t;
using namespace ov::tools::subgraph_dumper;
class MatcherConfigTest : public ::testing::Test {
protected:
void SetUp() override {
const auto const1 = std::make_shared<v0::Constant>(Type_t::f32, Shape({5, 5}), 1);
const auto const2 = std::make_shared<v0::Constant>(Type_t::f32, Shape({5, 5}), 2);
node = std::make_shared<v1::Add>(const1, const2);
}
std::shared_ptr<Node> node;
};
// Check that matcher configuration for operation created successfully and all parameters are set
TEST_F(MatcherConfigTest, ParametersAreSet) {
std::vector<size_t> ignored_ports = {0};
std::vector<std::string> ignored_attrs = {"attr"};
MatcherConfig<v1::Add> matcher_cfg(ignored_attrs, ignored_ports);
ASSERT_TRUE(matcher_cfg.op_in_config(node));
ASSERT_TRUE(matcher_cfg.ignored_ports == ignored_ports);
ASSERT_TRUE(matcher_cfg.ignored_attributes == ignored_attrs);
ASSERT_FALSE(matcher_cfg.is_fallback_config);
}
// Check that fallback matcher configuration created successfully and all parameters are set
TEST_F(MatcherConfigTest, FallbackConfig) {
std::vector<size_t> ignored_ports = {0};
std::vector<std::string> ignored_attrs = {"attr"};
MatcherConfig<> matcher_cfg(ignored_attrs, ignored_ports);
ASSERT_FALSE(matcher_cfg.op_in_config(node));
ASSERT_TRUE(matcher_cfg.ignored_ports == ignored_ports);
ASSERT_TRUE(matcher_cfg.ignored_attributes == ignored_attrs);
ASSERT_TRUE(matcher_cfg.is_fallback_config);
}
// Check that fallback matcher configuration created with default constructor
TEST_F(MatcherConfigTest, FallbackConfigDefaultConstructor) {
std::vector<size_t> ignored_ports = {};
std::vector<std::string> ignored_attrs = {};
auto matcher_cfg = MatcherConfig<>();
ASSERT_FALSE(matcher_cfg.op_in_config(node));
ASSERT_TRUE(matcher_cfg.ignored_ports == ignored_ports);
ASSERT_TRUE(matcher_cfg.ignored_attributes == ignored_attrs);
ASSERT_TRUE(matcher_cfg.is_fallback_config);
}
} // namespace

View File

@ -0,0 +1,96 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "utils/node.hpp"
#include "openvino/op/ops.hpp"
namespace {
using namespace ov::tools::subgraph_dumper;
TEST(NodeUtilsTest, get_const_ranges) {
std::vector<float> values = {-1, -2.05, -3.65, 0, 5, 7};
auto const_node = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 3}), values);
auto range = get_const_ranges<float>(const_node);
auto range_ref = InputInfo::Range(-3.65, 7);
ASSERT_EQ(range, range_ref);
}
TEST(NodeUtilsTest, get_input_info_by_node) {
std::vector<float> values = {-1, -2.05, -3.65, 0, 5, 7};
auto const_node = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 3}), values);
const_node->set_friendly_name("const_0");
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({2, 3}));
param->set_friendly_name("param_0");
auto add_node = std::make_shared<ov::op::v1::Add>(param, const_node);
std::map<std::string, InputInfo> ref_test_info = {
{ "const_0", InputInfo(-3.65, 7, true) },
{ "param_0", InputInfo() },
};
std::map<std::string, InputInfo> orig_test_info = get_input_info_by_node(add_node);
ASSERT_EQ(ref_test_info, orig_test_info);
}
TEST(NodeUtilsTest, clone_node) {
std::vector<float> values = {-1, -2.05, -3.65, 0, 5, 7};
auto const_node = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 3}), values);
const_node->set_friendly_name("const_0");
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({2, 3}));
param->set_friendly_name("param_0");
auto add_node_0 = std::make_shared<ov::op::v1::Add>(param, const_node);
auto erf_node_0 = std::make_shared<ov::op::v0::Erf>(add_node_0);
auto erf_node_1 = std::make_shared<ov::op::v0::Erf>(const_node);
auto add_node_1 = std::make_shared<ov::op::v1::Add>(erf_node_0, erf_node_1);
{
auto cloned_node = clone_node(add_node_1);
ASSERT_TRUE(ov::op::util::is_parameter(cloned_node->get_input_node_shared_ptr(0)));
ASSERT_TRUE(ov::op::util::is_parameter(cloned_node->get_input_node_ptr(1)));
}
{
auto cloned_node = clone_node(add_node_1, true);
ASSERT_TRUE(ov::op::util::is_parameter(cloned_node->get_input_node_ptr(0)));
ASSERT_TRUE(ov::op::util::is_constant(cloned_node->get_input_node_ptr(1)));
}
{
add_node_1 = std::make_shared<ov::op::v1::Add>(const_node, erf_node_1);
auto cloned_node = clone_node(add_node_1, true, true);
ASSERT_TRUE(ov::op::util::is_constant(cloned_node->get_input_node_ptr(0)));
ASSERT_TRUE(ov::op::util::is_constant(cloned_node->get_input_node_ptr(1)));
}
}
TEST(NodeUtilsTest, generate_model_by_node) {
std::vector<float> values = {-1, -2.05, -3.65, 0, 5, 7};
auto const_node = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 3}), values);
const_node->set_friendly_name("const_0");
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({2, 3}));
param->set_friendly_name("param_0");
auto add_node_0 = std::make_shared<ov::op::v1::Add>(param, const_node);
auto erf_node_0 = std::make_shared<ov::op::v0::Erf>(add_node_0);
auto erf_node_1 = std::make_shared<ov::op::v0::Erf>(const_node);
auto add_node_1 = std::make_shared<ov::op::v1::Add>(erf_node_0, erf_node_1);
auto model = generate_model_by_node(add_node_1);
auto param_0 = model->inputs().begin()->get_node_shared_ptr();
ASSERT_TRUE(ov::op::util::is_parameter(param_0));
ASSERT_EQ(param_0->get_shape(), ov::Shape({2, 3}));
ASSERT_EQ(param_0->get_element_type(), ov::element::Type_t::f32);
auto param_1 = model->inputs().rbegin()->get_node_shared_ptr();
ASSERT_TRUE(ov::op::util::is_parameter(param_1));
ASSERT_EQ(param_1->get_shape(), ov::Shape({2, 3}));
ASSERT_EQ(param_1->get_element_type(), ov::element::Type_t::f32);
auto res_0 = model->outputs().rbegin()->get_node_shared_ptr();
ASSERT_TRUE(ov::op::util::is_output(res_0));
ASSERT_EQ(res_0->get_shape(), ov::Shape({2, 3}));
ASSERT_EQ(res_0->get_element_type(), ov::element::Type_t::f32);
}
} // namespace

View File

@ -13,6 +13,7 @@
#include <ostream>
#include <set>
#include <sstream>
#include <fstream>
#include <string>
#include <vector>
@ -90,6 +91,16 @@ inline std::string vec2str(const std::vector<std::vector<std::vector<vecElementT
return result.str();
}
template <typename ElementType>
inline void vec2File(const std::vector<ElementType>& vec, const std::string& output_file_path) {
std::ofstream output_file;
output_file.open(output_file_path, std::ios::out | std::ios::trunc);
for (const auto& element : vec) {
output_file << element << std::endl;
}
output_file.close();
}
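// Example (illustrative only, not part of the original header; assumes the enclosing
// CommonTestUtils namespace and an arbitrary output file name): write one element per line
// to a text file, e.g. to dump a list of cached operation names next to a *.lst report:
//
//     std::vector<std::string> op_names = {"Convert-1_0", "Erf-1_0"};
//     CommonTestUtils::vec2File(op_names, "cached_ops.lst");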
template <typename vecElementType>
inline std::string set2str(const std::set<vecElementType>& set) {
if (!set.empty()) {

View File

@ -19,6 +19,7 @@ extern const char* DEVICE_HETERO;
const char OP_REPORT_FILENAME[] = "report_op";
const char API_REPORT_FILENAME[] = "report_api";
const char REPORT_EXTENSION[] = ".xml";
const char LST_EXTENSION[] = ".lst";
const char DEVICE_SUFFIX_SEPARATOR = '.';

View File

@ -4,6 +4,31 @@
#pragma once
#include "openvino/openvino.hpp"
namespace ov {
namespace test {
namespace functional {
// todo: reuse in summary
inline std::string get_node_version(const std::shared_ptr<ov::Node>& node, const std::string& postfix = "") {
std::string op_name = node->get_type_info().name;
std::string opset_version = node->get_type_info().get_version();
std::string opset_name = "opset";
auto pos = opset_version.find(opset_name);
if (pos != std::string::npos) {
op_name += "-" + opset_version.substr(pos + opset_name.size());
}
if (!postfix.empty()) {
op_name += "_" + postfix;
}
return op_name;
}
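// Example (illustrative only, not part of the original sources): ov::op::v0::Convert reports
// version "opset1", so the helper is expected to produce the names used by the operation cache above:
//
//     auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1});
//     auto convert = std::make_shared<ov::op::v0::Convert>(param, ov::element::f16);
//     get_node_version(convert);       // "Convert-1"
//     get_node_version(convert, "0");  // "Convert-1_0"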
} // namespace functional
} // namespace test
} // namespace ov
namespace LayerTestsUtils {
struct ModelInfo {