[CONFORMANCE][SUBGRAPHS DUMPER] Rework subgraphs_dumper graph extraction algorithm based on plugin feedback (#19669)

* [CONFORMANCE][SUBGRAPHS DUMPER] Change repeat pattern extractor to avoid duplication and reduce graph size

* Small change

* temporary

* merge

* try to handle large models

* Fixes + tests

* Remove extra

* Exclude models after const folding in case of dynamic models

* shapes to meta

* Fix tests

* Fix test + is_subgraph

* Fix issue with default output

* change hashing

* Check memory

* Hash algo

* correct model size check

* Log large models

* tmp disable fused_names extractor

* add device for fused_names

* remove extra

* fix build

* Disable fused_names extractor
Irina Efode 2023-10-04 21:50:56 +04:00 committed by GitHub
parent d7be40b808
commit cdcbb1dc00
33 changed files with 765 additions and 172 deletions

View File

@ -28,9 +28,33 @@ public:
m_serialization_dir = serialization_dir;
}
bool is_model_large_to_read(const std::shared_ptr<ov::Model>& model, const std::string& model_path) {
// ov::Model + ov::CompiledModel
auto model_bytesize = model->get_graph_size();
if (2 * model_bytesize >= mem_size) {
auto model_bytesize_gb = model_bytesize;
model_bytesize_gb >>= 30;
auto mem_size_gb = mem_size;
mem_size_gb >>= 30;
// std::cout << "[ WARNING ] Model " << model_path << " bytesize is " << model_bytesize_gb <<
// "is larger than RAM size: " << mem_size_gb << ". Model will be skipped!" << std::endl;
return true;
}
return false;
}
bool is_model_large_to_store_const(const std::shared_ptr<ov::Model>& model) {
auto model_bytesize = model->get_graph_size();
if (mem_size < model_bytesize * 4) {
return true;
}
return false;
}
protected:
size_t m_serialization_timeout = 60;
std::string m_serialization_dir = ".";
static size_t mem_size;
ICache() = default;

View File

@ -19,12 +19,13 @@ class GraphCache : public ICache {
public:
void update_cache(const std::shared_ptr<ov::Model>& model,
const std::string& model_meta_data,
bool extract_body, bool from_cache = false) override;
bool extract_body,
bool from_cache = false) override;
void serialize_cache() override;
static std::shared_ptr<GraphCache>& get() {
static std::shared_ptr<GraphCache>& get(const std::string& device = "") {
if (m_cache_instance == nullptr) {
m_cache_instance = std::shared_ptr<GraphCache>(new GraphCache);
m_cache_instance = std::shared_ptr<GraphCache>(new GraphCache(device));
}
return m_cache_instance;
}
@ -46,19 +47,21 @@ protected:
// cache byte size
uint64_t m_graph_cache_bytesize = 0;
GraphCache() {
GraphCache(const std::string& device = "") {
ExtractorsManager::ExtractorsMap matchers = {
// temporarily disabled due to memory leaks in CI and unavailable swap memory
{ "fused_names", FusedNamesExtractor::Ptr(new FusedNamesExtractor) },
// { "fused_names", FusedNamesExtractor::Ptr(new FusedNamesExtractor(device)) },
{ "repeat_pattern", RepeatPatternExtractor::Ptr(new RepeatPatternExtractor) },
};
m_manager.set_extractors(matchers);
m_cache_subdir = "subgraph";
}
void update_cache(const std::shared_ptr<ov::Model>& model, const std::string& model_path,
std::map<std::string, InputInfo>& input_info, const std::string& extractor_name,
size_t model_op_cnt, bool from_cache = false);
void update_cache(const std::shared_ptr<ov::Model>& model,
const std::string& model_path,
std::map<std::string, InputInfo>& input_info,
const std::string& extractor_name,
size_t model_op_cnt);
};
} // namespace subgraph_dumper
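
A small usage note on the updated accessor (the device names below are only examples): the device argument is honored only by the first call, because later calls return the instance that already exists.

// Sketch: the first call creates the singleton with the requested device ...
auto& cache = GraphCache::get("TEMPLATE");
// ... subsequent calls ignore the argument and return the same instance.
auto& same_cache = GraphCache::get("CPU");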

View File

@ -40,15 +40,33 @@ struct InputInfo {
Range ranges;
bool is_const;
ov::PartialShape max_shape, min_shape;
InputInfo(double in_min = DEFAULT_MIN_VALUE,
InputInfo(const ov::PartialShape& shape = {},
double in_min = DEFAULT_MIN_VALUE,
double in_max = DEFAULT_MAX_VALUE,
bool in_is_const = false) :
is_const(in_is_const),
ranges(Range(in_min, in_max)) {}
ranges(Range(in_min, in_max)),
max_shape(shape),
min_shape(shape) {}
bool operator==(const InputInfo& input_info_ref) const {
return this->is_const == input_info_ref.is_const && this->ranges == input_info_ref.ranges;
return this->is_const == input_info_ref.is_const &&
this->ranges == input_info_ref.ranges &&
this->max_shape == input_info_ref.max_shape &&
this->min_shape == input_info_ref.min_shape;
}
InputInfo operator=(const InputInfo& input_info) {
this->ranges = input_info.ranges;
if (ov::shape_size(this->max_shape.get_max_shape()) < ov::shape_size(input_info.max_shape.get_max_shape())) {
this->max_shape = input_info.max_shape;
}
if (ov::shape_size(this->min_shape.get_min_shape()) > ov::shape_size(input_info.min_shape.get_min_shape())) {
this->min_shape = input_info.min_shape;
}
return *this;
}
};
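
The assignment operator above deliberately merges rather than overwrites the shape information: max_shape only grows and min_shape only shrinks, while the value ranges are taken from the right-hand side. A short illustration with hypothetical shapes:

// Sketch: accumulating shape ranges across repeated occurrences of the same input.
InputInfo info({1, 3, 224, 224});       // min_shape == max_shape == {1,3,224,224}
info = InputInfo({1, 3, 299, 299});     // larger element count -> max_shape becomes {1,3,299,299}
info = InputInfo({1, 3, 128, 128});     // smaller element count -> min_shape becomes {1,3,128,128}
// info.ranges always reflects the last assigned InputInfo.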

View File

@ -13,8 +13,12 @@ namespace subgraph_dumper {
class MetaInfo {
public:
MetaInfo(const std::string& model_path = "", const std::map<std::string, InputInfo>& _input_info = {},
size_t total_op_cnt = 1, size_t this_op_cnt = 1, const std::string& extractor = "", size_t model_priority = 1);
MetaInfo(const std::string& model_path = "",
const std::map<std::string, InputInfo>& _input_info = {},
size_t total_op_cnt = 1,
size_t this_op_cnt = 1,
const std::string& extractor = "",
size_t model_priority = 1);
MetaInfo(std::map<std::string, InputInfo> _in_info,
std::map<std::string, ModelInfo> _model_info,
std::unordered_set<std::string> _extractors) :
@ -22,9 +26,14 @@ public:
input_info(_in_info),
extractors(_extractors) {};
void serialize(const std::string& serialization_path);
void update(const std::string& model_path, const std::map<std::string, InputInfo>& _input_info, size_t _total_op_cnt = 1,
size_t _this_op_cnt = 1, const std::string& extractor = "", const std::vector<std::string>& ignored_inputs = {});
void update(const std::string& model_path,
const std::map<std::string, InputInfo>& _input_info,
size_t _total_op_cnt = 1,
size_t _this_op_cnt = 1,
const std::string& extractor = "",
const std::vector<std::string>& ignored_inputs = {});
std::map<std::string, InputInfo> get_input_info() const;
void set_input_info(const std::map<std::string, InputInfo>& new_in_info) { input_info = new_in_info; };
std::map<std::string, ModelInfo> get_model_info() const;
std::string get_any_extractor() const { return *extractors.begin(); }

View File

@ -15,7 +15,10 @@ struct ModelInfo {
std::set<std::string> model_paths;
size_t this_op_cnt, total_op_cnt, model_priority;
ModelInfo(const std::string& model_path = "", size_t total_ops_in_model = 1, size_t this_ops_in_model = 1, size_t _model_priority = 1) :
ModelInfo(const std::string& model_path = "",
size_t total_ops_in_model = 1,
size_t this_ops_in_model = 1,
size_t _model_priority = 1) :
total_op_cnt(total_ops_in_model), this_op_cnt(this_ops_in_model), model_priority(_model_priority) {
model_paths = model_path.empty() ? std::set<std::string>() : std::set<std::string>({ model_path }) ;
}

View File

@ -7,6 +7,8 @@
#include <gflags/gflags.h>
#include <iostream>
#include "common_test_utils/test_constants.hpp"
static const char help_message[] = "Print a usage message.";
static const char input_folders_message[] = "Required. Comma separated paths to the input folders with IRs";
static const char local_cache_message[] = "Optional. Comma separated paths to the local cache folders with IRs";
@ -15,11 +17,13 @@ static const char path_regex_message[] = "Optional. regular expression to be app
"folders recursive discovery";
static const char extract_body_message[] = "Optional. Allow to extract operation bodies to operation cache.";
static const char cache_type_message[] = "Optional. Specify caching type: OP, GRAPH. The default value is both";
static const char device_message[] = "Optional. Specify device to compile model for `fused_names` extractor. Default is `TEMPLATE` ";
DEFINE_bool(h, false, help_message);
DEFINE_string(input_folders, "", input_folders_message);
DEFINE_string(local_cache, "", local_cache_message);
DEFINE_string(output_folder, "output", output_folder_message);
DEFINE_string(device, ov::test::utils::DEVICE_TEMPLATE, device_message);
DEFINE_string(path_regex, ".*", path_regex_message);
DEFINE_bool(extract_body, true, extract_body_message);
DEFINE_string(cache_type, "", cache_type_message);
@ -38,6 +42,7 @@ static void showUsage() {
std::cout << " --output_folder \"<path>\" " << output_folder_message << "\n";
std::cout << " --path_regex \"<path>\" " << path_regex_message << "\n";
std::cout << " --extract_body \"<value>\" " << extract_body_message << "\n";
std::cout << " --cache_type \"<value>\" " << extract_body_message << "\n";
std::cout << " --cache_type \"<value>\" " << cache_type_message << "\n";
std::cout << " --device \"<value>\" " << device_message << "\n";
std::cout << std::flush;
}

View File

@ -17,7 +17,7 @@ public:
explicit MatchersManager(const MatchersMap& matchers = {}) : m_matchers(matchers) {}
bool match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref);
const std::shared_ptr<ov::Node> &ref) const;
void set_matchers(const MatchersMap& matchers = {}) { m_matchers = matchers; }
const MatchersMap& get_matchers() { return m_matchers; }

View File

@ -14,15 +14,16 @@ namespace subgraph_dumper {
class FusedNamesExtractor final : public SubgraphExtractor {
public:
FusedNamesExtractor();
FusedNamesExtractor(const std::string& device = "");
~FusedNamesExtractor();
std::list<ExtractedPattern> extract(const std::shared_ptr<ov::Model> &model,
bool is_extract_body = true) override;
void set_target_device(const std::string& _device) { device = _device; }
bool is_extract_body = true,
bool is_copy_constants = true) override;
protected:
std::unordered_set<std::string> extract_compiled_model_names(const std::shared_ptr<ov::Model>& model);
void set_target_device(const std::string& _device);
std::string device;
std::shared_ptr<ov::Core> core;

View File

@ -12,15 +12,23 @@ namespace subgraph_dumper {
class ExtractorsManager {
public:
// { is_subgraph, model, subgraph, model_in_info, subgraph_in_info }
using ExtractedSubgraphTuple = std::tuple<bool, std::shared_ptr<ov::Model>, std::shared_ptr<ov::Model>, std::map<std::string, InputInfo>, std::map<std::string, InputInfo>>;
using ExtractorsMap = std::map<std::string, SubgraphExtractor::Ptr>;
explicit ExtractorsManager(const ExtractorsMap& extractors = {}) : m_extractors(extractors) {}
bool match(const std::shared_ptr<ov::Model> &model,
const std::shared_ptr<ov::Model> &ref,
const std::shared_ptr<ov::Model> &ref_model,
std::map<std::string, InputInfo> &in_info,
const std::map<std::string, InputInfo> &in_info_ref);
ExtractedSubgraphTuple is_subgraph(const std::shared_ptr<ov::Model> &model,
const std::shared_ptr<ov::Model> &ref_model,
const std::map<std::string, InputInfo> &in_info = {},
const std::map<std::string, InputInfo> &in_info_ref = {});
std::list<ExtractedPattern> extract(const std::shared_ptr<ov::Model> &model,
bool is_extract_body = true);
bool is_extract_body = true,
bool is_copy_constants = true);
void set_extractors(const ExtractorsMap& extractors = {}) { m_extractors = extractors; }
ExtractorsMap get_extractors() { return m_extractors; }
@ -28,7 +36,8 @@ public:
std::map<std::string, InputInfo> align_input_info(const std::shared_ptr<ov::Model>& model,
const std::shared_ptr<ov::Model>& model_ref,
const std::map<std::string, InputInfo> &in_info,
const std::map<std::string, InputInfo> &in_info_ref);
const std::map<std::string, InputInfo> &in_info_ref,
const std::map<std::string, std::string> &matched_op = {});
protected:
ExtractorsMap m_extractors = {};

View File

@ -25,7 +25,8 @@ public:
}
std::list<ExtractedPattern> extract(const std::shared_ptr<ov::Model> &model,
bool is_extract_body = true) override;
bool is_extract_body = true,
bool is_copy_constants = true) override;
private:
MatchersManager manager;

View File

@ -8,7 +8,11 @@
#include "openvino/op/util/op_types.hpp"
#include "common_test_utils/graph_comparator.hpp"
#include "cache/meta/input_info.hpp"
#include "matchers/single_op/single_op.hpp"
#include "matchers/single_op/convolutions.hpp"
#include "matchers/single_op/manager.hpp"
namespace ov {
namespace tools {
@ -16,13 +20,26 @@ namespace subgraph_dumper {
class SubgraphExtractor {
public:
// { is_subgraph, model, subgraph, matched_ops{ model_op_name, graph_op_name }}
using IsSubgraphTuple = std::tuple<bool, std::shared_ptr<ov::Model>, std::shared_ptr<ov::Model>, std::map<std::string, std::string>>;
using Ptr = std::shared_ptr<SubgraphExtractor>;
SubgraphExtractor() {
MatchersManager::MatchersMap matchers = {
{ "generic_single_op", SingleOpMatcher::Ptr(new SingleOpMatcher) },
{ "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) },
};
m_manager.set_matchers(matchers);
}
bool match(const std::shared_ptr<ov::Model> &model,
const std::shared_ptr<ov::Model> &ref_model) const;
IsSubgraphTuple is_subgraph(const std::shared_ptr<ov::Model> &model,
const std::shared_ptr<ov::Model> &ref_model) const;
virtual std::list<ExtractedPattern> extract(const std::shared_ptr<ov::Model> &model,
bool is_extract_body = true) {
bool is_extract_body = true,
bool is_copy_constants = true) {
return std::list<ExtractedPattern>{};
};
@ -34,6 +51,7 @@ protected:
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::NODES)
.enable(FunctionsComparator::PRECISIONS);
MatchersManager m_manager = MatchersManager();
inline bool is_node_to_skip(const std::shared_ptr<ov::Node>& node) const {
return ov::op::util::is_parameter(node) ||

View File

@ -0,0 +1,48 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#if defined(_WIN32)
#include <Windows.h>
#else
#include <unistd.h>
#include <sys/types.h>
#include <sys/param.h>
#endif
namespace ov {
namespace tools {
namespace subgraph_dumper {
static size_t get_ram_size() {
size_t ram_mem_size_bytes = 0;
#ifdef _WIN32
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx( &status );
ram_mem_size_bytes = status.ullTotalPhys;
#elif defined(CTL_HW) && defined(HW_MEMSIZE)
int mib[2];
mib[0] = CTL_HW;
#if defined(HW_MEMSIZE)
mib[1] = HW_MEMSIZE;
#endif
int64_t size = 0;
size_t len = sizeof( size );
if ( sysctl( mib, 2, &size, &len, NULL, 0 ) == 0 )
ram_mem_size_bytes = size;
#elif defined(_SC_AIX_REALMEM)
ram_mem_size_bytes = sysconf( _SC_AIX_REALMEM ) * (size_t)1024L;
#elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGE_SIZE)
ram_mem_size_bytes = static_cast<size_t>(sysconf( _SC_PHYS_PAGES )) *
static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
#endif
return ram_mem_size_bytes;
}
} // namespace subgraph_dumper
} // namespace tools
} // namespace ov

View File

@ -49,13 +49,17 @@ static std::vector<std::regex> FROTEND_REGEXP = {
enum ModelCacheStatus {
SUCCEED = 0,
NOT_FULLY_CACHED = 1,
NOT_READ = 2
NOT_READ = 2,
LARGE_MODELS_EXCLUDED = 3,
LARGE_MODELS_INCLUDED = 4,
};
static std::map<ModelCacheStatus, std::string> model_cache_status_to_str = {
{ ModelCacheStatus::SUCCEED, "successful_models" },
{ ModelCacheStatus::NOT_FULLY_CACHED, "not_fully_cached_models" },
{ ModelCacheStatus::NOT_READ, "not_read_models" },
{ ModelCacheStatus::LARGE_MODELS_EXCLUDED, "large_models_excluded" },
{ ModelCacheStatus::LARGE_MODELS_INCLUDED, "large_models_included" },
};
std::pair<std::vector<std::string>, std::pair<ModelCacheStatus, std::vector<std::string>>>
@ -70,10 +74,32 @@ std::map<ModelCacheStatus, std::vector<std::string>> cache_models(
void save_model_status_to_file(const std::map<ModelCacheStatus, std::vector<std::string>>& caching_status,
const std::string& output_dir);
inline bool is_dynamic_model(const std::shared_ptr<ov::Model>& model) {
for (const auto& parameter : model->get_parameters()) {
if (is_dynamic_node(parameter)) {
return true;
}
}
for (const auto& result : model->get_results()) {
if (is_dynamic_node(result)) {
return true;
}
}
return false;
}
inline std::string get_model_type(const std::shared_ptr<ov::Model>& model) {
if (is_dynamic_model(model)) {
return "dynamic";
}
return "static";
}
inline ExtractedPattern
generate_model(const std::set<std::shared_ptr<ov::Node>>& nodes,
std::unordered_set<std::string>& checked_ops,
const std::string& extractor_name) {
const std::string& extractor_name,
bool is_copy_constants = true) {
// map to recover graph using cloned nodes and original connections
// { original_node_name, cloned_node }
std::unordered_map<std::string, std::shared_ptr<ov::Node>> cloned_node_map;
@ -89,7 +115,7 @@ generate_model(const std::set<std::shared_ptr<ov::Node>>& nodes,
auto orig_node_name = node->get_friendly_name();
checked_ops.insert(orig_node_name);
cloned_node_map.insert({ orig_node_name,
clone_node(node, true, false, orig_node_name) });
clone_node(node, is_copy_constants, false, orig_node_name) });
// create temporary vector to fill node output indexes
std::vector<size_t> out_ports(node->outputs().size());
@ -127,7 +153,7 @@ generate_model(const std::set<std::shared_ptr<ov::Node>>& nodes,
if (cloned_node_map.count(orig_in_node_name)) {
auto orig_in_node = cloned_node_map[orig_in_node_name];
auto cloned_in_node_name = cloned_in_node->get_friendly_name();
ov::replace_output_update_name(cloned_in_node->get_default_output(), orig_in_node->output(out_idx));
ov::replace_output_update_name(cloned_in_node->output(out_idx), orig_in_node->output(out_idx));
if (ov::op::util::is_parameter(orig_in_node)) {
auto param = std::dynamic_pointer_cast<ov::op::v0::Parameter>(orig_in_node);
model_parameters.push_back(param);

View File

@ -42,17 +42,24 @@ std::shared_ptr<ov::op::v0::Parameter> convert_const_to_param(const std::shared_
// all inputs are defined as parameters and contain detailed info in meta
std::shared_ptr<ov::Model> generate_model_by_node(const std::shared_ptr<ov::Node>& node);
inline std::string get_node_type(const std::shared_ptr<ov::Node>& node) {
inline bool is_dynamic_node(const std::shared_ptr<ov::Node>& node) {
for (size_t i = 0; i < node->get_input_size(); ++i) {
if (node->get_input_partial_shape(i).is_dynamic()) {
return "dynamic";
return true;
}
}
for (size_t i = 0; i < node->get_output_size(); ++i) {
if (node->get_output_partial_shape(i).is_dynamic()) {
return "dynamic";
return true;
}
}
return false;
}
inline std::string get_node_type(const std::shared_ptr<ov::Node>& node) {
if (is_dynamic_node(node)) {
return "dynamic";
}
return "static";
}

View File

@ -11,10 +11,12 @@
#include "common_test_utils/file_utils.hpp"
#include "cache/cache.hpp"
#include "utils/memory.hpp"
namespace ov {
namespace tools {
namespace subgraph_dumper {
size_t ICache::mem_size = get_ram_size();
bool ICache::serialize_model(const std::pair<std::shared_ptr<ov::Model>, MetaInfo>& graph_info,
const std::string& rel_serialization_dir) {
@ -40,8 +42,8 @@ bool ICache::serialize_model(const std::pair<std::shared_ptr<ov::Model>, MetaInf
meta.serialize(meta_path);
return true;
} catch (std::exception &e) {
std::cout << "[ ERROR ] Failed to serialize model: " << model_name
<< ". Exception: " << e.what() << std::endl;
// std::cout << "[ ERROR ] Failed to serialize model: " << model_name
// << ". Exception: " << e.what() << std::endl;
ov::test::utils::removeFile(xml_path);
ov::test::utils::removeFile(bin_path);
ov::test::utils::removeFile(meta_path);

View File

@ -13,6 +13,7 @@
#include "cache/graph_cache.hpp"
#include "utils/node.hpp"
#include "utils/model.hpp"
namespace ov {
namespace tools {
@ -22,32 +23,54 @@ std::shared_ptr<GraphCache> GraphCache::m_cache_instance = nullptr;
void GraphCache::update_cache(const std::shared_ptr<ov::Model>& model,
const std::string& model_meta_data,
bool extract_body, bool from_cache) {
bool extract_body,
bool from_cache) {
std::cout << "[ INFO ][ GRAPH CACHE ] Processing model: " << model_meta_data << std::endl;
auto model_total_op = model->get_ops().size() - model->get_output_size() - model->inputs().size();
auto extracted_patterns = m_manager.extract(model, extract_body);
if (extracted_patterns.empty()) {
return;
if (from_cache) {
auto meta_path = ov::test::utils::replaceExt(model_meta_data, "meta");
auto meta = MetaInfo::read_meta_from_file(meta_path);
m_graph_cache.insert({ model, meta });
m_graph_cache_bytesize += model->get_graph_size();
} else {
// constants won't be cloned in case the model takes > 50% of RAM
auto model_bytesize = model->get_graph_size();
// check that free RAM is enough; serialize the cache otherwise
// serialize the graph cache when its bytesize exceeds 4GB to avoid a long search over the same graphs
if (m_graph_cache_bytesize + 2 * model_bytesize > mem_size || m_graph_cache_bytesize >> 20 != 0) {
// std::cout << "[ GRAPH CACHE ][ WARNING ] There are not enought RAM memory! Serialize graph cache" << std::endl;
serialize_cache();
m_graph_cache_bytesize = 0;
}
auto is_large_model = is_model_large_to_store_const(model);
if (is_large_model) {
auto model_bytesize_gb = model_bytesize;
model_bytesize_gb >>= 30;
auto mem_size_gb = mem_size;
mem_size_gb >>= 30;
// std::cout << "[ GRAPH CACHE ][ WARNING ] Model bytesize is " << model_bytesize_gb <<
// "GB. It is larger than 25% RAM size: " << mem_size_gb << ". Constants won't be copied!" << std::endl;
}
auto extracted_patterns = m_manager.extract(model, extract_body, !is_large_model);
if (extracted_patterns.empty()) {
return;
}
while (!extracted_patterns.empty()) {
auto it = *extracted_patterns.begin();
update_cache(std::get<0>(it), model_meta_data, std::get<1>(it), std::get<2>(it), model_total_op);
extracted_patterns.pop_front();
}
}
while (!extracted_patterns.empty()) {
auto it = *extracted_patterns.begin();
update_cache(std::get<0>(it), model_meta_data, std::get<1>(it), std::get<2>(it), model_total_op);
extracted_patterns.pop_front();
}
return;
}
void GraphCache::update_cache(const std::shared_ptr<ov::Model>& extracted_model, const std::string& model_path,
std::map<std::string, InputInfo>& input_info, const std::string& extractor_name, size_t model_op_cnt, bool from_cache) {
// todo: check the number 8GB
if (m_graph_cache_bytesize >> 33 > 0) {
std::cout << "[ GRAPH CACHE ][ WARNING ] Cache size > 8 GB. Serialize graph cache" << std::endl;
serialize_cache();
// m_graph_cache.clear();
m_graph_cache_bytesize = 0;
}
void GraphCache::update_cache(const std::shared_ptr<ov::Model>& extracted_model,
const std::string& model_path,
std::map<std::string, InputInfo>& input_info,
const std::string& extractor_name,
size_t model_op_cnt) {
auto graph_name = extracted_model->get_friendly_name();
auto this_op_cnt = extracted_model->get_ops().size() -
extracted_model->get_parameters().size() - extracted_model->get_results().size();
std::string serialized_model_path = "";
for (const auto& extractor : m_manager.get_extractors()) {
auto tmp_serialized_model_path = ov::util::path_join({ m_serialization_dir, m_cache_subdir, extractor.first, graph_name + ".xml" });
@ -60,7 +83,7 @@ void GraphCache::update_cache(const std::shared_ptr<ov::Model>& extracted_model,
std::shared_ptr<ov::Model> model_to_update = nullptr;
// if cached model was serialized
if (!serialized_model_path.empty()) {
std::cout << "[ GRAPH CACHE ][ INFO ] Reading cached model: " << serialized_model_path << std::endl;
// std::cout << "[ GRAPH CACHE ][ INFO ] Reading cached model: " << serialized_model_path << std::endl;
auto bin_path = ov::test::utils::replaceExt(serialized_model_path, ".bin");
auto meta_path = ov::test::utils::replaceExt(serialized_model_path, ".meta");
auto cached_model = ov::test::utils::PluginCache::get().core()->read_model(serialized_model_path);
@ -69,31 +92,48 @@ void GraphCache::update_cache(const std::shared_ptr<ov::Model>& extracted_model,
ov::test::utils::removeFile(serialized_model_path);
ov::test::utils::removeFile(bin_path);
ov::test::utils::removeFile(meta_path);
m_graph_cache.insert({ cached_model, cached_meta });
m_graph_cache_bytesize += cached_model->get_graph_size();
model_to_update = cached_model;
input_info = m_manager.align_input_info(extracted_model, model_to_update,
input_info, cached_meta.get_input_info());
if (m_manager.match(extracted_model, cached_model,
input_info, cached_meta.get_input_info())) {
model_to_update = cached_model;
}
} else {
for (const auto& cached_model : m_graph_cache) {
if (m_manager.match(extracted_model, cached_model.first,
input_info, cached_model.second.get_input_info())) {
model_to_update = cached_model.first;
break;
} else {
auto is_subgraph = m_manager.is_subgraph(extracted_model, cached_model.first,
input_info, cached_model.second.get_input_info());
// if one model is a subgraph of the other, update the model meta info and remove the subgraph from the cache
if (std::get<0>(is_subgraph)) {
std::shared_ptr<ov::Model> graph, subgraph;
std::map<std::string, InputInfo> graph_in_info, subgraph_in_info;
std::tie(std::ignore, graph, subgraph, graph_in_info, subgraph_in_info) = is_subgraph;
if (subgraph == cached_model.first) {
auto meta = m_graph_cache[subgraph];
meta.set_input_info(graph_in_info);
m_graph_cache.erase(subgraph);
m_graph_cache.insert({graph, meta});
m_graph_cache_bytesize += (graph->get_graph_size() - subgraph->get_graph_size());
}
m_graph_cache[cached_model.first].update(model_path,
subgraph_in_info,
model_op_cnt,
this_op_cnt,
extractor_name);
return;
}
}
}
}
auto this_op_cnt = extracted_model->get_ops().size() -
extracted_model->get_parameters().size() - extracted_model->get_results().size();
if (model_to_update == nullptr) {
MetaInfo meta;
if (from_cache) {
auto meta_path = ov::test::utils::replaceExt(model_path, "meta");
meta = MetaInfo::read_meta_from_file(meta_path);
} else {
meta = MetaInfo(model_path, input_info, model_op_cnt, this_op_cnt, extractor_name);
}
MetaInfo meta = MetaInfo(model_path, input_info, model_op_cnt, this_op_cnt, extractor_name);
m_graph_cache.insert({ extracted_model, meta });
m_graph_cache_bytesize += extracted_model->get_graph_size();
return;
@ -110,16 +150,11 @@ void GraphCache::update_cache(const std::shared_ptr<ov::Model>& extracted_model,
}
void GraphCache::serialize_cache() {
// for (const auto& cache_item : m_graph_cache) {
auto it = m_graph_cache.begin();
while (it != m_graph_cache.end()) {
auto rel_dir = ov::util::path_join({m_cache_subdir, it->second.get_any_extractor() });
serialize_model(*it, rel_dir);
m_graph_cache.erase(it->first);
it = m_graph_cache.begin();
}
auto a = 0;
// }
for (const auto& cache_item : m_graph_cache) {
auto rel_dir = ov::util::path_join({ m_cache_subdir, get_model_type(cache_item.first), cache_item.second.get_any_extractor() });
serialize_model(cache_item, rel_dir);
}
m_graph_cache.clear();
}
} // namespace subgraph_dumper

View File

@ -15,8 +15,12 @@ namespace subgraph_dumper {
unsigned long MetaInfo::MIN_MODEL_PRIORITY = std::numeric_limits<unsigned long>::max();
unsigned long MetaInfo::MAX_MODEL_PRIORITY = std::numeric_limits<unsigned long>::min();
MetaInfo::MetaInfo(const std::string& _model_path, const std::map<std::string, InputInfo>& _input_info,
size_t _total_op_cnt, size_t _this_op_cnt, const std::string& extractor, size_t model_priority) {
MetaInfo::MetaInfo(const std::string& _model_path,
const std::map<std::string, InputInfo>& _input_info,
size_t _total_op_cnt,
size_t _this_op_cnt,
const std::string& extractor,
size_t model_priority) {
unsigned long tmp_graph_priority = _total_op_cnt * model_priority;
if (tmp_graph_priority < MIN_MODEL_PRIORITY) MIN_MODEL_PRIORITY = tmp_graph_priority;
if (tmp_graph_priority > MAX_MODEL_PRIORITY) MAX_MODEL_PRIORITY = tmp_graph_priority;
@ -46,6 +50,21 @@ double MetaInfo::get_graph_priority() {
return diff / delta;
}
inline ov::PartialShape str_to_ov_shape(std::string str) {
str = str.replace(str.find('['), 1, "");
str = str.replace(str.find(']'), 1, "");
std::vector<size_t> shape_vec;
size_t pos = 0;
do {
pos = str.find('.');
std::string dim_str = str.substr(0, pos);
shape_vec.push_back(atoi(dim_str.c_str()));
str = str.replace(0, dim_str.length() + 1, "");
} while (pos != std::string::npos);
return ov::PartialShape{shape_vec};
}
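// Illustration (assumed input format, derived from the parsing above): dimensions are
// dot-separated inside square brackets, e.g. str_to_ov_shape("[1.3.224.224]")
// yields ov::PartialShape{1, 3, 224, 224}; dynamic dimensions are not handled here.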
MetaInfo MetaInfo::read_meta_from_file(const std::string& meta_path) {
pugi::xml_document doc;
doc.load_file(meta_path.c_str());
@ -80,6 +99,12 @@ MetaInfo MetaInfo::read_meta_from_file(const std::string& meta_path) {
} else {
in_info.ranges.max = DEFAULT_MAX_VALUE;
}
{
auto max_shape_str = std::string(input.attribute("max_shape").value());
in_info.max_shape = str_to_ov_shape(max_shape_str);
auto min_shape_str = std::string(input.attribute("min_shape").value());
in_info.min_shape = str_to_ov_shape(min_shape_str);
}
input_info.insert({in_name, in_info});
}
}
@ -132,6 +157,8 @@ void MetaInfo::serialize(const std::string& serialization_path) {
input_node.append_attribute("max").set_value(input.second.ranges.max);
}
input_node.append_attribute("convert_to_const").set_value(input.second.is_const);
input_node.append_attribute("max_shape").set_value(ov::test::utils::partialShape2str({ input.second.max_shape }).c_str());
input_node.append_attribute("min_shape").set_value(ov::test::utils::partialShape2str({ input.second.min_shape }).c_str());
}
doc.save_file(serialization_path.c_str());
}
@ -142,6 +169,7 @@ void MetaInfo::update(const std::string& _model_path,
size_t _this_op_cnt,
const std::string& extractor,
const std::vector<std::string>& ignored_inputs) {
bool is_update_in_info = true;
if (input_info.size() != _input_info.size()) {
throw std::runtime_error("Incompatible input info!");
}
@ -155,6 +183,19 @@ void MetaInfo::update(const std::string& _model_path,
} else {
model_info.insert({ model_name, ModelInfo(_model_path, _total_op_cnt) });\
}
// update max and min abs priority to normalize priorities during serialization
{
auto abs_graph_priority = get_abs_graph_priority();
if (abs_graph_priority > MAX_MODEL_PRIORITY) MAX_MODEL_PRIORITY = abs_graph_priority;
if (abs_graph_priority < MIN_MODEL_PRIORITY) MIN_MODEL_PRIORITY = abs_graph_priority;
}
if (!extractor.empty()) {
extractors.insert(extractor);
}
if (!is_update_in_info) {
return;
}
for (const auto& in : _input_info) {
if (std::find(ignored_inputs.begin(), ignored_inputs.end(), in.first) != ignored_inputs.end()) {
continue;
@ -167,15 +208,6 @@ void MetaInfo::update(const std::string& _model_path,
input_info[in.first] = in.second;
}
}
// update max and mib abs priority to normilize priorities when serialize
{
auto abs_graph_priority = get_abs_graph_priority();
if (abs_graph_priority > MAX_MODEL_PRIORITY) MAX_MODEL_PRIORITY = abs_graph_priority;
if (abs_graph_priority < MIN_MODEL_PRIORITY) MIN_MODEL_PRIORITY = abs_graph_priority;
}
if (!extractor.empty()) {
extractors.insert(extractor);
}
}
std::map<std::string, InputInfo> MetaInfo::get_input_info() const {

View File

@ -10,6 +10,7 @@
#include "openvino/util/file_util.hpp"
#include "common_test_utils/file_utils.hpp"
#include "utils/memory.hpp"
using namespace ov::tools::subgraph_dumper;
@ -40,6 +41,9 @@ int main(int argc, char *argv[]) {
std::cout << "[ INFO ] Try 'subgraphsDumper -h' for more information. \nException: " << e.what() << std::endl;
return 1;
}
size_t ram_size_gb = get_ram_size();
ram_size_gb >>= 30;
std::cout << "[ INFO ] RAM size is " << ram_size_gb << "GB" << std::endl;
std::vector<std::shared_ptr<ICache>> caches;
if (FLAGS_cache_type == "OP" || FLAGS_cache_type.empty()) {
@ -48,7 +52,7 @@ int main(int argc, char *argv[]) {
}
if (FLAGS_cache_type == "GRAPH" || FLAGS_cache_type.empty()) {
std::cout << "[ INFO ] GraphCache is enabled!" << std::endl;
caches.push_back(GraphCache::get());
caches.push_back(GraphCache::get(FLAGS_device));
}
for (auto& cache : caches) {

View File

@ -18,7 +18,7 @@ iMatcherConfig::Ptr MatchersManager::get_config(const std::shared_ptr<ov::Node>
}
bool MatchersManager::match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref) {
const std::shared_ptr<ov::Node> &ref) const {
for (const auto &it : m_matchers) {
if (it.second->match(node, ref)) {
return true;

View File

@ -4,6 +4,7 @@
#include "openvino/op/convolution.hpp"
#include "openvino/op/group_conv.hpp"
#include "openvino/op/util/op_types.hpp"
#include "common_test_utils/graph_comparator.hpp"
#include "matchers/single_op/single_op.hpp"
@ -34,10 +35,13 @@ bool SingleOpMatcher::match_inputs(const std::shared_ptr<ov::Node> &node,
if (std::find(ignored_ports.begin(), ignored_ports.end(), port_id) != ignored_ports.end()) {
continue;
}
const auto &cur_node_input_type = node->input_value(port_id).get_node_shared_ptr()->get_type_info();
const auto &ref_node_input_type = ref->input_value(port_id).get_node_shared_ptr()->get_type_info();
if (cur_node_input_type != ref_node_input_type) {
return false;
if (!ov::op::util::is_parameter(node) && !ov::op::util::is_parameter(ref) &&
!ov::op::util::is_constant(node) && !ov::op::util::is_constant(ref)) {
const auto &cur_node_input_type = node->input_value(port_id).get_node_shared_ptr()->get_type_info();
const auto &ref_node_input_type = ref->input_value(port_id).get_node_shared_ptr()->get_type_info();
if (cur_node_input_type != ref_node_input_type) {
return false;
}
}
if (node->get_input_tensor(port_id).get_partial_shape().rank() != ref->get_input_tensor(port_id).get_partial_shape().rank()) {
return false;
@ -59,6 +63,13 @@ SingleOpMatcher::match_outputs(const std::shared_ptr<ov::Node> &node,
return false;
}
for (size_t port_id = 0; port_id < node->get_output_size(); ++port_id) {
if (!ov::op::util::is_output(node) && !ov::op::util::is_output(ref)) {
const auto &cur_node_out_type = node->output(port_id).get_node_shared_ptr()->get_type_info();
const auto &ref_node_out_type = ref->output(port_id).get_node_shared_ptr()->get_type_info();
if (cur_node_out_type != ref_node_out_type) {
return false;
}
}
if (node->get_output_tensor(port_id).get_element_type() != ref->get_output_tensor(port_id).get_element_type()) {
return false;
}
@ -94,7 +105,7 @@ bool SingleOpMatcher::match(const std::shared_ptr<ov::Node> &node,
if (!match_inputs(node, ref)) {
return false;
}
if (!match_attrs(node, ref)) {
if (!match_attrs(node, ref) && !ov::op::util::is_parameter(node) && !ov::op::util::is_parameter(ref)) {
return false;
}
if (!match_outputs(node, ref)) {

View File

@ -6,6 +6,7 @@
#include "openvino/op/tensor_iterator.hpp"
#include "openvino/op/if.hpp"
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "matchers/subgraph/fused_names.hpp"
@ -13,6 +14,26 @@
using namespace ov::tools::subgraph_dumper;
void FusedNamesExtractor::set_target_device(const std::string& _device) {
auto available_devices = core->get_available_devices();
if (_device.empty()) {
device = available_devices.front();
std::cout << "[ WARNING ][ GRAPH CACHE ] " << device <<
" will be used for `fused_names` extractor" << std::endl;
return;
} else if (std::find(available_devices.begin(),
available_devices.end(),
_device) == available_devices.end()) {
std::string message = "Incorrect device ";
message += _device;
message += " to enable `fused_names` extractor! Available devices: ";
message += ov::test::utils::vec2str(available_devices);
throw std::runtime_error(message);
}
device = _device;
std::cout << "[ INFO ][ GRAPH CACHE ] " << device << " is using for `fused_names` extractor" << std::endl;
}
std::unordered_set<std::string>
FusedNamesExtractor::extract_compiled_model_names(const std::shared_ptr<ov::Model>& model) {
auto compiled_model = core->compile_model(model, device);
@ -26,9 +47,9 @@ FusedNamesExtractor::extract_compiled_model_names(const std::shared_ptr<ov::Mode
return compiled_op_name;
}
FusedNamesExtractor::FusedNamesExtractor() {
FusedNamesExtractor::FusedNamesExtractor(const std::string& device) {
core = ov::test::utils::PluginCache::get().core();
device = *(core->get_available_devices().begin());
set_target_device(device);
}
FusedNamesExtractor::~FusedNamesExtractor() {
@ -37,7 +58,8 @@ FusedNamesExtractor::~FusedNamesExtractor() {
std::list<ExtractedPattern>
FusedNamesExtractor::extract(const std::shared_ptr<ov::Model> &model,
bool is_extract_body) {
bool is_extract_body,
bool is_copy_constants) {
auto compiled_op_name = extract_compiled_model_names(model);
std::list<ExtractedPattern> matched_patterns;
std::unordered_set<std::string> checked_ops;
@ -49,10 +71,10 @@ FusedNamesExtractor::extract(const std::shared_ptr<ov::Model> &model,
}
if (compiled_op_name.count(op_name)) {
try {
matched_patterns.push_back(generate_model(nodes, checked_ops, extractor_name));
matched_patterns.push_back(generate_model(nodes, checked_ops, extractor_name, is_copy_constants));
} catch(std::exception& e) {
if (std::string(e.what()) != "Incorrect node number to create model") {
std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " <<e.what() << std::endl;
if (std::string(e.what()).find("Incorrect node number to create model") == std::string::npos) {
// std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " <<e.what() << std::endl;
}
}
nodes.clear();
@ -82,10 +104,10 @@ FusedNamesExtractor::extract(const std::shared_ptr<ov::Model> &model,
}
}
try {
matched_patterns.push_back(generate_model(nodes, checked_ops, extractor_name));
matched_patterns.push_back(generate_model(nodes, checked_ops, extractor_name, is_copy_constants));
} catch(std::exception& e) {
if (std::string(e.what()) != "Incorrect node number to create model") {
std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " <<e.what() << std::endl;
if (std::string(e.what()).find("Incorrect node number to create model") == std::string::npos) {
// std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " <<e.what() << std::endl;
}
}
return matched_patterns;

View File

@ -2,7 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <chrono>
#include "openvino/pass/manager.hpp"
#include "openvino/pass/constant_folding.hpp"
#include "matchers/subgraph/manager.hpp"
#include "utils/model.hpp"
using namespace ov::tools::subgraph_dumper;
@ -12,18 +15,47 @@ bool ExtractorsManager::match(const std::shared_ptr<ov::Model> &model,
if (it.second->match(model, ref)) {
return true;
}
return false;
}
return false;
}
ExtractorsManager::ExtractedSubgraphTuple
ExtractorsManager::is_subgraph(const std::shared_ptr<ov::Model> &model,
const std::shared_ptr<ov::Model> &ref_model,
const std::map<std::string, InputInfo> &in_info,
const std::map<std::string, InputInfo> &in_info_ref) {
for (const auto &it : m_extractors) {
auto extractor_res = it.second->is_subgraph(model, ref_model);
if (std::get<0>(extractor_res)) {
std::map<std::string, InputInfo> graph_in_info, subgraph_in_info;
if (std::get<1>(extractor_res) == model && std::get<2>(extractor_res) == ref_model) {
graph_in_info = in_info;
subgraph_in_info = in_info_ref;
} else if (std::get<1>(extractor_res) == ref_model && std::get<2>(extractor_res) == model) {
graph_in_info = in_info_ref;
subgraph_in_info = in_info;
} else {
throw std::runtime_error("Generated models are incompatible with original ones!");
}
try {
subgraph_in_info = align_input_info(std::get<2>(extractor_res), std::get<1>(extractor_res), subgraph_in_info, graph_in_info);
} catch(...) {
return { false, nullptr, nullptr, {}, {} };
}
return { true, std::get<1>(extractor_res), std::get<2>(extractor_res), graph_in_info, subgraph_in_info };
}
return { false, nullptr, nullptr, {}, {} };
}
}
bool ExtractorsManager::match(const std::shared_ptr<ov::Model> &model,
const std::shared_ptr<ov::Model> &ref,
std::map<std::string, InputInfo> &in_info,
const std::map<std::string, InputInfo> &in_info_ref) {
if (match(model, ref)) {
try {
auto new_input_info = align_input_info(model, ref, in_info, in_info_ref);
in_info = new_input_info;
in_info = align_input_info(model, ref, in_info, in_info_ref);
return true;
} catch (...) {
return false;
@ -36,51 +68,78 @@ std::map<std::string, InputInfo>
ExtractorsManager::align_input_info(const std::shared_ptr<ov::Model>& model,
const std::shared_ptr<ov::Model>& model_ref,
const std::map<std::string, InputInfo>& in_info,
const std::map<std::string, InputInfo>& in_info_ref) {
const std::map<std::string, InputInfo>& in_info_ref,
const std::map<std::string, std::string> &matched_op) {
std::map<std::string, InputInfo> new_input_info = in_info;
bool is_update_required = false;
for (const auto& in_info_item : in_info_ref) {
if (!in_info.count(in_info_item.first)) {
is_update_required = true;
break;
} else if (in_info.at(in_info_item.first).is_const != in_info_item.second.is_const) {
throw std::runtime_error("Impossible to update input info!!!");
}
}
if (is_update_required) {
std::map<std::string, InputInfo> new_ref_input_info = in_info_ref;
// align matched model names
auto ref_model_ops = model_ref->get_ordered_ops();
auto model_ops = model->get_ordered_ops();
size_t ref_ordered_ops_size = ref_model_ops.size();
size_t ordered_ops_size = model_ops.size();
if (ordered_ops_size != ref_model_ops.size()) {
throw std::runtime_error("Matched models are different!");
if (ref_ordered_ops_size != ordered_ops_size && matched_op.empty()) {
throw std::runtime_error("Matched models can not be compared according different op numbers!");
}
for (size_t i = 0; i < ordered_ops_size; ++i) {
auto model_op_name = model_ops[i]->get_friendly_name();
for (size_t i = 0; i < ref_ordered_ops_size; ++i) {
auto model_op_name = i < ordered_ops_size ? model_ops[i]->get_friendly_name() : "";
auto model_ref_op_name = ref_model_ops[i]->get_friendly_name();
if (in_info.count(model_op_name)) {
auto input_info = new_input_info[model_op_name];
if (input_info.is_const != new_ref_input_info[model_ref_op_name].is_const) {
throw std::runtime_error("Impossible yo update input info!!!");
}
new_input_info.erase(model_op_name);
new_input_info.insert({ model_ref_op_name, input_info });
if (!in_info_ref.count(model_ref_op_name) && !in_info.count(model_op_name)) {
continue;
}
auto input_info = matched_op.empty() ? new_input_info[model_op_name] : in_info_ref.at(model_ref_op_name);
std::string input_name = matched_op.count(model_ref_op_name) ? matched_op.at(model_ref_op_name) : model_op_name;
if (new_input_info.count(input_name)) {
if (input_info.is_const != in_info_ref.at(model_ref_op_name).is_const) {
throw std::runtime_error("Impossible to update input info!!!");
}
if (!matched_op.empty()) {
input_info = new_input_info.at(input_name);
}
new_input_info.erase(input_name);
}
new_input_info.insert({ model_ref_op_name, input_info });
}
}
return new_input_info;
}
std::list<ExtractedPattern>
ExtractorsManager::extract(const std::shared_ptr<ov::Model> &model, bool is_extract_body) {
ExtractorsManager::extract(const std::shared_ptr<ov::Model> &model,
bool is_extract_body,
bool is_copy_constants) {
std::list<ExtractedPattern> result;
for (const auto &it : m_extractors) {
// extract patterns from original models
auto start = std::chrono::high_resolution_clock::now();
it.second->set_extractor_name(it.first);
auto extracted_patterns = it.second->extract(model, is_extract_body);
auto extracted_patterns = it.second->extract(model, is_extract_body, is_copy_constants);
result.insert(result.end(), extracted_patterns.begin(), extracted_patterns.end());
auto end = std::chrono::high_resolution_clock::now();
auto delta = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
std::cout << "[ INFO ][ EXTRACTOR DURATION ] " << it.first << " " << delta << "ms" << std::endl;
std::cout << "[ INFO ][ EXTRACTOR DURATION ][ ORIGINAL MODEL ] " << it.first << " " << delta << "ms" << std::endl;
// todo: enable it after validation
// if (!is_dynamic_model(model)) {
// // extract patterns from models after `constant_folding` pass
// ov::pass::Manager manager;
// manager.register_pass<ov::pass::ConstantFolding>();
// manager.run_passes(model);
// extracted_patterns = it.second->extract(model, is_extract_body, is_copy_constants);
// result.insert(result.end(), extracted_patterns.begin(), extracted_patterns.end());
// end = std::chrono::high_resolution_clock::now();
// delta = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// std::cout << "[ INFO ][ EXTRACTOR DURATION ][ CONSTANT FOLDING ] " << it.first << " " << delta << "ms" << std::endl;
// }
}
return result;
}

View File

@ -16,7 +16,8 @@ using namespace ov::tools::subgraph_dumper;
std::list<ExtractedPattern>
RepeatPatternExtractor::extract(const std::shared_ptr<ov::Model> &model,
bool is_extract_body) {
bool is_extract_body,
bool is_copy_constants) {
std::unordered_set<std::string> checked_ops;
std::list<ExtractedPattern> to_cache;
@ -92,11 +93,23 @@ RepeatPatternExtractor::extract(const std::shared_ptr<ov::Model> &model,
}
for (size_t i = 0; i < start_node_idx.size(); ++i) {
try {
to_cache.push_back(generate_model(nodes[i], checked_ops, extractor_name));
std::unordered_set<std::string> tmp_checked_ops;
auto extracted_pattern = generate_model(nodes[i], tmp_checked_ops, extractor_name, is_copy_constants);
auto extracted_model = std::get<0>(extracted_pattern);
std::list<ExtractedPattern> secondary_patterns;
if (nodes[i].size() > 20) {
secondary_patterns = extract(std::get<0>(extracted_pattern), is_extract_body, is_copy_constants);
}
if (secondary_patterns.size() > 1) {
to_cache.insert(to_cache.end(), secondary_patterns.begin(), secondary_patterns.end());
} else {
to_cache.push_back(extracted_pattern);
}
nodes[i].clear();
checked_ops.insert(tmp_checked_ops.begin(), tmp_checked_ops.end());
} catch(std::exception& e) {
if (std::string(e.what()) != "Incorrect node number to create model") {
std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " <<e.what() << std::endl;
if (std::string(e.what()).find("Incorrect node number to create model!") == std::string::npos) {
// std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " <<e.what() << std::endl;
}
}
}

View File

@ -1,9 +1,8 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "matchers/single_op/single_op.hpp"
#include "matchers/single_op/convolutions.hpp"
#include "matchers/single_op/manager.hpp"
#include <tuple>
#include "matchers/subgraph/subgraph.hpp"
using namespace ov::tools::subgraph_dumper;
@ -17,19 +16,61 @@ SubgraphExtractor::match(const std::shared_ptr<ov::Model> &model,
}
std::vector<std::shared_ptr<ov::Node>> ordered_ops = model->get_ordered_ops(),
ref_ordered_ops = ref_model->get_ordered_ops();
if (ordered_ops.size() != ref_ordered_ops.size())
if (ordered_ops.size() != ref_ordered_ops.size()) {
return false;
MatchersManager::MatchersMap matchers = {
{ "generic_single_op", SingleOpMatcher::Ptr(new SingleOpMatcher) },
{ "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) },
};
MatchersManager manager(matchers);
for (size_t i = 0; i < ordered_ops.size(); ++i) {
if (is_node_to_skip(ordered_ops[i]) && is_node_to_skip(ref_ordered_ops[i]))
continue;
if (!manager.match(ordered_ops[i], ref_ordered_ops[i])) {
return false;
}
size_t matched_op_cnt = 0, total_op_cnt = ordered_ops.size();
size_t matched_op_cnt_required = round(0.9 * total_op_cnt);
for (size_t i = 0; i < total_op_cnt; ++i) {
if (is_node_to_skip(ordered_ops[i]) &&
is_node_to_skip(ref_ordered_ops[i]) ||
m_manager.match(ordered_ops[i], ref_ordered_ops[i])) {
++matched_op_cnt;
}
if (matched_op_cnt >= matched_op_cnt_required) {
return true;
}
}
return true;
return false;
}
inline SubgraphExtractor::IsSubgraphTuple prepare_is_subgraph_result(bool is_subgraph,
const std::shared_ptr<ov::Model>& graph,
const std::shared_ptr<ov::Model>& subgraph,
const std::map<std::string, std::string>& matched_ops) {
return is_subgraph ?
std::make_tuple(is_subgraph, graph, subgraph, matched_ops) :
std::make_tuple(is_subgraph, nullptr, nullptr, std::map<std::string, std::string>());
}
SubgraphExtractor::IsSubgraphTuple
SubgraphExtractor::is_subgraph(const std::shared_ptr<ov::Model> &model,
const std::shared_ptr<ov::Model> &ref_model) const {
std::vector<std::shared_ptr<ov::Node>> ordered_ops = model->get_ordered_ops(),
ref_ordered_ops = ref_model->get_ordered_ops();
bool is_model = ordered_ops.size() > ref_ordered_ops.size();
ov::NodeVector graph_to_check_ops, subgraph_to_check_ops;
std::shared_ptr<ov::Model> graph = nullptr, subgraph = nullptr;
if (is_model) {
graph_to_check_ops = ordered_ops;
subgraph_to_check_ops = ref_ordered_ops;
graph = model;
subgraph = ref_model;
} else {
graph_to_check_ops = ref_ordered_ops;
subgraph_to_check_ops = ordered_ops;
graph = ref_model;
subgraph = model;
}
std::map<std::string, std::string> matched_op_names;
auto graph_it = graph_to_check_ops.begin(), subgraph_it = subgraph_to_check_ops.begin();
while (graph_it != graph_to_check_ops.end() && subgraph_it != subgraph_to_check_ops.end()) {
if (m_manager.match(*graph_it, *subgraph_it)) {
matched_op_names.insert({ (*graph_it)->get_friendly_name(), (*subgraph_it)->get_friendly_name()});
++subgraph_it;
}
++graph_it;
}
return prepare_is_subgraph_result(subgraph_it == subgraph_to_check_ops.end(), graph, subgraph, matched_op_names);
}
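
The containment check above is a single greedy pass: it walks the larger graph's ordered ops once and advances through the smaller graph's ops only on a match, declaring a subgraph when the smaller sequence is exhausted. The same idea on plain name sequences, as a minimal sketch (hypothetical helper, simple string equality in place of the matchers):

#include <string>
#include <vector>

// Greedy in-order containment check, mirroring the loop in is_subgraph above.
bool is_ordered_subsequence(const std::vector<std::string>& graph_ops,
                            const std::vector<std::string>& subgraph_ops) {
    auto graph_it = graph_ops.begin();
    auto subgraph_it = subgraph_ops.begin();
    while (graph_it != graph_ops.end() && subgraph_it != subgraph_ops.end()) {
        if (*graph_it == *subgraph_it) {
            ++subgraph_it;  // matched, advance in the candidate subgraph
        }
        ++graph_it;         // always advance in the larger graph
    }
    return subgraph_it == subgraph_ops.end();  // all subgraph ops were matched in order
}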

View File

@ -54,7 +54,7 @@ find_models(const std::vector<std::string> &dirs, const std::string& regexp) {
}
} catch (std::exception& e) {
not_read_model.emplace_back(model_file);
std::cout << "[ ERROR ] Impossible to read model: " << model_file << std::endl << "Exception: " << e.what();
// std::cout << "[ ERROR ] Impossible to read model: " << model_file << std::endl << "Exception: " << e.what();
}
}
}
@ -77,7 +77,9 @@ std::map<ModelCacheStatus, std::vector<std::string>> cache_models(
std::map<ModelCacheStatus, std::vector<std::string>> cache_status = {
{ ModelCacheStatus::SUCCEED, {} },
{ ModelCacheStatus::NOT_FULLY_CACHED, {} },
{ ModelCacheStatus::NOT_READ, {} }
{ ModelCacheStatus::NOT_READ, {} },
{ ModelCacheStatus::LARGE_MODELS_EXCLUDED, {} },
{ ModelCacheStatus::LARGE_MODELS_INCLUDED, {} },
};
auto core = ov::test::utils::PluginCache::get().core();
auto models_size = models.size();
@ -86,19 +88,25 @@ std::map<ModelCacheStatus, std::vector<std::string>> cache_models(
const auto& model = models[i];
if (ov::util::file_exists(model)) {
std::cout << "[ INFO ] [ " << i << "/" << models_size << " ] model will be processed" << std::endl;
std::cout << "[ INFO ][ " << i + 1 << "/" << models_size << " ] model will be processed" << std::endl;
ModelCacheStatus model_status = ModelCacheStatus::SUCCEED;
try {
std::shared_ptr<ov::Model> function = core->read_model(model);
try {
if (cache->is_model_large_to_read(function, model)) {
cache_status[ModelCacheStatus::LARGE_MODELS_EXCLUDED].push_back(model);
continue;
} else if (cache->is_model_large_to_store_const(function)) {
cache_status[ModelCacheStatus::LARGE_MODELS_INCLUDED].push_back(model);
}
cache->update_cache(function, model, extract_body, from_cache);
} catch (std::exception &e) {
std::cout << "[ ERROR ] Model processing failed with exception:" << std::endl << e.what() << std::endl;
// std::cout << "[ ERROR ] Model processing failed with exception:" << std::endl << e.what() << std::endl;
model_status = ModelCacheStatus::NOT_FULLY_CACHED;
}
} catch (std::exception &e) {
model_status = ModelCacheStatus::NOT_READ;
std::cout << "[ ERROR ] Model reading failed with exception:" << std::endl << e.what() << std::endl;
// std::cout << "[ ERROR ] Model reading failed with exception:" << std::endl << e.what() << std::endl;
}
cache_status[model_status].push_back(model);
}

View File

@ -14,7 +14,7 @@ std::map<std::string, InputInfo> get_input_info_by_node(const std::shared_ptr<ov
if (!ov::op::util::is_parameter(input_node) && !ov::op::util::is_constant(input_node)) {
continue;
}
InputInfo in_info;
InputInfo in_info(node->get_input_partial_shape(port_id));
std::string input_name = input_node->get_friendly_name();
if (std::dynamic_pointer_cast<ov::op::v0::Constant>(input_node)) {
if (ov::shape_size(input_node->get_output_shape(0)) == 0)

View File

@ -42,7 +42,7 @@ protected:
test_model = std::make_shared<ov::Model>(convert, params);
test_model->set_friendly_name(model_name);
}
test_meta = ov::tools::subgraph_dumper::MetaInfo(test_model_path, {{"in_0", ov::tools::subgraph_dumper::InputInfo(0, 1, true)}});
test_meta = ov::tools::subgraph_dumper::MetaInfo(test_model_path, {{"in_0", ov::tools::subgraph_dumper::InputInfo({1, 2}, 0, 1, true)}});
}
void TearDown() override {

View File

@ -25,27 +25,38 @@ class InputInfoUnitTest : public SubgraphsDumperBaseTest {};
TEST_F(InputInfoUnitTest, constructor) {
ASSERT_NO_THROW(auto in_info = InputInfo());
ASSERT_NO_THROW(auto in_info = InputInfo(0));
ASSERT_NO_THROW(auto in_info = InputInfo(0, 1));
ASSERT_NO_THROW(auto in_info = InputInfo(0, 1, true));
ASSERT_NO_THROW(auto in_info = InputInfo({10}));
ASSERT_NO_THROW(auto in_info = InputInfo({}, 0));
ASSERT_NO_THROW(auto in_info = InputInfo({}, 0, 1));
ASSERT_NO_THROW(auto in_info = InputInfo({}, 0, 1, true));
}
TEST_F(InputInfoUnitTest, update_ranges) {
auto in_info_0 = InputInfo();
auto in_info_1 = InputInfo(0);
auto in_info_1 = InputInfo({}, 0);
in_info_0 = in_info_1;
ASSERT_EQ(in_info_0.ranges.min, in_info_1.ranges.min);
ASSERT_EQ(in_info_0.ranges.max, in_info_1.ranges.max);
ASSERT_EQ(in_info_0.is_const, in_info_1.is_const);
auto in_info_2 = InputInfo(1, 2);
auto ref_in_info = InputInfo(0, 2);
auto in_info_2 = InputInfo({}, 1, 2);
auto ref_in_info = InputInfo({}, 0, 2);
in_info_0 = in_info_2;
ASSERT_EQ(in_info_0.ranges.min, ref_in_info.ranges.min);
ASSERT_EQ(in_info_0.ranges.max, ref_in_info.ranges.max);
ASSERT_EQ(in_info_0.is_const, ref_in_info.is_const);
}
TEST_F(InputInfoUnitTest, update_shapes) {
auto in_info_0 = InputInfo({10});
ASSERT_EQ(in_info_0.min_shape, ov::PartialShape({10}));
ASSERT_EQ(in_info_0.max_shape, ov::PartialShape({10}));
auto in_info_1 = InputInfo({20});
in_info_0 = in_info_1;
ASSERT_EQ(in_info_0.min_shape, ov::PartialShape({10}));
ASSERT_EQ(in_info_0.max_shape, ov::PartialShape({20}));
}
// ======================== Model Info Func tests =============================================
class ModelInfoFuncTest : public ::testing::Test {};
@ -55,6 +66,7 @@ TEST_F(ModelInfoFuncTest, constructor) {
ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml"));
ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1));
ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1, 2));
ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1, 2, 3));
}
// ======================== Meta Info Functional tests =============================================
@ -71,7 +83,7 @@ protected:
test_model_path = "test_model_path.xml";
test_extractor_name = "test_extractor";
test_model_name = ov::test::utils::replaceExt(test_model_path, "");
test_in_info = {{ "test_in_0", InputInfo(DEFAULT_MIN_VALUE, 1, true) }};
test_in_info = {{ "test_in_0", InputInfo({10}, DEFAULT_MIN_VALUE, 1, true) }};
test_model_info = {{ test_model_name, ModelInfo(test_model_path, 5) }};
test_artifacts_dir = ov::util::path_join({ov::test::utils::getCurrentWorkingDir(), "test_artifacts"});
ov::util::create_directory_recursive(test_artifacts_dir);
@ -110,17 +122,22 @@ TEST_F(MetaInfoFuncTest, get_any_extractor) {
}
TEST_F(MetaInfoFuncTest, update) {
std::map<std::string, InputInfo> test_in_info = {{ "test_in_0", InputInfo(DEFAULT_MIN_VALUE, 1, true) }};
std::map<std::string, InputInfo> test_in_info = {{ "test_in_0", InputInfo({10}, DEFAULT_MIN_VALUE, 1, true) }};
auto test_meta = MetaInfo(test_model_name, test_in_info, 1, 1, test_extractor_name);
std::map<std::string, InputInfo> test_input_info_1 = {{ "test_in_0", InputInfo(0, 1, true) }};
ASSERT_EQ(test_meta.get_input_info().at("test_in_0").min_shape, ov::PartialShape({10}));
ASSERT_EQ(test_meta.get_input_info().at("test_in_0").max_shape, ov::PartialShape({10}));
std::map<std::string, InputInfo> test_input_info_1 = {{ "test_in_0", InputInfo({50}, 0, 1, true) }};
std::string test_model_1 = "test_model_1";
std::string test_model_path_1 = ov::util::path_join({ "path", "to", test_model_1 + ".xml"});
ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {}));
ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_1", InputInfo() }}));
ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_0", InputInfo(0, 1, false) }}));
ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_1", InputInfo({10}) }}));
ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_0", InputInfo({10}, 0, 1, false) }}));
ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1));
ASSERT_EQ(test_meta.get_input_info().at("test_in_0").min_shape, ov::PartialShape({10}));
ASSERT_EQ(test_meta.get_input_info().at("test_in_0").max_shape, ov::PartialShape({50}));
ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 1, 2, "test_extractor_1"));
ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 2));
ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 2, 4, "test"));
}
TEST_F(MetaInfoFuncTest, serialize) {
@ -178,6 +195,12 @@ TEST_F(MetaInfoUnitTest, serialize) {
ASSERT_EQ(input_info[in_xml].ranges.min, min_xml);
auto max_xml = std::string(in_info_xml.attribute("max").value()) == "undefined" ? DEFAULT_MAX_VALUE : in_info_xml.attribute("max").as_double();
ASSERT_EQ(input_info[in_xml].ranges.max, max_xml);
auto max_shape_str = std::string(in_info_xml.attribute("max_shape").value());
auto max_shape_ref = ov::test::utils::partialShape2str({this->get_input_info().begin()->second.max_shape});
ASSERT_EQ(max_shape_str, max_shape_ref);
auto min_shape_str = std::string(in_info_xml.attribute("min_shape").value());
auto min_shape_ref = ov::test::utils::partialShape2str({this->get_input_info().begin()->second.min_shape});
ASSERT_EQ(min_shape_str, min_shape_ref);
}
}
{
@ -202,18 +225,22 @@ TEST_F(MetaInfoUnitTest, read_meta_from_file) {
TEST_F(MetaInfoUnitTest, update) {
auto test_meta = MetaInfo(test_model_name, test_in_info);
std::map<std::string, InputInfo> test_meta_1 = {{ "test_in_0", InputInfo(0, 1, true) }};
std::map<std::string, InputInfo> test_meta_1 = {{ "test_in_0", InputInfo({20}, 0, 1, true) }};
std::string test_model_1 = "test_model_1";
std::string test_model_path_1 = ov::util::path_join({ "path", "to", test_model_1 + ".xml"});
this->update(test_model_path_1, test_meta_1);
ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1));
ASSERT_NE(this->model_info.find(test_model_1), this->model_info.end());
ASSERT_EQ(*this->model_info[test_model_1].model_paths.begin(), test_model_path_1);
ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 1);
this->update(test_model_path_1, test_meta_1);
ASSERT_EQ(this->input_info.begin()->second.min_shape, ov::PartialShape({10}));
ASSERT_EQ(this->input_info.begin()->second.max_shape, ov::PartialShape({20}));
ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1));
ASSERT_EQ(this->model_info[test_model_1].model_paths.size(), 1);
ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 2);
ASSERT_EQ(this->input_info.begin()->second.min_shape, ov::PartialShape({10}));
ASSERT_EQ(this->input_info.begin()->second.max_shape, ov::PartialShape({20}));
test_model_path_1 = ov::util::path_join({ "path", "to", "test", test_model_1 + ".xml"});
this->update(test_model_path_1, test_meta_1, 0, 1, "test_extractor");
ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1, 0, 1, "test_extractor"));
ASSERT_EQ(this->model_info[test_model_1].model_paths.size(), 2);
    ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 3);

View File

@ -88,9 +88,20 @@ TEST_F(ExtractorsManagerTest, match) {
ASSERT_FALSE(this->match(test_model_0_1, test_model_1));
}
TEST_F(ExtractorsManagerTest, is_subgraph) {
this->set_extractors(test_map);
ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_0_1));
auto is_subgraph = this->is_subgraph(test_model_0_0, test_model_0_1);
ASSERT_TRUE(std::get<0>(is_subgraph));
ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_1));
ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_0_0, test_model_1)));
ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, test_model_1));
ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_0_1, test_model_1)));
}
TEST_F(ExtractorsManagerTest, match_with_in_info) {
this->set_extractors(test_map);
std::map<std::string, InputInfo> test_in_info({{"test_parameter_0", InputInfo()}}), test_in_info_1({{"test_parameter_1", InputInfo(1, 2, true)}});
std::map<std::string, InputInfo> test_in_info({{"test_parameter_0", InputInfo()}}), test_in_info_1({{"test_parameter_1", InputInfo({}, 1, 2, true)}});
ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info));
ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info));
ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_1));
@ -112,4 +123,12 @@ TEST_F(ExtractorsManagerTest, align_input_info) {
ASSERT_EQ(c, test_in_info_ref);
}
TEST_F(ExtractorsManagerTest, align_input_info_for_subgraphs) {
std::map<std::string, InputInfo> test_in_info({{"test_parameter_0", InputInfo()}}), test_in_info_ref({{"test_parameter_1", InputInfo()}});
ASSERT_NE(test_in_info, test_in_info_ref);
ASSERT_NO_THROW(this->align_input_info(test_model_0_0, test_model_0_1, test_in_info, test_in_info_ref, {{"test_parameter_0", "test_parameter_1"}}));
auto c = this->align_input_info(test_model_0_0, test_model_0_1, test_in_info, test_in_info_ref);
ASSERT_EQ(c, test_in_info_ref);
}
} // namespace

View File

@ -64,10 +64,146 @@ TEST_F(SubgraphExtractorTest, match) {
ASSERT_FALSE(this->match(test_model_0_1, test_model_1));
}
TEST_F(SubgraphExtractorTest, match_90_percent) {
{
std::shared_ptr<ov::op::v0::Parameter> test_parameter =
std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2});
std::shared_ptr<ov::op::v0::Abs> test_abs_0 =
std::make_shared<ov::op::v0::Abs>(test_parameter);
std::shared_ptr<ov::op::v0::Abs> test_abs_1 =
std::make_shared<ov::op::v0::Abs>(test_abs_0);
std::shared_ptr<ov::op::v0::Abs> test_abs_2 =
std::make_shared<ov::op::v0::Abs>(test_abs_1);
std::shared_ptr<ov::op::v0::Abs> test_abs_3 =
std::make_shared<ov::op::v0::Abs>(test_abs_2);
std::shared_ptr<ov::op::v0::Abs> test_abs_4 =
std::make_shared<ov::op::v0::Abs>(test_abs_3);
std::shared_ptr<ov::op::v0::Abs> test_abs_5 =
std::make_shared<ov::op::v0::Abs>(test_abs_4);
std::shared_ptr<ov::op::v0::Abs> test_abs_6 =
std::make_shared<ov::op::v0::Abs>(test_abs_5);
std::shared_ptr<ov::op::v0::Abs> test_abs_7 =
std::make_shared<ov::op::v0::Abs>(test_abs_6);
std::shared_ptr<ov::op::v0::Abs> test_abs_8 =
std::make_shared<ov::op::v0::Abs>(test_abs_7);
std::shared_ptr<ov::op::v0::Abs> test_abs_9 =
std::make_shared<ov::op::v0::Abs>(test_abs_8);
std::shared_ptr<ov::op::v0::Abs> test_abs_10 =
std::make_shared<ov::op::v0::Abs>(test_abs_9);
std::shared_ptr<ov::op::v0::Result> test_res =
std::make_shared<ov::op::v0::Result>(test_abs_10);
test_model_0_0 = std::make_shared<ov::Model>(ov::ResultVector{test_res},
ov::ParameterVector{test_parameter});
}
{
std::shared_ptr<ov::op::v0::Parameter> test_parameter =
std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2});
std::shared_ptr<ov::op::v0::Abs> test_abs_0 =
std::make_shared<ov::op::v0::Abs>(test_parameter);
std::shared_ptr<ov::op::v0::Abs> test_abs_1 =
std::make_shared<ov::op::v0::Abs>(test_abs_0);
std::shared_ptr<ov::op::v0::Abs> test_abs_2 =
std::make_shared<ov::op::v0::Abs>(test_abs_1);
std::shared_ptr<ov::op::v0::Abs> test_abs_3 =
std::make_shared<ov::op::v0::Abs>(test_abs_2);
std::shared_ptr<ov::op::v0::Abs> test_abs_4 =
std::make_shared<ov::op::v0::Abs>(test_abs_3);
std::shared_ptr<ov::op::v0::Abs> test_abs_5 =
std::make_shared<ov::op::v0::Abs>(test_abs_4);
std::shared_ptr<ov::op::v0::Abs> test_abs_6 =
std::make_shared<ov::op::v0::Abs>(test_abs_5);
std::shared_ptr<ov::op::v0::Abs> test_abs_7 =
std::make_shared<ov::op::v0::Abs>(test_abs_6);
std::shared_ptr<ov::op::v0::Abs> test_abs_8 =
std::make_shared<ov::op::v0::Abs>(test_abs_7);
std::shared_ptr<ov::op::v0::Abs> test_abs_9 =
std::make_shared<ov::op::v0::Abs>(test_abs_8);
std::shared_ptr<ov::op::v0::Relu> test_abs_10 =
std::make_shared<ov::op::v0::Relu>(test_abs_9);
std::shared_ptr<ov::op::v0::Result> test_res =
std::make_shared<ov::op::v0::Result>(test_abs_10);
test_model_0_1 = std::make_shared<ov::Model>(ov::ResultVector{test_res},
ov::ParameterVector{test_parameter});
}
{
std::shared_ptr<ov::op::v0::Parameter> test_parameter =
std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2});
std::shared_ptr<ov::op::v0::Abs> test_abs_0 =
std::make_shared<ov::op::v0::Abs>(test_parameter);
std::shared_ptr<ov::op::v0::Relu> test_abs_1 =
std::make_shared<ov::op::v0::Relu>(test_abs_0);
std::shared_ptr<ov::op::v0::Abs> test_abs_2 =
std::make_shared<ov::op::v0::Abs>(test_abs_1);
std::shared_ptr<ov::op::v0::Relu> test_abs_3 =
std::make_shared<ov::op::v0::Relu>(test_abs_2);
std::shared_ptr<ov::op::v0::Abs> test_abs_4 =
std::make_shared<ov::op::v0::Abs>(test_abs_3);
std::shared_ptr<ov::op::v0::Relu> test_abs_5 =
std::make_shared<ov::op::v0::Relu>(test_abs_4);
std::shared_ptr<ov::op::v0::Abs> test_abs_6 =
std::make_shared<ov::op::v0::Abs>(test_abs_5);
std::shared_ptr<ov::op::v0::Abs> test_abs_7 =
std::make_shared<ov::op::v0::Abs>(test_abs_6);
std::shared_ptr<ov::op::v0::Relu> test_abs_8 =
std::make_shared<ov::op::v0::Relu>(test_abs_7);
std::shared_ptr<ov::op::v0::Abs> test_abs_9 =
std::make_shared<ov::op::v0::Abs>(test_abs_8);
std::shared_ptr<ov::op::v0::Abs> test_abs_10 =
std::make_shared<ov::op::v0::Abs>(test_abs_9);
std::shared_ptr<ov::op::v0::Result> test_res =
std::make_shared<ov::op::v0::Result>(test_abs_10);
test_model_1 = std::make_shared<ov::Model>(ov::ResultVector{test_res},
ov::ParameterVector{test_parameter});
}
ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1));
ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1));
ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1));
ASSERT_FALSE(this->match(test_model_0_0, test_model_1));
ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1));
ASSERT_FALSE(this->match(test_model_0_1, test_model_1));
}
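
The three graphs above are built so that the first two 11-op chains share everything except the final op (10 of 11 ops in common, just above 90%), while the third interleaves Relu nodes and drops well below that. A simplified, illustrative threshold check over flat op-type sequences behaves the same way; the real matcher works on graphs, so this is only a sketch of the ratio involved:

#include <algorithm>
#include <cstddef>
#include <iterator>

// Two op-type sequences "match" if their shared prefix of identical entries
// covers at least 90% of the longer sequence (10/11 passes, 1/11 does not).
template <typename It>
bool matches_90_percent(It first1, It last1, It first2, It last2) {
    const std::size_t len1 = static_cast<std::size_t>(std::distance(first1, last1));
    const std::size_t len2 = static_cast<std::size_t>(std::distance(first2, last2));
    std::size_t common = 0;
    while (first1 != last1 && first2 != last2 && *first1 == *first2) {
        ++common;
        ++first1;
        ++first2;
    }
    return common * 10 >= std::max(len1, len2) * 9;
}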
TEST_F(SubgraphExtractorTest, extract) {
ASSERT_NO_THROW(this->extract(test_model_0_0));
ASSERT_NO_THROW(this->extract(test_model_0_1));
ASSERT_NO_THROW(this->extract(test_model_1));
}
TEST_F(SubgraphExtractorTest, is_subgraph) {
auto is_subgraph = this->is_subgraph(test_model_0_0, test_model_0_0);
ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_0_0));
ASSERT_TRUE(std::get<0>(is_subgraph));
ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_1));
is_subgraph = this->is_subgraph(test_model_0_0, test_model_1);
ASSERT_FALSE(std::get<0>(is_subgraph));
ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, test_model_1));
is_subgraph = this->is_subgraph(test_model_0_1, test_model_1);
ASSERT_FALSE(std::get<0>(is_subgraph));
{
std::shared_ptr<ov::op::v0::Parameter> test_parameter =
std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2});
std::shared_ptr<ov::op::v0::Abs> test_abs_0 =
std::make_shared<ov::op::v0::Abs>(test_parameter);
std::shared_ptr<ov::op::v0::Abs> test_abs_1 =
std::make_shared<ov::op::v0::Abs>(test_abs_0);
std::shared_ptr<ov::op::v0::Result> test_res =
std::make_shared<ov::op::v0::Result>(test_abs_1);
auto big_model_0 = std::make_shared<ov::Model>(ov::ResultVector{test_res},
ov::ParameterVector{test_parameter});
is_subgraph = this->is_subgraph(test_model_0_0, big_model_0);
ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, big_model_0));
ASSERT_TRUE(std::get<0>(is_subgraph));
ASSERT_EQ(std::get<1>(is_subgraph), big_model_0);
ASSERT_EQ(std::get<2>(is_subgraph), test_model_0_0);
is_subgraph = this->is_subgraph(test_model_0_1, big_model_0);
ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, big_model_0));
ASSERT_TRUE(std::get<0>(is_subgraph));
ASSERT_EQ(std::get<1>(is_subgraph), big_model_0);
ASSERT_EQ(std::get<2>(is_subgraph), test_model_0_1);
ASSERT_NO_THROW(this->is_subgraph(test_model_1, big_model_0));
ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_1, big_model_0)));
}
}
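
These assertions fix the layout of the tuple returned by is_subgraph(): element 0 reports whether one model is contained in the other, element 1 is the larger graph, and element 2 is the model matched inside it. A small, hypothetical consumer that relies only on those three elements might look like this (the helper name is a placeholder, not part of the extractor API):

#include <memory>
#include <tuple>

#include "openvino/core/model.hpp"

// Return the contained model when the flag is set, otherwise nullptr.
// Only tuple elements 0..2, as checked by the test above, are assumed here.
template <typename IsSubgraphResult>
std::shared_ptr<ov::Model> contained_model_or_null(const IsSubgraphResult& result) {
    if (!std::get<0>(result)) {
        return nullptr;  // the models are not in a graph/subgraph relation
    }
    // std::get<1>(result): the enclosing (larger) graph,
    // std::get<2>(result): the model found inside it.
    return std::get<2>(result);
}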
} // namespace

View File

@ -28,8 +28,8 @@ TEST_F(NodeUtilsTest, get_input_info_by_node) {
auto add_node = std::make_shared<ov::op::v1::Add>(param, const_node);
std::map<std::string, InputInfo> ref_test_info = {
{ "const_0", InputInfo(-3.65, 7, true) },
{ "param_0", InputInfo() },
{ "const_0", InputInfo({2, 3}, -3.65, 7, true) },
{ "param_0", InputInfo({2, 3}) },
};
std::map<std::string, InputInfo> orig_test_info = get_input_info_by_node(add_node);
ASSERT_EQ(ref_test_info, orig_test_info);

View File

@ -21,7 +21,6 @@ TEST_P(ReadIRTest, Inference) {
run();
}
// temporarily disable to provide correct numbers for release
TEST_P(ReadIRTest, QueryModel) {
query_model();
}

View File

@ -137,6 +137,7 @@ def create_hash(in_dir_path: Path, operations=dict()):
core = Core()
models = in_dir_path.rglob("*.xml")
models = sorted(models)
model_prefix = os.path.commonprefix(models)
for model_path in models:
bin_path = model_path.with_suffix(BIN_EXTENSION)
meta_path = model_path.with_suffix(META_EXTENSION)
@ -156,6 +157,15 @@ def create_hash(in_dir_path: Path, operations=dict()):
if is_report_op(op_name):
if not op_name in operations.keys():
operations.update({op_name: TestStructure()})
# add op/subgraphs, dynamic/static and extractor_name to hash
model_dir, _ = os.path.split(model_path)
model_dir = str(model_dir).replace(model_prefix, "")
if op_name in model_dir:
model_dir = model_dir[:model_dir.find(op_name):]
model_dir = model_dir[:-1:]
model_dir = model_dir.replace(os.path.sep, "_")
str_to_hash += model_dir
# upgrade expected rel passrates files
if "static" in str(model_path):
operations[op_name].static += rel_weight
elif "dynamic" in str(model_path):
@ -170,8 +180,11 @@ def create_hash(in_dir_path: Path, operations=dict()):
logger.error(f"Impossible to create hash for {model_path}")
try:
input_info = ET.parse(meta_path).getroot().find("input_info")
str_to_hash += ET.tostring(input_info).decode('utf8').replace('\t', '')
# check only parameters/constant structures
for input in ET.parse(meta_path).getroot().find("input_info"):
for attrib in input.attrib:
if attrib == "convert_to_const":
str_to_hash += input.attrib.get(attrib)
except:
logger.error(f"Impossible to add input_info to hash for {model_path}")
@ -187,7 +200,7 @@ def create_hash(in_dir_path: Path, operations=dict()):
meta_path.rename(new_meta_path)
bin_path.rename(new_bin_path)
# TODO: if some models are still not renaming, create new file and remove old file
logger.info(f"{old_name} -> {new_name}")
# logger.info(f"{old_name} -> {new_name}")
elif old_name != new_xml_path:
# TODO: if some models are still not renaming and there are duplicates, remove files here
logger.warning(f"Could not rename model {old_name} ! Model file name already exists {new_xml_path} ")
@ -236,7 +249,7 @@ if __name__=="__main__":
if not Path(in_dir).is_dir():
logger.error(f"Directory {in_dir} is not exist!")
continue
logger.info(f"Starting to rename models in {in_dir}")
# logger.info(f"Starting to rename models in {in_dir}")
operations = create_hash(Path(in_dir), operations)
if not rel_weights_dir is None: