[CONFORMANCE] Remove deprecated subgraphs dumper tool (#18905)

* [CONFORMANCE] Remove deprecated subgraphs dumper tool

* fix build
Irina Efode 2023-08-03 16:35:09 +04:00 committed by GitHub
parent 0f253c5986
commit 3109d8fd25
76 changed files with 58 additions and 1750 deletions

View File

@@ -7,5 +7,3 @@ add_subdirectory(test_runner/api_conformance_runner)
 add_subdirectory(test_runner/op_conformance_runner)
 add_subdirectory(subgraphs_dumper)
 add_subdirectory(subgraphs_dumper/tests)
-add_subdirectory(subgraphs_dumper_new)
-add_subdirectory(subgraphs_dumper_new/tests)

View File

@@ -2,54 +2,27 @@
 # SPDX-License-Identifier: Apache-2.0
 #
-set(TARGET_NAME subgraphsDumper_deprecated)
+set(TARGET_NAME subgraphsDumper)
 list(APPEND LIBRARIES
         gflags
-        inference_engine
+        openvino::runtime
         func_test_utils
         openvino::pugixml
 )
-list(APPEND DEFINITIONS)
-if(TARGET openvino_ir_frontend)
-    list(APPEND DEPENDENCIES openvino_ir_frontend)
-    list(APPEND DEFINITIONS ENABLE_OV_IR_FRONTEND)
-endif()
-if(TARGET openvino_onnx_frontend)
-    list(APPEND DEPENDENCIES openvino_onnx_frontend)
-    list(APPEND DEFINITIONS ENABLE_OV_ONNX_FRONTEND)
-endif()
-if(TARGET openvino_paddle_frontend)
-    list(APPEND DEPENDENCIES openvino_paddle_frontend)
-    list(APPEND DEFINITIONS ENABLE_OV_PADDLE_FRONTEND)
-endif()
-if(TARGET openvino_pytorch_frontend)
-    list(APPEND DEPENDENCIES openvino_pytorch_frontend)
-    list(APPEND DEFINITIONS ENABLE_OV_PYTORCH_FRONTEND)
-endif()
-if(TARGET openvino_tensorflow_frontend)
-    list(APPEND DEPENDENCIES openvino_tensorflow_frontend)
-    list(APPEND DEFINITIONS ENABLE_OV_TF_FRONTEND)
-endif()
 addIeTargetTest(
         NAME ${TARGET_NAME}
-        ROOT ${CMAKE_CURRENT_SOURCE_DIR}
+        ROOT ${CMAKE_CURRENT_SOURCE_DIR}/src
         INCLUDES
-            ${CMAKE_CURRENT_SOURCE_DIR}/include
+            PRIVATE
+                ${CMAKE_CURRENT_SOURCE_DIR}/include
         LINK_LIBRARIES
             PRIVATE
                 ${LIBRARIES}
         DEPENDENCIES
-            ${DEPENDENCIES}
+            ov_frontends
         ADD_CPPLINT
 )
-target_compile_definitions(${TARGET_NAME} PRIVATE ${DEFINITIONS})
 ie_faster_build(${TARGET_NAME} UNITY)

View File

@@ -1,41 +0,0 @@
# Subgraphs Dumper tool
The tool is intended to analyze an arbitrary set of models, in the formats supported by the Inference Engine readers,
and to extract and serialize unique patterns from all of the input models. Uniqueness and matching criteria are defined by implementations of the
`Matcher` interface class declared in `./include/matchers/base_matcher.hpp`, which must be registered in the
`MatchersManager` declared in `./include/matchers/matchers_manager.hpp` by adding them to the `m_registry` map.
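For illustration only, a minimal sketch of such a registration; `MyCustomMatcher` below is a hypothetical example class, not part of the tool:
```
// A hypothetical matcher built on top of the generic single-op rules
// (declared in ./include/matchers/single_op.hpp).
#include "matchers/single_op.hpp"

class MyCustomMatcher : public SubgraphsDumper::SingleOpMatcher {
public:
    bool match(const std::shared_ptr<ov::Node> &node,
               const std::shared_ptr<ov::Node> &ref,
               const LayerTestsUtils::OPInfo &op_info) const override {
        // Custom uniqueness criterion goes here; fall back to the base rules.
        return SingleOpMatcher::match(node, ref, op_info);
    }
};

// Registration: add an entry to the m_registry map in MatchersManager, e.g.
//   {"my_custom_matcher", []() { return std::make_shared<MyCustomMatcher>(); }}
```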
## Building
To build the tool, run the following commands:
```
cmake -DENABLE_FUNCTIONAL_TESTS=ON
make -j subgraphsDumper
```
The outcome of the build is a `subgraphsDumper` binary located in the build artifacts folder.
## Running
The tool takes the following command line parameters:
* `--input_folders` - Required. Comma-separated paths to the input folders with IRs.
* `--local_cache` - Optional. Comma-separated paths to the local cache folders with IRs.
* `--output_folder` - Required. Path to the output folder where the IRs are serialized.
* `--path_regex` - Optional. Regular expression applied during the recursive discovery in the input folders.
* `--constants_size_threshold` - Optional. Maximum size, in megabytes, of a constant to be serialized.
                                 If a constant exceeds the specified size, it is replaced
                                 with a parameter, and meta information about the original data range is saved.
* `--extract_body` - Optional. Allows extracting operation bodies to the operation cache.
E.g.
```subgraphsDumper --input_folders /folder/with/models,/another/folder/with/models --output_folder /output/folder```
## Extraction algorithm
*NOTE: The current implementation presumes single-operation matching rules only; it is to be extended to handle wider patterns.*
1. Recursively search for all of the models in the provided input folders.
2. Read the first model and iterate over the nodes of its ngraph function representation
(Parameters, Results and Constants are ignored).
3. Compare the current operation with all of the operations in the internal cache by running all of the matchers registered in
`MatchersManager`. The operation is cloned and added to the cache if it is not matched by any of the matchers; otherwise it is ignored.
Cloning rules may vary depending on the operation type and are defined in `./src/op_cloner.cpp`.
4. Proceed to the next model without resetting the internal operation cache.
5. Serialize all cached subgraphs to the output folder in IR format (a condensed sketch of this flow follows the list).
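A condensed sketch of how the steps above fit together, using the `OPCache` interface from `./include/ops_cache.hpp` (error handling, logging and per-model bookkeeping are omitted; `models` is assumed to be the list discovered in step 1):
```
auto cache = SubgraphsDumper::OPCache::make_cache();
auto core = ov::test::utils::PluginCache::get().core();
for (const auto &model : models) {                        // step 1: discovered models
    auto function = core->read_model(model.path);         // step 2: read the model
    // steps 3-4: operations that no registered matcher recognizes are cloned
    // and added to the cache; the cache is kept across models
    cache->update_ops_cache(function, model, FLAGS_extract_body);
}
cache->serialize_cached_ops(FLAGS_output_folder);         // step 5: dump cached ops as IRs
```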

View File

@@ -13,20 +13,16 @@ static const char local_cache_message[] = "Optional. Comma separated paths to th
 static const char output_folder_message[] = "Required. Path to the output folders where to serialize IRs";
 static const char path_regex_message[] = "Optional. regular expression to be applied in input "
                                          "folders recursive discovery";
-static const char constants_size_threshold_message[] = "Optional. Maximum size of constant in megabytes"
-                                                       " to be serialized.\n"
-                                                       "If constant size exceeds specified number it will be replaced"
-                                                       "with parameter and meta information about original data range "
-                                                       "will be saved";
 static const char extract_body_message[] = "Optional. Allow to extract operation bodies to operation cache.";
+static const char cache_type_message[] = "Optional. Specify caching type: OP, GRAPH. The default value is both";
 DEFINE_bool(h, false, help_message);
 DEFINE_string(input_folders, "", local_cache_message);
 DEFINE_string(local_cache, "", input_folders_message);
 DEFINE_string(output_folder, "output", output_folder_message);
 DEFINE_string(path_regex, ".*", output_folder_message);
-DEFINE_double(constants_size_threshold, 1., constants_size_threshold_message);
 DEFINE_bool(extract_body, true, extract_body_message);
+DEFINE_string(cache_type, "", cache_type_message);
 /**
  * @brief This function shows a help message
@@ -41,7 +37,7 @@ static void showUsage() {
     std::cout << " --local_cache \"<path>\" " << input_folders_message << "\n";
     std::cout << " --output_folder \"<path>\" " << output_folder_message << "\n";
     std::cout << " --path_regex \"<path>\" " << path_regex_message << "\n";
-    std::cout << " --constants_size_threshold \"<value>\" " << constants_size_threshold_message << "\n";
     std::cout << " --extract_body \"<value>\" " << extract_body_message << "\n";
+    std::cout << " --cache_type \"<value>\" " << extract_body_message << "\n";
     std::cout << std::flush;
 }

View File

@@ -1,84 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <utility>
#include "ngraph/node.hpp"
#include "pugixml.hpp"
#include "functional_test_utils/summary/op_info.hpp"
namespace SubgraphsDumper {
class MatchersManager;
class iMatcherConfig {
public:
using Ptr = std::shared_ptr<iMatcherConfig>;
explicit iMatcherConfig(bool is_fallback_config) : is_fallback_config(is_fallback_config) {}
iMatcherConfig(
std::vector<std::string> ignored_attributes,
std::vector<size_t> ignored_ports,
bool is_fallback_config,
bool ignore_matching = false)
: ignored_attributes(std::move(ignored_attributes)),
ignored_ports(std::move(ignored_ports)),
is_fallback_config(is_fallback_config),
ignore_matching(ignore_matching) {}
// Empty vectors stand for any of the possible values
std::vector<std::string> ignored_attributes;
std::vector<size_t> ignored_ports;
bool is_fallback_config;
bool ignore_matching = false;
virtual bool op_in_config(const std::shared_ptr<ov::Node> &node) = 0;
virtual ~iMatcherConfig() = default;
};
template <typename... OPTypes>
struct MatcherConfig : public iMatcherConfig {
public:
MatcherConfig() : iMatcherConfig(sizeof...(OPTypes) == 0) {}
MatcherConfig(std::vector<std::string> ignored_attributes, std::vector<size_t> ignored_ports,
bool ignore_matching = false)
: iMatcherConfig(
std::move(ignored_attributes), std::move(ignored_ports), sizeof...(OPTypes) == 0, ignore_matching) {}
MatcherConfig(bool ignore_matching) : iMatcherConfig({}, {}, sizeof...(OPTypes) == 0, ignore_matching) {}
bool op_in_config(const std::shared_ptr<ov::Node> &node) override {
std::initializer_list<bool> vals{(ov::is_type<OPTypes>(node))...};
return std::any_of(vals.begin(), vals.end(), [](bool i) { return i; });
};
};
class Matcher {
using Ptr = std::shared_ptr<Matcher>;
friend class MatchersManager;
public:
virtual bool match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const = 0;
virtual ~Matcher() = default;
protected:
virtual void configure(const pugi::xml_document &cfg) = 0;
iMatcherConfig::Ptr get_config(const std::shared_ptr<ov::Node> &node) const;
std::vector<iMatcherConfig::Ptr> default_configs;
virtual bool match_only_configured_ops() const = 0; // TODO: Add setter for external configuration purposes.
};
} // namespace SubgraphsDumper

View File

@@ -1,22 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "matchers/single_op.hpp"
namespace SubgraphsDumper {
class ConvolutionsMatcher : public SingleOpMatcher {
public:
ConvolutionsMatcher();
bool match_inputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const override;
bool match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const override;
protected:
bool match_only_configured_ops() const override { return true; }
};
} // namespace SubgraphsDumper

View File

@@ -1,45 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "pugixml.hpp"
#include "ngraph/node.hpp"
#include "matchers/single_op.hpp"
#include "matchers/convolutions.hpp"
namespace SubgraphsDumper {
class Matcher;
class MatchersManager {
public:
using RegistryMap = std::map<std::string, std::function<Matcher::Ptr()>>;
using MatchersMap = std::map<std::string, Matcher::Ptr>;
explicit MatchersManager(const std::string &cfg_path = {});
bool match_all(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info);
bool match_any(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info);
// TODO: Implement default xml config file generation by Matchers
void generate_config() {}
private:
std::vector<bool> run_matchers(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info);
// TODO: No copy constructor for xml_document
// pugi::xml_document m_cfg;
RegistryMap m_registry = {
{"generic_single_op", []() { return std::make_shared<SingleOpMatcher>(); }},
{"convolutions", []() { return std::make_shared<ConvolutionsMatcher>(); }}
};
MatchersMap m_matchers = {};
};
} // namespace SubgraphsDumper

View File

@@ -1,45 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include "base_matcher.hpp"
#include "pugixml.hpp"
#include "ngraph/node.hpp"
#include "common_test_utils/ngraph_test_utils.hpp"
namespace SubgraphsDumper {
class SingleOpMatcher : public Matcher {
public:
SingleOpMatcher();
bool match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const override;
bool same_op_type(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const;
virtual bool match_inputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const;
bool match_outputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const;
bool same_attrs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const;
bool match_ports(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const;
protected:
void configure(const pugi::xml_document &cfg) override {}
bool match_only_configured_ops() const override { return false; }
};
} // namespace SubgraphsDumper

View File

@@ -1,25 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <functional>
#include <map>
#include <memory>
#include <ngraph/ngraph.hpp>
#include "ops_cache.hpp"
namespace SubgraphsDumper {
struct ClonersMap {
using clone_fn = std::function<const std::shared_ptr<ov::Node>(const std::shared_ptr<ov::Node> &,
LayerTestsUtils::OPInfo &meta)>;
using cloners_map_type = std::map<ov::NodeTypeInfo, clone_fn>;
static float constant_size_threshold_mb;
static const cloners_map_type cloners;
};
} // namespace SubgraphsDumper

View File

@@ -1,52 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <utility>
#include <vector>
#include <string>
#include <memory>
#include <ngraph/ngraph.hpp>
#include "matchers/matchers_manager.hpp"
#include "functional_test_utils/summary/op_info.hpp"
#include "utils/model_wrap_struct.hpp"
namespace SubgraphsDumper {
class OPCache {
public:
OPCache() : num_neighbours_to_cache(0), manager(MatchersManager()),
m_ops_cache(std::map<std::shared_ptr<ov::Node>, LayerTestsUtils::OPInfo>()) {}
static std::unique_ptr<OPCache> make_cache() {
return std::unique_ptr<OPCache>(new OPCache());
}
void update_ops_cache(const std::shared_ptr<ov::Node> &op, const Model& source_model);
void update_ops_cache(const std::shared_ptr<ov::Model> &func, const Model& source_model, const bool extract_body = true);
void serialize_cached_ops(const std::string &serialization_dir);
void set_num_neighbours_to_cache(size_t num) { num_neighbours_to_cache = num; }
void serialize_meta_info(const LayerTestsUtils::OPInfo &info, const std::string &path);
float get_size_of_cached_ops();
protected:
std::map<std::shared_ptr<ov::Node>, LayerTestsUtils::OPInfo> m_ops_cache;
MatchersManager manager;
size_t num_neighbours_to_cache = 0;
enum SerializationStatus {
OK = 0,
FAILED = 1,
RETRY = 2,
};
SerializationStatus serialize_function(const std::pair<std::shared_ptr<ov::Node>, LayerTestsUtils::OPInfo> &op_info,
const std::string &serialization_dir);
};
} // namespace SubgraphsDumper

View File

@@ -1,39 +0,0 @@
// Copyright (C) 2019-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "common_test_utils/file_utils.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
namespace SubgraphsDumper {
struct Model {
std::string path;
size_t size = 0;
std::string name;
size_t op_cnt = 0;
Model(std::string model) {
path = model;
auto pos = model.rfind(ov::test::utils::FileSeparator);
name = pos == std::string::npos ? model : ov::test::utils::replaceExt(model.substr(pos + 1), "");
try {
auto ov_model = ov::test::utils::PluginCache::get().core()->read_model(path);
size = ov_model->get_graph_size();
op_cnt = ov_model->get_ops().size() - (ov_model->inputs().size() + ov_model->outputs().size());
} catch (...) {
std::cout << "Impossible to read network: " << path << std::endl;
}
}
bool operator<(const Model &m) const {
return size < m.size;
}
bool operator>(const Model &m) const {
return size > m.size;
}
};
} // namespace SubgraphsDumper

View File

@@ -2,160 +2,65 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <fstream>
-#include <regex>
-#include <chrono>
-#include <ctime>
-#include "inference_engine.hpp"
-#include "common_test_utils/file_utils.hpp"
-#include "common_test_utils/test_constants.hpp"
-#include "ops_cache.hpp"
-#include "op_cloner.hpp"
-#include "utils/model_wrap_struct.hpp"
 #include "gflag_config.hpp"
-#include <string.h>
+#include "cache/op_cache.hpp"
+#include "cache/graph_cache.hpp"
+#include "utils/model.hpp"
-static std::vector<std::regex> getRegexByFrontend() {
-    std::vector<std::regex> result;
-#ifdef ENABLE_OV_ONNX_FRONTEND
-    result.push_back(std::regex(R"(.*\.onnx)"));
-#endif
-#ifdef ENABLE_OV_PADDLE_FRONTEND
-    result.push_back(std::regex(R"(.*\.pdmodel)"));
-    result.push_back(std::regex(R"(.*__model__)"));
-#endif
-#ifdef ENABLE_OV_TF_FRONTEND
-    result.push_back(std::regex(R"(.*\.pb)"));
-#endif
-#ifdef ENABLE_OV_IR_FRONTEND
-    result.push_back(std::regex(R"(.*\.xml)"));
-#endif
-#ifdef ENABLE_OV_TF_LITE_FRONTEND
-    result.push_back(std::regex(R"(.*\.tflite)"));
-#endif
-#ifdef ENABLE_OV_PYTORCH_FRONTEND
-    result.push_back(std::regex(R"(.*\.pt)"));
-#endif
-    return result;
-}
-std::vector<SubgraphsDumper::Model> findModelsInDirs(const std::vector<std::string> &dirs) {
-    std::vector<std::string> input_folder_content;
-    const auto patterns = getRegexByFrontend();
-    for (const auto &dir : dirs) {
-        std::vector<std::string> content;
-        if (ov::test::utils::directoryExists(dir)) {
-            content = ov::test::utils::getFileListByPatternRecursive({dir}, patterns);
-        } else if (ov::test::utils::fileExists(dir) && std::regex_match(dir, std::regex(".*.lst"))) {
-            content = ov::test::utils::readListFiles({dir});
-        } else {
-            std::string msg = "Input directory (" + dir + ") doesn't not exist!";
-            throw std::runtime_error(msg);
-        }
-        if (!content.empty()) {
-            input_folder_content.insert(input_folder_content.end(), content.begin(), content.end());
-        }
-    }
-    std::vector<SubgraphsDumper::Model> models;
-    auto xml_regex = std::regex(FLAGS_path_regex);
-    for (const auto &file : input_folder_content) {
-        if (std::regex_match(file, xml_regex)) {
-            models.emplace_back(SubgraphsDumper::Model(file));
-        }
-    }
-    std::sort(models.begin(), models.end());
-    std::reverse(models.begin(), models.end());
-    if (!ov::test::utils::directoryExists(FLAGS_output_folder)) {
-        std::string msg = "Output directory (" + FLAGS_output_folder + ") doesn't not exist! The directory will be created.";
-        std::cout << msg << std::endl;
-        ov::test::utils::createDirectoryRecursive(FLAGS_output_folder);
-    }
-    return models;
-}
-void cacheModels(std::unique_ptr<SubgraphsDumper::OPCache> &cache,
-                 uint8_t& ret_code,
-                 const std::vector<SubgraphsDumper::Model>& models,
-                 const bool extract_body) {
-    auto core = ov::test::utils::PluginCache::get().core();
-    time_t rawtime;
-    struct tm *timeinfo;
-    char buffer[20];
-    size_t all_models = models.size();
-    std::string successful_models_file_path = FLAGS_output_folder + ov::test::utils::FileSeparator + "successful_models.lst",
-                not_read_models_file_path = FLAGS_output_folder + ov::test::utils::FileSeparator + "not_read_models.lst",
-                not_fully_cached_models_file_path = FLAGS_output_folder + ov::test::utils::FileSeparator + "not_fully_cached_models.lst";
-    std::ofstream successful_models_file, not_read_models_file, not_fully_cached_models_file;
-    successful_models_file.open(successful_models_file_path, std::ios::out | std::ios::trunc);
-    not_read_models_file.open(not_read_models_file_path, std::ios::out | std::ios::trunc);
-    not_fully_cached_models_file.open(not_fully_cached_models_file_path, std::ios::out | std::ios::trunc);
-    for (size_t i = 0; i < all_models; ++i) {
-        const auto model = models[i];
-        if (ov::test::utils::fileExists(model.path)) {
-            try {
-                time(&rawtime);
-                timeinfo = localtime(&rawtime); // NOLINT no localtime_r in C++11
-                strftime(buffer, 20, "%H:%M:%S", timeinfo);
-                std::cout << "[" << std::string(buffer) << "][" << i + 1 << "/" << all_models << "]Processing model: "
-                          << model.path << std::endl;
-                std::shared_ptr<ov::Model> function;
-                try {
-                    function = core->read_model(model.path);
-                } catch (std::exception &e) {
-                    not_read_models_file << model.path << std::endl;
-                    std::cout << "Model reading failed with exception:" << std::endl << e.what() << std::endl;
-                    ret_code = 1;
-                    continue;
-                }
-                cache->update_ops_cache(function, model, extract_body);
-                successful_models_file << model.path << std::endl;
-            } catch (std::exception &e) {
-                not_fully_cached_models_file << model.path << std::endl;
-                std::cout << "Model processing failed with exception:" << std::endl << e.what() << std::endl;
-                ret_code = 1;
-                continue;
-            }
-        }
-    }
-    successful_models_file.close();
-    not_read_models_file.close();
-    not_fully_cached_models_file.close();
-}
+using namespace ov::tools::subgraph_dumper;
 int main(int argc, char *argv[]) {
-    uint8_t ret_code = 0;
     gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
     if (FLAGS_h) {
         showUsage();
         return 0;
     }
-    SubgraphsDumper::ClonersMap::constant_size_threshold_mb = FLAGS_constants_size_threshold;
     std::vector<std::string> local_cache_dirs = ov::test::utils::splitStringByDelimiter(FLAGS_local_cache);
     std::vector<std::string> dirs = ov::test::utils::splitStringByDelimiter(FLAGS_input_folders);
-    std::vector<SubgraphsDumper::Model> models;
+    std::vector<std::string> models;
+    if (!ov::test::utils::directoryExists(FLAGS_output_folder)) {
+        std::string msg = "Output directory (" + FLAGS_output_folder + ") doesn't not exist! The directory will be created.";
+        std::cout << msg << std::endl;
+        ov::test::utils::createDirectoryRecursive(FLAGS_output_folder);
+    }
     try {
-        models = findModelsInDirs(dirs);
+        models = find_models(dirs, FLAGS_path_regex);
     } catch (std::runtime_error& e) {
-        std::cout << "Try 'subgraphdumper -h' for more information. \nException: " << e.what() << std::endl;
+        std::cout << "[ INFO ] Try 'subgraphsDumper -h' for more information. \nException: " << e.what() << std::endl;
         return 1;
     }
-    auto cache = SubgraphsDumper::OPCache::make_cache();
-    if (!FLAGS_local_cache.empty()) {
-        auto cachedOps = findModelsInDirs(local_cache_dirs);
-        cacheModels(cache, ret_code, cachedOps, FLAGS_extract_body);
-    }
-    cacheModels(cache, ret_code, models, FLAGS_extract_body);
-    cache->serialize_cached_ops(FLAGS_output_folder);
-    return ret_code;
+    std::vector<std::shared_ptr<ICache>> caches;
+    if (FLAGS_cache_type == "OP" || FLAGS_cache_type.empty()) {
+        std::cout << "[ INFO ] OpCache is enabled!" << std::endl;
+        caches.push_back(OpCache::get());
+    }
+    if (FLAGS_cache_type == "GRAPH" || FLAGS_cache_type.empty()) {
+        // todo: iefode: to check and enable it in CI
+        // std::cout << "[ INFO ] GraphCache is enabled!" << std::endl;
+        // caches.push_back(GraphCache::get());
+    }
+    for (auto& cache : caches) {
+        cache->set_serialization_dir(FLAGS_output_folder);
+    }
+    std::map<ModelCacheStatus, std::vector<std::string>> cache_model_status;
+    // Upload previously cached graphs to cache
+    if (!FLAGS_local_cache.empty()) {
+        auto cachedOps = find_models(local_cache_dirs);
+        cache_model_status = cache_models(caches, cachedOps, FLAGS_extract_body);
+    }
+    {
+        auto tmp_cache_model_status = cache_models(caches, models, FLAGS_extract_body);
+        cache_model_status.insert(tmp_cache_model_status.begin(), tmp_cache_model_status.end());
+    }
+    for (auto& cache : caches) {
+        cache->set_serialization_dir(FLAGS_output_folder);
+        cache->serialize_cache();
+    }
+    save_model_status_to_file(cache_model_status, FLAGS_output_folder);
+    return cache_model_status[ModelCacheStatus::NOT_FULLY_CACHED].empty() && cache_model_status[ModelCacheStatus::NOT_READ].empty();
 }

View File

@@ -1,21 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "matchers/base_matcher.hpp"
#include "common_test_utils/common_utils.hpp"
SubgraphsDumper::iMatcherConfig::Ptr SubgraphsDumper::Matcher::get_config(const std::shared_ptr<ov::Node> &node) const {
for (const auto &cfg : default_configs) {
if (cfg->op_in_config(node)) {
return cfg;
}
}
for (const auto &cfg : default_configs) {
if (cfg->is_fallback_config) {
return cfg;
}
}
return std::make_shared<MatcherConfig<>>();
}

View File

@@ -1,65 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "matchers/convolutions.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/group_conv.hpp"
using namespace SubgraphsDumper;
ConvolutionsMatcher::ConvolutionsMatcher() {
default_configs = {
std::make_shared<MatcherConfig<
ov::op::v1::Convolution,
ov::op::v1::ConvolutionBackpropData,
ov::op::v1::GroupConvolution,
ov::op::v1::GroupConvolutionBackpropData>>(std::vector<std::string>{}, std::vector<size_t>{0, 1})
};
}
bool ConvolutionsMatcher::match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const {
const auto &cfg = get_config(node);
if (match_only_configured_ops() && cfg->is_fallback_config) {
return false;
}
if (cfg->ignore_matching) {
return false;
}
return same_op_type(node, ref, op_info) &&
match_inputs(node, ref, op_info) &&
match_outputs(node, ref, op_info) &&
same_attrs(node, ref, op_info) &&
match_ports(node, ref, op_info);
}
bool ConvolutionsMatcher::match_inputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const {
if (node->get_input_size() != ref->get_input_size()) {
return false;
}
bool rankIsEqual = node->get_input_tensor(0).get_partial_shape().rank() ==
ref->get_input_tensor(0).get_partial_shape().rank();
bool elemTypeIsEqual = node->get_input_tensor(0).get_element_type() ==
ref->get_input_tensor(0).get_element_type();
bool is_dynamic = node->get_input_node_ptr(0)->is_dynamic() ==
ref->get_input_node_ptr(0)->is_dynamic();
if (!(rankIsEqual && elemTypeIsEqual && is_dynamic)) {
return false;
}
bool has_groups = std::dynamic_pointer_cast<ov::op::v1::GroupConvolution>(node) != nullptr ||
std::dynamic_pointer_cast<ov::op::v1::GroupConvolutionBackpropData>(node);
size_t kernel_size_offset = has_groups ? 3 : 2;
auto ref_weights_shape = ref->get_input_tensor(1).get_shape();
auto cur_weights_shape = node->get_input_tensor(1).get_shape();
const auto ref_kernel_size = std::vector<size_t>(ref_weights_shape.begin() + kernel_size_offset,
ref_weights_shape.end());
const auto cur_kernel_size = std::vector<size_t>(cur_weights_shape.begin() + kernel_size_offset,
cur_weights_shape.end());
if (ref_kernel_size != cur_kernel_size) {
return false;
}
return true;
}

View File

@@ -1,42 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "matchers/matchers_manager.hpp"
using namespace SubgraphsDumper;
bool MatchersManager::match_any(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) {
for (const auto &it : m_matchers) {
if (it.second->match(node, ref, op_info)) return true;
}
return false;
}
bool MatchersManager::match_all(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) {
const auto matches = this->run_matchers(node, ref, op_info);
return std::all_of(matches.begin(), matches.end(), [](bool i) { return i; });
}
MatchersManager::MatchersManager(const std::string &cfg_path) {
if (!cfg_path.empty()) {
// m_cfg.load_file(cfg_path.c_str());
}
for (const auto &it : m_registry) {
m_matchers[it.first] = it.second();
}
}
std::vector<bool> MatchersManager::run_matchers(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) {
std::vector<bool> matches;
for (const auto &it : m_matchers) {
matches.push_back(it.second->match(node, ref, op_info));
}
return matches;
}

View File

@@ -1,199 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "matchers/single_op.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/validation_util.hpp"
#include <cstdlib>
using namespace SubgraphsDumper;
template<typename dType>
bool compare_constants_data(const std::shared_ptr<ov::op::v0::Constant> &op,
const std::shared_ptr<ov::op::v0::Constant> &ref) {
size_t elements_count = ov::shape_size(op->get_shape());
if (elements_count != ov::shape_size(ref->get_shape())) {
return false;
}
const auto &op_data = op->cast_vector<dType>();
const auto &ref_data = ref->cast_vector<dType>();
for (size_t i = 0; i < elements_count; ++i) {
// std::abs is not implemented for unsigned types; compare explicitly to keep the code universal for all dTypes
dType diff = op_data[i] > ref_data[i] ? op_data[i] - ref_data[i] : ref_data[i] - op_data[i];
if (diff > std::numeric_limits<dType>::epsilon()) {
return false;
}
}
return true;
}
// TODO: Move to some utils?
bool compare_constants_data(const std::shared_ptr<ov::op::v0::Constant> &op,
const std::shared_ptr<ov::op::v0::Constant> &ref) {
switch (op->get_element_type()) {
case ov::element::Type_t::boolean:
return compare_constants_data<bool>(op, ref);
case ov::element::Type_t::bf16:
return compare_constants_data<ov::bfloat16>(op, ref);
case ov::element::Type_t::f16:
return compare_constants_data<ov::float16>(op, ref);
case ov::element::Type_t::f32:
return compare_constants_data<float>(op, ref);
case ov::element::Type_t::f64:
return compare_constants_data<double>(op, ref);
case ov::element::Type_t::i8:
return compare_constants_data<int8_t>(op, ref);
case ov::element::Type_t::i16:
return compare_constants_data<int16_t>(op, ref);
case ov::element::Type_t::i32:
return compare_constants_data<int32_t>(op, ref);
case ov::element::Type_t::i64:
return compare_constants_data<int64_t>(op, ref);
// TODO cast_vector doesn't support u1 now
// case ov::element::Type_t::u1:
// return compare_constants_data<char>(op, ref);
case ov::element::Type_t::u8:
return compare_constants_data<uint8_t>(op, ref);
case ov::element::Type_t::u16:
return compare_constants_data<uint16_t>(op, ref);
case ov::element::Type_t::u32:
return compare_constants_data<uint32_t>(op, ref);
case ov::element::Type_t::u64:
return compare_constants_data<uint64_t>(op, ref);
default:
std::cout << "Can't compare constants" << op << " with " << ref << "\n" << "Unsupported data type";
return false;
}
}
bool SingleOpMatcher::same_op_type(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const {
return node->get_type_info() == ref->get_type_info();
}
bool SingleOpMatcher::match_inputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const {
if (node->get_input_size() != ref->get_input_size()) {
return false;
}
for (size_t i = 0; i < node->get_input_size(); ++i) {
bool rankIsEqual = node->get_input_tensor(i).get_partial_shape().rank() ==
ref->get_input_tensor(i).get_partial_shape().rank();
bool elemTypeIsEqual = node->get_input_tensor(i).get_element_type() ==
ref->get_input_tensor(i).get_element_type();
bool dynamismIsEqual = node->get_input_partial_shape(i).is_dynamic() ==
ref->get_input_partial_shape(i).is_dynamic();
if (!rankIsEqual || !elemTypeIsEqual || !dynamismIsEqual) {
return false;
}
}
return true;
}
bool
SingleOpMatcher::match_outputs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const {
if (node->get_output_size() != ref->get_output_size()) {
return false;
}
// Match output element type, shape rank & dynamism
for (size_t i = 0; i < node->get_output_size(); ++i) {
if (node->get_output_tensor(i).get_element_type() !=
ref->get_output_tensor(i).get_element_type()) {
return false;
}
if (node->get_output_tensor(i).get_partial_shape().is_dynamic() !=
ref->get_output_tensor(i).get_partial_shape().is_dynamic()) {
return false;
}
if (node->get_output_tensor(i).get_partial_shape().rank()!=
ref->get_output_tensor(i).get_partial_shape().rank()) {
return false;
}
}
return true;
}
bool SingleOpMatcher::same_attrs(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const {
return attributes::compare(node.get(), ref.get(), Comparator::CmpValues::ATTRIBUTES).valid;
}
bool SingleOpMatcher::match_ports(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const {
const auto &cfg = get_config(node);
const std::vector<size_t> &ignored_ports = cfg->ignored_ports;
for (size_t port_id = 0; port_id < node->get_input_size(); ++port_id) {
if (std::any_of(begin(ignored_ports), end(ignored_ports), [=](size_t p) { return p == port_id; })) {
continue;
}
const auto &cur_node_input = node->input_value(port_id);
const auto &ref_node_input = ref->input_value(port_id);
OPENVINO_SUPPRESS_DEPRECATED_START
const auto &cur_const_input = ov::get_constant_from_source(cur_node_input);
const auto &ref_const_input = ov::get_constant_from_source(ref_node_input);
OPENVINO_SUPPRESS_DEPRECATED_END
// Check whether both the OP and the reference port inputs are constant and have the same data
if (cur_const_input && ref_const_input &&
!compare_constants_data(cur_const_input, ref_const_input)) {
return false;
// Otherwise check that the port inputs are not a mix of constant and non-constant nodes
} else if ((cur_const_input && !ref_const_input) || (!cur_const_input && ref_const_input)) {
return false;
}
}
return true;
}
bool SingleOpMatcher::match(const std::shared_ptr<ov::Node> &node,
const std::shared_ptr<ov::Node> &ref,
const LayerTestsUtils::OPInfo &op_info) const {
for (const auto& input_node : node->inputs()) {
if (input_node.get_partial_shape().is_dynamic()) {
break;
}
}
const auto &cfg = get_config(node);
if (match_only_configured_ops() && cfg->is_fallback_config) {
return false;
}
if (cfg->ignore_matching) {
return false;
}
return same_op_type(node, ref, op_info) &&
match_inputs(node, ref, op_info) &&
match_outputs(node, ref, op_info) &&
same_attrs(node, ref, op_info) &&
match_ports(node, ref, op_info);
}
SingleOpMatcher::SingleOpMatcher() {
default_configs = {
std::make_shared<MatcherConfig<>>(std::vector<std::string>{}, std::vector<size_t>{0}),
std::make_shared<MatcherConfig<ov::op::v0::FakeQuantize>>(std::vector<std::string>{},
std::vector<size_t>{0, 1, 2, 3, 4}),
std::make_shared<MatcherConfig<
ov::op::v0::MatMul,
ov::op::v1::Add,
ov::op::v1::Multiply,
ov::op::v1::Subtract,
ov::op::v1::Power>>(std::vector<std::string>{}, std::vector<size_t>{0, 1}),
std::make_shared<MatcherConfig<
ov::op::v1::Convolution,
ov::op::v1::ConvolutionBackpropData,
ov::op::v1::GroupConvolution,
ov::op::v1::GroupConvolutionBackpropData>>(true)
};
}

View File

@@ -1,265 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <istream>
#include "op_cloner.hpp"
#include "ngraph/opsets/opset6.hpp"
#include "common_test_utils/data_utils.hpp"
namespace SubgraphsDumper {
namespace {
template<typename dType>
void get_port_range(const std::shared_ptr<ov::op::v0::Constant> &const_node, LayerTestsUtils::PortInfo &port_info) {
std::vector<dType> data = const_node->cast_vector<dType>();
if (!data.empty()) {
auto min_max = std::minmax_element(data.begin(), data.end());
port_info.min = *min_max.first;
port_info.max = *min_max.second;
}
}
void get_port_range(const std::shared_ptr<ov::op::v0::Constant> &constant_input, LayerTestsUtils::PortInfo &port_info) {
switch (constant_input->get_element_type()) {
case ov::element::Type_t::boolean:
get_port_range<char>(constant_input, port_info);
break;
case ov::element::Type_t::bf16:
get_port_range<ov::bfloat16>(constant_input, port_info);
break;
case ov::element::Type_t::f16:
get_port_range<ov::float16>(constant_input, port_info);
break;
case ov::element::Type_t::f32:
get_port_range<float>(constant_input, port_info);
break;
case ov::element::Type_t::f64:
get_port_range<double>(constant_input, port_info);
break;
case ov::element::Type_t::i8:
get_port_range<int8_t>(constant_input, port_info);
break;
case ov::element::Type_t::i16:
get_port_range<int16_t>(constant_input, port_info);
break;
case ov::element::Type_t::i32:
get_port_range<int32_t>(constant_input, port_info);
break;
case ov::element::Type_t::i64:
get_port_range<int64_t>(constant_input, port_info);
break;
case ov::element::Type_t::u1:
get_port_range<char>(constant_input, port_info);
break;
case ov::element::Type_t::u8:
get_port_range<uint8_t>(constant_input, port_info);
break;
case ov::element::Type_t::u16:
get_port_range<uint16_t>(constant_input, port_info);
break;
case ov::element::Type_t::u32:
get_port_range<uint32_t>(constant_input, port_info);
break;
case ov::element::Type_t::u64:
get_port_range<uint64_t>(constant_input, port_info);
break;
default:
break;
}
}
std::shared_ptr<ov::Node> clone(const std::shared_ptr<ov::Node> &node, LayerTestsUtils::OPInfo &meta) {
ov::OutputVector op_inputs;
bool has_parameters = false;
auto add_input_func = [&](size_t index) {
const auto input = node->input(index).get_source_output();
auto port_info = LayerTestsUtils::PortInfo();
OPENVINO_SUPPRESS_DEPRECATED_START
const auto constant = ov::get_constant_from_source(input);
OPENVINO_SUPPRESS_DEPRECATED_END
std::shared_ptr<ov::Node> input_node;
if (constant) {
get_port_range(constant, port_info);
float weights_size =
static_cast<float>(ov::shape_size(constant->get_shape()) * constant->get_element_type().size()) /
(1024 * 1024);
// Here we check for a constant that is big in terms of memory consumption, or for the case when a Constant-only input
// has been covered and we need to add the first Constant as a Parameter
if (weights_size > ClonersMap::constant_size_threshold_mb || (!has_parameters && index == 0)) {
std::cout << "Constant with size " << weights_size << " detected on port " << index << " of OP " << node
<< std::endl
<< "The constant will be replaced with parameter and initial data ranges meta info"
<< std::endl;
input_node =
std::make_shared<ov::op::v0::Parameter>(constant->get_element_type(), constant->get_shape());
if (!has_parameters && index == 0) {
// Resets port info to defaults
port_info.convert_to_const = false;
port_info.min = std::numeric_limits<double>::min();
port_info.max = std::numeric_limits<double>::max();
}
has_parameters = true;
} else {
input_node = std::make_shared<ov::op::v0::Constant>(constant->get_element_type(),
constant->get_shape(),
constant->get_data_ptr());
}
} else {
input_node = std::make_shared<ov::op::v0::Parameter>(input.get_element_type(), input.get_partial_shape());
has_parameters = true;
}
if (index > 0)
op_inputs.push_back(input_node);
else
op_inputs.insert(op_inputs.begin(), input_node);
meta.ports_info[index] = port_info;
};
// Try to add all inputs, except first
for (size_t i = 1; i < node->get_input_size(); ++i) {
add_input_func(i);
}
if (node->get_input_size() > 0) {
// For the first input, if we haven't found a Parameter yet (meaning a Constants-only input),
// we add the first Constant as a Parameter input explicitly
add_input_func(0);
}
if (!has_parameters) {
return nullptr;
}
return node->clone_with_new_inputs(op_inputs);
}
std::shared_ptr<ov::Node> clone_weightable_node(const std::shared_ptr<ov::Node> &node,
const std::vector<size_t> &weight_ports,
LayerTestsUtils::OPInfo &meta) {
ov::OutputVector op_inputs;
bool has_parameters = false;
for (size_t i = 0; i < node->get_input_size(); ++i) {
const auto input = node->input(i).get_source_output();
OPENVINO_SUPPRESS_DEPRECATED_START
const auto constant_input = ov::get_constant_from_source(input);
OPENVINO_SUPPRESS_DEPRECATED_END
auto port_info = LayerTestsUtils::PortInfo();
// Input is Parameter or dynamic data pass
if (!constant_input) {
has_parameters = true;
auto param = std::make_shared<ov::op::v0::Parameter>(input.get_element_type(),
input.get_partial_shape());
op_inputs.push_back(param);
meta.ports_info[i] = port_info;
continue;
}
get_port_range(constant_input, port_info);
// Input is Constant but not in the target weight ports
if (std::find(weight_ports.begin(), weight_ports.end(), i) == weight_ports.end()) {
float weights_size =
static_cast<float>(ov::shape_size(constant_input->get_shape()) *
constant_input->get_element_type().size()) / (1024 * 1024);
if (weights_size > ClonersMap::constant_size_threshold_mb) {
std::cout << "Constant with size " << weights_size << " detected on port " << i << " of OP " << node
<< std::endl
<< "The constant will be replaced with parameter and initial data ranges meta info"
<< std::endl;
auto param = std::make_shared<ov::op::v0::Parameter>(constant_input->get_element_type(),
constant_input->get_shape());
op_inputs.push_back(param);
has_parameters = true;
} else {
const auto clone = std::make_shared<ov::op::v0::Constant>(constant_input->get_element_type(),
constant_input->get_shape(),
constant_input->get_data_ptr());
op_inputs.push_back(clone);
}
meta.ports_info[i] = port_info;
continue;
}
// Input is constant and in the target weights ports
auto param = std::make_shared<ov::op::v0::Parameter>(constant_input->get_element_type(),
constant_input->get_shape());
port_info.convert_to_const = true;
meta.ports_info[i] = port_info;
op_inputs.push_back(param);
}
if (!has_parameters) {
return nullptr;
}
auto op_clone = node->clone_with_new_inputs(op_inputs);
return op_clone;
}
// Clone nodes requiring weights randomization
std::shared_ptr<ov::Node>
clone(const std::shared_ptr<ov::op::v1::Convolution> &node, LayerTestsUtils::OPInfo &meta) {
return clone_weightable_node(node, {1}, meta);
}
std::shared_ptr<ov::Node>
clone(const std::shared_ptr<ov::op::v1::GroupConvolution> &node, LayerTestsUtils::OPInfo &meta) {
return clone_weightable_node(node, {1}, meta);
}
std::shared_ptr<ov::Node>
clone(const std::shared_ptr<ov::op::v1::ConvolutionBackpropData> &node, LayerTestsUtils::OPInfo &meta) {
return clone_weightable_node(node, {1}, meta);
}
std::shared_ptr<ov::Node>
clone(const std::shared_ptr<ov::op::v1::GroupConvolutionBackpropData> &node, LayerTestsUtils::OPInfo &meta) {
return clone_weightable_node(node, {1}, meta);
}
std::shared_ptr<ov::Node>
clone(const std::shared_ptr<ov::op::v0::MatMul> &node, LayerTestsUtils::OPInfo &meta) {
return clone_weightable_node(node, {0, 1}, meta);
}
std::shared_ptr<ov::Node> clone(const std::shared_ptr<ov::op::v1::Add> &node, LayerTestsUtils::OPInfo &meta) {
return clone_weightable_node(node, {0, 1}, meta);
}
std::shared_ptr<ov::Node>
clone(const std::shared_ptr<ov::op::v1::Multiply> &node, LayerTestsUtils::OPInfo &meta) {
return clone_weightable_node(node, {0, 1}, meta);
}
std::shared_ptr<ov::Node>
clone(const std::shared_ptr<ov::op::v1::Subtract> &node, LayerTestsUtils::OPInfo &meta) {
return clone_weightable_node(node, {0, 1}, meta);
}
std::shared_ptr<ov::Node> clone(const std::shared_ptr<ov::op::v1::Power> &node, LayerTestsUtils::OPInfo &meta) {
return clone_weightable_node(node, {0, 1}, meta);
}
template<typename opType>
std::shared_ptr<ov::Node> clone_node(const std::shared_ptr<ov::Node> &node, LayerTestsUtils::OPInfo &meta) {
return clone(ov::as_type_ptr<opType>(node), meta);
}
} // namespace
#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::get_type_info_static(), clone_node<NAMESPACE::NAME>},
const ClonersMap::cloners_map_type ClonersMap::cloners{
#include <ngraph/opsets/opset1_tbl.hpp>
#include <ngraph/opsets/opset2_tbl.hpp>
#include <ngraph/opsets/opset3_tbl.hpp>
#include <ngraph/opsets/opset4_tbl.hpp>
#include <ngraph/opsets/opset5_tbl.hpp>
#include <ngraph/opsets/opset6_tbl.hpp>
#include <ngraph/opsets/opset7_tbl.hpp>
#include <ngraph/opsets/opset8_tbl.hpp>
#include <ngraph/opsets/opset9_tbl.hpp>
};
#undef NGRAPH_OP
float ClonersMap::constant_size_threshold_mb = 0.5;
} // namespace SubgraphsDumper

View File

@@ -1,240 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <sstream>
#include <ngraph/validation_util.hpp>
#include <ops_cache.hpp>
#include <op_cloner.hpp>
#include "inference_engine.hpp"
#include "common_test_utils/file_utils.hpp"
#include "pugixml.hpp"
using namespace SubgraphsDumper;
void OPCache::update_ops_cache(const std::shared_ptr<ov::Node> &op,
const Model& source_model) {
const std::shared_ptr<ov::Node> cachedOp = [&] {
for (auto &&it : m_ops_cache) {
if (manager.match_any(it.first, op, it.second)) {
it.second.found_in_models[source_model.name].unique_op_cnt += 1;
it.second.found_in_models[source_model.name].model_paths.insert({{source_model.path, source_model.op_cnt}});
return it.first;
}
}
return std::shared_ptr<ov::Node>{};
}();
auto saveOpToCash = [&] {
try {
const auto& clone_fn = SubgraphsDumper::ClonersMap::cloners.at(op->get_type_info());
LayerTestsUtils::OPInfo meta(source_model.name, source_model.path, source_model.op_cnt);
const std::shared_ptr<ov::Node> op_clone = clone_fn(op, meta);
if (!op_clone) {
return;
}
op_clone->set_friendly_name(op_clone->get_friendly_name() + "_cached");
m_ops_cache.insert({op_clone, meta});
} catch (std::out_of_range& e) {
std::cout << "WARNING: Cloner for " << op->get_type_name() << " (" << op->get_type_info().get_version()
<< ") isn't found: " << e.what() << std::endl;
} catch (std::exception& e) {
std::cout << "ERROR: " << e.what() << std::endl;
}
};
if (!cachedOp.get()) {
saveOpToCash();
} else {
for (int i = 0; i < op->get_input_size(); i++) {
auto shape = op->get_input_shape(i);
unsigned long shapeSize = ov::shape_size(shape) * op->get_output_element_type(0).size();
auto cachedOpShape = cachedOp->get_input_shape(i);
unsigned long cachedOpShapeSize =
ov::shape_size(cachedOpShape) * cachedOp->get_output_element_type(0).size();
if (shapeSize < cachedOpShapeSize) {
m_ops_cache.erase(cachedOp);
saveOpToCash();
}
}
}
}
void OPCache::update_ops_cache(const std::shared_ptr<ov::Model> &func, const Model& source_model, const bool extract_body) {
size_t cached_ops_count = m_ops_cache.size();
for (const auto &op : func->get_ordered_ops()) {
if (std::dynamic_pointer_cast<ov::op::v0::Parameter>(op) ||
std::dynamic_pointer_cast<ov::op::v0::Constant>(op) ||
std::dynamic_pointer_cast<ov::op::v0::Result>(op) ||
// ReadValue and Assign have to be handled in pair
// Will be handled as part of 48838
std::dynamic_pointer_cast<ov::op::util::AssignBase>(op) ||
std::dynamic_pointer_cast<ov::op::util::ReadValueBase>(op)
) {
continue;
}
if (extract_body) {
if (std::dynamic_pointer_cast<ov::op::v8::If>(op)) {
auto if_op = std::dynamic_pointer_cast<ov::op::v8::If>(op);
std::vector<std::shared_ptr<ov::Model>> bodies;
for (size_t i = 0; i < if_op->get_internal_subgraphs_size(); i++) {
auto if_body = if_op->get_function(i);
update_ops_cache(if_body, source_model, extract_body);
}
} else if (std::dynamic_pointer_cast<ov::op::v5::Loop>(op)) {
auto loop = std::dynamic_pointer_cast<ov::op::v5::Loop>(op);
auto loop_body = loop->get_function();
update_ops_cache(loop_body, source_model, extract_body);
} else if (std::dynamic_pointer_cast<ov::op::v0::TensorIterator>(op)) {
auto ti = std::dynamic_pointer_cast<ov::op::v0::TensorIterator>(op);
auto ti_body = ti->get_body();
update_ops_cache(ti_body, source_model, extract_body);
}
}
update_ops_cache(op, source_model);
}
std::cout << "\t" << m_ops_cache.size() - cached_ops_count << " new OPs were cached." << std::endl;
}
void OPCache::serialize_cached_ops(const std::string &serialization_dir) {
if (!ov::test::utils::directoryExists(serialization_dir)) {
ov::test::utils::createDirectoryRecursive(serialization_dir);
}
for (const auto &op : m_ops_cache) {
auto res = serialize_function(op, serialization_dir);
if (res != OPCache::SerializationStatus::RETRY) {
continue;
} else {
for (size_t i = 1; i <= 5; ++i) {
std::cout << "Serialization retry #" << i << std::endl;
res = serialize_function(op, serialization_dir);
if (res != OPCache::SerializationStatus::RETRY) {
break;
}
}
}
}
}
void OPCache::serialize_meta_info(const LayerTestsUtils::OPInfo &info, const std::string &path) {
pugi::xml_document doc;
pugi::xml_node root = doc.append_child("meta_info");
pugi::xml_node models = root.append_child("models");
double k = 0;
for (const auto &model : info.found_in_models) {
pugi::xml_node model_node = models.append_child("model");
model_node.append_attribute("name").set_value(model.first.c_str());
double model_k = model.second.unique_op_cnt;
model_node.append_attribute("count").set_value(static_cast<unsigned long long>(model.second.unique_op_cnt));
size_t tmp = 0;
for (const auto& model_path : model.second.model_paths) {
if (model_path.second) {
model_node.append_child("path").append_attribute("model").set_value(model_path.first.c_str());
tmp += model_path.second;
}
}
model_k /= tmp;
model_k /= model.second.model_paths.size();
k += model_k;
}
k *= info.found_in_models.size();
root.append_child("graph_priority").append_attribute("value").set_value(k);
auto ports_info = root.append_child("ports_info");
for (const auto &port : info.ports_info) {
auto port_node = ports_info.append_child("port");
port_node.append_attribute("id").set_value(static_cast<unsigned long long>(port.first));
if (port.second.min == std::numeric_limits<double>::min()) {
port_node.append_attribute("max").set_value("undefined");
port_node.append_attribute("min").set_value("undefined");
} else {
port_node.append_attribute("max").set_value(port.second.max);
port_node.append_attribute("min").set_value(port.second.min);
}
port_node.append_attribute("convert_to_const").set_value(port.second.convert_to_const);
}
doc.save_file(path.c_str());
}
float OPCache::get_size_of_cached_ops() {
float size = 0;
for (const auto &op : m_ops_cache) {
for (size_t i = 0; i < op.first->get_input_size(); ++i) {
const auto constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(
op.first->get_input_node_shared_ptr(i));
if (constant != nullptr) {
size += static_cast<float>(ov::shape_size(constant->get_shape()) *
constant->get_output_element_type(0).size()) / (1024 * 1024);
}
}
}
return size;
}
OPCache::SerializationStatus
OPCache::serialize_function(const std::pair<std::shared_ptr<ov::Node>, LayerTestsUtils::OPInfo> &op,
const std::string &serialization_dir) {
try {
std::cout << "Serializing function wrapping op " << op.first << std::endl;
std::cout << "Taken from model: " << op.second.found_in_models.begin()->first << std::endl;
ov::ParameterVector params;
bool is_dynamic = false;
for (size_t i = 0; i < op.first->get_input_size(); ++i) {
if (ov::op::util::is_parameter(op.first->get_input_node_ptr(i))) {
auto param = std::dynamic_pointer_cast<ov::op::v0::Parameter>(
op.first->get_input_node_shared_ptr(i));
if (param->get_partial_shape().is_dynamic()) {
is_dynamic = true;
}
params.push_back(param);
}
}
ov::ResultVector results;
for (auto &out : op.first->outputs()) {
if (out.get_partial_shape().is_dynamic()) {
is_dynamic = true;
}
results.push_back(std::make_shared<ov::op::v0::Result>(out));
}
auto function = std::make_shared<ov::Model>(results, params);
// TODO: How to define element type for multi-output ops
std::string op_folder_name = op.first->get_type_info().name;
std::string opset_version = op.first->get_type_info().get_version();
std::string opset_name = "opset";
auto pos = opset_version.find(opset_name);
if (pos != std::string::npos) {
op_folder_name += "-" + opset_version.substr(pos + opset_name.size());
}
auto op_el_type = op.first->get_output_element_type(0).get_type_name();
auto current_op_folder = serialization_dir + ov::test::utils::FileSeparator +
(is_dynamic ? "dynamic" : "static") + ov::test::utils::FileSeparator +
op_folder_name + ov::test::utils::FileSeparator + op_el_type;
auto op_name = op.first->get_name();
op_name = op_name.substr(op_name.find("_") + 1);
std::cout << op_name << " will be serialized to " << current_op_folder << std::endl;
if (!ov::test::utils::directoryExists(current_op_folder)) {
ov::test::utils::createDirectoryRecursive(current_op_folder);
}
std::replace(op_name.begin(), op_name.end(), '/', '_');
std::replace(op_name.begin(), op_name.end(), '\\', '_');
auto xml_path = current_op_folder + ov::test::utils::FileSeparator + op_name + ".xml";
auto bin_path = current_op_folder + ov::test::utils::FileSeparator + op_name + ".bin";
auto meta_info = current_op_folder + ov::test::utils::FileSeparator + op_name + ".meta";
auto cnn_net = InferenceEngine::CNNNetwork(function);
cnn_net.serialize(xml_path, bin_path);
serialize_meta_info(op.second, meta_info);
return OPCache::SerializationStatus::OK;
} catch (std::exception &e) {
std::cout << "Failed to serialize function related to op" << op.first << std::endl
<< "Exception occurred: " << e.what() << std::endl;
if (std::string(e.what()).find("Can't open") != std::string::npos) {
return OPCache::SerializationStatus::RETRY;
}
return OPCache::SerializationStatus::FAILED;
}
}

View File

@@ -2,16 +2,18 @@
 # SPDX-License-Identifier: Apache-2.0
 #
-set(TARGET_NAME subgraphsDumperTests_deprecated)
+set(TARGET_NAME subgraphsDumperTests)
 addIeTargetTest(
         NAME ${TARGET_NAME}
         ROOT ${CMAKE_CURRENT_SOURCE_DIR}
         ADDITIONAL_SOURCE_DIRS
-            # TODO: create static library for matchers instead of duplication
-            ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers
+            ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper/src
+        EXCLUDED_SOURCE_PATHS
+            ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper/src/main.cpp
         INCLUDES
             ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper/include
+            ${CMAKE_CURRENT_SOURCE_DIR}/
         LINK_LIBRARIES
             PRIVATE
                 func_test_utils

View File

@@ -1,76 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "matchers/convolutions.hpp"
#include "ngraph/ops.hpp"
#include "functional_test_utils/summary/op_info.hpp"
class ConvolutionMatcherTest : public ::testing::Test {
protected:
void SetUp() override {
matcher = SubgraphsDumper::ConvolutionsMatcher();
op_info = LayerTestsUtils::OPInfo();
}
SubgraphsDumper::ConvolutionsMatcher matcher;
LayerTestsUtils::OPInfo op_info;
};
// Check that two convolutions with different input ov::Shapes but the same kernel size match each other
TEST_F(ConvolutionMatcherTest, ConvsSameKernelSize) {
const auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 3, 10, 10}));
const auto weights = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({10, 3, 3, 3}), 1);
const auto op1 = std::make_shared<ov::op::v1::Convolution>(param, weights, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 5, 20, 20}));
const auto weights2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({10, 5, 3, 3}), 1);
const auto op2 = std::make_shared<ov::op::v1::Convolution>(param2, weights2, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
ASSERT_TRUE(matcher.match(op1, op2, op_info));
}
// Check that two convolutions with different kernel sizes do not match each other
TEST_F(ConvolutionMatcherTest, ConvsDifferentKernelSize) {
const auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 3, 10, 10}));
const auto weights = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({10, 3, 3, 5}), 1);
const auto op1 = std::make_shared<ov::op::v1::Convolution>(param, weights, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 5, 20, 20}));
const auto weights2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({10, 5, 3, 3}), 1);
const auto op2 = std::make_shared<ov::op::v1::Convolution>(param2, weights2, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
ASSERT_FALSE(matcher.match(op1, op2, op_info));
}
// Check that two group convolutions with different input ov::Shapes but the same kernel size match each other
TEST_F(ConvolutionMatcherTest, GroupConvsSameKernelSize) {
const auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 4, 10, 10}));
const auto weights = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 10, 2, 3, 3}), 1);
const auto op1 = std::make_shared<ov::op::v1::GroupConvolution>(param, weights, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 6, 20, 20}));
const auto weights2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 10, 3, 3, 3}), 1);
const auto op2 = std::make_shared<ov::op::v1::GroupConvolution>(param2, weights2, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
ASSERT_TRUE(matcher.match(op1, op2, op_info));
}
// Check that two group convolutions with different kernel sizes do not match each other
TEST_F(ConvolutionMatcherTest, GroupConvsDifferentKernelSize) {
const auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 4, 10, 10}));
const auto weights = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 10, 2, 3, 5}), 1);
const auto op1 = std::make_shared<ov::op::v1::GroupConvolution>(param, weights, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 6, 20, 20}));
const auto weights2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 10, 3, 3, 3}), 1);
const auto op2 = std::make_shared<ov::op::v1::GroupConvolution>(param2, weights2, ov::Strides(0, 0), ov::CoordinateDiff(0, 0),
ov::CoordinateDiff(0, 0), ov::Strides(0, 0));
ASSERT_FALSE(matcher.match(op1, op2, op_info));
}
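
The four tests above all reduce to one check: whether the kernel spatial dimensions of the two weight constants agree. Below is a minimal standalone sketch of that comparison, assuming 2D convolutions where the spatial dimensions are the last two entries of the weights shape (this holds for both Convolution weights {O, I, kH, kW} and GroupConvolution weights {G, O, I, kH, kW}); `spatial_kernel_dims` is an illustrative helper, not part of the matcher's API.

```
// Sketch only: compare the kernel spatial dims of two convolution weight shapes,
// assuming 2D convolutions where the spatial dims are the trailing two entries.
#include <cstddef>
#include <iostream>
#include <vector>

using Shape = std::vector<size_t>;

// Returns the trailing two dimensions of a weights shape (the kH x kW kernel).
static Shape spatial_kernel_dims(const Shape& weights_shape) {
    return {weights_shape[weights_shape.size() - 2], weights_shape.back()};
}

int main() {
    const Shape conv_a{10, 3, 3, 3};  // 3x3 kernel
    const Shape conv_b{10, 5, 3, 3};  // same 3x3 kernel, different input channels
    const Shape conv_c{10, 3, 3, 5};  // 3x5 kernel
    std::cout << std::boolalpha
              << (spatial_kernel_dims(conv_a) == spatial_kernel_dims(conv_b)) << "\n"   // true
              << (spatial_kernel_dims(conv_a) == spatial_kernel_dims(conv_c)) << "\n";  // false
    return 0;
}
```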


@@ -1,88 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "matchers/single_op.hpp"
#include "ngraph/ops.hpp"
#include "functional_test_utils/summary/op_info.hpp"
class SingleOpMatcherTest : public ::testing::Test {
protected:
void SetUp() override {
matcher = SubgraphsDumper::SingleOpMatcher();
op_info = LayerTestsUtils::OPInfo();
}
SubgraphsDumper::SingleOpMatcher matcher;
LayerTestsUtils::OPInfo op_info;
};
// Check that different values of constant nodes on port 0 (default value) are ignored in match()
TEST_F(SingleOpMatcherTest, AllPortsAreConsts_IgnoreConstPortVals) {
const auto const1 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 1);
const auto shape_pattern = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape({2}), std::vector<int>{1, 25});
const auto op1 = std::make_shared<ov::op::v1::Reshape>(const1, shape_pattern, false);
const auto const2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 2);
const auto op2 = std::make_shared<ov::op::v1::Reshape>(const2, shape_pattern, false);
ASSERT_TRUE(matcher.match(op1, op2, op_info));
}
// Check match of equal nodes
TEST_F(SingleOpMatcherTest, AllPortsAreParams_NodesEqual) {
const auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10}));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 20}));
const auto op1 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param1, param2}), 1);
const auto op2 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param1, param2}), 1);
ASSERT_TRUE(matcher.match(op1, op2, op_info));
}
// Check that nodes don't match - different input ranks
TEST_F(SingleOpMatcherTest, AllPortsAreParams_RanksNotEqual) {
const auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10}));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 20}));
const auto op1 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param1, param2}), 1);
const auto param3 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 40, 10}));
const auto param4 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 40, 10}));
const auto op2 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param3, param4}), 1);
ASSERT_FALSE(matcher.match(op1, op2, op_info));
}
// Check that nodes don't match - different input element types
TEST_F(SingleOpMatcherTest, AllPortsAreParams_TypesNotEqual) {
const auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10}));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 20}));
const auto op1 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param1, param2}), 1);
const auto param3 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f16, ov::Shape({10, 10}));
const auto param4 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f16, ov::Shape({10, 20}));
const auto op2 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param3, param4}), 1);
ASSERT_FALSE(matcher.match(op1, op2, op_info));
}
// Check that nodes don't match - different attribute values (concat axis)
TEST_F(SingleOpMatcherTest, AllPortsAreParams_AttrsNotEqual) {
const auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10, 10}));
const auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10, 10}));
const auto op1 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param1, param2}), 1);
const auto param3 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10, 10}));
const auto param4 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({10, 10, 10}));
const auto op2 = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({param3, param4}), 2);
ASSERT_FALSE(matcher.match(op1, op2, op_info));
}
// Check that two Add ops with different constants on their input ports still match each other
TEST_F(SingleOpMatcherTest, ChecAddOpConfiguration) {
const auto const1 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 1);
const auto const2 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 2);
const auto op1 = std::make_shared<ov::op::v1::Add>(const1, const2);
const auto const3 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 3);
const auto const4 = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({5, 5}), 4);
const auto op2 = std::make_shared<ov::op::v1::Add>(const3, const4);
ASSERT_TRUE(matcher.match(op1, op2, op_info));
}


@@ -1,56 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "matchers/base_matcher.hpp"
#include "ngraph/ops.hpp"
using namespace ov::op;
using namespace ngraph;
using ov::element::Type_t;
class MatcherConfigTest : public ::testing::Test {
protected:
void SetUp() override {
const auto const1 = std::make_shared<v0::Constant>(Type_t::f32, Shape({5, 5}), 1);
const auto const2 = std::make_shared<v0::Constant>(Type_t::f32, Shape({5, 5}), 2);
node = std::make_shared<v1::Add>(const1, const2);
}
std::shared_ptr<Node> node;
};
// Check that the matcher configuration for an operation is created successfully and all parameters are set
TEST_F(MatcherConfigTest, ParametersAreSet) {
std::vector<size_t> ignored_ports = {0};
std::vector<std::string> ignored_attrs = {"attr"};
SubgraphsDumper::MatcherConfig<v1::Add> matcher_cfg(ignored_attrs, ignored_ports);
ASSERT_TRUE(matcher_cfg.op_in_config(node));
ASSERT_TRUE(matcher_cfg.ignored_ports == ignored_ports);
ASSERT_TRUE(matcher_cfg.ignored_attributes == ignored_attrs);
ASSERT_FALSE(matcher_cfg.is_fallback_config);
}
// Check that the fallback matcher configuration is created successfully and all parameters are set
TEST_F(MatcherConfigTest, FallbackConfig) {
std::vector<size_t> ignored_ports = {0};
std::vector<std::string> ignored_attrs = {"attr"};
SubgraphsDumper::MatcherConfig<> matcher_cfg(ignored_attrs, ignored_ports);
ASSERT_FALSE(matcher_cfg.op_in_config(node));
ASSERT_TRUE(matcher_cfg.ignored_ports == ignored_ports);
ASSERT_TRUE(matcher_cfg.ignored_attributes == ignored_attrs);
ASSERT_TRUE(matcher_cfg.is_fallback_config);
}
// Check that the fallback matcher configuration is created by the default constructor
TEST_F(MatcherConfigTest, FallbackConfigDefaultConstructor) {
std::vector<size_t> ignored_ports = {};
std::vector<std::string> ignored_attrs = {};
auto matcher_cfg = SubgraphsDumper::MatcherConfig<>();
ASSERT_FALSE(matcher_cfg.op_in_config(node));
ASSERT_TRUE(matcher_cfg.ignored_ports == ignored_ports);
ASSERT_TRUE(matcher_cfg.ignored_attributes == ignored_attrs);
ASSERT_TRUE(matcher_cfg.is_fallback_config);
}


@@ -1,28 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME subgraphsDumper)
list(APPEND LIBRARIES
gflags
openvino::runtime
func_test_utils
openvino::pugixml
)
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}/src
INCLUDES
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/include
LINK_LIBRARIES
PRIVATE
${LIBRARIES}
DEPENDENCIES
${DEPENDENCIES}
ADD_CPPLINT
)
ie_faster_build(${TARGET_NAME} UNITY)


@@ -1,43 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gflags/gflags.h>
#include <iostream>
static const char help_message[] = "Print a usage message.";
static const char input_folders_message[] = "Required. Comma separated paths to the input folders with IRs";
static const char local_cache_message[] = "Optional. Comma separated paths to the local cache folders with IRs";
static const char output_folder_message[] = "Required. Path to the output folders where to serialize IRs";
static const char path_regex_message[] = "Optional. Regular expression to be applied during recursive "
"discovery of the input folders";
static const char extract_body_message[] = "Optional. Allow extracting operation bodies to the operation cache.";
static const char cache_type_message[] = "Optional. Specify the caching type: OP or GRAPH. Both are enabled by default.";
DEFINE_bool(h, false, help_message);
DEFINE_string(input_folders, "", input_folders_message);
DEFINE_string(local_cache, "", local_cache_message);
DEFINE_string(output_folder, "output", output_folder_message);
DEFINE_string(path_regex, ".*", path_regex_message);
DEFINE_bool(extract_body, true, extract_body_message);
DEFINE_string(cache_type, "", cache_type_message);
/**
* @brief This function shows a help message
*/
static void showUsage() {
std::cout << "\n";
std::cout << "Subgraph Dumper [OPTION]\n";
std::cout << "Options:\n";
std::cout << "\n";
std::cout << " -h " << help_message << "\n";
std::cout << " --input_folders \"<path>\" " << input_folders_message << "\n";
std::cout << " --local_cache \"<path>\" " << input_folders_message << "\n";
std::cout << " --output_folder \"<path>\" " << output_folder_message << "\n";
std::cout << " --path_regex \"<path>\" " << path_regex_message << "\n";
std::cout << " --extract_body \"<value>\" " << extract_body_message << "\n";
std::cout << " --cache_type \"<value>\" " << extract_body_message << "\n";
std::cout << std::flush;
}
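
Taken together, these flags suggest an invocation along the following lines; the binary name matches the build target, while every path shown here is a placeholder rather than a path used by the project.

```
./subgraphsDumper --input_folders /models/set_a,/models/set_b \
                  --local_cache /cache/previous_run \
                  --output_folder /cache/subgraphs \
                  --path_regex ".*\.xml" \
                  --extract_body=true \
                  --cache_type OP
```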


@@ -1,66 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gflag_config.hpp"
#include "cache/op_cache.hpp"
#include "cache/graph_cache.hpp"
#include "utils/model.hpp"
using namespace ov::tools::subgraph_dumper;
int main(int argc, char *argv[]) {
gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
if (FLAGS_h) {
showUsage();
return 0;
}
std::vector<std::string> local_cache_dirs = ov::test::utils::splitStringByDelimiter(FLAGS_local_cache);
std::vector<std::string> dirs = ov::test::utils::splitStringByDelimiter(FLAGS_input_folders);
std::vector<std::string> models;
if (!ov::test::utils::directoryExists(FLAGS_output_folder)) {
std::string msg = "Output directory (" + FLAGS_output_folder + ") doesn't not exist! The directory will be created.";
std::cout << msg << std::endl;
ov::test::utils::createDirectoryRecursive(FLAGS_output_folder);
}
try {
models = find_models(dirs, FLAGS_path_regex);
} catch (std::runtime_error& e) {
std::cout << "[ INFO ] Try 'subgraphsDumper -h' for more information. \nException: " << e.what() << std::endl;
return 1;
}
std::vector<std::shared_ptr<ICache>> caches;
if (FLAGS_cache_type == "OP" || FLAGS_cache_type.empty()) {
std::cout << "[ INFO ] OpCache is enabled!" << std::endl;
caches.push_back(OpCache::get());
}
if (FLAGS_cache_type == "GRAPH" || FLAGS_cache_type.empty()) {
// TODO: iefode: check and enable it in CI
// std::cout << "[ INFO ] GraphCache is enabled!" << std::endl;
// caches.push_back(GraphCache::get());
}
for (auto& cache : caches) {
cache->set_serialization_dir(FLAGS_output_folder);
}
std::map<ModelCacheStatus, std::vector<std::string>> cache_model_status;
// Upload previously cached graphs to cache
if (!FLAGS_local_cache.empty()) {
auto cachedOps = find_models(local_cache_dirs);
cache_model_status = cache_models(caches, cachedOps, FLAGS_extract_body);
}
{
auto tmp_cache_model_status = cache_models(caches, models, FLAGS_extract_body);
cache_model_status.insert(tmp_cache_model_status.begin(), tmp_cache_model_status.end());
}
for (auto& cache : caches) {
cache->set_serialization_dir(FLAGS_output_folder);
cache->serialize_cache();
}
save_model_status_to_file(cache_model_status, FLAGS_output_folder);
return cache_model_status[ModelCacheStatus::NOT_FULLY_CACHED].empty() && cache_model_status[ModelCacheStatus::NOT_READ].empty();
}


@@ -1,25 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME subgraphsDumperTests)
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
ADDITIONAL_SOURCE_DIRS
${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src
EXCLUDED_SOURCE_PATHS
${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/main.cpp
INCLUDES
${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include
${CMAKE_CURRENT_SOURCE_DIR}/
LINK_LIBRARIES
PRIVATE
func_test_utils
openvino::runtime
openvino::pugixml
ADD_CPPLINT
)
ie_faster_build(${TARGET_NAME} UNITY)


@@ -12,6 +12,7 @@ addIeTargetTest(
 INCLUDES
 PRIVATE
 "${CMAKE_CURRENT_SOURCE_DIR}/include"
+"${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/"
 ADD_CPPLINT
 LINK_LIBRARIES
 PUBLIC


@@ -40,7 +40,7 @@ TEST_P(OpImplCheckTest, checkPluginImplementationQueryModel) {
 }
 if (expected != actual) {
-IE_THROW() << "Expected and actual results are different";
+throw std::runtime_error("Expected and actual results are different");
 }
 summary.updateOPsImplStatus(function, true);
 } catch (const std::exception &e) {