[CPU] Allow exec graph serialization to cout or xml via env variable (#5967)

Egor Duplensky 2021-06-08 22:39:08 +03:00 committed by GitHub
parent 246932a5d3
commit 57850f0a87
11 changed files with 169 additions and 60 deletions

View File

@@ -4,9 +4,11 @@
#pragma once
#include <threading/ie_istreams_executor.hpp>
#include "utils/debug_capabilities.h"
#include <string>
#include <map>
#include <threading/ie_istreams_executor.hpp>
namespace MKLDNNPlugin {
@@ -35,6 +37,10 @@ struct Config {
bool manualEnforceBF16 = false;
#endif
#ifdef CPU_DEBUG_CAPS
DebugCaps::Config debugCaps;
#endif
void readProperties(const std::map<std::string, std::string> &config);
void updateProperties();
std::map<std::string, std::string> _config;

View File

@@ -32,7 +32,7 @@ bool MKLDNNEdge::isUseExternalMemory() const {
return externalMemoryPtr;
}
bool MKLDNNEdge::isDropped() {
bool MKLDNNEdge::isDropped() const {
bool not_in_parent = true;
bool not_in_child = true;
@@ -124,6 +124,10 @@ void MKLDNNEdge::reuse(MKLDNNMemoryPtr ptr) {
status = Status::Allocated;
}
const InferenceEngine::TensorDesc& MKLDNNEdge::getInputDescRO() const {
return inputDesc;
}
InferenceEngine::TensorDesc MKLDNNEdge::getInputDesc() {
if (inputDesc.getLayout() == InferenceEngine::Layout::ANY) {
inputDesc = getSpecifiedInputDesc({});
@@ -131,6 +135,10 @@ InferenceEngine::TensorDesc MKLDNNEdge::getInputDesc() {
return inputDesc;
}
const InferenceEngine::TensorDesc& MKLDNNEdge::getOutputDescRO() const {
return outputDesc;
}
InferenceEngine::TensorDesc MKLDNNEdge::getOutputDesc() {
if (outputDesc.getLayout() == InferenceEngine::Layout::ANY) {
outputDesc = getSpecifiedOutputDesc({});
@@ -145,11 +153,11 @@ InferenceEngine::TensorDesc MKLDNNEdge::getDesc() {
return getInputDesc();
}
int MKLDNNEdge::getInputNum() {
int MKLDNNEdge::getInputNum() const {
return parent_port;
}
int MKLDNNEdge::getOutputNum() {
int MKLDNNEdge::getOutputNum() const {
return child_port;
}

View File

@@ -61,11 +61,11 @@ public:
MKLDNNMemoryPtr& getMemoryPtr();
bool needReorder();
bool isDropped();
bool isDropped() const;
bool isUseExternalMemory() const;
int getInputNum();
int getOutputNum();
int getInputNum() const;
int getOutputNum() const;
void setChildPort(const size_t port) { child_port = port; }
@@ -73,10 +73,12 @@ public:
MKLDNNEdgePtr getSharedEdge() const;
MKLDNNEdgePtr getSharedEdge(std::nothrow_t) const;
const InferenceEngine::TensorDesc& getInputDescRO() const;
const InferenceEngine::TensorDesc& getOutputDescRO() const;
private:
std::string name();
private:
std::weak_ptr<MKLDNNNode> parent;
std::weak_ptr<MKLDNNNode> child;
int parent_port;

View File

@@ -78,7 +78,10 @@ void MKLDNNGraph::CreateGraph(NET &net, const MKLDNNExtensionManager::Ptr& extMg
Replicate(net, extMgr);
InitGraph();
status = Ready;
ENABLE_CPU_DEBUG_CAP(serialize(*this));
}
template void MKLDNNGraph::CreateGraph(const std::shared_ptr<const ngraph::Function>&,
@@ -344,10 +347,6 @@ void MKLDNNGraph::InitGraph() {
graphNode->cleanup();
}
#endif
#if !defined(NDEBUG) && defined(PRINT_GRAPH_INFO)
printGraphInfo();
#endif
ExecuteConstantNodesOnly();
}
@@ -809,7 +808,7 @@ void MKLDNNGraph::Infer(MKLDNNInferRequest* request, int batch) {
mkldnn::stream stream(eng);
ENABLE_CPU_DEBUG_CAP(NodeDumper nd(infer_count));
ENABLE_CPU_DEBUG_CAP(NodeDumper nd(config.debugCaps, infer_count));
for (int i = 0; i < graphNodes.size(); i++) {
if (request != nullptr) {
@@ -954,6 +953,10 @@ void MKLDNNGraph::setConfig(const Config &cfg) {
config = cfg;
}
const Config& MKLDNNGraph::getConfig() const {
return config;
}
void MKLDNNGraph::setProperty(const std::map<std::string, std::string>& properties) {
config.readProperties(properties);
}
@@ -1217,21 +1220,3 @@ void MKLDNNGraph::EnforceBF16() {
InferenceEngine::CNNNetwork MKLDNNGraph::dump() const {
return dump_graph_as_ie_ngraph_net(*this);
}
void MKLDNNGraph::printGraphInfo() const {
for (auto &graphNode : graphNodes) {
std::cout << "name: " << graphNode->getName() << " [ ";
if (graphNode->parentEdges.size() > 0) {
auto prnt_out_desc = graphNode->parentEdges[0].lock()->getOutputDesc();
std::cout << "in: " << prnt_out_desc.getPrecision().name()
<< "/l=" << prnt_out_desc.getLayout()
<< "; ";
}
if (graphNode->childEdges.size() > 0) {
auto chld_in_desc = graphNode->childEdges[0].lock()->getInputDesc();
std::cout << "out: " << chld_in_desc.getPrecision().name()
<< "/l=" << chld_in_desc.getLayout();
}
std::cout << " ]" << std::endl;
}
}

View File

@@ -39,6 +39,8 @@ public:
}
void setConfig(const Config &cfg);
const Config& getConfig() const;
void setProperty(const std::map<std::string, std::string> &properties);
Config getProperty() const;
@@ -59,6 +61,10 @@ public:
void Infer(MKLDNNInferRequest* request = nullptr, int batch = -1);
const std::vector<MKLDNNNodePtr>& GetNodes() const {
return graphNodes;
}
std::vector<MKLDNNNodePtr>& GetNodes() {
return graphNodes;
}
@@ -219,7 +225,6 @@ protected:
private:
void EnforceBF16();
void printGraphInfo() const;
};
} // namespace MKLDNNPlugin

View File

@@ -5,9 +5,11 @@
#include "mkldnn_graph_dumper.h"
#include <ie_ngraph_utils.hpp>
#include "exec_graph_info.hpp"
#include "ie_common.h"
#include "mkldnn_debug.h"
#include <ngraph/variant.hpp>
#include "ngraph/ngraph.hpp"
#include "utils/debug_capabilities.h"
#include <vector>
#include <string>
@@ -18,6 +20,9 @@ using namespace InferenceEngine;
namespace MKLDNNPlugin {
void serializeToCout(const MKLDNNGraph &graph);
void serializeToXML(const MKLDNNGraph &graph, const std::string& path);
namespace {
std::map<std::string, std::string> extract_node_metadata(const MKLDNNNodePtr &node) {
@@ -207,4 +212,46 @@ InferenceEngine::CNNNetwork dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph
return net;
}
#ifdef CPU_DEBUG_CAPS
void serialize(const MKLDNNGraph &graph) {
const std::string& path = graph.getConfig().debugCaps.execGraphPath;
if (path.empty())
return;
if (path == "cout")
serializeToCout(graph);
else if (path.size() >= 4 && !path.compare(path.size() - 4, 4, ".xml"))
serializeToXML(graph, path);
else
IE_THROW() << "Unknown serialize format. Should be either 'cout' or '*.xml'. Got " << path;
}
void serializeToXML(const MKLDNNGraph &graph, const std::string& path) {
if (path.empty())
return;
graph.dump().serialize(path);
}
void serializeToCout(const MKLDNNGraph &graph) {
for (const auto& node : graph.GetNodes()) {
std::cout << "name: " << node->getName() << " [ ";
if (!node->getParentEdges().empty()) {
const auto& parentEdge = *(node->getParentEdges()[0].lock());
const auto& prnt_out_desc = parentEdge.getOutputDescRO();
std::cout << "in: " << prnt_out_desc.getPrecision().name()
<< "/l=" << prnt_out_desc.getLayout()
<< "; ";
}
if (!node->getChildEdges().empty()) {
const auto& childEdge = *(node->getChildEdges()[0].lock());
const auto& chld_in_desc = childEdge.getInputDescRO();
std::cout << "out: " << chld_in_desc.getPrecision().name()
<< "/l=" << chld_in_desc.getLayout();
}
std::cout << " ]" << std::endl;
}
}
#endif
} // namespace MKLDNNPlugin
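
Stripped of the plugin types, the dispatch in serialize() reduces to the standalone sketch below (Graph and the two sink functions are stand-ins, not plugin code; the explicit length check guards the suffix compare against paths shorter than ".xml"):

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

struct Graph {};  // stand-in for MKLDNNGraph

void serializeToCout(const Graph&) { std::cout << "<per-node summary>\n"; }
void serializeToXML(const Graph&, const std::string& path) { std::cout << "writing " << path << "\n"; }

// Empty path: dump disabled; "cout": console output; "*.xml": file output.
void serialize(const Graph& graph, const std::string& path) {
    if (path.empty())
        return;
    if (path == "cout")
        serializeToCout(graph);
    else if (path.size() >= 4 && path.compare(path.size() - 4, 4, ".xml") == 0)
        serializeToXML(graph, path);
    else
        throw std::runtime_error("Unknown serialize format. Should be either 'cout' or '*.xml'. Got " + path);
}

int main() {
    serialize(Graph{}, "");               // no-op: debug dump disabled
    serialize(Graph{}, "cout");           // per-node console summary
    serialize(Graph{}, "exec_graph.xml"); // file output
}
```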

View File

@@ -6,11 +6,14 @@
#include "cpp/ie_cnn_network.h"
#include "mkldnn_graph.h"
#include "utils/debug_capabilities.h"
#include <memory>
namespace MKLDNNPlugin {
InferenceEngine::CNNNetwork dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph);
#ifdef CPU_DEBUG_CAPS
void serialize(const MKLDNNGraph &graph);
#endif // CPU_DEBUG_CAPS
} // namespace MKLDNNPlugin

View File

@@ -71,3 +71,22 @@ Example:
```sh
OV_CPU_BLOB_DUMP_NODE_NAME=".+" binary ...
```
## Graph serialization
The execution graph can be serialized via an environment variable (a usage example follows the options list):
```sh
OV_CPU_EXEC_GRAPH_PATH=<path> binary ...
```
Possible serialization options:
* cout
  Serializes the graph to console output.
* \<path\>.xml
  Serializes the graph into .xml and .bin files, which can be opened with, for example, the *netron* app.
* \<path\>.dot
  TBD. Will serialize the graph into a .dot file, which can be inspected with, for example, *graphviz* tools.
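
For example, assuming a debug-caps build of the plugin and using *benchmark_app* with a placeholder model as the binary:
```sh
# dump the execution graph to exec_graph.xml (plus exec_graph.bin)
OV_CPU_EXEC_GRAPH_PATH=exec_graph.xml ./benchmark_app -m model.xml

# or print a per-node summary to the console
OV_CPU_EXEC_GRAPH_PATH=cout ./benchmark_app -m model.xml
```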

View File

@@ -4,7 +4,44 @@
#pragma once
#ifdef CPU_DEBUG_CAPS
# define ENABLE_CPU_DEBUG_CAP(_x) _x;
#else
# define ENABLE_CPU_DEBUG_CAP(_x)
#endif
#include <map>
#include <string>
#include <vector>
#define ENABLE_CPU_DEBUG_CAP(_x) _x;
namespace MKLDNNPlugin {
namespace DebugCaps {
class Config {
public:
Config() {
readParam(blobDumpDir, "OV_CPU_BLOB_DUMP_DIR");
readParam(blobDumpFormat, "OV_CPU_BLOB_DUMP_FORMAT");
readParam(blobDumpNodeExecId, "OV_CPU_BLOB_DUMP_NODE_EXEC_ID");
readParam(blobDumpNodeType, "OV_CPU_BLOB_DUMP_NODE_TYPE");
readParam(blobDumpNodeName, "OV_CPU_BLOB_DUMP_NODE_NAME");
readParam(execGraphPath, "OV_CPU_EXEC_GRAPH_PATH");
}
std::string blobDumpDir;
std::string blobDumpFormat;
std::string blobDumpNodeExecId;
std::string blobDumpNodeType;
std::string blobDumpNodeName;
std::string execGraphPath;
private:
void readParam(std::string& param, const char* envVar) {
if (const char* envValue = std::getenv(envVar))
param = envValue;
}
};
} // namespace DebugCaps
} // namespace MKLDNNPlugin
#else // !CPU_DEBUG_CAPS
#define ENABLE_CPU_DEBUG_CAP(_x)
#endif // CPU_DEBUG_CAPS
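
The constructor above snapshots the environment exactly once; a minimal standalone illustration of that behaviour (DebugConfig is a stand-in for DebugCaps::Config, and setenv is POSIX):

```cpp
#include <cstdlib>
#include <iostream>
#include <string>

// Stand-in mirroring DebugCaps::Config: each field is filled from its
// environment variable once, at construction time.
struct DebugConfig {
    std::string execGraphPath;
    DebugConfig() {
        if (const char* v = std::getenv("OV_CPU_EXEC_GRAPH_PATH"))
            execGraphPath = v;
    }
};

int main() {
    setenv("OV_CPU_EXEC_GRAPH_PATH", "exec_graph.xml", /*overwrite=*/1);  // POSIX
    DebugConfig cfg;  // reads the variable here; later setenv calls do not affect cfg
    std::cout << cfg.execGraphPath << "\n";  // prints: exec_graph.xml
}
```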

View File

@@ -6,9 +6,10 @@
#include "node_dumper.h"
#include "mkldnn_node.h"
#include "utils/blob_dump.h"
#include "ie_common.h"
#include "utils/blob_dump.h"
#include "utils/debug_capabilities.h"
#include <array>
#include <regex>
#include <sstream>
@@ -18,27 +19,24 @@ using namespace InferenceEngine;
namespace MKLDNNPlugin {
NodeDumper::NodeDumper(int _count):
count(_count), dumpFormat(DUMP_FORMAT::BIN) {
const char* dumpDirEnv = std::getenv("OV_CPU_BLOB_DUMP_DIR");
if (dumpDirEnv)
dumpDirName = dumpDirEnv;
NodeDumper::NodeDumper(const DebugCaps::Config& config, const int _count)
: dumpFormat(DUMP_FORMAT::BIN)
, dumpDirName("mkldnn_dump")
, count(_count) {
if (!config.blobDumpDir.empty())
dumpDirName = config.blobDumpDir;
const char* dumpFormatEnv = std::getenv("OV_CPU_BLOB_DUMP_FORMAT");
if (dumpFormatEnv)
dumpFormat = parseDumpFormat(dumpFormatEnv);
if (!config.blobDumpFormat.empty())
dumpFormat = parseDumpFormat(config.blobDumpFormat);
const char* filter = std::getenv("OV_CPU_BLOB_DUMP_NODE_EXEC_ID");
if (filter)
dumpFilters[FILTER::BY_EXEC_ID] = filter;
if (!config.blobDumpNodeExecId.empty())
dumpFilters[FILTER::BY_EXEC_ID] = config.blobDumpNodeExecId;
filter = std::getenv("OV_CPU_BLOB_DUMP_NODE_TYPE");
if (filter)
dumpFilters[FILTER::BY_TYPE] = filter;
if (!config.blobDumpNodeType.empty())
dumpFilters[FILTER::BY_TYPE] = config.blobDumpNodeType;
filter = std::getenv("OV_CPU_BLOB_DUMP_NODE_NAME");
if (filter)
dumpFilters[FILTER::BY_NAME] = filter;
if (!config.blobDumpNodeName.empty())
dumpFilters[FILTER::BY_NAME] = config.blobDumpNodeName;
}
void NodeDumper::dumpInputBlobs(const MKLDNNNodePtr& node) const {

View File

@ -6,6 +6,7 @@
#include "mkldnn_node.h"
#include "utils/blob_dump.h"
#include "utils/debug_capabilities.h"
#include <unordered_map>
#include <string>
@@ -22,7 +23,7 @@ namespace MKLDNNPlugin {
*/
class NodeDumper {
public:
NodeDumper(int _count);
NodeDumper(const DebugCaps::Config& config, const int _count);
void dumpInputBlobs(const MKLDNNNodePtr &node) const;
void dumpOutputBlobs(const MKLDNNNodePtr &node) const;
@@ -41,11 +42,9 @@ private:
void formatNodeName(std::string& name) const;
DUMP_FORMAT dumpFormat;
std::string dumpDirName;
int count;
std::string dumpDirName = "mkldnn_dump";
enum FILTER {
BY_EXEC_ID,
BY_TYPE,