[IE Samples][OV2.0] final clean up of old API headers (#9494)

* final clean up of old API headers, compile_tool separated from samples

* make cpplint happy
Vladimir Dudnik
2022-01-13 11:12:20 +03:00
committed by GitHub
parent 113014161d
commit 28fb55dffe
6 changed files with 427 additions and 302 deletions


@@ -13,7 +13,6 @@
#include <string>
#include <vector>
#include "inference_engine.hpp"
#include "openvino/openvino.hpp"
// clang-format on
@@ -31,17 +30,6 @@ void readInputFilesArguments(std::vector<std::string>& files, const std::string&
*/
void parseInputFilesArguments(std::vector<std::string>& files);
-void processPrecision(InferenceEngine::CNNNetwork& network,
-                      const std::string& ip,
-                      const std::string& op,
-                      const std::string& iop);
-void processLayout(InferenceEngine::CNNNetwork& network,
-                   const std::string& il,
-                   const std::string& ol,
-                   const std::string& iol);
-void printInputAndOutputsInfo(const InferenceEngine::CNNNetwork& network);
void printInputAndOutputsInfo(const ov::Model& network);
void configurePrePostProcessing(std::shared_ptr<ov::Model>& function,
@@ -55,7 +43,6 @@ void configurePrePostProcessing(std::shared_ptr<ov::Model>& function,
const std::string& oml,
const std::string& ioml);
//--- API 2.0 -------------------------------------------------------------------------
void printInputAndOutputsInfo(const ov::Model& network);
void printInputAndOutputsInfoShort(const ov::Model& network);
void processPrecision(const ov::Model& network, const std::string& ip, const std::string& op, const std::string& iop);
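
A minimal usage sketch of the declarations that survive this cleanup. The model
path and precision strings are hypothetical, and ov::runtime::Core::read_model
is assumed to be available in this snapshot:

    ov::runtime::Core core;
    std::shared_ptr<ov::Model> model = core.read_model("model.xml");  // hypothetical path
    printInputAndOutputsInfo(*model);              // the ov::Model overload kept above
    processPrecision(*model, "FP16", "FP32", "");  // ip / op / iop, mirroring the removed IE variant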


@@ -17,14 +17,12 @@
#include <limits>
#include <list>
#include <map>
+#include <openvino/openvino.hpp>
#include <random>
#include <string>
#include <utility>
#include <vector>
// clang-format off
#include "inference_engine.hpp"
#include "openvino/openvino.hpp"
#include "slog.hpp"
// clang-format on
@@ -124,16 +122,6 @@ inline std::string fileExt(const std::string& filename) {
return filename.substr(pos + 1);
}
-inline slog::LogStream& operator<<(slog::LogStream& os, const InferenceEngine::Version& version) {
-    os << version.description << " version ......... ";
-    os << IE_VERSION_MAJOR << "." << IE_VERSION_MINOR << "." << IE_VERSION_PATCH << slog::endl;
-    os << "Build ........... ";
-    os << version.buildNumber << slog::endl;
-    return os;
-}
inline slog::LogStream& operator<<(slog::LogStream& os, const ov::Version& version) {
os << version.description << " version ......... ";
os << OPENVINO_VERSION_MAJOR << "." << OPENVINO_VERSION_MINOR << "." << OPENVINO_VERSION_PATCH << slog::endl;
@@ -144,16 +132,6 @@ inline slog::LogStream& operator<<(slog::LogStream& os, const ov::Version& versi
return os;
}
-inline slog::LogStream& operator<<(slog::LogStream& os,
-                                   const std::map<std::string, InferenceEngine::Version>& versions) {
-    for (auto&& version : versions) {
-        os << version.first << slog::endl;
-        os << version.second << slog::endl;
-    }
-    return os;
-}
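
With both InferenceEngine::Version stream operators removed, version reporting
goes through the ov::Version overloads kept just below. A rough usage sketch,
assuming ov::runtime::Core::get_versions is available in this snapshot ("CPU"
is an example device):

    ov::runtime::Core core;
    slog::info << core.get_versions("CPU") << slog::endl;  // plugin name plus its ov::Version
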
inline slog::LogStream& operator<<(slog::LogStream& os, const std::map<std::string, ov::Version>& versions) {
for (auto&& version : versions) {
os << version.first << slog::endl;
@@ -621,49 +599,6 @@ static UNUSED void printPerformanceCounts(const std::map<std::string, ov::runtim
std::cout.flags(fmt);
}
-// static UNUSED void printPerformanceCounts(InferenceEngine::InferRequest request,
-//                                           std::ostream& stream,
-//                                           std::string deviceName,
-//                                           bool bshowHeader = true) {
-//     auto performanceMap = request.GetPerformanceCounts();
-//     printPerformanceCounts(performanceMap, stream, deviceName, bshowHeader);
-//}
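
The commented-out IE-based printPerformanceCounts wrapper is deleted rather
than ported. A hedged sketch of the 2.0-style call, assuming request is an
ov::runtime::InferRequest and get_profiling_info() exists in this snapshot:

    auto profiling = request.get_profiling_info();  // std::vector<ov::runtime::ProfilingInfo>
    for (const auto& info : profiling) {
        std::cout << info.node_name << ": " << info.real_time.count() << " us" << std::endl;
    }
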
-inline std::map<std::string, std::string> getMapFullDevicesNames(InferenceEngine::Core& ie,
-                                                                 std::vector<std::string> devices) {
-    std::map<std::string, std::string> devicesMap;
-    InferenceEngine::Parameter p;
-    for (std::string& deviceName : devices) {
-        if (deviceName != "") {
-            try {
-                p = ie.GetMetric(deviceName, METRIC_KEY(FULL_DEVICE_NAME));
-                devicesMap.insert(std::pair<std::string, std::string>(deviceName, p.as<std::string>()));
-            } catch (InferenceEngine::Exception&) {
-            }
-        }
-    }
-    return devicesMap;
-}
-inline std::string getFullDeviceName(std::map<std::string, std::string>& devicesMap, std::string device) {
-    std::map<std::string, std::string>::iterator it = devicesMap.find(device);
-    if (it != devicesMap.end()) {
-        return it->second;
-    } else {
-        return "";
-    }
-}
-inline std::string getFullDeviceName(InferenceEngine::Core& ie, std::string device) {
-    InferenceEngine::Parameter p;
-    try {
-        p = ie.GetMetric(device, METRIC_KEY(FULL_DEVICE_NAME));
-        return p.as<std::string>();
-    } catch (InferenceEngine::Exception&) {
-        return "";
-    }
-}
/**
* @brief This class represents an object that is found by an object detection net
*/
@@ -1019,97 +954,6 @@ static UNUSED void addRectangles(unsigned char* data,
}
}
-inline std::size_t getTensorWidth(const InferenceEngine::TensorDesc& desc) {
-    const auto& layout = desc.getLayout();
-    const auto& dims = desc.getDims();
-    const auto& size = dims.size();
-    if ((size >= 2) && (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC ||
-                        layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC ||
-                        layout == InferenceEngine::Layout::OIHW || layout == InferenceEngine::Layout::GOIHW ||
-                        layout == InferenceEngine::Layout::OIDHW || layout == InferenceEngine::Layout::GOIDHW ||
-                        layout == InferenceEngine::Layout::CHW || layout == InferenceEngine::Layout::HW)) {
-        // Regardless of layout, dimensions are stored in fixed order
-        return dims.back();
-    } else {
-        IE_THROW() << "Tensor does not have width dimension";
-    }
-    return 0;
-}
-inline std::size_t getTensorHeight(const InferenceEngine::TensorDesc& desc) {
-    const auto& layout = desc.getLayout();
-    const auto& dims = desc.getDims();
-    const auto& size = dims.size();
-    if ((size >= 2) && (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC ||
-                        layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC ||
-                        layout == InferenceEngine::Layout::OIHW || layout == InferenceEngine::Layout::GOIHW ||
-                        layout == InferenceEngine::Layout::OIDHW || layout == InferenceEngine::Layout::GOIDHW ||
-                        layout == InferenceEngine::Layout::CHW || layout == InferenceEngine::Layout::HW)) {
-        // Regardless of layout, dimensions are stored in fixed order
-        return dims.at(size - 2);
-    } else {
-        IE_THROW() << "Tensor does not have height dimension";
-    }
-    return 0;
-}
-inline std::size_t getTensorChannels(const InferenceEngine::TensorDesc& desc) {
-    const auto& layout = desc.getLayout();
-    if (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC ||
-        layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC ||
-        layout == InferenceEngine::Layout::C || layout == InferenceEngine::Layout::CHW ||
-        layout == InferenceEngine::Layout::NC || layout == InferenceEngine::Layout::CN) {
-        // Regardless of layout, dimensions are stored in fixed order
-        const auto& dims = desc.getDims();
-        switch (desc.getLayoutByDims(dims)) {
-        case InferenceEngine::Layout::C:
-            return dims.at(0);
-        case InferenceEngine::Layout::NC:
-            return dims.at(1);
-        case InferenceEngine::Layout::CHW:
-            return dims.at(0);
-        case InferenceEngine::Layout::NCHW:
-            return dims.at(1);
-        case InferenceEngine::Layout::NCDHW:
-            return dims.at(1);
-        case InferenceEngine::Layout::SCALAR:   // [[fallthrough]]
-        case InferenceEngine::Layout::BLOCKED:  // [[fallthrough]]
-        default:
-            IE_THROW() << "Tensor does not have channels dimension";
-        }
-    } else {
-        IE_THROW() << "Tensor does not have channels dimension";
-    }
-    return 0;
-}
-inline std::size_t getTensorBatch(const InferenceEngine::TensorDesc& desc) {
-    const auto& layout = desc.getLayout();
-    if (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC ||
-        layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC ||
-        layout == InferenceEngine::Layout::NC || layout == InferenceEngine::Layout::CN) {
-        // Regardless of layout, dimensions are stored in fixed order
-        const auto& dims = desc.getDims();
-        switch (desc.getLayoutByDims(dims)) {
-        case InferenceEngine::Layout::NC:
-            return dims.at(0);
-        case InferenceEngine::Layout::NCHW:
-            return dims.at(0);
-        case InferenceEngine::Layout::NCDHW:
-            return dims.at(0);
-        case InferenceEngine::Layout::CHW:      // [[fallthrough]]
-        case InferenceEngine::Layout::C:        // [[fallthrough]]
-        case InferenceEngine::Layout::SCALAR:   // [[fallthrough]]
-        case InferenceEngine::Layout::BLOCKED:  // [[fallthrough]]
-        default:
-            IE_THROW() << "Tensor does not have channels dimension";
-        }
-    } else {
-        IE_THROW() << "Tensor does not have channels dimension";
-    }
-    return 0;
-}
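
The four layout-keyed dimension helpers get no direct replacement in this
commit; the idiomatic 2.0 substitute is ov::Layout plus the index helpers from
openvino/core/layout.hpp. A hedged sketch (the helper name and layout string
are examples, not part of this change):

    #include <cstdint>
    #include <openvino/openvino.hpp>

    inline std::size_t get_tensor_width(const ov::Shape& shape, const ov::Layout& layout) {
        std::int64_t idx = ov::layout::width_idx(layout);  // expected to throw if the layout has no width
        if (idx < 0)
            idx += static_cast<std::int64_t>(shape.size());  // negative indices count from the back
        return shape.at(static_cast<std::size_t>(idx));
    }
    // e.g. get_tensor_width(ov::Shape{1, 3, 224, 224}, ov::Layout("NCHW")) == 224
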
inline void showAvailableDevices() {
ov::runtime::Core core;
std::vector<std::string> devices = core.get_available_devices();
@@ -1132,13 +976,12 @@ inline void showAvailableDevices() {
*/
std::map<std::string, std::string> parseConfig(const std::string& configName, char comment = '#');
//--- API 2.0 --------------------------------------------------------------------------------------
inline std::string getFullDeviceName(ov::runtime::Core& core, std::string device) {
-    InferenceEngine::Parameter p;
+    ov::Any p;
    try {
        p = core.get_metric(device, METRIC_KEY(FULL_DEVICE_NAME));
        return p.as<std::string>();
-    } catch (InferenceEngine::Exception&) {
+    } catch (ov::Exception&) {
        return "";
    }
}
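
Usage stays the same as the removed IE overload, just with an ov::runtime::Core
("CPU" is an example device name):

    ov::runtime::Core core;
    std::cout << getFullDeviceName(core, "CPU") << std::endl;  // full name, or "" if the metric is unavailable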


@@ -174,29 +174,6 @@ InferenceEngine::Precision getPrecision(const std::string& value) {
return getPrecision(value, supported_precisions);
}
-void setPrecisions(const InferenceEngine::CNNNetwork& network, const std::string& iop) {
-    const auto user_precisions_map = parseArgMap(iop);
-    auto inputs = network.getInputsInfo();
-    auto outputs = network.getOutputsInfo();
-    for (auto&& item : user_precisions_map) {
-        const auto& layer_name = item.first;
-        const auto& user_precision = item.second;
-        const auto input = inputs.find(layer_name);
-        const auto output = outputs.find(layer_name);
-        if (input != inputs.end()) {
-            input->second->setPrecision(getPrecision(user_precision));
-        } else if (output != outputs.end()) {
-            output->second->setPrecision(getPrecision(user_precision));
-        } else {
-            throw std::logic_error(layer_name + " is not an input neither output");
-        }
-    }
-}
using supported_type_t = std::unordered_map<std::string, ov::element::Type>;
ov::element::Type getType(std::string value, const supported_type_t& supported_precisions) {
std::transform(value.begin(), value.end(), value.begin(), ::toupper);
@@ -225,29 +202,6 @@ ov::element::Type getType(const std::string& value) {
} // namespace
-void processPrecision(InferenceEngine::CNNNetwork& network,
-                      const std::string& ip,
-                      const std::string& op,
-                      const std::string& iop) {
-    if (!ip.empty()) {
-        const auto user_precision = getPrecision(ip);
-        for (auto&& layer : network.getInputsInfo()) {
-            layer.second->setPrecision(user_precision);
-        }
-    }
-    if (!op.empty()) {
-        auto user_precision = getPrecision(op);
-        for (auto&& layer : network.getOutputsInfo()) {
-            layer.second->setPrecision(user_precision);
-        }
-    }
-    if (!iop.empty()) {
-        setPrecisions(network, iop);
-    }
-}
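
In the 2.0 API, input and output element types are declared through the
preprocessing builder instead of being set on a CNNNetwork. A hedged sketch of
the counterpart, assuming ov::preprocess::PrePostProcessor behaves as in this
snapshot (single-input, single-output model; the element types are examples):

    #include <openvino/core/preprocess/pre_post_process.hpp>

    void set_io_types(std::shared_ptr<ov::Model>& model) {
        ov::preprocess::PrePostProcessor ppp(model);
        ppp.input().tensor().set_element_type(ov::element::u8);    // like the old "-ip" path
        ppp.output().tensor().set_element_type(ov::element::f32);  // like the old "-op" path
        model = ppp.build();  // rebuilds the model with the declared types
    }

For named tensors (the old "-iop" path), ppp.input("name") and ppp.output("name")
would select individual ports.
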
namespace {
using supported_layouts_t = std::unordered_map<std::string, InferenceEngine::Layout>;
using matchLayoutToDims_t = std::unordered_map<size_t, size_t>;
@@ -295,79 +249,8 @@ bool isMatchLayoutToDims(InferenceEngine::Layout layout, size_t dimension) {
return dimension == dims->second;
}
-void setLayouts(const InferenceEngine::CNNNetwork& network, const std::string iol) {
-    const auto user_layouts_map = parseArgMap(iol);
-    auto inputs = network.getInputsInfo();
-    auto outputs = network.getOutputsInfo();
-    for (auto&& item : user_layouts_map) {
-        const auto& layer_name = item.first;
-        const auto& user_layout = getLayout(item.second);
-        const auto input = inputs.find(layer_name);
-        const auto output = outputs.find(layer_name);
-        if (input != inputs.end()) {
-            if (!isMatchLayoutToDims(user_layout, input->second->getTensorDesc().getDims().size())) {
-                throw std::logic_error(item.second + " layout is not applicable to " + layer_name);
-            }
-            input->second->setLayout(user_layout);
-        } else if (output != outputs.end()) {
-            if (!isMatchLayoutToDims(user_layout, output->second->getTensorDesc().getDims().size())) {
-                throw std::logic_error(item.second + " layout is not applicable to " + layer_name);
-            }
-            output->second->setLayout(user_layout);
-        } else {
-            throw std::logic_error(layer_name + " is not an input neither output");
-        }
-    }
-}
} // namespace
-void processLayout(InferenceEngine::CNNNetwork& network,
-                   const std::string& il,
-                   const std::string& ol,
-                   const std::string& iol) {
-    if (!il.empty()) {
-        const auto layout = getLayout(il);
-        for (auto&& layer : network.getInputsInfo()) {
-            if (isMatchLayoutToDims(layout, layer.second->getTensorDesc().getDims().size())) {
-                layer.second->setLayout(layout);
-            }
-        }
-    }
-    if (!ol.empty()) {
-        const auto layout = getLayout(ol);
-        for (auto&& layer : network.getOutputsInfo()) {
-            if (isMatchLayoutToDims(layout, layer.second->getTensorDesc().getDims().size())) {
-                layer.second->setLayout(layout);
-            }
-        }
-    }
-    if (!iol.empty()) {
-        setLayouts(network, iol);
-    }
-}
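
Layouts follow the same pattern: instead of validating a layout string against
CNNNetwork dims, 2.0 code declares both sides on the preprocessing builder. A
hedged sketch (layout strings are examples; ppp and model as in the previous
sketch):

    ppp.input().tensor().set_layout(ov::Layout("NHWC"));  // layout of the data the app supplies
    ppp.input().model().set_layout(ov::Layout("NCHW"));   // layout the model actually expects
    model = ppp.build();
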
-void printInputAndOutputsInfo(const InferenceEngine::CNNNetwork& network) {
-    std::cout << "Network inputs:" << std::endl;
-    for (auto&& layer : network.getInputsInfo()) {
-        std::cout << " " << layer.first << " : " << layer.second->getPrecision() << " / "
-                  << layer.second->getLayout() << std::endl;
-    }
-    std::cout << "Network outputs:" << std::endl;
-    for (auto&& layer : network.getOutputsInfo()) {
-        std::cout << " " << layer.first << " : " << layer.second->getPrecision() << " / "
-                  << layer.second->getLayout() << std::endl;
-    }
-}
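
The 2.0 replacement iterates ov::Model ports instead of CNNNetwork info maps,
as printInputAndOutputsInfoShort below starts to show. A hedged sketch of the
equivalent loop, assuming get_any_name() is available on the ports:

    std::cout << "Network inputs:" << std::endl;
    for (const auto& input : model.inputs()) {
        std::cout << " " << input.get_any_name() << " : " << input.get_element_type() << std::endl;
    }
    std::cout << "Network outputs:" << std::endl;
    for (const auto& output : model.outputs()) {
        std::cout << " " << output.get_any_name() << " : " << output.get_element_type() << std::endl;
    }
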
//--- API 2.0 -------------------------------------------------------------------------------------
void printInputAndOutputsInfoShort(const ov::Model& network) {
std::cout << "Network inputs:" << std::endl;
for (auto&& param : network.get_parameters()) {