Benchmark_app batch calculation fix (#9554)

* BenchmarkApp - batch size calculation fix

* stylefix

* -ip/op fix

* stylefix
This commit is contained in:
Fedor Zharinov
2022-01-13 23:34:38 +03:00
committed by GitHub
parent 6a126ac6bb
commit 6c69535d6c
6 changed files with 42 additions and 79 deletions

View File

@@ -455,11 +455,16 @@ std::map<std::string, ov::runtime::TensorVector> getTensors(std::map<std::string
}
}
std::vector<size_t> batchSizes;
for (const auto& info : app_inputs_info) {
batchSizes.push_back(getBatchSize(info));
}
for (const auto& files : inputFiles) {
std::string input_name = files.first.empty() ? app_inputs_info[0].begin()->first : files.first;
size_t n_shape = 0, m_file = 0;
while (n_shape < app_inputs_info.size() || m_file < filesNum) {
size_t batchSize = getBatchSize(app_inputs_info[n_shape % app_inputs_info.size()]);
size_t batchSize = batchSizes[n_shape % app_inputs_info.size()];
size_t inputId = m_file % files.second.size();
auto input_info = app_inputs_info[n_shape % app_inputs_info.size()].at(input_name);

View File

@@ -467,13 +467,25 @@ int main(int argc, char* argv[]) {
next_step();
auto preproc = ov::preprocess::PrePostProcessor(model);
processPrecision(*model, FLAGS_ip, FLAGS_op, FLAGS_iop);
ov::runtime::ConfigMap user_precisions_map;
if (!FLAGS_iop.empty()) {
user_precisions_map = parseArgMap(FLAGS_iop);
}
const auto input_precision = FLAGS_ip.empty() ? ov::element::undefined : getPrecision2(FLAGS_ip);
const auto output_precision = FLAGS_op.empty() ? ov::element::undefined : getPrecision2(FLAGS_op);
for (auto& item : model->inputs()) {
// if precision for input set by user, then set it to app_inputs
const auto& name = item.get_any_name();
if (!FLAGS_ip.empty() || FLAGS_iop.find(name) != std::string::npos) {
if (user_precisions_map.count(name)) {
const auto precision = getPrecision2(user_precisions_map.at(name));
for (auto& info : app_inputs_info) {
info.at(name).type = item.get_element_type();
info.at(name).type = precision;
}
} else if (input_precision != ov::element::undefined) {
for (auto& info : app_inputs_info) {
info.at(name).type = input_precision;
}
} else if (app_inputs_info[0].at(name).isImage()) {
// image input, set U8
@@ -488,6 +500,17 @@ int main(int argc, char* argv[]) {
in.model().set_layout(app_inputs_info[0].at(name).layout);
}
for (auto& item : model->outputs()) {
// if precision for input set by user, then set it to app_inputs
const auto& name = item.get_any_name();
if (user_precisions_map.count(name)) {
const auto precision = getPrecision2(user_precisions_map.at(name));
preproc.output(name).tensor().set_element_type(precision);
} else if (output_precision != ov::element::undefined) {
preproc.output(name).tensor().set_element_type(output_precision);
}
}
model = preproc.build();
// Check if network has dynamic shapes
@@ -499,9 +522,10 @@ int main(int argc, char* argv[]) {
});
topology_name = model->get_friendly_name();
// use batch size according to provided layout and shapes (static case)
if (!isDynamicNetwork) {
batchSize = getModelInputBatchSize(*model);
// Calculate batch size according to provided layout and shapes (static case)
if (!isDynamicNetwork && app_inputs_info.size()) {
batchSize = getBatchSize(app_inputs_info.front());
slog::info << "Network batch size: " << batchSize << slog::endl;
} else if (batchSize == 0) {

View File

@@ -158,24 +158,14 @@ size_t getBatchSize(const benchmark_app::InputsInfo& inputs_info) {
}
}
if (batch_size == 0) {
slog::warn << "No batch dimension was found at any input, asssuming batch to be 1. Beware: this might affect "
"FPS calculation."
<< slog::endl;
batch_size = 1;
}
return batch_size;
}
// Returns the batch dimension of the model's first input, deduced from its
// layout (e.g. the "N" dimension of an NCHW layout).
// @param model  model whose first parameter's shape/layout is inspected
// @return the batch size, or 1 when it cannot be determined -- no parameters,
//         a layout without a batch dimension, or a dynamic shape all surface
//         here as an exception and fall into the default branch.
size_t getModelInputBatchSize(const ov::Model& model) {
    try {
        auto& param = model.get_parameters()[0];
        auto layout = param->get_layout();
        return param->get_shape().at(ov::layout::batch_idx(layout));
    } catch (...) {
        // Best-effort fallback: a missing batch dimension is not fatal, but the
        // reported FPS may be wrong, so warn the user. ("asssuming" typo fixed.)
        slog::warn
            << "No batch dimension was found, assuming batch to be 1. Beware: this might affect FPS calculation."
            << slog::endl;
        return 1;  // Default batch value
    }
}
std::string getShapeString(const ov::Shape& shape) {
std::stringstream ss;
ss << shape;

View File

@@ -58,7 +58,6 @@ std::vector<std::string> parseDevices(const std::string& device_string);
uint32_t deviceDefaultDeviceDurationInSeconds(const std::string& device);
std::map<std::string, std::string> parseNStreamsValuePerDevice(const std::vector<std::string>& devices,
const std::string& values_string);
size_t getModelInputBatchSize(const ov::Model& model);
std::string getShapeString(const ov::Shape& shape);
std::string getShapesString(const benchmark_app::PartialShapes& shapes);
size_t getBatchSize(const benchmark_app::InputsInfo& inputs_info);

View File

@@ -29,6 +29,7 @@ void readInputFilesArguments(std::vector<std::string>& files, const std::string&
* @return files updated vector of verified input files
*/
void parseInputFilesArguments(std::vector<std::string>& files);
std::map<std::string, std::string> parseArgMap(std::string argMap);
void printInputAndOutputsInfo(const ov::Model& network);
@@ -45,4 +46,4 @@ void configurePrePostProcessing(std::shared_ptr<ov::Model>& function,
void printInputAndOutputsInfo(const ov::Model& network);
void printInputAndOutputsInfoShort(const ov::Model& network);
void processPrecision(const ov::Model& network, const std::string& ip, const std::string& op, const std::string& iop);
ov::element::Type getPrecision2(const std::string& value);

View File

@@ -102,7 +102,6 @@ void parseInputFilesArguments(std::vector<std::string>& files) {
}
}
namespace {
std::vector<std::string> splitStringList(const std::string& str, char delim) {
if (str.empty())
return {};
@@ -200,8 +199,6 @@ ov::element::Type getType(const std::string& value) {
return getType(value, supported_types);
}
} // namespace
namespace {
using supported_layouts_t = std::unordered_map<std::string, InferenceEngine::Layout>;
using matchLayoutToDims_t = std::unordered_map<size_t, size_t>;
@@ -458,57 +455,4 @@ ov::element::Type getPrecision2(const std::string& value) {
};
return getPrecision(value, supported_precisions);
}
// Applies per-layer precision overrides from the "-iop" argument string (a
// "name1:precision1,name2:precision2" map, see parseArgMap()) to the matching
// model inputs and outputs.
// @param network  model whose parameter/result element types are updated
// @param iop      user-supplied layer->precision map string
// @throws std::logic_error if a named layer is neither an input nor an output
void setPrecisions(const ov::Model& network, const std::string& iop) {
    const auto user_precisions_map = parseArgMap(iop);
    for (auto&& item : user_precisions_map) {
        const auto& layer_name = item.first;
        const auto& user_precision = item.second;
        auto& params = network.get_parameters();
        auto& results = network.get_results();
        // Match by friendly name against both inputs and outputs; capture
        // layer_name in both lambdas for consistency (the original captured
        // `item` in one and `layer_name` in the other).
        const auto input =
            std::find_if(params.begin(), params.end(), [&layer_name](const std::shared_ptr<ov::op::v0::Parameter>& a) {
                return a->get_friendly_name() == layer_name;
            });
        const auto output =
            std::find_if(results.begin(), results.end(), [&layer_name](const std::shared_ptr<ov::op::v0::Result>& a) {
                return a->get_friendly_name() == layer_name;
            });
        if (input != params.end()) {
            (*input)->set_element_type(getPrecision2(user_precision));
        } else if (output != results.end()) {
            // get_output_size() returns size_t; use size_t to avoid a
            // signed/unsigned comparison.
            for (size_t i = 0; i < (*output)->get_output_size(); i++) {
                (*output)->set_output_type(i, getPrecision2(user_precision), (*output)->get_output_shape(i));
            }
        } else {
            throw std::logic_error(layer_name + " is neither an input nor an output");
        }
    }
}
// Applies command-line precision overrides to the model:
//  - ip:  a single precision applied to every input
//  - op:  a single precision applied to every output
//  - iop: per-layer "name:precision" overrides (delegated to setPrecisions())
// Empty strings mean "no override" for the corresponding option.
void processPrecision(const ov::Model& network, const std::string& ip, const std::string& op, const std::string& iop) {
    if (!ip.empty()) {
        const auto user_precision = getPrecision2(ip);
        for (auto&& layer : network.get_parameters()) {
            layer->set_element_type(user_precision);
        }
    }
    if (!op.empty()) {
        const auto user_precision = getPrecision2(op);
        for (auto&& layer : network.get_results()) {
            // get_output_size() returns size_t; use size_t to avoid a
            // signed/unsigned comparison.
            for (size_t i = 0; i < layer->get_output_size(); i++) {
                layer->set_output_type(i, user_precision, layer->get_output_shape(i));
            }
        }
    }
    if (!iop.empty()) {
        setPrecisions(network, iop);
    }
}
}