[IE Samples] Improved processing of outputs for models with more than one output (#10737)

* Improved processing of outputs for models with more than one output

* Fixed condition

* Added a check on the number of output/reference files
Maxim Gordeev
2022-03-03 16:35:41 +03:00
committed by GitHub
parent 28889c4833
commit f81f819ecd
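
For context, the first hunk below adds a guard that the number of output/reference files supplied on the command line cannot exceed the number of model outputs. The following is a minimal standalone sketch of the same check using the OpenVINO 2.0 C++ API; the helper name check_reference_files and its arguments are illustrative, not part of the sample:

#include <openvino/openvino.hpp>

#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical helper mirroring the guard added in this commit: when reference
// files are given, there must not be more of them than the model has outputs.
void check_reference_files(const ov::CompiledModel& compiled_model,
                           const std::vector<std::string>& reference_name_files) {
    // With no reference files the sample behaves as if there is a single output.
    const size_t count_file = reference_name_files.empty() ? 1 : reference_name_files.size();
    if (count_file > compiled_model.outputs().size()) {
        throw std::logic_error(
            "The number of output/reference files is not equal to the number of network outputs.");
    }
}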

@@ -342,11 +342,17 @@ int main(int argc, char* argv[]) {
         }
         count_file = reference_name_files.empty() ? 1 : reference_name_files.size();
     }
+    if (count_file > executableNet.outputs().size()) {
+        throw std::logic_error(
+            "The number of output/reference files is not equal to the number of network outputs.");
+    }
     // -----------------------------------------------------------------------------------------------------
     // --------------------------- Step 5. Do inference --------------------------------------------------------
     std::vector<std::vector<uint8_t>> ptrUtterances;
-    std::vector<std::vector<uint8_t>> vectorPtrScores((outputs.size() == 0) ? 1 : outputs.size());
-    std::vector<uint16_t> numScoresPerOutput((outputs.size() == 0) ? 1 : outputs.size());
+    std::vector<std::vector<uint8_t>> vectorPtrScores((outputs.size() == 0) ? executableNet.outputs().size()
+                                                                            : outputs.size());
+    std::vector<uint16_t> numScoresPerOutput((outputs.size() == 0) ? executableNet.outputs().size()
+                                                                   : outputs.size());
     std::vector<std::vector<uint8_t>> vectorPtrReferenceScores(reference_name_files.size());
     std::vector<ScoreErrorT> vectorFrameError(reference_name_files.size()),
         vectorTotalError(reference_name_files.size());
@@ -474,8 +480,9 @@ int main(int argc, char* argv[]) {
         inferRequest.inferRequest.wait();
         if (inferRequest.frameIndex >= 0)
             for (size_t next_output = 0; next_output < count_file; next_output++) {
-                std::string outputName = (outputs.size() == 0) ? executableNet.output(0).get_any_name()
-                                                               : output_names[next_output];
+                std::string outputName = (outputs.size() == 0)
+                                             ? executableNet.output(next_output).get_any_name()
+                                             : output_names[next_output];
                 auto dims = executableNet.output(outputName).get_shape();
                 numScoresPerOutput[next_output] = std::accumulate(std::begin(dims),
                                                                   std::end(dims),
@@ -493,10 +500,6 @@ int main(int argc, char* argv[]) {
                 ov::Tensor outputBlob =
                     inferRequest.inferRequest.get_tensor(executableNet.output(outputName));
-                if (!outputs.empty()) {
-                    outputBlob =
-                        inferRequest.inferRequest.get_tensor(executableNet.output(outputName));
-                }
                 // locked memory holder should be alive all time while access to its buffer happens
                 auto byteSize = numScoresPerOutput[next_output] * sizeof(float);
                 std::memcpy(outputFrame, outputBlob.data<float>(), byteSize);
@@ -654,8 +657,8 @@ int main(int argc, char* argv[]) {
             }
             if (!FLAGS_r.empty()) {
                 // print statistical score error
-                std::string outputName =
-                    (outputs.size() == 0) ? executableNet.output(0).get_any_name() : output_names[next_output];
+                std::string outputName = (outputs.size() == 0) ? executableNet.output(next_output).get_any_name()
+                                                               : output_names[next_output];
                 std::cout << "Output name: " << outputName << std::endl;
                 std::cout << "Number scores per frame: " << numScoresPerOutput[next_output] / batchSize << std::endl
                           << std::endl;
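
To make the per-output handling above easier to follow, here is a small self-contained sketch (not part of the sample) of reading scores for every output of a compiled multi-output model after inference has finished; the helper name collect_scores is illustrative:

#include <openvino/openvino.hpp>

#include <vector>
#include <string>

// Hypothetical helper: iterate over all model outputs (instead of always output(0))
// and copy each output tensor into a per-output score buffer.
std::vector<std::vector<float>> collect_scores(const ov::CompiledModel& compiled_model,
                                               ov::InferRequest& infer_request) {
    std::vector<std::vector<float>> scores(compiled_model.outputs().size());
    for (size_t next_output = 0; next_output < compiled_model.outputs().size(); ++next_output) {
        // Resolve the output by index, then fetch its tensor by port, as the fixed sample does.
        const std::string output_name = compiled_model.output(next_output).get_any_name();
        ov::Tensor output_blob = infer_request.get_tensor(compiled_model.output(output_name));
        const size_t num_scores = ov::shape_size(output_blob.get_shape());
        scores[next_output].assign(output_blob.data<float>(), output_blob.data<float>() + num_scores);
    }
    return scores;
}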