[IE CLDNN] Added inference precision into execution graph (#2913)

This commit is contained in:
Vladimir Paramuzov 2020-11-05 00:13:49 +03:00 committed by GitHub
parent d3ecfe56d4
commit 7c1690951c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 75 additions and 7 deletions

View File

@ -178,6 +178,7 @@ InferenceEngine::CNNNetwork CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(std::ve
{ "tile", "Tile" },
{ "resample", "Resample" },
{ "interp", "Interp" },
{ "reduce", "Reduce" },
{ "reduce_max", "ReduceMax" },
{ "reduce_min", "ReduceMin" },
{ "reduce_mean", "ReduceMean" },
@ -364,11 +365,13 @@ InferenceEngine::CNNNetwork CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(std::ve
std::map<std::string, std::string> info;
Precision prec = data_type_to_precision(prim_info.output_layout.data_type);
Precision inference_precision = data_type_to_precision(prim_info.runtime_precision);
info[ExecGraphInfoSerialization::OUTPUT_PRECISIONS] = prec.name();
info[ExecGraphInfoSerialization::LAYER_TYPE] = to_IE_type_name(prim_info.type_id);
info[ExecGraphInfoSerialization::OUTPUT_LAYOUTS] = prim_info.layout_str;
info[ExecGraphInfoSerialization::EXECUTION_ORDER] = std::to_string(prim_info.exec_id);
info[ExecGraphInfoSerialization::IMPL_TYPE] = prim_info.kernel_id;
info[ExecGraphInfoSerialization::RUNTIME_PRECISION] = inference_precision.name();
std::vector<std::string> originalNames{find_origin_layers(prim_info.original_id)};
for (auto& fused_id : prim_info.c_fused_ids) {

View File

@ -161,6 +161,30 @@ struct data_type_traits {
return std::string("invalid data type: " + std::to_string(static_cast<int>(data_type)));
}
}
// Returns the "larger" of two data types.
// Rules: binary is always the smallest; otherwise the wider type wins;
// on equal widths, a floating point type is preferred over an integer one.
static data_types max_type(data_types dt1, data_types dt2) {
    // Binary loses to anything else (note: bin vs bin yields dt2).
    if (dt1 == data_types::bin)
        return dt2;
    if (dt2 == data_types::bin)
        return dt1;

    const auto s1 = size_of(dt1);
    const auto s2 = size_of(dt2);
    if (s1 != s2)
        return (s1 > s2) ? dt1 : dt2;

    // Same byte width: prefer floating point; ties fall back to dt1.
    return is_floating_point(dt2) ? dt2 : dt1;
}
// True for the (signed/unsigned) 8-bit integer types used by quantized kernels.
static bool is_quantized(data_types dt) {
    switch (dt) {
        case data_types::u8:
        case data_types::i8:
            return true;
        default:
            return false;
    }
}
template <typename T>
static T max(data_types data_type) {
switch (data_type) {

View File

@ -130,6 +130,7 @@ struct primitive_info {
const layout& output_layout,
const std::string& layout_str,
const std::string& kernel_id,
const data_types& runtime_precision,
bool is_cpu,
int exec_id)
: original_id(original_id),
@ -140,6 +141,7 @@ struct primitive_info {
output_layout(output_layout),
layout_str(layout_str),
kernel_id(kernel_id),
runtime_precision(runtime_precision),
is_cpu(is_cpu),
exec_id(exec_id) {}
@ -151,6 +153,7 @@ struct primitive_info {
layout output_layout;
std::string layout_str;
std::string kernel_id;
data_types runtime_precision;
bool is_cpu;
int exec_id;
};

View File

@ -60,6 +60,7 @@
#include "reorder_inst.h"
#include "split_inst.h"
#include "mvn_inst.h"
#include "gemm_inst.h"
#include "reduce_inst.h"
#include "strided_slice_inst.h"
#include "to_string_utils.h"
@ -1018,6 +1019,45 @@ void program_impl::dump_program(const char* stage,
program_impl::primitives_info program_impl::get_current_stage_info() const {
primitives_info info;
// Derives the "runtime precision" reported in the execution graph for a node,
// based on its input/output data types. Heuristics per node type below.
auto get_inference_precision = [](program_node& node) -> data_types {
// Inputs have no dependencies: their own output type is the precision.
if (node.is_input()) {
return node.get_output_layout().data_type;
}
// Collect the output data types of all dependencies (i.e. this node's inputs).
std::vector<data_types> input_dts;
for (auto& dep : node.get_dependencies()) {
input_dts.push_back(dep->get_output_layout().data_type);
}
data_types output_dt = node.get_output_layout().data_type;
// Non-input node must have at least one dependency.
assert(!input_dts.empty());
if (node.is_type<reorder>()) {
// If reorder has different input/output types - pick the max one as runtime precision
return data_type_traits::max_type(input_dts[0], output_dt);
} else if (node.is_type<quantize>()) {
// Quantize producing u8/i8 runs in that quantized precision;
// otherwise fall back to the wider of input/output types.
if (data_type_traits::is_quantized(output_dt))
return output_dt;
return data_type_traits::max_type(input_dts[0], output_dt);
} else if (node.is_type<eltwise>()) {
// Eltwise: runtime precision is the widest type among all inputs.
auto max_dt = input_dts[0];
for (size_t i = 1; i < input_dts.size(); i++) {
max_dt = data_type_traits::max_type(max_dt, input_dts[i]);
}
return max_dt;
} else if (node.is_type<convolution>() || node.is_type<deconvolution>() || node.is_type<fully_connected>() || node.is_type<gemm>()) {
// Weighted ops need data + weights inputs to decide the precision.
if (input_dts.size() < 2) {
throw std::runtime_error("[clDNN] Invalid inputs count in node " + node.id() + " during stage info collection. Expected >= 2 inputs");
}
// Both data and weights quantized -> quantized execution (use data type);
// otherwise the wider of the two.
if (data_type_traits::is_quantized(input_dts[0]) && data_type_traits::is_quantized(input_dts[1])) {
return input_dts[0];
} else {
return data_type_traits::max_type(input_dts[0], input_dts[1]);
}
}
// Default: assume the node executes in its first input's precision.
return input_dts[0];
};
// Get info for actually executed graph nodes
int exec_id = 0;
for (auto& p : get_processing_order()) {
@ -1047,6 +1087,7 @@ program_impl::primitives_info program_impl::get_current_stage_info() const {
p->get_output_layout(),
fmt_to_str(p->get_output_layout().format),
p->selected_impl ? p->selected_impl->get_kernel_name() : "",
get_inference_precision(*p),
p->selected_impl ? p->selected_impl->is_cpu() : false,
exec_id++);

View File

@ -4151,7 +4151,6 @@ TEST(convolution_f32_fw_gpu, byte_activation) {
build_options opts;
opts.set_option(build_option::optimize_data(true));
opts.set_option(build_option::graph_dumps_dir("graph"));
set_values<char>(input, { 1, 2, -3, 4, -5,
2, -2, 3, -4, 6,

View File

@ -98,7 +98,7 @@ TEST(spatial_concatenate_f32_gpu, test02) {
});
const auto expected_output = std::vector<float>{
1.0f, 2.0f,
1.0f, 2.0f,
3.0f, 4.0f,
5.0f, 6.0f,
7.0f, 8.0f
@ -445,7 +445,7 @@ TEST(spatial_concatenate_f32_gpu, inputs3d_axis_x) {
ASSERT_EQ(output_layout.size.spatial[1], input1.get_layout().size.spatial[1]);
ASSERT_EQ(output_layout.size.spatial[2], input1.get_layout().size.spatial[2]);
ASSERT_EQ(output_mem.get_layout().get_linear_size(), expected_output.size());
ASSERT_EQ(output_mem.get_layout().get_linear_size(), expected_output.size());
{
auto out_ptr = output_mem.pointer<const float>();
@ -632,9 +632,7 @@ TEST(spatial_concatenate_f32_gpu, inputs3d_axis_b) {
tpl.add(input_layout("in2", input2.get_layout()));
tpl.add(concatenation("conc", { "in1", "in2" }, concatenation::along_b));
build_options bo;
bo.set_option(build_option::graph_dumps_dir("C:\\graphs"));
network net(eng, tpl, bo);
network net(eng, tpl);
net.set_input_data("in1", input1);
net.set_input_data("in2", input2);
@ -791,4 +789,4 @@ TEST(spatial_concatenate_f32_gpu, inputs3d_3_uneven_axis_b) {
EXPECT_FLOAT_EQ(value, expected_output[idx++]);
}
}
}
}