[OV2.0] PrePostProcessor dump to output stream for debugging purposes (#9580)
* Calculate model layout based on 'tensor' layout and convert steps Previously, 'model layout' is set to '...' by default, thus no shape conversion happened when tensor layout is set to 'NHWC', then there was explicit convert_layout "NCHW" Now "model layout" is calculated based on tensor layout and conversion steps: Examples: 1) Tensor: NHWC, Convert: NCHW. Result: NCHW 2) Tensor: NHWC, Convert: 0312. Result: NCHW * Initial move of tensor data calculation * Moved 'impls' to new file * Postprocessing + unit tests * clang-format fix * Added more details to preprocessing nodes - Mean/Scale - will print mean/scale values - Convert type - will print type - Convert layout - will print destination layout - Convert color - will print destination color It is needed to troubleshoot the problems. If error occurs, message will not display last op's target shape/layout/type * Add python bindings * update tests * Added memory type to dump if set * Code style fix * unity build fix * Dump tensor if only memory type is set * Added debug print * Fix Param->Result case Previously, layout was set by preprocessing set to old parameter as well This is incorrect because in case of exception layout info will not be reverted In this case old Result pointed to old Parameter and was able to preserve runtime info After fixing of this, case Param->Result was broken if revalidation is not triggerred Fix is to detect 'Result' as a consumer of some parameter and force revalidation in this case * Revert occasionally committed line * And one more line
This commit is contained in:
parent
e78ada1178
commit
896532ace2
@ -423,4 +423,14 @@ void regclass_graph_PrePostProcessor(py::module m) {
|
||||
},
|
||||
py::arg("output_index"));
|
||||
proc.def("build", &ov::preprocess::PrePostProcessor::build);
|
||||
|
||||
proc.def("__str__", [](const ov::preprocess::PrePostProcessor& self) -> std::string {
|
||||
std::stringstream ss;
|
||||
ss << self;
|
||||
return ss.str();
|
||||
});
|
||||
|
||||
proc.def("__repr__", [](const ov::preprocess::PrePostProcessor& self) -> std::string {
|
||||
return "<PrePostProcessor: " + py::cast(self).attr("__str__")().cast<std::string>() + ">";
|
||||
});
|
||||
}
|
||||
|
@ -431,3 +431,33 @@ def test_ngraph_preprocess_model():
|
||||
output = computation(input_data, input_data)
|
||||
|
||||
assert np.equal(output, expected_output).all()
|
||||
|
||||
|
||||
def test_ngraph_preprocess_dump():
|
||||
shape = [1, 3, 224, 224]
|
||||
parameter_a = ops.parameter(shape, dtype=np.float32, name="RGB_input")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
|
||||
p = PrePostProcessor(function)
|
||||
p.input().tensor()\
|
||||
.set_layout(ov.Layout("NHWC"))\
|
||||
.set_element_type(Type.u8)\
|
||||
.set_spatial_dynamic_shape()
|
||||
p.input().preprocess()\
|
||||
.convert_element_type(Type.f32)\
|
||||
.reverse_channels()\
|
||||
.mean([1, 2, 3])\
|
||||
.scale([4, 5, 6])\
|
||||
.resize(ResizeAlgorithm.RESIZE_LINEAR)
|
||||
p.input().model().set_layout(ov.Layout("NCHW"))
|
||||
p_str = str(p)
|
||||
print(p)
|
||||
assert "Pre-processing steps (5):" in p_str
|
||||
assert "convert type (f32):" in p_str
|
||||
assert "reverse channels:" in p_str
|
||||
assert "mean (1,2,3):" in p_str
|
||||
assert "scale (4,5,6):" in p_str
|
||||
assert "resize to model width/height:" in p_str
|
||||
assert "Implicit pre-processing steps (1):" in p_str
|
||||
assert "convert layout " + ov.Layout("NCHW").to_string() in p_str
|
||||
|
@ -94,7 +94,21 @@ public:
|
||||
///
|
||||
/// \return Function with added pre/post-processing operations
|
||||
std::shared_ptr<Model> build();
|
||||
|
||||
private:
|
||||
friend OPENVINO_API std::ostream& operator<<(std::ostream& str, const PrePostProcessor& prePostProcessor);
|
||||
void dump(std::ostream&) const;
|
||||
};
|
||||
|
||||
/// \brief Inserts a human-readable representation of a PrePostProcessors into an output stream. The output to the
|
||||
/// stream is in "informal" notation and can be used for debugging purposes
|
||||
///
|
||||
/// \param str The output stream targeted for insertion.
|
||||
///
|
||||
/// \param prePostProcessor The shape to be inserted into output stream.
|
||||
///
|
||||
/// \return A reference to same output stream after insertion.
|
||||
OPENVINO_API std::ostream& operator<<(std::ostream& str, const PrePostProcessor& prePostProcessor);
|
||||
|
||||
} // namespace preprocess
|
||||
} // namespace ov
|
||||
|
@ -7,232 +7,12 @@
|
||||
#include "color_utils.hpp"
|
||||
#include "function_guard.hpp"
|
||||
#include "layout_utils.hpp"
|
||||
#include "ngraph/opsets/opset1.hpp"
|
||||
#include "openvino/core/model.hpp"
|
||||
#include "preprocess_steps_impl.hpp"
|
||||
#include "preprocess_impls.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace preprocess {
|
||||
|
||||
class TensorInfoImplBase {
|
||||
public:
|
||||
TensorInfoImplBase() = default;
|
||||
|
||||
void set_element_type(const element::Type& type) {
|
||||
m_type = type;
|
||||
m_type_set = true;
|
||||
}
|
||||
bool is_element_type_set() const {
|
||||
return m_type_set;
|
||||
}
|
||||
const element::Type& get_element_type() const {
|
||||
return m_type;
|
||||
}
|
||||
|
||||
void set_layout(const Layout& layout) {
|
||||
m_layout = layout;
|
||||
m_layout_set = true;
|
||||
}
|
||||
bool is_layout_set() const {
|
||||
return m_layout_set;
|
||||
}
|
||||
const Layout& get_layout() const {
|
||||
return m_layout;
|
||||
}
|
||||
|
||||
protected:
|
||||
element::Type m_type = element::dynamic;
|
||||
bool m_type_set = false;
|
||||
|
||||
Layout m_layout = Layout();
|
||||
bool m_layout_set = false;
|
||||
};
|
||||
|
||||
/// \brief InputTensorInfoImpl - internal data structure
|
||||
class InputTensorInfo::InputTensorInfoImpl : public TensorInfoImplBase {
|
||||
public:
|
||||
InputTensorInfoImpl() = default;
|
||||
|
||||
bool is_spatial_shape_set() const {
|
||||
return m_spatial_shape_set;
|
||||
}
|
||||
|
||||
int get_spatial_width() const {
|
||||
return m_spatial_width;
|
||||
}
|
||||
|
||||
int get_spatial_height() const {
|
||||
return m_spatial_height;
|
||||
}
|
||||
|
||||
bool is_spatial_shape_dynamic() const {
|
||||
return m_spatial_shape_set && m_spatial_width == -1 && m_spatial_height == -1;
|
||||
}
|
||||
|
||||
void set_spatial_dynamic_shape() {
|
||||
OPENVINO_ASSERT(!m_shape_set, "'set_spatial_dynamic_shape' and 'set_shape' shall not be used together");
|
||||
m_spatial_shape_set = true;
|
||||
m_spatial_width = -1;
|
||||
m_spatial_height = -1;
|
||||
}
|
||||
|
||||
void set_spatial_static_shape(size_t height, size_t width) & {
|
||||
OPENVINO_ASSERT(!m_shape_set, "'set_spatial_static_shape' and 'set_shape' shall not be used together");
|
||||
m_spatial_shape_set = true;
|
||||
m_spatial_height = static_cast<int>(height);
|
||||
m_spatial_width = static_cast<int>(width);
|
||||
}
|
||||
|
||||
const ColorFormat& get_color_format() const {
|
||||
return m_color_format;
|
||||
}
|
||||
|
||||
void set_color_format(ColorFormat format, const std::vector<std::string>& sub_names) {
|
||||
auto info = ColorFormatInfo::get(format);
|
||||
if (info->planes_count() == 1) {
|
||||
OPENVINO_ASSERT(sub_names.empty(),
|
||||
"Plane names are not allowed for single plane color format '",
|
||||
color_format_name(format),
|
||||
"'");
|
||||
} else if (!sub_names.empty()) {
|
||||
OPENVINO_ASSERT(sub_names.size() == info->planes_count(),
|
||||
"Number of sub-names (",
|
||||
sub_names.size(),
|
||||
") shall match with number of planes for '",
|
||||
color_format_name(format),
|
||||
"' color format (",
|
||||
info->planes_count(),
|
||||
")");
|
||||
}
|
||||
m_planes_sub_names = sub_names;
|
||||
m_color_format = format;
|
||||
}
|
||||
|
||||
const std::vector<std::string>& planes_sub_names() const {
|
||||
return m_planes_sub_names;
|
||||
}
|
||||
|
||||
void set_memory_type(const std::string& mem_type) {
|
||||
m_memory_type_set = true;
|
||||
m_memory_type = mem_type;
|
||||
}
|
||||
|
||||
const std::string& get_memory_type() const {
|
||||
return m_memory_type;
|
||||
}
|
||||
|
||||
bool is_memory_type_set() const {
|
||||
return m_memory_type_set;
|
||||
}
|
||||
|
||||
void set_shape(const PartialShape& shape) {
|
||||
OPENVINO_ASSERT(
|
||||
!m_spatial_shape_set,
|
||||
"'set_spatial_static_shape', 'set_spatial_dynamic_shape', 'set_shape' shall not be used together");
|
||||
m_shape = shape;
|
||||
m_shape_set = true;
|
||||
}
|
||||
|
||||
bool is_shape_set() const {
|
||||
return m_shape_set;
|
||||
}
|
||||
|
||||
const PartialShape& get_shape() const {
|
||||
return m_shape;
|
||||
}
|
||||
|
||||
private:
|
||||
ColorFormat m_color_format = ColorFormat::UNDEFINED;
|
||||
std::vector<std::string> m_planes_sub_names;
|
||||
|
||||
element::Type m_type = element::dynamic;
|
||||
bool m_type_set = false;
|
||||
|
||||
Layout m_layout = Layout();
|
||||
bool m_layout_set = false;
|
||||
|
||||
int m_spatial_width = -1;
|
||||
int m_spatial_height = -1;
|
||||
bool m_spatial_shape_set = false;
|
||||
|
||||
std::string m_memory_type = {};
|
||||
bool m_memory_type_set = false;
|
||||
|
||||
PartialShape m_shape = {};
|
||||
bool m_shape_set = false;
|
||||
};
|
||||
|
||||
class OutputTensorInfo::OutputTensorInfoImpl : public TensorInfoImplBase {};
|
||||
|
||||
/// \brief ModelInfoImpl - internal data structure
|
||||
class ModelInfoImpl {
|
||||
public:
|
||||
ModelInfoImpl() = default;
|
||||
|
||||
void set_layout(const Layout& layout) {
|
||||
m_layout = layout;
|
||||
m_layout_set = true;
|
||||
}
|
||||
bool is_layout_set() const {
|
||||
return m_layout_set;
|
||||
}
|
||||
const Layout& get_layout() const {
|
||||
return m_layout;
|
||||
}
|
||||
|
||||
private:
|
||||
Layout m_layout = Layout();
|
||||
bool m_layout_set = false;
|
||||
};
|
||||
|
||||
class InputModelInfo::InputModelInfoImpl : public ModelInfoImpl {};
|
||||
|
||||
class OutputModelInfo::OutputModelInfoImpl : public ModelInfoImpl {};
|
||||
|
||||
/// \brief InputInfoImpl - internal data structure
|
||||
struct InputInfo::InputInfoImpl {
|
||||
InputInfoImpl() = default;
|
||||
|
||||
std::unique_ptr<InputTensorInfo::InputTensorInfoImpl>& get_tensor_data() {
|
||||
return m_tensor_info.m_impl;
|
||||
}
|
||||
|
||||
std::unique_ptr<PreProcessSteps::PreProcessStepsImpl>& get_preprocess() {
|
||||
return m_preprocess.m_impl;
|
||||
}
|
||||
|
||||
std::unique_ptr<InputModelInfo::InputModelInfoImpl>& get_model() {
|
||||
return m_model_data.m_impl;
|
||||
}
|
||||
|
||||
InputTensorInfo m_tensor_info;
|
||||
PreProcessSteps m_preprocess;
|
||||
InputModelInfo m_model_data;
|
||||
std::shared_ptr<op::v0::Parameter> m_resolved_param;
|
||||
};
|
||||
|
||||
/// \brief OutputInfoImpl - internal data structure
|
||||
struct OutputInfo::OutputInfoImpl {
|
||||
OutputInfoImpl() = default;
|
||||
|
||||
std::unique_ptr<OutputTensorInfo::OutputTensorInfoImpl>& get_tensor_data() {
|
||||
return m_tensor_info.m_impl;
|
||||
}
|
||||
|
||||
std::unique_ptr<PostProcessSteps::PostProcessStepsImpl>& get_postprocess() {
|
||||
return m_postprocess.m_impl;
|
||||
}
|
||||
|
||||
std::unique_ptr<OutputModelInfo::OutputModelInfoImpl>& get_model_data() {
|
||||
return m_model_info.m_impl;
|
||||
}
|
||||
|
||||
OutputTensorInfo m_tensor_info;
|
||||
PostProcessSteps m_postprocess;
|
||||
OutputModelInfo m_model_info;
|
||||
ov::Output<ov::Node> m_output_node;
|
||||
};
|
||||
|
||||
//-------------- InputInfo ------------------
|
||||
InputInfo::InputInfo() : m_impl(std::unique_ptr<InputInfoImpl>(new InputInfoImpl)) {}
|
||||
InputInfo::InputInfo(InputInfo&& other) noexcept = default;
|
||||
@ -359,196 +139,37 @@ OutputInfo& PrePostProcessor::output(const std::string& tensor_name) {
|
||||
return m_impl->find_output(tensor_name);
|
||||
}
|
||||
|
||||
std::ostream& operator<<(std::ostream& str, const PrePostProcessor& prePostProcessor) {
|
||||
try {
|
||||
prePostProcessor.dump(str);
|
||||
} catch (ov::AssertFailure& ex) {
|
||||
str << std::endl << "Error occurred: " << ex.what();
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
void PrePostProcessor::dump(std::ostream& str) const {
|
||||
auto model = m_impl->m_function;
|
||||
std::tuple<std::unordered_set<std::string>, bool> existing_names{std::unordered_set<std::string>{}, false};
|
||||
for (const auto& input_info : m_impl->m_inputs) {
|
||||
input_info.m_impl->dump(str, model, existing_names);
|
||||
}
|
||||
for (const auto& output_info : m_impl->m_outputs) {
|
||||
output_info.m_impl->dump(str);
|
||||
}
|
||||
}
|
||||
|
||||
std::shared_ptr<Model> PrePostProcessor::build() {
|
||||
auto function = m_impl->m_function;
|
||||
FunctionGuard guard(function);
|
||||
std::tuple<std::unordered_set<std::string>, bool> existing_names{std::unordered_set<std::string>{}, false};
|
||||
bool tensor_data_updated = false;
|
||||
for (const auto& input_info : m_impl->m_inputs) {
|
||||
auto& input = input_info.m_impl;
|
||||
// Set parameter layout from 'model' information
|
||||
if (input->get_model()->is_layout_set()) {
|
||||
// Overwrite existing model's layout here (fix 74065)
|
||||
input->m_resolved_param->set_layout(input->get_model()->get_layout());
|
||||
}
|
||||
}
|
||||
FunctionGuard guard(function);
|
||||
bool need_validate = false;
|
||||
auto results = function->get_results();
|
||||
auto parameters_list = std::list<std::shared_ptr<op::v0::Parameter>>(function->get_parameters().begin(),
|
||||
auto parameters_list = std::list<std::shared_ptr<opset8::Parameter>>(function->get_parameters().begin(),
|
||||
function->get_parameters().end());
|
||||
|
||||
for (const auto& input_info : m_impl->m_inputs) {
|
||||
const auto& input = input_info.m_impl;
|
||||
auto param = input->m_resolved_param;
|
||||
auto consumers = param->output(0).get_target_inputs();
|
||||
if (!input->get_tensor_data()->is_element_type_set()) {
|
||||
input->get_tensor_data()->set_element_type(param->get_element_type());
|
||||
}
|
||||
auto color_info = ColorFormatInfo::get(input->get_tensor_data()->get_color_format());
|
||||
if (!input->get_tensor_data()->is_layout_set()) {
|
||||
if (!color_info->default_layout().empty()) {
|
||||
input->get_tensor_data()->set_layout(color_info->default_layout());
|
||||
}
|
||||
}
|
||||
|
||||
auto net_shape = param->get_partial_shape();
|
||||
auto new_param_shape = net_shape;
|
||||
auto model_layout = param->get_layout();
|
||||
if (model_layout.empty() && input->get_tensor_data()->is_layout_set()) {
|
||||
model_layout = input->get_preprocess()->propagate_layout(input->get_tensor_data()->get_layout());
|
||||
}
|
||||
if (input->get_tensor_data()->is_layout_set() && !model_layout.empty() &&
|
||||
model_layout != input->get_tensor_data()->get_layout()) {
|
||||
auto sq_layout = Layout();
|
||||
// Find if some squeeze is needed between model and tensor
|
||||
// E.g. model=NCHW, tensor=HWC
|
||||
std::tie(new_param_shape, sq_layout) =
|
||||
layout::utils::find_squeeze(model_layout, net_shape, input->get_tensor_data()->get_layout());
|
||||
// Find transpose between model and tensor layouts and update tensor shape
|
||||
auto net_to_tensor =
|
||||
layout::utils::find_permutation(sq_layout, new_param_shape, input->get_tensor_data()->get_layout());
|
||||
if (!net_to_tensor.empty() && new_param_shape.rank().is_static()) {
|
||||
std::vector<ov::Dimension> dims(new_param_shape.size());
|
||||
std::transform(net_to_tensor.begin(), net_to_tensor.end(), dims.begin(), [&](int64_t v) {
|
||||
return new_param_shape[v];
|
||||
});
|
||||
new_param_shape = PartialShape(dims);
|
||||
}
|
||||
} else {
|
||||
Layout new_layout;
|
||||
std::tie(new_param_shape, new_layout) =
|
||||
input->get_preprocess()->calculate_param_shape(new_param_shape, model_layout);
|
||||
if (!input->get_tensor_data()->is_layout_set()) {
|
||||
// Reusing param's layout according to converted calculated layout
|
||||
input->get_tensor_data()->set_layout(new_layout);
|
||||
}
|
||||
}
|
||||
if (input->get_tensor_data()->is_shape_set()) {
|
||||
new_param_shape = input->get_tensor_data()->get_shape();
|
||||
} else if (input->get_tensor_data()->is_spatial_shape_set()) {
|
||||
auto height_idx = get_and_check_height_idx(input->get_tensor_data()->get_layout(), new_param_shape);
|
||||
auto width_idx = get_and_check_width_idx(input->get_tensor_data()->get_layout(), new_param_shape);
|
||||
if (input->get_tensor_data()->is_spatial_shape_dynamic()) {
|
||||
// Use dynamic spatial dimensions
|
||||
new_param_shape[height_idx] = Dimension::dynamic();
|
||||
new_param_shape[width_idx] = Dimension::dynamic();
|
||||
} else {
|
||||
// Use static spatial dimensions
|
||||
new_param_shape[height_idx] = input->get_tensor_data()->get_spatial_height();
|
||||
new_param_shape[width_idx] = input->get_tensor_data()->get_spatial_width();
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<Output<Node>> nodes;
|
||||
std::vector<std::shared_ptr<op::v0::Parameter>> new_params;
|
||||
|
||||
// Create separate parameter for each plane. Shape is based on color format
|
||||
for (size_t plane = 0; plane < color_info->planes_count(); plane++) {
|
||||
auto plane_shape = color_info->shape(plane, new_param_shape);
|
||||
auto plane_param =
|
||||
std::make_shared<op::v0::Parameter>(input->get_tensor_data()->get_element_type(), plane_shape);
|
||||
if (plane < input->get_tensor_data()->planes_sub_names().size()) {
|
||||
std::unordered_set<std::string> plane_tensor_names;
|
||||
std::string sub_name;
|
||||
sub_name = std::string("/") + input->get_tensor_data()->planes_sub_names()[plane];
|
||||
if (!std::get<1>(existing_names)) {
|
||||
existing_names = std::make_tuple(get_function_tensor_names(function), true);
|
||||
}
|
||||
for (const auto& tensor_name : param->get_default_output().get_tensor().get_names()) {
|
||||
auto new_name = tensor_name + sub_name;
|
||||
OPENVINO_ASSERT(
|
||||
std::get<0>(existing_names).count(new_name) == 0,
|
||||
"Error while trying to create plane input with name '",
|
||||
new_name,
|
||||
"' - name already exists in model. Please specify another sub-name for set_color_format");
|
||||
plane_tensor_names.insert(new_name);
|
||||
}
|
||||
plane_param->get_default_output().get_tensor().set_names(plane_tensor_names);
|
||||
plane_param->set_friendly_name(param->get_friendly_name() + sub_name);
|
||||
} else if (color_info->planes_count() == 1) {
|
||||
plane_param->get_default_output().get_tensor().set_names(
|
||||
param->get_default_output().get_tensor().get_names());
|
||||
plane_param->set_friendly_name(param->get_friendly_name());
|
||||
}
|
||||
// Fill runtime info
|
||||
plane_param->get_rt_info() = param->get_rt_info();
|
||||
plane_param->output(0).get_rt_info() = param->output(0).get_rt_info();
|
||||
if (!input->get_tensor_data()->get_layout().empty()) {
|
||||
plane_param->set_layout(input->get_tensor_data()->get_layout());
|
||||
}
|
||||
if (input->get_tensor_data()->is_memory_type_set()) {
|
||||
if (input->get_tensor_data()->get_memory_type().empty()) {
|
||||
plane_param->output(0).get_rt_info().erase(TensorInfoMemoryType::get_type_info_static());
|
||||
} else {
|
||||
plane_param->output(0).get_rt_info()[TensorInfoMemoryType::get_type_info_static()] =
|
||||
TensorInfoMemoryType(input->get_tensor_data()->get_memory_type());
|
||||
}
|
||||
}
|
||||
new_params.push_back(plane_param);
|
||||
nodes.emplace_back(plane_param);
|
||||
}
|
||||
|
||||
PreprocessingContext context(input->get_tensor_data()->get_layout());
|
||||
context.color_format() = input->get_tensor_data()->get_color_format();
|
||||
context.target_layout() = model_layout;
|
||||
context.model_shape() = param->get_partial_shape();
|
||||
context.target_element_type() = param->get_element_type();
|
||||
|
||||
// 2. Apply preprocessing
|
||||
for (const auto& action : input->get_preprocess()->actions()) {
|
||||
auto action_result = action(nodes, function, context);
|
||||
nodes = std::get<0>(action_result);
|
||||
tensor_data_updated |= std::get<1>(action_result);
|
||||
}
|
||||
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Multiple plane input is not allowed as model input. Consider using of convert_color "
|
||||
"preprocessing operation. Current format is '",
|
||||
color_format_name(context.color_format()),
|
||||
"'");
|
||||
OPENVINO_ASSERT(is_rgb_family(context.color_format()) || context.color_format() == ColorFormat::UNDEFINED,
|
||||
"model shall have RGB/BGR color format. Consider add 'convert_color' preprocessing operation "
|
||||
"to convert current color format '",
|
||||
color_format_name(context.color_format()),
|
||||
"'to RGB/BGR");
|
||||
|
||||
// Implicit: Convert element type + layout to user's tensor implicitly
|
||||
PreStepsList implicit_steps;
|
||||
implicit_steps.add_convert_impl(param->get_element_type());
|
||||
if (!context.target_layout().empty()) {
|
||||
implicit_steps.add_convert_layout_impl(context.target_layout());
|
||||
}
|
||||
|
||||
for (const auto& action : implicit_steps.actions()) {
|
||||
auto action_result = action(nodes, function, context);
|
||||
nodes = std::get<0>(action_result);
|
||||
}
|
||||
|
||||
auto node = nodes[0];
|
||||
if (node.get_partial_shape() != param->get_partial_shape()) {
|
||||
tensor_data_updated = true; // Trigger revalidation if input parameter shape is changed
|
||||
}
|
||||
// Check final shape
|
||||
OPENVINO_ASSERT(node.get_partial_shape().compatible(param->get_partial_shape()),
|
||||
"Resulting shape '",
|
||||
node.get_partial_shape(),
|
||||
"' after preprocessing is not aligned with original parameter's shape: ",
|
||||
param->get_partial_shape(),
|
||||
", input parameter: ",
|
||||
param->get_friendly_name());
|
||||
|
||||
// Replace parameter
|
||||
for (auto consumer : consumers) {
|
||||
consumer.replace_source_output(node);
|
||||
}
|
||||
{
|
||||
auto param_it = std::find(parameters_list.begin(), parameters_list.end(), param);
|
||||
OPENVINO_ASSERT(param_it != parameters_list.end(),
|
||||
"Parameter to replace has been replaced by previous steps of preprocessing. Use only one "
|
||||
"InputInfo for one input parameter");
|
||||
// Insert list of new parameters to the place of original parameter
|
||||
param_it = parameters_list.erase(param_it);
|
||||
parameters_list.insert(param_it, new_params.begin(), new_params.end());
|
||||
}
|
||||
need_validate |= input_info.m_impl->build(function, existing_names, parameters_list);
|
||||
}
|
||||
|
||||
// Add parameters with right order
|
||||
@ -560,85 +181,13 @@ std::shared_ptr<Model> PrePostProcessor::build() {
|
||||
function->add_parameters(parameters_vec);
|
||||
}
|
||||
// Validate nodes after preprocessing if needed (no need to repeat it after post-processing)
|
||||
if (tensor_data_updated) {
|
||||
if (need_validate) {
|
||||
function->validate_nodes_and_infer_types();
|
||||
}
|
||||
|
||||
// Post processing
|
||||
for (const auto& output_info : m_impl->m_outputs) {
|
||||
const auto& output = output_info.m_impl;
|
||||
std::shared_ptr<op::v0::Result> result;
|
||||
Output<Node> node = output->m_output_node;
|
||||
auto start_out_node_names = node.get_tensor().get_names();
|
||||
node.get_tensor().set_names({});
|
||||
result = std::dynamic_pointer_cast<op::v0::Result>(node.get_node_shared_ptr());
|
||||
// Set result layout from 'model' information
|
||||
if (output->get_model_data()->is_layout_set()) {
|
||||
// Overwrite existing model's layout here (fix 74065)
|
||||
result->set_layout(output->get_model_data()->get_layout());
|
||||
}
|
||||
auto parent = result->get_input_source_output(0);
|
||||
PostprocessingContext context(result->get_layout());
|
||||
if (output->get_tensor_data()->is_layout_set()) {
|
||||
context.target_layout() = output->get_tensor_data()->get_layout();
|
||||
}
|
||||
if (output->get_tensor_data()->is_element_type_set()) {
|
||||
context.target_element_type() = output->get_tensor_data()->get_element_type();
|
||||
}
|
||||
// Apply post-processing
|
||||
node = result->get_input_source_output(0);
|
||||
bool post_processing_applied = false;
|
||||
for (const auto& action : output->get_postprocess()->actions()) {
|
||||
auto action_result = action({node}, context);
|
||||
node = std::get<0>(action_result);
|
||||
post_processing_applied = true;
|
||||
}
|
||||
// Implicit: Convert element type + layout to user's tensor implicitly
|
||||
PostStepsList implicit_steps;
|
||||
if (node.get_element_type() != output->get_tensor_data()->get_element_type() &&
|
||||
output->get_tensor_data()->is_element_type_set() && node.get_element_type() != element::dynamic) {
|
||||
implicit_steps.add_convert_impl(output->get_tensor_data()->get_element_type());
|
||||
}
|
||||
|
||||
if (!context.target_layout().empty() && context.target_layout() != context.layout()) {
|
||||
implicit_steps.add_convert_layout_impl(context.target_layout());
|
||||
}
|
||||
for (const auto& action : implicit_steps.actions()) {
|
||||
auto action_result = action({node}, context);
|
||||
node = std::get<0>(action_result);
|
||||
post_processing_applied = true;
|
||||
}
|
||||
node.get_node_shared_ptr()->set_friendly_name(
|
||||
result->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name());
|
||||
|
||||
// Reset friendly name of input node to avoid names collision
|
||||
// when there is at a new node inserted by post-processing steps
|
||||
// If no new nodes are inserted by post-processing, then we need to preserve friendly name of input
|
||||
// as it's required for old API correct work
|
||||
if (post_processing_applied)
|
||||
result->get_input_source_output(0).get_node_shared_ptr()->set_friendly_name("");
|
||||
|
||||
// Create result
|
||||
auto new_result = std::make_shared<ov::op::v0::Result>(node);
|
||||
new_result->set_friendly_name(result->get_friendly_name());
|
||||
node.get_tensor().set_names(start_out_node_names);
|
||||
|
||||
// Preserve runtime info of original result
|
||||
new_result->get_rt_info() = result->get_rt_info();
|
||||
new_result->input(0).get_rt_info() = result->input(0).get_rt_info();
|
||||
new_result->output(0).get_rt_info() = result->output(0).get_rt_info();
|
||||
|
||||
// Update layout
|
||||
if (!context.layout().empty()) {
|
||||
new_result->set_layout(context.layout());
|
||||
}
|
||||
|
||||
for (auto& old_result : results) {
|
||||
if (result == old_result) {
|
||||
old_result = new_result;
|
||||
break;
|
||||
}
|
||||
}
|
||||
output_info.m_impl->build(results);
|
||||
}
|
||||
// Add results with right order
|
||||
while (!function->get_results().empty())
|
||||
@ -758,14 +307,16 @@ PreProcessSteps& PreProcessSteps::convert_color(const ov::preprocess::ColorForma
|
||||
|
||||
PreProcessSteps& PreProcessSteps::custom(const CustomPreprocessOp& preprocess_cb) {
|
||||
// 'true' indicates that custom preprocessing step will trigger validate_and_infer_types
|
||||
m_impl->actions().emplace_back([preprocess_cb](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<ov::Model>&,
|
||||
PreprocessingContext&) {
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Can't apply custom preprocessing step for multi-plane input. Suggesting to convert "
|
||||
"current image to RGB/BGR color format using 'convert_color'");
|
||||
return std::make_tuple(std::vector<Output<Node>>{preprocess_cb(nodes[0])}, true);
|
||||
});
|
||||
m_impl->actions().emplace_back(
|
||||
[preprocess_cb](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<ov::Model>&,
|
||||
PreprocessingContext&) {
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Can't apply custom preprocessing step for multi-plane input. Suggesting to convert "
|
||||
"current image to RGB/BGR color format using 'convert_color'");
|
||||
return std::make_tuple(std::vector<Output<Node>>{preprocess_cb(nodes[0])}, true);
|
||||
},
|
||||
"custom");
|
||||
return *this;
|
||||
}
|
||||
|
||||
@ -819,9 +370,11 @@ PostProcessSteps& PostProcessSteps::convert_layout(const std::vector<uint64_t>&
|
||||
|
||||
PostProcessSteps& PostProcessSteps::custom(const CustomPostprocessOp& postprocess_cb) {
|
||||
// 'true' indicates that custom postprocessing step will trigger validate_and_infer_types
|
||||
m_impl->actions().emplace_back([postprocess_cb](const Output<ov::Node>& node, PostprocessingContext&) {
|
||||
return std::make_tuple(postprocess_cb(node), true);
|
||||
});
|
||||
m_impl->actions().emplace_back(
|
||||
[postprocess_cb](const Output<ov::Node>& node, PostprocessingContext&) {
|
||||
return std::make_tuple(postprocess_cb(node), true);
|
||||
},
|
||||
"custom");
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
467
src/core/src/preprocess/preprocess_impls.cpp
Normal file
467
src/core/src/preprocess/preprocess_impls.cpp
Normal file
@ -0,0 +1,467 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "preprocess_impls.hpp"
|
||||
|
||||
#include "layout_utils.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace preprocess {
|
||||
|
||||
namespace {
|
||||
void dump_tensor(std::ostream& str,
|
||||
const PartialShape& shape,
|
||||
const Layout& layout,
|
||||
const element::Type& type,
|
||||
const ColorFormat& color = ColorFormat::UNDEFINED) {
|
||||
str << shape << ", ";
|
||||
if (layout.empty()) {
|
||||
str << "<no layout>";
|
||||
} else {
|
||||
str << layout.to_string();
|
||||
}
|
||||
str << ", " << type;
|
||||
if (color != ColorFormat::UNDEFINED) {
|
||||
str << ", " << color_format_name(color);
|
||||
}
|
||||
}
|
||||
} // namespace
|
||||
|
||||
InputInfo::InputInfoImpl::InputInfoData InputInfo::InputInfoImpl::create_new_params(
|
||||
std::tuple<std::unordered_set<std::string>, bool>& existing_names,
|
||||
const std::shared_ptr<Model>& model) const {
|
||||
InputInfoData res;
|
||||
res.m_param = m_resolved_param;
|
||||
auto tensor_elem_type = get_tensor_data()->is_element_type_set() ? get_tensor_data()->get_element_type()
|
||||
: res.m_param->get_element_type();
|
||||
res.m_tensor_layout = get_tensor_data()->get_layout();
|
||||
auto color_info = ColorFormatInfo::get(get_tensor_data()->get_color_format());
|
||||
if (!get_tensor_data()->is_layout_set()) {
|
||||
if (!color_info->default_layout().empty()) {
|
||||
res.m_tensor_layout = color_info->default_layout();
|
||||
}
|
||||
}
|
||||
|
||||
auto model_shape = res.m_param->get_partial_shape();
|
||||
auto new_param_shape = model_shape;
|
||||
if (get_tensor_data()->is_shape_set()) {
|
||||
new_param_shape = get_tensor_data()->get_shape();
|
||||
}
|
||||
res.m_model_layout = get_model()->is_layout_set() ? get_model()->get_layout() : res.m_param->get_layout();
|
||||
if (res.m_model_layout.empty() && get_tensor_data()->is_layout_set()) {
|
||||
res.m_model_layout = get_preprocess()->propagate_layout(res.m_tensor_layout);
|
||||
}
|
||||
if (!res.m_tensor_layout.empty() && !res.m_model_layout.empty() && res.m_model_layout != res.m_tensor_layout) {
|
||||
auto sq_layout = Layout();
|
||||
// Find if some squeeze is needed between model and tensor
|
||||
// E.g. model=NCHW, tensor=HWC
|
||||
std::tie(new_param_shape, sq_layout) =
|
||||
layout::utils::find_squeeze(res.m_model_layout, model_shape, res.m_tensor_layout);
|
||||
// Find transpose between model and tensor layouts and update tensor shape
|
||||
auto net_to_tensor = layout::utils::find_permutation(sq_layout, new_param_shape, res.m_tensor_layout);
|
||||
if (!net_to_tensor.empty() && new_param_shape.rank().is_static()) {
|
||||
std::vector<ov::Dimension> dims(new_param_shape.size());
|
||||
std::transform(net_to_tensor.begin(), net_to_tensor.end(), dims.begin(), [&](int64_t v) {
|
||||
return new_param_shape[v];
|
||||
});
|
||||
new_param_shape = PartialShape(dims);
|
||||
}
|
||||
} else {
|
||||
Layout new_layout;
|
||||
std::tie(new_param_shape, new_layout) =
|
||||
get_preprocess()->calculate_param_shape(new_param_shape, res.m_model_layout);
|
||||
if (res.m_tensor_layout.empty()) {
|
||||
// Reusing param's layout according to converted calculated layout
|
||||
res.m_tensor_layout = new_layout;
|
||||
}
|
||||
}
|
||||
|
||||
if (get_tensor_data()->is_shape_set()) {
|
||||
new_param_shape = get_tensor_data()->get_shape();
|
||||
} else if (get_tensor_data()->is_spatial_shape_set()) {
|
||||
auto height_idx = get_and_check_height_idx(res.m_tensor_layout, new_param_shape);
|
||||
auto width_idx = get_and_check_width_idx(res.m_tensor_layout, new_param_shape);
|
||||
if (get_tensor_data()->is_spatial_shape_dynamic()) {
|
||||
// Use dynamic spatial dimensions
|
||||
new_param_shape[height_idx] = Dimension::dynamic();
|
||||
new_param_shape[width_idx] = Dimension::dynamic();
|
||||
} else {
|
||||
// Use static spatial dimensions
|
||||
new_param_shape[height_idx] = get_tensor_data()->get_spatial_height();
|
||||
new_param_shape[width_idx] = get_tensor_data()->get_spatial_width();
|
||||
}
|
||||
}
|
||||
|
||||
// Create separate parameter for each plane. Shape is based on color format
|
||||
for (size_t plane = 0; plane < color_info->planes_count(); plane++) {
|
||||
auto plane_shape = color_info->shape(plane, new_param_shape);
|
||||
auto plane_param = std::make_shared<opset8::Parameter>(tensor_elem_type, plane_shape);
|
||||
if (plane < get_tensor_data()->planes_sub_names().size()) {
|
||||
std::unordered_set<std::string> plane_tensor_names;
|
||||
std::string sub_name;
|
||||
sub_name = std::string("/") + get_tensor_data()->planes_sub_names()[plane];
|
||||
if (!std::get<1>(existing_names)) {
|
||||
existing_names = std::make_tuple(get_function_tensor_names(model), true);
|
||||
}
|
||||
for (const auto& tensor_name : res.m_param->get_default_output().get_tensor().get_names()) {
|
||||
auto new_name = tensor_name + sub_name;
|
||||
OPENVINO_ASSERT(
|
||||
std::get<0>(existing_names).count(new_name) == 0,
|
||||
"Error while trying to create plane input with name '",
|
||||
new_name,
|
||||
"' - name already exists in model. Please specify another sub-name for set_color_format");
|
||||
plane_tensor_names.insert(new_name);
|
||||
}
|
||||
plane_param->get_default_output().get_tensor().set_names(plane_tensor_names);
|
||||
plane_param->set_friendly_name(res.m_param->get_friendly_name() + sub_name);
|
||||
} else if (color_info->planes_count() == 1) {
|
||||
plane_param->get_default_output().get_tensor().set_names(
|
||||
res.m_param->get_default_output().get_tensor().get_names());
|
||||
plane_param->set_friendly_name(res.m_param->get_friendly_name());
|
||||
}
|
||||
// Fill runtime info
|
||||
plane_param->get_rt_info() = res.m_param->get_rt_info();
|
||||
plane_param->output(0).get_rt_info() = res.m_param->output(0).get_rt_info();
|
||||
if (!res.m_tensor_layout.empty()) {
|
||||
plane_param->set_layout(res.m_tensor_layout);
|
||||
}
|
||||
if (get_tensor_data()->is_memory_type_set()) {
|
||||
if (get_tensor_data()->get_memory_type().empty()) {
|
||||
plane_param->output(0).get_rt_info().erase(TensorInfoMemoryType::get_type_info_static());
|
||||
} else {
|
||||
plane_param->output(0).get_rt_info()[TensorInfoMemoryType::get_type_info_static()] =
|
||||
TensorInfoMemoryType(get_tensor_data()->get_memory_type());
|
||||
}
|
||||
}
|
||||
res.m_new_params.push_back(plane_param);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
// Build the implicit (user-invisible) conversion steps that bridge the gap
// between the last explicit preprocessing output and the model's parameter:
// an element-type convert and/or a layout convert, only when actually needed.
PreStepsList InputInfo::InputInfoImpl::create_implicit_steps(const PreprocessingContext& context, element::Type type) {
    PreStepsList steps;
    // Element type differs from what the model expects -> insert a convert
    if (type != context.target_element_type()) {
        steps.add_convert_impl(context.target_element_type());
    }
    // Layout differs from a known target layout -> insert a layout conversion
    const auto& target = context.target_layout();
    if (!target.empty() && target != context.layout()) {
        steps.add_convert_layout_impl(target);
    }
    return steps;
}
|
||||
|
||||
// Applies all user-defined and implicit preprocessing steps for one input and
// rewires the model: the original Parameter is replaced by one Parameter per
// color plane, and its consumers are re-attached to the preprocessed output.
// Returns true when the caller must re-validate the model (shape changed,
// a step requested it, or a Result was directly attached to the Parameter).
bool InputInfo::InputInfoImpl::build(const std::shared_ptr<Model>& model,
                                     std::tuple<std::unordered_set<std::string>, bool>& existing_names,
                                     std::list<std::shared_ptr<opset8::Parameter>>& parameters_list) {
    auto data = create_new_params(existing_names, model);
    // Capture consumers BEFORE any graph changes - they are rewired at the end
    auto consumers = data.m_param->output(0).get_target_inputs();
    bool need_validate = false;

    // Context carries current layout/shape/type state through the step chain
    PreprocessingContext context(data.m_tensor_layout);
    context.color_format() = get_tensor_data()->get_color_format();
    context.target_layout() = data.m_model_layout;
    context.model_shape() = data.m_param->get_partial_shape();
    context.target_element_type() = data.m_param->get_element_type();

    // 2. Apply preprocessing
    auto nodes = data.as_nodes();
    for (const auto& action : get_preprocess()->actions()) {
        auto action_result = action.m_op(nodes, model, context);
        nodes = std::get<0>(action_result);
        // Any single step may request model revalidation
        need_validate |= std::get<1>(action_result);
    }

    // After all steps, multi-plane inputs must have been merged (convert_color)
    OPENVINO_ASSERT(nodes.size() == 1,
                    "Multiple plane input is not allowed as model input. Consider using of convert_color "
                    "preprocessing operation. Current format is '",
                    color_format_name(context.color_format()),
                    "'");
    OPENVINO_ASSERT(is_rgb_family(context.color_format()) || context.color_format() == ColorFormat::UNDEFINED,
                    "model shall have RGB/BGR color format. Consider add 'convert_color' preprocessing operation "
                    "to convert current color format '",
                    color_format_name(context.color_format()),
                    "'to RGB/BGR");

    // Implicit: Convert element type + layout to user's tensor implicitly
    auto implicit_steps = create_implicit_steps(context, nodes[0].get_element_type());
    for (const auto& action : implicit_steps.actions()) {
        auto action_result = action.m_op(nodes, model, context);
        nodes = std::get<0>(action_result);
    }

    auto node = nodes[0];
    if (node.get_partial_shape() != context.model_shape()) {
        need_validate = true;  // Trigger revalidation if input parameter shape is changed
    }
    // Check final shape
    OPENVINO_ASSERT(node.get_partial_shape().compatible(context.model_shape()),
                    "Resulting shape '",
                    node.get_partial_shape(),
                    "' after preprocessing is not aligned with original parameter's shape: ",
                    context.model_shape(),
                    ", input parameter: ",
                    data.m_param->get_friendly_name());

    // Replace parameter
    for (auto consumer : consumers) {
        if (dynamic_cast<ov::opset8::Result*>(consumer.get_node())) {
            // Some result points to old parameter (Param->Result case), need to trigger revalidation
            need_validate = true;
        }
        consumer.replace_source_output(node);
    }
    {
        // Swap old parameter for the per-plane parameters in the caller's list;
        // a missing entry means the same parameter was processed twice
        auto param_it = std::find(parameters_list.begin(), parameters_list.end(), data.m_param);
        OPENVINO_ASSERT(param_it != parameters_list.end(),
                        "Parameter to replace has been replaced by previous steps of preprocessing. Use only one "
                        "InputInfo for one input parameter");
        // Insert list of new parameters to the place of original parameter
        param_it = parameters_list.erase(param_it);
        parameters_list.insert(param_it, data.m_new_params.begin(), data.m_new_params.end());
    }
    return need_validate;
}
|
||||
|
||||
// Prints a human-readable description of the preprocessing pipeline for one
// input: user tensor(s), model tensor, then each explicit and implicit step
// with its "(before) -> (after)" shape/layout/type. Steps are actually
// executed (dry-run on a copy of the params) so the printed shapes are real.
// Prints nothing when no preprocessing would be applied for this input.
void InputInfo::InputInfoImpl::dump(std::ostream& str,
                                    const std::shared_ptr<Model>& model,
                                    std::tuple<std::unordered_set<std::string>, bool>& existing_names) const {
    auto data = create_new_params(existing_names, model);
    auto nodes = data.as_nodes();

    PreprocessingContext context(data.m_tensor_layout);
    context.color_format() = get_tensor_data()->get_color_format();
    context.target_layout() = data.m_model_layout;
    context.model_shape() = data.m_param->get_partial_shape();
    context.target_element_type() = data.m_param->get_element_type();
    // Dump only when something would actually change (shape/layout/type/planes)
    // or when memory type / explicit steps are present
    bool need_dump = nodes.size() > 1 || nodes[0].get_partial_shape() != context.model_shape() ||
                     data.m_param->get_layout() != context.target_layout() ||
                     nodes[0].get_element_type() != context.target_element_type() ||
                     get_tensor_data()->is_memory_type_set() || !get_preprocess()->actions().empty();
    if (!need_dump) {
        return;
    }
    // Dump tensor and model shapes if any preprocessing is needed
    str << "Input ";
    if (!data.m_param->output(0).get_names().empty()) {
        str << "\"" << data.m_param->output(0).get_any_name() << "\"";
    }
    if (context.color_format() != ColorFormat::UNDEFINED) {
        str << " (color " << color_format_name(context.color_format()) << ")";
    }
    if (get_tensor_data()->is_memory_type_set()) {
        str << " memory type=" << get_tensor_data()->get_memory_type();
    }
    str << ":" << std::endl;
    if (nodes.size() == 1) {
        str << " User's input tensor: ";
        dump_tensor(str, nodes[0].get_partial_shape(), context.layout(), nodes[0].get_element_type());
        str << std::endl;
    } else {
        // Multi-plane color format: one user tensor per plane
        str << " " << nodes.size() << " user's tensors expected for each plane:" << std::endl;
        for (size_t i = 0; i < nodes.size(); i++) {
            str << " " << i << ": ";
            if (!nodes[i].get_names().empty()) {
                str << nodes[i].get_any_name() << " ";
            }
            dump_tensor(str, nodes[i].get_partial_shape(), context.layout(), nodes[i].get_element_type());
            str << std::endl;
        }
    }
    str << " Model's expected tensor: ";
    dump_tensor(str, context.model_shape(), context.target_layout(), context.target_element_type());
    str << std::endl;

    // Apply and dump preprocessing operations
    if (!get_preprocess()->actions().empty()) {
        str << " Pre-processing steps (" << get_preprocess()->actions().size() << "):" << std::endl;
    }
    for (const auto& action : get_preprocess()->actions()) {
        // Print state before the step, execute it, then print state after
        str << " " << action.m_name << ": (";
        dump_tensor(str,
                    nodes[0].get_partial_shape(),
                    context.layout(),
                    nodes[0].get_element_type(),
                    context.color_format());
        auto action_result = action.m_op(nodes, model, context);
        nodes = std::get<0>(action_result);
        str << ") -> (";
        dump_tensor(str,
                    nodes[0].get_partial_shape(),
                    context.layout(),
                    nodes[0].get_element_type(),
                    context.color_format());
        str << ")" << std::endl;
    }

    // Implicit: Convert element type + layout to user's tensor implicitly
    auto implicit_steps = create_implicit_steps(context, nodes[0].get_element_type());
    if (!implicit_steps.actions().empty()) {
        str << " Implicit pre-processing steps (" << implicit_steps.actions().size() << "):" << std::endl;
    }
    for (const auto& action : implicit_steps.actions()) {
        str << " " << action.m_name << ": (";
        dump_tensor(str,
                    nodes[0].get_partial_shape(),
                    context.layout(),
                    nodes[0].get_element_type(),
                    context.color_format());
        auto action_result = action.m_op(nodes, model, context);
        nodes = std::get<0>(action_result);
        str << ") -> (";
        dump_tensor(str,
                    nodes[0].get_partial_shape(),
                    context.layout(),
                    nodes[0].get_element_type(),
                    context.color_format());
        str << ")" << std::endl;
    }
}
|
||||
|
||||
//----------- OutputInfoImpl ----------
|
||||
//----------- OutputInfoImpl ----------
// Applies user-defined and implicit post-processing steps after one model
// output and replaces the corresponding Result in 'results' with a new Result
// fed by the post-processed node. Tensor names are temporarily detached from
// the original output and re-attached to the final node so the user-visible
// names stay on the post-processed tensor.
void OutputInfo::OutputInfoImpl::build(ov::ResultVector& results) {
    std::shared_ptr<opset8::Result> result;
    auto node = m_output_node;
    // Detach names now; they are restored on the final node below
    auto start_out_node_names = node.get_tensor().get_names();
    node.get_tensor().set_names({});
    result = std::dynamic_pointer_cast<opset8::Result>(node.get_node_shared_ptr());
    // Set result layout from 'model' information
    if (get_model_data()->is_layout_set()) {
        // Overwrite existing model's layout here (fix 74065)
        result->set_layout(get_model_data()->get_layout());
    }
    PostprocessingContext context(result->get_layout());
    if (get_tensor_data()->is_layout_set()) {
        context.target_layout() = get_tensor_data()->get_layout();
    }
    if (get_tensor_data()->is_element_type_set()) {
        context.target_element_type() = get_tensor_data()->get_element_type();
    }
    // Apply post-processing
    node = result->get_input_source_output(0);
    bool post_processing_applied = false;
    for (const auto& action : get_postprocess()->actions()) {
        auto action_result = action.m_op({node}, context);
        node = std::get<0>(action_result);
        post_processing_applied = true;
    }
    // Implicit: Convert element type + layout to user's tensor implicitly
    PostStepsList implicit_steps;
    // Skip the convert when types already match or the node type is dynamic
    if (node.get_element_type() != get_tensor_data()->get_element_type() && get_tensor_data()->is_element_type_set() &&
        node.get_element_type() != element::dynamic) {
        implicit_steps.add_convert_impl(get_tensor_data()->get_element_type());
    }

    if (!context.target_layout().empty() && context.target_layout() != context.layout()) {
        implicit_steps.add_convert_layout_impl(context.target_layout());
    }
    for (const auto& action : implicit_steps.actions()) {
        auto action_result = action.m_op({node}, context);
        node = std::get<0>(action_result);
        post_processing_applied = true;
    }
    // Final node inherits the friendly name of the original producer
    node.get_node_shared_ptr()->set_friendly_name(
        result->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name());

    // Reset friendly name of input node to avoid names collision
    // when there is at a new node inserted by post-processing steps
    // If no new nodes are inserted by post-processing, then we need to preserve friendly name of input
    // as it's required for old API correct work
    if (post_processing_applied)
        result->get_input_source_output(0).get_node_shared_ptr()->set_friendly_name("");

    // Create result
    auto new_result = std::make_shared<opset8::Result>(node);
    new_result->set_friendly_name(result->get_friendly_name());
    // Re-attach the user-visible tensor names captured at the start
    node.get_tensor().set_names(start_out_node_names);

    // Preserve runtime info of original result
    new_result->get_rt_info() = result->get_rt_info();
    new_result->input(0).get_rt_info() = result->input(0).get_rt_info();
    new_result->output(0).get_rt_info() = result->output(0).get_rt_info();

    // Update layout
    if (!context.layout().empty()) {
        new_result->set_layout(context.layout());
    }

    // Swap the old Result with the new one in the caller's vector
    for (auto& old_result : results) {
        if (result == old_result) {
            old_result = new_result;
            break;
        }
    }
}
|
||||
|
||||
// Prints a human-readable description of the post-processing pipeline for one
// output: model tensor, each explicit and implicit step as
// "(before) -> (after)", then the final user tensor. Steps are dry-run so the
// printed shapes/types are the real intermediate values. Prints nothing when
// no post-processing would be applied for this output.
void OutputInfo::OutputInfoImpl::dump(std::ostream& str) const {
    std::shared_ptr<opset8::Result> result;
    auto node = m_output_node;
    auto start_out_node_names = node.get_tensor().get_names();
    result = std::dynamic_pointer_cast<opset8::Result>(node.get_node_shared_ptr());
    // Model layout: explicit user setting wins over the Result's own layout
    auto model_layout = get_model_data()->is_layout_set() ? get_model_data()->get_layout() : result->get_layout();
    PostprocessingContext context(model_layout);
    if (get_tensor_data()->is_layout_set()) {
        context.target_layout() = get_tensor_data()->get_layout();
    }
    if (get_tensor_data()->is_element_type_set()) {
        context.target_element_type() = get_tensor_data()->get_element_type();
    }

    // Dump only when layout/type would actually change or explicit steps exist
    bool need_dump =
        (model_layout != context.target_layout() && get_tensor_data()->is_layout_set()) ||
        (node.get_element_type() != context.target_element_type() && get_tensor_data()->is_element_type_set()) ||
        !get_postprocess()->actions().empty();
    if (!need_dump) {
        return;
    }

    str << "Output ";
    if (!start_out_node_names.empty()) {
        str << "\"" << *start_out_node_names.begin() << "\"";
    }
    str << ":" << std::endl;
    str << " Model's data tensor: ";
    dump_tensor(str, node.get_partial_shape(), model_layout, node.get_element_type());
    str << std::endl;

    if (!get_postprocess()->actions().empty()) {
        str << " Post-processing steps (" << get_postprocess()->actions().size() << "):" << std::endl;
    }
    // Apply post-processing
    node = result->get_input_source_output(0);
    for (const auto& action : get_postprocess()->actions()) {
        // Print state before the step, execute it, then print state after
        str << " " << action.m_name << ": (";
        dump_tensor(str, node.get_partial_shape(), context.layout(), node.get_element_type());
        auto action_result = action.m_op({node}, context);
        node = std::get<0>(action_result);
        str << ") -> (";
        dump_tensor(str, node.get_partial_shape(), context.layout(), node.get_element_type());
        str << ")" << std::endl;
    }
    // Implicit: Convert element type + layout to user's tensor implicitly
    PostStepsList implicit_steps;
    if (node.get_element_type() != get_tensor_data()->get_element_type() && get_tensor_data()->is_element_type_set() &&
        node.get_element_type() != element::dynamic) {
        implicit_steps.add_convert_impl(get_tensor_data()->get_element_type());
    }

    if (!context.target_layout().empty() && context.target_layout() != context.layout()) {
        implicit_steps.add_convert_layout_impl(context.target_layout());
    }
    if (!implicit_steps.actions().empty()) {
        str << " Post-processing implicit steps (" << implicit_steps.actions().size() << "):" << std::endl;
    }
    for (const auto& action : implicit_steps.actions()) {
        str << " " << action.m_name << ": (";
        dump_tensor(str, node.get_partial_shape(), context.layout(), node.get_element_type());
        auto action_result = action.m_op({node}, context);
        node = std::get<0>(action_result);
        str << ") -> (";
        dump_tensor(str, node.get_partial_shape(), context.layout(), node.get_element_type());
        str << ")" << std::endl;
    }

    str << " User's output tensor: ";
    dump_tensor(str, node.get_partial_shape(), context.layout(), node.get_element_type());
}
|
||||
} // namespace preprocess
|
||||
} // namespace ov
|
290
src/core/src/preprocess/preprocess_impls.hpp
Normal file
290
src/core/src/preprocess/preprocess_impls.hpp
Normal file
@ -0,0 +1,290 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <openvino/core/preprocess/input_info.hpp>
|
||||
#include <openvino/core/preprocess/output_info.hpp>
|
||||
#include <openvino/opsets/opset8.hpp>
|
||||
|
||||
#include "color_utils.hpp"
|
||||
#include "preprocess_steps_impl.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace preprocess {
|
||||
|
||||
/// \brief ModelInfoImpl - internal data structure
/// Holds the 'model side' layout provided by the user, together with a flag
/// distinguishing "explicitly set" from the default empty layout.
class ModelInfoImpl {
public:
    ModelInfoImpl() = default;

    // Remember the user-provided model layout and mark it as explicitly set
    void set_layout(const Layout& layout) {
        m_layout = layout;
        m_layout_set = true;
    }
    // True only after set_layout() was called
    bool is_layout_set() const {
        return m_layout_set;
    }
    const Layout& get_layout() const {
        return m_layout;
    }

private:
    Layout m_layout = Layout();
    bool m_layout_set = false;
};
|
||||
|
||||
// Input-side model info: no extra data beyond the common ModelInfoImpl
class InputModelInfo::InputModelInfoImpl : public ModelInfoImpl {};

// Output-side model info: no extra data beyond the common ModelInfoImpl
class OutputModelInfo::OutputModelInfoImpl : public ModelInfoImpl {};
|
||||
|
||||
/// \brief OutputInfoImpl - internal data structure
/// Aggregates everything known about one model output: the user's desired
/// tensor info, the list of post-processing steps, model-side info, and the
/// output node this info is bound to. Accessors expose the pimpl objects.
struct OutputInfo::OutputInfoImpl {
    OutputInfoImpl() = default;

    // User's desired output tensor description (element type / layout)
    std::unique_ptr<OutputTensorInfo::OutputTensorInfoImpl>& get_tensor_data() {
        return m_tensor_info.m_impl;
    }

    const std::unique_ptr<OutputTensorInfo::OutputTensorInfoImpl>& get_tensor_data() const {
        return m_tensor_info.m_impl;
    }

    // User-defined post-processing step list
    std::unique_ptr<PostProcessSteps::PostProcessStepsImpl>& get_postprocess() {
        return m_postprocess.m_impl;
    }

    const std::unique_ptr<PostProcessSteps::PostProcessStepsImpl>& get_postprocess() const {
        return m_postprocess.m_impl;
    }

    // Model-side info (e.g. layout of the model's output)
    std::unique_ptr<OutputModelInfo::OutputModelInfoImpl>& get_model_data() {
        return m_model_info.m_impl;
    }

    const std::unique_ptr<OutputModelInfo::OutputModelInfoImpl>& get_model_data() const {
        return m_model_info.m_impl;
    }

    // Apply post-processing and replace the matching Result in 'results'
    void build(ov::ResultVector& results);

    // Print the post-processing pipeline for debugging (no model changes kept)
    void dump(std::ostream& str) const;

    OutputTensorInfo m_tensor_info;
    PostProcessSteps m_postprocess;
    OutputModelInfo m_model_info;
    ov::Output<ov::Node> m_output_node;
};
|
||||
|
||||
/// Common base for input/output tensor info: stores the user-provided element
/// type and layout, each paired with a flag distinguishing "explicitly set"
/// from the default value.
class TensorInfoImplBase {
public:
    TensorInfoImplBase() = default;

    // Remember the user's element type and mark it as explicitly set
    void set_element_type(const element::Type& type) {
        m_type = type;
        m_type_set = true;
    }
    bool is_element_type_set() const {
        return m_type_set;
    }
    const element::Type& get_element_type() const {
        return m_type;
    }

    // Remember the user's layout and mark it as explicitly set
    void set_layout(const Layout& layout) {
        m_layout = layout;
        m_layout_set = true;
    }
    bool is_layout_set() const {
        return m_layout_set;
    }
    const Layout& get_layout() const {
        return m_layout;
    }

protected:
    element::Type m_type = element::dynamic;
    bool m_type_set = false;

    Layout m_layout = Layout();
    bool m_layout_set = false;
};

// Output tensor info: no extra data beyond the common base
class OutputTensorInfo::OutputTensorInfoImpl : public TensorInfoImplBase {};
||||
|
||||
/// \brief InputTensorInfoImpl - internal data structure
|
||||
class InputTensorInfo::InputTensorInfoImpl : public TensorInfoImplBase {
|
||||
public:
|
||||
InputTensorInfoImpl() = default;
|
||||
|
||||
bool is_spatial_shape_set() const {
|
||||
return m_spatial_shape_set;
|
||||
}
|
||||
|
||||
int get_spatial_width() const {
|
||||
return m_spatial_width;
|
||||
}
|
||||
|
||||
int get_spatial_height() const {
|
||||
return m_spatial_height;
|
||||
}
|
||||
|
||||
bool is_spatial_shape_dynamic() const {
|
||||
return m_spatial_shape_set && m_spatial_width == -1 && m_spatial_height == -1;
|
||||
}
|
||||
|
||||
void set_spatial_dynamic_shape() {
|
||||
OPENVINO_ASSERT(!m_shape_set, "'set_spatial_dynamic_shape' and 'set_shape' shall not be used together");
|
||||
m_spatial_shape_set = true;
|
||||
m_spatial_width = -1;
|
||||
m_spatial_height = -1;
|
||||
}
|
||||
|
||||
void set_spatial_static_shape(size_t height, size_t width) & {
|
||||
OPENVINO_ASSERT(!m_shape_set, "'set_spatial_static_shape' and 'set_shape' shall not be used together");
|
||||
m_spatial_shape_set = true;
|
||||
m_spatial_height = static_cast<int>(height);
|
||||
m_spatial_width = static_cast<int>(width);
|
||||
}
|
||||
|
||||
const ColorFormat& get_color_format() const {
|
||||
return m_color_format;
|
||||
}
|
||||
|
||||
void set_color_format(ColorFormat format, const std::vector<std::string>& sub_names) {
|
||||
auto info = ColorFormatInfo::get(format);
|
||||
if (info->planes_count() == 1) {
|
||||
OPENVINO_ASSERT(sub_names.empty(),
|
||||
"Plane names are not allowed for single plane color format '",
|
||||
color_format_name(format),
|
||||
"'");
|
||||
} else if (!sub_names.empty()) {
|
||||
OPENVINO_ASSERT(sub_names.size() == info->planes_count(),
|
||||
"Number of sub-names (",
|
||||
sub_names.size(),
|
||||
") shall match with number of planes for '",
|
||||
color_format_name(format),
|
||||
"' color format (",
|
||||
info->planes_count(),
|
||||
")");
|
||||
}
|
||||
m_planes_sub_names = sub_names;
|
||||
m_color_format = format;
|
||||
}
|
||||
|
||||
const std::vector<std::string>& planes_sub_names() const {
|
||||
return m_planes_sub_names;
|
||||
}
|
||||
|
||||
void set_memory_type(const std::string& mem_type) {
|
||||
m_memory_type_set = true;
|
||||
m_memory_type = mem_type;
|
||||
}
|
||||
|
||||
const std::string& get_memory_type() const {
|
||||
return m_memory_type;
|
||||
}
|
||||
|
||||
bool is_memory_type_set() const {
|
||||
return m_memory_type_set;
|
||||
}
|
||||
|
||||
void set_shape(const PartialShape& shape) {
|
||||
OPENVINO_ASSERT(
|
||||
!m_spatial_shape_set,
|
||||
"'set_spatial_static_shape', 'set_spatial_dynamic_shape', 'set_shape' shall not be used together");
|
||||
m_shape = shape;
|
||||
m_shape_set = true;
|
||||
}
|
||||
|
||||
bool is_shape_set() const {
|
||||
return m_shape_set;
|
||||
}
|
||||
|
||||
const PartialShape& get_shape() const {
|
||||
return m_shape;
|
||||
}
|
||||
|
||||
private:
|
||||
ColorFormat m_color_format = ColorFormat::UNDEFINED;
|
||||
std::vector<std::string> m_planes_sub_names;
|
||||
|
||||
element::Type m_type = element::dynamic;
|
||||
bool m_type_set = false;
|
||||
|
||||
Layout m_layout = Layout();
|
||||
bool m_layout_set = false;
|
||||
|
||||
int m_spatial_width = -1;
|
||||
int m_spatial_height = -1;
|
||||
bool m_spatial_shape_set = false;
|
||||
|
||||
std::string m_memory_type = {};
|
||||
bool m_memory_type_set = false;
|
||||
|
||||
PartialShape m_shape = {};
|
||||
bool m_shape_set = false;
|
||||
};
|
||||
|
||||
/// \brief InputInfoImpl - internal data structure
/// Aggregates everything known about one model input: the user's tensor info,
/// the pre-processing step list and model-side info, plus helpers to compute
/// the replacement parameters and to build or dump the pipeline.
struct InputInfo::InputInfoImpl {
    // Intermediate result of 'create_new_params': the original parameter, the
    // per-plane replacement parameters, and the resolved tensor/model layouts.
    struct InputInfoData {
        std::vector<std::shared_ptr<opset8::Parameter>> m_new_params;
        std::shared_ptr<opset8::Parameter> m_param;
        Layout m_model_layout;
        Layout m_tensor_layout;
        // View the new parameters as generic graph outputs for the step chain
        std::vector<Output<Node>> as_nodes() const {
            std::vector<Output<Node>> res;
            std::transform(m_new_params.begin(),
                           m_new_params.end(),
                           std::back_inserter(res),
                           [](const std::shared_ptr<opset8::Parameter>& param) {
                               return param;
                           });
            return res;
        }
    };
    InputInfoImpl() = default;

    // User's input tensor description (type/layout/shape/color/memory type)
    std::unique_ptr<InputTensorInfo::InputTensorInfoImpl>& get_tensor_data() {
        return m_tensor_info.m_impl;
    }

    const std::unique_ptr<InputTensorInfo::InputTensorInfoImpl>& get_tensor_data() const {
        return m_tensor_info.m_impl;
    }

    // User-defined pre-processing step list
    std::unique_ptr<PreProcessSteps::PreProcessStepsImpl>& get_preprocess() {
        return m_preprocess.m_impl;
    }

    const std::unique_ptr<PreProcessSteps::PreProcessStepsImpl>& get_preprocess() const {
        return m_preprocess.m_impl;
    }

    // Model-side info (e.g. explicitly set model layout)
    const std::unique_ptr<InputModelInfo::InputModelInfoImpl>& get_model() const {
        return m_model_data.m_impl;
    }

    // Compute per-plane replacement parameters and resolved layouts;
    // 'existing_names' caches the model's tensor names for collision checks
    InputInfoData create_new_params(std::tuple<std::unordered_set<std::string>, bool>& existing_names,
                                    const std::shared_ptr<Model>& model) const;

    // Build the implicit type/layout conversion steps for the given state
    static PreStepsList create_implicit_steps(const PreprocessingContext& context, element::Type type);

    // Apply all steps and rewire the model; returns true if revalidation needed
    bool build(const std::shared_ptr<Model>& model,
               std::tuple<std::unordered_set<std::string>, bool>& existing_names,
               std::list<std::shared_ptr<opset8::Parameter>>& parameters_list);

    // Print the pre-processing pipeline for debugging (no model changes kept)
    void dump(std::ostream& str,
              const std::shared_ptr<Model>& model,
              std::tuple<std::unordered_set<std::string>, bool>& existing_names) const;

    InputTensorInfo m_tensor_info;
    PreProcessSteps m_preprocess;
    InputModelInfo m_model_data;
    std::shared_ptr<op::v0::Parameter> m_resolved_param;
};
|
||||
|
||||
} // namespace preprocess
|
||||
} // namespace ov
|
@ -37,140 +37,169 @@ static Shape construct_mean_scale_shape(const Output<Node>& node,
|
||||
return {v};
|
||||
}
|
||||
|
||||
// Renders a vector as "(v0,v1,...)" for diagnostic messages;
// an empty vector yields an empty string (no parentheses).
template <typename T>
static std::string vector_to_string(const std::vector<T>& values) {
    if (values.empty()) {
        return {};
    }
    std::stringstream out;
    out << "(";
    const char* separator = "";
    for (const auto& value : values) {
        out << separator << value;
        separator = ",";
    }
    out << ")";
    return out.str();
}
|
||||
|
||||
// Registers a 'scale' (divide) preprocessing step. The step divides the single
// input plane by a constant built from 'values': a scalar when one value is
// given, otherwise a shape aligned with the channels dimension via
// construct_mean_scale_shape. The action is named "scale (v0,v1,...)" so dumps
// can show the values. Requires a real (floating) element type.
// NOTE(review): this span was a scrape of a diff hunk with the old and new
// lambda bodies interleaved; reconstructed to the coherent new version that
// passes an action name alongside the lambda.
void PreStepsList::add_scale_impl(const std::vector<float>& values) {
    m_actions.emplace_back(
        [values](const std::vector<Output<Node>>& nodes,
                 const std::shared_ptr<ov::Model>& function,
                 PreprocessingContext& context) -> std::tuple<std::vector<Output<Node>>, bool> {
            OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't apply scale preprocessing for empty input.");
            OPENVINO_ASSERT(nodes.size() == 1,
                            "Can't apply scale preprocessing for multi-plane input. Suggesting to convert current "
                            "image to RGB/BGR color format using 'convert_color'");
            Shape shape;
            if (values.size() == 1) {
                shape = Shape{1};  // scalar scale broadcasts over all dims
            } else {
                shape = construct_mean_scale_shape(nodes[0], values.size(), context);
            }
            auto element_type = nodes[0].get_element_type();
            OPENVINO_ASSERT(element_type.is_real(),
                            "Scale preprocessing can be applied to 'float' inputs. Consider using of "
                            "'convert_element_type' before scaling. Current type is: ",
                            element_type);

            auto constant = op::v0::Constant::create(element_type, shape, values);

            auto new_op = std::make_shared<op::v1::Divide>(nodes[0], constant);
            set_is_preprocessing_node(new_op);
            // 'false' - this step never requires model revalidation by itself
            return std::make_tuple(std::vector<Output<Node>>{new_op}, false);
        },
        "scale " + vector_to_string(values));
}
|
||||
|
||||
void PreStepsList::add_mean_impl(const std::vector<float>& values) {
|
||||
m_actions.emplace_back([values](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<ov::Model>& function,
|
||||
PreprocessingContext& context) {
|
||||
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't apply mean preprocessing for empty input.");
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Can't apply scale preprocessing for multi-plane input. Suggesting to convert current "
|
||||
"image to RGB/BGR color format using 'convert_color'");
|
||||
Shape shape;
|
||||
if (values.size() == 1) {
|
||||
shape = Shape{1};
|
||||
} else {
|
||||
shape = construct_mean_scale_shape(nodes[0], values.size(), context);
|
||||
}
|
||||
auto element_type = nodes[0].get_element_type();
|
||||
OPENVINO_ASSERT(element_type.is_real(),
|
||||
"Mean preprocessing can be applied to 'float' inputs. Consider using of 'convert_element_type' "
|
||||
"before scaling. Current type is: ",
|
||||
element_type);
|
||||
m_actions.emplace_back(
|
||||
[values](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<ov::Model>& function,
|
||||
PreprocessingContext& context) {
|
||||
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't apply mean preprocessing for empty input.");
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Can't apply scale preprocessing for multi-plane input. Suggesting to convert current "
|
||||
"image to RGB/BGR color format using 'convert_color'");
|
||||
Shape shape;
|
||||
if (values.size() == 1) {
|
||||
shape = Shape{1};
|
||||
} else {
|
||||
shape = construct_mean_scale_shape(nodes[0], values.size(), context);
|
||||
}
|
||||
auto element_type = nodes[0].get_element_type();
|
||||
OPENVINO_ASSERT(
|
||||
element_type.is_real(),
|
||||
"Mean preprocessing can be applied to 'float' inputs. Consider using of 'convert_element_type' "
|
||||
"before scaling. Current type is: ",
|
||||
element_type);
|
||||
|
||||
auto constant = op::v0::Constant::create(element_type, shape, values);
|
||||
auto constant = op::v0::Constant::create(element_type, shape, values);
|
||||
|
||||
auto new_op = std::make_shared<op::v1::Subtract>(nodes[0], constant);
|
||||
set_is_preprocessing_node(new_op);
|
||||
return std::make_tuple(std::vector<Output<Node>>{new_op}, false);
|
||||
});
|
||||
auto new_op = std::make_shared<op::v1::Subtract>(nodes[0], constant);
|
||||
set_is_preprocessing_node(new_op);
|
||||
return std::make_tuple(std::vector<Output<Node>>{new_op}, false);
|
||||
},
|
||||
"mean " + vector_to_string(values));
|
||||
}
|
||||
|
||||
void PreStepsList::add_convert_impl(const element::Type& type) {
|
||||
m_actions.emplace_back([type](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& ctxt) {
|
||||
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't set element type for empty input.");
|
||||
std::vector<Output<Node>> res;
|
||||
element::Type t = type;
|
||||
if (t == element::Type{}) {
|
||||
t = ctxt.target_element_type();
|
||||
}
|
||||
for (const auto& node : nodes) {
|
||||
OPENVINO_ASSERT(node.get_element_type().is_static(),
|
||||
"Can't insert 'convert_element_type' for dynamic source tensor type.");
|
||||
if (t != node.get_element_type()) {
|
||||
auto convert = std::make_shared<op::v0::Convert>(node, t);
|
||||
res.emplace_back(convert);
|
||||
} else {
|
||||
res.emplace_back(node);
|
||||
m_actions.emplace_back(
|
||||
[type](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& ctxt) {
|
||||
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't set element type for empty input.");
|
||||
std::vector<Output<Node>> res;
|
||||
element::Type t = type;
|
||||
if (t == element::Type{}) {
|
||||
t = ctxt.target_element_type();
|
||||
}
|
||||
}
|
||||
// return false to avoid excess function revalidations as conversion of types
|
||||
// doesn't require shape or type propagation.
|
||||
return std::make_tuple(res, false);
|
||||
});
|
||||
for (const auto& node : nodes) {
|
||||
OPENVINO_ASSERT(node.get_element_type().is_static(),
|
||||
"Can't insert 'convert_element_type' for dynamic source tensor type.");
|
||||
if (t != node.get_element_type()) {
|
||||
auto convert = std::make_shared<op::v0::Convert>(node, t);
|
||||
res.emplace_back(convert);
|
||||
} else {
|
||||
res.emplace_back(node);
|
||||
}
|
||||
}
|
||||
// return false to avoid excess function revalidations as conversion of types
|
||||
// doesn't require shape or type propagation.
|
||||
return std::make_tuple(res, false);
|
||||
},
|
||||
"convert type (" + type.get_type_name() + ")");
|
||||
}
|
||||
|
||||
void PreStepsList::add_resize_impl(ResizeAlgorithm alg, int dst_height, int dst_width) {
|
||||
using InterpolateMode = op::v4::Interpolate::InterpolateMode;
|
||||
m_actions.emplace_back([alg, dst_width, dst_height](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& ctxt) {
|
||||
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't add resize for empty input.");
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Can't resize multi-plane input. Suggesting to convert current image to "
|
||||
"RGB/BGR color format using 'PreProcessSteps::convert_color'");
|
||||
auto to_mode = [](ResizeAlgorithm alg) -> InterpolateMode {
|
||||
switch (alg) {
|
||||
case ResizeAlgorithm::RESIZE_NEAREST:
|
||||
return InterpolateMode::NEAREST;
|
||||
case ResizeAlgorithm::RESIZE_CUBIC:
|
||||
return InterpolateMode::CUBIC;
|
||||
case ResizeAlgorithm::RESIZE_LINEAR:
|
||||
default:
|
||||
return InterpolateMode::LINEAR;
|
||||
std::string name;
|
||||
if (dst_width > 0 && dst_height > 0) {
|
||||
name = "resize to (" + std::to_string(dst_height) + ", " + std::to_string(dst_width) + ")";
|
||||
} else {
|
||||
name = "resize to model width/height";
|
||||
}
|
||||
m_actions.emplace_back(
|
||||
[alg, dst_width, dst_height](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& ctxt) {
|
||||
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't add resize for empty input.");
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Can't resize multi-plane input. Suggesting to convert current image to "
|
||||
"RGB/BGR color format using 'PreProcessSteps::convert_color'");
|
||||
auto to_mode = [](ResizeAlgorithm alg) -> InterpolateMode {
|
||||
switch (alg) {
|
||||
case ResizeAlgorithm::RESIZE_NEAREST:
|
||||
return InterpolateMode::NEAREST;
|
||||
case ResizeAlgorithm::RESIZE_CUBIC:
|
||||
return InterpolateMode::CUBIC;
|
||||
case ResizeAlgorithm::RESIZE_LINEAR:
|
||||
default:
|
||||
return InterpolateMode::LINEAR;
|
||||
}
|
||||
};
|
||||
auto node = nodes.front();
|
||||
auto layout = ctxt.layout();
|
||||
OPENVINO_ASSERT(ov::layout::has_height(layout) && ov::layout::has_width(layout),
|
||||
"Can't add resize for layout without W/H specified. Use 'set_layout' API to define layout "
|
||||
"of image data, like `NCHW`");
|
||||
auto node_rank = node.get_partial_shape().rank();
|
||||
OPENVINO_ASSERT(node_rank.is_static(), "Resize operation is not supported for fully dynamic shape");
|
||||
|
||||
auto height_idx = static_cast<int64_t>(get_and_check_height_idx(layout, node.get_partial_shape()));
|
||||
auto width_idx = static_cast<int64_t>(get_and_check_width_idx(layout, node.get_partial_shape()));
|
||||
if (dst_height < 0 || dst_width < 0) {
|
||||
OPENVINO_ASSERT(ctxt.model_shape().rank().is_static(),
|
||||
"Resize is not fully specified while target model shape is dynamic");
|
||||
}
|
||||
};
|
||||
auto node = nodes.front();
|
||||
auto layout = ctxt.layout();
|
||||
OPENVINO_ASSERT(ov::layout::has_height(layout) && ov::layout::has_width(layout),
|
||||
"Can't add resize for layout without W/H specified. Use 'set_layout' API to define layout "
|
||||
"of image data, like `NCHW`");
|
||||
auto node_rank = node.get_partial_shape().rank();
|
||||
OPENVINO_ASSERT(node_rank.is_static(), "Resize operation is not supported for fully dynamic shape");
|
||||
int new_image_width = dst_width < 0 ? static_cast<int>(ctxt.get_model_width_for_resize()) : dst_width;
|
||||
int new_image_height = dst_height < 0 ? static_cast<int>(ctxt.get_model_height_for_resize()) : dst_height;
|
||||
|
||||
auto height_idx = static_cast<int64_t>(get_and_check_height_idx(layout, node.get_partial_shape()));
|
||||
auto width_idx = static_cast<int64_t>(get_and_check_width_idx(layout, node.get_partial_shape()));
|
||||
if (dst_height < 0 || dst_width < 0) {
|
||||
OPENVINO_ASSERT(ctxt.model_shape().rank().is_static(),
|
||||
"Resize is not fully specified while target model shape is dynamic");
|
||||
}
|
||||
int new_image_width = dst_width < 0 ? static_cast<int>(ctxt.get_model_width_for_resize()) : dst_width;
|
||||
int new_image_height = dst_height < 0 ? static_cast<int>(ctxt.get_model_height_for_resize()) : dst_height;
|
||||
auto target_spatial_shape =
|
||||
op::v0::Constant::create<int64_t>(element::i64, Shape{2}, {new_image_height, new_image_width});
|
||||
auto scales = op::v0::Constant::create<float>(element::f32, Shape{2}, {1, 1});
|
||||
// In future consider replacing this to set of new OV operations like `getDimByName(node, "H")`
|
||||
// This is to allow specifying layout on 'evaluation' stage
|
||||
auto axes = op::v0::Constant::create<int64_t>(element::i64, Shape{2}, {height_idx, width_idx});
|
||||
|
||||
auto target_spatial_shape =
|
||||
op::v0::Constant::create<int64_t>(element::i64, Shape{2}, {new_image_height, new_image_width});
|
||||
auto scales = op::v0::Constant::create<float>(element::f32, Shape{2}, {1, 1});
|
||||
// In future consider replacing this to set of new OV operations like `getDimByName(node, "H")`
|
||||
// This is to allow specifying layout on 'evaluation' stage
|
||||
auto axes = op::v0::Constant::create<int64_t>(element::i64, Shape{2}, {height_idx, width_idx});
|
||||
op::v4::Interpolate::InterpolateAttrs attrs(to_mode(alg),
|
||||
op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
{0, 0},
|
||||
{0, 0});
|
||||
|
||||
op::v4::Interpolate::InterpolateAttrs attrs(to_mode(alg),
|
||||
op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||
{0, 0},
|
||||
{0, 0});
|
||||
|
||||
auto interp = std::make_shared<op::v4::Interpolate>(node, target_spatial_shape, scales, axes, attrs);
|
||||
return std::make_tuple(std::vector<Output<Node>>{interp}, true);
|
||||
});
|
||||
auto interp = std::make_shared<op::v4::Interpolate>(node, target_spatial_shape, scales, axes, attrs);
|
||||
return std::make_tuple(std::vector<Output<Node>>{interp}, true);
|
||||
},
|
||||
name);
|
||||
}
|
||||
|
||||
Layout PreStepsList::propagate_layout(const Layout& tensor_layout) const {
|
||||
@ -185,46 +214,50 @@ void PreStepsList::add_convert_layout_impl(const Layout& layout) {
|
||||
m_forward_layout_converts.clear();
|
||||
m_last_explicit_layout = layout;
|
||||
m_last_explicit_layout_set = true;
|
||||
m_actions.emplace_back([layout](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& context) {
|
||||
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't convert layout for empty input.");
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Can't convert layout for multi-plane input. Suggesting to convert current image to "
|
||||
"RGB/BGR color format using 'convert_color'");
|
||||
Layout dst_layout = layout.empty() ? context.target_layout() : layout;
|
||||
auto node = nodes[0];
|
||||
auto shape = node.get_partial_shape();
|
||||
size_t add_cnt;
|
||||
Layout unsqueeze_layout;
|
||||
std::tie(shape, unsqueeze_layout, add_cnt) = layout::utils::find_unsqueeze(context.layout(), shape, dst_layout);
|
||||
if (add_cnt) {
|
||||
std::vector<size_t> dims;
|
||||
dims.push_back(add_cnt);
|
||||
Shape const_shape(dims);
|
||||
std::vector<int64_t> vals(add_cnt);
|
||||
for (auto i = 0; i < add_cnt; i++) {
|
||||
vals[i] = i;
|
||||
m_actions.emplace_back(
|
||||
[layout](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& context) {
|
||||
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't convert layout for empty input.");
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Can't convert layout for multi-plane input. Suggesting to convert current image to "
|
||||
"RGB/BGR color format using 'convert_color'");
|
||||
Layout dst_layout = layout.empty() ? context.target_layout() : layout;
|
||||
auto node = nodes[0];
|
||||
auto shape = node.get_partial_shape();
|
||||
size_t add_cnt;
|
||||
Layout unsqueeze_layout;
|
||||
std::tie(shape, unsqueeze_layout, add_cnt) =
|
||||
layout::utils::find_unsqueeze(context.layout(), shape, dst_layout);
|
||||
if (add_cnt) {
|
||||
std::vector<size_t> dims;
|
||||
dims.push_back(add_cnt);
|
||||
Shape const_shape(dims);
|
||||
std::vector<int64_t> vals(add_cnt);
|
||||
for (auto i = 0; i < add_cnt; i++) {
|
||||
vals[i] = i;
|
||||
}
|
||||
auto axes = op::v0::Constant::create<int64_t>(element::i64, const_shape, vals);
|
||||
// Add unsqueeze on top
|
||||
node = std::make_shared<opset8::Unsqueeze>(node, axes);
|
||||
}
|
||||
auto axes = op::v0::Constant::create<int64_t>(element::i64, const_shape, vals);
|
||||
// Add unsqueeze on top
|
||||
node = std::make_shared<opset8::Unsqueeze>(node, axes);
|
||||
}
|
||||
auto permutation = layout::utils::find_permutation(unsqueeze_layout, shape, dst_layout);
|
||||
if (permutation.empty()) {
|
||||
// No transpose is needed, just update layout
|
||||
if (!layout.empty()) {
|
||||
context.layout() = layout;
|
||||
auto permutation = layout::utils::find_permutation(unsqueeze_layout, shape, dst_layout);
|
||||
if (permutation.empty()) {
|
||||
// No transpose is needed, just update layout
|
||||
if (!layout.empty()) {
|
||||
context.layout() = layout;
|
||||
}
|
||||
return std::make_tuple(nodes, false);
|
||||
}
|
||||
return std::make_tuple(nodes, false);
|
||||
}
|
||||
auto perm_constant = op::v0::Constant::create<int64_t>(element::i64, Shape{permutation.size()}, permutation);
|
||||
auto transpose = std::make_shared<op::v1::Transpose>(node, perm_constant);
|
||||
context.layout() = dst_layout; // Update context's current layout
|
||||
// return false to avoid excess function revalidations as layout conversion
|
||||
// doesn't require shape or type propagation.
|
||||
return std::make_tuple(std::vector<Output<Node>>{transpose}, false);
|
||||
});
|
||||
auto perm_constant =
|
||||
op::v0::Constant::create<int64_t>(element::i64, Shape{permutation.size()}, permutation);
|
||||
auto transpose = std::make_shared<op::v1::Transpose>(node, perm_constant);
|
||||
context.layout() = dst_layout; // Update context's current layout
|
||||
// return false to avoid excess function revalidations as layout conversion
|
||||
// doesn't require shape or type propagation.
|
||||
return std::make_tuple(std::vector<Output<Node>>{transpose}, false);
|
||||
},
|
||||
"convert layout " + layout.to_string());
|
||||
}
|
||||
|
||||
void PreStepsList::add_convert_layout_impl(const std::vector<uint64_t>& dims) {
|
||||
@ -233,21 +266,23 @@ void PreStepsList::add_convert_layout_impl(const std::vector<uint64_t>& dims) {
|
||||
}
|
||||
m_layout_converts.emplace_front(dims);
|
||||
m_forward_layout_converts.emplace_back(dims);
|
||||
m_actions.emplace_back([dims](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& context) {
|
||||
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't convert layout for empty input.");
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Can't convert layout for multi-plane input. Suggesting to convert current image to "
|
||||
"RGB/BGR color format using 'convert_color'");
|
||||
auto new_layout = layout::utils::apply_permutation(context.layout(), dims);
|
||||
auto perm_constant = op::v0::Constant::create<uint64_t>(element::u64, Shape{dims.size()}, dims);
|
||||
auto transpose = std::make_shared<op::v1::Transpose>(nodes[0], perm_constant);
|
||||
context.layout() = std::move(new_layout); // Update context's current layout
|
||||
// return false to avoid excess function revalidations as layout conversion
|
||||
// doesn't require shape or type propagation.
|
||||
return std::make_tuple(std::vector<Output<Node>>{transpose}, false);
|
||||
});
|
||||
m_actions.emplace_back(
|
||||
[dims](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& context) {
|
||||
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't convert layout for empty input.");
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Can't convert layout for multi-plane input. Suggesting to convert current image to "
|
||||
"RGB/BGR color format using 'convert_color'");
|
||||
auto new_layout = layout::utils::apply_permutation(context.layout(), dims);
|
||||
auto perm_constant = op::v0::Constant::create<uint64_t>(element::u64, Shape{dims.size()}, dims);
|
||||
auto transpose = std::make_shared<op::v1::Transpose>(nodes[0], perm_constant);
|
||||
context.layout() = std::move(new_layout); // Update context's current layout
|
||||
// return false to avoid excess function revalidations as layout conversion
|
||||
// doesn't require shape or type propagation.
|
||||
return std::make_tuple(std::vector<Output<Node>>{transpose}, false);
|
||||
},
|
||||
"convert layout " + vector_to_string(dims));
|
||||
}
|
||||
|
||||
std::tuple<PartialShape, Layout> PreStepsList::calculate_param_shape(const PartialShape& model_shape,
|
||||
@ -276,134 +311,144 @@ std::tuple<PartialShape, Layout> PreStepsList::calculate_param_shape(const Parti
|
||||
}
|
||||
|
||||
void PreStepsList::add_convert_color_impl(const ColorFormat& dst_format) {
|
||||
m_actions.emplace_back([dst_format](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& context) {
|
||||
if (context.color_format() == dst_format) {
|
||||
return std::make_tuple(nodes, false);
|
||||
}
|
||||
if (context.color_format() == ColorFormat::NV12_SINGLE_PLANE) {
|
||||
OPENVINO_ASSERT(nodes.size() == 1, "Internal error: single plane NV12 image can't have multiple inputs");
|
||||
std::shared_ptr<Node> convert;
|
||||
switch (dst_format) {
|
||||
case ColorFormat::RGB:
|
||||
convert = std::make_shared<op::v8::NV12toRGB>(nodes[0]);
|
||||
break;
|
||||
case ColorFormat::BGR:
|
||||
convert = std::make_shared<op::v8::NV12toBGR>(nodes[0]);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_ASSERT(false,
|
||||
"Unsupported conversion from NV12 to '",
|
||||
color_format_name(dst_format),
|
||||
"' format:");
|
||||
m_actions.emplace_back(
|
||||
[dst_format](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& context) {
|
||||
if (context.color_format() == dst_format) {
|
||||
return std::make_tuple(nodes, false);
|
||||
}
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::vector<Output<Node>>{convert}, true);
|
||||
} else if (context.color_format() == ColorFormat::NV12_TWO_PLANES) {
|
||||
OPENVINO_ASSERT(nodes.size() == 2, "Internal error: two-plane NV12 image must have exactly two inputs");
|
||||
std::shared_ptr<Node> convert;
|
||||
switch (dst_format) {
|
||||
case ColorFormat::RGB:
|
||||
convert = std::make_shared<op::v8::NV12toRGB>(nodes[0], nodes[1]);
|
||||
break;
|
||||
case ColorFormat::BGR:
|
||||
convert = std::make_shared<op::v8::NV12toBGR>(nodes[0], nodes[1]);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_ASSERT(false,
|
||||
"Unsupported conversion from NV12 to '",
|
||||
color_format_name(dst_format),
|
||||
"' format:");
|
||||
if (context.color_format() == ColorFormat::NV12_SINGLE_PLANE) {
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Internal error: single plane NV12 image can't have multiple inputs");
|
||||
std::shared_ptr<Node> convert;
|
||||
switch (dst_format) {
|
||||
case ColorFormat::RGB:
|
||||
convert = std::make_shared<op::v8::NV12toRGB>(nodes[0]);
|
||||
break;
|
||||
case ColorFormat::BGR:
|
||||
convert = std::make_shared<op::v8::NV12toBGR>(nodes[0]);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_ASSERT(false,
|
||||
"Unsupported conversion from NV12 to '",
|
||||
color_format_name(dst_format),
|
||||
"' format:");
|
||||
}
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::vector<Output<Node>>{convert}, true);
|
||||
} else if (context.color_format() == ColorFormat::NV12_TWO_PLANES) {
|
||||
OPENVINO_ASSERT(nodes.size() == 2, "Internal error: two-plane NV12 image must have exactly two inputs");
|
||||
std::shared_ptr<Node> convert;
|
||||
switch (dst_format) {
|
||||
case ColorFormat::RGB:
|
||||
convert = std::make_shared<op::v8::NV12toRGB>(nodes[0], nodes[1]);
|
||||
break;
|
||||
case ColorFormat::BGR:
|
||||
convert = std::make_shared<op::v8::NV12toBGR>(nodes[0], nodes[1]);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_ASSERT(false,
|
||||
"Unsupported conversion from NV12 to '",
|
||||
color_format_name(dst_format),
|
||||
"' format:");
|
||||
}
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::vector<Output<Node>>{convert}, true);
|
||||
} else if (context.color_format() == ColorFormat::I420_SINGLE_PLANE) {
|
||||
OPENVINO_ASSERT(nodes.size() == 1,
|
||||
"Internal error: single plane I420 image can't have multiple inputs");
|
||||
std::shared_ptr<Node> convert;
|
||||
switch (dst_format) {
|
||||
case ColorFormat::RGB:
|
||||
convert = std::make_shared<op::v8::I420toRGB>(nodes[0]);
|
||||
break;
|
||||
case ColorFormat::BGR:
|
||||
convert = std::make_shared<op::v8::I420toBGR>(nodes[0]);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_ASSERT(false,
|
||||
"Unsupported conversion from I420 to '",
|
||||
color_format_name(dst_format),
|
||||
"' format:");
|
||||
}
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::vector<Output<Node>>{convert}, true);
|
||||
} else if (context.color_format() == ColorFormat::I420_THREE_PLANES) {
|
||||
OPENVINO_ASSERT(nodes.size() == 3,
|
||||
"Internal error: three-plane I420 image must have exactly three inputs");
|
||||
std::shared_ptr<Node> convert;
|
||||
switch (dst_format) {
|
||||
case ColorFormat::RGB:
|
||||
convert = std::make_shared<op::v8::I420toRGB>(nodes[0], nodes[1], nodes[2]);
|
||||
break;
|
||||
case ColorFormat::BGR:
|
||||
convert = std::make_shared<op::v8::I420toBGR>(nodes[0], nodes[1], nodes[2]);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_ASSERT(false,
|
||||
"Unsupported conversion from I420 to '",
|
||||
color_format_name(dst_format),
|
||||
"' format:");
|
||||
}
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::vector<Output<Node>>{convert}, true);
|
||||
}
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::vector<Output<Node>>{convert}, true);
|
||||
} else if (context.color_format() == ColorFormat::I420_SINGLE_PLANE) {
|
||||
OPENVINO_ASSERT(nodes.size() == 1, "Internal error: single plane I420 image can't have multiple inputs");
|
||||
std::shared_ptr<Node> convert;
|
||||
switch (dst_format) {
|
||||
case ColorFormat::RGB:
|
||||
convert = std::make_shared<op::v8::I420toRGB>(nodes[0]);
|
||||
break;
|
||||
case ColorFormat::BGR:
|
||||
convert = std::make_shared<op::v8::I420toBGR>(nodes[0]);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_ASSERT(false,
|
||||
"Unsupported conversion from I420 to '",
|
||||
color_format_name(dst_format),
|
||||
"' format:");
|
||||
}
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::vector<Output<Node>>{convert}, true);
|
||||
} else if (context.color_format() == ColorFormat::I420_THREE_PLANES) {
|
||||
OPENVINO_ASSERT(nodes.size() == 3, "Internal error: three-plane I420 image must have exactly three inputs");
|
||||
std::shared_ptr<Node> convert;
|
||||
switch (dst_format) {
|
||||
case ColorFormat::RGB:
|
||||
convert = std::make_shared<op::v8::I420toRGB>(nodes[0], nodes[1], nodes[2]);
|
||||
break;
|
||||
case ColorFormat::BGR:
|
||||
convert = std::make_shared<op::v8::I420toBGR>(nodes[0], nodes[1], nodes[2]);
|
||||
break;
|
||||
default:
|
||||
OPENVINO_ASSERT(false,
|
||||
"Unsupported conversion from I420 to '",
|
||||
color_format_name(dst_format),
|
||||
"' format:");
|
||||
}
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::vector<Output<Node>>{convert}, true);
|
||||
}
|
||||
if ((context.color_format() == ColorFormat::RGB || context.color_format() == ColorFormat::BGR) &&
|
||||
(dst_format == ColorFormat::RGB || dst_format == ColorFormat::BGR)) {
|
||||
auto res = reverse_channels(nodes, function, context);
|
||||
context.color_format() = dst_format;
|
||||
return res;
|
||||
}
|
||||
if (context.color_format() == ColorFormat::RGBX) {
|
||||
if (dst_format == ColorFormat::RGB) {
|
||||
auto res = cut_last_channel(nodes, function, context);
|
||||
if ((context.color_format() == ColorFormat::RGB || context.color_format() == ColorFormat::BGR) &&
|
||||
(dst_format == ColorFormat::RGB || dst_format == ColorFormat::BGR)) {
|
||||
auto res = reverse_channels(nodes, function, context);
|
||||
context.color_format() = dst_format;
|
||||
return res;
|
||||
} else if (dst_format == ColorFormat::BGR) {
|
||||
auto cut = cut_last_channel(nodes, function, context);
|
||||
auto reverse = reverse_channels(std::get<0>(cut), function, context);
|
||||
bool updated = std::get<1>(cut) | std::get<1>(reverse);
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::get<0>(reverse), updated);
|
||||
}
|
||||
}
|
||||
if (context.color_format() == ColorFormat::BGRX) {
|
||||
if (dst_format == ColorFormat::BGR) {
|
||||
auto res = cut_last_channel(nodes, function, context);
|
||||
context.color_format() = dst_format;
|
||||
return res;
|
||||
} else if (dst_format == ColorFormat::RGB) {
|
||||
auto cut = cut_last_channel(nodes, function, context);
|
||||
auto reverse = reverse_channels(std::get<0>(cut), function, context);
|
||||
bool updated = std::get<1>(cut) | std::get<1>(reverse);
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::get<0>(reverse), updated);
|
||||
if (context.color_format() == ColorFormat::RGBX) {
|
||||
if (dst_format == ColorFormat::RGB) {
|
||||
auto res = cut_last_channel(nodes, function, context);
|
||||
context.color_format() = dst_format;
|
||||
return res;
|
||||
} else if (dst_format == ColorFormat::BGR) {
|
||||
auto cut = cut_last_channel(nodes, function, context);
|
||||
auto reverse = reverse_channels(std::get<0>(cut), function, context);
|
||||
bool updated = std::get<1>(cut) | std::get<1>(reverse);
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::get<0>(reverse), updated);
|
||||
}
|
||||
}
|
||||
}
|
||||
OPENVINO_ASSERT(false,
|
||||
"Source color format '",
|
||||
color_format_name(context.color_format()),
|
||||
"' is not convertible to any other");
|
||||
});
|
||||
if (context.color_format() == ColorFormat::BGRX) {
|
||||
if (dst_format == ColorFormat::BGR) {
|
||||
auto res = cut_last_channel(nodes, function, context);
|
||||
context.color_format() = dst_format;
|
||||
return res;
|
||||
} else if (dst_format == ColorFormat::RGB) {
|
||||
auto cut = cut_last_channel(nodes, function, context);
|
||||
auto reverse = reverse_channels(std::get<0>(cut), function, context);
|
||||
bool updated = std::get<1>(cut) | std::get<1>(reverse);
|
||||
context.color_format() = dst_format;
|
||||
return std::make_tuple(std::get<0>(reverse), updated);
|
||||
}
|
||||
}
|
||||
OPENVINO_ASSERT(false,
|
||||
"Source color format '",
|
||||
color_format_name(context.color_format()),
|
||||
"' is not convertible to '",
|
||||
color_format_name(dst_format),
|
||||
"'");
|
||||
},
|
||||
"convert color (" + color_format_name(dst_format) + ")");
|
||||
}
|
||||
|
||||
void PreStepsList::add_reverse_channels() {
|
||||
m_actions.emplace_back([](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& context) {
|
||||
auto resp = reverse_channels(nodes, function, context);
|
||||
auto outputs = std::get<0>(resp);
|
||||
OPENVINO_ASSERT(outputs.size() == 1, "Internal error: reverse_channels returned unexpected number of outputs");
|
||||
set_is_preprocessing_node(outputs.at(0).get_node_shared_ptr());
|
||||
return resp;
|
||||
});
|
||||
m_actions.emplace_back(
|
||||
[](const std::vector<Output<Node>>& nodes,
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& context) {
|
||||
auto resp = reverse_channels(nodes, function, context);
|
||||
auto outputs = std::get<0>(resp);
|
||||
OPENVINO_ASSERT(outputs.size() == 1,
|
||||
"Internal error: reverse_channels returned unexpected number of outputs");
|
||||
set_is_preprocessing_node(outputs.at(0).get_node_shared_ptr());
|
||||
return resp;
|
||||
},
|
||||
"reverse channels");
|
||||
}
|
||||
|
||||
std::tuple<std::vector<Output<Node>>, bool> PreStepsList::reverse_channels(const std::vector<Output<Node>>& nodes,
|
||||
@ -477,52 +522,59 @@ std::tuple<std::vector<Output<Node>>, bool> PreStepsList::cut_last_channel(const
|
||||
|
||||
//------------- Post processing ------
|
||||
void PostStepsList::add_convert_impl(const element::Type& type) {
|
||||
m_actions.emplace_back([type](const Output<Node>& node, PostprocessingContext& ctxt) {
|
||||
element::Type t = type;
|
||||
if (t == element::Type{}) {
|
||||
t = ctxt.target_element_type();
|
||||
}
|
||||
if (t == node.get_element_type()) {
|
||||
return std::make_tuple(node, false);
|
||||
}
|
||||
OPENVINO_ASSERT(
|
||||
!t.is_dynamic() && t != element::undefined,
|
||||
"Can't convert to dynamic/unknown element type, consider using of InputTensorInfo::set_element_type");
|
||||
auto convert = std::make_shared<op::v0::Convert>(node, t);
|
||||
return std::make_tuple(Output<Node>(convert), true);
|
||||
});
|
||||
m_actions.emplace_back(
|
||||
[type](const Output<Node>& node, PostprocessingContext& ctxt) {
|
||||
element::Type t = type;
|
||||
if (t == element::Type{}) {
|
||||
t = ctxt.target_element_type();
|
||||
}
|
||||
if (t == node.get_element_type()) {
|
||||
return std::make_tuple(node, false);
|
||||
}
|
||||
OPENVINO_ASSERT(
|
||||
!t.is_dynamic() && t != element::undefined,
|
||||
"Can't convert to dynamic/unknown element type, consider using of InputTensorInfo::set_element_type");
|
||||
auto convert = std::make_shared<op::v0::Convert>(node, t);
|
||||
return std::make_tuple(Output<Node>(convert), true);
|
||||
},
|
||||
"convert type (" + type.get_type_name() + ")");
|
||||
}
|
||||
|
||||
void PostStepsList::add_convert_layout_impl(const Layout& layout) {
|
||||
m_actions.emplace_back([layout](const Output<Node>& node, PostprocessingContext& context) {
|
||||
Layout dst_layout = layout.empty() ? context.target_layout() : layout;
|
||||
auto permutation = layout::utils::find_permutation(context.layout(), node.get_partial_shape(), dst_layout);
|
||||
if (permutation.empty()) {
|
||||
// No transpose is needed, just update layout
|
||||
if (!layout.empty()) {
|
||||
context.layout() = layout;
|
||||
m_actions.emplace_back(
|
||||
[layout](const Output<Node>& node, PostprocessingContext& context) {
|
||||
Layout dst_layout = layout.empty() ? context.target_layout() : layout;
|
||||
auto permutation = layout::utils::find_permutation(context.layout(), node.get_partial_shape(), dst_layout);
|
||||
if (permutation.empty()) {
|
||||
// No transpose is needed, just update layout
|
||||
if (!layout.empty()) {
|
||||
context.layout() = layout;
|
||||
}
|
||||
return std::make_tuple(node, false);
|
||||
}
|
||||
return std::make_tuple(node, false);
|
||||
}
|
||||
auto perm_constant = op::v0::Constant::create<int64_t>(element::i64, Shape{permutation.size()}, permutation);
|
||||
auto transpose = std::make_shared<op::v1::Transpose>(node, perm_constant);
|
||||
context.layout() = dst_layout; // Update context's current layout
|
||||
return std::make_tuple(Output<Node>(transpose), true);
|
||||
});
|
||||
auto perm_constant =
|
||||
op::v0::Constant::create<int64_t>(element::i64, Shape{permutation.size()}, permutation);
|
||||
auto transpose = std::make_shared<op::v1::Transpose>(node, perm_constant);
|
||||
context.layout() = dst_layout; // Update context's current layout
|
||||
return std::make_tuple(Output<Node>(transpose), true);
|
||||
},
|
||||
"convert layout " + layout.to_string());
|
||||
}
|
||||
|
||||
void PostStepsList::add_convert_layout_impl(const std::vector<uint64_t>& dims) {
|
||||
if (dims.empty()) {
|
||||
return;
|
||||
}
|
||||
m_actions.emplace_back([dims](const Output<Node>& node, PostprocessingContext& context) {
|
||||
auto perm_constant = op::v0::Constant::create<uint64_t>(element::u64, Shape{dims.size()}, dims);
|
||||
auto new_layout = layout::utils::apply_permutation(context.layout(), dims);
|
||||
auto transpose = std::make_shared<op::v1::Transpose>(node, perm_constant);
|
||||
auto res = std::make_tuple(Output<Node>(transpose), true);
|
||||
context.layout() = std::move(new_layout); // Update context's current layout
|
||||
return res;
|
||||
});
|
||||
m_actions.emplace_back(
|
||||
[dims](const Output<Node>& node, PostprocessingContext& context) {
|
||||
auto perm_constant = op::v0::Constant::create<uint64_t>(element::u64, Shape{dims.size()}, dims);
|
||||
auto new_layout = layout::utils::apply_permutation(context.layout(), dims);
|
||||
auto transpose = std::make_shared<op::v1::Transpose>(node, perm_constant);
|
||||
auto res = std::make_tuple(Output<Node>(transpose), true);
|
||||
context.layout() = std::move(new_layout); // Update context's current layout
|
||||
return res;
|
||||
},
|
||||
"convert layout " + vector_to_string(dims));
|
||||
}
|
||||
} // namespace preprocess
|
||||
} // namespace ov
|
||||
|
@ -146,6 +146,14 @@ using InternalPreprocessOp =
|
||||
const std::shared_ptr<Model>& function,
|
||||
PreprocessingContext& context)>;
|
||||
|
||||
struct InternalPreprocessAction {
|
||||
InternalPreprocessAction(InternalPreprocessOp op, std::string name)
|
||||
: m_op(std::move(op)),
|
||||
m_name(std::move(name)) {}
|
||||
InternalPreprocessOp m_op;
|
||||
std::string m_name;
|
||||
};
|
||||
|
||||
/// \brief PreProcessStepsImpl - internal data structure
|
||||
class PreStepsList {
|
||||
public:
|
||||
@ -160,10 +168,10 @@ public:
|
||||
std::tuple<PartialShape, Layout> calculate_param_shape(const PartialShape& model_shape,
|
||||
const Layout& model_layout) const;
|
||||
|
||||
const std::list<InternalPreprocessOp>& actions() const {
|
||||
const std::list<InternalPreprocessAction>& actions() const {
|
||||
return m_actions;
|
||||
}
|
||||
std::list<InternalPreprocessOp>& actions() {
|
||||
std::list<InternalPreprocessAction>& actions() {
|
||||
return m_actions;
|
||||
}
|
||||
|
||||
@ -179,7 +187,7 @@ private:
|
||||
PreprocessingContext& context);
|
||||
|
||||
private:
|
||||
std::list<InternalPreprocessOp> m_actions;
|
||||
std::list<InternalPreprocessAction> m_actions;
|
||||
std::list<std::vector<uint64_t>> m_layout_converts;
|
||||
std::list<std::vector<uint64_t>> m_forward_layout_converts;
|
||||
Layout m_last_explicit_layout;
|
||||
@ -197,6 +205,14 @@ public:
|
||||
using InternalPostprocessOp = std::function<std::tuple<ov::Output<ov::Node>, bool>(const ov::Output<ov::Node>& node,
|
||||
PostprocessingContext& context)>;
|
||||
|
||||
struct InternalPostprocessAction {
|
||||
InternalPostprocessAction(InternalPostprocessOp op, std::string name)
|
||||
: m_op(std::move(op)),
|
||||
m_name(std::move(name)) {}
|
||||
InternalPostprocessOp m_op;
|
||||
std::string m_name;
|
||||
};
|
||||
|
||||
/// \brief PostProcessStepsImpl - internal data structure
|
||||
class PostStepsList {
|
||||
public:
|
||||
@ -204,15 +220,15 @@ public:
|
||||
void add_convert_layout_impl(const Layout& layout);
|
||||
void add_convert_layout_impl(const std::vector<uint64_t>& dims);
|
||||
|
||||
const std::list<InternalPostprocessOp>& actions() const {
|
||||
const std::list<InternalPostprocessAction>& actions() const {
|
||||
return m_actions;
|
||||
}
|
||||
std::list<InternalPostprocessOp>& actions() {
|
||||
std::list<InternalPostprocessAction>& actions() {
|
||||
return m_actions;
|
||||
}
|
||||
|
||||
private:
|
||||
std::list<InternalPostprocessOp> m_actions;
|
||||
std::list<InternalPostprocessAction> m_actions;
|
||||
};
|
||||
|
||||
class PostProcessSteps::PostProcessStepsImpl : public PostStepsList {};
|
||||
|
@ -24,6 +24,16 @@ static std::shared_ptr<Model> create_simple_function(element::Type type, const P
|
||||
return std::make_shared<Model>(ResultVector{res}, ParameterVector{data1});
|
||||
}
|
||||
|
||||
static std::shared_ptr<Model> create_trivial(element::Type type, const PartialShape& shape) {
|
||||
auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
|
||||
data1->set_friendly_name("input1");
|
||||
data1->get_output_tensor(0).set_names({"tensor_input1"});
|
||||
auto res = std::make_shared<op::v0::Result>(data1);
|
||||
res->set_friendly_name("Result1");
|
||||
res->get_output_tensor(0).set_names({"tensor_output1"});
|
||||
return std::make_shared<Model>(ResultVector{res}, ParameterVector{data1});
|
||||
}
|
||||
|
||||
template <int N>
|
||||
static std::shared_ptr<Model> create_n_inputs(element::Type type, const PartialShape& shape) {
|
||||
ResultVector res;
|
||||
@ -1694,3 +1704,158 @@ TEST(pre_post_process, exception_safety) {
|
||||
EXPECT_EQ(f->output(1).get_node_shared_ptr()->get_friendly_name(), out_name1);
|
||||
EXPECT_EQ(f->output(1).get_tensor().get_names(), out_tensor_names1);
|
||||
}
|
||||
|
||||
TEST(pre_post_process, layout_on_trivial) {
|
||||
auto f = create_trivial(element::f32, Shape{1, 440});
|
||||
auto p = PrePostProcessor(f);
|
||||
p.input().tensor().set_layout("NC").set_element_type(element::f32);
|
||||
p.input().model().set_layout("NC");
|
||||
p.output().tensor().set_element_type(element::f32);
|
||||
EXPECT_EQ(f->input().get_partial_shape(), (PartialShape{1, 440}));
|
||||
f = p.build();
|
||||
EXPECT_EQ(layout::get_layout(f->input()), "NC") << layout::get_layout(f->input()).to_string();
|
||||
EXPECT_EQ(f->input().get_partial_shape(), (PartialShape{1, 440}));
|
||||
ov::set_batch(f, 2);
|
||||
EXPECT_EQ(f->input().get_partial_shape(), (PartialShape{2, 440}));
|
||||
}
|
||||
|
||||
TEST(pre_post_process, dump_empty) {
|
||||
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
|
||||
auto p = PrePostProcessor(f);
|
||||
std::stringstream str;
|
||||
str << p;
|
||||
EXPECT_EQ(str.str(), std::string());
|
||||
}
|
||||
|
||||
TEST(pre_post_process, dump_non_empty) {
|
||||
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
|
||||
auto p = PrePostProcessor(f);
|
||||
p.input().tensor().set_memory_type("some_memory_type");
|
||||
std::stringstream str;
|
||||
str << p;
|
||||
EXPECT_NE(str.str(), std::string());
|
||||
}
|
||||
|
||||
TEST(pre_post_process, dump_preprocess) {
|
||||
auto shape = PartialShape{1, 3, 2, 2};
|
||||
std::stringstream shape_stream;
|
||||
shape_stream << shape;
|
||||
auto shape_str = shape_stream.str();
|
||||
auto f = create_simple_function(element::f32, shape);
|
||||
auto p = PrePostProcessor(f);
|
||||
p.input()
|
||||
.tensor()
|
||||
.set_element_type(element::u8)
|
||||
.set_layout("NHWC")
|
||||
.set_spatial_dynamic_shape()
|
||||
.set_memory_type("test_memory_type");
|
||||
p.input()
|
||||
.preprocess()
|
||||
.convert_element_type(element::f32)
|
||||
.mean(1.f)
|
||||
.scale(2.f)
|
||||
.convert_layout({3, 2, 1, 0})
|
||||
.resize(ResizeAlgorithm::RESIZE_LINEAR, 480, 640)
|
||||
.resize(ResizeAlgorithm::RESIZE_LINEAR)
|
||||
.custom([](const Output<Node>& node) {
|
||||
return node;
|
||||
});
|
||||
p.input().model().set_layout("NCHW");
|
||||
std::stringstream stream;
|
||||
stream << p;
|
||||
auto dump = stream.str();
|
||||
EXPECT_TRUE(dump.find("Input") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("input1") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("memory type=test_memory_type") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("Pre-processing steps (7):") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("mean") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("scale") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("convert type") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("convert layout (3,2,1,0):") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("resize to (480, 640):") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("resize to model width/height:") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("custom:") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("Implicit pre-processing steps (1):") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("convert layout") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("Model's expected tensor: " + shape_str + ", " + Layout("NCHW").to_string()) !=
|
||||
std::string::npos)
|
||||
<< dump;
|
||||
EXPECT_TRUE(dump.find("output1") == std::string::npos) << dump;
|
||||
}
|
||||
|
||||
TEST(pre_post_process, dump_preprocess_multiplane) {
|
||||
auto shape_to_string = [](const PartialShape& shape) {
|
||||
std::stringstream shape_stream;
|
||||
shape_stream << shape;
|
||||
return shape_stream.str();
|
||||
};
|
||||
auto shape = PartialShape{1, 3, 20, 20};
|
||||
auto shape_str = shape_to_string(shape);
|
||||
auto f = create_simple_function(element::f32, shape);
|
||||
auto p = PrePostProcessor(f);
|
||||
p.input().tensor().set_element_type(element::u8).set_color_format(ColorFormat::NV12_TWO_PLANES);
|
||||
p.input().preprocess().convert_element_type(element::f32).convert_color(ColorFormat::RGB);
|
||||
p.input().model().set_layout("NCHW");
|
||||
std::stringstream stream;
|
||||
stream << p;
|
||||
auto dump = stream.str();
|
||||
EXPECT_TRUE(dump.find("Input") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("input1") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("memory type=") == std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("NV12") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("RGB") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("Implicit pre-processing steps (1):") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("Pre-processing steps (2):") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find(shape_to_string(PartialShape{1, 20, 20, 1})) != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find(shape_to_string(PartialShape{1, 10, 10, 2})) != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("convert type") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("convert color") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("convert layout") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("Model's expected tensor: " + shape_str + ", " + Layout("NCHW").to_string()) !=
|
||||
std::string::npos)
|
||||
<< dump;
|
||||
EXPECT_TRUE(dump.find("output1") == std::string::npos) << dump;
|
||||
}
|
||||
|
||||
TEST(pre_post_process, dump_postprocess) {
|
||||
auto shape = PartialShape{1, 3, 2, 2};
|
||||
std::stringstream shape_stream;
|
||||
shape_stream << shape;
|
||||
auto shape_str = shape_stream.str();
|
||||
auto f = create_simple_function(element::f32, shape);
|
||||
auto p = PrePostProcessor(f);
|
||||
p.output().model().set_layout("NCHW");
|
||||
p.output()
|
||||
.postprocess()
|
||||
.convert_element_type(element::i32)
|
||||
.convert_layout({3, 2, 1, 0})
|
||||
.custom([](const Output<Node>& node) {
|
||||
return node;
|
||||
});
|
||||
p.output().tensor().set_element_type(element::u8).set_layout("NHWC");
|
||||
std::stringstream stream;
|
||||
stream << p;
|
||||
auto dump = stream.str();
|
||||
EXPECT_TRUE(dump.find("Output") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("output1") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("Post-processing steps (3):") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("Post-processing implicit steps (2):") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("convert type") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("convert layout (3,2,1,0):") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("convert layout " + Layout("NHWC").to_string()) != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("custom:") != std::string::npos) << dump;
|
||||
EXPECT_TRUE(dump.find("Model's data tensor: " + shape_str + ", " + Layout("NCHW").to_string()) != std::string::npos)
|
||||
<< dump;
|
||||
EXPECT_TRUE(dump.find("input1") == std::string::npos) << dump;
|
||||
}
|
||||
|
||||
TEST(pre_post_process, dump_error) {
|
||||
auto f = create_simple_function(element::f32, Shape{2, 2});
|
||||
auto p = PrePostProcessor(f);
|
||||
p.input().tensor().set_layout("NC");
|
||||
p.input().model().set_layout("HW");
|
||||
std::stringstream stream;
|
||||
stream << p;
|
||||
auto dump = stream.str();
|
||||
EXPECT_TRUE(dump.find("Error occurred:") != std::string::npos) << dump;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user