[PYTHON API] fix hash operator for ports (#9673)
* Fix get_node call
* Add operators and define hash
* Rename infer request property
* Add new line
* Remove unused var
* Move tensor getters to InferRequestWrapper
* Check node hash
* Add new line
* Fix samples

Co-authored-by: Anastasia Kuporosova <anastasia.kuporosova@intel.com>
@@ -28,7 +28,7 @@ def infer_data(data: Dict[str, np.ndarray], infer_request: InferRequest, cw_l: i
     batch_size = infer_request.get_input_tensor(0).shape[0]
     num_of_frames = next(iter(data.values())).shape[0]

-    for output in infer_request.outputs:
+    for output in infer_request.model_outputs:
         result[output.any_name] = np.ndarray((num_of_frames, np.prod(tuple(output.shape)[1:])))

     for i in range(-cw_l, num_of_frames + cw_r, batch_size):
@@ -39,7 +39,7 @@ def infer_data(data: Dict[str, np.ndarray], infer_request: InferRequest, cw_l: i
         else:
             index = i

-        for _input in infer_request.inputs:
+        for _input in infer_request.model_inputs:
             frames_to_infer[_input.any_name] = data[_input.any_name][index:index + batch_size]
             num_of_frames_to_infer = len(frames_to_infer[_input.any_name])
@@ -44,11 +44,14 @@ def normalize_inputs(inputs: Union[dict, list], py_types: dict) -> dict:
 def get_input_types(obj: Union[InferRequestBase, CompiledModelBase]) -> dict:
+    """Map all tensor names of all inputs to the data types of those tensors."""
+
+    def get_inputs(obj: Union[InferRequestBase, CompiledModelBase]) -> list:
+        return obj.model_inputs if isinstance(obj, InferRequestBase) else obj.inputs
+
+    def map_tensor_names_to_types(input: Output) -> dict:
+        return {n: input.get_element_type() for n in input.get_names()}
+
     input_types: dict = {}
-    for idx, input in enumerate(obj.inputs):
+    for idx, input in enumerate(get_inputs(obj)):
+        input_types.update(map_tensor_names_to_types(input))
         input_types[idx] = input.get_element_type()
     return input_types
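For illustration, here is a standalone sketch of the mapping this refactor builds: each input port contributes one entry per tensor name plus one per positional index, which is what lets normalize_inputs resolve both {0: ...} and {"data": ...} keys. The FakePort class and the example names/types are invented for the sketch; only the mapping logic mirrors the diff above.

# Standalone sketch (not OpenVINO code): the idx-and-name type map
# that get_input_types builds for normalize_inputs.
class FakePort:
    """Stand-in for an input port; names/types here are invented."""
    def __init__(self, names, element_type):
        self._names = names
        self._element_type = element_type

    def get_names(self):
        return self._names

    def get_element_type(self):
        return self._element_type


def get_input_types(ports):
    input_types = {}
    for idx, port in enumerate(ports):
        # every tensor name of the port maps to its element type...
        input_types.update({n: port.get_element_type() for n in port.get_names()})
        # ...and the positional index does too, so integer keys keep working
        input_types[idx] = port.get_element_type()
    return input_types


types = get_input_types([FakePort({"data"}, "f32"), FakePort({"seq_len"}, "i32")])
assert types[0] == types["data"] == "f32"
assert types[1] == types["seq_len"] == "i32"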
@@ -236,29 +236,21 @@ void regclass_InferRequest(py::module m) {
         return self.userdata;
     });

-    cls.def_property_readonly("inputs", [](InferRequestWrapper& self) {
+    cls.def_property_readonly("model_inputs", [](InferRequestWrapper& self) {
         return self._inputs;
     });

-    cls.def_property_readonly("outputs", [](InferRequestWrapper& self) {
+    cls.def_property_readonly("model_outputs", [](InferRequestWrapper& self) {
         return self._outputs;
     });

-    cls.def_property_readonly("input_tensors", [](InferRequestWrapper& self) {
-        std::vector<ov::runtime::Tensor> tensors;
-        for (auto&& node : self._inputs) {
-            tensors.push_back(self._request.get_tensor(node));
-        }
-        return tensors;
-    });
+    cls.def_property_readonly("inputs", &InferRequestWrapper::get_input_tensors);

-    cls.def_property_readonly("output_tensors", [](InferRequestWrapper& self) {
-        std::vector<ov::runtime::Tensor> tensors;
-        for (auto&& node : self._outputs) {
-            tensors.push_back(self._request.get_tensor(node));
-        }
-        return tensors;
-    });
+    cls.def_property_readonly("outputs", &InferRequestWrapper::get_output_tensors);
+
+    cls.def_property_readonly("input_tensors", &InferRequestWrapper::get_input_tensors);
+
+    cls.def_property_readonly("output_tensors", &InferRequestWrapper::get_output_tensors);

     cls.def_property_readonly("latency", [](InferRequestWrapper& self) {
         return self.get_latency();
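The net effect on the Python side: model_inputs/model_outputs now return the model's ports, while inputs/outputs (and the explicit input_tensors/output_tensors aliases, which bind to the same wrapper getters) return the live Tensor objects. A hedged usage sketch, assuming `compiled` is a CompiledModel set up as in the tests further down:

# Sketch only; assumes `compiled` is a CompiledModel as in the tests below.
request = compiled.create_infer_request()

port = request.model_inputs[0]      # a port object, usable with get_tensor()
tensor = request.inputs[0]          # the bound input Tensor itself
assert request.input_tensors[0].shape == tensor.shape  # same getter underneath
assert request.get_tensor(port).shape == tensor.shape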
@@ -32,6 +32,22 @@ public:
     }
     // ~InferRequestWrapper() = default;

+    std::vector<ov::runtime::Tensor> get_input_tensors() {
+        std::vector<ov::runtime::Tensor> tensors;
+        for (auto&& node : _inputs) {
+            tensors.push_back(_request.get_tensor(node));
+        }
+        return tensors;
+    }
+
+    std::vector<ov::runtime::Tensor> get_output_tensors() {
+        std::vector<ov::runtime::Tensor> tensors;
+        for (auto&& node : _outputs) {
+            tensors.push_back(_request.get_tensor(node));
+        }
+        return tensors;
+    }
+
     bool user_callback_defined = false;
     py::object userdata;
@@ -5,6 +5,7 @@
 #pragma once

 #include <pybind11/pybind11.h>
+#include <pybind11/operators.h>
 #include <pybind11/stl.h>

 #include "openvino/core/node_output.hpp"
@@ -25,8 +26,20 @@ void regclass_graph_Output(py::module m, std::string typestring)
                                                    py::dynamic_attr());
     output.doc() = docs;

+    // operator overloading
+    output.def(py::self < py::self);
+    output.def(py::self <= py::self);
+    output.def(py::self > py::self);
+    output.def(py::self >= py::self);
+    output.def(py::self == py::self);
+    output.def(py::self != py::self);
+
+    output.def("__hash__", [](ov::Output<VT>& port) {
+        return std::hash<VT*>()(port.get_node()) + port.get_index();
+    });
+
     output.def("get_node",
-               &ov::Output<VT>::get_node,
+               &ov::Output<VT>::get_node_shared_ptr,
                R"(
                 Get node referenced by this output handle.
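The __hash__ above adds the port index to the hash of the owning node's pointer, so two handles to the same port hash alike while sibling outputs of one node differ. In rough Python terms (an analogy for intuition, not the bound implementation):

# Analogy only: id() stands in for the C++ node pointer.
def port_hash(port):
    return hash(id(port.node)) + port.index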
@@ -134,7 +147,7 @@ void regclass_graph_Output(py::module m, std::string typestring)
     )");

-    output.def_property_readonly("node", &ov::Output<VT>::get_node);
+    output.def_property_readonly("node", &ov::Output<VT>::get_node_shared_ptr);
     output.def_property_readonly("index", &ov::Output<VT>::get_index);
     output.def_property_readonly("any_name", &ov::Output<VT>::get_any_name);
     output.def_property_readonly("names", &ov::Output<VT>::get_names);
@@ -114,22 +114,38 @@ def test_set_tensors(device):
     assert np.allclose(tensor3.data, t5.data, atol=1e-2, rtol=1e-2)

     request.set_input_tensor(tensor3)
-    t6 = request.get_tensor(request.inputs[0])
+    t6 = request.get_tensor(request.model_inputs[0])
     assert np.allclose(tensor3.data, t6.data, atol=1e-2, rtol=1e-2)

     request.set_input_tensor(0, tensor1)
-    t7 = request.get_tensor(request.inputs[0])
+    t7 = request.get_tensor(request.model_inputs[0])
     assert np.allclose(tensor1.data, t7.data, atol=1e-2, rtol=1e-2)

     request.set_output_tensor(tensor2)
-    t8 = request.get_tensor(request.outputs[0])
+    t8 = request.get_tensor(request.model_outputs[0])
     assert np.allclose(tensor2.data, t8.data, atol=1e-2, rtol=1e-2)

     request.set_output_tensor(0, tensor4)
-    t9 = request.get_tensor(request.outputs[0])
+    t9 = request.get_tensor(request.model_outputs[0])
     assert np.allclose(tensor4.data, t9.data, atol=1e-2, rtol=1e-2)

+
+def test_inputs_outputs_property(device):
+    num_inputs = 10
+    input_shape = [1]
+    params = [ops.parameter(input_shape, np.uint8) for _ in range(num_inputs)]
+    model = Model(ops.split(ops.concat(params, 0), 0, num_inputs), params)
+    core = Core()
+    compiled = core.compile_model(model, device)
+    request = compiled.create_infer_request()
+    data = [np.atleast_1d(i) for i in range(num_inputs)]
+    results = request.infer(data).values()
+    for result, output_tensor in zip(results, request.outputs):
+        assert np.array_equal(result, output_tensor.data)
+    for input_data, input_tensor in zip(data, request.inputs):
+        assert np.array_equal(input_data, input_tensor.data)
+

 def test_cancel(device):
     core = Core()
     func = core.read_model(test_net_xml, test_net_bin)
@@ -213,7 +229,7 @@ def test_infer_mixed_keys(device):

     request = model.create_infer_request()
     res = request.infer({0: tensor2, "data": tensor})
-    assert np.argmax(res[list(res)[0]]) == 2
+    assert np.argmax(res[model.output()]) == 2


 def test_infer_queue(device):
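This test tweak is the hash fix paying off: since output ports are now hashable and comparable, the results dict can be indexed with model.output() directly instead of recovering the key via list(res)[0]. For example, with the same objects as in the test:

res = request.infer({0: tensor2, "data": tensor})
probs = res[model.output()]  # port-keyed lookup relies on the new __hash__/__eq__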
@@ -342,13 +358,14 @@ def test_query_state_write_buffer(device, input_shape, data_type, mode):

 def test_get_results(device):
     core = Core()
-    func = core.read_model(test_net_xml, test_net_bin)
-    core.set_config({"PERF_COUNT": "YES"}, device)
-    exec_net = core.compile_model(func, device)
-    img = read_image()
-    request = exec_net.create_infer_request()
-    outputs = request.infer({0: img})
-    assert np.allclose(list(outputs.values()), list(request.results.values()))
+    data = ops.parameter([10], np.float64)
+    model = Model(ops.split(data, 0, 5), [data])
+    compiled = core.compile_model(model, device)
+    request = compiled.create_infer_request()
+    inputs = [np.random.normal(size=list(compiled.input().shape))]
+    results = request.infer(inputs)
+    for output in compiled.outputs:
+        assert np.array_equal(results[output], request.results[output])


 def test_results_async_infer(device):
@@ -131,3 +131,13 @@ def test_update_rt_info(device):
     for k, v in output_node.get_rt_info().items():
         assert k == "test12345"
         assert isinstance(v, Parameter)
+
+
+def test_operations():
+    data = ops.parameter([2])
+    split = ops.split(data, 0, 2)
+    outputs = split.outputs()
+    assert outputs[0] < outputs[1]
+    assert outputs[0] == split.output(0)
+    assert hash(outputs[0]) == hash(split.output(0))
+    assert hash(outputs[0]) != hash(outputs[0].node)
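A small follow-on sketch in the same vein as test_operations: because distinct handles to the same port now compare and hash equal, they deduplicate in a set.

data = ops.parameter([2])
split = ops.split(data, 0, 2)
# split.output(0) and split.outputs()[0] are two handles to one port
assert len({split.output(0), split.outputs()[0], split.output(1)}) == 2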