pdpd Place: remaining methods, refactoring, unit tests (#6637)

* pdpd Place: remaining methods, refactoring, unit tests

* resolve merge issues

* use references instead of shared_ptr for Var and Op descs

* Add import pdpd fuzz test; To enable collection of pdpd models, cmake NGRAPH_PDPD_FRONTEND_ENABLE key should be set, the models will be generated to pdpd_test_models folder.

* fix ngraph codestyle

* fix review comments

* Add new methods for Place class

* fix implementation, add tests

* Place: Return nullptr instead of throwing an exception

* ngraph codestyle

* revert return nullptr

* fix build

* fix tests

* fix fuzzing tests

* fix fuzzing tests

* fix readme file

* Revert "fix readme file"

This reverts commit d061e69a0d.

* Resolve review comments

* ngraph codestyle

Co-authored-by: Somsikov, Andrey <andrey.somsikov@intel.com>
This commit is contained in:
Ivan Tikhonov 2021-07-23 15:22:05 +03:00 committed by GitHub
parent c776ea90d8
commit 6e8b0e0ea6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 1361 additions and 200 deletions

View File

@ -94,12 +94,62 @@ namespace ngraph
/// place
virtual std::vector<Ptr> get_consuming_operations(int output_port_index) const;
/// \brief Returns references to all operation nodes that consume data from this place
/// for specified output port
///
/// \note It can be called for any kind of graph place searching for the first consuming
/// operations.
///
/// \param outputPortName If a given place is itself an operation node, this specifies name
/// of output port group
///
/// \return A vector with all operation node references that consume data from this
/// place
virtual std::vector<Ptr>
get_consuming_operations(const std::string& outputPortName) const;
/// \brief Returns references to all operation nodes that consume data from this place
/// for specified output port
///
/// \note It can be called for any kind of graph place searching for the first consuming
/// operations.
///
/// \param outputName If a given place is itself an operation node, this specifies name
/// of output port group, each group can have multiple ports
///
/// \param outputPortIndex If place is an operational node it specifies which output
/// port should be considered.
///
/// \return A vector with all operation node references that consume data from this
/// place
virtual std::vector<Ptr> get_consuming_operations(const std::string& outputName,
int outputPortIndex) const;
/// \brief Returns a tensor place that gets data from this place; applicable for
/// operations, output ports and output edges which have only one output port
///
/// \return A tensor place which holds the resulting value for this place
virtual Ptr get_target_tensor() const;
/// \brief Returns a tensor place that gets data from this place; applicable for
/// operations, output ports and output edges which have only one output port
///
/// \param outputName Name of output port group
///
/// \return A tensor place which holds the resulting value for this place
virtual Ptr get_target_tensor(const std::string& outputName) const;
/// \brief Returns a tensor place that gets data from this place; applicable for
/// operations, output ports and output edges which have only one output port
///
/// \param outputName Name of output port group, each group can have multiple ports
///
/// \param outputPortIndex Output port index if the current place is an operation node
/// and has multiple output ports
///
/// \return A tensor place which holds the resulting value for this place
virtual Ptr get_target_tensor(const std::string& outputName, int outputPortIndex) const;
/// \brief Returns a tensor place that gets data from this place; applicable for
/// operations, output ports and output edges
///
@ -123,6 +173,25 @@ namespace ngraph
/// \return A tensor place which supplies data for this place
virtual Ptr get_source_tensor(int input_port_index) const;
/// \brief Returns a tensor place that supplies data for this place; applicable for
/// operations, input ports and input edges
///
/// \param inputName Name of input port group
///
/// \return A tensor place which supplies data for this place
virtual Ptr get_source_tensor(const std::string& inputName) const;
/// \brief Returns a tensor place that supplies data for this place; applicable for
/// operations, input ports and input edges
///
/// \param inputName If a given place is itself an operation node, this specifies name
/// of input port group, each group can have multiple ports
///
/// \param inputPortIndex Input port index for operational nodes.
///
/// \return A tensor place which supplies data for this place
virtual Ptr get_source_tensor(const std::string& inputName, int inputPortIndex) const;
/// \brief Get an operation node place that immediately produces data for this place;
/// applicable if place has only one input port
///
@ -137,7 +206,27 @@ namespace ngraph
/// \return An operation place that produces data for this place
virtual Ptr get_producing_operation(int input_port_index) const;
/// Returns a port that produces data for this place
/// \brief Get an operation node place that immediately produces data for this place
///
/// \param inputName If a given place is itself an operation node, this specifies name
/// of input port group
///
/// \return An operation place that produces data for this place
virtual Ptr get_producing_operation(const std::string& inputName) const;
/// \brief Get an operation node place that immediately produces data for this place
///
/// \param inputName If a given place is itself an operation node, this specifies name
/// of input port group, each group can have multiple ports
///
/// \param inputPortIndex If a given place is itself an operation node, this specifies a
/// port index
///
/// \return An operation place that produces data for this place
virtual Ptr get_producing_operation(const std::string& inputName,
int inputPortIndex) const;
/// \brief Returns a port that produces data for this place
virtual Ptr get_producing_port() const;
/// \brief For operation node returns reference to an input port; applicable if

View File

@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <frontend_manager/place.hpp>
#include <ngraph/env_util.hpp>
#include <ngraph/except.hpp>
@ -169,35 +170,35 @@ void FrontEnd::normalize(std::shared_ptr<ngraph::Function> function) const
//----------- InputModel ---------------------------
std::vector<Place::Ptr> InputModel::get_inputs() const
{
FRONT_END_NOT_IMPLEMENTED(get_inputs);
return {};
}
std::vector<Place::Ptr> InputModel::get_outputs() const
{
FRONT_END_NOT_IMPLEMENTED(get_outputs);
return {};
}
Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensor_name) const
{
FRONT_END_NOT_IMPLEMENTED(get_place_by_tensor_name);
return nullptr;
}
Place::Ptr InputModel::get_place_by_operation_name(const std::string& operation_name)
{
FRONT_END_NOT_IMPLEMENTED(get_place_by_operation_name);
return nullptr;
}
Place::Ptr InputModel::get_place_by_operation_name_and_input_port(const std::string& operation_name,
int input_port_index)
{
FRONT_END_NOT_IMPLEMENTED(get_place_by_operation_name_and_input_port);
return nullptr;
}
Place::Ptr
InputModel::get_place_by_operation_name_and_output_port(const std::string& operation_name,
int output_port_index)
{
FRONT_END_NOT_IMPLEMENTED(get_place_by_operation_name_and_output_port);
return nullptr;
}
void InputModel::set_name_for_tensor(Place::Ptr tensor, const std::string& new_name)
@ -304,82 +305,88 @@ std::vector<std::string> Place::get_names() const
std::vector<Place::Ptr> Place::get_consuming_operations() const
{
FRONT_END_NOT_IMPLEMENTED(get_consuming_operations);
return {};
}
std::vector<Place::Ptr> Place::get_consuming_operations(int output_port_index) const
{
FRONT_END_NOT_IMPLEMENTED(get_consuming_operations);
return {};
}
std::vector<Place::Ptr> Place::get_consuming_operations(const std::string& outputPortName,
int outputPortIndex) const
{
return {};
}
Place::Ptr Place::get_target_tensor() const
{
FRONT_END_NOT_IMPLEMENTED(get_target_tensor);
return nullptr;
}
Place::Ptr Place::get_target_tensor(int output_port_index) const
{
FRONT_END_NOT_IMPLEMENTED(get_target_tensor);
return nullptr;
}
Place::Ptr Place::get_producing_operation() const
{
FRONT_END_NOT_IMPLEMENTED(get_producing_operation);
return nullptr;
}
Place::Ptr Place::get_producing_operation(int input_port_index) const
{
FRONT_END_NOT_IMPLEMENTED(get_producing_operation);
return nullptr;
}
Place::Ptr Place::get_producing_port() const
{
FRONT_END_NOT_IMPLEMENTED(get_producing_port);
return nullptr;
}
Place::Ptr Place::get_input_port() const
{
FRONT_END_NOT_IMPLEMENTED(get_input_port);
return nullptr;
}
Place::Ptr Place::get_input_port(int input_port_index) const
{
FRONT_END_NOT_IMPLEMENTED(get_input_port);
return nullptr;
}
Place::Ptr Place::get_input_port(const std::string& input_name) const
{
FRONT_END_NOT_IMPLEMENTED(get_input_port);
return nullptr;
}
Place::Ptr Place::get_input_port(const std::string& input_name, int input_port_index) const
{
FRONT_END_NOT_IMPLEMENTED(get_input_port);
return nullptr;
}
Place::Ptr Place::get_output_port() const
{
FRONT_END_NOT_IMPLEMENTED(get_output_port);
return nullptr;
}
Place::Ptr Place::get_output_port(int output_port_index) const
{
FRONT_END_NOT_IMPLEMENTED(get_output_port);
return nullptr;
}
Place::Ptr Place::get_output_port(const std::string& output_name) const
{
FRONT_END_NOT_IMPLEMENTED(get_output_port);
return nullptr;
}
Place::Ptr Place::get_output_port(const std::string& output_name, int output_port_index) const
{
FRONT_END_NOT_IMPLEMENTED(get_output_port);
return nullptr;
}
std::vector<Place::Ptr> Place::get_consuming_ports() const
{
FRONT_END_NOT_IMPLEMENTED(get_consuming_ports);
return {};
}
bool Place::is_input() const
@ -404,12 +411,47 @@ bool Place::is_equal_data(Ptr another) const
Place::Ptr Place::get_source_tensor() const
{
FRONT_END_NOT_IMPLEMENTED(get_source_tensor);
return nullptr;
}
Place::Ptr Place::get_source_tensor(int input_port_index) const
{
FRONT_END_NOT_IMPLEMENTED(get_source_tensor);
return nullptr;
}
Place::Ptr Place::get_source_tensor(const std::string& inputName, int inputPortIndex) const
{
return nullptr;
}
Place::Ptr Place::get_source_tensor(const std::string& inputName) const
{
return nullptr;
}
Place::Ptr Place::get_target_tensor(const std::string& outputPortName) const
{
return nullptr;
}
Place::Ptr Place::get_target_tensor(const std::string& outputPortName, int outputPortIndex) const
{
return nullptr;
}
Place::Ptr Place::get_producing_operation(const std::string& inputName) const
{
return nullptr;
}
Place::Ptr Place::get_producing_operation(const std::string& inputName, int inputPortIndex) const
{
return nullptr;
}
std::vector<Place::Ptr> Place::get_consuming_operations(const std::string& outputPortName) const
{
return {};
}
constexpr VariantTypeInfo VariantWrapper<std::shared_ptr<std::istream>>::type_info;

View File

@ -44,9 +44,7 @@ namespace ngraph
~PlacePDPD() override = default;
bool is_input() const override;
bool is_output() const override;
bool is_equal(Ptr another) const override { return this == another.get(); }
std::vector<std::string> get_names() const override { return m_names; }
@ -64,16 +62,20 @@ namespace ngraph
{
}
void setOp(const std::weak_ptr<OpPlacePDPD>& op) { m_op = op; }
void set_op(const std::weak_ptr<OpPlacePDPD>& op) { m_op = op; }
void set_source_tensor(const std::weak_ptr<TensorPlacePDPD>& source_tensor);
void setSourceTensor(const std::weak_ptr<TensorPlacePDPD>& source_tensor)
{
m_source_tensor = source_tensor;
}
// Internal usage
std::shared_ptr<TensorPlacePDPD> get_source_tensor_pdpd() const;
std::shared_ptr<OpPlacePDPD> get_op();
std::shared_ptr<TensorPlacePDPD> getSourceTensorPDPD() const;
// External usage
std::vector<Ptr> get_consuming_operations() const override;
Ptr get_producing_operation() const override;
Place::Ptr get_source_tensor() const override;
Ptr get_producing_port() const override;
std::shared_ptr<OpPlacePDPD> getOp();
bool is_equal_data(Ptr another) const override;
private:
std::weak_ptr<TensorPlacePDPD> m_source_tensor;
@ -88,14 +90,17 @@ namespace ngraph
{
}
void setOp(const std::weak_ptr<OpPlacePDPD>& op) { m_op = op; }
void set_op(const std::weak_ptr<OpPlacePDPD>& op) { m_op = op; }
void set_target_tensor(const std::weak_ptr<TensorPlacePDPD>& target_tensor);
void setTargetTensor(const std::weak_ptr<TensorPlacePDPD>& target_tensor)
{
m_target_tensor = target_tensor;
}
std::shared_ptr<TensorPlacePDPD> get_target_tensor_pdpd() const;
std::shared_ptr<TensorPlacePDPD> getTargetTensorPDPD() const;
// External usage
std::vector<Ptr> get_consuming_operations() const override;
Place::Ptr get_producing_operation() const override;
std::vector<Place::Ptr> get_consuming_ports() const override;
Ptr get_target_tensor() const override;
bool is_equal_data(Ptr another) const override;
private:
std::weak_ptr<OpPlacePDPD> m_op;
@ -106,52 +111,68 @@ namespace ngraph
{
public:
OpPlacePDPD(const InputModel& input_model,
const std::vector<std::string>& names,
const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc);
const paddle::framework::proto::OpDesc& op_desc,
const std::vector<std::string>& names);
OpPlacePDPD(const InputModel& input_model,
const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc);
const paddle::framework::proto::OpDesc& op_desc);
void addInPort(const std::shared_ptr<InPortPlacePDPD>& input, const std::string& name)
{
m_input_ports[name].push_back(input);
}
void addOutPort(const std::shared_ptr<OutPortPlacePDPD>& output,
const std::string& name)
{
m_output_ports[name].push_back(output);
}
void add_in_port(const std::shared_ptr<InPortPlacePDPD>& input,
const std::string& name);
void add_out_port(const std::shared_ptr<OutPortPlacePDPD>& output,
const std::string& name);
// Internal usage
const std::map<std::string, std::vector<std::shared_ptr<OutPortPlacePDPD>>>&
getOutputPorts() const
{
return m_output_ports;
}
get_output_ports() const;
const std::map<std::string, std::vector<std::shared_ptr<InPortPlacePDPD>>>&
getInputPorts() const
{
return m_input_ports;
}
get_input_ports() const;
std::shared_ptr<OutPortPlacePDPD> get_output_port_pdpd(const std::string& outputName,
int outputPortIndex) const;
std::shared_ptr<InPortPlacePDPD> get_input_port_pdpd(const std::string& inputName,
int inputPortIndex) const;
const paddle::framework::proto::OpDesc& get_desc() const;
std::shared_ptr<OutPortPlacePDPD> getOutputPortPDPD(const std::string& name, int idx)
{
return m_output_ports[name][idx];
}
// External API methods
std::vector<Place::Ptr> get_consuming_ports() const override;
std::shared_ptr<InPortPlacePDPD> getInputPortPDPD(const std::string& name, int idx)
{
return m_input_ports[name][idx];
}
Ptr get_output_port() const override;
Ptr get_output_port(int outputPortIndex) const override;
Ptr get_output_port(const std::string& outputPortName) const override;
Ptr get_output_port(const std::string& outputPortName,
int outputPortIndex) const override;
const std::shared_ptr<paddle::framework::proto::OpDesc>& getDesc() const
{
return m_op_desc;
}
Ptr get_input_port() const override;
Ptr get_input_port(int inputPortIndex) const override;
Ptr get_input_port(const std::string& inputName) const override;
Ptr get_input_port(const std::string& inputName, int inputPortIndex) const override;
std::vector<Ptr> get_consuming_operations() const override;
std::vector<Ptr> get_consuming_operations(int outputPortIndex) const override;
std::vector<Ptr>
get_consuming_operations(const std::string& outputPortName) const override;
std::vector<Ptr> get_consuming_operations(const std::string& outputPortName,
int outputPortIndex) const override;
Ptr get_producing_operation() const override;
Ptr get_producing_operation(int inputPortIndex) const override;
Ptr get_producing_operation(const std::string& inputName) const override;
Ptr get_producing_operation(const std::string& inputName,
int inputPortIndex) const override;
Ptr get_source_tensor() const override;
Ptr get_source_tensor(int inputPortIndex) const override;
Ptr get_source_tensor(const std::string& inputName) const override;
Ptr get_source_tensor(const std::string& inputName, int inputPortIndex) const override;
Ptr get_target_tensor() const override;
Ptr get_target_tensor(int outputPortIndex) const override;
Ptr get_target_tensor(const std::string& outputName) const override;
Ptr get_target_tensor(const std::string& outputName,
int outputPortIndex) const override;
private:
std::shared_ptr<paddle::framework::proto::OpDesc> m_op_desc;
const paddle::framework::proto::OpDesc& m_op_desc;
std::map<std::string, std::vector<std::shared_ptr<InPortPlacePDPD>>> m_input_ports;
std::map<std::string, std::vector<std::shared_ptr<OutPortPlacePDPD>>> m_output_ports;
};
@ -161,40 +182,30 @@ namespace ngraph
public:
TensorPlacePDPD(const InputModel& input_model,
const std::vector<std::string>& names,
const std::shared_ptr<paddle::framework::proto::VarDesc>& var_desc);
const paddle::framework::proto::VarDesc& var_desc);
TensorPlacePDPD(const InputModel& input_model,
const std::shared_ptr<paddle::framework::proto::VarDesc>& var_desc);
const paddle::framework::proto::VarDesc& var_desc);
void addProducingPort(const std::shared_ptr<OutPortPlacePDPD>& out_port)
{
m_producing_ports.push_back(out_port);
}
void add_producing_port(const std::shared_ptr<OutPortPlacePDPD>& out_port);
void add_consuming_port(const std::shared_ptr<InPortPlacePDPD>& in_port);
void addConsumingPort(const std::shared_ptr<InPortPlacePDPD>& in_port)
{
m_consuming_ports.push_back(in_port);
}
// Internal usage
const PartialShape& get_partial_shape() const { return m_pshape; }
const element::Type& get_element_type() const { return m_type; }
void set_partial_shape(const PartialShape& pshape) { m_pshape = pshape; }
void set_element_type(const element::Type& type) { m_type = type; }
const paddle::framework::proto::VarDesc& get_desc() const;
// External usage
Ptr get_producing_operation() const override;
std::vector<Place::Ptr> get_consuming_operations() const override;
std::vector<Place::Ptr> get_consuming_ports() const override;
Ptr get_producing_port() const override;
const PartialShape& getPartialShape() const { return m_pshape; }
const element::Type& getElementType() const { return m_type; }
void setPartialShape(const PartialShape& pshape) { m_pshape = pshape; }
void setElementType(const element::Type& type) { m_type = type; }
const std::shared_ptr<paddle::framework::proto::VarDesc>& getDesc() const
{
return m_var_desc;
}
bool is_equal_data(Ptr another) const override;
private:
std::shared_ptr<paddle::framework::proto::VarDesc> m_var_desc;
const paddle::framework::proto::VarDesc& m_var_desc;
PartialShape m_pshape;
element::Type m_type;

View File

@ -92,7 +92,7 @@ namespace ngraph
std::vector<pdpd::OutPortName> DecoderPDPDProto::get_output_names() const
{
std::vector<std::string> output_names;
for (const auto& output : op_place->getDesc()->outputs())
for (const auto& output : op_place->get_desc().outputs())
{
output_names.push_back(output.parameter());
}
@ -103,9 +103,9 @@ namespace ngraph
DecoderPDPDProto::get_out_port_type(const std::string& port_name) const
{
std::vector<ngraph::element::Type> output_types;
for (const auto& out_port : op_place->getOutputPorts().at(port_name))
for (const auto& out_port : op_place->get_output_ports().at(port_name))
{
output_types.push_back(out_port->getTargetTensorPDPD()->getElementType());
output_types.push_back(out_port->get_target_tensor_pdpd()->get_element_type());
}
FRONT_END_GENERAL_CHECK(output_types.size() > 0, "Port has no tensors connected.");
FRONT_END_GENERAL_CHECK(
@ -114,13 +114,13 @@ namespace ngraph
return output_types[0];
}
std::string DecoderPDPDProto::get_op_type() const { return op_place->getDesc()->type(); }
std::string DecoderPDPDProto::get_op_type() const { return op_place->get_desc().type(); }
std::vector<proto::OpDesc_Attr>
DecoderPDPDProto::decode_attribute_helper(const std::string& name) const
{
std::vector<proto::OpDesc_Attr> attrs;
for (const auto& attr : op_place->getDesc()->attrs())
for (const auto& attr : op_place->get_desc().attrs())
{
if (attr.name() == name)
attrs.push_back(attr);
@ -129,7 +129,7 @@ namespace ngraph
"An error occurred while parsing the ",
name,
" attribute of ",
op_place->getDesc()->type(),
op_place->get_desc().type(),
"node. Unsupported number of attributes. Current number: ",
attrs.size(),
" Expected number: 0 or 1");

View File

@ -39,22 +39,21 @@ namespace ngraph
const std::shared_ptr<OpPlacePDPD>& op_place,
const std::map<std::string, CreatorFunction>& CREATORS_MAP)
{
const auto& op = op_place->getDesc();
// std::cout << "Making node: " << op->type() << std::endl;
const auto& op = op_place->get_desc();
FRONT_END_OP_CONVERSION_CHECK(CREATORS_MAP.find(op->type()) != CREATORS_MAP.end(),
FRONT_END_OP_CONVERSION_CHECK(CREATORS_MAP.find(op.type()) != CREATORS_MAP.end(),
"No creator found for ",
op->type(),
op.type(),
" node.");
pdpd::NamedInputs named_inputs;
const auto& input_ports = op_place->getInputPorts();
const auto& input_ports = op_place->get_input_ports();
for (const auto& name_to_ports : input_ports)
{
for (const auto& port : name_to_ports.second)
{
const auto& var_desc = port->getSourceTensorPDPD()->getDesc();
if (nodes.count(var_desc->name()))
named_inputs[name_to_ports.first].push_back(nodes.at(var_desc->name()));
const auto& var_desc = port->get_source_tensor_pdpd()->get_desc();
if (nodes.count(var_desc.name()))
named_inputs[name_to_ports.first].push_back(nodes.at(var_desc.name()));
else
// return empty map when not all inputs exist. It usually means that
// these nodes are not used because model inputs were overwritten
@ -64,7 +63,7 @@ namespace ngraph
try
{
return CREATORS_MAP.at(op->type())(
return CREATORS_MAP.at(op.type())(
NodeContext(DecoderPDPDProto(op_place), named_inputs));
}
catch (...)
@ -118,20 +117,20 @@ namespace ngraph
for (const auto& _inp_place : model->get_inputs())
{
const auto& inp_place = std::dynamic_pointer_cast<TensorPlacePDPD>(_inp_place);
const auto& var = inp_place->getDesc();
const auto& shape = inp_place->getPartialShape();
const auto& type = inp_place->getElementType();
const auto& var = inp_place->get_desc();
const auto& shape = inp_place->get_partial_shape();
const auto& type = inp_place->get_element_type();
auto param = std::make_shared<Parameter>(type, shape);
param->set_friendly_name(var->name());
param->output(0).get_tensor().add_names({var->name()});
nodes_dict[var->name()] = param;
param->set_friendly_name(var.name());
param->output(0).get_tensor().add_names({var.name()});
nodes_dict[var.name()] = param;
parameter_nodes.push_back(param);
}
const auto& op_places = model->getOpPlaces();
for (const auto& op_place : op_places)
{
const auto& op_type = op_place->getDesc()->type();
const auto& op_type = op_place->get_desc().type();
if (op_type == "feed" || op_type == "fetch")
{
// inputs and outputs are stored in the model already
@ -145,16 +144,16 @@ namespace ngraph
// set layer name by the name of first output var
if (!named_outputs.empty())
{
const auto& first_output_var = op_place->getOutputPorts()
const auto& first_output_var = op_place->get_output_ports()
.begin()
->second.at(0)
->getTargetTensorPDPD()
->getDesc();
->get_target_tensor_pdpd()
->get_desc();
auto node = named_outputs.begin()->second[0].get_node_shared_ptr();
node->set_friendly_name(first_output_var->name());
node->set_friendly_name(first_output_var.name());
}
const auto& out_ports = op_place->getOutputPorts();
const auto& out_ports = op_place->get_output_ports();
for (const auto& name_to_outputs : named_outputs)
{
const auto& ports = out_ports.at(name_to_outputs.first);
@ -164,12 +163,12 @@ namespace ngraph
"the number of outputs of the ngraph node.");
for (size_t idx = 0; idx < ports.size(); ++idx)
{
const auto& var = ports[idx]->getTargetTensorPDPD()->getDesc();
name_to_outputs.second[idx].get_tensor().set_names({var->name()});
const auto& var = ports[idx]->get_target_tensor_pdpd()->get_desc();
name_to_outputs.second[idx].get_tensor().set_names({var.name()});
// if nodes_dict already has node mapped to this tensor name it usually
// means that it was overwritten using setTensorValue
if (!nodes_dict.count(var->name()))
nodes_dict[var->name()] = name_to_outputs.second[idx];
if (!nodes_dict.count(var.name()))
nodes_dict[var.name()] = name_to_outputs.second[idx];
}
}
}
@ -178,8 +177,8 @@ namespace ngraph
for (const auto& _outp_place : model->get_outputs())
{
const auto& outp_place = std::dynamic_pointer_cast<TensorPlacePDPD>(_outp_place);
auto var = outp_place->getDesc();
auto input_var_name = var->name();
auto var = outp_place->get_desc();
auto input_var_name = var.name();
auto result = std::make_shared<Result>(nodes_dict.at(input_var_name));
result->set_friendly_name(input_var_name + "/Result");
result_nodes.push_back(result);

View File

@ -80,14 +80,13 @@ namespace ngraph
for (const auto& var : block.vars())
{
m_var_places[var.name()] = std::make_shared<TensorPlacePDPD>(
m_input_model, std::make_shared<VarDesc>(var));
m_var_places[var.name()] =
std::make_shared<TensorPlacePDPD>(m_input_model, var);
}
for (const auto& op : block.ops())
{
auto op_place =
std::make_shared<OpPlacePDPD>(m_input_model, std::make_shared<OpDesc>(op));
auto op_place = std::make_shared<OpPlacePDPD>(m_input_model, op);
m_op_places.push_back(op_place);
for (const auto& output : op.outputs())
@ -98,12 +97,12 @@ namespace ngraph
// connect out_port and tensor
const auto& tensor = m_var_places.at(var_name);
tensor->addProducingPort(out_port);
out_port->setTargetTensor(tensor);
tensor->add_producing_port(out_port);
out_port->set_target_tensor(tensor);
// connect out_port and op
op_place->addOutPort(out_port, output.parameter());
out_port->setOp(op_place);
op_place->add_out_port(out_port, output.parameter());
out_port->set_op(op_place);
}
}
@ -115,34 +114,34 @@ namespace ngraph
// connect in_port and tensor
const auto& tensor = m_var_places.at(var_name);
tensor->addConsumingPort(in_port);
in_port->setSourceTensor(tensor);
tensor->add_consuming_port(in_port);
in_port->set_source_tensor(tensor);
// connect in_port and op
op_place->addInPort(in_port, input.parameter());
in_port->setOp(op_place);
op_place->add_in_port(in_port, input.parameter());
in_port->set_op(op_place);
}
}
// Determine outputs and inputs
if (op.type() == "feed")
{
const auto& place = op_place->getOutputPortPDPD("Out", 0);
const auto& place = op_place->get_output_port_pdpd("Out", 0);
const auto& var_place = std::dynamic_pointer_cast<TensorPlacePDPD>(
place->getTargetTensorPDPD());
place->get_target_tensor_pdpd());
const auto& tensor_desc =
var_place->getDesc()->type().lod_tensor().tensor();
var_place->get_desc().type().lod_tensor().tensor();
const auto& dims = tensor_desc.dims();
var_place->setElementType(TYPE_MAP[tensor_desc.data_type()]);
var_place->setPartialShape(
var_place->set_element_type(TYPE_MAP[tensor_desc.data_type()]);
var_place->set_partial_shape(
PartialShape(std::vector<Dimension>(dims.begin(), dims.end())));
m_inputs.push_back(var_place);
}
else if (op.type() == "fetch")
{
auto place = op_place->getInputPortPDPD("X", 0);
m_outputs.push_back(place->getSourceTensorPDPD());
auto place = op_place->get_input_port_pdpd("X", 0);
m_outputs.push_back(place->get_source_tensor_pdpd());
}
}
}
@ -235,17 +234,17 @@ namespace ngraph
{
for (const auto& item : m_var_places)
{
const auto& var_desc = item.second->getDesc();
const auto& var_desc = item.second->get_desc();
const auto& name = item.first;
if (pdpd::endsWith(name, std::string{"feed"}) ||
pdpd::endsWith(name, std::string{"fetch"}))
continue;
if (!var_desc->persistable())
if (!var_desc.persistable())
continue;
FRONT_END_GENERAL_CHECK(var_desc->type().type() ==
FRONT_END_GENERAL_CHECK(var_desc.type().type() ==
paddle::framework::proto::VarType::LOD_TENSOR);
const auto& tensor = var_desc->type().lod_tensor().tensor();
const auto& tensor = var_desc.type().lod_tensor().tensor();
Shape shape(tensor.dims().cbegin(), tensor.dims().cend());
const auto& type = TYPE_MAP[tensor.data_type()];
const auto& data_length = shape_size(shape) * type.size();
@ -355,11 +354,11 @@ namespace ngraph
}
else if (auto in_port_place = std::dynamic_pointer_cast<InPortPlacePDPD>(place))
{
return in_port_place->getSourceTensorPDPD();
return in_port_place->get_source_tensor_pdpd();
}
else if (auto out_port_place = std::dynamic_pointer_cast<OutPortPlacePDPD>(place))
{
return out_port_place->getTargetTensorPDPD();
return out_port_place->get_target_tensor_pdpd();
}
FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to TensorPlacePDPD.");
}
@ -403,26 +402,26 @@ namespace ngraph
InputModelPDPD::InputModelPDPDImpl::setPartialShape(Place::Ptr place,
const ngraph::PartialShape& p_shape)
{
pdpd::castToTensorPlace(place)->setPartialShape(p_shape);
pdpd::castToTensorPlace(place)->set_partial_shape(p_shape);
}
ngraph::PartialShape
InputModelPDPD::InputModelPDPDImpl::getPartialShape(Place::Ptr place) const
{
return pdpd::castToTensorPlace(place)->getPartialShape();
return pdpd::castToTensorPlace(place)->get_partial_shape();
}
void InputModelPDPD::InputModelPDPDImpl::setElementType(Place::Ptr place,
const ngraph::element::Type& type)
{
pdpd::castToTensorPlace(place)->setElementType(type);
pdpd::castToTensorPlace(place)->set_element_type(type);
}
void InputModelPDPD::InputModelPDPDImpl::setTensorValue(Place::Ptr place, const void* value)
{
auto tensor_place = pdpd::castToTensorPlace(place);
auto p_shape = tensor_place->getPartialShape();
auto type = tensor_place->getElementType();
auto p_shape = tensor_place->get_partial_shape();
auto type = tensor_place->get_element_type();
auto constant = opset7::Constant::create(type, p_shape.to_shape(), value);
auto name = tensor_place->get_names()[0];
constant->set_friendly_name(name);

View File

@ -25,26 +25,234 @@ bool PlacePDPD::is_output() const
}
OpPlacePDPD::OpPlacePDPD(const InputModel& input_model,
const std::vector<std::string>& names,
const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc)
const paddle::framework::proto::OpDesc& op_desc,
const std::vector<std::string>& names)
: PlacePDPD(input_model, names)
, m_op_desc(op_desc)
{
}
OpPlacePDPD::OpPlacePDPD(const InputModel& input_model,
const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc)
: OpPlacePDPD(input_model, {}, op_desc)
const paddle::framework::proto::OpDesc& op_desc)
: OpPlacePDPD(input_model, op_desc, {})
{
}
// Internal accessor: returns all output ports of this operation, grouped by
// output port-group name (one vector of ports per named group).
const std::map<std::string, std::vector<std::shared_ptr<OutPortPlacePDPD>>>&
OpPlacePDPD::get_output_ports() const
{
    return m_output_ports;
}
// Internal accessor: returns all input ports of this operation, grouped by
// input port-group name (one vector of ports per named group).
const std::map<std::string, std::vector<std::shared_ptr<InPortPlacePDPD>>>&
OpPlacePDPD::get_input_ports() const
{
    return m_input_ports;
}
// Internal accessor: returns the output port with the given group name and
// index within that group.
// Throws std::out_of_range if the name is unknown (via map::at) and a
// frontend error if the index is out of range.
std::shared_ptr<OutPortPlacePDPD> OpPlacePDPD::get_output_port_pdpd(const std::string& outputName,
                                                                    int outputPortIndex) const
{
    // Bug fix: the check used '<=', which accepted outputPortIndex == size()
    // and led to an out-of-bounds access below. The index must be
    // non-negative and strictly less than the number of ports in the group.
    FRONT_END_GENERAL_CHECK(outputPortIndex >= 0 &&
                                outputPortIndex <
                                    static_cast<int>(m_output_ports.at(outputName).size()),
                            "outputPortIndex is out of bounds.");
    return m_output_ports.at(outputName)[outputPortIndex];
}
// Internal accessor: returns the input port with the given group name and
// index within that group.
// Throws std::out_of_range if the name is unknown (via map::at) and a
// frontend error if the index is out of range.
std::shared_ptr<InPortPlacePDPD> OpPlacePDPD::get_input_port_pdpd(const std::string& inputName,
                                                                  int inputPortIndex) const
{
    // Bug fix: the check used '<=', which accepted inputPortIndex == size()
    // and led to an out-of-bounds access below. The index must be
    // non-negative and strictly less than the number of ports in the group.
    FRONT_END_GENERAL_CHECK(inputPortIndex >= 0 &&
                                inputPortIndex <
                                    static_cast<int>(m_input_ports.at(inputName).size()),
                            "inputPortIndex is out of bounds.");
    return m_input_ports.at(inputName)[inputPortIndex];
}
// Internal accessor: returns the protobuf descriptor of the underlying
// PaddlePaddle operation (reference is owned by the input model).
const paddle::framework::proto::OpDesc& OpPlacePDPD::get_desc() const
{
    return m_op_desc;
}
// Registers an output port under the given port-group name; ports within a
// group are kept in insertion order (index == position in the vector).
void OpPlacePDPD::add_out_port(const std::shared_ptr<OutPortPlacePDPD>& output,
                               const std::string& name)
{
    m_output_ports[name].push_back(output);
}
// Registers an input port under the given port-group name; ports within a
// group are kept in insertion order (index == position in the vector).
void OpPlacePDPD::add_in_port(const std::shared_ptr<InPortPlacePDPD>& input,
                              const std::string& name)
{
    m_input_ports[name].push_back(input);
}
// Returns the single output port of the named group; throws when the group
// does not contain exactly one port.
Place::Ptr OpPlacePDPD::get_output_port(const std::string& name) const
{
    const auto& ports = m_output_ports.at(name);
    FRONT_END_GENERAL_CHECK(ports.size() == 1,
                            "Only one output port should exist.");
    return ports.front();
}
// Returns the single input port of the named group; throws when the group
// does not contain exactly one port.
Place::Ptr OpPlacePDPD::get_input_port(const std::string& name) const
{
    const auto& ports = m_input_ports.at(name);
    FRONT_END_GENERAL_CHECK(ports.size() == 1,
                            "Only one input port should exist.");
    return ports.front();
}
// Returns the input port at the given index of the op's single named input
// port group. Requires exactly one named group and a valid index.
Place::Ptr OpPlacePDPD::get_input_port(int inputPortIndex) const
{
    FRONT_END_GENERAL_CHECK(m_input_ports.size() == 1, "Only one named input port should exist.");
    const auto& ports = m_input_ports.begin()->second;
    // The index was previously unchecked; guard against out-of-range access.
    // (Parameter was also misnamed `outputPortIndex` for an input port.)
    FRONT_END_GENERAL_CHECK(inputPortIndex >= 0 &&
                                static_cast<size_t>(inputPortIndex) < ports.size(),
                            "inputPortIndex is out of bounds.");
    return ports[inputPortIndex];
}
// Returns the output port at the given index of the op's single named output
// port group. Requires exactly one named group and a valid index.
Place::Ptr OpPlacePDPD::get_output_port(int outputPortIndex) const
{
    FRONT_END_GENERAL_CHECK(m_output_ports.size() == 1, "Only one named output port should exist.");
    const auto& ports = m_output_ports.begin()->second;
    // The index was previously unchecked; guard against out-of-range access.
    FRONT_END_GENERAL_CHECK(outputPortIndex >= 0 &&
                                static_cast<size_t>(outputPortIndex) < ports.size(),
                            "outputPortIndex is out of bounds.");
    return ports[outputPortIndex];
}
// Returns the op's unique output port; throws unless there is exactly one
// named group holding exactly one port.
Place::Ptr OpPlacePDPD::get_output_port() const
{
    const bool single_port =
        m_output_ports.size() == 1 && m_output_ports.begin()->second.size() == 1;
    FRONT_END_GENERAL_CHECK(single_port, "Only one output port should exist.");
    return m_output_ports.begin()->second.front();
}
// Returns the op's unique input port; throws unless there is exactly one
// named group holding exactly one port.
Place::Ptr OpPlacePDPD::get_input_port() const
{
    const bool single_port =
        m_input_ports.size() == 1 && m_input_ports.begin()->second.size() == 1;
    FRONT_END_GENERAL_CHECK(single_port, "Only one input port should exist.");
    return m_input_ports.begin()->second.front();
}
std::vector<Place::Ptr> OpPlacePDPD::get_consuming_operations() const
{
std::vector<Place::Ptr> consuming_ops;
for (const auto& out_port : m_output_ports)
{
for (const auto& out_port_place : out_port.second)
{
auto consuming_ops_out = out_port_place->get_consuming_operations();
consuming_ops.insert(
consuming_ops.end(), consuming_ops_out.begin(), consuming_ops_out.end());
}
}
return consuming_ops;
}
// Consumers of a specific output port, addressed by group name and index.
std::vector<Place::Ptr> OpPlacePDPD::get_consuming_operations(const std::string& outputPortName,
                                                              int outputPortIndex) const
{
    return get_output_port(outputPortName, outputPortIndex)->get_consuming_operations();
}
// Consumers of the output port at the given index of the single named group.
std::vector<Place::Ptr> OpPlacePDPD::get_consuming_operations(int outputPortIndex) const
{
    return get_output_port(outputPortIndex)->get_consuming_operations();
}
// Consumers of the single output port in the named group.
std::vector<Place::Ptr>
    OpPlacePDPD::get_consuming_operations(const std::string& outputPortName) const
{
    return get_output_port(outputPortName)->get_consuming_operations();
}
std::vector<Place::Ptr> OpPlacePDPD::get_consuming_ports() const
{
std::vector<Place::Ptr> consuming_ports;
for (const auto& out_port : m_output_ports)
{
for (const auto& out_port_place : out_port.second)
{
auto consuming_ops_out = out_port_place->get_consuming_ports();
consuming_ports.insert(
consuming_ports.end(), consuming_ops_out.begin(), consuming_ops_out.end());
}
}
return consuming_ports;
}
// Returns the output port at the given index within the named group.
Place::Ptr OpPlacePDPD::get_output_port(const std::string& outputName, int outputPortIndex) const
{
    // Valid indices are [0, size). The previous check used `<=` which allowed
    // an off-by-one read past the end of the vector; also reject negatives.
    FRONT_END_GENERAL_CHECK(outputPortIndex >= 0 &&
                                static_cast<size_t>(outputPortIndex) <
                                    m_output_ports.at(outputName).size(),
                            "outputPortIndex is out of bounds.");
    return m_output_ports.at(outputName)[outputPortIndex];
}
// Returns the input port at the given index within the named group.
Place::Ptr OpPlacePDPD::get_input_port(const std::string& inputName, int inputPortIndex) const
{
    // Valid indices are [0, size). The previous check used `<=` which allowed
    // an off-by-one read past the end of the vector; also reject negatives.
    FRONT_END_GENERAL_CHECK(inputPortIndex >= 0 &&
                                static_cast<size_t>(inputPortIndex) <
                                    m_input_ports.at(inputName).size(),
                            "inputPortIndex is out of bounds.");
    return m_input_ports.at(inputName)[inputPortIndex];
}
// Tensor feeding the op's unique input port.
Place::Ptr OpPlacePDPD::get_source_tensor() const
{
    return get_input_port()->get_source_tensor();
}
// Tensor feeding the single input port of the named group.
Place::Ptr OpPlacePDPD::get_source_tensor(const std::string& inputName) const
{
    return get_input_port(inputName)->get_source_tensor();
}
// Tensor feeding the input port at the given index of the single named group.
Place::Ptr OpPlacePDPD::get_source_tensor(int inputPortIndex) const
{
    return get_input_port(inputPortIndex)->get_source_tensor();
}
// Tensor feeding the input port addressed by group name and index.
Place::Ptr OpPlacePDPD::get_source_tensor(const std::string& inputName, int inputPortIndex) const
{
    return get_input_port(inputName, inputPortIndex)->get_source_tensor();
}
// Tensor produced by the op's unique output port.
Place::Ptr OpPlacePDPD::get_target_tensor() const
{
    return get_output_port()->get_target_tensor();
}
// Tensor produced by the single output port of the named group.
Place::Ptr OpPlacePDPD::get_target_tensor(const std::string& outputName) const
{
    return get_output_port(outputName)->get_target_tensor();
}
// Tensor produced by the output port addressed by group name and index.
Place::Ptr OpPlacePDPD::get_target_tensor(const std::string& outputName, int outputPortIndex) const
{
    return get_output_port(outputName, outputPortIndex)->get_target_tensor();
}
// Operation producing the data consumed through the named input group.
Place::Ptr OpPlacePDPD::get_producing_operation(const std::string& inputName) const
{
    return get_input_port(inputName)->get_producing_operation();
}
// Operation producing the data consumed through the input port addressed by
// group name and index.
Place::Ptr OpPlacePDPD::get_producing_operation(const std::string& inputName,
                                                int inputPortIndex) const
{
    return get_input_port(inputName, inputPortIndex)->get_producing_operation();
}
// Operation producing the data consumed through the unique input port.
Place::Ptr OpPlacePDPD::get_producing_operation() const
{
    return get_input_port()->get_producing_operation();
}
// Operation producing the data consumed through the input port at the given
// index of the single named group.
Place::Ptr OpPlacePDPD::get_producing_operation(int inputPortIndex) const
{
    return get_input_port(inputPortIndex)->get_producing_operation();
}
// Tensor produced by the output port at the given index of the single named
// group.
Place::Ptr OpPlacePDPD::get_target_tensor(int outputPortIndex) const
{
    return get_output_port(outputPortIndex)->get_target_tensor();
}
TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model,
const std::vector<std::string>& names,
const std::shared_ptr<paddle::framework::proto::VarDesc>& var_desc)
const paddle::framework::proto::VarDesc& var_desc)
: PlacePDPD(input_model, names)
, m_var_desc(var_desc)
{
const auto& var_type = var_desc->type();
const auto& var_type = var_desc.type();
if (var_type.type() == paddle::framework::proto::VarType::LOD_TENSOR)
{
const auto& tensor_desc = var_type.lod_tensor().tensor();
@ -55,8 +263,8 @@ TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model,
}
TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model,
const std::shared_ptr<paddle::framework::proto::VarDesc>& var_desc)
: TensorPlacePDPD(input_model, {var_desc->name()}, var_desc)
const paddle::framework::proto::VarDesc& var_desc)
: TensorPlacePDPD(input_model, {var_desc.name()}, var_desc)
{
}
@ -79,7 +287,7 @@ std::vector<Place::Ptr> TensorPlacePDPD::get_consuming_ports() const
Place::Ptr TensorPlacePDPD::get_producing_port() const
{
FRONT_END_GENERAL_CHECK(m_producing_ports.size() > 1, "Only one producing port is supported.");
FRONT_END_GENERAL_CHECK(m_producing_ports.size() == 1, "Only one producing port is supported.");
if (const auto& producing_port = m_producing_ports[0].lock())
{
return producing_port;
@ -87,7 +295,56 @@ Place::Ptr TensorPlacePDPD::get_producing_port() const
FRONT_END_THROW("Producing Port has expired.");
}
std::shared_ptr<TensorPlacePDPD> InPortPlacePDPD::getSourceTensorPDPD() const
// Registers an output port as a (weakly referenced) producer of this tensor.
void TensorPlacePDPD::add_producing_port(const std::shared_ptr<OutPortPlacePDPD>& out_port)
{
    m_producing_ports.push_back(out_port);
}
// Registers an input port as a (weakly referenced) consumer of this tensor.
void TensorPlacePDPD::add_consuming_port(const std::shared_ptr<InPortPlacePDPD>& in_port)
{
    m_consuming_ports.push_back(in_port);
}
// Accessor for the underlying protobuf variable descriptor.
const paddle::framework::proto::VarDesc& TensorPlacePDPD::get_desc() const
{
    return m_var_desc;
}
std::vector<Place::Ptr> TensorPlacePDPD::get_consuming_operations() const
{
std::vector<Place::Ptr> consuming_ops;
for (const auto& consuming_port : m_consuming_ports)
{
if (auto port_ptr = consuming_port.lock())
{
auto port_consuming_ops = port_ptr->get_consuming_operations();
consuming_ops.insert(
consuming_ops.end(), port_consuming_ops.begin(), port_consuming_ops.end());
}
else
{
FRONT_END_THROW("Port has expired.");
}
}
return consuming_ops;
}
// True when `another` denotes the same data stream as this tensor: the tensor
// itself, its producing port, or any of its consuming ports.
// NOTE(review): get_producing_port() throws unless exactly one producing port
// exists -- confirm this is never called on tensors without a producer (e.g.
// model inputs).
bool TensorPlacePDPD::is_equal_data(Place::Ptr another) const
{
    auto consuming_ports = get_consuming_ports();
    bool eq_to_consuming_port =
        std::any_of(consuming_ports.begin(), consuming_ports.end(), [&another](const Ptr& place) {
            return place->is_equal(another);
        });
    return is_equal(another) || get_producing_port()->is_equal(another) || eq_to_consuming_port;
}
// Operation that writes this tensor, reached through its producing port.
Place::Ptr TensorPlacePDPD::get_producing_operation() const
{
    return get_producing_port()->get_producing_operation();
}
std::shared_ptr<TensorPlacePDPD> InPortPlacePDPD::get_source_tensor_pdpd() const
{
if (const auto& tensor = m_source_tensor.lock())
{
@ -96,7 +353,7 @@ std::shared_ptr<TensorPlacePDPD> InPortPlacePDPD::getSourceTensorPDPD() const
FRONT_END_THROW("Source Tensor has expired.");
}
std::shared_ptr<OpPlacePDPD> InPortPlacePDPD::getOp()
std::shared_ptr<OpPlacePDPD> InPortPlacePDPD::get_op()
{
if (const auto& op = m_op.lock())
{
@ -105,7 +362,45 @@ std::shared_ptr<OpPlacePDPD> InPortPlacePDPD::getOp()
FRONT_END_THROW("Operation has expired.");
}
std::shared_ptr<TensorPlacePDPD> OutPortPlacePDPD::getTargetTensorPDPD() const
// Sets the tensor (weakly referenced) that feeds this input port.
void InPortPlacePDPD::set_source_tensor(const std::weak_ptr<TensorPlacePDPD>& source_tensor)
{
    m_source_tensor = source_tensor;
}
// The sole consumer of an input port is the operation that owns it.
// Throws if the owning operation has already been destroyed.
std::vector<Place::Ptr> InPortPlacePDPD::get_consuming_operations() const
{
    const auto consuming_op = m_op.lock();
    if (!consuming_op)
    {
        FRONT_END_THROW("Operation has expired.");
    }
    return {consuming_op};
}
// Tensor feeding this port; throws if it has already been destroyed.
Place::Ptr InPortPlacePDPD::get_source_tensor() const
{
    const auto tensor = m_source_tensor.lock();
    if (!tensor)
    {
        FRONT_END_THROW("Source Tensor has expired.");
    }
    return tensor;
}
// Output port that produces the tensor consumed by this input port.
Place::Ptr InPortPlacePDPD::get_producing_port() const
{
    return get_source_tensor()->get_producing_port();
}
// Data equality is delegated to the tensor flowing through this port.
bool InPortPlacePDPD::is_equal_data(Place::Ptr another) const
{
    return get_source_tensor()->is_equal_data(another);
}
// Operation that produces the data arriving at this input port.
Place::Ptr InPortPlacePDPD::get_producing_operation() const
{
    return get_producing_port()->get_producing_operation();
}
std::shared_ptr<TensorPlacePDPD> OutPortPlacePDPD::get_target_tensor_pdpd() const
{
if (const auto& target_tensor = m_target_tensor.lock())
{
@ -113,3 +408,49 @@ std::shared_ptr<TensorPlacePDPD> OutPortPlacePDPD::getTargetTensorPDPD() const
}
FRONT_END_THROW("Target Tensor has expired.");
}
// Consumers of an output port are the consumers of its target tensor.
// Throws if the tensor has already been destroyed.
std::vector<Place::Ptr> OutPortPlacePDPD::get_consuming_operations() const
{
    const auto tensor = m_target_tensor.lock();
    if (!tensor)
    {
        FRONT_END_THROW("Tensor has expired.");
    }
    return tensor->get_consuming_operations();
}
// Sets the tensor (weakly referenced) that this output port produces.
void OutPortPlacePDPD::set_target_tensor(const std::weak_ptr<TensorPlacePDPD>& target_tensor)
{
    m_target_tensor = target_tensor;
}
// Input ports reading the tensor this output port produces.
// Throws if the tensor has already been destroyed.
std::vector<Place::Ptr> OutPortPlacePDPD::get_consuming_ports() const
{
    const auto tensor = m_target_tensor.lock();
    if (!tensor)
    {
        FRONT_END_THROW("Tensor has expired.");
    }
    return tensor->get_consuming_ports();
}
// Data equality is delegated to the tensor flowing out of this port.
bool OutPortPlacePDPD::is_equal_data(Place::Ptr another) const
{
    return get_target_tensor()->is_equal_data(another);
}
// Tensor this port writes to; throws if it has already been destroyed.
Place::Ptr OutPortPlacePDPD::get_target_tensor() const
{
    const auto tensor = m_target_tensor.lock();
    if (!tensor)
    {
        FRONT_END_THROW("Target Tensor has expired.");
    }
    return tensor;
}
// The producer seen from an output port is the operation that owns it.
// Throws if the owning operation has already been destroyed.
Place::Ptr OutPortPlacePDPD::get_producing_operation() const
{
    const auto owner = m_op.lock();
    if (!owner)
    {
        FRONT_END_THROW("Operation has expired.");
    }
    return owner;
}

View File

@ -0,0 +1,54 @@
import numpy as np
from save_model import saveModel
import sys
def pdpd_rnn_lstm(input_size, hidden_size, layers, direction):
    """Build, run and save a small PaddlePaddle LSTM test model.

    The saved model ("place_test_model") is consumed by the Place unit tests.
    Returns the first fetched output (y) as a numpy array.
    """
    import paddle as pdpd
    pdpd.enable_static()
    main_program = pdpd.static.Program()
    startup_program = pdpd.static.Program()
    # Bidirectional LSTM doubles the number of direction-specific states.
    num_of_directions = 1 if direction == 'forward' else 2
    with pdpd.static.program_guard(main_program, startup_program):
        rnn = pdpd.nn.LSTM(input_size, hidden_size, layers, direction, name="lstm")
        data = pdpd.static.data(name='x', shape=[4, 3, input_size], dtype='float32')
        # Initial hidden/cell states, filled with ones.
        prev_h = pdpd.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32, name="const_1")
        prev_c = pdpd.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32, name="const_2")
        y, (h, c) = rnn(data, (prev_h, prev_c))
        # Extra consumers of `c` so the saved graph has multi-consumer tensors.
        relu_1 = pdpd.nn.functional.relu(c, name="relu_1")
        relu_2 = pdpd.nn.functional.relu(c, name="relu_2")
        relu_3 = pdpd.nn.functional.relu(c, name="relu_3")

        cpu = pdpd.static.cpu_places(1)
        exe = pdpd.static.Executor(cpu[0])
        # Run startup program once to initialize parameters.
        exe.run(startup_program)
        outs = exe.run(
            feed={'x': np.ones([4, 3, input_size]).astype(np.float32)},
            fetch_list=[y, h, c],
            program=main_program)

        saveModel("place_test_model", exe, feedkeys=['x'],
                  fetchlist=[y, h, c, relu_1, relu_2, relu_3],
                  inputs=[np.ones([4, 3, input_size]).astype(np.float32)],
                  outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1])

        print(outs[0])
        return outs[0]
if __name__ == "__main__":
    # Single scenario: one-layer unidirectional LSTM over 2-wide input.
    test_configs = [
        {
            'input_size': 2,
            'hidden_size': 2,
            'layers': 1,
            'direction': 'forward',
        },
    ]
    for cfg in test_configs:
        pdpd_rnn_lstm(cfg['input_size'], cfg['hidden_size'],
                      cfg['layers'], cfg['direction'])

View File

@ -83,15 +83,15 @@ TEST(FrontEndManagerTest, testDefaultInputModel)
{
std::unique_ptr<InputModel> imPtr(new InputModel()); // to verify base destructor
InputModel::Ptr im = std::make_shared<InputModel>();
ASSERT_ANY_THROW(im->get_inputs());
ASSERT_ANY_THROW(im->get_outputs());
ASSERT_EQ(im->get_inputs(), std::vector<Place::Ptr>{});
ASSERT_EQ(im->get_outputs(), std::vector<Place::Ptr>{});
ASSERT_ANY_THROW(im->override_all_inputs({nullptr}));
ASSERT_ANY_THROW(im->override_all_outputs({nullptr}));
ASSERT_ANY_THROW(im->extract_subgraph({nullptr}, {nullptr}));
ASSERT_ANY_THROW(im->get_place_by_tensor_name(""));
ASSERT_ANY_THROW(im->get_place_by_operation_name(""));
ASSERT_ANY_THROW(im->get_place_by_operation_name_and_input_port("", 0));
ASSERT_ANY_THROW(im->get_place_by_operation_name_and_output_port("", 0));
ASSERT_EQ(im->get_place_by_tensor_name(""), nullptr);
ASSERT_EQ(im->get_place_by_operation_name(""), nullptr);
ASSERT_EQ(im->get_place_by_operation_name_and_input_port("", 0), nullptr);
ASSERT_EQ(im->get_place_by_operation_name_and_output_port("", 0), nullptr);
ASSERT_ANY_THROW(im->set_name_for_tensor(nullptr, ""));
ASSERT_ANY_THROW(im->add_name_for_tensor(nullptr, ""));
ASSERT_ANY_THROW(im->set_name_for_operation(nullptr, ""));
@ -114,24 +114,32 @@ TEST(FrontEndManagerTest, testDefaultPlace)
std::unique_ptr<Place> placePtr(new Place()); // to verify base destructor
Place::Ptr place = std::make_shared<Place>();
ASSERT_ANY_THROW(place->get_names());
ASSERT_ANY_THROW(place->get_consuming_operations());
ASSERT_ANY_THROW(place->get_consuming_operations(0));
ASSERT_ANY_THROW(place->get_target_tensor());
ASSERT_ANY_THROW(place->get_target_tensor(0));
ASSERT_ANY_THROW(place->get_source_tensor());
ASSERT_ANY_THROW(place->get_source_tensor(0));
ASSERT_ANY_THROW(place->get_producing_operation());
ASSERT_ANY_THROW(place->get_producing_operation(0));
ASSERT_ANY_THROW(place->get_producing_port());
ASSERT_ANY_THROW(place->get_input_port());
ASSERT_ANY_THROW(place->get_input_port(0));
ASSERT_ANY_THROW(place->get_input_port(""));
ASSERT_ANY_THROW(place->get_input_port("", 0));
ASSERT_ANY_THROW(place->get_output_port());
ASSERT_ANY_THROW(place->get_output_port(0));
ASSERT_ANY_THROW(place->get_output_port(""));
ASSERT_ANY_THROW(place->get_output_port("", 0));
ASSERT_ANY_THROW(place->get_consuming_ports());
ASSERT_EQ(place->get_consuming_operations(), std::vector<Place::Ptr>{});
ASSERT_EQ(place->get_consuming_operations(0), std::vector<Place::Ptr>{});
ASSERT_EQ(place->get_consuming_operations(""), std::vector<Place::Ptr>{});
ASSERT_EQ(place->get_consuming_operations("", 0), std::vector<Place::Ptr>{});
ASSERT_EQ(place->get_target_tensor(), nullptr);
ASSERT_EQ(place->get_target_tensor(0), nullptr);
ASSERT_EQ(place->get_target_tensor(""), nullptr);
ASSERT_EQ(place->get_target_tensor("", 0), nullptr);
ASSERT_EQ(place->get_source_tensor(), nullptr);
ASSERT_EQ(place->get_source_tensor(""), nullptr);
ASSERT_EQ(place->get_source_tensor(0), nullptr);
ASSERT_EQ(place->get_source_tensor("", 0), nullptr);
ASSERT_EQ(place->get_producing_operation(), nullptr);
ASSERT_EQ(place->get_producing_operation(""), nullptr);
ASSERT_EQ(place->get_producing_operation(0), nullptr);
ASSERT_EQ(place->get_producing_operation("", 0), nullptr);
ASSERT_EQ(place->get_producing_port(), nullptr);
ASSERT_EQ(place->get_input_port(), nullptr);
ASSERT_EQ(place->get_input_port(0), nullptr);
ASSERT_EQ(place->get_input_port(""), nullptr);
ASSERT_EQ(place->get_input_port("", 0), nullptr);
ASSERT_EQ(place->get_output_port(), nullptr);
ASSERT_EQ(place->get_output_port(0), nullptr);
ASSERT_EQ(place->get_output_port(""), nullptr);
ASSERT_EQ(place->get_output_port("", 0), nullptr);
ASSERT_EQ(place->get_consuming_ports(), std::vector<Place::Ptr>{});
ASSERT_ANY_THROW(place->is_input());
ASSERT_ANY_THROW(place->is_output());
ASSERT_ANY_THROW(place->is_equal(nullptr));

View File

@ -0,0 +1,476 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <frontend/shared/include/utils.hpp>
#include <frontend_manager/frontend_manager.hpp>
#include <memory>
#include "gtest/gtest.h"
using namespace ngraph::frontend;
const std::string model_file = "place_test_model/place_test_model.pdmodel";
/***
model:
[input]
|
[const] [const] [transpose]
\ | /
[ RNN (LSTM) ]
/ | \
[transpose] [scale_{1,2}] [relu_{0,1,2}]
| | |
[scale_0] [out_{1,2}] [scale_{3,4,5}]
| |
[out_1] [out_{3,4,5}]
***/
// Names of every tensor expected in the saved test model (see diagram above).
std::vector<std::string> tensor_names = {
    "x",
    "const_1.tmp_0",
    "const_2.tmp_0",
    "transpose_0.tmp_0",
    "transpose_0.tmp_1",
    "lstm_0.tmp_0",
    "lstm_0._generated_var_0",
    "lstm_0.tmp_3",
    "lstm_0.tmp_1",
    "lstm_0.tmp_2",
    "transpose_1.tmp_0",
    "transpose_1.tmp_1",
    "relu_1.tmp_0",
    "relu_2.tmp_0",
    "relu_3.tmp_0",
    "save_infer_model/scale_0.tmp_1",
    "save_infer_model/scale_1.tmp_1",
    "save_infer_model/scale_2.tmp_1",
    "save_infer_model/scale_3.tmp_1",
    "save_infer_model/scale_4.tmp_1",
    "save_infer_model/scale_5.tmp_1",
};
// Every tensor listed in `tensor_names` must be resolvable by name.
TEST(PDPD_Places, check_tensor_names)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    for (const auto& tensor_name : tensor_names)
    {
        auto place = input_model->get_place_by_tensor_name(tensor_name);
        EXPECT_NE(place, nullptr);
    }
}
// The model must expose exactly one input ("x") and the six save_infer_model
// scale outputs.
TEST(PDPD_Places, check_input_outputs)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    auto inputs = input_model->get_inputs();
    auto outputs = input_model->get_outputs();

    EXPECT_EQ(inputs.size(), 1);
    EXPECT_EQ(outputs.size(), 6);

    auto tensor_place = input_model->get_place_by_tensor_name("x");
    // Fix: the is_equal result was silently discarded, so this check never ran.
    EXPECT_TRUE(tensor_place->is_equal(inputs[0]));

    std::vector<std::string> output_names = {"save_infer_model/scale_0.tmp_1",
                                             "save_infer_model/scale_1.tmp_1",
                                             "save_infer_model/scale_2.tmp_1",
                                             "save_infer_model/scale_3.tmp_1",
                                             "save_infer_model/scale_4.tmp_1",
                                             "save_infer_model/scale_5.tmp_1"};
    for (const auto& name : output_names)
    {
        const auto output_place = input_model->get_place_by_tensor_name(name);
        auto it =
            std::find_if(outputs.begin(), outputs.end(), [&output_place](const Place::Ptr& place) {
                return output_place->is_equal(place);
            });
        EXPECT_NE(it, outputs.end());
    }
}
// every op present in the model has an "Out" output port
// For every tensor, its producing op must expose an "Out" port group and the
// name-only and name+index accessors must agree.
TEST(PDPD_Places, check_out_port_of_all_ops)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    for (const auto& tensor_name : tensor_names)
    {
        auto place = input_model->get_place_by_tensor_name(tensor_name);
        EXPECT_NE(place, nullptr);

        auto producing_op = place->get_producing_operation();
        EXPECT_NE(producing_op, nullptr);
        auto out_port_by_name = producing_op->get_output_port("Out");
        EXPECT_NE(out_port_by_name, nullptr);
        auto out_port_by_name_idx = producing_op->get_output_port("Out", 0);
        EXPECT_NE(out_port_by_name_idx, nullptr);
        EXPECT_TRUE(out_port_by_name->is_equal(out_port_by_name_idx));
    }
}
// For ops producing model outputs: default, by-name and by-name+index port
// accessors must all resolve to the same input/output port.
TEST(PDPD_Places, check_in_out_ports_of_model_outputs)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    auto outputs = input_model->get_outputs();
    for (const auto& output : outputs)
    {
        auto producing_op = output->get_producing_operation();
        EXPECT_NE(producing_op, nullptr);

        auto out_port = producing_op->get_output_port();
        EXPECT_NE(out_port, nullptr);
        auto out_port_by_name = producing_op->get_output_port("Out");
        EXPECT_NE(out_port_by_name, nullptr);
        auto out_port_by_name_idx = producing_op->get_output_port("Out", 0);
        EXPECT_NE(out_port_by_name_idx, nullptr);
        EXPECT_TRUE(out_port->is_equal(out_port_by_name));
        EXPECT_TRUE(out_port->is_equal(out_port_by_name_idx));

        auto in_port = producing_op->get_input_port();
        EXPECT_NE(in_port, nullptr);
        auto in_port_by_name = producing_op->get_input_port("X");
        EXPECT_NE(in_port_by_name, nullptr);
        auto in_port_by_name_idx = producing_op->get_input_port("X", 0);
        EXPECT_NE(in_port_by_name_idx, nullptr);
        EXPECT_TRUE(in_port->is_equal(in_port_by_name));
        EXPECT_TRUE(in_port->is_equal(in_port_by_name_idx));
    }
}
// For ops producing model outputs: default, by-name and by-name+index
// source/target tensor accessors must agree.
TEST(PDPD_Places, check_source_target_tensors_of_model_outputs)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    auto outputs = input_model->get_outputs();
    for (const auto& output : outputs)
    {
        auto producing_op = output->get_producing_operation();
        EXPECT_NE(producing_op, nullptr);

        auto out = producing_op->get_target_tensor();
        EXPECT_NE(out, nullptr);
        auto out_by_name = producing_op->get_target_tensor("Out");
        EXPECT_NE(out_by_name, nullptr);
        auto out_by_name_idx = producing_op->get_target_tensor("Out", 0);
        EXPECT_NE(out_by_name_idx, nullptr);
        EXPECT_TRUE(out->is_equal(out_by_name));
        EXPECT_TRUE(out->is_equal(out_by_name_idx));

        auto in = producing_op->get_source_tensor();
        EXPECT_NE(in, nullptr);
        auto in_by_name = producing_op->get_source_tensor("X");
        EXPECT_NE(in_by_name, nullptr);
        auto in_by_name_idx = producing_op->get_source_tensor("X", 0);
        EXPECT_NE(in_by_name_idx, nullptr);
        EXPECT_TRUE(in->is_equal(in_by_name));
        EXPECT_TRUE(in->is_equal(in_by_name_idx));
    }
}
// For ops producing model outputs: consuming/producing-operation accessors
// with and without port name/index must agree.
TEST(PDPD_Places, check_producing_consuming_ops_of_model_outputs)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    auto outputs = input_model->get_outputs();
    for (const auto& output : outputs)
    {
        auto op = output->get_producing_operation();
        EXPECT_NE(op, nullptr);

        auto out = op->get_consuming_operations();
        EXPECT_EQ(out.size(), 1);
        auto out_by_name = op->get_consuming_operations("Out");
        EXPECT_EQ(out_by_name.size(), 1);
        auto out_by_name_idx = op->get_consuming_operations("Out", 0);
        EXPECT_EQ(out_by_name_idx.size(), 1);
        EXPECT_TRUE(out[0]->is_equal(out_by_name[0]));
        EXPECT_TRUE(out[0]->is_equal(out_by_name_idx[0]));

        auto in = op->get_producing_operation();
        EXPECT_NE(in, nullptr);
        auto in_by_name = op->get_producing_operation("X");
        EXPECT_NE(in_by_name, nullptr);
        auto in_by_name_idx = op->get_producing_operation("X", 0);
        EXPECT_NE(in_by_name_idx, nullptr);
        EXPECT_TRUE(in->is_equal(in_by_name));
        EXPECT_TRUE(in->is_equal(in_by_name_idx));
    }
}
// check data flow [ output port -> tensor -> input port ]
// is_equal_data must hold across [output port -> tensor -> input port],
// while is_equal (identity) must distinguish the three place kinds.
TEST(PDPD_Places, check_data_flow)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    for (const auto& tensor_name : tensor_names)
    {
        auto tensor_place = input_model->get_place_by_tensor_name(tensor_name);
        EXPECT_NE(tensor_place, nullptr);

        auto out_port = tensor_place->get_producing_port();
        auto in_ports = tensor_place->get_consuming_ports();

        EXPECT_TRUE(tensor_place->is_equal_data(out_port));
        EXPECT_TRUE(out_port->is_equal_data(tensor_place));
        EXPECT_FALSE(out_port->is_equal(tensor_place));

        // NOTE(review): this is the port's *target* tensor; the variable name
        // `source_tensor` is misleading.
        auto source_tensor = out_port->get_target_tensor();
        EXPECT_TRUE(source_tensor->is_equal(tensor_place));

        for (const auto& in_port : in_ports)
        {
            EXPECT_TRUE(out_port->is_equal_data(in_port));
            EXPECT_TRUE(in_port->is_equal_data(out_port));
            EXPECT_TRUE(in_port->is_equal_data(tensor_place));
            EXPECT_TRUE(tensor_place->is_equal_data(in_port));
            EXPECT_FALSE(in_port->is_equal(out_port));
            EXPECT_FALSE(in_port->is_equal(tensor_place));
            EXPECT_TRUE(out_port->is_equal(in_port->get_producing_port()));
            EXPECT_TRUE(tensor_place->is_equal(in_port->get_source_tensor()));
        }
    }
}
// check [ tensor -> input_port
// -> input_port_2
// -> input_port_N]
// input_port, input_port_2, ... input_port_N are equal data
// All consuming ports of one tensor carry the same data (is_equal_data), but
// are identical (is_equal) only to themselves.
TEST(PDPD_Places, check_tensor_to_multiple_ports)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    for (const auto& tensor_name : tensor_names)
    {
        auto tensor_place = input_model->get_place_by_tensor_name(tensor_name);
        auto inputs_to = tensor_place->get_consuming_ports();

        for (size_t idx = 0; idx < inputs_to.size(); ++idx)
        {
            for (size_t idx_2 = 0; idx_2 < inputs_to.size(); ++idx_2)
            {
                EXPECT_TRUE(inputs_to[idx]->is_equal_data(inputs_to[idx_2]));
                EXPECT_TRUE(inputs_to[idx_2]->is_equal_data(inputs_to[idx]));
                if (idx == idx_2)
                {
                    EXPECT_TRUE(inputs_to[idx]->is_equal(inputs_to[idx_2]));
                }
                else
                {
                    EXPECT_FALSE(inputs_to[idx]->is_equal(inputs_to[idx_2]));
                }
            }
        }
    }
}
// consuming ops should be equal for tensor place and producing output port
// The consuming-op set must be the same whether collected from the tensor,
// from its producing port, or from each of its consuming ports.
TEST(PDPD_Places, check_consuming_ops)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    for (const auto& tensor_name : tensor_names)
    {
        auto tensor_place = input_model->get_place_by_tensor_name(tensor_name);
        EXPECT_NE(tensor_place, nullptr);

        auto consuming_ops_for_tensor = tensor_place->get_consuming_operations();
        auto out_port = tensor_place->get_producing_port();
        auto consuming_ops_for_out_port = out_port->get_consuming_operations();

        // Order is not specified; compare as multisets via is_equal.
        bool is_permutation =
            std::is_permutation(consuming_ops_for_out_port.begin(),
                                consuming_ops_for_out_port.end(),
                                consuming_ops_for_tensor.begin(),
                                [](const Place::Ptr& place1, const Place::Ptr& place2) {
                                    return place1->is_equal(place2);
                                });
        EXPECT_TRUE(is_permutation);

        auto consuming_ports_for_tensor = tensor_place->get_consuming_ports();
        std::vector<Place::Ptr> consuming_ops_for_in_ports;
        for (const auto& port : consuming_ports_for_tensor)
        {
            // Each input port belongs to exactly one consuming operation.
            EXPECT_EQ(port->get_consuming_operations().size(), 1);
            consuming_ops_for_in_ports.push_back(port->get_consuming_operations()[0]);
        }

        is_permutation =
            std::is_permutation(consuming_ops_for_in_ports.begin(),
                                consuming_ops_for_in_ports.end(),
                                consuming_ops_for_tensor.begin(),
                                [](const Place::Ptr& place1, const Place::Ptr& place2) {
                                    return place1->is_equal(place2);
                                });
        EXPECT_TRUE(is_permutation);
    }
}
// "lstm_0.tmp_2" has four consumers (transpose + 3 relus); verify each
// consuming port maps back consistently to its op, input port and tensor.
TEST(PDPD_Places, check_consuming_ops_2)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    auto it = find(tensor_names.begin(), tensor_names.end(), "lstm_0.tmp_2");
    EXPECT_NE(it, tensor_names.end());
    auto tensor_place = input_model->get_place_by_tensor_name(*it);

    auto consuming_ports = tensor_place->get_consuming_ports();
    auto consuming_ops = tensor_place->get_consuming_operations();
    EXPECT_EQ(consuming_ports.size(), 4);
    EXPECT_EQ(consuming_ops.size(), 4);

    for (const auto& consuming_port : consuming_ports)
    {
        auto port_consuming_ops = consuming_port->get_consuming_operations();
        EXPECT_EQ(port_consuming_ops.size(), 1);

        auto in_port = port_consuming_ops[0]->get_input_port();
        auto in_port_by_name = port_consuming_ops[0]->get_input_port("X");
        auto in_port_by_name_and_idx = port_consuming_ops[0]->get_input_port("X", 0);
        EXPECT_TRUE(consuming_port->is_equal(in_port) &&
                    consuming_port->is_equal(in_port_by_name) &&
                    consuming_port->is_equal(in_port_by_name_and_idx));

        // The op reached through this port must be one of the tensor's consumers.
        auto op = std::find_if(consuming_ops.begin(),
                               consuming_ops.end(),
                               [&port_consuming_ops](const Place::Ptr& place) {
                                   return place->is_equal(port_consuming_ops[0]);
                               });
        EXPECT_NE(op, consuming_ops.end());

        const auto source_tensor = port_consuming_ops[0]->get_source_tensor();
        EXPECT_TRUE(source_tensor->is_equal(tensor_place));
        EXPECT_TRUE(source_tensor->is_equal(consuming_port->get_source_tensor()));
    }
}
// The producing op seen from a tensor, from its producing port, and from
// every consuming port must be the same operation.
TEST(PDPD_Places, check_producing_ops)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    for (const auto& tensor_name : tensor_names)
    {
        auto tensor_place = input_model->get_place_by_tensor_name(tensor_name);
        EXPECT_NE(tensor_place, nullptr);

        auto producing_op = tensor_place->get_producing_operation();
        auto consuming_ports = tensor_place->get_consuming_ports();
        auto producing_port = tensor_place->get_producing_port();

        EXPECT_TRUE(producing_op->is_equal(producing_port->get_producing_operation()));
        for (const auto& consuming_port : consuming_ports)
        {
            EXPECT_TRUE(producing_op->is_equal(consuming_port->get_producing_operation()));
        }
    }
}
// Index-based (position 0) input/output port accessors must work for the ops
// producing each model output.
TEST(PDPD_Places, check_input_output_ports_dy_idx)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    std::vector<std::string> output_names = {"save_infer_model/scale_0.tmp_1",
                                             "save_infer_model/scale_1.tmp_1",
                                             "save_infer_model/scale_2.tmp_1",
                                             "save_infer_model/scale_3.tmp_1",
                                             "save_infer_model/scale_4.tmp_1",
                                             "save_infer_model/scale_5.tmp_1"};
    for (const auto& tensor_name : output_names)
    {
        auto tensor_place = input_model->get_place_by_tensor_name(tensor_name);
        EXPECT_NE(tensor_place, nullptr);

        auto op = tensor_place->get_producing_operation();
        auto input_port = op->get_input_port(0);
        EXPECT_NE(input_port, nullptr);
        auto out_port = op->get_output_port(0);
        EXPECT_NE(out_port, nullptr);
    }
}
// Index-based (position 0) tensor/op accessors must work for the ops
// producing each model output.
TEST(PDPD_Places, check_ops_tensors_by_idx)
{
    FrontEndTestUtils::setupTestEnv();
    auto m_fem = FrontEndManager();
    auto frontend = m_fem.load_by_framework("pdpd");
    auto input_model = frontend->load(TEST_PDPD_MODELS + model_file);

    std::vector<std::string> output_names = {"save_infer_model/scale_0.tmp_1",
                                             "save_infer_model/scale_1.tmp_1",
                                             "save_infer_model/scale_2.tmp_1",
                                             "save_infer_model/scale_3.tmp_1",
                                             "save_infer_model/scale_4.tmp_1",
                                             "save_infer_model/scale_5.tmp_1"};
    for (const auto& tensor_name : output_names)
    {
        auto tensor_place = input_model->get_place_by_tensor_name(tensor_name);
        EXPECT_NE(tensor_place, nullptr);

        auto op = tensor_place->get_producing_operation();
        auto prod_op = op->get_producing_operation(0);
        EXPECT_NE(prod_op, nullptr);

        // Pointer comparison is intentional here: both must be the same object.
        auto target_tensor = op->get_target_tensor(0);
        EXPECT_EQ(tensor_place, target_tensor);

        auto source_tensor = op->get_source_tensor(0);
        EXPECT_NE(source_tensor, nullptr);

        auto consum_op = op->get_consuming_operations(0);
        EXPECT_EQ(consum_op.size(), 1);
    }
}

View File

@ -0,0 +1,40 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <cstdint>
#include <cstring>
/// Splits an in-memory buffer into tokens delimited by an arbitrary
/// byte-sequence separator. Non-owning: both buffers must outlive the object.
class Tokenizer {
public:
  /// Initialize tokenizer with an input and token separator buffers.
  Tokenizer(const void *str, size_t str_size, const void *separator,
            size_t separator_size)
      : str((const uint8_t *)str), str_size(str_size), separator(separator),
        separator_size(separator_size) {}

  /// Get next token. Returns a pointer into the input buffer and stores the
  /// token length in `*token_size`. The final call returns the remainder of
  /// the buffer (possibly empty).
  const void *next(size_t *token_size) {
    const void *token = this->str;
    if (this->str_size >= this->separator_size) {
      // Scan every offset where a whole separator still fits, including one
      // ending exactly at the end of the buffer (the old `<` bound missed a
      // trailing separator).
      for (size_t i = 0; i + this->separator_size <= this->str_size; i++)
        if (0 == memcmp(this->str + i, this->separator,
                        this->separator_size)) {
          // The token is the data before the separator, i.e. `i` bytes.
          // (Previously reported as str_size - separator_size, which wrongly
          // included everything after the separator as well.)
          *token_size = i;
          this->str += i + this->separator_size;
          this->str_size -= i + this->separator_size;
          return token;
        }
    }
    // No separator left: the whole remainder is the last token.
    *token_size = this->str_size;
    this->str = nullptr;
    this->str_size = 0;
    return token;
  }

private:
  const uint8_t *str;    // current read position (non-owning)
  size_t str_size;       // bytes remaining at `str`
  const void *separator; // separator byte sequence (non-owning)
  size_t separator_size; // separator length in bytes
};

View File

@ -0,0 +1,56 @@
#!/usr/bin/env python3
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Sample usage:
# ./scripts/init_corpus.py ./pdpd_layer_models/**/*.pdmodel --join pdiparams
# mkdir -p corpus && find ./pdpd_layer_models/ -name "*.fuzz" -exec cp \{\} .//import_pdpd-corpus \;
import argparse
import glob
import os
from pathlib import Path
import shutil
import sys
def globber(paths):
    """Yield each path, expanding any glob wildcards it contains."""
    for candidate in paths:
        # XXX: rely on non-public `has_magic` so a "normal" but non-existing
        # path is passed through untouched (and handled by us later) instead
        # of silently disappearing from the glob results.
        if not glob.has_magic(candidate):
            yield candidate
            continue
        yield from glob.iglob(candidate, recursive=True)
def main():
    """ Main entrypoint """
    parser = argparse.ArgumentParser(
        description="Join multiple files of the same name to a single *.fuzz file"
    )
    parser.add_argument("input", nargs="+", help="A file to add to the corpus")
    parser.add_argument(
        "--join",
        help="Colon separated list of file extensions to concatenate to corpus entry",
    )
    args = parser.parse_args()

    # Extensions to append after the primary file, if requested via --join.
    extensions = args.join.split(":") if args.join else []

    for source_path in globber(args.input):
        stem = os.path.splitext(source_path)[0]
        corpus_entry = f"{stem}.fuzz"
        # The corpus entry starts out as a copy of the primary input file.
        shutil.copyfile(source_path, corpus_entry)
        if not extensions:
            continue
        with open(corpus_entry, "ab") as corpus_file:
            for extension in extensions:
                sidecar = f"{stem}.{extension}"
                if not os.path.isfile(sidecar):
                    continue
                # Prefix every joined file with a marker so the fuzzer can
                # split the concatenated sections back apart.
                with open(sidecar, "rb") as sidecar_file:
                    corpus_file.write(bytes("FUZZ_NEXT_FIELD", "utf-8"))
                    corpus_file.write(sidecar_file.read())


if __name__ == "__main__":
    sys.exit(main())

View File

@ -16,7 +16,8 @@ foreach(test_source ${tests})
get_filename_component(test_name ${test_source} NAME_WE)
add_fuzzer(${test_name} ${test_source})
target_link_libraries(${test_name} PRIVATE IE::inference_engine cnpy zlib)
target_link_libraries(${test_name} PRIVATE IE::inference_engine cnpy zlib ${NGRAPH_LIBRARIES}
ngraph::frontend_manager)
add_dependencies(fuzz ${test_name})

View File

@ -0,0 +1,45 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "frontend_manager/frontend_manager.hpp"
#include "ngraph/ngraph.hpp"
#include "tokenizer.h"
#include <string>
// Array element count helper; appears unused in this translation unit.
#define COUNT_OF(A) (sizeof(A) / sizeof(A[0]))
// Byte sequence separating the model and params sections of a fuzz input
// (the same marker is written between files by scripts/init_corpus.py).
const char split_sequence[] = {'F', 'U', 'Z', 'Z', '_', 'N', 'E', 'X',
                               'T', '_', 'F', 'I', 'E', 'L', 'D'};
// Framework name passed to FrontEndManager::load_by_framework.
const char *PDPD = "pdpd";
using namespace ngraph;
using namespace ngraph::frontend;
/// libFuzzer entry point: treats the input buffer as
/// "<model>FUZZ_NEXT_FIELD<params>" and feeds it to the PDPD frontend.
/// Always returns 0; expected failures surface as exceptions and are
/// swallowed so the fuzzer only reports crashes/sanitizer findings.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  /// split input buffer to model and params
  Tokenizer tokenizer(data, size, split_sequence, sizeof(split_sequence));
  size_t model_size = 0;
  const void *model_buf = tokenizer.next(&model_size);
  size_t params_size = 0;
  const void *params_buf = tokenizer.next(&params_size);
  try {
    ngraph::frontend::FrontEndManager frontend_manager;
    ngraph::frontend::FrontEnd::Ptr frontend =
        frontend_manager.load_by_framework(PDPD);
    ngraph::frontend::InputModel::Ptr input_model;
    std::stringstream model;
    model << std::string((const char *)model_buf, model_size);
    // The streams live on the stack; hand them to the API with a no-op
    // deleter. Previously shared_ptr's default deleter would `delete` the
    // stack address on destruction, which is undefined behavior.
    std::shared_ptr<std::istream> in_model(&model, [](std::istream *) {});
    if (params_buf) {
      std::stringstream params;
      params << std::string((const char *)params_buf, params_size);
      std::shared_ptr<std::istream> in_params(&params, [](std::istream *) {});
      input_model = frontend->load(in_model, in_params);
    } else {
      input_model = frontend->load(in_model);
    }
    std::shared_ptr<ngraph::Function> function = frontend->convert(input_model);
  } catch (const std::exception &) {
    return 0; // fail gracefully on expected exceptions
  }
  return 0;
}