Publishing R5 content (#72)

* Publishing R5 content

* Updated ADE revision

* Updated README

* add possibility to build CPU plugin with Intel MKL package
This commit is contained in:
Alexey Suhov
2019-01-21 21:31:31 +03:00
committed by openvino-pushbot
parent fbc7a4a710
commit 9de27f16bc
1616 changed files with 126362 additions and 17710 deletions

View File

@@ -0,0 +1,83 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for ArgMax layer
 */
class INFERENCE_ENGINE_API_CLASS(ArgMaxLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ArgMaxLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ArgMaxLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ArgMaxLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
ArgMaxLayer& setPort(const Port& port);
/**
 * @brief Returns axis
 * @return Axis
 */
int getAxis() const;
/**
 * @brief Sets axis
 * @param axis Axis
 * @return reference to layer builder
 */
ArgMaxLayer& setAxis(int axis);
/**
 * @brief Returns top K
 * @return Top K
 */
size_t getTopK() const;
/**
 * @brief Sets top K
 * @param topK Top K
 * @return reference to layer builder
 */
ArgMaxLayer& setTopK(size_t topK);
/**
 * @brief Returns output maximum value
 * @return Output maximum value
 */
size_t getOutMaxVal() const;
/**
 * @brief Sets output maximum value
 * @param size Maximum value
 * @return reference to layer builder
 * @note NOTE(review): presumably this toggles between emitting indices and emitting values (ArgMax out_max_val semantics) — confirm against the layer specification
 */
ArgMaxLayer& setOutMaxVal(size_t size);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,81 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for BatchNormalization layer
 */
class INFERENCE_ENGINE_API_CLASS(BatchNormalizationLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit BatchNormalizationLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit BatchNormalizationLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
BatchNormalizationLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
BatchNormalizationLayer& setPort(const Port &port);
/**
 * @brief Sets weights for layer
 * @param weights Constant blob with weights
 * @return reference to layer builder
 */
BatchNormalizationLayer& setWeights(const Blob::CPtr& weights);
/**
 * @brief Sets biases for layer
 * @param biases Constant blob with biases
 * @return reference to layer builder
 */
BatchNormalizationLayer& setBiases(const Blob::CPtr& biases);
/**
 * @brief Returns epsilon
 * @return Epsilon
 */
float getEpsilon() const;
/**
 * @brief Sets epsilon
 * @param eps Epsilon
 * @return reference to layer builder
 */
BatchNormalizationLayer& setEpsilon(float eps);
/**
 * @brief Validates layer before creation
 * @param layer generic layer builder
 * @note NOTE(review): presumably reports failure by throwing on an invalid configuration — confirm against the implementation
 */
static void validate(const Layer& layer);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,72 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Clamp layer
 */
class INFERENCE_ENGINE_API_CLASS(ClampLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ClampLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ClampLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ClampLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
ClampLayer& setPort(const Port& port);
/**
 * @brief Returns minimum value
 * @return Minimum value
 */
float getMinValue() const;
/**
 * @brief Sets minimum value
 * @param minValue Minimum value
 * @return reference to layer builder
 */
ClampLayer& setMinValue(float minValue);
/**
 * @brief Returns maximum value
 * @return Maximum value
 */
float getMaxValue() const;
/**
 * @brief Sets maximum value
 * @param maxValue Maximum value
 * @return reference to layer builder
 */
ClampLayer& setMaxValue(float maxValue);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,76 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Concat layer
 */
class INFERENCE_ENGINE_API_CLASS(ConcatLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ConcatLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ConcatLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ConcatLayer& setName(const std::string& name);
/**
 * @brief Returns vector with input ports
 * @return vector with ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
ConcatLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
ConcatLayer& setOutputPort(const Port& port);
/**
 * @brief Returns axis
 * @return Axis
 */
size_t getAxis() const;
/**
 * @brief Sets axis
 * @param axis Axis
 * @return reference to layer builder
 */
ConcatLayer& setAxis(size_t axis);
private:
// NOTE(review): no declared member function reads this field; sibling builders store state in the generic layer instead — confirm it is actually needed
size_t axis;
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,57 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Const layer
 */
class INFERENCE_ENGINE_API_CLASS(ConstLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ConstLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ConstLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ConstLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
ConstLayer& setPort(const Port& port);
/**
 * @brief Sets constant data
 * @param data Constant blob with data
 * @return reference to layer builder
 */
ConstLayer& setData(const Blob::CPtr& data);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,163 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <vector>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Convolution layer
 */
class INFERENCE_ENGINE_API_CLASS(ConvolutionLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ConvolutionLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ConvolutionLayer(Layer& genLayer);
/**
 * @brief Operator creates generic layer builder
 * @return Generic layer builder
 */
operator Layer() const override;
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ConvolutionLayer& setName(const std::string& name);
/**
 * @brief Sets weights for layer
 * @param weights Constant blob with weights
 * @return reference to layer builder
 */
ConvolutionLayer& setWeights(const Blob::CPtr& weights);
/**
 * @brief Sets biases for layer
 * @param biases Constant blob with biases
 * @return reference to layer builder
 */
ConvolutionLayer& setBiases(const Blob::CPtr& biases);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param port Input port
 * @return reference to layer builder
 */
ConvolutionLayer& setInputPort(const Port& port);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
ConvolutionLayer& setOutputPort(const Port& port);
/**
 * @brief Returns kernel size
 * @return Kernel size
 * @note NOTE(review): getters below return const values by copy, which blocks moves at call sites — candidate for const& in a future API revision
 */
const std::vector<size_t> getKernel() const;
/**
 * @brief Sets kernel size
 * @param kernel Kernel size
 * @return reference to layer builder
 */
ConvolutionLayer& setKernel(const std::vector<size_t>& kernel);
/**
 * @brief Returns vector of strides
 * @return vector of strides
 */
const std::vector<size_t> getStrides() const;
/**
 * @brief Sets strides
 * @param strides vector of strides
 * @return reference to layer builder
 */
ConvolutionLayer& setStrides(const std::vector<size_t>& strides);
/**
 * @brief Returns dilations
 * @return vector of dilations
 */
const std::vector<size_t> getDilation() const;
/**
 * @brief Sets dilations
 * @param dilation Vector of dilations
 * @return reference to layer builder
 */
ConvolutionLayer& setDilation(const std::vector<size_t>& dilation);
/**
 * @brief Returns begin paddings
 * @return vector of paddings
 */
const std::vector<size_t> getPaddingsBegin() const;
/**
 * @brief Sets begin paddings
 * @param paddings Vector of paddings
 * @return reference to layer builder
 */
ConvolutionLayer& setPaddingsBegin(const std::vector<size_t>& paddings);
/**
 * @brief Return end paddings
 * @return Vector of paddings
 */
const std::vector<size_t> getPaddingsEnd() const;
/**
 * @brief Sets end paddings
 * @param paddings Vector of paddings
 * @return reference to layer builder
 */
ConvolutionLayer& setPaddingsEnd(const std::vector<size_t>& paddings);
/**
 * @brief Returns group
 * @return Group
 */
size_t getGroup() const;
/**
 * @brief Sets group
 * @param group Group
 * @return reference to layer builder
 */
ConvolutionLayer& setGroup(size_t group);
/**
 * @brief Return output depth
 * @return Output depth
 */
size_t getOutDepth() const;
/**
 * @brief Sets output depth
 * @param outDepth Output depth
 * @return reference to layer builder
 */
ConvolutionLayer& setOutDepth(size_t outDepth);
/**
 * @brief Validates layer before creation
 * @param layer generic layer builder
 */
static void validate(const Layer& layer);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,90 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Crop layer
 */
class INFERENCE_ENGINE_API_CLASS(CropLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit CropLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit CropLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
CropLayer& setName(const std::string& name);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
CropLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Return output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
CropLayer& setOutputPort(const Port& port);
/**
 * @brief Returns axes
 * @return Vector of axes
 */
const std::vector<size_t> getAxis() const;
/**
 * @brief Sets axes
 * @param axis Vector of axes
 * @return reference to layer builder
 */
CropLayer& setAxis(const std::vector<size_t>& axis);
/**
 * @brief Returns offsets
 * @return Vector of offsets
 */
const std::vector<size_t> getOffset() const;
/**
 * @brief Sets offsets
 * @param offsets Vector of offsets
 * @return reference to layer builder
 */
CropLayer& setOffset(const std::vector<size_t>& offsets);
/**
 * @brief Validates layer before creation
 * @param layer generic layer builder
 */
static void validate(const Layer& layer);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,74 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for CTCGreedyDecoder layer
 */
class INFERENCE_ENGINE_API_CLASS(CTCGreedyDecoderLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit CTCGreedyDecoderLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit CTCGreedyDecoderLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
CTCGreedyDecoderLayer& setName(const std::string& name);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
CTCGreedyDecoderLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
CTCGreedyDecoderLayer& setOutputPort(const Port& port);
/**
 * @brief Returns the CTCMergeRepeated flag
 * @return true if repeated labels are merged during decoding
 */
bool getCTCMergeRepeated() const;
/**
 * @brief Sets the CTCMergeRepeated flag
 * @param flag bool value
 * @return reference to layer builder
 */
CTCGreedyDecoderLayer& setCTCMergeRepeated(bool flag);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,32 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_convolution_layer.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Deconvolution layer
 * @note Inherits all kernel/stride/padding/dilation accessors from ConvolutionLayer
 */
class INFERENCE_ENGINE_API_CLASS(DeconvolutionLayer): public ConvolutionLayer {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit DeconvolutionLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit DeconvolutionLayer(Layer& genLayer);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,183 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for DetectionOutput layer
 */
class INFERENCE_ENGINE_API_CLASS(DetectionOutputLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit DetectionOutputLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit DetectionOutputLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
DetectionOutputLayer& setName(const std::string& name);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
DetectionOutputLayer& setOutputPort(const Port& port);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
DetectionOutputLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns number of classes
 * @return Number of classes
 */
size_t getNumClasses() const;
/**
 * @brief Sets number of classes to be predicted
 * @param num Number of classes
 * @return reference to layer builder
 */
DetectionOutputLayer& setNumClasses(size_t num);
/**
 * @brief Returns background label ID
 * @return Background ID
 * @note NOTE(review): the "Backgroud" spelling in the method name is part of the public API and is kept for compatibility
 */
int getBackgroudLabelId() const;
/**
 * @brief Sets background label ID
 * @param labelId Background ID if there is no background class, set it to -1.
 * @return reference to layer builder
 */
DetectionOutputLayer& setBackgroudLabelId(int labelId);
/**
 * @brief Returns maximum number of results to be kept on NMS stage
 * @return Top K
 */
int getTopK() const;
/**
 * @brief Sets maximum number of results to be kept on NMS stage
 * @param topK Top K
 * @return reference to layer builder
 */
DetectionOutputLayer& setTopK(int topK);
/**
 * @brief Returns number of total boxes to be kept per image after NMS step
 * @return Keep top K
 */
int getKeepTopK() const;
/**
 * @brief Sets number of total boxes to be kept per image after NMS step
 * @param topK Keep top K
 * @return reference to layer builder
 */
DetectionOutputLayer& setKeepTopK(int topK);
/**
 * @brief Returns number of oriented classes
 * @return Number of oriented classes
 */
int getNumOrientClasses() const;
/**
 * @brief Sets number of oriented classes
 * @param numClasses Number of classes
 * @return reference to layer builder
 */
DetectionOutputLayer& setNumOrientClasses(int numClasses);
/**
 * @brief Returns type of coding method for bounding boxes
 * @return String with code type
 */
std::string getCodeType() const;
/**
 * @brief Sets type of coding method for bounding boxes
 * @param type Type
 * @return reference to layer builder
 */
DetectionOutputLayer& setCodeType(std::string type);
/**
 * @brief Returns interpolate orientation
 * @return Interpolate orientation
 */
int getInterpolateOrientation() const;
/**
 * @brief Sets interpolate orientation
 * @param orient Orientation
 * @return reference to layer builder
 */
DetectionOutputLayer& setInterpolateOrientation(int orient);
/**
 * @brief Returns threshold to be used in NMS stage
 * @return Threshold
 */
float getNMSThreshold() const;
/**
 * @brief Sets threshold to be used in NMS stage
 * @param threshold NMS threshold
 * @return reference to layer builder
 */
DetectionOutputLayer& setNMSThreshold(float threshold);
/**
 * @brief Returns confidence threshold
 * @return Threshold
 */
float getConfidenceThreshold() const;
/**
 * @brief Sets confidence threshold
 * @param threshold Threshold
 * @return reference to layer builder
 */
DetectionOutputLayer& setConfidenceThreshold(float threshold);
/**
 * @brief Returns share location
 * @return true if bounding boxes are shared among different classes
 */
bool getShareLocation() const;
/**
 * @brief Sets share location
 * @param flag true if bounding boxes are shared among different classes
 * @return reference to layer builder
 */
DetectionOutputLayer& setShareLocation(bool flag);
/**
 * @brief Returns encoded settings
 * @return true if variance is encoded in target
 * @note NOTE(review): method name says "Variant" while the docs say "variance" — the name is part of the public API and is kept for compatibility
 */
bool getVariantEncodedInTarget() const;
/**
 * @brief Sets encoded settings
 * @param flag true if variance is encoded in target
 * @return reference to layer builder
 */
DetectionOutputLayer& setVariantEncodedInTarget(bool flag);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,96 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Eltwise layer
 */
class INFERENCE_ENGINE_API_CLASS(EltwiseLayer): public LayerFragment {
public:
/**
 * @brief The enum defines all Eltwise types
 */
enum EltwiseType {
SUM = 1,
MAX,
MUL
};
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit EltwiseLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit EltwiseLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
EltwiseLayer& setName(const std::string& name);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
EltwiseLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
EltwiseLayer& setOutputPort(const Port& port);
/**
 * @brief Returns eltwise type
 * @return Eltwise type
 */
EltwiseType getEltwiseType() const;
/**
 * @brief Sets eltwise type
 * @param type Eltwise type
 * @return reference to layer builder
 */
EltwiseLayer& setEltwiseType(EltwiseType type);
/**
 * @brief Returns eltwise scales
 * @return Vector of scales
 */
const std::vector<float> getScales() const;
/**
 * @brief Sets eltwise scales
 * @param scales Vector of scales
 * @return reference to layer builder
 */
EltwiseLayer& setScales(const std::vector<float>& scales);
private:
// NOTE(review): no declared member function reads this field; sibling builders store state in the generic layer instead — confirm it is actually needed
EltwiseType type;
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,62 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for ELU layer
 */
class INFERENCE_ENGINE_API_CLASS(ELULayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ELULayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ELULayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ELULayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
ELULayer& setPort(const Port& port);
/**
 * @brief Returns alpha
 * @return Alpha
 */
float getAlpha() const;
/**
 * @brief Sets alpha
 * @param alpha Alpha
 * @return reference to layer builder
 */
ELULayer& setAlpha(float alpha);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,85 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for FullyConnected layer
 */
class INFERENCE_ENGINE_API_CLASS(FullyConnectedLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit FullyConnectedLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit FullyConnectedLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
FullyConnectedLayer& setName(const std::string& name);
/**
 * @brief Sets weights for layer
 * @param weights Constant blob with weights
 * @return reference to layer builder
 */
FullyConnectedLayer& setWeights(const Blob::CPtr& weights);
/**
 * @brief Sets biases for layer
 * @param biases Constant blob with biases
 * @return reference to layer builder
 */
FullyConnectedLayer& setBiases(const Blob::CPtr& biases);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param port Input port
 * @return reference to layer builder
 */
FullyConnectedLayer& setInputPort(const Port& port);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
FullyConnectedLayer& setOutputPort(const Port& port);
/**
 * @brief Returns output size
 * @return Output size
 */
size_t getOutputNum() const;
/**
 * @brief Sets output size
 * @param outNum Output size
 * @return reference to layer builder
 */
FullyConnectedLayer& setOutputNum(size_t outNum);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,61 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for GRN layer
 */
class INFERENCE_ENGINE_API_CLASS(GRNLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit GRNLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit GRNLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
GRNLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
GRNLayer& setPort(const Port& port);
/**
 * @brief Returns beta
 * @return Beta
 */
float getBeta() const;
/**
 * @brief Sets beta
 * @param beta Beta
 * @return reference to layer builder
 */
GRNLayer& setBeta(float beta);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,56 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Input layer
 */
class INFERENCE_ENGINE_API_CLASS(InputLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit InputLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit InputLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
InputLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
InputLayer& setPort(const Port &port);
/**
 * @brief Validates layer before creation
 * @param layer generic layer builder
 * @note NOTE(review): presumably reports failure by throwing on an invalid configuration — confirm against the implementation
 */
static void validate(const Layer& layer);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,247 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <details/caseless.hpp>
#include <ie_parameter.hpp>
#include <ie_inetwork.hpp>
#include <ie_blob.h>
#include <string>
#include <vector>
#include <memory>
#include <map>
namespace InferenceEngine {
namespace Builder {
class Layer;
/**
 * @brief This structure implements a holder for validators
 */
struct ValidatorsHolder {
/**
 * @brief Caseless map connects type with validator
 * The key is the layer type (matched case-insensitively); the value is a callback invoked with the layer builder to validate
 */
details::caseless_map<std::string, std::function<void(const Layer&)>> validators;
};
/**
 * @brief This class implements a builder for an Inference Engine Layer
 */
class INFERENCE_ENGINE_API_CLASS(Layer) {
public:
/**
 * @brief The constructor creates a Layer builder with layer type and layer name
 * @param type Layer type
 * @param name Layer name (empty by default)
 */
explicit Layer(const std::string& type, const std::string& name = "");
/**
 * @brief The constructor creates a Layer builder from a shared pointer to ILayer
 * @param layer shared pointer to ILayer
 */
explicit Layer(const ILayer::Ptr& layer);
/**
 * @brief The constructor creates a Layer builder from a shared pointer to constant ILayer
 * @param layer shared pointer to constant ILayer
 */
explicit Layer(const ILayer::CPtr& layer);
/**
 * @brief The constructor creates a Layer builder with the given layer ID from another layer builder
 * @param id Layer ID
 * @param layer layer builder to copy from
 */
Layer(idx_t id, const Layer& layer);
/**
 * @brief Returns the layer builder ID
 * @return ID
 */
idx_t getId() const;
/**
 * @brief Returns a reference to the layer type
 * @return Layer type
 */
std::string& getType();
/**
 * @brief Returns a constant reference to the layer type
 * @return constant layer type
 */
const std::string& getType() const;
/**
 * @brief Sets the layer type
 * @param type Layer type
 * @return Reference to Layer builder
 */
Layer& setType(const std::string& type);
/**
 * @brief Returns a reference to the layer name
 * @return Layer name
 */
std::string& getName();
/**
 * @brief Returns a constant reference to the layer name
 * @return constant layer name
 */
const std::string& getName() const;
/**
 * @brief Sets the layer name
 * @param name Layer name
 * @return Reference to Layer builder
 */
Layer& setName(const std::string& name);
/**
 * @brief Returns the layer subgraph
 * @return shared pointer to INetwork
 */
INetwork::Ptr& getGraph();
/**
 * @brief Returns the layer subgraph (constant accessor)
 * @return constant shared pointer to INetwork
 */
const INetwork::Ptr& getGraph() const;
/**
 * @brief Sets the layer subgraph
 * @param graph constant shared pointer to INetwork
 * @return Reference to Layer builder
 */
Layer& setGraph(const INetwork::Ptr& graph);
/**
 * @brief Returns the map of layer parameters
 * @return map of parameters
 */
std::map<std::string, Parameter>& getParameters();
/**
 * @brief Returns the map of layer parameters (constant accessor)
 * @return constant map of parameters
 */
const std::map<std::string, Parameter>& getParameters() const;
/**
 * @brief Sets parameters for the layer, replacing any existing ones
 * @param params constant map of parameters
 * @return Reference to Layer builder
 */
Layer& setParameters(const std::map<std::string, Parameter>& params);
/**
 * @brief Returns the map of internal blobs (constant data such as weights and biases)
 * @return map of internal blobs
 */
std::map<std::string, Blob::CPtr>& getConstantData();
/**
 * @brief Returns the map of internal blobs (constant accessor)
 * @return constant map of internal blobs
 */
const std::map<std::string, Blob::CPtr>& getConstantData() const;
/**
 * @brief Sets constant data for the layer
 * @param constData constant map of shared pointers to blobs
 * @return Reference to Layer builder
 */
Layer& setConstantData(const std::map<std::string, Blob::Ptr>& constData);
/**
 * @brief Sets constant data for the layer
 * @param constData constant map of shared pointers to constant blobs
 * @return Reference to Layer builder
 */
Layer& setConstantData(const std::map<std::string, Blob::CPtr>& constData);
/**
 * @brief Adds a single constant data blob for the layer by name
 * @param name Name of constant data
 * @param data shared pointer to constant blob
 * @return Reference to Layer builder
 */
Layer& addConstantData(const std::string& name, const Blob::CPtr& data);
/**
 * @brief Returns the vector of input ports
 * @return Vector of input ports
 */
std::vector<Port>& getInputPorts();
/**
 * @brief Returns the vector of input ports (constant accessor)
 * @return constant vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets the input ports
 * @param ports vector of ports
 * @return Reference to Layer builder
 */
Layer& setInputPorts(const std::vector<Port> &ports);
/**
 * @brief Returns the vector of output ports
 * @return Vector of output ports
 */
std::vector<Port>& getOutputPorts();
/**
 * @brief Returns the vector of output ports (constant accessor)
 * @return constant vector of output ports
 */
const std::vector<Port>& getOutputPorts() const;
/**
 * @brief Sets the output ports
 * @param ports vector of ports
 * @return Reference to Layer builder
 */
Layer& setOutputPorts(const std::vector<Port> &ports);
/**
 * @brief Validates the current builder and generates an ILayer object
 * @return constant shared pointer to ILayer
 */
const ILayer::Ptr build() const;
/**
 * @brief Validates the layer builder using the validator registered for its type
 */
void validate() const;
/**
 * @brief Registers a new validator for a layer type (shared by all Layer instances)
 * @param type Layer type
 * @param validator Layer validator
 */
static void addValidator(const std::string& type, const std::function<void(const Layer&)>& validator);
private:
idx_t id;  // layer identifier
std::string type;  // layer type string
std::string name;  // layer name
INetwork::Ptr graph;  // optional subgraph owned by this layer
std::vector<Port> inPorts;  // input port descriptors
std::vector<Port> outPorts;  // output port descriptors
std::map<std::string, Parameter> params;  // layer parameters
std::map<std::string, Blob::CPtr> constData;  // constant blobs (e.g. weights)
// Accessor for the process-wide validators registry used by addValidator()/validate()
static std::shared_ptr<ValidatorsHolder> getValidatorsHolder();
};
/**
 * @brief This class registers layer validators at static-initialization time
 */
class ValidatorRegisterBase {
public:
/**
 * @brief The constructor registers a new layer validator for the given type
 * @param type Layer type
 * @param validator Layer validator
 */
explicit ValidatorRegisterBase(const std::string& type, const std::function<void(const Layer&)>& validator) {
InferenceEngine::Builder::Layer::addValidator(type, validator);
}
};
// Declares a file-local static registrar object so that the validator for
// layer type __type is registered before main() runs.
#define REG_VALIDATOR_FOR(__type, __validator) \
static InferenceEngine::Builder::ValidatorRegisterBase _reg_##__type(#__type, __validator)
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,76 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_builder.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief This class defines the basic functionality shared by typed layer builders
 */
class INFERENCE_ENGINE_API_CLASS(LayerFragment) {
public:
/**
 * @brief The constructor creates a layer builder with layer type and layer name
 * @param type Layer type
 * @param name Layer name
 */
LayerFragment(const std::string& type, const std::string& name);
/**
 * @brief The constructor creates a layer builder wrapping a reference to a generic layer builder
 * @param genLayer Generic layer builder
 */
explicit LayerFragment(Layer& genLayer);
/**
 * @brief The copy constructor
 * @note Marked explicit, so implicit copies of LayerFragment are disallowed
 * @param rval Source builder
 */
explicit LayerFragment(const LayerFragment& rval);
/**
 * @brief Copy-assignment operator for LayerFragment
 * @param rval Source builder
 * @return Layer builder
 */
LayerFragment& operator=(const LayerFragment& rval);
/**
 * @brief Virtual destructor
 */
virtual ~LayerFragment() = default;
/**
 * @brief The conversion operator creates a generic builder from this typed builder
 * @return Generic builder
 */
virtual operator Layer() const;
/**
 * @brief Returns the layer type
 * @return Layer type
 */
const std::string& getType() const;
/**
 * @brief Returns the layer name
 * @return Layer name
 */
const std::string& getName() const;
protected:
// Converts a vector of unsigned int to a vector of size_t (widening copy)
const std::vector<size_t> uInts2size_t(const std::vector<unsigned int>& vector) const;
// Returns the underlying generic layer builder the fragment operates on
Layer& getLayer() const;
private:
Layer layer;  // owned generic builder
// Reference to the generic builder in use — presumably bound either to the
// owned `layer` above or to an external builder passed to the constructor;
// TODO(review): confirm against the implementation
Layer& refLayer;
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,94 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Memory layer
 */
class INFERENCE_ENGINE_API_CLASS(MemoryLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit MemoryLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit MemoryLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
MemoryLayer& setName(const std::string& name);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
MemoryLayer& setOutputPort(const Port& port);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param port Input port
 * @return reference to layer builder
 */
MemoryLayer& setInputPort(const Port& port);
/**
 * @brief Returns memory ID
 * @return String with memory ID
 */
const std::string getId() const;
/**
 * @brief Sets memory ID
 * @param id Memory ID
 * @return reference to layer builder
 */
MemoryLayer& setId(const std::string& id);
/**
 * @brief Returns the index of the memory layer
 * @return Index
 */
size_t getIndex() const;
/**
 * @brief Sets the index of the memory layer
 * @param index Index of the layer; a value of 0 means this layer is the output one
 * @return reference to layer builder
 */
MemoryLayer& setIndex(size_t index);
/**
 * @brief Returns size of the group
 * @return Size of the group
 */
size_t getSize() const;
/**
 * @brief Sets size of the group
 * @param size Size of the group; a value of 2 means the group is a pair (only 2 is supported)
 * @return reference to layer builder
 */
MemoryLayer& setSize(size_t size);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,83 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for MVN (Mean-Variance Normalization) layer
 */
class INFERENCE_ENGINE_API_CLASS(MVNLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit MVNLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit MVNLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
MVNLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
MVNLayer& setPort(const Port& port);
/**
 * @brief Returns the across-channels flag
 * @return true if mean values are shared across channels
 */
bool getAcrossChannels() const;
/**
 * @brief Sets the across-channels flag
 * @param flag true if mean values are shared across channels
 * @return reference to layer builder
 */
MVNLayer& setAcrossChannels(bool flag);
/**
 * @brief Returns the normalize-variance flag
 * @return true if variance normalization is performed
 */
bool getNormalize() const;
/**
 * @brief Sets the normalize-variance flag
 * @param flag true if variance normalization is performed
 * @return reference to layer builder
 */
MVNLayer& setNormalize(bool flag);
/**
 * @brief Returns epsilon
 * @return Epsilon
 */
float getEpsilon() const;
/**
 * @brief Sets epsilon
 * @param eps Epsilon
 * @return reference to layer builder
 */
MVNLayer& setEpsilon(float eps);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,185 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_builder.hpp>
#include <ie_icnn_network.hpp>
#include <cpp/ie_cnn_network.h>
#include <ie_inetwork.hpp>
#include <ie_context.hpp>
#include <ie_common.h>
#include <ie_blob.h>
#include <utility>
#include <memory>
#include <string>
#include <vector>
#include <map>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief This class implements a builder for IE Network
 */
class INFERENCE_ENGINE_API_CLASS(Network) {
public:
/**
 * @brief A shared pointer to the Network builder
 */
using Ptr = std::shared_ptr<Network>;
/**
 * @brief The constructor creates a builder based on ICNNNetwork
 *
 * @param network constant reference to ICNNNetwork object
 */
explicit Network(const ICNNNetwork& network);
/**
 * @brief The constructor creates an empty builder with a network name
 *
 * @param name Network name
 */
explicit Network(const std::string& name);
/**
 * @brief The constructor creates a builder based on INetwork
 *
 * @param network constant reference to INetwork object
 */
explicit Network(const INetwork& network);
/**
 * @brief The constructor creates a builder based on ICNNNetwork with a custom Context
 *
 * @param ieContext constant reference to Context object
 * @param network constant reference to ICNNNetwork object
 */
Network(const Context& ieContext, const ICNNNetwork& network);
/**
 * @brief The constructor creates an empty builder with a network name and a custom Context
 *
 * @param ieContext constant reference to Context object
 * @param name Network name
 */
Network(const Context& ieContext, const std::string& name);
/**
 * @brief The constructor creates a builder based on INetwork with a custom Context
 *
 * @param ieContext constant reference to Context object
 * @param network constant reference to INetwork object
 */
Network(const Context& ieContext, const INetwork& network);
/**
 * @brief Virtual destructor
 */
virtual ~Network() = default;
/**
 * @brief Adds a new layer and connects it with previous layers
 *
 * @param inputs Vector with PortInfo objects from previous layers
 * @param layer Layer builder for new layer
 *
 * @return Id of new builder for the current network
 */
idx_t addLayer(const std::vector<PortInfo>& inputs, const Layer& layer);
/**
 * @brief Adds a new layer (without connections)
 *
 * @param layer Layer builder for new layer
 *
 * @return Id of new builder for the current network
 */
idx_t addLayer(const Layer& layer);
/**
 * @brief Removes a layer by ID
 *
 * @param layerId Layer ID
 */
void removeLayer(idx_t layerId);
/**
 * @brief Connects two layers
 *
 * @param input PortInfo object from previous layer
 * @param output PortInfo object from next layer
 */
void connect(const PortInfo& input, const PortInfo& output);
/**
 * @brief Removes a connection from the network
 *
 * @param connection Connection
 */
void disconnect(const Connection& connection);
/**
 * @brief Returns a layer builder by ID
 *
 * @param layerId Layer ID
 *
 * @return Layer builder
 */
Layer& getLayer(idx_t layerId);
/**
 * @brief Returns a constant layer builder by ID
 *
 * @param layerId Layer ID
 *
 * @return constant layer builder
 */
const Layer& getLayer(idx_t layerId) const;
/**
 * @brief Returns the vector of layer builders
 *
 * @return Vector of layer builders
 */
std::vector<Layer>& getLayers();
/**
 * @brief Returns the constant vector of layer builders
 *
 * @return constant vector of layer builders
 */
const std::vector<Layer>& getLayers() const;
/**
 * @brief Returns all connections for a layer
 *
 * @param layerId Layer ID
 *
 * @return Vector of connections for the current layer
 */
const std::vector<Connection> getLayerConnections(idx_t layerId) const noexcept;
/**
 * @brief Builds and validates the network
 *
 * @return const shared pointer to INetwork
 */
const INetwork::Ptr build() const;
/**
 * @brief The conversion operator builds the network
 *
 * @return const shared pointer to INetwork
 */
explicit operator const INetwork::Ptr() const;
private:
const Context ctx;  // execution context used while building
const size_t version;  // network format version
std::string name;  // network name
std::vector<Layer> layers;  // layer builders of the network
std::vector<Connection> connections;  // edges between layer ports
};
/**
 * @brief This function converts INetwork to ICNNNetwork
 *
 * @param network constant shared pointer to INetwork object
 * @return constant shared pointer to ICNNNetwork
 */
INFERENCE_ENGINE_API_CPP(const std::shared_ptr<ICNNNetwork>) convertToICNNNetwork(const INetwork::Ptr& network);
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,112 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Norm (local response normalization) layer
 */
class INFERENCE_ENGINE_API_CLASS(NormLayer): public LayerFragment {
public:
/**
 * @brief The enum defines all Norm types
 */
enum NormType {
WITHIN_CHANNEL = 0,
ACROSS_CHANNELS = 1
};
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit NormLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit NormLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
NormLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
NormLayer& setPort(const Port& port);
/**
 * @brief Returns side length of the normalization region
 * @return Size
 */
size_t getSize() const;
/**
 * @brief Sets side length of the normalization region
 * @param size Size
 * @return reference to layer builder
 */
NormLayer& setSize(size_t size);
/**
 * @brief Returns scaling parameter for the normalizing sum
 * @return Scaling parameter
 */
float getAlpha() const;
/**
 * @brief Sets scaling parameter for the normalizing sum
 * @param alpha Scaling parameter
 * @return reference to layer builder
 */
NormLayer& setAlpha(float alpha);
/**
 * @brief Returns exponent for the normalizing sum
 * @return Exponent
 */
float getBeta() const;
/**
 * @brief Sets exponent for the normalizing sum
 * @param beta Exponent
 * @return reference to layer builder
 */
NormLayer& setBeta(float beta);
/**
 * @brief Returns region type as a boolean
 * @return true if normalizing sum is performed over adjacent channels
 */
bool getAcrossMaps() const;
/**
 * @brief Sets region type as a boolean
 * @param acrossMap true if normalizing sum is performed over adjacent channels
 * @return reference to layer builder
 */
NormLayer& setAcrossMaps(bool acrossMap);
/**
 * @brief Returns region type
 * @return Norm type
 */
NormType getRegion() const;
/**
 * @brief Sets region type
 * @param type region type
 * @return reference to layer builder
 */
NormLayer& setRegion(NormType type);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,85 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Normalize layer
 */
class INFERENCE_ENGINE_API_CLASS(NormalizeLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit NormalizeLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit NormalizeLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
NormalizeLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
NormalizeLayer& setPort(const Port& port);
/**
 * @brief Returns the channel-shared flag
 * @return true if scale parameters are shared across channels
 */
bool getChannelShared() const;
/**
 * @brief Sets the channel-shared flag
 * @param acrossMap true if scale parameters are shared across channels
 * @return reference to layer builder
 */
NormalizeLayer& setChannelShared(bool acrossMap);
/**
 * @brief Returns the across-maps flag
 * @return true if normalization is shared across channels
 */
bool getAcrossMaps() const;
/**
 * @brief Sets the across-maps flag
 * @param acrossMap true if normalization is shared across channels
 * @return reference to layer builder
 */
NormalizeLayer& setAcrossMaps(bool acrossMap);
/**
 * @brief Returns epsilon
 * @return Epsilon
 */
float getEpsilon() const;
/**
 * @brief Sets epsilon
 * @param eps Epsilon
 * @return reference to layer builder
 */
NormalizeLayer& setEpsilon(float eps);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,50 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Output layer
 */
class INFERENCE_ENGINE_API_CLASS(OutputLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit OutputLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit OutputLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
OutputLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
OutputLayer& setPort(const Port &port);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,86 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <vector>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Permute layer
 */
class INFERENCE_ENGINE_API_CLASS(PermuteLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit PermuteLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit PermuteLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
PermuteLayer& setName(const std::string& name);
/**
 * @brief Sets weights for layer
 * @note NOTE(review): weight/bias setters are unusual for a Permute layer — confirm they are intentional
 * @param weights Constant blob with weights
 * @return reference to layer builder
 */
PermuteLayer& setWeights(const Blob::CPtr& weights);
/**
 * @brief Sets biases for layer
 * @param biases Constant blob with biases
 * @return reference to layer builder
 */
PermuteLayer& setBiases(const Blob::CPtr& biases);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param port Input port
 * @return reference to layer builder
 */
PermuteLayer& setInputPort(const Port& port);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
PermuteLayer& setOutputPort(const Port& port);
/**
 * @brief Returns the vector of dimension indexes for the output blob
 * @return Order of dimensions for output blob
 */
const std::vector<size_t> getOrder() const;
/**
 * @brief Sets the order of dimensions for the output blob
 * @param order dimension indexes for output blob
 * @return reference to layer builder
 */
PermuteLayer& setOrder(const std::vector<size_t>& order);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,170 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <vector>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Pooling layer
 */
class INFERENCE_ENGINE_API_CLASS(PoolingLayer): public LayerFragment {
public:
/**
 * @brief The enum defines available pooling types
 */
enum PoolingType {
MAX = 1,
AVG = 2
};
/**
 * @brief The enum defines available rounding types for output shape computation
 */
enum RoundingType {
CEIL = 1,
FLOOR = 2
};
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit PoolingLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit PoolingLayer(Layer& genLayer);
/**
 * @brief The conversion operator creates a generic layer builder
 * @return Generic layer builder
 */
operator Layer() const override;
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
PoolingLayer& setName(const std::string& name);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param port Input port
 * @return reference to layer builder
 */
PoolingLayer& setInputPort(const Port& port);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
PoolingLayer& setOutputPort(const Port& port);
/**
 * @brief Returns kernel size
 * @return Kernel size
 */
const std::vector<size_t> getKernel() const;
/**
 * @brief Sets kernel size
 * @param kernel Kernel size
 * @return reference to layer builder
 */
PoolingLayer& setKernel(const std::vector<size_t>& kernel);
/**
 * @brief Returns vector of strides
 * @return vector of strides
 */
const std::vector<size_t> getStrides() const;
/**
 * @brief Sets strides
 * @param strides vector of strides
 * @return reference to layer builder
 */
PoolingLayer& setStrides(const std::vector<size_t>& strides);
/**
 * @brief Returns begin paddings
 * @return vector of paddings
 */
const std::vector<size_t> getPaddingsBegin() const;
/**
 * @brief Sets begin paddings
 * @param paddings Vector of paddings
 * @return reference to layer builder
 */
PoolingLayer& setPaddingsBegin(const std::vector<size_t>& paddings);
/**
 * @brief Returns end paddings
 * @return Vector of paddings
 */
const std::vector<size_t> getPaddingsEnd() const;
/**
 * @brief Sets end paddings
 * @param paddings Vector of paddings
 * @return reference to layer builder
 */
PoolingLayer& setPaddingsEnd(const std::vector<size_t>& paddings);
/**
 * @brief Returns pooling type
 * @return Pooling type
 */
PoolingType getPoolingType() const;
/**
 * @brief Sets pooling type
 * @param type Pooling type
 * @return reference to layer builder
 */
PoolingLayer& setPoolingType(PoolingType type);
/**
 * @brief Returns rounding type
 * @return Rounding type
 */
RoundingType getRoundingType() const;
/**
 * @brief Sets rounding type
 * @param type Rounding type
 * @return reference to layer builder
 */
PoolingLayer& setRoundingType(RoundingType type);
/**
 * @brief Returns the type of pooling strategy
 * @return true if zero-values in the padding are not used
 */
bool getExcludePad() const;
/**
 * @brief Sets the type of pooling strategy
 * @param exclude zero-values in the padding are not used if true
 * @return reference to layer builder
 */
PoolingLayer& setExcludePad(bool exclude);
/**
 * @brief Validates a layer before creation
 * @param layer generic layer builder
 */
static void validate(const Layer& layer);
private:
PoolingType type;  // cached pooling type (see PoolingType)
RoundingType roundingType;  // cached rounding type (see RoundingType)
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,83 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Power layer
 * @note NOTE(review): presumably computes (shift + scale * x) ^ power per element — confirm against the implementation
 */
class INFERENCE_ENGINE_API_CLASS(PowerLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit PowerLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit PowerLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
PowerLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
PowerLayer& setPort(const Port& port);
/**
 * @brief Returns power
 * @return Power parameter
 */
float getPower() const;
/**
 * @brief Sets the power parameter
 * @param power Power parameter
 * @return reference to layer builder
 */
PowerLayer& setPower(float power);
/**
 * @brief Returns scaling parameter
 * @return Scaling
 */
float getScale() const;
/**
 * @brief Sets scaling parameter
 * @param scale Scaling parameter
 * @return reference to layer builder
 */
PowerLayer& setScale(float scale);
/**
 * @brief Returns shifting parameter
 * @return Shift
 */
float getShift() const;
/**
 * @brief Sets shift for the layer
 * @param shift Shifting parameter
 * @return reference to layer builder
 */
PowerLayer& setShift(float shift);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,67 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for PReLU layer
 */
class INFERENCE_ENGINE_API_CLASS(PReLULayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit PReLULayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit PReLULayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
PReLULayer& setName(const std::string& name);
/**
 * @brief Sets weights (negative slopes) for the layer
 * @param weights Constant blob with weights
 * @return reference to layer builder
 */
PReLULayer& setWeights(const Blob::CPtr& weights);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
PReLULayer& setPort(const Port& port);
/**
 * @brief Returns the channel-shared flag
 * @return true if negative slope is shared across channels
 */
bool getChannelShared() const;
/**
 * @brief Sets the channel-shared flag
 * @param flag true if negative slope is shared across channels
 * @return reference to layer builder
 */
PReLULayer& setChannelShared(bool flag);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,161 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for PriorBoxClustered layer
 */
class INFERENCE_ENGINE_API_CLASS(PriorBoxClusteredLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit PriorBoxClusteredLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit PriorBoxClusteredLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setName(const std::string& name);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setOutputPort(const Port& port);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param port Vector of input ports
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setInputPorts(const std::vector<Port>& port);
/**
 * @brief Returns height and width of input image
 * @return input image sizes
 */
const std::vector<float> getImgSizes() const;
/**
 * @brief Sets height and width sizes of the input image
 * @param sizes Height and width sizes
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setImgSizes(const std::vector<float> sizes);
/**
 * @brief Returns distances between box centers for height and width
 * @return distances
 */
const std::vector<float> getSteps() const;
/**
 * @brief Sets distances between box centers for height and width
 * @param steps Distances between box centers
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setSteps(const std::vector<float> steps);
/**
 * @brief Returns a single distance between box centers
 * @return distance
 */
float getStep() const;
/**
 * @brief Sets a single distance between box centers
 * @param step A distance between box centers
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setStep(float step);
/**
 * @brief Returns the shift of each box relative to the top left corner
 * @return Shift
 */
float getOffset() const;
/**
 * @brief Sets the shift of each box relative to the top left corner
 * @param offset Shift
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setOffset(float offset);
/**
 * @brief Returns the variance used for adjusting bounding boxes
 * @return Variance
 */
float getVariance() const;
/**
 * @brief Sets the variance used for adjusting bounding boxes
 * @param variance Variance
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setVariance(float variance);
/**
 * @brief Returns desired box width in pixels
 * @return width of desired boxes
 */
float getWidth() const;
/**
 * @brief Sets desired box width in pixels
 * @param width Width of desired boxes
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setWidth(float width);
/**
 * @brief Returns desired box height in pixels
 * @return height of desired boxes
 */
float getHeight() const;
/**
 * @brief Sets desired box height in pixels
 * @param height Height of desired boxes
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setHeight(float height);
/**
 * @brief Returns the clip flag
 * @return true if each value in the output blob is clipped to [0,1]
 */
bool getClip() const;
/**
 * @brief Sets the clip flag
 * @param flag true if each value in the output blob is clipped to [0,1]
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setClip(bool flag);
/**
 * @brief Returns the flip flag
 * @return true if the list of boxes is augmented with the flipped ones
 */
bool getFlip() const;
/**
 * @brief Sets the flip flag
 * @param flag true if the list of boxes is augmented with the flipped ones
 * @return reference to layer builder
 */
PriorBoxClusteredLayer& setFlip(bool flag);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,161 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for PriorBox layer
 */
class INFERENCE_ENGINE_API_CLASS(PriorBoxLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit PriorBoxLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit PriorBoxLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
PriorBoxLayer& setName(const std::string& name);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
PriorBoxLayer& setOutputPort(const Port& port);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
PriorBoxLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns the minimum box size in pixels
 * @return Minimum box size
 */
size_t getMinSize() const;
/**
 * @brief Sets the minimum box size in pixels
 * @param minSize Minimum size
 * @return reference to layer builder
 */
PriorBoxLayer& setMinSize(size_t minSize);
/**
 * @brief Returns the maximum box size in pixels
 * @return maximum size
 */
size_t getMaxSize() const;
/**
 * @brief Sets the maximum box size in pixels
 * @param maxSize Maximum size
 * @return reference to layer builder
 */
PriorBoxLayer& setMaxSize(size_t maxSize);
/**
 * @brief Returns a distance between box centers
 * @return Distance
 */
float getStep() const;
/**
 * @brief Sets a distance between box centers
 * @param step Distance
 * @return reference to layer builder
 */
PriorBoxLayer& setStep(float step);
/**
 * @brief Returns a shift of box respectively to top left corner
 * @return Shift
 */
float getOffset() const;
/**
 * @brief Sets a shift of box respectively to top left corner
 * @param offset Shift
 * @return reference to layer builder
 */
PriorBoxLayer& setOffset(float offset);
/**
 * @brief Returns a variance of adjusting bounding boxes
 * @return Variance
 */
float getVariance() const;
/**
 * @brief Sets a variance of adjusting bounding boxes
 * @param variance Variance
 * @return reference to layer builder
 */
PriorBoxLayer& setVariance(float variance);
/**
 * @brief Returns a flag that denotes type of inference
 * @return true if max_size is used
 */
bool getScaleAllSizes() const;
/**
 * @brief Sets a flag that denotes a type of inference
 * @param flag max_size is used if true
 * @return reference to layer builder
 */
PriorBoxLayer& setScaleAllSizes(bool flag);
/**
 * @brief Returns clip flag
 * @return true if each value in the output blob is within [0,1]
 */
bool getClip() const;
/**
 * @brief sets clip flag
 * @param flag true if each value in the output blob is within [0,1]
 * @return reference to layer builder
 */
PriorBoxLayer& setClip(bool flag);
/**
 * @brief Returns flip flag
 * @return list of boxes is augmented with the flipped ones if true
 */
bool getFlip() const;
/**
 * @brief Sets flip flag
 * @param flag true if list of boxes is augmented with the flipped ones
 * @return reference to layer builder
 */
PriorBoxLayer& setFlip(bool flag);
/**
 * @brief Returns aspect ratios of the generated prior boxes
 * @return Vector of aspect ratios
 */
const std::vector<size_t> getAspectRatio() const;
/**
 * @brief Sets aspect ratios for the generated prior boxes
 * @param aspectRatio Vector of aspect ratios
 * @return reference to layer builder
 */
PriorBoxLayer& setAspectRatio(const std::vector<size_t>& aspectRatio);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,151 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Proposal layer
 */
class INFERENCE_ENGINE_API_CLASS(ProposalLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ProposalLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ProposalLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ProposalLayer& setName(const std::string& name);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
ProposalLayer& setOutputPort(const Port& port);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
ProposalLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns the quantity of bounding boxes after applying NMS
 * @return Quantity of bounding boxes
 */
size_t getPostNMSTopN() const;
/**
 * @brief Sets the quantity of bounding boxes after applying NMS
 * @param topN Quantity of bounding boxes
 * @return reference to layer builder
 */
ProposalLayer& setPostNMSTopN(size_t topN);
/**
 * @brief Returns the quantity of bounding boxes before applying NMS
 * @return Quantity of bounding boxes
 */
size_t getPreNMSTopN() const;
/**
 * @brief Sets the quantity of bounding boxes before applying NMS
 * @param topN Quantity of bounding boxes
 * @return reference to layer builder
 */
ProposalLayer& setPreNMSTopN(size_t topN);
/**
 * @brief Returns minimum value of the proposal to be taken into consideration
 * @return Threshold
 */
float getNMSThresh() const;
/**
 * @brief Sets minimum value of the proposal to be taken into consideration
 * @param thresh Threshold
 * @return reference to layer builder
 */
ProposalLayer& setNMSThresh(float thresh);
/**
 * @brief Returns base size for anchor generation
 * @return Base size
 */
size_t getBaseSize() const;
/**
 * @brief Sets base size for anchor generation
 * @param baseSize Base size for anchor generation
 * @return reference to layer builder
 */
ProposalLayer& setBaseSize(size_t baseSize);
/**
 * @brief Returns minimum size of box to be taken into consideration
 * @return Minimum size
 */
size_t getMinSize() const;
/**
 * @brief Sets minimum size of box to be taken into consideration
 * @param minSize Minimum size of the box
 * @return reference to layer builder
 */
ProposalLayer& setMinSize(size_t minSize);
/**
 * @brief Returns step size to slide over boxes in pixels
 * @return Step size
 */
size_t getFeatStride() const;
/**
 * @brief Sets step size to slide over boxes in pixels
 * @param featStride Step size
 * @return reference to layer builder
 */
ProposalLayer& setFeatStride(size_t featStride);
/**
 * @brief Returns scales for anchor generation
 * @return Vector of scales
 */
const std::vector<float> getScale() const;
/**
 * @brief Sets scales for anchor generation
 * @param scales Vector of scales
 * @return reference to layer builder
 */
ProposalLayer& setScale(const std::vector<float>& scales);
/**
 * @brief Returns ratios for anchor generation
 * @return Vector of ratios
 */
const std::vector<float> getRatio() const;
/**
 * @brief Sets ratios for anchor generation
 * @param ratios Vector of ratios
 * @return reference to layer builder
 */
ProposalLayer& setRatio(const std::vector<float>& ratios);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,98 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for PSROIPooling layer
 */
class INFERENCE_ENGINE_API_CLASS(PSROIPoolingLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit PSROIPoolingLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit PSROIPoolingLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
PSROIPoolingLayer& setName(const std::string& name);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
PSROIPoolingLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
PSROIPoolingLayer& setOutputPort(const Port& port);
/**
 * @brief Returns multiplicative spatial scale factor to translate ROI coordinates
 * @return Spatial scale factor
 */
float getSpatialScale() const;
/**
 * @brief Sets multiplicative spatial scale factor to translate ROI coordinates
 * @param spatialScale Spatial scale factor
 * @return reference to layer builder
 */
PSROIPoolingLayer& setSpatialScale(float spatialScale);
/**
 * @brief Returns pooled output channel number
 * @return Output channel number
 */
size_t getOutputDim() const;
/**
 * @brief Sets pooled output channel number
 * @param outDim Output channel number
 * @return reference to layer builder
 */
PSROIPoolingLayer& setOutputDim(size_t outDim);
/**
 * @brief Returns number of groups to encode position-sensitive score maps
 * @return Number of groups
 */
size_t getGroupSize() const;
/**
 * @brief Sets number of groups to encode position-sensitive score maps
 * @param size Number of groups
 * @return reference to layer builder
 */
PSROIPoolingLayer& setGroupSize(size_t size);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,155 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for RegionYolo layer
 */
class INFERENCE_ENGINE_API_CLASS(RegionYoloLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit RegionYoloLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit RegionYoloLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
RegionYoloLayer& setName(const std::string& name);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param port Input port
 * @return reference to layer builder
 */
RegionYoloLayer& setInputPort(const Port& port);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
RegionYoloLayer& setOutputPort(const Port& port);
/**
 * @brief Returns number of coordinates for each region
 * @return Number of coordinates
 */
int getCoords() const;
/**
 * @brief Sets number of coordinates for each region
 * @param coords Number of coordinates
 * @return reference to layer builder
 */
RegionYoloLayer& setCoords(int coords);
/**
 * @brief Returns number of classes for each region
 * @return Number of classes
 */
int getClasses() const;
/**
 * @brief Sets number of classes for each region
 * @param classes number of classes
 * @return reference to layer builder
 */
RegionYoloLayer& setClasses(int classes);
/**
 * @brief Returns number of regions
 * @return Number of regions
 */
int getNum() const;
/**
 * @brief Sets number of regions
 * @param num Number of regions
 * @return reference to layer builder
 */
RegionYoloLayer& setNum(int num);
/**
 * @brief Returns a flag which specifies the method of infer
 * @return true if softmax is performed
 */
bool getDoSoftMax() const;
/**
 * @brief Sets a flag which specifies the method of infer
 * @param flag softmax is performed if true
 * @return reference to layer builder
 */
RegionYoloLayer& setDoSoftMax(bool flag);
/**
 * @brief Returns anchors coordinates of regions
 * @return anchors coordinates
 * @note NOTE(review): a single float for a set of anchor coordinates looks
 * suspicious — confirm the intended type against the RegionYolo layer spec
 */
float getAnchors() const;
/**
 * @brief Sets anchors coordinates of regions
 * @param anchors Anchors coordinates
 * @return reference to layer builder
 */
RegionYoloLayer& setAnchors(float anchors);
/**
 * @brief Returns mask
 * @return Mask
 */
int getMask() const;
/**
 * @brief Sets mask
 * @param mask Specifies which anchors to use
 * @return reference to layer builder
 */
RegionYoloLayer& setMask(int mask);
/**
 * @brief Returns the number of the dimension from which flattening is performed
 * @return Axis
 */
size_t getAxis() const;
/**
 * @brief Sets the number of the dimension from which flattening is performed
 * @param axis Axis
 * @return reference to layer builder
 */
RegionYoloLayer& setAxis(size_t axis);
/**
 * @brief Returns the number of the dimension on which flattening is ended
 * @return End axis
 */
size_t getEndAxis() const;
/**
 * @brief Sets the number of the dimension on which flattening is ended
 * @param axis End axis
 * @return reference to layer builder
 */
RegionYoloLayer& setEndAxis(size_t axis);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,62 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for ReLU6 layer
 */
class INFERENCE_ENGINE_API_CLASS(ReLU6Layer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ReLU6Layer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ReLU6Layer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ReLU6Layer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
ReLU6Layer& setPort(const Port& port);
/**
 * @brief Returns N value (the upper clamp bound of the activation)
 * @return N
 */
float getN() const;
/**
 * @brief Sets N value (the upper clamp bound of the activation)
 * @param n N value (6 by default)
 * @return reference to layer builder
 */
ReLU6Layer& setN(float n);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,62 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for ReLU layer
 */
class INFERENCE_ENGINE_API_CLASS(ReLULayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ReLULayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ReLULayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ReLULayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
ReLULayer& setPort(const Port& port);
/**
 * @brief Returns negative slope (multiplier applied to negative inputs, as in Leaky ReLU)
 * @return Negative slope
 */
float getNegativeSlope() const;
/**
 * @brief Sets negative slope (multiplier applied to negative inputs, as in Leaky ReLU)
 * @param negativeSlope Negative slope
 * @return reference to layer builder
 */
ReLULayer& setNegativeSlope(float negativeSlope);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,77 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for ReorgYolo layer
 */
class INFERENCE_ENGINE_API_CLASS(ReorgYoloLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ReorgYoloLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ReorgYoloLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ReorgYoloLayer& setName(const std::string& name);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param ports Input port
 * @return reference to layer builder
 */
ReorgYoloLayer& setInputPort(const Port& ports);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
ReorgYoloLayer& setOutputPort(const Port& port);
/**
 * @brief Returns the stride used to reorganize the input data into the output blob
 * @return Stride
 */
int getStride() const;
/**
 * @brief Sets the stride used to reorganize the input data into the output blob
 * @param stride Stride
 * @return reference to layer builder
 */
ReorgYoloLayer& setStride(int stride);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,73 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Reshape layer
 */
class INFERENCE_ENGINE_API_CLASS(ReshapeLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ReshapeLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ReshapeLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ReshapeLayer& setName(const std::string& name);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param port Input port
 * @return reference to layer builder
 */
ReshapeLayer& setInputPort(const Port& port);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
ReshapeLayer& setOutputPort(const Port& port);
/**
 * @brief Returns reshape dimensions
 * @return Dimensions
 */
const std::vector<int> getDims() const;
/**
 * @brief Sets reshape dimensions
 * @param dims Dimensions
 * @return reference to layer builder
 */
ReshapeLayer& setDims(const std::vector<int>& dims);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,84 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for ROIPooling layer
 */
class INFERENCE_ENGINE_API_CLASS(ROIPoolingLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ROIPoolingLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ROIPoolingLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ROIPoolingLayer& setName(const std::string& name);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
ROIPoolingLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
ROIPoolingLayer& setOutputPort(const Port& port);
/**
 * @brief Returns a ratio of the input feature map over the input image size
 * @return Spatial scale
 */
float getSpatialScale() const;
/**
 * @brief Sets a ratio of the input feature map over the input image size
 * @param spatialScale Spatial scale
 * @return reference to layer builder
 */
ROIPoolingLayer& setSpatialScale(float spatialScale);
/**
 * @brief Returns height and width of the ROI output feature map
 * @return Vector contains height and width
 */
const std::vector<int> getPooled() const;
/**
 * @brief Sets height and width of the ROI output feature map
 * @param pooled Vector with height and width
 * @return reference to layer builder
 */
ROIPoolingLayer& setPooled(const std::vector<int>& pooled);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,63 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for ScaleShift layer
 */
class INFERENCE_ENGINE_API_CLASS(ScaleShiftLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ScaleShiftLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit ScaleShiftLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ScaleShiftLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
ScaleShiftLayer& setPort(const Port &port);
/**
 * @brief Sets weights (the per-channel scale factors) for the layer
 * @param weights Constant blob with weights
 * @return reference to layer builder
 */
ScaleShiftLayer& setWeights(const Blob::CPtr& weights);
/**
 * @brief Sets biases (the per-channel shift values) for the layer
 * @param biases Constant blob with biases
 * @return reference to layer builder
 */
ScaleShiftLayer& setBiases(const Blob::CPtr& biases);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,50 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Sigmoid layer
 */
class INFERENCE_ENGINE_API_CLASS(SigmoidLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit SigmoidLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit SigmoidLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
SigmoidLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer (Sigmoid is element-wise: input and output shapes match)
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
SigmoidLayer& setPort(const Port& port);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,140 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for SimplerNMS layer
 */
class INFERENCE_ENGINE_API_CLASS(SimplerNMSLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit SimplerNMSLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit SimplerNMSLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
SimplerNMSLayer& setName(const std::string& name);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
SimplerNMSLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
SimplerNMSLayer& setOutputPort(const Port& port);
/**
 * @brief Returns the quantity of bounding boxes before applying NMS
 * @return Quantity of bounding boxes
 */
size_t getPreNMSTopN() const;
/**
 * @brief Sets the quantity of bounding boxes before applying NMS
 * @param topN Quantity of bounding boxes
 * @return reference to layer builder
 */
SimplerNMSLayer& setPreNMSTopN(size_t topN);
/**
 * @brief Returns the quantity of bounding boxes after applying NMS
 * @return Quantity of bounding boxes
 */
size_t getPostNMSTopN() const;
/**
 * @brief Sets the quantity of bounding boxes after applying NMS
 * @param topN Quantity of bounding boxes
 * @return reference to layer builder
 */
SimplerNMSLayer& setPostNMSTopN(size_t topN);
/**
 * @brief Returns the step size to slide over boxes in pixels
 * @return Step size
 */
size_t getFeatStride() const;
/**
 * @brief Sets the step size to slide over boxes in pixels
 * @param featStride Step size
 * @return reference to layer builder
 */
SimplerNMSLayer& setFeatStride(size_t featStride);
/**
 * @brief Returns the minimum size of box to be taken into consideration
 * @return Minimum size
 */
size_t getMinBoxSize() const;
/**
 * @brief Sets the minimum size of box to be taken into consideration
 * @param minSize Minimum size
 * @return reference to layer builder
 */
SimplerNMSLayer& setMinBoxSize(size_t minSize);
/**
 * @brief Returns scale for anchor boxes generating
 * @return Scale for anchor boxes
 */
size_t getScale() const;
/**
 * @brief Sets scale for anchor boxes generating
 * @param scale Scale for anchor boxes
 * @return reference to layer builder
 */
SimplerNMSLayer& setScale(size_t scale);
/**
 * @brief Returns the minimum value of the proposal to be taken into consideration
 * @return Threshold
 */
float getCLSThreshold() const;
/**
 * @brief Sets the minimum value of the proposal to be taken into consideration
 * @param threshold Minimum value
 * @return reference to layer builder
 */
SimplerNMSLayer& setCLSThreshold(float threshold);
/**
 * @brief Returns the minimum ratio of boxes overlapping to be taken into consideration
 * @return Threshold
 */
float getIOUThreshold() const;
/**
 * @brief Sets the minimum ratio of boxes overlapping to be taken into consideration
 * @param threshold Minimum value
 * @return reference to layer builder
 */
SimplerNMSLayer& setIOUThreshold(float threshold);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,61 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for SoftMax layer
 */
class INFERENCE_ENGINE_API_CLASS(SoftMaxLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit SoftMaxLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit SoftMaxLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
SoftMaxLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
SoftMaxLayer& setPort(const Port& port);
/**
 * @brief Returns the axis along which SoftMax is computed
 * @return Axis
 */
size_t getAxis() const;
/**
 * @brief Sets the axis along which SoftMax is computed
 * @param axis Axis
 * @return reference to layer builder
 */
SoftMaxLayer& setAxis(size_t axis);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,73 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Split layer
 */
class INFERENCE_ENGINE_API_CLASS(SplitLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit SplitLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 */
explicit SplitLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
SplitLayer& setName(const std::string& name);
/**
 * @brief Returns output ports
 * @return Vector of output ports
 */
const std::vector<Port>& getOutputPorts() const;
/**
 * @brief Sets output ports
 * @param ports Vector of output ports
 * @return reference to layer builder
 */
SplitLayer& setOutputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param port Input port
 * @return reference to layer builder
 */
SplitLayer& setInputPort(const Port& port);
/**
 * @brief Returns the axis along which the input is split
 * @return Axis
 */
size_t getAxis() const;
/**
 * @brief Sets the axis along which the input is split
 * @param axis Axis
 * @return reference to layer builder
 */
SplitLayer& setAxis(size_t axis);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,50 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for a TanH layer.
 * The builder exposes the layer name and the port shapes.
 */
class INFERENCE_ENGINE_API_CLASS(TanHLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the given name
 * @param name Layer name (empty by default)
 */
explicit TanHLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from a generic builder
 * @param genLayer Generic builder to wrap
 */
explicit TanHLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
TanHLayer& setName(const std::string& name);
/**
 * @brief Returns the port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets the port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
TanHLayer& setPort(const Port& port);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,89 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for a Tile layer.
 * The builder exposes the input and output ports, the axis parameter and
 * the number of tiles.
 */
class INFERENCE_ENGINE_API_CLASS(TileLayer): public LayerFragment {
public:
/**
 * @brief The constructor creates a builder with the given name
 * @param name Layer name (empty by default)
 */
explicit TileLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from a generic builder
 * @param genLayer Generic builder to wrap
 */
explicit TileLayer(Layer& genLayer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
TileLayer& setName(const std::string& name);
/**
 * @brief Returns the input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets the input port
 * @param port Input port
 * @return reference to layer builder
 */
TileLayer& setInputPort(const Port& port);
/**
 * @brief Returns the output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets the output port
 * @param port Output port
 * @return reference to layer builder
 */
TileLayer& setOutputPort(const Port& port);
/**
 * @brief Returns the axis parameter of the layer
 * @return Axis
 */
size_t getAxis() const;
/**
 * @brief Sets the axis parameter of the layer
 * @param axis Axis
 * @return reference to layer builder
 */
TileLayer& setAxis(size_t axis);
/**
 * @brief Returns the number of tiles
 * @return Tiles
 */
size_t getTiles() const;
/**
 * @brief Sets the number of tiles
 * @param tiles Tiles
 * @return reference to layer builder
 */
TileLayer& setTiles(size_t tiles);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -69,6 +68,11 @@ public:
}
}
/**
* @brief A destructor
*/
virtual ~CNNNetwork() {}
/**
* @brief Wraps original method
* ICNNNetwork::getPrecision
@@ -215,6 +219,15 @@ public:
CALL_STATUS_FNC(reshape, inputShapes);
}
/**
* @brief Serialize network to IR and weights files.
* @param xmlPath Path to output IR file.
* @param binPath Path to output weights file.
*/
void serialize(const std::string &xmlPath, const std::string &binPath) const {
CALL_STATUS_FNC(serialize, xmlPath, binPath);
}
protected:
/**
* @brief reader extra reference, might be nullptr

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -137,9 +136,8 @@ public:
}
/**
* @brief Sets data that will contain result of the inference
* @param results - a reference to a map of result blobs accessed by output names.
* The type of Blob must correspond to the network output precision and size.
* @brief Sets new batch size when dynamic batching is enabled in executable network that created this request.
* @param batch new batch size to be used by all the following inference calls for this request.
*/
void SetBatch(const int batch) {
CALL_STATUS_FNC(SetBatch, batch);

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -141,7 +140,7 @@ private:
}
template <typename T>
std::pair<StatusCode, bool> status_code_assign(const T & arg) {
std::pair<StatusCode, bool> status_code_assign(const T &) {
return {static_cast<StatusCode>(0), false};
}
};

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -0,0 +1,137 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief A header file for the CNNNetworkIterator class
* @file ie_cnn_network_iterator.hpp
*/
#pragma once
#include <algorithm>
#include <iterator>
#include <list>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <ie_inetwork.hpp>
namespace InferenceEngine {
namespace details {
template<class NT, class LT>
// Input iterator that walks the layers of a network in topological order:
// a layer is yielded only after all layers it depends on have been yielded.
// NT is the network type (must provide getInputs(), getLayer() and
// getLayerConnections()); the iterator dereferences to std::shared_ptr<LT>.
// NOTE(review): std::iterator is deprecated since C++17; consider declaring
// the iterator member typedefs directly.
class INetworkIterator: public std::iterator<std::input_iterator_tag, std::shared_ptr<LT>> {
public:
// Builds the sorted layer list eagerly in the constructor.
// A null network or toEnd == true produces the past-the-end iterator.
// Throws when the network contains a cycle (topological sort impossible).
explicit INetworkIterator(NT * network, bool toEnd = false): network(network), currentIdx(0) {
if (!network || toEnd)
return;
const auto& inputs = network->getInputs();
std::vector<std::shared_ptr<LT>> allInputs;
for (const auto& input : inputs) {
// dynamic_pointer_cast may yield nullptr for incompatible layer types;
// DFS() below tolerates null heads.
allInputs.push_back(std::dynamic_pointer_cast<LT>(input));
}
// Post-order DFS (bVisitBefore == false) appends a layer only after every
// layer reachable from it, so reversing the list yields topological order.
bool res = forestDFS(allInputs, [&](std::shared_ptr<LT> current) {
sortedLayers.push_back(current);
}, false);
if (!res) {
THROW_IE_EXCEPTION << "Sorting not possible, due to existed loop.";
}
std::reverse(std::begin(sortedLayers), std::end(sortedLayers));
currentLayer = getNextLayer();
}
bool operator!=(const INetworkIterator& that) const {
return !operator==(that);
}
// Two iterators are equal when they refer to the same network and the same
// current layer (both nullptr for end iterators).
bool operator==(const INetworkIterator& that) const {
return network == that.network && currentLayer == that.currentLayer;
}
// Dereference; throws when the iterator is past the end.
typename INetworkIterator::reference operator*() {
if (nullptr == currentLayer) {
THROW_IE_EXCEPTION << "iterator out of bound";
}
return currentLayer;
}
// Pre-increment: advance to the next layer in topological order.
INetworkIterator& operator++() {
currentLayer = getNextLayer();
return *this;
}
// Post-increment: returns a copy of the iterator before advancing.
const INetworkIterator<NT, LT> operator++(int) {
INetworkIterator<NT, LT> retval = *this;
++(*this);
return retval;
}
private:
std::vector<std::shared_ptr<LT>> sortedLayers;   // layers in topological order
std::shared_ptr<LT> currentLayer;                // nullptr when past the end
size_t currentIdx;                               // index of the NEXT layer to yield
NT *network = nullptr;
// Returns the next layer from sortedLayers, or nullptr once exhausted.
std::shared_ptr<LT> getNextLayer() {
return (sortedLayers.size() > currentIdx) ? sortedLayers[currentIdx++] : nullptr;
}
// Runs DFS from every head layer, sharing one visited map so layers reachable
// from several heads are visited once. Returns false if a cycle is detected.
template<class T>
inline bool forestDFS(const std::vector<std::shared_ptr<LT>>& heads, const T &visit, bool bVisitBefore) {
if (heads.empty()) {
return true;
}
std::unordered_map<idx_t, bool> visited;
for (auto & layer : heads) {
if (!DFS(visited, layer, visit, bVisitBefore)) {
return false;
}
}
return true;
}
// Recursive depth-first traversal along outgoing connections.
// visited maps layer id -> "completed" flag: false means the layer is still
// on the current DFS stack, true means its subtree is fully processed.
// Returns false when a back edge (cycle) is found.
template<class T>
inline bool DFS(std::unordered_map<idx_t, bool> &visited,
const std::shared_ptr<LT> &layer,
const T &visit,
bool visitBefore) {
if (layer == nullptr) {
return true;
}
if (visitBefore)
visit(layer);
visited[layer->getId()] = false;
for (const auto &connection : network->getLayerConnections(layer->getId())) {
// Skip connections pointing INTO this layer; only follow outgoing edges.
if (connection.to().layerId() == layer->getId()) {
continue;
}
const auto outLayer = network->getLayer(connection.to().layerId());
auto i = visited.find(outLayer->getId());
if (i != visited.end()) {
/**
* Cycle detected: we re-entered a node that is still on the DFS stack
* (its "completed" flag is false).
*/
if (!i->second) {
return false;
}
continue;
}
if (!DFS(visited, outLayer, visit, visitBefore)) {
return false;
}
}
if (!visitBefore)
visit(layer);
visited[layer->getId()] = true;
return true;
}
};
} // namespace details
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -37,9 +36,8 @@ class PreAllocator : public IAllocator {
}
/**
* @brief The PreAllocator class does not utilize this function
* @param handle Memory handle to unlock
*/
void unlock(void * handle) noexcept override {}
void unlock(void *) noexcept override {} // NOLINT
/**
* @brief Returns a pointer to preallocated memory
@@ -55,10 +53,11 @@ class PreAllocator : public IAllocator {
}
/**
* @brief The PreAllocator class cannot release the handle
* @param handle Memory handle to release
* @return false
*/
bool free(void* handle) noexcept override { return false;}
bool free(void *) noexcept override { // NOLINT
return false;
}
/**
* @brief Deletes current allocator.

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -86,7 +85,7 @@ public:
* @brief The main constructor
* @param name Name of a shared library file
*/
explicit SOPointer(const std::string &name)
explicit SOPointer(const file_name_t &name)
: _so_loader(new Loader(name.c_str()))
, _pointedObj(details::shared_from_irelease(
SymbolLoader<Loader>(_so_loader).template instantiateSymbol<T>(SOCreatorTrait<T>::name))) {
@@ -161,6 +160,6 @@ protected:
* @param name Name of the shared library file
*/
template <class T>
inline std::shared_ptr<T> make_so_pointer(const std::string & name) = delete;
inline std::shared_ptr<T> make_so_pointer(const file_name_t & name) = delete;
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -15,7 +14,9 @@
// Avoidance of Windows.h to include winsock library.
#define _WINSOCKAPI_
// Avoidance of Windows.h to define min/max.
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <direct.h>
@@ -35,7 +36,7 @@ private:
* WinAPI LoadLibrary rules
* @param pluginName Full or relative path to the plugin library
*/
explicit SharedObjectLoader(const char* pluginName) {
explicit SharedObjectLoader(LPCTSTR pluginName) {
char cwd[1024];
shared_object = LoadLibrary(pluginName);
if (!shared_object) {

View File

@@ -0,0 +1,78 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
 * @brief A header that defines advanced configuration properties for the GNA plugin.
 * These properties should be used in SetConfig() and LoadNetwork() methods of plugins
 *
 * @file gna_plugin_config.hpp
 */
#pragma once
#include <string>
#include "../ie_plugin_config.hpp"
namespace InferenceEngine {
namespace GNAConfigParams {
#define GNA_CONFIG_KEY(name) InferenceEngine::GNAConfigParams::_CONFIG_KEY(GNA_##name)
#define GNA_CONFIG_VALUE(name) InferenceEngine::GNAConfigParams::GNA_##name
#define DECLARE_GNA_CONFIG_KEY(name) DECLARE_CONFIG_KEY(GNA_##name)
#define DECLARE_GNA_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(GNA_##name)
/**
 * @brief Scale factor that is calculated by the user, in order to use the static quantisation feature.
 * This option should be set to a floating point value serialized to a string with the decimal separator equal to . (dot)
 */
DECLARE_GNA_CONFIG_KEY(SCALE_FACTOR);
/**
* @brief By default gna api work in Int16 precision, however this can be adjusted if necessary,
* currently supported values are I16, I8
*/
DECLARE_GNA_CONFIG_KEY(PRECISION);
/**
* @brief if turned on, dump GNA firmware model into specified file
*/
DECLARE_GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE);
/**
* @brief GNA proc_type setting that should be one of GNA_AUTO, GNA_HW, GNA_SW, GNA_SW_EXACT
*/
DECLARE_GNA_CONFIG_KEY(DEVICE_MODE);
DECLARE_GNA_CONFIG_VALUE(AUTO);
DECLARE_GNA_CONFIG_VALUE(HW);
DECLARE_GNA_CONFIG_VALUE(SW);
DECLARE_GNA_CONFIG_VALUE(SW_EXACT);
/**
* @brief if enabled produced minimum memory footprint for loaded network in GNA memory, default value is YES
*/
DECLARE_GNA_CONFIG_KEY(COMPACT_MODE);
/**
 * @brief The option to enable/disable the uniformly distributed PWL algorithm.
 * By default (in case of NO value set) the optimized algorithm called "Recursive Descent Algorithm for Finding
 * the Optimal Minimax Piecewise Linear Approximation of Convex Functions" is used.
 * If the value is YES, a simple uniform distribution is used to create the PWL approximation of activation functions.
 * Uniform distribution usually gives a poorer approximation for the same number of segments.
 */
DECLARE_GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN);
/**
* @brief By default, the GNA plugin uses one worker thread for inference computations.
* This parameter allows you to create up to 127 threads for software modes.
*
* Note that multithreading mode does not guarantee the same computation order as order
* of issuing. Additionally, in this case, software modes do not implement any serializations.
*/
DECLARE_GNA_CONFIG_KEY(LIB_N_THREADS);
} // namespace GNAConfigParams
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -11,20 +10,33 @@
#include "details/ie_no_copy.hpp"
#if defined(_WIN32) && !defined(USE_STATIC_IE)
#define INFERENCE_ENGINE_CDECL
#ifdef IMPLEMENT_INFERENCE_ENGINE_API
#if defined(USE_STATIC_IE) || ( defined(__GNUC__) && (__GNUC__ < 4) )
#define INFERENCE_ENGINE_API(TYPE) extern "C" TYPE
#define INFERENCE_ENGINE_API_CPP(type) type
#define INFERENCE_ENGINE_API_CLASS(type) type
#define INFERENCE_ENGINE_CDECL __attribute__((cdecl))
#else
#if defined(_WIN32)
#define INFERENCE_ENGINE_CDECL
#ifdef IMPLEMENT_INFERENCE_ENGINE_API
#define INFERENCE_ENGINE_API(type) extern "C" __declspec(dllexport) type __cdecl
#define INFERENCE_ENGINE_API_CPP(type) __declspec(dllexport) type __cdecl
#define INFERENCE_ENGINE_API_CLASS(type) __declspec(dllexport) type
#else
#else
#define INFERENCE_ENGINE_API(type) extern "C" __declspec(dllimport) type __cdecl
#define INFERENCE_ENGINE_API_CPP(type) __declspec(dllimport) type __cdecl
#define INFERENCE_ENGINE_API_CLASS(type) __declspec(dllimport) type
#endif
#else
#define INFERENCE_ENGINE_CDECL __attribute__((cdecl))
#ifdef IMPLEMENT_INFERENCE_ENGINE_API
#define INFERENCE_ENGINE_API(type) extern "C" __attribute__((visibility("default"))) type
#define INFERENCE_ENGINE_API_CPP(type) __attribute__((visibility("default"))) type
#define INFERENCE_ENGINE_API_CLASS(type) __attribute__((visibility("default"))) type
#else
#define INFERENCE_ENGINE_API(type) extern "C" type
#define INFERENCE_ENGINE_API_CPP(type) type
#define INFERENCE_ENGINE_API_CLASS(type) type
#endif
#endif
#else
#define INFERENCE_ENGINE_API(TYPE) extern "C" TYPE
#define INFERENCE_ENGINE_API_CPP(type) type
#define INFERENCE_ENGINE_API_CLASS(type) type
#define INFERENCE_ENGINE_CDECL __attribute__((cdecl))
#endif

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -126,7 +125,7 @@ public:
* @param layout New layout to set
* @return Total number of elements (a product of all the dimensions)
*/
size_t Resize(const SizeVector &dims, Layout layout = Layout::ANY) {
size_t Resize(const SizeVector &dims, Layout layout = Layout::ANY) noexcept {
bool bret = deallocate();
if (layout != Layout::ANY) {
@@ -147,9 +146,9 @@ public:
* @param layout New layout to set
* @return The total number of elements (a product of all the dims)
*/
size_t Reshape(const SizeVector &dims, Layout layout = Layout::ANY) {
size_t Reshape(const SizeVector &dims, Layout layout = Layout::ANY) noexcept {
if (product(tensorDesc.getDims()) != product(dims)) {
THROW_IE_EXCEPTION << "cannot reshape when total size changes";
return 0;
}
if (layout != Layout::ANY) {
@@ -164,28 +163,28 @@ public:
* @deprecated Please use TensorDesc for working with dimensions.
* @brief Returns the tensor dimensions vector with reversed order.
*/
const SizeVector dims() const {
const SizeVector dims() const noexcept {
return SizeVector(tensorDesc.getDims().rbegin(), tensorDesc.getDims().rend());
}
/**
* @brief Returns the tensor description
*/
const TensorDesc &getTensorDesc() const {
const TensorDesc &getTensorDesc() const noexcept {
return tensorDesc;
}
/**
* @brief Returns the total number of elements (a product of all the dims)
*/
size_t size() const {
size_t size() const noexcept {
return product(tensorDesc.getDims());
}
/**
* @brief Returns the size of the current Blob in bytes.
*/
size_t byteSize() const {
size_t byteSize() const noexcept {
return product(tensorDesc.getDims()) * element_size();
}
@@ -199,27 +198,27 @@ public:
* @brief Allocates memory to store the data.
* Abstract method.
*/
virtual void allocate() = 0;
virtual void allocate() noexcept = 0;
/**
* @brief Releases previously allocated data.
* Abstract method.
*/
virtual bool deallocate() = 0;
virtual bool deallocate() noexcept = 0;
/**
* @brief Gets access to the allocated memory.
* Abstract method.
* @return A LockedMemory object
*/
virtual LockedMemory<void> buffer() = 0;
virtual LockedMemory<void> buffer() noexcept = 0;
/**
* @brief Gets read-only access to the allocated memory.
* Abstract method.
* @return A LockedMemory object
*/
virtual LockedMemory<const void> cbuffer() const = 0;
virtual LockedMemory<const void> cbuffer() const noexcept = 0;
protected:
/**
@@ -232,7 +231,7 @@ protected:
* @param dims Reference to a vector with dimension values of type size_t
* @return Result of multiplication
*/
static size_t product(const SizeVector &dims) {
static size_t product(const SizeVector &dims) noexcept {
if (dims.empty())
return 0;
return std::accumulate(std::begin(dims), std::end(dims), (size_t) 1, std::multiplies<size_t>());
@@ -401,7 +400,7 @@ public:
* @brief Creates an new empty rvalue LockedMemory object.
* @return rvalue for the empty locked object of type T
*/
virtual LockedMemory<T> data() {
virtual LockedMemory<T> data() noexcept {
return std::move(lockme<T>());
}
@@ -409,7 +408,7 @@ public:
* @brief Creates a new empty rvalue read-only LockedMemory object.
* @return rvalue for the empty locked const object of type T.
*/
virtual LockedMemory<const T> readOnly() const {
virtual LockedMemory<const T> readOnly() const noexcept {
return std::move(lockme<const T>());
}
@@ -418,7 +417,7 @@ public:
* @brief Copies data from the given vector to the blob.
* @param that Vector of values to copy to the blob
*/
void set(const std::vector<T> &that) {
void set(const std::vector<T> &that) {
if (tensorDesc.getDims().size() != 0 && that.size() != product(tensorDesc.getDims()))
THROW_IE_EXCEPTION << "Size mismatch between dims and vector";
if (tensorDesc.getDims().size() == 0) {
@@ -435,7 +434,7 @@ public:
/**
* @brief Allocates or reallocates memory
*/
void allocate() override {
void allocate() noexcept override {
if (_handle != nullptr) {
getAllocator()->free(_handle);
}
@@ -445,7 +444,7 @@ public:
/**
* @brief Frees all allocated data
*/
bool deallocate() override {
bool deallocate() noexcept override {
return free();
}
@@ -453,7 +452,7 @@ public:
* @brief Creates a new LockedMemory instance holding void pointer.
* @return LockedMemory instance holding void pointer
*/
LockedMemory<void> buffer() override {
LockedMemory<void> buffer() noexcept override {
return std::move(lockme<void>());
}
@@ -461,7 +460,7 @@ public:
* @brief Creates a new LockedMemory instance holding constant void pointer.
* @return LockedMemory instance holding constant void pointer
*/
LockedMemory<const void> cbuffer() const override {
LockedMemory<const void> cbuffer() const noexcept override {
return std::move(lockme<const void>());
}
@@ -589,6 +588,7 @@ protected:
*/
template<class Type>
inline typename TBlob<Type>::Ptr make_shared_blob(Precision p, Layout l, const SizeVector &dims) {
IE_ASSERT(p.hasStorageType<Type>());
return std::make_shared<TBlob<Type>>(p, l, dims);
}
@@ -602,6 +602,7 @@ inline typename TBlob<Type>::Ptr make_shared_blob(Precision p, Layout l, const S
*/
template<class Type>
inline typename TBlob<Type>::Ptr make_shared_blob(Precision p, const SizeVector &dims) {
IE_ASSERT(p.hasStorageType<Type>());
return make_shared_blob<Type>(p, TensorDesc::getLayoutByDims(dims), dims);
}
@@ -615,6 +616,7 @@ inline typename TBlob<Type>::Ptr make_shared_blob(Precision p, const SizeVector
*/
template<typename Type, class TArg>
inline typename InferenceEngine::TBlob<Type>::Ptr make_shared_blob(Precision p, Layout l, const TArg &arg) {
IE_ASSERT(p.hasStorageType<Type>());
return std::make_shared<InferenceEngine::TBlob<Type>>(p, l, arg);
}
@@ -628,6 +630,7 @@ inline typename InferenceEngine::TBlob<Type>::Ptr make_shared_blob(Precision p,
*/
template<typename Type, class TArg>
inline typename InferenceEngine::TBlob<Type>::Ptr make_shared_blob(Precision p, const TArg &arg) {
IE_ASSERT(p.hasStorageType<Type>());
return make_shared_blob<Type, TArg>(p, TensorDesc::getLayoutByDims(arg), arg);
}
@@ -639,6 +642,7 @@ inline typename InferenceEngine::TBlob<Type>::Ptr make_shared_blob(Precision p,
*/
template<typename Type>
inline typename InferenceEngine::TBlob<Type>::Ptr make_shared_blob(const TensorDesc& tensorDesc) {
IE_ASSERT(tensorDesc.getPrecision().hasStorageType<Type>());
return std::make_shared<InferenceEngine::TBlob<Type>>(tensorDesc);
}
@@ -652,6 +656,7 @@ inline typename InferenceEngine::TBlob<Type>::Ptr make_shared_blob(const TensorD
*/
template<typename Type>
inline typename InferenceEngine::TBlob<Type>::Ptr make_shared_blob(const TensorDesc& tensorDesc, Type * ptr, size_t size = 0) {
IE_ASSERT(tensorDesc.getPrecision().hasStorageType<Type>());
return std::make_shared<InferenceEngine::TBlob<Type>>(tensorDesc, ptr, size);
}
@@ -682,13 +687,14 @@ inline typename InferenceEngine::TBlob<TypeTo>::Ptr make_shared_blob(const TBlob
/**
* @deprecated Use TensorDesc in order to create Blob::Ptr.
* @brief Creates a blob with the given precision.
* @tparam Type Type of the shared pointer to be created
* @tparam TypeTo Type of the shared pointer to be created
* @param p Given precision
* @return A shared pointer to the blob created
*/
template<typename Type>
inline typename InferenceEngine::TBlob<Type>::Ptr make_shared_blob(Precision p, Layout l = NCHW) {
return std::make_shared<TBlob<Type>>(p, l);
template<typename TypeTo>
inline typename InferenceEngine::TBlob<TypeTo>::Ptr make_shared_blob(Precision p, Layout l = NCHW) {
IE_ASSERT(p.hasStorageType<TypeTo>());
return std::make_shared<TBlob<TypeTo>>(p, l);
}
/**
@@ -703,6 +709,7 @@ inline typename InferenceEngine::TBlob<Type>::Ptr make_shared_blob(Precision p,
*/
template<typename TypeTo>
inline typename TBlob<TypeTo>::Ptr make_shared_blob(Precision p, Layout l, SizeVector dims, const std::vector<TypeTo> &arg) {
IE_ASSERT(p.hasStorageType<TypeTo>());
auto blob = std::make_shared<TBlob<TypeTo>>(p, l, dims);
blob->set(arg);
return blob;
@@ -719,6 +726,7 @@ inline typename TBlob<TypeTo>::Ptr make_shared_blob(Precision p, Layout l, SizeV
*/
template<typename TypeTo>
inline typename TBlob<TypeTo>::Ptr make_shared_blob(Precision p, Layout l, const std::vector<TypeTo> &arg) {
IE_ASSERT(p.hasStorageType<TypeTo>());
auto blob = std::make_shared<TBlob<TypeTo>>(p, l);
blob->set(arg);
return blob;
@@ -734,6 +742,7 @@ inline typename TBlob<TypeTo>::Ptr make_shared_blob(Precision p, Layout l, const
*/
template<typename TypeTo>
inline typename TBlob<TypeTo>::Ptr make_shared_blob(Precision p, const std::vector<TypeTo> &arg) {
IE_ASSERT(p.hasStorageType<TypeTo>());
return make_shared_blob<TypeTo>(p, TensorDesc::getLayoutByDims(arg), arg);
}
@@ -749,6 +758,7 @@ inline typename TBlob<TypeTo>::Ptr make_shared_blob(Precision p, const std::vect
*/
template <typename TypeTo>
inline typename TBlob<TypeTo>::Ptr make_shared_blob(Precision p, Layout l, const SizeVector &dims, TypeTo * ptr, size_t size = 0) {
IE_ASSERT(p.hasStorageType<TypeTo>());
auto blob = std::make_shared<TBlob<TypeTo>>(p, l, dims, ptr, size);
return blob;
}
@@ -764,6 +774,7 @@ inline typename TBlob<TypeTo>::Ptr make_shared_blob(Precision p, Layout l, const
*/
template <typename TypeTo>
inline typename TBlob<TypeTo>::Ptr make_shared_blob(Precision p, const SizeVector &dims, TypeTo * ptr, size_t size = 0) {
IE_ASSERT(p.hasStorageType<TypeTo>());
return make_shared_blob<TypeTo>(p, TensorDesc::getLayoutByDims(dims), dims, ptr, size);
}

View File

@@ -0,0 +1,49 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_network_builder.hpp>
#include <builders/ie_layer_builder.hpp>
#include <builders/ie_argmax_layer.hpp>
#include <builders/ie_clamp_layer.hpp>
#include <builders/ie_concat_layer.hpp>
#include <builders/ie_const_layer.hpp>
#include <builders/ie_convolution_layer.hpp>
#include <builders/ie_crop_layer.hpp>
#include <builders/ie_ctc_greedy_decoder_layer.hpp>
#include <builders/ie_deconvolution_layer.hpp>
#include <builders/ie_detection_output_layer.hpp>
#include <builders/ie_eltwise_layer.hpp>
#include <builders/ie_elu_layer.hpp>
#include <builders/ie_fully_connected_layer.hpp>
#include <builders/ie_grn_layer.hpp>
#include <builders/ie_input_layer.hpp>
#include <builders/ie_memory_layer.hpp>
#include <builders/ie_mvn_layer.hpp>
#include <builders/ie_norm_layer.hpp>
#include <builders/ie_normalize_layer.hpp>
#include <builders/ie_output_layer.hpp>
#include <builders/ie_permute_layer.hpp>
#include <builders/ie_pooling_layer.hpp>
#include <builders/ie_power_layer.hpp>
#include <builders/ie_prelu_layer.hpp>
#include <builders/ie_prior_box_clustered_layer.hpp>
#include <builders/ie_prior_box_layer.hpp>
#include <builders/ie_proposal_layer.hpp>
#include <builders/ie_psroi_pooling_layer.hpp>
#include <builders/ie_region_yolo_layer.hpp>
#include <builders/ie_relu6_layer.hpp>
#include <builders/ie_relu_layer.hpp>
#include <builders/ie_reorg_yolo_layer.hpp>
#include <builders/ie_reshape_layer.hpp>
#include <builders/ie_roi_pooling_layer.hpp>
#include <builders/ie_scale_shift_layer.hpp>
#include <builders/ie_sigmoid_layer.hpp>
#include <builders/ie_simpler_nms_layer.hpp>
#include <builders/ie_softmax_layer.hpp>
#include <builders/ie_split_layer.hpp>
#include <builders/ie_tanh_layer.hpp>
#include <builders/ie_tile_layer.hpp>

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -14,8 +13,11 @@
#include <string>
#include <ostream>
#include <algorithm>
#include <cstdlib>
#include <details/ie_exception.hpp>
#include "ie_unicode.hpp"
namespace InferenceEngine {
/**
* @brief Represents tensor size.
@@ -67,12 +69,6 @@ union UserValue {
void *v_ptr;
};
enum CellType {
ORIG,
LSTM,
GRU
};
/**
* @enum Layout
* @brief Layouts that the inference engine supports
@@ -83,6 +79,8 @@ enum Layout : uint8_t {
// I/O data layouts
NCHW = 1,
NHWC = 2,
NCDHW = 3,
NDHWC = 4,
// weight layouts
OIHW = 64,

View File

@@ -0,0 +1,50 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief This is a header file for the IE Context class
* @file ie_context.hpp
*/
#pragma once
#include <details/caseless.hpp>
#include <ie_iextension.h>
#include <string>
#include <vector>
#include <map>
namespace InferenceEngine {
/**
 * @brief Registry of shape-inference implementations contributed by extensions.
 * Implementations are stored per layer type in a case-insensitive map.
 */
class INFERENCE_ENGINE_API_CLASS(Context) {
public:
Context();
/**
 * @brief Registers an extension within the context
 * @param ext Pointer to an already loaded extension
 */
void addExtension(const IShapeInferExtensionPtr& ext);
/**
 * @brief Registers a Shape Infer implementation within the context
 * @param type Layer type the implementation handles
 * @param impl Shape Infer implementation
 */
void addShapeInferImpl(const std::string& type, const IShapeInferImpl::Ptr& impl);
/**
 * @brief Returns the shape infer implementation registered for a layer type
 * @param type Layer type
 * @return Shape Infer implementation
 */
IShapeInferImpl::Ptr getShapeInferImpl(const std::string& type);
private:
// Case-insensitive map: layer type -> shape inference implementation.
details::caseless_map<std::string, IShapeInferImpl::Ptr> shapeInferImpls;
};
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -160,6 +159,6 @@ public:
*/
const UserValue& getUserObject() const;
private:
TensorDesc tensorDesc;
mutable TensorDesc tensorDesc;
};
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -28,6 +27,7 @@ enum class TargetDevice : uint8_t {
eGPU = 3,
eFPGA = 4,
eMYRIAD = 5,
eHDDL = 6,
eGNA = 7,
eHETERO = 8
};
@@ -51,6 +51,7 @@ class TargetDeviceInfo {
DECL_DEVICE(GPU),
DECL_DEVICE(FPGA),
DECL_DEVICE(MYRIAD),
DECL_DEVICE(HDDL),
DECL_DEVICE(GNA),
DECL_DEVICE(HETERO)
};
@@ -65,6 +66,7 @@ class TargetDeviceInfo {
{ "GPU", InferenceEngine::TargetDevice::eGPU },
{ "FPGA", InferenceEngine::TargetDevice::eFPGA },
{ "MYRIAD", InferenceEngine::TargetDevice::eMYRIAD },
{ "HDDL", InferenceEngine::TargetDevice::eHDDL },
{ "GNA", InferenceEngine::TargetDevice::eGNA },
{ "BALANCED", InferenceEngine::TargetDevice::eBalanced },
{ "HETERO", InferenceEngine::TargetDevice::eHETERO }

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -53,7 +52,7 @@ public:
* @brief Loads extension from a shared library
* @param name Full or relative path to extension library
*/
explicit Extension(const std::string &name)
explicit Extension(const file_name_t &name)
: actual(name) {}
/**
@@ -127,7 +126,7 @@ public:
* @brief Loads extension from a shared library
* @param name Full or relative path to extension library
*/
explicit ShapeInferExtension(const std::string &name)
explicit ShapeInferExtension(const file_name_t &name)
: actual(name) {}
/**
@@ -192,7 +191,7 @@ protected:
* @return shared_pointer A wrapper for the given type from a specific shared module
*/
template<>
inline std::shared_ptr<IShapeInferExtension> make_so_pointer(const std::string &name) {
inline std::shared_ptr<IShapeInferExtension> make_so_pointer(const file_name_t &name) {
return std::make_shared<ShapeInferExtension>(name);
}
@@ -202,7 +201,7 @@ inline std::shared_ptr<IShapeInferExtension> make_so_pointer(const std::string &
* @return shared_pointer A wrapper for the given type from a specific shared module
*/
template<>
inline std::shared_ptr<IExtension> make_so_pointer(const std::string &name) {
inline std::shared_ptr<IExtension> make_so_pointer(const file_name_t &name) {
return std::make_shared<Extension>(name);
}

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -29,8 +28,6 @@ namespace InferenceEngine {
* @brief A collection that contains string as key, and Data smart pointer as value
*/
using OutputsDataMap = std::map<std::string, DataPtr>;
class IShapeInferExtension;
using IShapeInferExtensionPtr = std::shared_ptr<IShapeInferExtension>;
/**
* @brief This is the main interface to describe the NN topology
@@ -148,8 +145,9 @@ public:
* @return Status code of the operation
* @note: Current implementation of the function sets batch size to the first dimension of all layers in the networks.
* Before calling it make sure that all your layers have batch in the first dimension, otherwise the method works incorrectly.
* This limitation is resolved via [Shape Inference feature](./docs/Inference_Engine_Developer_Guide/ShapeInference.md)
* This limitation is resolved via shape inference feature
* by using InferenceEngine::ICNNNetwork::reshape method.
* To read more refer to the Shape Inference section in documentation
*/
virtual StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept = 0;
@@ -170,7 +168,7 @@ public:
* @param resp Pointer to the response message that holds a description of an error if any occurred
* @return Status code of the operation
*/
virtual StatusCode reshape(const InputShapes& inputShapes, ResponseDesc* resp) noexcept { return NOT_IMPLEMENTED; };
virtual StatusCode reshape(const InputShapes& /*inputShapes*/, ResponseDesc* /*resp*/) noexcept { return NOT_IMPLEMENTED; };
/**
* @brief Registers extension within the plugin
@@ -179,8 +177,16 @@ public:
* @return Status code of the operation. OK if succeeded
*/
virtual StatusCode
AddExtension(const IShapeInferExtensionPtr& extension, ResponseDesc* resp) noexcept { return NOT_IMPLEMENTED; };
AddExtension(const IShapeInferExtensionPtr& /*extension*/, ResponseDesc* /*resp*/) noexcept { return NOT_IMPLEMENTED; };
virtual StatusCode getStats(ICNNNetworkStats** stats, ResponseDesc* resp) const noexcept { return NOT_IMPLEMENTED; };
virtual StatusCode getStats(ICNNNetworkStats** /*stats*/, ResponseDesc* /*resp*/) const noexcept { return NOT_IMPLEMENTED; };
/**
* @brief Serialize network to IR and weights files.
* @param xmlPath Path to output IR file.
* @param binPath Path to output weights file.
* @return Status code of the operation
*/
virtual StatusCode serialize(const std::string &xmlPath, const std::string &binPath, ResponseDesc* resp) const noexcept = 0;
};
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -135,8 +134,8 @@ public:
* @param resp Response descriptor
* @return Status code
*/
virtual StatusCode getShapes(const std::vector<TensorDesc>& inShapes, std::vector<TensorDesc>& outShapes,
ResponseDesc* resp) noexcept {
virtual StatusCode getShapes(const std::vector<TensorDesc>& /*inShapes*/, std::vector<TensorDesc>& /*outShapes*/,
ResponseDesc* /*resp*/) noexcept {
return NOT_IMPLEMENTED;
}
@@ -230,11 +229,11 @@ public:
*/
virtual StatusCode getPrimitiveTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept = 0;
StatusCode getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept override {
StatusCode getShapeInferTypes(char**&, unsigned int&, ResponseDesc*) noexcept override {
return NOT_IMPLEMENTED;
};
StatusCode getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type, ResponseDesc* resp) noexcept override {
StatusCode getShapeInferImpl(IShapeInferImpl::Ptr&, const char*, ResponseDesc*) noexcept override {
return NOT_IMPLEMENTED;
};
};

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -80,7 +79,7 @@ public:
*/
virtual void QueryNetwork(const std::string &device,
const ICNNNetwork &network,
const std::map<std::string, std::string>& config,
const std::map<std::string, std::string>& /*config*/,
QueryNetworkResult &res) noexcept {
QueryNetwork(device, network, res);
};

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -0,0 +1,366 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief a header file for the Inference Engine Network interface
* @file ie_inetwork.hpp
*/
#pragma once
#include <utility>
#include <string>
#include <memory>
#include <vector>
#include <map>
#include <ie_parameter.hpp>
#include <ie_context.hpp>
#include <ie_layouts.h>
#include <ie_blob.h>
namespace InferenceEngine {
/**
* @brief A type of network objects indexes.
*/
using idx_t = size_t;
/**
* @brief This class contains a pair from layerId and port index
*/
class PortInfo {
public:
/**
* @brief The constructor creates a PortInfo object for port 0
* @param layerID Layer id
*/
PortInfo(idx_t layerID): layer(layerID), port(0) {} // NOLINT
/**
* @brief The constructor creates a PortInfo object
* @param layerID Layer id
* @param portID Port id
*/
PortInfo(idx_t layerID, idx_t portID): layer(layerID), port(portID) {}
/**
* @brief Get layer id
* @return Layer id
*/
idx_t layerId() const {
return layer;
}
/**
* @brief Get port id
* @return Port id
*/
idx_t portId() const {
return port;
}
/**
* @brief Compares the given PortInfo object with the current one
* @param portInfo PortInfo object to compare with
* @return true if the given PortInfo object is equal to the current one, false - otherwise
*/
bool operator==(const PortInfo& portInfo) const {
return layer == portInfo.layerId() && port == portInfo.portId();
}
/**
* @brief Checks if the given PortInfo object is not equal to the current one
* @param portInfo PortInfo object to compare with
* @return true if the given PortInfo object is not equal to the current one, false - otherwise
*/
bool operator!=(const PortInfo& portInfo) const {
return !(*this == portInfo);
}
private:
idx_t layer;
idx_t port;
};
/**
* @brief This class is the main object to describe the Inference Engine connection.
*/
class Connection {
public:
/**
* @brief Constructor of a connection object.
* @param input pair of the index of input layer and the index of output port
* @param output pair of the index of output layer and the index of input port
*/
Connection(const PortInfo& input, const PortInfo& output): input(input), output(output) {}
/**
* @brief Compares the given Connection with the current one
* @param connection Connection to compare with
* @return true if the given Connection is equal to the current one, false - otherwise
*/
bool operator==(const Connection& connection) const {
return input == connection.from() && output == connection.to();
}
/**
* @brief Checks if the given Connection is not equal to the current one
* @param connection Connection to compare with
* @return true if the given Connection is not equal to the current one, false - otherwise
*/
bool operator!=(const Connection& connection) const {
return !(*this == connection);
}
/**
* Returns a constant reference to a pair of input layer index and output port index.
* @return pair of the index of input layer and the index of output port
*/
const PortInfo& from() const {
return input;
}
/**
* Returns a constant reference to a pair of output layer index and input port index.
* @return pair of the index of output layer and the index of input port
*/
const PortInfo& to() const {
return output;
}
private:
PortInfo input;
PortInfo output;
};
/**
* @brief This class is the main object to describe the Inference Engine port.
*/
class Port {
public:
/**
* @brief Default constructor of a port object.
*/
Port() = default;
/**
* @brief Constructor of a port object with shapes.
* @param shapes port shapes
*/
explicit Port(const SizeVector& shapes): pShapes(shapes) {}
/**
* @brief Copy constructor.
* @param port object to copy
*/
Port(const Port& port) {
this->pShapes = port.pShapes;
}
/**
* @brief Returns a constant reference to a vector with shapes.
* Shapes should be initialized if shape is empty.
* @return constant reference to shapes
*/
const SizeVector& shape() const noexcept {
return pShapes;
}
/**
* @brief Returns a reference to a vector with shapes.
* Shapes should be initialized if shape is empty.
* @return reference to shapes
*/
SizeVector& shape() noexcept {
return pShapes;
}
private:
SizeVector pShapes;
};
/**
 * @brief This class is the main interface to describe the Inference Engine layer parameters.
 * All methods here are constant and do not throw exceptions.
 */
class IParameters {
public:
    /**
     * @brief A shared pointer to the IParameters object.
     */
    using Ptr = std::shared_ptr<IParameters>;

    /**
     * @brief Virtual destructor for the parameters interface
     */
    virtual ~IParameters() = default;

    /**
     * @brief Returns a constant reference to a map with parameters.
     * Keys are parameter names; values are Parameter objects.
     * @return Map of parameters
     */
    virtual const std::map<std::string, Parameter>& getParameters() const noexcept = 0;

    /**
     * @brief Returns a constant reference to a map of constant pointers to constant data.
     * Keys are blob names; values are read-only blob pointers.
     * @return Map of constant pointers to constant data
     */
    virtual const std::map<std::string, Blob::CPtr>& getConstantData() const noexcept = 0;
};
class INetwork;
// NOTE(review): "INetwotkIterator" looks like a misspelling of "INetworkIterator"
// (compare details::INetworkIterator declared below); this forward declaration
// appears unused in this header — confirm no external users and consider removing.
template <class T>
class INetwotkIterator;
/**
 * @brief This class is the main interface to describe the Inference Engine layer.
 * All methods here are constant and do not throw exceptions.
 */
class ILayer {
public:
    /**
     * @brief A shared pointer to the ILayer object
     */
    using Ptr = std::shared_ptr<ILayer>;
    /**
     * @brief A shared pointer to the const ILayer object
     */
    using CPtr = std::shared_ptr<const ILayer>;

    /**
     * @brief Virtual destructor for the layer interface
     */
    virtual ~ILayer() = default;

    /**
     * @brief Returns an id of the layer.
     * @return Layer id
     */
    virtual idx_t getId() const noexcept = 0;

    /**
     * @brief Returns a layer name.
     * @return Layer name
     */
    virtual const std::string& getName() const noexcept = 0;

    /**
     * @brief Returns a layer type.
     * @return Layer type
     */
    virtual const std::string& getType() const noexcept = 0;

    /**
     * @brief Returns a constant smart pointer reference to the Network interface
     * the layer belongs to.
     * @return Network interface smart pointer
     */
    virtual const std::shared_ptr<INetwork>& getGraph() const noexcept = 0;

    /**
     * @brief Returns a constant smart pointer reference to a Parameters interface.
     * @return Parameters interface smart pointer
     */
    virtual const IParameters::Ptr& getParameters() const noexcept = 0;

    /**
     * @brief Returns a constant reference to a vector with input ports.
     * @return Vector of input ports
     */
    virtual const std::vector<Port>& getInputPorts() const noexcept = 0;

    /**
     * @brief Returns a constant reference to a vector with output ports.
     * @return Vector of output ports
     */
    virtual const std::vector<Port>& getOutputPorts() const noexcept = 0;
};
namespace details {

// Forward declaration of the network iterator; the definition is provided by
// <details/ie_inetwork_iterator.hpp>, included at the end of this header.
template<class NT, class LT>
class INetworkIterator;

}  // namespace details
/**
 * @brief This class is the main interface to describe the Inference Engine network.
 *
 * All methods here are constant and do not throw exceptions.
 */
class INetwork {
public:
    /**
     * @brief A shared pointer to the INetwork object.
     */
    using Ptr = std::shared_ptr<INetwork>;
    /**
     * @brief A constant iterator for INetwork objects definition
     */
    using const_iterator = details::INetworkIterator<const INetwork, const ILayer>;

    /**
     * @brief Virtual destructor for the network interface
     */
    virtual ~INetwork() = default;

    /**
     * @brief Begin network iterator
     * @return const INetwork iterator
     */
    virtual const_iterator begin() const noexcept = 0;

    /**
     * @brief End network iterator
     * @return const INetwork iterator
     */
    virtual const_iterator end() const noexcept = 0;

    /**
     * @brief Returns a number of layers in the network.
     * @return Layers count
     */
    virtual size_t size() const noexcept = 0;

    /**
     * @brief Returns a constant smart pointer to a Layer interface.
     * If the layer is missing, returns nullptr.
     * @param id Id of the Layer
     * @return Layer interface smart pointer
     */
    virtual const ILayer::Ptr getLayer(idx_t id) const noexcept = 0;

    /**
     * @brief Returns a constant vector of input layers.
     * @return Vector of input layers
     */
    virtual const std::vector<ILayer::Ptr> getInputs() const noexcept = 0;

    /**
     * @brief Returns a constant vector of output layers.
     * @return Vector of output layers
     */
    virtual const std::vector<ILayer::Ptr> getOutputs() const noexcept = 0;

    /**
     * @brief Returns a constant vector of connections for the specified layer.
     * If the layer is missing, returns an empty vector.
     * @param layerId layer index
     * @return Vector of connections
     */
    virtual const std::vector<Connection> getLayerConnections(idx_t layerId) const noexcept = 0;

    /**
     * @brief Returns a network name.
     * @return Network name
     */
    virtual const std::string& getName() const noexcept = 0;

    /**
     * @brief Returns a network context
     * @return const reference to Context
     */
    virtual const Context& getContext() const noexcept = 0;
};
} // namespace InferenceEngine
#include <details/ie_inetwork_iterator.hpp>

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -22,8 +21,6 @@
#include "ie_device.hpp"
#include "ie_layers_property.hpp"
#include "ie_icnn_network.hpp"
namespace InferenceEngine {
/**
* @brief This is an internal common Layer parameter parsing arguments
@@ -503,6 +500,10 @@ public:
* @brief Number of groups
*/
unsigned int _group = 1u;
/**
* @brief Auto padding type
*/
std::string _auto_pad;
/**
* @brief Creates a new ConvolutionLayer instance.
@@ -593,6 +594,10 @@ public:
* @brief A flag that indicates if padding is excluded or not
*/
bool _exclude_pad = false;
/**
* @brief Auto padding type
*/
std::string _auto_pad;
/**
* @brief Creates a new PoolingLayer instance.
@@ -926,61 +931,33 @@ public:
};
/**
* @brief This class represents RNN sequence layer
*/
class RNNLayer : public WeightableLayer {
public:
CellType cellType;
/**
* @brief An axis by which iteration is performed. Axis=0 means first input blob dimension is sequence, axis=1 means first dimension is batch.
*/
unsigned int _axis = 1;
using WeightableLayer::WeightableLayer;
/**
* @brief Creates a new RNNLayer instance.
*/
explicit RNNLayer(const LayerParams &p) : WeightableLayer(p) {}
};
/**
* @brief This class represents LSTMCell pseudo-layer to be used in TensorIterator
*/
class LSTMCell : public WeightableLayer {
public:
using WeightableLayer::WeightableLayer;
};
class ICNNNetReader;
/**
* @brief This class represents TensorIterator layer
*/
* @brief This class represents TensorIterator layer
*/
class TensorIterator : public CNNLayer {
public:
using CNNNetReaderPtr = std::shared_ptr<ICNNNetReader>;
CNNNetReaderPtr reader;
struct PortMap {
// Data map rule
int from; /**< Index of external data from ins/outs fields of CNNLayer */
int to; /**< Index of internal data in iterator body */
struct BackEdge {
int fromLayer;
int fromPort;
int toLayer;
int toPort;
// Iteration rule
int axis; /**< Axis to iterate through */
int stride; /**< Stride to iterate through */
int start; /**< Start index of iteration range */
int end; /**< Last index of iteration range */
int part_size; /**< Part size which will be transferred to body subnetwork */
};
struct Port {
int external_port_id;
int internal_layer_id;
int internal_port_id;
int axis;
int part_size;
int stride;
struct Body {
std::vector<DataPtr> inputs;
std::vector<DataPtr> outputs;
};
std::vector<Port> input_ports;
std::vector<Port> output_ports;
std::vector<BackEdge> backEdges;
std::vector<PortMap> input_port_map;
std::vector<PortMap> output_port_map;
std::vector<PortMap> back_edges;
Body body;
using CNNLayer::CNNLayer;
};
@@ -1045,4 +1022,83 @@ public:
using WeightableLayer::WeightableLayer;
};
/**
* @brief This class represents a general matrix multiplication operation layer
* Formula is: dst := alpha*src1*src2 + beta*src3
*/
class GemmLayer : public CNNLayer {
public:
/**
* @brief A scale factor of src1 matrix
*/
float alpha = 1.f;
/**
* @brief A scale factor of src3 matrix
*/
float beta = 1.f;
/**
* @brief A flag that indicates if the src1 matrix is to be transposed
*/
bool transpose_a = false;
/**
* @brief A flag that indicates if the src2 matrix is to be transposed
*/
bool transpose_b = false;
/**
* @brief Creates a new GemmLayer instance.
*/
using CNNLayer::CNNLayer;
};
/**
* @brief This class represents a standard Pad layer
* Adds paddings to input tensor
*/
class PadLayer : public CNNLayer {
public:
/**
* @enum ePadMode
* @brief Defines possible modes of pad operation
*/
enum ePadMode {
Constant = 0, Edge, Reflect, Symmetric
};
/**
* @brief Size of padding in the beginning of each axis
*/
PropertyVector<unsigned int> pads_begin;
/**
* @brief Size of padding in the end of each axis
*/
PropertyVector<unsigned int> pads_end;
/**
* @brief Mode of pad operation
*/
ePadMode pad_mode = Constant;
/**
* @brief A pad value which is used for filling in Constant mode
*/
float pad_value = 0.0f;
/**
* @brief Creates a new PadLayer instance.
*/
using CNNLayer::CNNLayer;
};
/**
* @brief This class represents a standard Gather layer
* Gather slices from Dictionary according to Indexes
*/
class GatherLayer : public CNNLayer {
public:
/**
* @brief The axis in Dictionary to gather Indexes from
*/
int axis = 0;
/**
* @brief Creates a new GatherLayer instance.
*/
using CNNLayer::CNNLayer;
};
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -9,6 +8,8 @@
*/
#pragma once
#include <vector>
namespace InferenceEngine {
constexpr const int MAX_DIMS_NUMBER = 12;
@@ -32,13 +33,20 @@ public:
if (len > N) {
THROW_IE_EXCEPTION << "Property size exceeed limit of: " << N;
}
for (int i = 0; i < len; i++) {
for (size_t i = 0; i < len; i++) {
_axises[i] = val;
_allocated[i] = true;
}
_length = len;
}
explicit PropertyVector(const std::vector<T>& values) {
size_t i = 0;
for (const auto val : values) {
insert(i++, val);
}
}
/**
* @brief allows access up-to capacity size
* @param index

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -218,11 +217,18 @@ public:
* @param l memory layout
*/
void setLayout(Layout l) {
bool inconsistentLayout = false;
switch (layout) {
bool inconsistentLayout = true;
switch (l) {
case Layout::C:
inconsistentLayout = dims.size() != 1;
break;
case Layout::BLOCKED:
inconsistentLayout = false;
break;
case Layout::NCDHW:
case Layout::NDHWC:
inconsistentLayout = dims.size() != 5;
break;
case Layout::OIHW:
case Layout::NCHW:
case Layout::NHWC:
@@ -240,7 +246,7 @@ public:
break;
}
if (inconsistentLayout)
THROW_IE_EXCEPTION << "Dims and format are inconsistent.";
THROW_IE_EXCEPTION << "Dims(" << std::to_string(dims.size()) << ") and format(" << std::to_string(l) << ") are inconsistent.";
layout = l;
}

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -17,20 +16,28 @@
#define IE_THREAD_SEQ 2
#if IE_THREAD == IE_THREAD_TBB
#define TBB_PREVIEW_LOCAL_OBSERVER 1
#include "tbb/task_scheduler_observer.h"
#include "tbb/parallel_for.h"
#include "tbb/task_arena.h"
#include "tbb/parallel_reduce.h"
#include "tbb/blocked_range.h"
#include "tbb/blocked_range2d.h"
#include "tbb/blocked_range3d.h"
inline int parallel_get_max_threads() { return tbb::this_task_arena::max_concurrency(); }
inline int parallel_get_num_threads() { return parallel_get_max_threads(); }
inline int parallel_get_thread_num() { return tbb::this_task_arena::current_thread_index(); }
inline void parallel_set_num_threads(int n) { return; }
inline int parallel_get_env_threads() { return 0; }
#elif IE_THREAD == IE_THREAD_OMP
#include <cstdlib>
#include <string>
#include <omp.h>
/* MSVC still supports omp 2.0 only */
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
# define collapse(x)
@@ -39,8 +46,20 @@ inline int parallel_get_max_threads() { return omp_get_max_threads(); }
inline int parallel_get_num_threads() { return omp_get_num_threads(); }
inline int parallel_get_thread_num() { return omp_get_thread_num(); }
inline void parallel_set_num_threads(int n) { omp_set_num_threads(n); }
inline int parallel_get_env_threads() {
int env_cores = 0;
if (getenv("OMP_NUM_THREADS") != nullptr) {
try {
env_cores = std::stoi(getenv("OMP_NUM_THREADS"));
} catch (const std::exception&) {
env_cores = 0;
}
}
return env_cores;
}
#elif IE_THREAD == IE_THREAD_SEQ
inline int parallel_get_env_threads() { return 1; }
inline int parallel_get_max_threads() { return 1; }
inline int parallel_get_num_threads() { return 1; }
inline int parallel_get_thread_num() { return 0; }
@@ -75,6 +94,35 @@ void parallel_nt(int nthr, F func) {
#endif
}
template <typename F>
void parallel_nt_static(int nthr, F func) {
#if IE_THREAD == IE_THREAD_SEQ
const bool serial = true;
#else
const bool serial = false;
#endif
if (serial || nthr == 1) {
func(0, 1);
return;
}
if (nthr == 0) nthr = parallel_get_max_threads();
#if IE_THREAD == IE_THREAD_TBB
tbb::parallel_for(0, nthr, [&](int ithr) {
func(ithr, nthr);
}
, tbb::static_partitioner{});
#elif IE_THREAD == IE_THREAD_OMP
# pragma omp parallel num_threads(nthr)
{
func(parallel_get_thread_num(), parallel_get_num_threads());
}
#endif
}
template <typename T0, typename R, typename F>
R parallel_sum(const T0 D0, R &input, F func) {
#if IE_THREAD == IE_THREAD_TBB
@@ -91,10 +139,17 @@ R parallel_sum(const T0 D0, R &input, F func) {
});
#else
R sum = input;
#ifdef _MSC_VER
using T0_IT = typename std::make_signed<T0>::type;
#else
using T0_IT = T0;
#endif
#if IE_THREAD == IE_THREAD_OMP
#pragma omp parallel for reduction(+ : sum) schedule(static)
#endif
for (T0 dim1 = 0; dim1 < D0; dim1++) {
for (T0_IT dim1 = 0; dim1 < D0; dim1++) {
sum += func(dim1);
}
return sum;
@@ -120,17 +175,71 @@ R parallel_sum2d(const T0 D0, const T1 D1, R input, F func) {
});
#else
R sum = input;
#ifdef _MSC_VER
using T0_IT = typename std::make_signed<T0>::type;
using T1_IT = typename std::make_signed<T1>::type;
#else
using T0_IT = T0;
using T1_IT = T1;
#endif
#if IE_THREAD == IE_THREAD_OMP
#pragma omp parallel for collapse(2) reduction(+ : sum) schedule(static)
#endif
for (T0 dim2 = 0; dim2 < D0; dim2++) {
for (T1 dim1 = 0; dim1 < D1; dim1++) {
for (T0_IT dim2 = 0; dim2 < D0; dim2++) {
for (T1_IT dim1 = 0; dim1 < D1; dim1++) {
sum += func(dim2, dim1);
}
}
return sum;
#endif
}
template <typename T0, typename T1, typename T2, typename R, typename F>
R parallel_sum3d(const T0 D0, const T1 D1, const T2 D2, R input, F func) {
#if IE_THREAD == IE_THREAD_TBB
return tbb::parallel_reduce(
tbb::blocked_range3d<T0, T1, T2>(0, D0, 0, D1, 0, D2), input,
[&](const tbb::blocked_range3d<T0, T1, T2>& r, R init)->R {
R sum = init;
for (T0 dim1 = r.pages().begin(); dim1 < r.pages().end(); dim1++) {
for (T1 dim2 = r.rows().begin(); dim2 < r.rows().end(); dim2++) {
for (T2 dim3 = r.cols().begin(); dim3 < r.cols().end(); dim3++) {
sum += func(dim1, dim2, dim3);
}
}
}
return sum;
},
[](R x, R y)->R {
return x + y;
});
#else
R sum = input;
#ifdef _MSC_VER
using T0_IT = typename std::make_signed<T0>::type;
using T1_IT = typename std::make_signed<T1>::type;
using T2_IT = typename std::make_signed<T2>::type;
#else
using T0_IT = T0;
using T1_IT = T1;
using T2_IT = T2;
#endif
#if IE_THREAD == IE_THREAD_OMP
#pragma omp parallel for collapse(3) reduction(+ : sum) schedule(static)
#endif
for (T0_IT dim1 = 0; dim1 < D0; dim1++) {
for (T1_IT dim2 = 0; dim2 < D1; dim2++) {
for (T2_IT dim3 = 0; dim3 < D2; dim3++) {
sum += func(dim1, dim2, dim3);
}
}
}
return sum;
#endif
}
template<typename T>
inline T parallel_it_init(T start) { return start; }

View File

@@ -0,0 +1,365 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief A header file for the CNNNetworkIterator class
* @file ie_cnn_network_iterator.hpp
*/
#pragma once
#include <details/ie_exception.hpp>
#include <algorithm>
#include <iterator>
#include <vector>
#include <cctype>
#include <string>
#include <map>
namespace InferenceEngine {
/**
* @brief This class represents an object to work with different parameters
*/
class Parameter {
public:
/**
* @brief Default constructor
*/
Parameter() = default;
/**
* @brief The constructor creates a Parameter object with string value
* @param value string value
*/
Parameter(const std::string& value): initialized(true), value(value) {} // NOLINT
/**
* @brief The constructor creates a Parameter object with template value
* @param value template value
*/
template <class T>
Parameter(const T& value): initialized(true), value(std::to_string(value)) {} // NOLINT
/**
* @brief The constructor creates a Parameter object with a vector of template values
* @param values vector of template values
*/
template <class T>
Parameter(const std::vector<T>& values): initialized(true) { // NOLINT
for (const auto& val : values) {
if (!value.empty())
value += ",";
value += std::to_string(val);
}
}
/**
* @brief The cast to string object
* Throws exception if parameter was not found.
* @return string value
*/
operator std::string() const { // NOLINT
return asString();
}
/**
* @brief Returns a string value for the given parameter or returns the default one
* @param def Default value of the parameter if not found
* @return A string value
*/
std::string asString(std::string def) const {
if (!initialized) {
return def;
}
return value;
}
/**
* @brief Returns a string value for the given parameter.
* Throws exception if parameter was not found.
* @return A string value
*/
std::string asString() const {
if (!initialized) {
THROW_IE_EXCEPTION << "Parameter was not initialized!";
}
return value;
}
/**
* @brief Gets float value for the given parameter
* @param def - default value of the parameter if not found
* @return float value
*/
float asFloat(float def) const {
std::string val = asString(std::to_string(def));
try {
return std::stof(val);
} catch (...) {
THROW_IE_EXCEPTION << "Value " << val << " cannot be casted to float.";
}
}
/**
* @brief Returns a float value for the given layer parameter
* @return A float value for the specified parameter
*/
float asFloat() const {
std::string val = asString();
try {
return std::stof(val);
} catch (...) {
THROW_IE_EXCEPTION << "Value " << val << " cannot be casted to float.";
}
}
/**
* @brief Returns a vector of float values for the given parameter or returns the default value
* @param def Default value of the parameter if not found
* @return vector of float values
*/
std::vector<float> asFloats(std::vector<float> def) const {
std::string vals = asString("");
std::vector<float> result;
std::istringstream stream(vals);
std::string str;
if (vals.empty())
return def;
while (getline(stream, str, ',')) {
try {
result.push_back(std::stof(str));
} catch (...) {
THROW_IE_EXCEPTION << "Value " << vals << " cannot be casted to floats.";
}
}
return result;
}
/**
* @brief Returns a vector of float values for the given parameter
* @return vector of float values
*/
std::vector<float> asFloats() const {
std::string vals = asString();
std::vector<float> result;
std::istringstream stream(vals);
std::string str;
while (getline(stream, str, ',')) {
try {
result.push_back(std::stof(str));
} catch (...) {
THROW_IE_EXCEPTION << "Value " << vals << " cannot be casted to floats.";
}
}
return result;
}
/**
* @brief Returns an integer value for the given parameter or returns the default value
* @param def Default value of the parameter if not found
* @return An int value for the specified parameter
*/
int asInt(int def) const {
std::string val = asString(std::to_string(def));
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Value " << val << " cannot be casted to int.";
}
}
/**
* @brief Returns an integer value for the given parameter
* @return An int value for the specified parameter
*/
int asInt() const {
std::string val = asString();
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Value " << val << " cannot be casted to int.";
}
}
/**
* @brief Returns a vector of int values for the given parameter or returns the default value
* @param def Default value of the parameter if not found
* @return vector of int values
*/
std::vector<int> asInts(std::vector<int> def) const {
std::string vals = asString("");
std::vector<int> result;
std::istringstream stream(vals);
std::string str;
if (vals.empty())
return def;
while (getline(stream, str, ',')) {
try {
result.push_back(std::stoi(str));
} catch (...) {
THROW_IE_EXCEPTION << "Value " << vals << " cannot be casted to ints.";
}
}
return result;
}
/**
* @brief Returns a vector of int values for the given parameter
* @return vector of int values
*/
std::vector<int> asInts() const {
std::string vals = asString();
std::vector<int> result;
std::istringstream stream(vals);
std::string str;
while (getline(stream, str, ',')) {
try {
result.push_back(std::stoi(str));
} catch (...) {
THROW_IE_EXCEPTION << "Value " << vals << " cannot be casted to ints.";
}
}
return result;
}
/**
* @brief Returns an unsigned integer value for the given parameter or returns the default value
* @param def Default value of the parameter if not found
* @return An unsigned integer value for the specified parameter
*/
unsigned int asUInt(unsigned int def) const {
std::string val = asString(std::to_string(def));
std::string message = "Value " + val + " cannot be casted to unsigned int.";
try {
int value = std::stoi(val);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
return static_cast<unsigned int>(value);
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
/**
* @brief Returns an unsigned integer value for the given parameter
* @return An unsigned integer value for the specified parameter
*/
unsigned int asUInt() const {
std::string val = asString();
std::string message = "Value " + val + " cannot be casted to unsigned int.";
try {
int value = std::stoi(val);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
return static_cast<unsigned int>(value);
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
/**
* @brief Returns a vector of unsigned int values for the given parameter or returns the default value
* @param def Default value of the parameter if not found
* @return vector of unsigned int values
*/
std::vector<unsigned int> asUInts(std::vector<unsigned int> def) const {
std::string vals = asString("");
std::vector<unsigned int> result;
std::istringstream stream(vals);
std::string str;
std::string message = "Value " + vals + " cannot be casted to unsigned ints.";
if (vals.empty())
return def;
while (getline(stream, str, ',')) {
try {
int value = std::stoi(str);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
result.push_back(static_cast<unsigned int>(value));
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
return result;
}
/**
* @brief Returns a vector of unsigned int values for the given parameter
* @return vector of unsigned int values
*/
std::vector<unsigned int> asUInts() const {
std::string vals = asString();
std::vector<unsigned int> result;
std::istringstream stream(vals);
std::string str;
std::string message = "Value " + vals + " cannot be casted to unsigned ints.";
while (getline(stream, str, ',')) {
try {
int value = std::stoi(str);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
result.push_back(static_cast<unsigned int>(value));
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
return result;
}
/**
* @brief Returns an boolean value for the given parameter.
* The valid values are (true, false, 1, 0).
* @param def Default value of the parameter if not found
* @return An bool value for the specified parameter
*/
bool asBool(bool def) const {
std::string val = asString(std::to_string(def));
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return std::tolower(value);
});
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return static_cast<bool>(asInt(def));
}
return result;
}
/**
* @brief Returns an boolean value for the given parameter.
* The valid values are (true, false, 1, 0).
* @return An bool value for the specified parameter
*/
bool asBool() const {
std::string val = asString();
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return std::tolower(value);
});
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return static_cast<bool>(asInt());
}
return result;
}
 private:
    // Presumably tracks whether the parameter was explicitly set; the flag is
    // not referenced by the accessors visible here — TODO confirm at call sites.
    bool initialized;
    // Presumably the raw string form of the parameter that the as*() accessors
    // parse via asString() — TODO confirm against asString()'s definition.
    std::string value;
};
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -24,14 +23,19 @@
#if defined(_WIN32)
#ifdef IMPLEMENT_INFERENCE_ENGINE_PLUGIN
#define INFERENCE_PLUGIN_API(type) extern "C" __declspec(dllexport) type
#ifdef IMPLEMENT_INFERENCE_ENGINE_PLUGIN
#define INFERENCE_PLUGIN_API(type) extern "C" __declspec(dllexport) type
#else
#define INFERENCE_PLUGIN_API(type) extern "C" type
#endif
#elif(__GNUC__ >= 4)
#ifdef IMPLEMENT_INFERENCE_ENGINE_PLUGIN
#define INFERENCE_PLUGIN_API(type) extern "C" __attribute__((visibility("default"))) type
#else
#define INFERENCE_PLUGIN_API(type) extern "C" type
#endif
#else
#define INFERENCE_PLUGIN_API(type) extern "C" type
#endif
#else
#define INFERENCE_PLUGIN_API(TYPE) extern "C" TYPE
#define INFERENCE_PLUGIN_API(TYPE) extern "C" TYPE
#endif
namespace InferenceEngine {
@@ -162,7 +166,7 @@ public:
* @param network Network object to query
* @param resp Pointer to the response message that holds a description of an error if any occurred
*/
virtual void QueryNetwork(const ICNNNetwork& network, QueryNetworkResult& res) const noexcept {
virtual void QueryNetwork(const ICNNNetwork& /*network*/, QueryNetworkResult& res) const noexcept {
res.rc = InferenceEngine::NOT_IMPLEMENTED;
}
@@ -172,8 +176,8 @@ public:
* @param config Map of pairs: (config parameter name, config parameter value)
* @param resp Pointer to the response message that holds a description of an error if any occurred
*/
virtual void QueryNetwork(const ICNNNetwork& network,
const std::map<std::string, std::string> &config, QueryNetworkResult& res) const noexcept {
virtual void QueryNetwork(const ICNNNetwork& /*network*/,
const std::map<std::string, std::string> &/*config*/, QueryNetworkResult& res) const noexcept {
res.rc = InferenceEngine::NOT_IMPLEMENTED;
}
};

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -38,13 +37,34 @@ namespace PluginConfigParams {
DECLARE_CONFIG_VALUE(YES);
DECLARE_CONFIG_VALUE(NO);
/**
* @brief Limit #threads that are used by Inference Engine for inference on the CPU.
*/
DECLARE_CONFIG_KEY(CPU_THREADS_NUM);
/**
* @brief The name for setting CPU affinity per thread option.
* It is passed to IInferencePlugin::SetConfig(), this option should be used with values:
* PluginConfigParams::YES or PluginConfigParams::NO
* Ignored, if the OpenVINO compiled with OpenMP threading and any affinity-related OpenMP's
* environment variable is set
*/
DECLARE_CONFIG_KEY(CPU_BIND_THREAD);
/**
* @brief Optimize CPU execution to maximize throughput.
* It is passed to IInferencePlugin::SetConfig(), this option should be used with values:
 * - KEY_CPU_THROUGHPUT_NUMA creates as many streams as needed to accommodate NUMA and avoid associated penalties
* - KEY_CPU_THROUGHPUT_AUTO creates bare minimum of streams to improve the performance,
* this is the most portable option if you have no insights into how many cores you target machine will have
* (and what is the optimal number of streams)
* - finally, specifying the positive integer value creates the requested number of streams
*/
DECLARE_CONFIG_VALUE(CPU_THROUGHPUT_NUMA);
DECLARE_CONFIG_VALUE(CPU_THROUGHPUT_AUTO);
DECLARE_CONFIG_KEY(CPU_THROUGHPUT_STREAMS);
/**
* @brief The name for setting performance counters option.
* It is passed to IInferencePlugin::SetConfig(), this option should be used with values:
@@ -125,10 +145,21 @@ DECLARE_CONFIG_KEY(DEVICE_ID);
/**
* @brief the key for enabling exclusive mode for async requests of different executable networks and the same plugin.
* Sometimes it's necessary to avoid oversubscription requests that are sharing the same device in parallel.
* E.g. There 2 task executors for CPU device: one - in FPGA, another - in MKLDNN. Parallel execution both of them leads to
* not optimal CPU usage. More efficient to run the corresponding tasks one by one via single executor.
* E.g. There 2 task executors for CPU device: one - in the Hetero plugin, another - in pure CPU plugin.
* Parallel execution both of them might lead to oversubscription and not optimal CPU usage. More efficient
* to run the corresponding tasks one by one via single executor.
* By default, the option is set to YES for hetero cases, and to NO for conventional (single-plugin) cases
* Notice that setting YES disables the CPU streams feature (see another config key in this file)
*/
DECLARE_CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS);
/**
* @brief This key enables dumping of the internal primitive graph.
* Should be passed into LoadNetwork method to enable dumping of internal graph of primitives and
* corresponding configuration information. Value is a name of output dot file without extension.
* Files <dot_file_name>_init.dot and <dot_file_name>_perf.dot will be produced.
*/
DECLARE_CONFIG_KEY(DUMP_EXEC_GRAPH_AS_DOT);
} // namespace PluginConfigParams
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -24,24 +23,24 @@ public:
* @brief A constructor
* @param pp Vector of paths to plugin directories
*/
explicit PluginDispatcher(const std::vector<std::string> &pp) : pluginDirs(pp) {}
explicit PluginDispatcher(const std::vector<file_name_t> &pp) : pluginDirs(pp) {}
/**
* @brief Loads a plugin from plugin directories
* @param name Plugin name
* @return A pointer to the loaded plugin
*/
virtual InferencePlugin getPluginByName(const std::string& name) const {
virtual InferencePlugin getPluginByName(const file_name_t& name) const {
std::stringstream err;
for (auto &pluginPath : pluginDirs) {
try {
return InferencePlugin(InferenceEnginePluginPtr(make_plugin_name(pluginPath, name)));
}
catch (const std::exception &ex) {
err << "cannot load plugin: " << name << " from " << pluginPath << ": " << ex.what() << ", skipping\n";
err << "cannot load plugin: " << fileNameToString(name) << " from " << fileNameToString(pluginPath) << ": " << ex.what() << ", skipping\n";
}
}
THROW_IE_EXCEPTION << "Plugin " << name << " cannot be loaded: " << err.str() << "\n";
THROW_IE_EXCEPTION << "Plugin " << fileNameToString(name) << " cannot be loaded: " << err.str() << "\n";
}
/**
@@ -77,7 +76,7 @@ public:
std::stringstream err;
for (std::string& name : result.names) {
try {
return getPluginByName(name);
return getPluginByName(stringToFileName(name));
}
catch (const std::exception &ex) {
err << "Tried load plugin : " << name << ", error: " << ex.what() << "\n";
@@ -93,17 +92,26 @@ protected:
* @param input Plugin name
* @return The path to the plugin
*/
std::string make_plugin_name(const std::string &path, const std::string &input) const {
std::string separator =
file_name_t make_plugin_name(const file_name_t &path, const file_name_t &input) const {
file_name_t separator =
#if defined _WIN32 || defined __CYGWIN__
"\\";
# if defined UNICODE
L"\\";
# else
"\\";
# endif
#else
"/";
"/";
#endif
if (path.empty())
separator = "";
separator = file_name_t();
#ifdef _WIN32
return path + separator + input + ".dll";
return path + separator + input +
# if defined UNICODE
L".dll";
# else
".dll";
# endif
#elif __APPLE__
return path + separator + "lib" + input + ".dylib";
#else
@@ -111,7 +119,8 @@ protected:
#endif
}
private:
std::vector<std::string> pluginDirs;
std::vector<file_name_t> pluginDirs;
};
} // namespace InferenceEngine

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -80,20 +79,44 @@ public:
return Precision(8 * sizeof(T), typeName == nullptr ? typeid(T).name() : typeName);
}
/** @brief checks whether given storage class T can be used for store objects of current precision */
template <class T>
bool hasStorageType(const char * typeName = nullptr) const noexcept {
if (sizeof(T) != size()) {
return false;
}
#define CASE(x, y) case x: return std::is_same<T, y>()
#define CASE2(x, y1, y2) case x: return std::is_same<T, y1>() || std::is_same<T, y2>()
switch (precisionInfo.value) {
CASE(FP32, float);
CASE2(FP16, int16_t, uint16_t);
CASE(I16, int16_t);
CASE(I32, int32_t);
CASE(U16, uint16_t);
CASE(U8, uint8_t);
CASE(I8, int8_t);
CASE2(Q78, int16_t, uint16_t);
default : return areSameStrings(name(), typeName == nullptr ? typeid(T).name() : typeName);
#undef CASE
#undef CASE2
}
}
/** @brief Equality operator with Precision object */
bool operator == (const Precision & p) const noexcept {
bool operator == (const Precision & p) const noexcept {
return precisionInfo.value == p &&
precisionInfo.bitsSize == p.precisionInfo.bitsSize &&
areSameStrings(precisionInfo.name, p.precisionInfo.name);
}
/** @brief Equality operator with ePrecision enum value */
bool operator == (const ePrecision p) const noexcept {
bool operator == (const ePrecision p) const noexcept {
return precisionInfo.value == p;
}
/** @brief Inequality operator with ePrecision enum value */
bool operator != (const ePrecision p) const noexcept {
bool operator != (const ePrecision p) const noexcept {
return precisionInfo.value != p;
}
@@ -103,7 +126,7 @@ public:
return *this;
}
/** @brief Cust operator to a bool */
/** @brief Cast operator to a bool */
explicit operator bool() const noexcept {
return precisionInfo.value != UNSPECIFIED;
}
@@ -113,7 +136,7 @@ public:
return precisionInfo.value == UNSPECIFIED;
}
/** @brief Cust operator to a ePrecision */
/** @brief Cast operator to a ePrecision */
operator Precision::ePrecision () const noexcept {
return precisionInfo.value;
}
@@ -162,7 +185,7 @@ public:
template<Precision::ePrecision precision>
static PrecisionInfo makePrecisionInfo(const char * name);
static bool areSameStrings(const char *l, const char *r) {
static bool areSameStrings(const char *l, const char *r) noexcept {
if (l == r)
return true;
@@ -208,7 +231,7 @@ struct PrecisionTrait<Precision::FP32> {
template<>
struct PrecisionTrait<Precision::FP16> {
using value_type = uint16_t;
using value_type = int16_t;
};
template<>
struct PrecisionTrait<Precision::Q78> {

View File

@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

Some files were not shown because too many files have changed in this diff Show More