nGraph code style upgrade to clang-format-9 (#4721)

* ngraph clang-format upgrade to 9
* Reformatted files
* Remove comma at the end of test data vector
parent b83b5115a5
commit 5f098e1079

.github/workflows/code_style.yml (vendored) +6 -6
@@ -3,14 +3,14 @@ on: [push, pull_request]
 jobs:
   nGraph:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v2
         with:
           submodules: recursive

-      - name: Install clang-format-3.9
-        run: sudo apt --assume-yes install clang-format-3.9
+      - name: Install clang-format-9
+        run: sudo apt --assume-yes install clang-format-9

       - name: Install dependencies
         run: |
@@ -50,3 +50,5 @@ IncludeCategories:
   - Regex: '^<.*'
     Priority: 2
 SortIncludes: true
+
+FixNamespaceComments: false
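
Aside: FixNamespaceComments controls whether clang-format appends a closing comment after a namespace's final brace. A minimal C++ illustration of the effect (not a line from this diff; the namespace name is arbitrary):

    namespace ngraph
    {
    } // with FixNamespaceComments: true, clang-format would add or rewrite
      // this trailing comment as "// namespace ngraph"; with false, closing
      // braces are left exactly as the author wrote them.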

@@ -27,7 +27,7 @@ set(DIRECTORIES_OF_INTEREST
     python/pyngraph
 )

-set(CLANG_FORMAT_FILENAME clang-format-3.9)
+set(CLANG_FORMAT_FILENAME clang-format-9)
 find_program(CLANG_FORMAT ${CLANG_FORMAT_FILENAME} PATHS ENV PATH)

 if (CLANG_FORMAT)

@@ -14,7 +14,7 @@
 # limitations under the License.
 # ******************************************************************************

-set(CLANG_FORMAT_FILENAME clang-format-3.9)
+set(CLANG_FORMAT_FILENAME clang-format-9)
 find_program(CLANG_FORMAT ${CLANG_FORMAT_FILENAME} PATHS ENV PATH)

 macro(STYLE_CHECK_FILE PATH)

@@ -84,6 +84,7 @@ namespace ngraph
         }
         const AT& get() override { return m_ref; }
         void set(const AT& value) override { m_ref = value; }
+
     protected:
         AT& m_ref;
     };

@@ -158,6 +159,7 @@ namespace ngraph
         }

         operator AT&() { return m_ref; }
+
     protected:
         AT& m_ref;
         VAT m_buffer;

@@ -185,6 +187,7 @@ namespace ngraph
         const std::string& get() override { return as_string(m_ref); }
         void set(const std::string& value) override { m_ref = as_enum<AT>(value); }
         operator AT&() { return m_ref; }
+
     protected:
         AT& m_ref;
     };

@@ -61,6 +61,7 @@ namespace ngraph
         static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<AxisSet>", 0};
         const DiscreteTypeInfo& get_type_info() const override { return type_info; }
         operator AxisSet&() { return m_ref; }
+
     protected:
         AxisSet& m_ref;
         std::vector<int64_t> m_buffer;

@@ -82,6 +82,7 @@ namespace ngraph
         ///
         /// See Node::set_input_is_relevant_to_value for more details.
         bool get_is_relevant_to_value() const { return m_is_relevant_to_value; }
+
     protected:
         /// \return the tensor for the connected output
         std::shared_ptr<const Tensor> get_tensor_ptr() const;

@@ -70,6 +70,7 @@ namespace ngraph
         void set_univeral_handler(const op_handler& handler) { m_universal_handler = handler; }
         /// \brief If set, handles all ops not in the handlers
         void set_default_handler(const op_handler& handler) { m_default_handler = handler; }
+
     protected:
         op_handler get_handler(Node* node)
         {

@@ -106,6 +107,7 @@ namespace ngraph
         virtual ~Inst() {}
         virtual void handle(Evaluator& evaluator, InstStack& inst_stack, Node* node) = 0;
         Node* get_node() { return m_node; }
+
     protected:
         Node* m_node;
     };

@@ -50,6 +50,7 @@ namespace ngraph
         }
         constexpr const char* get_ptr(size_t offset) const { return &m_string[offset]; }
         constexpr size_t size() const { return m_size; }
+
     private:
         const char* m_string;
         size_t m_size;

@@ -57,8 +58,9 @@ namespace ngraph

     constexpr const char* find_last(ConstString s, size_t offset, char ch)
     {
-        return offset == 0 ? s.get_ptr(0) : (s[offset] == ch ? s.get_ptr(offset + 1)
-                                                             : find_last(s, offset - 1, ch));
+        return offset == 0
+                   ? s.get_ptr(0)
+                   : (s[offset] == ch ? s.get_ptr(offset + 1) : find_last(s, offset - 1, ch));
     }

     constexpr const char* find_last(ConstString s, char ch)
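
Aside: find_last recurses from `offset` toward the start of the string and returns a pointer just past the last occurrence of `ch` (or the start of the string when `ch` is absent), all evaluable at compile time. A hypothetical usage sketch, not taken from the diff, assuming ConstString's literal constructor:

    // Compile-time basename: point just past the last '/' in a path literal.
    constexpr ConstString path("/ngraph/src/log.cpp");
    constexpr const char* base = find_last(path, '/');
    // base now points at "log.cpp"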

@@ -89,6 +91,7 @@ namespace ngraph
         ~LogHelper();

         std::ostream& stream() { return m_stream; }
+
     private:
         std::function<void(const std::string&)> m_handler_func;
         std::stringstream m_stream;

@@ -107,20 +107,21 @@ namespace ngraph
     /// Alias useful for cloning
     using NodeMap = std::unordered_map<ngraph::Node*, std::shared_ptr<ngraph::Node>>;

-    /// \brief Used in evaluator switch statement so that the case type and evaluate call
-    /// are guaranteed to have the types match.
-    ///
-    /// Use this in an evaluate_*() function like this
-    ///    switch (arg0->get_element_type())
-    ///    {
-    ///        TYPE_CASE(i8)(arg0, arg1, out, broadcast_spec); break;
-    ///        TYPE_CASE(i16)(arg0, arg1, out, broadcast_spec); break;
-    ///
-    /// Each TYPE_CASE statement expands like this:
-    ///    case element::Type_t::a: rc = evaluate<element::Type_t::a>(arg0, arg1, out, broadcast_spec)
-    ///
-    /// \note Don't forget to put a break after each statement or it will fall through and generate
-    /// a runtime error.
+    /// \brief Used in evaluator switch statement so that the case type and evaluate call
+    /// are guaranteed to have the types match.
+    ///
+    /// Use this in an evaluate_*() function like this
+    ///    switch (arg0->get_element_type())
+    ///    {
+    ///        TYPE_CASE(i8)(arg0, arg1, out, broadcast_spec); break;
+    ///        TYPE_CASE(i16)(arg0, arg1, out, broadcast_spec); break;
+    ///
+    /// Each TYPE_CASE statement expands like this:
+    ///    case element::Type_t::a: rc = evaluate<element::Type_t::a>(arg0, arg1, out,
+    ///    broadcast_spec)
+    ///
+    /// \note Don't forget to put a break after each statement or it will fall through and generate
+    /// a runtime error.

 #define TYPE_CASE(a)                                                                               \
     case element::Type_t::a: rc = evaluate<element::Type_t::a>
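
Aside: expanded, the usage described in the comment block above looks like this inside an evaluate_*() function (a sketch; the evaluate<> overloads and the arguments come from the surrounding file):

    bool rc = true;
    switch (arg0->get_element_type())
    {
        TYPE_CASE(i8)(arg0, arg1, out, broadcast_spec);
        break;
        TYPE_CASE(i16)(arg0, arg1, out, broadcast_spec);
        break;
    default: rc = false; break;
    }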

@@ -649,6 +650,7 @@ namespace ngraph
         bool visit_attributes(AttributeVisitor& visitor) override;
         static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<std::shared_ptr<Node>>", 0};
         const DiscreteTypeInfo& get_type_info() const override { return type_info; }
+
     protected:
         std::shared_ptr<Node>& m_ref;
     };

@@ -663,6 +665,7 @@ namespace ngraph

         static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<NodeVector>", 0};
         const DiscreteTypeInfo& get_type_info() const override { return type_info; }
+
     protected:
         NodeVector& m_ref;
     };

@@ -100,6 +100,7 @@ namespace ngraph
         /// \return The pad value.
         float get_pad_value() const { return m_pad_value; }
         void set_pad_value(float pad_value) { m_pad_value = pad_value; }
+
     protected:
         BinaryConvolutionMode mode_from_string(const std::string& mode) const;
         Strides m_strides;

@@ -45,6 +45,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         bool get_ctc_merge_repeated() const { return m_ctc_merge_repeated; }
+
     private:
         bool m_ctc_merge_repeated;
     };

@@ -74,6 +74,7 @@ namespace ngraph
         }
         bool get_ctc_merge_repeated() const { return ctc_merge_repeated_; }
         bool get_unique() const { return unique_; }
+
     private:
         bool preprocess_collapse_repeated_;
         bool ctc_merge_repeated_;

@@ -106,6 +106,7 @@ namespace ngraph
         virtual std::shared_ptr<Node> get_default_value() const override;
         bool is_exclusive() const { return m_exclusive; }
         bool is_reverse() const { return m_reverse; }
+
     private:
         bool m_exclusive;
         bool m_reverse;

@@ -91,6 +91,7 @@ namespace ngraph
         int64_t get_spatial_bins_y() const { return m_spatial_bins_y; }
         float get_trans_std() const { return m_trans_std; }
         int64_t get_part_size() const { return m_part_size; }
+
     private:
         int64_t m_output_dim;
         float m_spatial_scale;

@@ -48,6 +48,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         double get_alpha() const { return m_alpha; }
+
     private:
         double m_alpha;
     };

@@ -79,6 +79,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         virtual bool visit_attributes(AttributeVisitor& visitor) override { return true; }
+
     private:
         static constexpr int EMB_TABLE = 0;
         static constexpr int INDICES = 1;

@@ -80,6 +80,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;
         /// \brief Returns attributes of the operation ExperimentalDetectronDetectionOutput
         const Attributes& get_attrs() const { return m_attrs; }
+
     private:
         Attributes m_attrs;
     };

@@ -71,6 +71,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         const Attributes& get_attrs() const { return m_attrs; }
+
     private:
         Attributes m_attrs;
     };

@@ -72,6 +72,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;
         /// \brief Returns attributes of this operation.
         const Attributes& get_attrs() const { return m_attrs; }
+
     private:
         Attributes m_attrs;

@@ -68,6 +68,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;
         /// \brief Returns attributes of the operation.
         const Attributes& get_attrs() const { return m_attrs; }
+
     private:
         Attributes m_attrs;
     };

@@ -53,6 +53,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         size_t get_max_rois() const { return m_max_rois; }
+
     private:
         size_t m_max_rois;
     };

@@ -59,6 +59,7 @@ namespace ngraph
         void set_rates(const Shape& rates) { m_patch_selection_rates = rates; }
         const PadType& get_auto_pad() const { return m_padding; }
         void set_auto_pad(PadType& padding) { m_padding = padding; }
+
     private:
         Shape m_patch_sizes;
         Strides m_patch_movement_strides;

@@ -47,6 +47,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         int64_t get_axis() const { return m_axis; }
+
     private:
         int64_t m_axis;
     };

@@ -48,6 +48,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         size_t get_batch_dims() const { return m_batch_dims; }
+
     private:
         size_t m_batch_dims;
     };

@@ -153,6 +153,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         bool get_linear_before_reset() const { return m_linear_before_reset; }
+
     private:
         /// brief Add and initialize bias input to all zeros.
         void add_default_bias_input();
@@ -58,6 +58,7 @@ namespace ngraph
         bool visit_attributes(AttributeVisitor& visitor) override;
         bool get_linear_before_reset() const { return m_linear_before_reset; }
         op::RecurrentSequenceDirection get_direction() const { return m_direction; }
+
     protected:
         op::RecurrentSequenceDirection m_direction;
         bool m_linear_before_reset;

@@ -82,6 +82,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         const InterpolateAttrs& get_attrs() const { return m_attrs; }
+
     private:
         InterpolateAttrs m_attrs;
     };

@@ -229,6 +230,7 @@ namespace ngraph
                               const HostTensorVector& inputs) const override;

         const InterpolateAttrs& get_attrs() const { return m_attrs; }
+
     protected:
         /// \return The interpolation axes.
         std::vector<int64_t> get_axes() const;

@@ -282,8 +284,8 @@ namespace ngraph
         };
     } // namespace v4
     NGRAPH_SUPPRESS_DEPRECATED_START
-    using v0::InterpolateAttrs;
     using v0::Interpolate;
+    using v0::InterpolateAttrs;
     NGRAPH_SUPPRESS_DEPRECATED_END
 } // namespace op

@@ -48,6 +48,7 @@ namespace ngraph

         int64_t get_axis() const { return m_axis; }
         void set_axis(const int64_t axis) { m_axis = axis; }
+
     private:
         int64_t m_axis = 1;
     };

@@ -219,6 +219,7 @@ namespace ngraph

         bool get_input_forget() const { return m_input_forget; }
         LSTMWeightsFormat get_weights_format() const { return m_weights_format; }
+
     private:
         ///
         /// \brief Creates the default bias input initialized with zeros.

@@ -107,6 +107,7 @@ namespace ngraph
         std::int64_t get_hidden_size() const { return m_hidden_size; }
         bool get_input_forget() const { return m_input_forget; }
         LSTMWeightsFormat get_weights_format() const { return m_weights_format; }
+
     private:
         ///
         /// \brief Gets the masked value according to sequence lenght in a batch.

@@ -201,6 +202,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         direction get_direction() const { return m_direction; }
+
     private:
         direction m_direction;
     };

@@ -55,6 +55,7 @@ namespace ngraph
         bool get_transpose_b() const { return m_transpose_b; }
         void set_transpose_a(bool transpose_a) { m_transpose_a = transpose_a; }
         void set_transpose_b(bool transpose_b) { m_transpose_b = transpose_b; }
+
     private:
         bool m_transpose_a;
         bool m_transpose_b;

@@ -52,6 +52,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         const AutoBroadcastSpec& get_auto_broadcast() const { return m_auto_broadcast; }
+
     private:
         AutoBroadcastSpec m_auto_broadcast;
     };

@@ -79,6 +79,7 @@ namespace ngraph
         bool get_normalize_variance() const { return m_normalize_variance; }
         AxisSet get_reduction_axes() const { return m_reduction_axes; }
         void set_reduction_axes(AxisSet axes) { m_reduction_axes = axes; }
+
     private:
         double m_eps = 1e-9;
         bool m_across_channels;

@@ -137,6 +138,7 @@ namespace ngraph
         float get_eps() const { return m_eps; }
         bool get_normalize_variance() const { return m_normalize_variance; }
         MVNEpsMode get_eps_mode() const { return m_eps_mode; }
+
     private:
         bool m_normalize_variance = true;
         float m_eps = (float)1e-6;

@@ -58,6 +58,7 @@ namespace ngraph
         /// \return The index of the one-hot axis.
         int64_t get_axis() const { return m_axis; }
         void set_axis(int64_t axis) { m_axis = axis; }
+
     protected:
         int64_t m_axis;
     };

@@ -85,6 +85,7 @@ namespace ngraph

         static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ParameterVector>", 0};
         const DiscreteTypeInfo& get_type_info() const override { return type_info; }
+
     protected:
         ParameterVector& m_ref;
     };

@@ -63,6 +63,7 @@ namespace ngraph
         int get_spatial_bins_x() const { return m_spatial_bins_x; }
         int get_spatial_bins_y() const { return m_spatial_bins_y; }
         const std::string& get_mode() const { return m_mode; }
+
     private:
         size_t m_output_dim;
         size_t m_group_size;

@@ -69,6 +69,7 @@ namespace ngraph
         const std::vector<float>& get_anchors() const { return m_anchors; }
         int get_axis() const { return m_axis; }
         int get_end_axis() const { return m_end_axis; }
+
     private:
         size_t m_num_coords;
         size_t m_num_classes;

@@ -47,6 +47,7 @@ namespace ngraph
             clone_with_new_inputs(const OutputVector& new_args) const override;

         Strides get_strides() const { return m_strides; }
+
     private:
         Strides m_strides;
     };

@@ -70,6 +70,7 @@ namespace ngraph

         static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ResultVector>", 0};
         const DiscreteTypeInfo& get_type_info() const override { return type_info; }
+
     protected:
         ResultVector& m_ref;
     };

@@ -54,6 +54,7 @@ namespace ngraph
         size_t get_sequence_axis() const { return m_normalized_seq_axis; }
         int64_t get_origin_sequence_axis() const { return m_seq_axis; }
         void set_sequence_axis(int64_t sequence_axis) { m_seq_axis = sequence_axis; }
+
     private:
         int64_t m_batch_axis;
         int64_t m_seq_axis = 1;
@@ -58,6 +58,7 @@ namespace ngraph
         bool visit_attributes(AttributeVisitor& visitor) override;

         op::RecurrentSequenceDirection get_direction() const { return m_direction; }
+
     protected:
         op::RecurrentSequenceDirection m_direction;
     };

@@ -61,6 +61,7 @@ namespace ngraph
                               const HostTensorVector& inputs) const override;

         RoundMode get_mode() const { return m_mode; }
+
     private:
         RoundMode m_mode;
     };

@@ -93,6 +93,7 @@ namespace ngraph

         void set_alpha(float alpha) { m_alpha = alpha; }
         void set_beta(float beta) { m_beta = beta; }
+
     private:
         /// \brief Activation function wrapper.
         ActivationFunctionType m_function;

@@ -46,6 +46,7 @@ namespace ngraph
         /// For each such axis, output dimension is equal to 1.
         bool get_keep_dims() const { return m_keep_dims; }
         void set_keep_dims(bool keep_dims) { m_keep_dims = keep_dims; }
+
     private:
         bool m_keep_dims = false;
     };

@@ -368,6 +368,7 @@ namespace ngraph

         static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::AutoBroadcastSpec>", 0};
         const DiscreteTypeInfo& get_type_info() const override { return type_info; }
+
     protected:
         op::AutoBroadcastSpec& m_ref;
     };

@@ -419,6 +420,7 @@ namespace ngraph

         static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::BroadcastModeSpec>", 0};
         const DiscreteTypeInfo& get_type_info() const override { return type_info; }
+
     protected:
         op::BroadcastModeSpec& m_ref;
     };

@@ -51,6 +51,7 @@ namespace ngraph
         // Post-validation hook that will be invoked after op decomposition
         // in validate_and_infer_types().
         virtual void post_validate_and_infer_types() {}
+
     protected:
         FusedOp();

@@ -46,6 +46,7 @@ namespace ngraph
         /// For each such axis, output dimension is equal to 1.
         bool get_keep_dims() const { return m_keep_dims; }
         void set_keep_dims(bool keep_dims) { m_keep_dims = keep_dims; }
+
     private:
         bool m_keep_dims = false;
     };

@@ -58,6 +58,7 @@ namespace ngraph
         }
         bool is_cacheable() const { return m_cacheable; }
         void set_cacheable(bool val) { m_cacheable = val; }
+
     private:
         // map of output-input pairs for which in-place computation is valid
         std::vector<struct oi_pair> m_in_place_oi_pairs;

@@ -328,6 +328,7 @@ namespace ngraph
         SubGraphOp& operator=(SubGraphOp&&) = default;

         int64_t get_num_iterations() const { return m_num_iterations; }
+
     protected:
         int64_t m_num_iterations =
             -1; // -1 means infinity for Loop op, inconsistent for TensorIterator

@@ -41,6 +41,7 @@ namespace ngraph

         VariableInfo get_info() { return m_info; }
         void update(const VariableInfo& variable_info) { m_info = variable_info; }
+
     private:
         VariableInfo m_info;
     };

@@ -110,6 +110,7 @@ namespace ngraph

         const std::set<NodeTypeInfo>& get_type_info_set() const { return m_op_types; }
         ngraph::FactoryRegistry<ngraph::Node>& get_factory_registry() { return m_factory_registry; }
+
     protected:
         static std::string to_upper_name(const std::string& name)
         {

@@ -278,6 +278,7 @@ namespace ngraph
         /// to one before the first element in the shape. Iteration
         /// is done in reverse element order.
         const_reverse_iterator crend() const noexcept { return m_dimensions.crend(); }
+
     private:
         // Private constructor for PartialShape::dynamic().
         PartialShape(bool rank_is_static, const std::vector<Dimension>& dimensions);

@@ -370,6 +371,7 @@ namespace ngraph
         static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<PartialShape>", 0};
         const DiscreteTypeInfo& get_type_info() const override { return type_info; }
         operator PartialShape&() { return m_ref; }
+
     protected:
         PartialShape& m_ref;
         std::vector<int64_t> m_buffer;

@@ -95,6 +95,7 @@ namespace ngraph
         }
         void clear_new_nodes() { m_new_nodes.clear(); }
         std::shared_ptr<pattern::Matcher> get_matcher() { return m_matcher; }
+
     protected:
         void register_matcher(
             const std::shared_ptr<pattern::Matcher>& m,

@@ -109,6 +109,7 @@ namespace ngraph
         /// particular
         /// transformation. For mo details see PassConfig class.
         std::shared_ptr<PassConfig> get_pass_config() { return m_pass_config; }
+
     protected:
         template <typename T, class... Args>
         std::shared_ptr<T> push_pass(Args&&... args)

@@ -275,6 +275,7 @@ namespace ngraph

         std::shared_ptr<Node> get_match_root() { return m_match_root.get_node_shared_ptr(); }
         Output<Node> get_match_value() { return m_match_root; }
+
     private:
         Output<Node> m_initial_pattern;
         Output<Node> m_pattern;

@@ -55,12 +55,13 @@ namespace ngraph
                   const PartialShape& s,
                   NodePredicate pred,
                   const NodeVector& wrapped_values)
-                : AnyOf(type,
-                        s,
-                        [pred](const Output<Node>& value) {
-                            return pred(value.get_node_shared_ptr());
-                        },
-                        as_output_vector(wrapped_values))
+                : AnyOf(
+                      type,
+                      s,
+                      [pred](const Output<Node>& value) {
+                          return pred(value.get_node_shared_ptr());
+                      },
+                      as_output_vector(wrapped_values))
             {
             }

@@ -63,7 +63,8 @@ namespace ngraph

             explicit Label(const element::Type& type = element::dynamic,
                            const PartialShape& s = PartialShape::dynamic())
-                : Label(type, s, [](const Output<Node>&) { return true; }, OutputVector())
+                : Label(
+                      type, s, [](const Output<Node>&) { return true; }, OutputVector())
             {
             }

@@ -118,10 +119,11 @@ namespace ngraph
             {
             }
             Label(const Output<Node>& value)
-                : Label(value.get_element_type(),
-                        value.get_partial_shape(),
-                        [](const Output<Node>&) { return true; },
-                        OutputVector{})
+                : Label(
+                      value.get_element_type(),
+                      value.get_partial_shape(),
+                      [](const Output<Node>&) { return true; },
+                      OutputVector{})
             {
             }
             Label(const Output<Node>& node,

@@ -31,20 +31,20 @@ namespace ngraph
             static constexpr NodeTypeInfo type_info{"patternAnyType", 0};
             const NodeTypeInfo& get_type_info() const override;

-            explicit WrapType(NodeTypeInfo wrapped_type,
-                              const ValuePredicate& pred =
-                                  [](const Output<Node>& output) { return true; },
-                              const OutputVector& input_values = {})
+            explicit WrapType(
+                NodeTypeInfo wrapped_type,
+                const ValuePredicate& pred = [](const Output<Node>& output) { return true; },
+                const OutputVector& input_values = {})
                 : Pattern(input_values, pred)
                 , m_wrapped_types({wrapped_type})
             {
                 set_output_type(0, element::Type_t::dynamic, PartialShape::dynamic());
             }

-            explicit WrapType(std::vector<NodeTypeInfo> wrapped_types,
-                              const ValuePredicate& pred =
-                                  [](const Output<Node>& output) { return true; },
-                              const OutputVector& input_values = {})
+            explicit WrapType(
+                std::vector<NodeTypeInfo> wrapped_types,
+                const ValuePredicate& pred = [](const Output<Node>& output) { return true; },
+                const OutputVector& input_values = {})
                 : Pattern(input_values, pred)
                 , m_wrapped_types(std::move(wrapped_types))
                 {
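
Aside: because the predicate parameter defaults to an accept-everything lambda, callers can still pass their own. A hypothetical usage sketch built from the constructor shown above (the op type and pass context are assumptions, not part of this diff):

    // Match any Add node whose output shape is fully static.
    auto pattern = std::make_shared<pattern::op::WrapType>(
        op::v1::Add::type_info,
        [](const Output<Node>& output) { return output.get_partial_shape().is_static(); });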

@@ -91,6 +91,7 @@ namespace ngraph
         /// \brief notify tensor of new data, call may block.
         /// backends may use this as indication of new data in tensor.
         virtual void wait_for_write_ready() {}
+
     protected:
         std::shared_ptr<ngraph::descriptor::Tensor> m_descriptor;
         bool m_stale;

@@ -117,6 +117,7 @@ namespace ngraph
         }

         static uint16_t truncate(float x) { return static_cast<uint16_t>((cu32(x)) >> 16); }
+
     private:
         constexpr bfloat16(uint16_t x, bool)
             : m_value{x}
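
Aside: the truncate() above is the classic float32-to-bfloat16 conversion, since bfloat16 is exactly the upper 16 bits of an IEEE-754 float32 (same sign and exponent, shortened mantissa). A self-contained C++ sketch of the same idea, with memcpy standing in for the class's cu32() bit-view helper:

    #include <cstdint>
    #include <cstring>

    // Truncating float32 -> bfloat16: keep the high 16 bits of the bit pattern.
    uint16_t to_bf16_truncate(float x)
    {
        uint32_t bits;
        std::memcpy(&bits, &x, sizeof bits); // bit-level view of the float
        return static_cast<uint16_t>(bits >> 16);
    }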

@@ -120,6 +120,7 @@ namespace ngraph

         // \brief This allows switch(element_type)
         constexpr operator Type_t() const { return m_type; }
+
     private:
         Type_t m_type{Type_t::undefined};
     };

@@ -210,6 +211,7 @@ namespace ngraph
         static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<element::Type>", 0};
         const DiscreteTypeInfo& get_type_info() const override { return type_info; }
         operator element::Type&() { return m_ref; }
+
     protected:
         element::Type& m_ref;
     };

@@ -50,6 +50,7 @@ namespace ngraph
         const value_type& get() const { return m_value; }
         value_type& get() { return m_value; }
         void set(const value_type& value) { m_value = value; }
+
     protected:
         value_type m_value;
     };

@@ -154,6 +154,7 @@ namespace ngraph
         bool increment();

         bool is_valid() const noexcept { return !has_zeros(m_source_shape); }
+
     private:
         const Shape m_source_shape;
         const CoordinateBounds m_bounds;

@@ -200,6 +201,7 @@ namespace ngraph
         bool increment();

         bool is_valid() const noexcept { return !has_zeros(m_source_shape); }
+
     private:
         const Shape m_source_shape;
         const std::vector<size_t> m_memory_strides;

@@ -50,9 +50,9 @@ namespace ngraph
                     std::string("Logit or label length cannot greater than max sequence"
                                 "length. Also a label length cannot be greater than a"
                                 "logit length.\nMaxSeqLen: ") +
-                    std::to_string(maxTime) + "; Logit len: " +
-                    std::to_string(actualLogitLen) + "; Label len: " +
-                    std::to_string(actualTargetLen));
+                    std::to_string(maxTime) +
+                    "; Logit len: " + std::to_string(actualLogitLen) +
+                    "; Label len: " + std::to_string(actualTargetLen));
             }

             const U* target = &labels[b * maxTime];

@@ -118,85 +118,86 @@ namespace ngraph
            T res = -type_inf;

            // Looking for aligned paths
-           std::function<void(size_t, size_t, size_t, T)> findPaths = [&](
-               size_t targetIdx, size_t start, size_t end, T prevLogProb) {
+           std::function<void(size_t, size_t, size_t, T)> findPaths =
+               [&](size_t targetIdx, size_t start, size_t end, T prevLogProb) {
                   if (end > actualLogitLen)
                   {
                       if (res == -type_inf)
                       {
                           res = prevLogProb;
                       }
                       else if (prevLogProb != -type_inf)
                       {
                           if (res > prevLogProb)
                               res = res + std::log1pf(std::exp(prevLogProb - res));
                           else
                               res = prevLogProb + std::log1pf(std::exp(res - prevLogProb));
                       }
                       return;
                   }

                   size_t nextIdx = targetIdx + 1;
                   int64_t st64 = start;
                   T newLogProb = prevLogProb;
                   if (!ctcMergeRepeated)
                   {
                       for (size_t pos = start; pos < end; pos++)
                       {
                           newLogProb = prevLogProb;
                           for (size_t bl = start; bl < pos; bl++)
                           {
                               newLogProb += logProbabilities[bl].find(blankIndex)->second;
                           }
                           newLogProb +=
                               logProbabilities[pos].find(targetD[targetIdx])->second;
                           if (end == actualLogitLen)
                           {
                               for (int64_t ble = pos + 1; ble < actualLogitLen; ble++)
                               {
                                   newLogProb +=
                                       logProbabilities[ble].find(blankIndex)->second;
                               }
                           }
                           findPaths(nextIdx, pos + 1, end + 1, newLogProb);
                       }
                   }
                   else
                   {
                       for (size_t pos = start; pos < end; pos++)
                       {
                           newLogProb = prevLogProb;
                           size_t next_start = pos + 1;
                           for (size_t bl = start; bl < pos; bl++)
                           {
                               newLogProb += logProbabilities[bl].find(blankIndex)->second;
                           }
                           if (end == actualLogitLen)
                           {
                               for (int64_t ble = pos + 1; ble < actualLogitLen; ble++)
                               {
                                   newLogProb +=
                                       logProbabilities[ble].find(blankIndex)->second;
                               }
                           }
                           if (targetIdx < decodedTargetLen - 1 &&
                               targetD[targetIdx] == targetD[targetIdx + 1])
                           {
                               newLogProb +=
                                   logProbabilities[next_start++].find(blankIndex)->second;
                           }
                           for (int64_t bl = pos; bl >= st64; bl--)
                           {
                               newLogProb +=
                                   logProbabilities[bl].find(targetD[targetIdx])->second;
                               findPaths(nextIdx, next_start, end + 1, newLogProb);
                               if (bl > 0)
                                   newLogProb -=
                                       logProbabilities[bl - 1].find(blankIndex)->second;
                           }
                       }
                   }
               }; // findPaths

            findPaths(0lu, 0lu, actualLogitLen - decodedTargetLen + 1lu, 0.f);
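
Aside: the res/prevLogProb update inside findPaths is a numerically stable log-sum-exp of two log-probabilities; the larger operand is kept as the base so exp() never overflows. Isolated as a helper (a sketch, not code from this file):

    #include <cmath>
    #include <utility>

    // log(exp(a) + exp(b)) computed without overflowing exp().
    float log_add(float a, float b)
    {
        if (a < b)
            std::swap(a, b);                    // larger value becomes the base
        return a + std::log1p(std::exp(b - a)); // exp(b - a) <= 1
    }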

@@ -70,7 +70,6 @@ namespace ngraph
            size_t output_index,
            T& prev,
            std::vector<std::pair<size_t, T>>& tensor_vec) -> void {
-
            tensor_vec[input_index].second = prev + tensor_vec[input_index].second;
            out[tensor_vec[output_index].first] = tensor_vec[input_index].second;

@@ -45,8 +45,8 @@ namespace ngraph
                throw ngraph_error(
                    std::string(
                        "Offset value exceeds indices size in the model.\noffset: ") +
-                   std::to_string(offsets[emb_index]) + "; indices size: " +
-                   std::to_string(indices_count));
+                   std::to_string(offsets[emb_index]) +
+                   "; indices size: " + std::to_string(indices_count));

            indices_ref = nullptr;
            indices_num = 0lu;

@@ -191,8 +191,10 @@ namespace ngraph
        };
        break;
    case Transform_mode::align_corners:
-       return [](
-           float x_resized, float, float length_resized, float length_original) {
+       return [](float x_resized,
+                 float,
+                 float length_resized,
+                 float length_original) {
            return length_resized == 1
                       ? 0
                       : x_resized * (length_original - 1) / (length_resized - 1);

@@ -139,8 +139,9 @@ namespace ngraph
                             4,
                             pointers.data());

-       auto clip_activation = [&clip](
-           std::vector<T>& gate, const std::string& activation, bool enable_clip = true) {
+       auto clip_activation = [&clip](std::vector<T>& gate,
+                                      const std::string& activation,
+                                      bool enable_clip = true) {
            if (clip > 0.f && enable_clip)
            {
                reference::clamp(gate.data(),

@@ -107,8 +107,11 @@ namespace ngraph
            step_y = step;
        }

-       auto calculate_data = [&dst_data, &IWI, &IHI, &idx](
-           float center_x, float center_y, float box_width, float box_height, bool clip) {
+       auto calculate_data = [&dst_data, &IWI, &IHI, &idx](float center_x,
+                                                           float center_y,
+                                                           float box_width,
+                                                           float box_height,
+                                                           bool clip) {
            if (clip)
            {
                // order: xmin, ymin, xmax, ymax

@@ -158,10 +158,10 @@ namespace ngraph
            float bin_start_w = start_w + sbx * bin_width;
            float bin_start_h = start_h + sby * bin_height;

-           const T* input_offset = input +
-                                   (batch_id * channels_in +
-                                    c_in * channels_out + c_out) *
-                                   height * width;
+           const T* input_offset =
+               input + (batch_id * channels_in +
+                        c_in * channels_out + c_out) *
+                           height * width;
            float point_x =
                pooling_width > 1
                    ? (pw * width_scale + bin_start_w * (width - 1))

@@ -188,9 +188,8 @@ namespace ngraph
            T bottom_right =
                input_offset[bottom * width + right];

-           T top_interp =
-               top_left +
-               (top_right - top_left) * (point_x - left);
+           T top_interp = top_left + (top_right - top_left) *
+                                         (point_x - left);
            T bottom_interp =
                bottom_left +
                (bottom_right - bottom_left) * (point_x - left);

@@ -50,9 +50,9 @@ namespace ngraph

            size_t output_index = output_transform.index(output_coord);

-           out[output_index] = out[output_index] +
-                               arg[input_transform.index(input_coord)] *
-                               arg[input_transform.index(input_coord)];
+           out[output_index] =
+               out[output_index] + arg[input_transform.index(input_coord)] *
+                                       arg[input_transform.index(input_coord)];
        }
        for (const Coordinate& output_coord : output_transform)
        {

@@ -211,9 +211,8 @@ namespace ngraph
            const T bottom_left = feature_maps[bottom_left_idx];
            const T bottom_right = feature_maps[bottom_right_idx];

-           const T top =
-               top_left +
-               (top_right - top_left) * (in_x - left_x_index);
+           const T top = top_left + (top_right - top_left) *
+                                        (in_x - left_x_index);
            const T bottom =
                bottom_left +
                (bottom_right - bottom_left) * (in_x - left_x_index);

@@ -45,7 +45,8 @@
    public:
        static const Xbyak::Reg64 param;

-       typedef enum {
+       typedef enum
+       {
            isa_any,
            sse42,
            avx,

@@ -45,13 +45,9 @@ ngraph::AxisVector::AxisVector(size_t n)
 {
 }

-ngraph::AxisVector::AxisVector()
-{
-}
+ngraph::AxisVector::AxisVector() {}

-ngraph::AxisVector::~AxisVector()
-{
-}
+ngraph::AxisVector::~AxisVector() {}

 ngraph::AxisVector& ngraph::AxisVector::operator=(const AxisVector& v)
 {

@@ -28,9 +28,7 @@ std::ostream& ngraph::operator<<(std::ostream& s, const Coordinate& coordinate)
     return s;
 }

-ngraph::Coordinate::Coordinate()
-{
-}
+ngraph::Coordinate::Coordinate() {}

 ngraph::Coordinate::Coordinate(const std::initializer_list<size_t>& axes)
     : std::vector<size_t>(axes)

@@ -57,9 +55,7 @@ ngraph::Coordinate::Coordinate(size_t n, size_t initial_value)
 {
 }

-ngraph::Coordinate::~Coordinate()
-{
-}
+ngraph::Coordinate::~Coordinate() {}

 ngraph::Coordinate& ngraph::Coordinate::operator=(const Coordinate& v)
 {

@@ -48,13 +48,9 @@ ngraph::CoordinateDiff::CoordinateDiff(size_t n, std::ptrdiff_t initial_value)
 {
 }

-ngraph::CoordinateDiff::CoordinateDiff()
-{
-}
+ngraph::CoordinateDiff::CoordinateDiff() {}

-ngraph::CoordinateDiff::~CoordinateDiff()
-{
-}
+ngraph::CoordinateDiff::~CoordinateDiff() {}

 ngraph::CoordinateDiff& ngraph::CoordinateDiff::operator=(const CoordinateDiff& v)
 {
@@ -230,19 +230,20 @@ void file_util::iterate_files(const string& path,
        FindClose(hFind);
    }
 #else
-   iterate_files_worker(path,
-                        [&files, &dirs](const string& file, bool is_dir) {
-                            if (is_dir)
-                            {
-                                dirs.push_back(file);
-                            }
-                            else
-                            {
-                                files.push_back(file);
-                            }
-                        },
-                        recurse,
-                        include_links);
+   iterate_files_worker(
+       path,
+       [&files, &dirs](const string& file, bool is_dir) {
+           if (is_dir)
+           {
+               dirs.push_back(file);
+           }
+           else
+           {
+               files.push_back(file);
+           }
+       },
+       recurse,
+       include_links);
 #endif

    for (auto f : files)

@@ -637,7 +637,8 @@ NodeVector ngraph::get_subgraph_outputs(const NodeVector& nodes,
 NodeVector ngraph::extract_subgraph(const NodeVector& results, const NodeVector& args)
 {
    NodeVector subgraph;
-   traverse_nodes(results, [&](std::shared_ptr<Node> n) { subgraph.push_back(n); }, args);
+   traverse_nodes(
+       results, [&](std::shared_ptr<Node> n) { subgraph.push_back(n); }, args);
    return subgraph;
 }

@@ -253,9 +253,7 @@ void Node::invalidate_values()
        output.get_tensor().invalidate_values();
 }

-void Node::validate_and_infer_types()
-{
-}
+void Node::validate_and_infer_types() {}

 void Node::set_input_is_relevant_to_shape(size_t i, bool relevant)
 {

@@ -152,15 +152,15 @@ namespace ngraph
    bool Input<const Node>::operator>=(const Input& other) const { return !(*this < other); }
    std::ostream& operator<<(std::ostream& out, const Input<Node>& input)
    {
-       return input.get_node()->write_description(out, 0) << ".input(" << input.get_index()
-                                                          << "):" << input.get_element_type()
-                                                          << input.get_partial_shape();
+       return input.get_node()->write_description(out, 0)
+              << ".input(" << input.get_index() << "):" << input.get_element_type()
+              << input.get_partial_shape();
    }

    std::ostream& operator<<(std::ostream& out, const Input<const Node>& input)
    {
-       return input.get_node()->write_description(out, 0) << ".input(" << input.get_index()
-                                                          << "):" << input.get_element_type()
-                                                          << input.get_partial_shape();
+       return input.get_node()->write_description(out, 0)
+              << ".input(" << input.get_index() << "):" << input.get_element_type()
+              << input.get_partial_shape();
    }
 }

@@ -177,15 +177,15 @@ namespace ngraph
    bool Output<const Node>::operator>=(const Output& other) const { return !(*this < other); }
    std::ostream& operator<<(std::ostream& out, const Output<Node>& output)
    {
-       return output.get_node()->write_description(out, 0) << "[" << output.get_index()
-                                                           << "]:" << output.get_element_type()
-                                                           << output.get_partial_shape();
+       return output.get_node()->write_description(out, 0)
+              << "[" << output.get_index() << "]:" << output.get_element_type()
+              << output.get_partial_shape();
    }

    std::ostream& operator<<(std::ostream& out, const Output<const Node>& output)
    {
-       return output.get_node()->write_description(out, 0) << "[" << output.get_index()
-                                                           << "]:" << output.get_element_type()
-                                                           << output.get_partial_shape();
+       return output.get_node()->write_description(out, 0)
+              << "[" << output.get_index() << "]:" << output.get_element_type()
+              << output.get_partial_shape();
    }
 }

@@ -179,7 +179,9 @@ op::Constant::Constant(const element::Type& type,
    {
        throw std::runtime_error("deserialize unsupported type dynamic");
    }
-   case element::Type_t::u1: { throw std::runtime_error("deserialize unsupported type u1");
+   case element::Type_t::u1:
+   {
+       throw std::runtime_error("deserialize unsupported type u1");
    }
    }
    m_all_elements_bitwise_identical = true;

@@ -328,9 +330,7 @@ op::Constant::Constant(const Constant& other)
    constructor_validate_and_infer_types();
 }

-op::Constant::~Constant()
-{
-}
+op::Constant::~Constant() {}

 string op::Constant::convert_value_to_string(size_t index) const
 {

@@ -163,8 +163,8 @@ void op::DetectionOutput::validate_and_infer_types()
            this,
            proposals_1st_dim == 1 || proposals_1st_dim == num_images_val,
            "Proposals' first dimension is must be equal to either batch size (" +
-               std::to_string(num_images_val) + ") or 1. Got: " +
-               std::to_string(proposals_1st_dim) + ".");
+               std::to_string(num_images_val) +
+               ") or 1. Got: " + std::to_string(proposals_1st_dim) + ".");
    }
    if (proposals_pshape[1].is_static())
    {

@@ -118,11 +118,11 @@ void op::PSROIPooling::validate_and_infer_types()
                              0,
                              "Number of input's channels must be a multiply of "
                              "spatial_bins_x * spatial_bins_y");
-       NODE_VALIDATION_CHECK(
-           this,
-           m_output_dim == num_input_channels / (m_spatial_bins_x * m_spatial_bins_y),
-           "output_dim must be equal to input channels divided by "
-           "spatial_bins_x * spatial_bins_y");
+       NODE_VALIDATION_CHECK(this,
+                             m_output_dim == num_input_channels /
+                                                 (m_spatial_bins_x * m_spatial_bins_y),
+                             "output_dim must be equal to input channels divided by "
+                             "spatial_bins_x * spatial_bins_y");
    }
 }
 std::vector<Dimension> output_shape{coords_pshape[0],

@@ -20,6 +20,4 @@ using namespace ngraph;

 NGRAPH_RTTI_DEFINITION(op::Sink, "Sink", 0);

-op::Sink::~Sink()
-{
-}
+op::Sink::~Sink() {}

@@ -192,7 +192,9 @@ bool ngraph::op::v0::SpaceToDepth::evaluate_space_to_depth(const HostTensorVecto
    //                   /
    //                   block_size])
    case SpaceToDepthMode::BLOCKS_FIRST:
-   default: { axes_order.insert(axes_order.begin() + spatial_dims + 1, 1);
+   default:
+   {
+       axes_order.insert(axes_order.begin() + spatial_dims + 1, 1);
    }
    }
    std::vector<char> transposed_data(shape_size(data_shape) * elem_size);
@@ -22,9 +22,7 @@
 using namespace std;
 using namespace ngraph;

-op::util::ArithmeticReduction::ArithmeticReduction()
-{
-}
+op::util::ArithmeticReduction::ArithmeticReduction() {}

 op::util::ArithmeticReduction::ArithmeticReduction(const Output<Node>& arg,
                                                    const AxisSet& reduction_axes)

@@ -24,9 +24,7 @@ using namespace ngraph;

 NGRAPH_RTTI_DEFINITION(op::util::BinaryElementwiseLogical, "BinaryElementwiseLogical", 0);

-op::util::BinaryElementwiseLogical::BinaryElementwiseLogical()
-{
-}
+op::util::BinaryElementwiseLogical::BinaryElementwiseLogical() {}

 op::util::BinaryElementwiseLogical::BinaryElementwiseLogical(const Output<Node>& arg0,
                                                              const Output<Node>& arg1,

@@ -23,9 +23,7 @@
 using namespace std;
 using namespace ngraph;

-op::util::IndexReduction::IndexReduction()
-{
-}
+op::util::IndexReduction::IndexReduction() {}

 op::util::IndexReduction::IndexReduction(const Output<Node>& arg,
                                          uint64_t axis,

@@ -22,9 +22,7 @@
 using namespace std;
 using namespace ngraph;

-op::util::LogicalReduction::LogicalReduction()
-{
-}
+op::util::LogicalReduction::LogicalReduction() {}

 op::util::LogicalReduction::LogicalReduction(const Output<Node>& arg, const AxisSet& reduction_axes)
     : Op({arg,

@@ -83,9 +83,7 @@ pass::Manager::Manager()
 {
 }

-pass::Manager::~Manager()
-{
-}
+pass::Manager::~Manager() {}

 pass::Manager::Manager(std::shared_ptr<ngraph::pass::PassConfig> pass_config)
     : m_pass_config(std::move(pass_config))

@@ -79,10 +79,6 @@ void pass::PassBase::set_callback(const param_callback& callback)

 // The symbols are requiered to be in cpp file to workaround RTTI issue on Android LLVM

-pass::FunctionPass::~FunctionPass()
-{
-}
+pass::FunctionPass::~FunctionPass() {}

-pass::NodePass::~NodePass()
-{
-}
+pass::NodePass::~NodePass() {}

@@ -259,11 +259,13 @@ void pass::VisualizeTree::add_node_arguments(shared_ptr<Node> node,
                m_ss << add_attributes(node);
                auto recv_node_name = "RECV_" + to_string(fake_node_ctr);
                auto send_node_name = "SEND_" + to_string(fake_node_ctr);
-               m_ss << "    " << recv_node_name << "[shape=\"box\" style=\"solid,filled\" "
-                                                   "fillcolor=\"#ffcccc\" label=\"Receive["
+               m_ss << "    " << recv_node_name
+                    << "[shape=\"box\" style=\"solid,filled\" "
+                       "fillcolor=\"#ffcccc\" label=\"Receive["
                     << arg->get_name() << "]\"]\n";
-               m_ss << "    " << send_node_name << "[shape=\"box\" style=\"solid,filled\" "
-                                                   "fillcolor=\"#ccffcc\" label=\"Send["
+               m_ss << "    " << send_node_name
+                    << "[shape=\"box\" style=\"solid,filled\" "
+                       "fillcolor=\"#ccffcc\" label=\"Send["
                     << node->get_name() << "]\"]\n";
                m_ss << "    " << arg->get_name() << " -> " << send_node_name
                     << label_edge(arg, node, arg_index, jump_distance) << "\n";

@@ -32,8 +32,7 @@ bool pattern::op::Skip::match_value(Matcher* matcher,
                                    const Output<Node>& graph_value)
 {
    matcher->add_node(graph_value);
-   return m_predicate(graph_value)
-              ? matcher->match_arguments(pattern_value.get_node(),
-                                         graph_value.get_node_shared_ptr())
-              : matcher->match_value(input_value(0), graph_value);
+   return m_predicate(graph_value) ? matcher->match_arguments(pattern_value.get_node(),
+                                                              graph_value.get_node_shared_ptr())
+                                   : matcher->match_value(input_value(0), graph_value);
 }

@@ -53,9 +53,7 @@ ngraph::Shape::Shape(size_t n, size_t initial_value)
 {
 }

-ngraph::Shape::~Shape()
-{
-}
+ngraph::Shape::~Shape() {}

 ngraph::Shape& ngraph::Shape::operator=(const Shape& v)
 {

@@ -22,9 +22,7 @@ using namespace ngraph;
 constexpr VariantTypeInfo VariantWrapper<std::string>::type_info;
 constexpr VariantTypeInfo VariantWrapper<int64_t>::type_info;

-Variant::~Variant()
-{
-}
+Variant::~Variant() {}

 std::shared_ptr<ngraph::Variant> Variant::init(const std::shared_ptr<ngraph::Node>& node)
 {

@@ -87,9 +87,9 @@ namespace
            }
        }

-       throw ngraph::ngraph_error{"Source node not found in the graph for node: " +
-                                  std::to_string(current_node_idx) + " and input name: " +
-                                  input_name};
+       throw ngraph::ngraph_error{
+           "Source node not found in the graph for node: " + std::to_string(current_node_idx) +
+           " and input name: " + input_name};
    }

    /// \brief Looks up a descriptor for a given tensor name. This descriptor contains inferred

@@ -272,9 +272,9 @@ void onnx_import::ONNXModelEditor::set_input_types(
        }
        else
        {
-           throw ngraph_error("Could not set a custom element type for input: " +
-                              input_desc.first +
-                              ". Such input was not found in the original ONNX model.");
+           throw ngraph_error(
+               "Could not set a custom element type for input: " + input_desc.first +
+               ". Such input was not found in the original ONNX model.");
        }
    }
 }

@@ -36,7 +36,6 @@
 #include "ngraph/shape.hpp"
 #include "ngraph/type/element_type.hpp"
 #include "op/lstm.hpp"
-#include "op/lstm.hpp"

 namespace ngraph
 {

@@ -251,8 +250,9 @@ namespace ngraph

            if (m_input_forget != 0)
            {
-               NGRAPH_WARN << (node) << " Attribute `input_forget` is not supported "
-                                        "and will be ignored ";
+               NGRAPH_WARN << (node)
+                           << " Attribute `input_forget` is not supported "
+                              "and will be ignored ";
            }
        }
Some files were not shown because too many files have changed in this diff.