MSVC warnings fix (#2620)
* Fix MSVC build warnings in Ngraph
* Fix MSVC build warnings in transformations library
* Fix MSVC build warnings in core, legacy, preprocessing
* Fix MSVC build warnings in XLink
* Fix MSVC build warnings in Myriad plugin
parent d36bd8c87b
commit a6520995fe
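Most of the hunks below apply one mechanical pattern: MSVC at its default warning levels flags implicit narrowing conversions (C4244, and C4267 for size_t) and signed/unsigned comparisons (C4018) that GCC and Clang stay quiet about by default, so size_t/int arithmetic that built cleanly on Linux warns on Windows. A minimal illustration of the two most common cases and the static_cast/float-literal fix used throughout this commit (helper names are invented, nothing here is from the patch):

#include <vector>

int count_items(const std::vector<int>& v) {
    return v.size();                    // C4267: conversion from 'size_t' to 'int'
}

int count_items_fixed(const std::vector<int>& v) {
    return static_cast<int>(v.size());  // explicit narrowing, no warning
}

float halve(float x) {
    return x * 0.5;                     // C4244: 'double' to 'float' (0.5 is a double literal)
}

float halve_fixed(float x) {
    return x * 0.5f;                    // float literal keeps the math in float
}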
@@ -37,7 +37,7 @@ std::streampos InferenceEngine::details::BlobStream::BlobBuffer::seekoff(std::st
         setg(eback(), eback() + off, egptr());
         break;
     case std::ios_base::cur:
-        gbump(off);
+        gbump(static_cast<int>(off));
         break;
     case std::ios_base::end:
         setg(eback(), egptr() + off, egptr());

@@ -44,7 +44,8 @@ Blob::Ptr Blob::CreateFromData(const DataPtr& data) {
     }
 }
 
-struct Data::Impl {
+class Data::Impl {
+public:
     /**
      * @brief A pointer to the layer that creates this data element, null for input data elements
      */

@@ -52,9 +52,9 @@ void IStreamsExecutor::Config::SetConfig(const std::string& key, const std::stri
         }
     } else if (key == CONFIG_KEY(CPU_THROUGHPUT_STREAMS)) {
         if (value == CONFIG_VALUE(CPU_THROUGHPUT_NUMA)) {
-            _streams = getAvailableNUMANodes().size();
+            _streams = static_cast<int>(getAvailableNUMANodes().size());
         } else if (value == CONFIG_VALUE(CPU_THROUGHPUT_AUTO)) {
-            const int sockets = getAvailableNUMANodes().size();
+            const int sockets = static_cast<int>(getAvailableNUMANodes().size());
             // bare minimum of streams (that evenly divides available number of core)
             const int num_cores = sockets == 1 ? std::thread::hardware_concurrency() : getNumberOfCPUCores();
             if (0 == num_cores % 4)

@@ -149,4 +149,4 @@ IStreamsExecutor::Config IStreamsExecutor::Config::MakeDefaultMultiThreaded(cons
     return streamExecutorConfig;
 }
 
-}  // namespace InferenceEngine
\ No newline at end of file
+}  // namespace InferenceEngine
@@ -109,7 +109,7 @@ ngraph::graph_rewrite_callback get_callback() {
        3. data_shape{64, 64} and const_shape{1, 1, 1} - constant broadcasts data_shape with additional dimension
     */
     auto constant_broadcast_output = [](const ngraph::PartialShape & data_pshape, const ngraph::Shape & const_shape) -> bool {
-        if (data_pshape.rank().is_dynamic() || const_shape.size() > data_pshape.rank().get_length()) {
+        if (data_pshape.rank().is_dynamic() || const_shape.size() > static_cast<size_t>(data_pshape.rank().get_length())) {
            return true;
        }
 

@@ -276,11 +276,11 @@ ngraph::graph_rewrite_callback get_callback() {
         // In case Add we create fake scale equal to 1, in case of Multiply we create fake shift equal to 0
         std::shared_ptr<ngraph::op::PowerIE> power;
         if (std::is_same<T, ngraph::opset1::Add>()) {
-            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1., 1., value, lin_op->get_output_element_type(0));
+            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1.0f, 1.0f, value, lin_op->get_output_element_type(0));
         } else if (std::is_same<T, ngraph::opset1::Multiply>()) {
-            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1., value, 0., lin_op->get_output_element_type(0));
+            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1.0f, value, 0.0f, lin_op->get_output_element_type(0));
         } else if (std::is_same<T, ngraph::opset1::Subtract>()) {
-            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1., 1., -value, lin_op->get_output_element_type(0));
+            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1.0f, 1.0f, -value, lin_op->get_output_element_type(0));
         } else {
             return false;
         }
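PowerIE's power, scale, and shift parameters are float, so the bare double literals above (1., 0.) narrowed on every construction and drew C4244 under MSVC; the 1.0f/0.0f forms keep the arguments in float. The same double-to-float literal cleanup recurs in the getMin/getMax and NormalizeL2 hunks further down. The rule in isolation (hypothetical float-taking API, not from the patch):

void apply_scale(float scale);  // hypothetical

void caller() {
    apply_scale(1.);    // 1. is a double literal: C4244, possible loss of data
    apply_scale(1.0f);  // stays float end to end, no warning
}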
@@ -73,7 +73,7 @@ private:
 
         // Check that Weights[O, C*H*W] consistent with Input[N, C, H, W]
         auto shape_w = fc->input_value(1).get_shape();
-        if (shape_in[0] != shape_out[0] || std::accumulate(shape_in.begin() + 1, shape_in.end(), 1UL, std::multiplies<size_t>()) != shape_w[1]) {
+        if (shape_in[0] != shape_out[0] || std::accumulate(shape_in.begin() + 1, shape_in.end(), size_t{1}, std::multiplies<size_t>()) != shape_w[1]) {
            return false;
        }
 
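The accumulate change matters because the init argument fixes the accumulator type, not the element type: 1UL is unsigned long, which is 32-bit on 64-bit Windows, so the fold ran in 32 bits and the result drew a conversion warning when compared against a 64-bit size_t. A standalone sketch of the pitfall, assuming nothing beyond the standard library:

#include <numeric>
#include <vector>

size_t product_of(const std::vector<size_t>& dims) {
    // With init = 1UL the accumulator is 'unsigned long' (32-bit on Win64),
    // so each step converts size_t back down and MSVC warns:
    //   std::accumulate(dims.begin(), dims.end(), 1UL, std::multiplies<size_t>());
    // size_t{1} keeps the whole fold in size_t:
    return std::accumulate(dims.begin(), dims.end(), size_t{1}, std::multiplies<size_t>());
}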
@@ -372,7 +372,10 @@ static CNNLayerPtr replace_with_static_reshape(CNNLayerPtr &layer) {
     // tensor statistic for particular reshape.
     auto reshape = std::make_shared<ReshapeLayer>(
             LayerParams{layer->name, "Reshape", precision});
-    reshape->shape = std::vector<int>(shape.begin(), shape.end());
+
+    reshape->shape.resize(shape.size());
+    for (size_t p = 0; p < shape.size(); ++p)
+        reshape->shape[p] = static_cast<int>(shape[p]);
 
     // replacement
     auto &input_to_map = getInputTo(in_data);

@@ -1532,7 +1532,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::PriorBoxIE>::createLayer(const std::shar
     auto img_H = img_shape[2];
     auto data_H = data_shape[2];
     if (attr.step == -1)
-        attr.step = 1. * img_H / data_H;
+        attr.step = static_cast<float>(1. * img_H / data_H);
     else
         attr.step *= img_H;
     for (auto& size : attr.min_size)

@@ -50,9 +50,9 @@ Paddings getPaddingsInternal(const Layer& layer) {
     if (shape_size < 4 || shape_size > 5) THROW_IE_EXCEPTION << "input shape must be 4D or 5D";
 
     std::vector<int> shapes;
-    shapes.push_back(shape[shape_size - 1]);
-    shapes.push_back(shape[shape_size - 2]);
-    if (shape_size > 4) shapes.push_back(shape[shape_size - 3]);
+    shapes.push_back(static_cast<int>(shape[shape_size - 1]));
+    shapes.push_back(static_cast<int>(shape[shape_size - 2]));
+    if (shape_size > 4) shapes.push_back(static_cast<int>(shape[shape_size - 3]));
 
     PropertyVector<unsigned int> pad_begin, pad_end;
 

@@ -134,8 +134,8 @@ int getNumIteration(const TensorIterator& tensorIterator) {
                 << rule.axis << ", dimensions number = " << dimensions.size() << " (out of range)";
         }
         const auto space = dimensions[axis];
-        const int start = (rule.start < 0 ? (space + 1) : 0) + rule.start;
-        const int end = (rule.end < 0 ? (space + 1) : 0) + rule.end;
+        const int start = static_cast<int>((rule.start < 0 ? (space + 1) : 0) + rule.start);
+        const int end = static_cast<int>((rule.end < 0 ? (space + 1) : 0) + rule.end);
 
         const auto stride = rule.stride;
         if (stride == 0) {

@@ -199,7 +199,7 @@ inline bool is_full_ranged(const TensorIterator::PortMap& rule, const DataPtr& d
     if (rule.axis == -1 || !one_of(rule.stride, 1, -1)) return false;
 
     auto& shape = data->getDims();
-    int size = shape[rule.axis];
+    int size = static_cast<int>(shape[rule.axis]);
 
     int begin = rule.start >= 0 ? rule.start : size + rule.start + 1;
     int end = rule.end >= 0 ? rule.end : size + rule.end + 1;

@@ -406,7 +406,7 @@ bool convertToRNNSeq(CNNLayerPtr cur, const N& net) {
 
     // Check port mapping
     auto _indx_in = [&](const std::vector<DataPtr>& scope, const DataPtr& data) {
-        int indx = std::find(scope.begin(), scope.end(), data) - scope.begin();
+        int indx = static_cast<int>(std::find(scope.begin(), scope.end(), data) - scope.begin());
         return indx == scope.size() ? -1 : indx;
     };
 

@@ -670,7 +670,7 @@ static CNNLayerPtr _fc(std::string name, Precision prc, SizeVector dims, Blob::P
 
     res->_weights = W;
     res->_biases = B;
-    res->_out_num = dims[1];
+    res->_out_num = static_cast<unsigned>(dims[1]);
     res->blobs["weights"] = W;
     res->blobs["biases"] = B;
     res->params["out-size"] = std::to_string(dims[1]);

@@ -945,7 +945,7 @@ static bool unrollLSTMCellBody(CNNLayerPtr cur) {
 
     // operations
     auto concat = _concat(name + ":concat", prc, {N, D + S}, 2);
-    auto split = _split(name + ":split", prc, {N, S}, G);
+    auto split = _split(name + ":split", prc, {N, S}, static_cast<int>(G));
     auto fc = _fc(name + ":fc", prc, {N, S * G}, cell->_weights, cell->_biases);
 
     const std::string _f = cell->activations[0], _g = cell->activations[1], _h = cell->activations[2];
@@ -38,7 +38,7 @@ void op::CropIE::validate_and_infer_types() {
 
     ngraph::Shape output_shape(input_shape);
     for (int i = 0; i < axes.size(); ++i) {
-        NODE_VALIDATION_CHECK(this, axes[i] >= 0 && axes[i] < output_shape.size(),
+        NODE_VALIDATION_CHECK(this, axes[i] >= 0 && axes[i] < static_cast<int64_t>(output_shape.size()),
                               "axes should be positive and less than number of input dims");
         output_shape[axes[i]] = dim[i];
     }

@@ -44,8 +44,9 @@ void op::Interp::validate_and_infer_types() {
                 scale /= m_attrs.shrink_factor;
             }
         }
-        output_shape[2] = input_shape[2] * scale;
-        output_shape[3] = input_shape[3] * scale;
+
+        output_shape[2] = static_cast<Shape::value_type>(input_shape[2] * scale);
+        output_shape[3] = static_cast<Shape::value_type>(input_shape[3] * scale);
     }
 
     if (m_attrs.height > 0) {

@@ -74,10 +74,10 @@ ngraph::pass::ConvertInterpolateToInterpOrResampleMatcher::ConvertInterpolateToI
 
         if (num_of_spatial_vars == 2 && interpolate_axes.size() == 2 && std::set<std::string>{"nearest", "cubic", "area"}.count(interpolate_mode) == 0) {
             auto attrs = ngraph::op::InterpolateIEAttrs();
-            attrs.pad_beg = interpolate_attrs.pads_begin[0];
-            attrs.pad_end = interpolate_attrs.pads_end[0];
-            attrs.height = out_spatial_shape[0];
-            attrs.width = out_spatial_shape[1];
+            attrs.pad_beg = static_cast<int>(interpolate_attrs.pads_begin[0]);
+            attrs.pad_end = static_cast<int>(interpolate_attrs.pads_end[0]);
+            attrs.height = static_cast<int>(out_spatial_shape[0]);
+            attrs.width = static_cast<int>(out_spatial_shape[1]);
             attrs.align_corners = interpolate_attrs.align_corners;
             attrs.mode = interpolate_mode;
             attrs.antialias = interpolate_attrs.antialias;
@@ -38,7 +38,7 @@ ngraph::pass::ConvertLRNToLegacyMatcher::ConvertLRNToLegacyMatcher() {
         } else {
             std::vector<bool> norm(lrn->get_shape().size(), false);
             for (auto & axis : axis_value) {
-                if (axis < 0 || axis >= norm.size()) {
+                if (axis < 0 || static_cast<size_t>(axis) >= norm.size()) {
                     return false;
                 }
                 norm[axis] = true;

@@ -200,7 +200,7 @@ void ngraph::pass::ConvertMulAddToScaleShiftOrPower::convert_mul_add_to_scaleshi
         }
 
         auto output_type = m.get_match_root()->get_output_element_type(0);
-        auto power = std::make_shared<ngraph::op::PowerIE>(data_node, 1., scale, shift, output_type);
+        auto power = std::make_shared<ngraph::op::PowerIE>(data_node, 1.0f, scale, shift, output_type);
         power->set_friendly_name(add_node->get_friendly_name());
         ngraph::copy_runtime_info({mul_node, add_node}, power);
         ngraph::replace_node(m.get_match_root(), power);

@@ -19,7 +19,7 @@ ngraph::pass::ConvertNormalizeL2WithMulToNormalizeIE::ConvertNormalizeL2WithMulT
     auto input_1 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
     auto axis = std::make_shared<ngraph::opset1::Constant>(element::i64, Shape{1}, std::vector<int64_t>{0});
 
-    auto normalize = std::make_shared<ngraph::op::NormalizeL2>(input_0, axis, 0, ngraph::op::EpsMode::ADD);
+    auto normalize = std::make_shared<ngraph::op::NormalizeL2>(input_0, axis, 0.0f, ngraph::op::EpsMode::ADD);
     auto mul = std::make_shared<ngraph::opset1::Multiply> (normalize, input_1);
 
     ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {

@@ -80,7 +80,7 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvertNormalizeL2ToLegacyMatcher, "Convert
 ngraph::pass::ConvertNormalizeL2ToLegacyMatcher::ConvertNormalizeL2ToLegacyMatcher() {
     auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
     auto axis = std::make_shared<ngraph::opset1::Constant>(element::i64, Shape{1}, std::vector<int64_t>{0});
-    auto normalize = std::make_shared<ngraph::op::NormalizeL2>(input_0, axis, 0, ngraph::op::EpsMode::ADD);
+    auto normalize = std::make_shared<ngraph::op::NormalizeL2>(input_0, axis, 0.0f, ngraph::op::EpsMode::ADD);
 
     ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) {
         auto normalize = std::dynamic_pointer_cast<ngraph::op::NormalizeL2> (m.get_match_root());

@@ -40,7 +40,7 @@ ngraph::pass::ConvertOneHotToOneHotIEMatcher::ConvertOneHotToOneHotIEMatcher() {
         auto off_value = std::stof(off_value_node->convert_value_to_string(0));
 
         auto one_hot_ie = std::make_shared<ngraph::op::OneHotIE>(one_hot->input_value(0),
-                one_hot->get_axis(), depth_value, on_value, off_value, m_output_type);
+                static_cast<int>(one_hot->get_axis()), depth_value, on_value, off_value, m_output_type);
         one_hot_ie->set_friendly_name(one_hot->get_friendly_name());
 
         // insert Convert layer to cast output to a correct data type defined by the on/off values

@@ -63,4 +63,4 @@ ngraph::pass::ConvertOneHotToOneHotIEMatcher::ConvertOneHotToOneHotIEMatcher() {
 
 void ngraph::pass::ConvertOneHotToOneHotIEMatcher::detect_output_type(const std::shared_ptr<ngraph::Function> &f) {
     m_output_type = ngraph::op::util::has_f16_constants(f) ? element::Type_t::f16 : element::Type_t::f32;
-}
\ No newline at end of file
+}

@@ -33,7 +33,7 @@ ngraph::pass::ConvertPowerToPowerIEMatcher::ConvertPowerToPowerIEMatcher() {
             return false;
         }
 
-        auto power_ie = std::make_shared<ngraph::op::PowerIE>(power->input(0).get_source_output(), value, 1, 0, power->output(0).get_element_type());
+        auto power_ie = std::make_shared<ngraph::op::PowerIE>(power->input(0).get_source_output(), value, 1.0f, 0.0f, power->output(0).get_element_type());
         power_ie->set_friendly_name(power->get_friendly_name());
         ngraph::copy_runtime_info(power, power_ie);
         ngraph::replace_node(power, power_ie);

@@ -25,7 +25,7 @@ ngraph::pass::ConvertSqrtToPowerIEMatcher::ConvertSqrtToPowerIEMatcher() {
         if (!sqrt) {
             return false;
         }
-        auto power_ie = std::make_shared<ngraph::op::PowerIE>(sqrt->input(0).get_source_output(), 0.5f, 1, 0, sqrt->output(0).get_element_type());
+        auto power_ie = std::make_shared<ngraph::op::PowerIE>(sqrt->input(0).get_source_output(), 0.5f, 1.0f, 0.0f, sqrt->output(0).get_element_type());
         power_ie->set_friendly_name(sqrt->get_friendly_name());
         ngraph::copy_runtime_info(sqrt, power_ie);
         ngraph::replace_node(sqrt, power_ie);
@@ -94,8 +94,8 @@ ngraph::pass::ConvertStridedSliceToCropMatcher::ConvertStridedSliceToCropMatcher
             }
 
             // -1 because it's a position of ellipses
-            unsigned long num_input_axis_after_ellipses = (begin.size() - axis - num_new_axis_after_ellipses - 1);
-            unsigned long num_of_hidden_dims = input_shape.size() - num_input_axis_after_ellipses
+            size_t num_input_axis_after_ellipses = (begin.size() - axis - num_new_axis_after_ellipses - 1);
+            size_t num_of_hidden_dims = input_shape.size() - num_input_axis_after_ellipses
                                                 - num_input_axis_before_ellipses;
             for (size_t i = 0; i < num_of_hidden_dims; ++i) {
                 axes.emplace_back(uniq_id);

@@ -47,7 +47,7 @@ ngraph::pass::FullyConnectedBiasFusion::FullyConnectedBiasFusion() {
         }
 
         Shape output_shape(m_fc->get_shape());
-        size_t bias_size = std::accumulate(bias_shape.begin(), bias_shape.end(), 1, std::multiplies<int64_t>());
+        size_t bias_size = std::accumulate(bias_shape.begin(), bias_shape.end(), size_t{1}, std::multiplies<int64_t>());
         if (bias_shape.empty() || bias_shape.back() != output_shape.back() || bias_shape.back() != bias_size) {
             return false;
         }

@@ -113,24 +113,24 @@ public:
 
     static float getMin(const size_t quantizationLevels, const bool signedInterval) {
         if (quantizationLevels == 255) {
-            return signedInterval ? -127.0 : 0.0;
+            return signedInterval ? -127.0f : 0.0f;
         } else if (quantizationLevels == 256) {
-            return signedInterval ? -128.0 : 0.0;
+            return signedInterval ? -128.0f : 0.0f;
         } else {
             // THROW_TRANSFORMATION_EXCEPTION << "quantization level " << quantizationLevels << " is not supported";
             // FIXME: not completed
-            return signedInterval ? -128.0 : 0.0;
+            return signedInterval ? -128.0f : 0.0f;
         }
     }
 
     static float getMax(const size_t quantizationLevels, const bool signedInterval) {
         if ((quantizationLevels == 255) || (quantizationLevels == 256)) {
-            return signedInterval ? 127.0 : 255.0;
+            return signedInterval ? 127.0f : 255.0f;
         } else {
             // THROW_TRANSFORMATION_EXCEPTION << "quantization level " << quantizationLevels << " is not supported";
             // FIXME: not completed
             // return quantizationLevels - 1.0;
-            return signedInterval ? 127.0 : 255.0;
+            return signedInterval ? 127.0f : 255.0f;
         }
     }
 };
@@ -129,7 +129,7 @@ int EltwiseBaseTransformation::getNotEmpty(const std::shared_ptr<Node>& eltwise)
         const std::shared_ptr<Node>& data = dataNodes[i];
         if ((allBranchesAreEqual && isBroadcasted(data->get_output_shape(0))) ||
             (!allBranchesAreEqual && isBranchWithTargetType(as_type_ptr<opset1::FakeQuantize>(data)))) {
-            return i;
+            return static_cast<int>(i);
         }
     }
 

@@ -49,7 +49,7 @@ bool MultiplyToGroupConvolutionTransformation::transform(TransformationContext&
                 }
             }
         } else {
-            const float channelsInGroup = outputChannelsCount / group;
+            const size_t channelsInGroup = outputChannelsCount / group;
             for (size_t outputChannel = 0ul; outputChannel < outputChannelsCount; ++outputChannel) {
                 const size_t groupIndex = outputChannel / channelsInGroup;
                 for (size_t kernel = 0ul; kernel < kernelsCount; ++kernel) {

@@ -27,7 +27,7 @@ std::shared_ptr<ngraph::op::Constant> createNewScalesConst(const ngraph::op::Con
 
     std::vector<T> newData(source.size());
     for (size_t i = 0; i < source.size(); ++i) {
-        newData[i] = source[i] < 0 ? -1 : 1;
+        newData[i] = source[i] < 0 ? T{-1} : T{1};
     }
 
     const ngraph::element::Type type = originalConst.get_output_element_type(0);

@@ -26,7 +26,7 @@ std::shared_ptr<ngraph::op::Constant> createNewScalesConst(const ngraph::op::Con
 
     std::vector<T> newData(source.size());
     for (size_t i = 0; i < source.size(); ++i) {
-        newData[i] = source[i] < 0 ? -1 : 1;
+        newData[i] = source[i] < 0 ? T{-1} : T{1};
     }
 
     const ngraph::element::Type type = originalConst.get_output_element_type(0);

@@ -64,7 +64,7 @@ void reshapeDequantizationConstant(const std::shared_ptr<opset1::Reshape>& resha
         // update Reshape constant
         const std::vector<int> reshapeConstValues = as_type_ptr<opset1::Constant>(reshape->get_input_node_shared_ptr(1))->cast_vector<int>();
         std::vector<int> newReshapeConstValues(reshapeConstValues);
-        for (int i = newReshapeConstValues.size() - 1; i >= 0; --i) {
+        for (int i = static_cast<int>(newReshapeConstValues.size() - 1); i >= 0; --i) {
            if (newOperationConstantShape.size() <= i) {
                newReshapeConstValues[i] = 1;
            } else if (newOperationConstantShape[i] == 1ul) {

@@ -116,7 +116,7 @@ bool ReshapeTransformation::isPrecisionPreserved(std::shared_ptr<Node> op) const
 }
 
 size_t getLastNotBroadcastedChannel(const Shape& shape) {
-    for (int i = shape.size() - 1; i >= 0; --i) {
+    for (int i = static_cast<int>(shape.size()) - 1; i >= 0; --i) {
         if (shape[i] != 1ul) {
             return i;
         }
@@ -13,6 +13,10 @@
 
 namespace InferenceEngine {
 
+#if defined(_MSC_VER)
+#pragma warning(disable : 4250)
+#endif
+
 /**
  * @brief minimum API to be implemented by plugin, which is used in InferRequestBase forwarding mechanism
  * @ingroup ie_dev_api_async_infer_request_api

@@ -129,6 +129,11 @@ f16tof32Arrays(float* dst, const ie_fp16* src, size_t nelem, float scale = 1.f,
 INFERENCE_ENGINE_API_CPP(void)
 f32tof16Arrays(ie_fp16* dst, const float* src, size_t nelem, float scale = 1.f, float bias = 0.f);
 
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4018)
+#endif
+
 /**
  * @brief Converts one integral type to another saturating the result if the source value doesn't fit
  * into destination type range

@@ -152,7 +157,7 @@ inline OutT saturate_cast(const InT& value) {
     const InT min = std::numeric_limits<OutT>::min() > std::numeric_limits<InT>::min() ? std::numeric_limits<OutT>::min() :
                                                                                          std::numeric_limits<InT>::min();
 
-    return std::min(std::max(value, min), max);
+    return static_cast<OutT>(std::min(std::max(value, min), max));
 }
 
 /**

@@ -175,9 +180,13 @@ inline OutT saturate_cast(const InT& value) {
     const InT max = std::numeric_limits<OutT>::max() < std::numeric_limits<InT>::max() ? std::numeric_limits<OutT>::max() :
                                                                                          std::numeric_limits<InT>::max();
 
-    return std::min(value, max);
+    return static_cast<OutT>(std::min(value, max));
 }
 
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
 /**
  * @brief Converts one integral type to another saturating the result if the source value doesn't fit
  * into destination type range
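The push/disable/pop triple above scopes the C4018 (signed/unsigned comparison) suppression to the saturate_cast helpers alone and then restores whatever warning state the including translation unit had, which is why it is preferred over a bare disable in a header. The idiom in isolation (hypothetical helper, standard MSVC pragmas only):

#include <limits>

#if defined(_MSC_VER)
#pragma warning(push)            // save the current warning state
#pragma warning(disable : 4018)  // permit signed/unsigned comparison in this section only
#endif

// Hypothetical helper, for illustration: compares a possibly-unsigned value
// against a possibly-signed limit, which is exactly what C4018 flags.
template <typename OutT, typename InT>
OutT saturate_to(InT value) {
    const auto hi = std::numeric_limits<OutT>::max();
    return value > hi ? static_cast<OutT>(hi) : static_cast<OutT>(value);
}

#if defined(_MSC_VER)
#pragma warning(pop)             // callers keep whatever warning settings they had
#endif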
@@ -78,7 +78,7 @@ void resize_bilinear(const Blob::Ptr inBlob, Blob::Ptr outBlob, uint8_t* buffer)
 
     for (int dx = dst_go_x; dx < dst_go_x + dwidth; dx++) {
         auto fx = static_cast<float>((dx + 0.5) * scale_x - 0.5);
-        int32_t sx = floor(fx);
+        int32_t sx = static_cast<int32_t>(floor(fx));
         fx -= sx;
 
         int32_t sx0 = sx;

@@ -98,7 +98,7 @@ void resize_bilinear(const Blob::Ptr inBlob, Blob::Ptr outBlob, uint8_t* buffer)
 
     for (int dy = dst_go_y; dy < dst_go_y + dheight; dy++) {
         auto fy = static_cast<float>((dy + 0.5) * scale_y - 0.5);
-        int32_t sy = floor(fy);
+        int32_t sy = static_cast<int32_t>(floor(fy));
         fy -= sy;
 
         int32_t sy0 = sy;

@@ -124,8 +124,8 @@ void resize_bilinear(const Blob::Ptr inBlob, Blob::Ptr outBlob, uint8_t* buffer)
             for (int x = 0; x < swidth; x++) {
                 bool use_constant0 = yofs[y] + 0 < 0 || yofs[y] + 0 >= src_full_height;
                 bool use_constant1 = yofs[y] + 1 < 0 || yofs[y] + 1 >= src_full_height;
-                float val0 = use_constant0 ? border.value : sptr_[(yofs[y] + 0) * sstep + x];
-                float val1 = use_constant1 ? border.value : sptr_[(yofs[y] + 1) * sstep + x];
+                float val0 = static_cast<float>(use_constant0 ? border.value : sptr_[(yofs[y] + 0) * sstep + x]);
+                float val1 = static_cast<float>(use_constant1 ? border.value : sptr_[(yofs[y] + 1) * sstep + x]);
 
                 float res = val0 + beta[y] * (val1 - val0);
                 tptr_[x] = res;

@@ -159,8 +159,8 @@ int getResizeAreaTabSize(int dst_go, int ssize, int dsize, float scale) {
         float fsx1 = col * scale;
         float fsx2 = fsx1 + scale;
 
-        int sx1 = ceil(fsx1);
-        int sx2 = floor(fsx2);
+        int sx1 = static_cast<int>(ceil(fsx1));
+        int sx2 = static_cast<int>(floor(fsx2));
 
         sx2 = (std::min)(sx2, ssize - 1);
         sx1 = (std::min)(sx1, sx2);

@@ -194,8 +194,8 @@ void computeResizeAreaTab(int src_go, int dst_go, int ssize, int dsize, float sc
         float fsx2 = fsx1 + scale;
         float cellWidth = (std::min)(scale, ssize - fsx1);
 
-        int sx1 = ceil(fsx1);
-        int sx2 = floor(fsx2);
+        int sx1 = static_cast<int>(ceil(fsx1));
+        int sx2 = static_cast<int>(floor(fsx2));
 
         sx2 = (std::min)(sx2, ssize - 1);
         sx1 = (std::min)(sx1, sx2);

@@ -263,8 +263,8 @@ int computeResizeAreaTabFP32(int src_go, int dst_go, int ssize, int dsize, float
         float fsx2 = fsx1 + scale;
         float cellWidth = (std::min)(scale, ssize - fsx1);
 
-        int sx1 = ceil(fsx1);
-        int sx2 = floor(fsx2);
+        int sx1 = static_cast<int>(ceil(fsx1));
+        int sx2 = static_cast<int>(floor(fsx2));
 
         sx2 = (std::min)(sx2, ssize - 1);
         sx1 = (std::min)(sx1, sx2);

@@ -447,7 +447,7 @@ void VResizeLinear(float** src, data_t* dst, const float* beta, int width) {
 
     if (sizeof(data_t) == 4) {
         for (int x = 0; x < width; x++)
-            dst[x] = (S0[x] * b0 + S1[x] * b1);
+            dst[x] = static_cast<data_t>(S0[x] * b0 + S1[x] * b1);
     } else {
         for (int x = 0; x < width; x++)
             dst[x] = saturateU32toU8(static_cast<uint32_t>(S0[x] * b0 + S1[x] * b1));

@@ -499,7 +499,7 @@ static void resize_area_upscale(const Blob::Ptr inBlob, Blob::Ptr outBlob, uint8
         float cbuf[2] = {0};
 
         for (int dx = 0; dx < dwidth; dx++) {
-            int sx = floor(dx*scale_x);
+            int sx = static_cast<int>(floor(dx*scale_x));
             float fx = (dx+1) - (sx+1)*inv_scale_x;
             fx = fx <= 0 ? 0.f : fx - floor(fx);
 

@@ -525,7 +525,7 @@ static void resize_area_upscale(const Blob::Ptr inBlob, Blob::Ptr outBlob, uint8
         }
 
         for (int dy = 0; dy < dheight; dy++) {
-            int sy = floor(dy*scale_y);
+            int sy = static_cast<int>(floor(dy*scale_y));
             float fy = (dy+1) - (sy+1)*inv_scale_y;
             fy = fy <= 0 ? 0.f : fy - floor(fy);
 
@@ -592,10 +592,10 @@ size_t resize_get_buffer_size(Blob::Ptr inBlob, Blob::Ptr outBlob, const ResizeA
     size_t origW = strides[2];
     size_t origH = strides[1] / strides[2];
 
-    const int src_full_width = origW;
-    const int src_full_height = origH;
-    const int dst_full_width = dstDims[3];
-    const int dst_full_height = dstDims[2];
+    const int src_full_width = static_cast<int>(origW);
+    const int src_full_height = static_cast<int>(origH);
+    const int dst_full_width = static_cast<int>(dstDims[3]);
+    const int dst_full_height = static_cast<int>(dstDims[2]);
 
     float scale_x = static_cast<float>(dstDims[3]) / srcDims[3];
     float scale_y = static_cast<float>(dstDims[2]) / srcDims[2];

@@ -619,9 +619,9 @@ size_t resize_get_buffer_size(Blob::Ptr inBlob, Blob::Ptr outBlob, const ResizeA
     };
 
     auto resize_area_u8_downscale_sse_buffer_size = [&]() {
-        const int dwidth = dstDims[3];
-        const int dheight = dstDims[2];
-        const int swidth = srcDims[3];
+        const int dwidth = static_cast<int>(dstDims[3]);
+        const int dheight = static_cast<int>(dstDims[2]);
+        const int swidth = static_cast<int>(srcDims[3]);
 
         const int dst_go_x = 0;
         const int dst_go_y = 0;

@@ -780,7 +780,7 @@ public:
     void isApplicable(const Blob::Ptr &src, const Blob::Ptr &dst) override;
 };
 
-StatusCode CreatePreProcessData(IPreProcessData *& data, ResponseDesc */*resp*/) noexcept {
+StatusCode CreatePreProcessData(IPreProcessData *& data, ResponseDesc * /*resp*/) noexcept {
     data = new PreProcessData();
     return StatusCode::OK;
 }

@@ -1252,7 +1252,7 @@ struct Mapper {
     typedef MapperUnit<short, short> Unit;
 
     static inline Unit map(double ratio, int start, int max, int outCoord) {
-        float f = ((outCoord + 0.5f) * ratio - 0.5f);
+        float f = static_cast<float>((outCoord + 0.5) * ratio - 0.5);
         int s = cvFloor(f);
         f -= s;
 

@@ -1278,7 +1278,7 @@ struct Mapper {
     typedef MapperUnit<float, int> Unit;
 
     static inline Unit map(double ratio, int start, int max, int outCoord) {
-        float f = ((outCoord + 0.5f) * ratio - 0.5f);
+        float f = static_cast<float>((outCoord + 0.5) * ratio - 0.5);
         int s = cvFloor(f);
         f -= s;
 

@@ -1687,8 +1687,8 @@ static int getResizeAreaTabSize(int dst_go, int ssize, int dsize, float scale) {
         float fsx1 = col * scale;
         float fsx2 = fsx1 + scale;
 
-        int sx1 = ceil(fsx1);
-        int sx2 = floor(fsx2);
+        int sx1 = static_cast<int>(ceil(fsx1));
+        int sx2 = static_cast<int>(floor(fsx2));
 
         sx2 = (std::min)(sx2, ssize - 1);
         sx1 = (std::min)(sx1, sx2);

@@ -1723,8 +1723,8 @@ static void computeResizeAreaTab(int src_go, int dst_go, int ssize, int dsize, f
         float fsx2 = fsx1 + scale;
         float cellWidth = (std::min)(scale, ssize - fsx1);
 
-        int sx1 = ceil(fsx1);
-        int sx2 = floor(fsx2);
+        int sx1 = static_cast<int>(ceil(fsx1));
+        int sx2 = static_cast<int>(floor(fsx2));
 
         sx2 = (std::min)(sx2, ssize - 1);
         sx1 = (std::min)(sx1, sx2);

@@ -51,7 +51,7 @@ template<> inline uint16_t saturate_cast(uint8_t x) { return x; }
 template<> inline float saturate_cast(uint8_t x) { return x; }
 template<> inline uint8_t saturate_cast(uint8_t x) { return x; }
 
-template<> inline uint8_t saturate_cast(uint16_t x) { using lim = std::numeric_limits<uint8_t>; return std::min(static_cast<uint16_t>(lim::max()), std::max(static_cast<uint16_t>(lim::min()), x));}
+template<> inline uint8_t saturate_cast(uint16_t x) { using lim = std::numeric_limits<uint8_t>; return (uint8_t)std::min(static_cast<uint16_t>(lim::max()), std::max(static_cast<uint16_t>(lim::min()), x));}
 template<> inline uint8_t saturate_cast(float x) { return saturate_cast<uint8_t>(static_cast<int>(std::rint(x))); }
 //------------------------------------------------------------------------------
 
@@ -568,7 +568,7 @@ std::shared_ptr<ngraph::Node> V10Parser::createNode(const std::vector<ngraph::Ou
     if (!length)
         THROW_IE_EXCEPTION << "Cannot read network! The model requires weights data! "
                            << "Bin file cannot be found! Please specify the path to bin file.";
-    if (length < offset + size)
+    if (static_cast<uint64_t>(length) < offset + size)
         THROW_IE_EXCEPTION << "Cannot create " << params.type << " layer with name: " << params.name
                            << ". Layer has incorrect weights!";
     Blob::Ptr wBlob = make_blob_with_precision(TensorDesc(precision, {size / precision.size()}, Layout::C));

@@ -1514,7 +1514,7 @@ std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::Constant>::cre
     if (!length)
         THROW_IE_EXCEPTION << "Cannot read network! The model requires weights data! "
                            << "Bin file cannot be found! Please specify the path to bin file.";
-    if (length < offset + size)
+    if (static_cast<size_t>(length) < offset + size)
         THROW_IE_EXCEPTION << "Cannot create " << getType() << " layer with name: " << layerParsePrms.name
                            << ". Layer has incorrect weights!";
 

@@ -149,7 +149,7 @@ StatusCode CNNNetReaderImpl::ReadNetwork(const pugi::xml_node& const_root, Respo
             _parser = parserCreator->create(_version);
             InferenceEngine::details::CNNNetworkImplPtr local_network = _parser->Parse(root);
             name = local_network->getName();
-            local_network->validate(_version);
+            local_network->validate(static_cast<int>(_version));
             network = local_network;
             parseSuccess = true;
         } else {

@@ -193,12 +193,12 @@ CNNLayer::Ptr TILayerCreator::CreateLayer(pugi::xml_node& node, LayerParseParame
     std::vector<DataPtr> inputs, outputs;
     for (const auto& p : all_inputs) {
         IE_ASSERT(ins.find(p) != ins.end());
-        p2i[p] = inputs.size();
+        p2i[p] = static_cast<int>(inputs.size());
         inputs.push_back(ins[p]);
     }
     for (const auto& p : all_outputs) {
         IE_ASSERT(outs.find(p) != outs.end());
-        p2i[p] = outputs.size();
+        p2i[p] = static_cast<int>(outputs.size());
         outputs.push_back(outs[p]);
     }
 
@@ -672,13 +672,13 @@ void GemmValidator::checkShapes(const CNNLayer* layer, const vector<SizeVector>&
         THROW_IE_EXCEPTION << "Gemm input shapes must have at least 2 dimensions";
     }
 
-    unsigned long xAxis0 = dims0.size() - 1;
-    unsigned long yAxis0 = dims0.size() - 2;
+    unsigned long xAxis0 = static_cast<unsigned long>(dims0.size() - 1);
+    unsigned long yAxis0 = static_cast<unsigned long>(dims0.size() - 2);
 
     if (casted->transpose_a) std::swap(xAxis0, yAxis0);
 
-    unsigned long xAxis1 = dims1.size() - 1;
-    unsigned long yAxis1 = dims1.size() - 2;
+    unsigned long xAxis1 = static_cast<unsigned long>(dims1.size() - 1);
+    unsigned long yAxis1 = static_cast<unsigned long>(dims1.size() - 2);
 
     if (casted->transpose_b) std::swap(xAxis1, yAxis1);
 

@@ -692,8 +692,8 @@ void GemmValidator::checkShapes(const CNNLayer* layer, const vector<SizeVector>&
             THROW_IE_EXCEPTION << "Gemm input shapes must have at least 2 dimensions";
         }
 
-        unsigned long xAxis2 = dims2.size() - 1;
-        unsigned long yAxis2 = dims2.size() - 2;
+        unsigned long xAxis2 = static_cast<unsigned long>(dims2.size() - 1);
+        unsigned long yAxis2 = static_cast<unsigned long>(dims2.size() - 2);
 
         if (dims2[xAxis2] != dims1[xAxis1])
             THROW_IE_EXCEPTION << "Gemm input2 x dimension must be equal to input1 x dimension (" << dims2[xAxis2]

@@ -820,7 +820,7 @@ void ShuffleChannelsValidator::checkShapes(const CNNLayer* layer, const vector<S
                            << " and axis number " << casted->axis;
 
     int axis = casted->axis;
-    if (axis < 0) axis += inShapes[0].size();
+    if (axis < 0) axis += static_cast<int>(inShapes[0].size());
 
     if (inShapes[0][axis] % casted->group)
         THROW_IE_EXCEPTION << layer->name << " Group parameter must evenly divide the channel dimension!";

@@ -1200,7 +1200,7 @@ void ReverseSequenceValidator::checkShapes(const CNNLayer* layer, const vector<S
                            << " and batch_axis number " << casted->batch_axis;
 
     int batch_axis = casted->batch_axis;
-    if (batch_axis < 0) batch_axis += inShapes[0].size();
+    if (batch_axis < 0) batch_axis += static_cast<int>(inShapes[0].size());
     if (inShapes[1][0] != inShapes[0][batch_axis])
         THROW_IE_EXCEPTION << layer->name << " Incorrect 'seq_lengths_dims' parameter dimensions!";
 }

@@ -1368,7 +1368,7 @@ void RNNBaseValidator::checkParams(const InferenceEngine::CNNLayer* layer) {
         if (!one_of(act, "sigmoid", "tanh", "relu"))
             THROW_IE_EXCEPTION << "Unsupported activation function (" << act << ") for RNN layer.";
 
-    int act_num_required = def_acts.size();
+    int act_num_required = static_cast<int>(def_acts.size());
     if (rnn->activations.size() != act_num_required)
         THROW_IE_EXCEPTION << "Expected " << act_num_required << " activations, but provided "
                            << rnn->activations.size();
@@ -158,7 +158,7 @@ ngraph::matcher_pass_callback ConvertReduceBase::convert_reduce_to_pooling() {
             // In case if reduction applies not to spatial dimensions
             // we have to fit it into 4D Pooling
             size_t dims_prod = 1, dims_begin = 1, dims_end = 1;
-            for (size_t i = 0; i < input_shape.size(); ++i) {
+            for (int64_t i = 0; static_cast<size_t>(i) < input_shape.size(); ++i) {
                 if (i < *axes_vector.begin()) {
                     dims_begin *= input_shape[i];
                 } else if (i >= axes_vector.front() && i <= axes_vector.back()) {

@@ -26,7 +26,7 @@ bool normalize_single_value(std::vector<T> vec, float & value) {
         if (val != *vec.begin()) return false;
     }
 
-    float ref_val = *vec.begin();
+    float ref_val = static_cast<float>(*vec.begin());
 
     if (ref_val < std::numeric_limits<float>::lowest() || ref_val > std::numeric_limits<float>::max()) {
         return false;

@@ -151,7 +151,7 @@ static bool replace_transpose_with_reshape(shared_ptr<Node> transpose) {
         return false;
     }
 
-    const auto input_shape_rank = input_shape.rank().get_length();
+    const size_t input_shape_rank = input_shape.rank().get_length();
 
     auto order = as_type_ptr<opset3::Constant>(transpose->input_value(1).get_node_shared_ptr());
     if (!order || !ngraph::shape_size(order->get_shape())) {

@@ -18,7 +18,7 @@ bool check_block_first(const ngraph::Shape& shape_input, const ngraph::Shape& sh
     possible_block_size = shape_reshape_before[1];
     if (possible_block_size == 0)
         return false;
-    uint64_t c_dim = shape_input[1] / std::pow(possible_block_size, spatial_dims);
+    uint64_t c_dim = static_cast<uint64_t>(shape_input[1] / std::pow(possible_block_size, spatial_dims));
 
     // x' = reshape(data, [N, block_size, block_size, ..., block_size, C / (block_size ^ K), D1, D2, ..., DK])
     ngraph::Shape expected_shape = {shape_input[0]};

@@ -54,7 +54,7 @@ bool check_depth_first(const ngraph::Shape& shape_input, const ngraph::Shape& sh
     possible_block_size = shape_reshape_before[2];
     if (possible_block_size == 0)
         return false;
-    uint64_t c_dim = shape_input[1] / std::pow(possible_block_size, spatial_dims);
+    uint64_t c_dim = static_cast<uint64_t>(shape_input[1] / std::pow(possible_block_size, spatial_dims));
 
     // x' = reshape(data, [N, C / (block_size ^ K), block_size, block_size, ..., block_size, D1, D2, ..., DK])
     ngraph::Shape expected_shape = {shape_input[0], static_cast<size_t>(c_dim)};

@@ -161,4 +161,4 @@ void ngraph::pass::DepthToSpaceFusion::depth_to_space_fusion() {
 
     auto m = std::make_shared<ngraph::pattern::Matcher>(reshape_after, "DepthToSpaceFusion");
     this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
-}
\ No newline at end of file
+}
@@ -165,7 +165,7 @@ static bool replace_squeeze_unsqueeze(const std::shared_ptr<Node>& node) {
 static std::vector<int64_t> get_unsqueeze_axes(const PartialShape& data_shape,
                                                const PartialShape& out_shape) {
     std::vector<int64_t> axes;
-    size_t i = 0;
+    int64_t i = 0;
     for (auto o = 0; o < out_shape.rank().get_length(); o++) {
         if (i < data_shape.rank().get_length() && data_shape[i].same_scheme(out_shape[o])) {
             i += 1;

@@ -181,7 +181,7 @@ static std::vector<int64_t> get_unsqueeze_axes(const PartialShape& data_shape,
 static std::vector<int64_t> get_squeeze_axes(const PartialShape& data_shape,
                                              const PartialShape& out_shape) {
     std::vector<int64_t> axes;
-    size_t out_i = 0;
+    int64_t out_i = 0;
     for (auto i = 0; i < data_shape.rank().get_length(); i++) {
         if (out_i < out_shape.rank().get_length() && data_shape[i].same_scheme(out_shape[out_i])) {
             out_i += 1;

@@ -169,7 +169,7 @@ bool ngraph::pass::GroupedStridedSliceOptimizer::run_on_function(std::shared_ptr
             for (const auto & ss_plan : pair.second) {
                 if (ss_plan.second.begins[i] != 0 || ss_plan.second.ends[i] != input_shape[i]) {
                     if (axis == -1 || axis == i)
-                        axis = i;
+                        axis = static_cast<int>(i);
                     else
                         valid_for_replacement = false;
                     if (ss_plan.second.strides[i] != 1)

@@ -189,12 +189,12 @@ bool ngraph::pass::GroupedStridedSliceOptimizer::run_on_function(std::shared_ptr
                   {return lhs.begin < rhs.begin;});
 
         std::vector<std::pair<Output<Node>, uint64_t>> output_to_size;
-        uint64_t prev_r = 0;
+        int64_t prev_r = 0;
         for (auto & record : output_to_partition) {
             valid_for_replacement &= (record.begin >= prev_r);
             prev_r = record.end;
         }
-        valid_for_replacement &= (prev_r <= input_shape[axis]);
+        valid_for_replacement &= (static_cast<size_t>(prev_r) <= input_shape[axis]);
         if (!valid_for_replacement) continue;
 
         prev_r = 0;

@@ -205,7 +205,7 @@ bool ngraph::pass::GroupedStridedSliceOptimizer::run_on_function(std::shared_ptr
             prev_r = record.end;
             output_to_size.emplace_back(record.output, record.end - record.begin);
         }
-        if (prev_r < input_shape[axis]) {
+        if (static_cast<size_t>(prev_r) < input_shape[axis]) {
             output_to_size.emplace_back(fake_output, input_shape[axis] - prev_r);
         }
 

@@ -35,7 +35,7 @@ ngraph::pass::PullTransposeThroughFQUp::PullTransposeThroughFQUp() {
             auto fq_input = fq->input_value(i);
             auto fq_input_rank = fq_input.get_partial_shape().rank().get_length();
             std::vector<int64_t> unsqueeze_axes;
-            for (size_t j = 0; j < input_rank - fq_input_rank; ++j) {
+            for (int64_t j = 0; j < input_rank - fq_input_rank; ++j) {
                 unsqueeze_axes.push_back(j);
             }
             if (!unsqueeze_axes.empty()) {
@@ -34,7 +34,7 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
         // Create copies of the TensorIterator body, the number of copies is equal to the number of iterations.
         // Assign names to the created layers.
         std::vector<std::shared_ptr<ngraph::Function>> body_functions(num_iter);
-        for (uint64_t idx = 0; idx < num_iter; ++idx) {
+        for (int64_t idx = 0; idx < num_iter; ++idx) {
             body_functions[idx] = clone_function(*function);
             for (auto &node : body_functions[idx]->get_ops()) {
                 node->set_friendly_name(ti->get_friendly_name() + "/" + std::to_string(idx + 1) + "/" + node->get_friendly_name());

@@ -64,7 +64,7 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
                 copy_runtime_info(ti, split);
                 auto stride = input_desc->m_stride;
                 // connect to the body
-                for (uint64_t j = 0; j < num_iter; j++) {
+                for (int64_t j = 0; j < num_iter; j++) {
                     auto idx = stride > 0 ? j : num_iter - j - 1;
                     auto param = body_functions[j]->get_parameters()[input_desc->m_body_parameter_index];
                     for (auto &output : param->outputs()) {

@@ -92,7 +92,7 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
                 }
 
                 // Back-edge processing. Connect the copies of the body to each other.
-                for (uint64_t j = 1; j < num_iter; j++) {
+                for (int64_t j = 1; j < num_iter; j++) {
                     auto cur_param = body_functions[j]->get_parameters()[input_desc->m_body_parameter_index];
                     auto prev_val = body_functions[j - 1]->get_results()[input_desc->m_body_value_index];
                     for (auto &output : cur_param->outputs()) {

@@ -108,7 +108,7 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
 
                 // Connect the input to the corresponding copy of the body.
                 auto in_data = ti->input_values()[input_desc->m_input_index].get_node_shared_ptr();
-                for (uint64_t j = 0; j < num_iter; j++) {
+                for (int64_t j = 0; j < num_iter; j++) {
                     auto param = body_functions[j]->get_parameters()[input_desc->m_body_parameter_index];
                     for (auto &output : param->outputs()) {
                         output.replace(in_data);

@@ -138,7 +138,7 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
                 auto stride = output_desc->m_stride;
 
                 // Connect outputs of the bodies to the Concat layer
-                for (uint64_t j = 0; j < num_iter; j++) {
+                for (int64_t j = 0; j < num_iter; j++) {
                     auto idx = stride > 0 ? j : num_iter - j - 1;
                     std::shared_ptr<opset4::Result> result = body_functions[idx]->get_results()[output_desc->m_body_value_index];
                     auto input_to_res = result->get_input_source_output(0);

@@ -188,4 +188,4 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
 
     auto m = std::make_shared<ngraph::pattern::Matcher>(tensor_iterator, "UnrollTensorIterator");
     register_matcher(m, callback);
-}
\ No newline at end of file
+}
@@ -322,7 +322,7 @@ inline int32_t convert_value<uint64_t, int32_t>(uint64_t val) {
 
 template <>
 inline int32_t convert_value<uint32_t, int32_t>(uint32_t val) {
-    if (val > std::numeric_limits<int32_t>::max()) {
+    if (val > static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {
         return std::numeric_limits<int32_t>::max();
     }
     return static_cast<int32_t>(val);
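In the comparison above, val is uint32_t while numeric_limits<int32_t>::max() is a signed int, so MSVC flags the mixed-sign compare (C4018) even though the usual arithmetic conversions happen to make it safe here. Casting the non-negative bound to the unsigned type states the intent explicitly. Standalone:

#include <cstdint>
#include <limits>

bool fits_in_int32(uint32_t val) {
    // INT32_MAX is non-negative, so widening it to uint32_t preserves the value
    // and the comparison becomes unsigned/unsigned, silencing C4018.
    return val <= static_cast<uint32_t>(std::numeric_limits<int32_t>::max());
}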
@@ -49,7 +49,7 @@ ngraph::pass::BatchNormDecomposition::BatchNormDecomposition() {
 
         // TODO: instead of getting full shape we can concatenate sequence of ones with ShapeOf
         Shape input_aligned_shape = m_gamma.get_shape();
-        for (size_t i = 0; i < dims_to_add; ++i)
+        for (int64_t i = 0; i < dims_to_add; ++i)
             input_aligned_shape.push_back(1);
         auto new_shape = opset5::Constant::create(element::i64, Shape{input_aligned_shape.size()}, input_aligned_shape);
 

@@ -113,7 +113,7 @@ ngraph::pass::BatchNormV5Decomposition::BatchNormV5Decomposition() {
 
         // TODO: instead of getting full shape we can concatenate sequence of ones with ShapeOf
        Shape input_aligned_shape = m_gamma.get_shape();
-        for (size_t i = 0; i < dims_to_add; ++i)
+        for (int64_t i = 0; i < dims_to_add; ++i)
            input_aligned_shape.push_back(1);
        auto new_shape = opset5::Constant::create(element::i64, Shape{input_aligned_shape.size()}, input_aligned_shape);
 

@@ -87,8 +87,8 @@ void ngraph::pass::ConvertScatterElementsToScatter::convert_scatter_elements_to_
     auto compare_shapes_ranges = [](const PartialShape & lhsShape, const PartialShape & rhsShape, const Range & lhsRange, const Range & rhsRange) -> bool {
         // Check that ranges are equal and suits to Shapes sizes
         if (lhsRange != rhsRange ||
-            lhsRange.r > lhsShape.rank().get_length() ||
-            rhsRange.r > rhsShape.rank().get_length()) {
+            lhsRange.r > static_cast<uint64_t>(lhsShape.rank().get_length()) ||
+            rhsRange.r > static_cast<uint64_t>(rhsShape.rank().get_length())) {
             return false;
         }
 

@@ -210,4 +210,4 @@ void ngraph::pass::ConvertScatterElementsToScatter::convert_scatter_elements_to_
 
     auto m = std::make_shared<ngraph::pattern::Matcher>(scatter, "ConvertScatterElementsToScatter");
     this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
-}
\ No newline at end of file
+}
@@ -25,7 +25,7 @@
 namespace vpu {
 
 template <typename T>
-Optional<int> parseNumber(const std::string& s) {
+Optional<T> parseNumber(const std::string& s) {
     T value{};
     if ((std::istringstream(s) >> value >> std::ws).eof()) {
         return {value};
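Unlike most hunks in this commit, this one corrects a genuine bug the warning exposed: the template parsed a T but declared Optional<int>, so every non-int instantiation converted its result through int. With the fix the return type tracks the template parameter:

// Hypothetical usage; assumes vpu::Optional behaves like std::optional.
auto width = vpu::parseNumber<int>("640");    // Optional<int>{640}
auto scale = vpu::parseNumber<float>("1.5");  // Optional<float>{1.5f}; before the fix this was forced through int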
@@ -78,6 +78,7 @@ public:
     }
 
     float toFloat() const { return isInt ? static_cast<float>(value.i) : value.f; }
+    int toInt() const { return isInt ? value.i : static_cast<int>(value.f); }
 
     OPERATOR(+)
     OPERATOR(-)

@@ -305,7 +305,11 @@ public:
     const_reverse_iterator crend() const noexcept { return _base.crend(); }
 
     bool empty() const noexcept { return _base.empty(); }
-    size_type size() const noexcept { return _base.size(); }
+#if ENABLE_MYRIAD
+    int size() const noexcept { return static_cast<int>(_base.size()); }
+#else
+    size_t size() const noexcept { return _base.size(); }
+#endif
 
     void reserve(size_type cap) { _base.reserve(cap); }
 

@@ -98,7 +98,7 @@ bool setShapeToHostTensorData(const HostTensorPtr& data, const Shape& shape) {
     }
 
     for (int i = 0; i < outputRank; i++) {
-        dataPtr[i] = shape[i];
+        dataPtr[i] = static_cast<T>(shape[i]);
     }
     return true;
 }

@@ -189,9 +189,9 @@ bool evaluateOutShapeOfReshape(
         return false;
     }
 
-    int zeroDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
+    int64_t zeroDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
                                       [](int64_t value) { return value == 0; });
-    int negativeDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
+    int64_t negativeDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
                                           [](int64_t value) { return value == -1; });
     if (negativeDimsCount > 1) {
         return false;

@@ -220,7 +220,7 @@ bool evaluateOutShapeOfReshape(
             outputShape[i] = inputShape[i];
             outputTotalDimCount *= inputShape[i];
         } else if (outputShape[i] == -1) {
-            negativeDimIdx = i;
+            negativeDimIdx = static_cast<int>(i);
         } else {
             outputTotalDimCount *= outputShape[i];
         }

@@ -66,14 +66,14 @@ void evaluateStaticShapeNonZero(const Shape& inputShape,
     const auto inputRank = nonZeroOutput->get_partial_shape()[0].get_length();
     const auto nonZeroCount = nonZeroOutput->get_partial_shape()[1].get_length();
 
-    for (size_t i = 0; i < inputRank; ++i) {
-        for (size_t j = 0; j < nonZeroCount; j++) {
+    for (int64_t i = 0; i < inputRank; ++i) {
+        for (int64_t j = 0; j < nonZeroCount; j++) {
             outIndicesBuffer[i * totalInputSize + j] = nonZeroOutputBuffer[i * nonZeroCount + j];
         }
     }
 
-    outShapeBuffer[0] = inputRank;
-    outShapeBuffer[1] = nonZeroCount;
+    outShapeBuffer[0] = static_cast<typename ngraph::element_type_traits<OutType>::value_type>(inputRank);
+    outShapeBuffer[1] = static_cast<typename ngraph::element_type_traits<OutType>::value_type>(nonZeroCount);
 }
 
 }  // namespace
@@ -18,7 +18,7 @@ namespace vpu {
 
 void get_normalized_shape(ngraph::Output<ngraph::Node>& shape, size_t actual_rank_value, size_t max_rank_value, bool transpose,
                           const ngraph::element::Type& elementType) {
-    if (const unsigned rank_diff = max_rank_value - actual_rank_value) {
+    if (const size_t rank_diff = max_rank_value - actual_rank_value) {
         ngraph::OutputVector extended_shape_parts =
             {ngraph::opset3::Constant::create(elementType, {rank_diff}, std::vector<int64_t>(rank_diff, 1)), shape};
         shape = std::make_shared<ngraph::opset3::Concat>(extended_shape_parts, 0);

@@ -38,12 +38,12 @@ std::shared_ptr<ngraph::Node> calculate_output_shape(
 
     VPU_THROW_UNLESS(begin.size() == end.size() && begin.size() == strides.size(),
                      "Begin, end and strides inputs must be of the same size, but {}, {} and {} given accordingly", begin.size(), end.size(), strides.size());
-    const auto inputShapeRank = input_shape.get_partial_shape()[0].get_length();
+    const auto inputShapeRank = static_cast<size_t>(input_shape.get_partial_shape()[0].get_length());
     VPU_THROW_UNLESS(inputShapeRank >= begin.size(),
                      "Input shape rank must not be less than begin/end/strides size, but {} and {} given accordingly", inputShapeRank, begin.size());
 
     ngraph::OutputVector output_dimensions;
-    for (int64_t axis = 0; axis < begin.size(); ++axis) {
+    for (size_t axis = 0; axis < begin.size(); ++axis) {
         auto lb = begin[axis], ub = end[axis], stride = strides[axis];
 
         ngraph::Output<ngraph::Node> lower_bound = ngraph::opset3::Constant::create(shape_type, {1}, {lb});

@@ -189,7 +189,7 @@ int MathExpression::evaluate() const {
         VPU_THROW_EXCEPTION << "Illegal expression: not enough operators";
     }
 
-    return values.top().toFloat();
+    return values.top().toInt();
 }
 
 }  // namespace vpu
@@ -189,7 +189,7 @@ public:
     }
 
     inline int numConsumers() const {
-        return _consumerEdges.size();
+        return static_cast<int>(_consumerEdges.size());
     }
     inline auto consumers() const -> decltype(mapRange<ConsumerAccess>(consumerEdges())) {
         return mapRange<ConsumerAccess>(consumerEdges());

@@ -207,7 +207,7 @@ public:
     }
 
     inline int numChildDatas() const {
-        return _childDataToDataEdges.size();
+        return static_cast<int>(_childDataToDataEdges.size());
     }
     inline auto childDatas() const -> decltype(mapRange<ChildDataAccess>(childDataToDataEdges())) {
         return mapRange<ChildDataAccess>(childDataToDataEdges());

@@ -409,7 +409,7 @@ public:
 
     DimsOrder() = default;
     static DimsOrder fromCode(StorageOrder64 code);
-    static DimsOrder fromNumDims(int numDims);
+    static DimsOrder fromNumDims(size_t numDims);
     static DimsOrder fromPermutation(const DimVector& perm);
     static DimsOrder fromLayout(ie::Layout const& layout);
 

@@ -513,7 +513,7 @@ public:
             int ind = 0;
             for (auto i = dimsBegin; i < dimsEnd; i++) {
                 auto val = *i;
-                _dims.set(perm[ind], val);
+                _dims.set(perm[ind], static_cast<int>(val));
                 ++ind;
             }
         } else {

@@ -308,12 +308,12 @@ public:
     // Nodes accessors
     //
 
-    inline int numDatas() const { return _dataPtrList.size(); }
+    inline int numDatas() const { return static_cast<int>(_dataPtrList.size()); }
     inline auto datas() const -> decltype(_dataList | asRange()) {
         return _dataList | asRange();
     }
 
-    inline int numStages() const { return _stagePtrList.size(); }
+    inline int numStages() const { return static_cast<int>(_stagePtrList.size()); }
     inline auto initialStages() const -> decltype(_initialStages | asRange()) {
         return _initialStages | asRange();
     }
@ -466,11 +466,11 @@ private:
|
||||
public:
|
||||
inline int numInputs() const { return _inputEdges.size(); }
|
||||
inline StageInput inputEdge(int ind) const {
|
||||
IE_ASSERT(ind >= 0 && static_cast<std::size_t>(ind) < _inputEdges.size());
|
||||
IE_ASSERT(ind >= 0 && ind < _inputEdges.size());
|
||||
return _inputEdges[ind];
|
||||
}
|
||||
inline Data input(int ind) const {
|
||||
IE_ASSERT(ind >= 0 && static_cast<std::size_t>(ind) < _inputEdges.size());
|
||||
IE_ASSERT(ind >= 0 && ind < _inputEdges.size());
|
||||
return _inputEdges[ind]->input();
|
||||
}
|
||||
inline auto inputs() const -> decltype(mapRange<InputAccess>(inputEdges())) {
|
||||
@ -479,11 +479,11 @@ public:
|
||||
|
||||
inline int numOutputs() const { return _outputEdges.size(); }
|
||||
inline StageOutput outputEdge(int ind) const {
|
||||
IE_ASSERT(ind >= 0 && static_cast<std::size_t>(ind) < _outputEdges.size());
|
||||
IE_ASSERT(ind >= 0 && ind < _outputEdges.size());
|
||||
return _outputEdges[ind];
|
||||
}
|
||||
inline Data output(int ind) const {
|
||||
IE_ASSERT(ind >= 0 && static_cast<std::size_t>(ind) < _outputEdges.size());
|
||||
IE_ASSERT(ind >= 0 && ind < _outputEdges.size());
|
||||
return _outputEdges[ind]->output();
|
||||
}
|
||||
inline auto outputs() const -> decltype(mapRange<OutputAccess>(outputEdges())) {
|
||||
@ -499,11 +499,11 @@ public:
|
||||
|
||||
inline int numTempBuffers() const { return _tempBufferEdges.size(); }
|
||||
inline StageTempBuffer tempBufferEdge(int ind) const {
|
||||
IE_ASSERT(ind >= 0 && static_cast<std::size_t>(ind) < _tempBufferEdges.size());
|
||||
IE_ASSERT(ind >= 0 && ind < _tempBufferEdges.size());
|
||||
return _tempBufferEdges[ind];
|
||||
}
|
||||
inline Data tempBuffer(int ind) const {
|
||||
IE_ASSERT(ind >= 0 && static_cast<std::size_t>(ind) < _tempBufferEdges.size());
|
||||
IE_ASSERT(ind >= 0 && ind < _tempBufferEdges.size());
|
||||
return _tempBufferEdges[ind]->tempBuffer();
|
||||
}
|
||||
inline auto tempBuffers() const -> decltype(mapRange<TempBufferAccess>(tempBufferEdges())) {
|
||||
|
@ -235,9 +235,9 @@ void BackEnd::serialize(
blobHdr.bss_mem_size = checked_cast<uint32_t>(usedMemory.BSS);
blobHdr.number_of_cmx_slices = checked_cast<uint32_t>(env.resources.numCMXSlices);
blobHdr.number_of_shaves = checked_cast<uint32_t>(env.resources.numSHAVEs);
blobHdr.has_hw_stage = checked_cast<uint32_t>(modelStagesStat.hasHwStage);
blobHdr.has_shave_stage = checked_cast<uint32_t>(modelStagesStat.hasShaveStage);
blobHdr.has_dma_stage = checked_cast<uint32_t>(modelStagesStat.hasDmaStage);
blobHdr.has_hw_stage = static_cast<uint32_t>(modelStagesStat.hasHwStage);
blobHdr.has_shave_stage = static_cast<uint32_t>(modelStagesStat.hasShaveStage);
blobHdr.has_dma_stage = static_cast<uint32_t>(modelStagesStat.hasDmaStage);
blobHdr.input_info_section_offset = checked_cast<uint32_t>(hdrSize);
blobHdr.output_info_section_offset = checked_cast<uint32_t>(blobHdr.input_info_section_offset + inputInfoSecSize);
blobHdr.stage_section_offset = checked_cast<uint32_t>(blobHdr.output_info_section_offset + outputInfoSecSize);
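In the serialize() hunk, checked_cast stays on genuinely range-checked quantities but is swapped for static_cast on the bool flags: a runtime range check on a bool-to-uint32_t conversion proves nothing and, depending on how the check template is written, can itself emit comparison warnings. A hedged sketch with a simplified stand-in for the project's checked_cast:

#include <cstdint>
#include <stdexcept>

// Simplified stand-in: verifies the value survives a round trip
// through the destination type.
template <typename To, typename From>
To checkedCast(From value) {
    const To result = static_cast<To>(value);
    if (static_cast<From>(result) != value)
        throw std::out_of_range("narrowing lost data");
    return result;
}

uint32_t serializeFlag(bool hasHwStage) {
    // bool can only become 0 or 1, so the round-trip check above adds
    // nothing here; a plain static_cast says exactly what happens.
    return static_cast<uint32_t>(hasHwStage);
}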
@ -113,7 +113,7 @@ SmallVector<std::string> deduceKernelParameters(const md_parser_t& parser, int k

auto arguments = SmallVector<std::string>{};
arguments.reserve(argCount);
for (size_t i = 0; i < argCount; i++) {
for (uint32_t i = 0; i < argCount; i++) {
const auto arg = parser.get_argument(kernelDesc, i);
VPU_THROW_UNLESS(arg, "Error while parsing custom layer elf file.");

@ -243,7 +243,7 @@ CustomKernel::CustomKernel(const pugi::xml_node& kernel, std::string configDir):
param.type == CustomParamType::Data;
};

_inputDataCount = std::count_if(begin(_kernelParams), end(_kernelParams), isInputData);
_inputDataCount = static_cast<int>(std::count_if(begin(_kernelParams), end(_kernelParams), isInputData));
}

std::pair<CustomDimSource, int> parseDimSource(const std::string& dims) {
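The count_if hunk fixes a subtler variant of the same warning family: std::count_if returns the iterator's difference_type (ptrdiff_t, 64-bit on x64), not int. A minimal sketch with hypothetical names:

#include <algorithm>
#include <vector>

struct Param { bool isInput; };  // hypothetical parameter record

int countInputs(const std::vector<Param>& params) {
    // std::count_if yields std::ptrdiff_t here; storing it in an int
    // member without a cast warns on 64-bit MSVC builds.
    return static_cast<int>(std::count_if(params.begin(), params.end(),
        [](const Param& p) { return p.isInput; }));
}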
@ -178,10 +178,10 @@ CustomLayer::CustomLayer(std::string configDir, const pugi::xml_node& customLaye
"each kernel should be provided with 'stage' attribute.", _layerName);

const auto stageNum = std::stod(stageAttr.value());
VPU_THROW_UNLESS(stageOrder.find(stageNum) == stageOrder.end(),
VPU_THROW_UNLESS(stageOrder.find(static_cast<int>(stageNum)) == stageOrder.end(),
"Error while binding %s custom layer: found duplicating stage id.", _layerName);

stageOrder.emplace(stageNum, CustomKernel{kernel, _configDir});
stageOrder.emplace(static_cast<int>(stageNum), CustomKernel{kernel, _configDir});
}

VPU_THROW_UNLESS(!stageOrder.empty(),
@ -87,7 +87,7 @@ int calcOutputSize(
int padBefore, int padAfter,
bool useCeil) {
if (useCeil) {
return std::ceil(static_cast<double>(inputSize - kernelSize + padBefore + padAfter) / kernelStride + 1);
return static_cast<int>(std::ceil(static_cast<double>(inputSize - kernelSize + padBefore + padAfter) / kernelStride + 1));
} else {
return (inputSize - kernelSize + padBefore + padAfter) / kernelStride + 1;
}
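calcOutputSize() fixes C4244, conversion from double to int: std::ceil returns double, so the truncation back to int has to be written out. A minimal sketch of the same idiom:

#include <cmath>

// Length of a strided window rounded up, mirroring the ceil-mode
// branch of calcOutputSize() above.
int ceilOutputSize(int span, int stride) {
    return static_cast<int>(std::ceil(static_cast<double>(span) / stride + 1));
}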
@ -434,10 +434,10 @@ void PassImpl::splitHwConv(
const auto tileInfo = tileInfos[i];
const auto tile = tileInfo.tile;

const std::string postfix = getPostfix(postfixDescription, i+1, tileInfos.size());
const std::string postfix = getPostfix(postfixDescription, i+1, static_cast<int>(tileInfos.size()));

const auto startChannel = tileInfo.slice.start;
const auto numChannels = tileInfo.slice.size;
const auto numChannels = static_cast<int>(tileInfo.slice.size);

const auto newWeights = splitWeights(model, weights, postfix, startChannel, numChannels);
const auto newBiases = splitBiases(model, biases, postfix, startChannel, numChannels);
@ -610,7 +610,7 @@ std::vector<HwConvTileInfo> PassImpl::splitHwConvInMultipleOutChannelsTiles(
tileInfo.cost = descCost;

bestSol.clear();
for (int i = 0; i < numDescr; ++i) {
for (uint32_t i = 0; i < numDescr; ++i) {
if (i == numDescr-1)
tileInfo.lastOutChans = (remOutChans > 0) ? remOutChans : outChansPerDescr;

@ -157,7 +157,7 @@ void PassImpl::run(const Model& model) {
stage->attrs().set<float>("reluScale", 1.0f);
} else {
stage->attrs().set<uint32_t>("a0", 1);
stage->attrs().set<uint32_t>("a1", 1.0f / negativeSlope);
stage->attrs().set<uint32_t>("a1", static_cast<uint32_t>(1.0f / negativeSlope));
stage->attrs().set<float>("reluScale", negativeSlope);
}
}
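The leaky-ReLU hunk makes the float-to-uint32_t conversion of 1.0f / negativeSlope explicit; the slope itself still travels as a float attribute, so only this integer field truncates. A hedged sketch of the pattern, assuming a non-zero slope as the pass above does:

#include <cstdint>

// Hypothetical helper: the integer attribute holds the truncated
// reciprocal of the slope, while the float attribute keeps the
// exact value for rescaling.
uint32_t packSlopeReciprocal(float negativeSlope) {
    return static_cast<uint32_t>(1.0f / negativeSlope);
}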
@ -58,7 +58,7 @@ private:
static bool isTrivialPermute(const PermutationIndexVector& permutation, const vpu::DimValues& dims) {
InferenceEngine::SizeVector dimsVector(dims.size());
for (const auto& dim : dims) {
auto index = dimToIeInd(dim.first, dims.size());
auto index = dimToIeInd(dim.first, static_cast<int>(dims.size()));
dimsVector[dims.size() - 1 - index] = dim.second;
}

@ -133,7 +133,7 @@ void PassImpl::run(const Model& model) {
static_cast<float>(InputExtended_height)
/ static_cast<float>(input->desc().dim(Dim::H)));

const float MAX_INPUTEXTENDED_SCALE = 1.8;
const float MAX_INPUTEXTENDED_SCALE = 1.8f;
const float MIN_INPUTEXTENDED_SCALE = 1;

if (InputExtended_scale >= MAX_INPUTEXTENDED_SCALE) {
@ -78,7 +78,7 @@ void PassImpl::run(const Model& model) {
auto groups = stage->attrs().get<int>("groups");
auto try_hw = stage->attrs().get<int>("try_hw");

int kernelNDims = pads_begin.size();
int kernelNDims = static_cast<int>(pads_begin.size());
VPU_THROW_UNLESS(kernelNDims == pads_end.size(),
"wrong pads ndims=%lu, expected=%d", pads_end.size(), kernelNDims);
VPU_THROW_UNLESS(kernelNDims == strides.size(),
@ -137,7 +137,7 @@ void PassImpl::run(const Model& model) {
if (stage->type() == StageType::StubDeconv) {
deconvolutionRelayout(
origWeights, weights->desc().totalDimSize(),
newWeightsPtr, newWeightsSize,
newWeightsPtr, static_cast<int>(newWeightsSize),
kernelSizeX, kernelSizeY,
input->desc().dim(Dim::C),
output->desc().dim(Dim::C),
@ -76,7 +76,7 @@ void PassImpl::run(const Model& model) {

const auto try_hw = stage->attrs().get<int>("try_hw");

int kernelNDims = kernel_shape.size();
auto kernelNDims = kernel_shape.size();
if (kernelNDims != 3) {
continue;
}

@ -67,7 +67,7 @@ int getMeanValue(const std::vector<short>& exponents) {
if (realSize == 0) {
return smallestExp;
} else {
return sum / realSize;
return static_cast<int>(sum / realSize);
}
}

@ -31,7 +31,7 @@ void BatchNormalizationWeightsContent::fillTempBuf(void* tempBuf) const {
auto srcPtr = _origContent->get<fp16_t>();
auto dstPtr = static_cast<fp16_t*>(tempBuf);

ie::parallel_for(_origContent->byteSize() / sizeof(fp16_t), [this, srcPtr, dstPtr](int i) {
ie::parallel_for(_origContent->byteSize() / sizeof(fp16_t), [this, srcPtr, dstPtr](size_t i) {
float val = ie::PrecisionUtils::f16tof32(srcPtr[i]) + _epsilon;
val = 1.0f / std::sqrt(val);
dstPtr[i] = ie::PrecisionUtils::f32tof16(val);
@ -58,7 +58,7 @@ void BatchNormalizationBiasesContent::fillTempBuf(void* tempBuf) const {

auto dstPtr = static_cast<fp16_t*>(tempBuf);

ie::parallel_for(_origContent->byteSize() / sizeof(fp16_t), [origPtr, weightsPtr, dstPtr](int i) {
ie::parallel_for(_origContent->byteSize() / sizeof(fp16_t), [origPtr, weightsPtr, dstPtr](size_t i) {
// TODO : need to be extracted from IE layer.
float beta = 0.0f;
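The fillTempBuf() hunks change the parallel_for lambda parameter from int to size_t so the induction variable matches the unsigned range argument (byteSize() / sizeof(fp16_t)). A minimal stand-in for the loop runner, since ie::parallel_for's exact signature is not shown in this diff:

#include <cstddef>
#include <functional>

// Hypothetical stand-in: runs body(i) for i in [0, count). With an
// unsigned count, an 'int i' lambda parameter would force a narrowing
// conversion on every invocation.
void forEachIndex(std::size_t count,
                  const std::function<void(std::size_t)>& body) {
    for (std::size_t i = 0; i < count; ++i)
        body(i);
}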
@ -64,8 +64,8 @@ DepthDeconvolutionCHWWeightsContent::DepthDeconvolutionCHWWeightsContent(
void DepthDeconvolutionCHWWeightsContent::fillTempBuf(void* tempBuf) const {
VPU_PROFILE(DepthDeconvolutionCHWWeightsContent);
depthDeconvolutionRelayoutCHW(
_origContent->get<fp16_t>(), _origContent->byteSize() / sizeof(fp16_t),
static_cast<fp16_t*>(tempBuf), _origContent->byteSize() / sizeof(fp16_t),
_origContent->get<fp16_t>(), static_cast<int>(_origContent->byteSize() / sizeof(fp16_t)),
static_cast<fp16_t*>(tempBuf), static_cast<int>(_origContent->byteSize() / sizeof(fp16_t)),
_KX, _KY, _channels);
}

@ -105,8 +105,8 @@ DepthDeconvolutionHWCWeightsContent::DepthDeconvolutionHWCWeightsContent(
void DepthDeconvolutionHWCWeightsContent::fillTempBuf(void* tempBuf) const {
VPU_PROFILE(DepthDeconvolutionHWCWeightsContent);
depthDeconvolutionRelayoutHWC(
_origContent->get<fp16_t>(), _origContent->byteSize() / sizeof(fp16_t),
static_cast<fp16_t*>(tempBuf), _origContent->byteSize() / sizeof(fp16_t),
_origContent->get<fp16_t>(), static_cast<int>(_origContent->byteSize() / sizeof(fp16_t)),
static_cast<fp16_t*>(tempBuf), static_cast<int>(_origContent->byteSize() / sizeof(fp16_t)),
_KX, _KY, _channels);
}

@ -65,7 +65,7 @@ void HwConstData::fillTempBuf(void* outBuf) const {
const auto inChannelStride = K * kernelStride;
const auto outerStride = IC * inChannelStride;

ie::parallel_for(numOC, [=](int oc) {
ie::parallel_for(numOC, [=](size_t oc) {
const auto ocSlice = oc;
oc += startOC;

@ -91,14 +91,14 @@ void PriorBoxContent::fillTempBuf(void* tempBuf) const {
if (!_densitys.empty()) {
for (const auto& _density : _densitys) {
if (!_fixed_ratios.empty()) {
_num_priors += _fixed_ratios.size() * (static_cast<int>(pow(_density, 2)) - 1);
_num_priors += static_cast<int>(_fixed_ratios.size()) * (static_cast<int>(pow(_density, 2)) - 1);
} else {
_num_priors += _aspect_ratios.size() * (static_cast<int>(pow(_density, 2)) - 1);
_num_priors += static_cast<int>(_aspect_ratios.size()) * (static_cast<int>(pow(_density, 2)) - 1);
}
}
}

_num_priors += _max_sizes.size();
_num_priors += static_cast<int>(_max_sizes.size());

auto W = _inDesc0.dim(Dim::W);
auto H = _inDesc0.dim(Dim::H);
@ -317,7 +317,7 @@ void PriorBoxClusteredContent::fillTempBuf(void* tempBuf) const {
auto num_priors_ = widths_.size();

if (variance_.empty()) {
variance_.push_back(0.1);
variance_.push_back(0.1f);
}

auto layer_width = _inDesc0.dim(Dim::W);
@ -29,7 +29,7 @@ void ScaledContent::fillTempBuf(void *tempBuf) const {

auto dstPtr = static_cast<fp16_t*>(tempBuf);

ie::parallel_for(totalSize, [this, srcPtr, dstPtr](int i) {
ie::parallel_for(totalSize, [this, srcPtr, dstPtr](size_t i) {
dstPtr[i] = ie::PrecisionUtils::f32tof16(ie::PrecisionUtils::f16tof32(srcPtr[i]) * _factor);
});
}

@ -103,7 +103,7 @@ DimsOrder DimsOrder::fromCode(StorageOrder64 code) {
return out;
}

DimsOrder DimsOrder::fromNumDims(int numDims) {
DimsOrder DimsOrder::fromNumDims(size_t numDims) {
static const StorageOrder64 FULL_ORDER_DEFAULT =
maskOrder(static_cast<StorageOrder64>(0x0fedcba987654321ull), MAX_DIMS_64);

@ -118,7 +118,7 @@ DimsOrder DimsOrder::fromNumDims(int numDims) {
} else if (numDims == 5) {
return DimsOrder::NCDHW;
} else {
return DimsOrder::fromCode(maskOrder(FULL_ORDER_DEFAULT, numDims));
return DimsOrder::fromCode(maskOrder(FULL_ORDER_DEFAULT, static_cast<int>(numDims)));
}
}
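Widening DimsOrder::fromNumDims(int) to take size_t moves the narrowing to the one place an int is genuinely required (the maskOrder call) instead of forcing every caller that passes a container size to cast. A minimal sketch of the call-site effect, with hypothetical names:

#include <cstddef>
#include <vector>

// After the change: container sizes flow in directly, and the single
// internal narrowing happens where the value is known to be tiny.
void fromNumDims(std::size_t numDims) {
    const int masked = static_cast<int>(numDims);  // bounded by MAX_DIMS
    (void)masked;
}

void caller(const std::vector<int>& ieDims) {
    fromNumDims(ieDims.size());  // no C4267 at the call site
}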
@ -300,7 +300,7 @@ void printTo(std::ostream& os, DimsOrder order) {
}

for (; i >= 0; i--) {
auto curDim = (code >> (i * 4)) & 0xF;
auto curDim = static_cast<int>((code >> (i * 4)) & 0xF);

auto it = DIM_NAMES.find(curDim);
if (it != DIM_NAMES.end()) {
@ -343,7 +343,7 @@ DataDesc::DataDesc(const ie::TensorDesc& ieDesc) {
// IE dims are always in ChannelMajor Layout, so we need to use fromNumDims() layout to perform permutation.
const auto perm = DimsOrder::fromNumDims(ieDims.size()).toPermutation();
for (size_t i = 0; i < perm.size(); ++i) {
_dims.set(perm[i], ieDims[ieDims.size() - 1 - i]);
_dims.set(perm[i], static_cast<int>(ieDims[ieDims.size() - 1 - i]));
}
}

@ -510,7 +510,7 @@ StridesRequirement StridesRequirement::fixed(const std::vector<int>& strides, co
};

for (const auto& dim : dimOrderVec) {
const auto idx = dimToIeInd(dim, dims.size());
const auto idx = dimToIeInd(dim, static_cast<int>(dims.size()));
setStride(dim, strides[idx]);
}

@ -576,7 +576,7 @@ DimValues calcStrides(const DataDesc& desc, const StridesRequirement& reqs) {

for (std::size_t i = 1; i < perm.size(); i++) {
strides.set(perm[i], strides[perm[i - 1]] * desc.dim(perm[i - 1]));
strides.set(perm[i], applyStrideRequirement(strides[perm[i]], i, reqs));
strides.set(perm[i], applyStrideRequirement(strides[perm[i]], static_cast<int>(i), reqs));
}
}

@ -282,7 +282,7 @@ void FrontEnd::parseConcat(
"{} layer with name {} must be able to convert to ie::ConcatLayer",
layer->type, layer->name);

VPU_THROW_UNLESS(concat->_axis < output->desc().numDims(),
VPU_THROW_UNLESS(static_cast<int>(concat->_axis) < output->desc().numDims(),
"{} layer with name {} must have axis attribute no grater than number of "
"dimensions, actually provided axis = {}, numDims = {}",
layer->type, layer->name, concat->_axis, output->desc().numDims());
@ -316,7 +316,7 @@ private:
}

static void append_pv(BlobSerializer& serializer, const PV& pv) {
int ndims = pv.size();
int ndims = static_cast<int>(pv.size());
append_i(serializer, ndims);
for (int i = 0; i < ndims; i++) {
append_i(serializer, pv[i]);
@ -345,7 +345,7 @@ void parseConvND(const Model & model,
VPU_THROW_UNLESS(convLayer != nullptr, "failed dynamic cast to ConvolutionLayer");

auto kernelShape = convLayer->_kernel;
int kernelNDims = kernelShape.size();
int kernelNDims = static_cast<int>(kernelShape.size());
// Yet, only 3D kernel supported (NCDHW)
// Later, if support 4D, 5D, etc, please
// check if (kernelNDims >= 3), so that
@ -196,7 +196,7 @@ private:
IE_ASSERT(origData != nullptr);

auto dims = origData->getDims();
int ndims = dims.size();
auto ndims = dims.size();

if (ndims > 4) {
VPU_THROW_UNLESS(dim.length() == 1,
@ -477,7 +477,7 @@ void FrontEnd::parseCustom(const Model& model, const ie::CNNLayerPtr& layer, con
stage->attrs().set("inputOrders", std::move(inputOrders));
stage->attrs().set("outputOrders", std::move(outputOrders));

int buffer_size = kernel.kernelBinary().length() + 1024;
auto buffer_size = kernel.kernelBinary().length() + 1024;
model->addTempBuffer(stage, buffer_size);
}
}

@ -286,14 +286,14 @@ void FrontEnd::parseEltwise(const Model& model, const ie::CNNLayerPtr& _layer, c
if (type == DataType::FP16) {
stage->attrs().set<float>("coeff1", layer->coeff[0]);
} else {
stage->attrs().set<std::int32_t>("coeff1", layer->coeff[0]);
stage->attrs().set<std::int32_t>("coeff1", static_cast<int32_t>(layer->coeff[0]));
}
}
if (layer->coeff.size() > 1 || subCoefficient != 1) {
if (type == DataType::FP16) {
stage->attrs().set<float>("coeff2", subCoefficient * (layer->coeff.size() > 1 ? layer->coeff[1] : 1.0f));
} else {
stage->attrs().set<std::int32_t>("coeff2", subCoefficient * (layer->coeff.size() > 1 ? layer->coeff[1] : 1));
stage->attrs().set<std::int32_t>("coeff2", subCoefficient * (layer->coeff.size() > 1 ? static_cast<int32_t>(layer->coeff[1]) : 1));
}
}
}
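parseEltwise() makes the float-to-int32_t coefficient conversion explicit on the integer path; note the cast lands on layer->coeff[1], a float, while subCoefficient is already integral. A minimal sketch with hypothetical inputs:

#include <cstdint>
#include <vector>

// IE layers carry coefficients as floats even when the stage
// computes in an integer type (hypothetical simplification).
int32_t integerCoeff2(const std::vector<float>& coeff, int32_t subCoefficient) {
    return subCoefficient *
           (coeff.size() > 1 ? static_cast<int32_t>(coeff[1]) : 1);
}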
@ -94,9 +94,9 @@ void FrontEnd::parseExpDetectionOutput(const Model& model, const ie::CNNLayerPtr
params.max_delta_log_wh = layer->GetParamAsFloat("max_delta_log_wh", 0.0f);
params.nms_threshold = layer->GetParamAsFloat("nms_threshold", 0.0f);
params.score_threshold = layer->GetParamAsFloat("score_threshold", 0.0f);
params.max_detections_per_image = layer->GetParamAsFloat("max_detections_per_image", 0);
params.num_classes = layer->GetParamAsFloat("num_classes", 0);
params.post_nms_count = layer->GetParamAsFloat("post_nms_count", 0);
params.max_detections_per_image = layer->GetParamAsInt("max_detections_per_image", 0);
params.num_classes = layer->GetParamAsInt("num_classes", 0);
params.post_nms_count = layer->GetParamAsInt("post_nms_count", 0);
params.class_agnostic_box_regression = layer->GetParamAsFloat("class_agnostic_box_regression", 0) ? 1 : 0;

auto inputBoxes = inputs[0]; // [numRois][4]
@ -59,7 +59,7 @@ void FrontEnd::parseFullyConnected(const Model& model, const ie::CNNLayerPtr& _l
std::tie(weights, biases) = getWeightsAndBiases(model, layer);

IE_ASSERT(weights->desc().totalDimSize() >=
input->desc().totalDimSize() / input->desc().dim(Dim::N, 1) * layer->_out_num);
input->desc().totalDimSize() / input->desc().dim(Dim::N, 1) * static_cast<int>(layer->_out_num));
weights = model->duplicateData(
weights,
"@fc",
@ -28,8 +28,8 @@ protected:
void propagateDataOrderImpl(StageDataInfo<DimsOrder>& orderInfo) override {
const auto& endCopies = attrs().getOrDefault<IterationComponents>("end-iteration-components", {});
for (const auto& iteration : endCopies) {
const auto& dstIdx = iteration.first.first;
const auto& srcIdx = iteration.second;
const auto& dstIdx = static_cast<int>(iteration.first.first);
const auto& srcIdx = static_cast<int>(iteration.second);
orderInfo.setOutput(outputEdge(dstIdx), inputEdge(srcIdx)->input()->desc().dimsOrder());
}

@ -71,7 +71,7 @@ protected:
for (const auto& component : endCopies) {
const auto& rule = component.first.second;
auto axis = rule.axis;
auto axisInd = static_cast<int32_t>(output(component.first.first)->desc().dimsOrder().dimInd(axis));
auto axisInd = static_cast<int32_t>(output(static_cast<int>(component.first.first))->desc().dimsOrder().dimInd(axis));

serializer.append(axisInd);
serializer.append(rule.start);
@ -89,8 +89,8 @@ protected:
}

for (const auto& iteration : endCopies) {
output(iteration.first.first)->serializeBuffer(serializer);
input(iteration.second)->serializeBuffer(serializer);
output(static_cast<int>(iteration.first.first))->serializeBuffer(serializer);
input(static_cast<int>(iteration.second))->serializeBuffer(serializer);
}
}
};
@ -60,7 +60,7 @@ protected:
for (const auto& component : startCopies) {
const auto& rule = component.first.second;
auto axis = rule.axis;
auto axisInd = static_cast<int32_t>(input(component.first.first)->desc().dimsOrder().dimInd(axis));
auto axisInd = static_cast<int32_t>(input(static_cast<int>(component.first.first))->desc().dimsOrder().dimInd(axis));

serializer.append(axisInd);
serializer.append(rule.start);
@ -78,8 +78,8 @@ protected:
}

for (const auto& iteration : startCopies) {
input(iteration.first.first)->serializeBuffer(serializer);
output(iteration.second)->serializeBuffer(serializer);
input(static_cast<int>(iteration.first.first))->serializeBuffer(serializer);
output(static_cast<int>(iteration.second))->serializeBuffer(serializer);
}
}
};
@ -141,7 +141,7 @@ ie::CNNNetwork loadSubNetwork(
ie::SizeVector inputShape;
std::tie(inputName, inputShape) = *inputShapes.begin();
if (zdir_batchsize != nullptr)
*zdir_batchsize = inputShape[1]/3;
*zdir_batchsize = static_cast<int>(inputShape[1]/3);
inputShape[0] = 1; // set batch size to the first input dimension
inputShape[2] = imgSize.second; // changes input height to the image one
inputShape[3] = imgSize.first; // changes input width to the image one
@ -128,7 +128,7 @@ void MyriadXHwStage::serializeParamsImpl(BlobSerializer& serializer) const {

serializer.append(checked_cast<uint32_t>(hwOpParams.opMode));

serializer.append(checked_cast<uint32_t>(hwOpParams.withPad));
serializer.append(static_cast<uint32_t>(hwOpParams.withPad));
if (hwOpParams.withPad) {
serializer.append(checked_cast<uint32_t>(hwOpParams.padMode));
}
@ -147,7 +147,7 @@ void MyriadXHwStage::serializeParamsImpl(BlobSerializer& serializer) const {
serializer.append(checked_cast<uint32_t>(hwOpParams.fcInputNum));
serializer.append(checked_cast<uint32_t>(hwOpParams.fcOutputOffset));
serializer.append(checked_cast<uint32_t>(hwOpParams.fcOutputNum));
serializer.append(checked_cast<uint32_t>(hwOpParams.fcAccum));
serializer.append(static_cast<uint32_t>(hwOpParams.fcAccum));
}

if (hwOpParams.opType != HwOpType::FC) {
@ -161,20 +161,20 @@ void MyriadXHwStage::serializeParamsImpl(BlobSerializer& serializer) const {
serializer.append(checked_cast<uint32_t>(hwOpParams.poolKernelHeight));
}

serializer.append(checked_cast<uint32_t>(hwOpParams.withReLU));
serializer.append(static_cast<uint32_t>(hwOpParams.withReLU));
if (hwOpParams.withReLU) {
serializer.append(checked_cast<uint32_t>(hwOpParams.t0));
serializer.append(checked_cast<uint32_t>(hwOpParams.a0));
serializer.append(checked_cast<uint32_t>(hwOpParams.a1));
}

serializer.append(checked_cast<uint32_t>(hwOpParams.withClamp));
serializer.append(static_cast<uint32_t>(hwOpParams.withClamp));
if (hwOpParams.withClamp) {
serializer.append(checked_cast<float>(hwOpParams.clampMaxVal));
}

serializer.append(checked_cast<uint32_t>(hwOpParams.reuseData));
serializer.append(checked_cast<uint32_t>(hwOpParams.reuseCoeff));
serializer.append(static_cast<uint32_t>(hwOpParams.reuseData));
serializer.append(static_cast<uint32_t>(hwOpParams.reuseCoeff));
}

serializer.append(checked_cast<uint32_t>(injectedStage() == nullptr ? 0 : 1));
@ -64,7 +64,7 @@ private:
auto beta = attrs().get<float>("beta");

serializer.append(static_cast<uint32_t>(size));
serializer.append(ie::PrecisionUtils::f32tof16(k));
serializer.append(ie::PrecisionUtils::f32tof16(static_cast<float>(k))); // why float?
serializer.append(ie::PrecisionUtils::f32tof16(alpha));
serializer.append(ie::PrecisionUtils::f32tof16(beta));
serializer.append(ie::PrecisionUtils::f32tof16(0)); // for alignment
@ -360,7 +360,7 @@ private:
}

static void append_pv(BlobSerializer& serializer, const PV& pv) {
int ndims = pv.size();
int ndims = static_cast<int>(pv.size());
append_i(serializer, ndims);
for (int i = 0; i < ndims; i++) {
append_i(serializer, pv[i]);
@ -387,7 +387,7 @@ void parsePoolND(const Model & model,
VPU_THROW_UNLESS(poolLayer != nullptr, "failed dynamic cast to PoolingLayer");

auto kernel_shape = poolLayer->_kernel;
int kernel_ndims = kernel_shape.size();
int kernel_ndims = static_cast<int>(kernel_shape.size());
// Yet, only 3D kernel supported (NCDHW)
// Later, if support 4D, 5D, etc, please
// check if (kernelNDims >= 3), so that
@ -178,7 +178,7 @@ void FrontEnd::parseProposal(const Model& model, const ie::CNNLayerPtr& layer, c
stage->attrs().set("scales", scales);
stage->attrs().set("ratios", ratios);

int number_of_anchors = ratios.size() * scales.size();
int number_of_anchors = static_cast<int>(ratios.size() * scales.size());

// Allocate slightly larger buffer than needed for handling remnant in distribution among SHAVEs
int buffer_size = (inputs[0]->desc().dim(Dim::H) + 16) * inputs[0]->desc().dim(Dim::W) * number_of_anchors * 5 * sizeof(float);
@ -189,8 +189,8 @@ void FrontEnd::parseProposal(const Model& model, const ie::CNNLayerPtr& layer, c
};
const int num_proposals = number_of_anchors * inputs[0]->desc().dim(Dim::H) * inputs[0]->desc().dim(Dim::W);
const int pre_nms_topn = std::min(num_proposals, stage->attrs().get<int>("pre_nms_topn"));
const int required_cmx_size_per_shave = std::max(2 * (1 + pre_nms_topn) * sizeof(SortItem),
(1 + pre_nms_topn) * sizeof(SortItem) + number_of_anchors * sizeof(float));
const int required_cmx_size_per_shave = static_cast<int>(std::max(2 * (1 + pre_nms_topn) * sizeof(SortItem),
(1 + pre_nms_topn) * sizeof(SortItem) + number_of_anchors * sizeof(float)));
const auto& env = CompileEnv::get();
const int required_cmx_buffer_size = env.resources.numSHAVEs * required_cmx_size_per_shave;

@ -69,7 +69,7 @@ private:
auto irIndex = oldIndices[i];
if (irIndex < 0) {
// handle negative indices
irIndex = ndims - std::abs(irIndex);
irIndex = static_cast<int>(ndims - std::abs(irIndex));
}
VPU_THROW_UNLESS(irIndex < ndims,
"Stage {} of type {} expects input with index {} ({}) include values less than ",
@ -80,7 +80,7 @@ void FrontEnd::parseResample(const Model& model, const ie::CNNLayerPtr& layer, c
auto stage = model->addNewStage<ResampleStage>(layer->name, StageType::Resample, layer, inputs, outputs);

stage->attrs().set<bool>("antialias", layer->GetParamAsInt("antialias", 0));
stage->attrs().set<float>("factor", layer->GetParamAsInt("factor", -1.0f));
stage->attrs().set<float>("factor", layer->GetParamAsFloat("factor", -1.0f));

auto method = layer->GetParamAsString("type", "caffe.ResampleParameter.NEAREST");
if (cmp(method, "caffe.ResampleParameter.NEAREST")) {
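The parseResample() hunk is a behavior fix as much as a warning fix: GetParamAsInt("factor", -1.0f) read a floating-point attribute through the integer getter, and the float default passed to an int parameter is what MSVC flagged. Switching to GetParamAsFloat preserves fractional scale factors. A hedged sketch of the failure mode, with stand-in getters stubbed to return their defaults:

#include <string>

// Stand-ins for the IE parameter getters (hypothetical stubs).
int getParamAsInt(const std::string&, int def) { return def; }
float getParamAsFloat(const std::string&, float def) { return def; }

float resampleFactor() {
    // Before: a 0.5x resample would round through int and degrade,
    // besides the C4244 on the -1.0f default. After: the fraction
    // survives intact.
    return getParamAsFloat("factor", -1.0f);
}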
@ -88,7 +88,7 @@ private:
inputEdges().size());

// check number of outputs, without temp buffer
const int outputsNumber = outputEdges().size();
const int outputsNumber = static_cast<int>(outputEdges().size());
const int useCellState = outputsNumber >= 2;
const int outputEdgesExpected = 1
+ (useCellState ? 1 : 0)
@ -193,8 +193,8 @@ void FrontEnd::parseRNN(const Model& model, const ie::CNNLayerPtr& _layer, const
newWeightsPtr + ngates * stateSize * inputSize,

ngates,
stateSize,
inputSize);
static_cast<int>(stateSize),
static_cast<int>(inputSize));
};

auto newWeights = model->addConstData(_layer->name + "@weights", weights->desc(), generator);
@ -215,8 +215,8 @@ void FrontEnd::parseRNN(const Model& model, const ie::CNNLayerPtr& _layer, const

bool RNNForward = layer->direction == ie::RNNSequenceLayer::FWD;
stage->attrs().set<bool>("RNNForward", RNNForward);
stage->attrs().set<int>("nCells", nCells);
stage->attrs().set<int>("nBatches", nBatches);
stage->attrs().set<int>("nCells", static_cast<int>(nCells));
stage->attrs().set<int>("nBatches", static_cast<int>(nBatches));
}

void FrontEnd::parseLSTMCell(const Model& model, const ie::CNNLayerPtr& _layer, const DataVector &inputs, const DataVector &outputs) {
@ -262,8 +262,8 @@ void FrontEnd::parseLSTMCell(const Model& model, const ie::CNNLayerPtr& _layer,
newWeightsPtr,
newWeightsPtr + ngates * stateSize * inputSize,
ngates,
stateSize,
inputSize);
static_cast<int>(stateSize),
static_cast<int>(inputSize));
};

auto newWeights = model->addConstData(_layer->name + "@weights", weights->desc(), generator);
@ -306,7 +306,7 @@ void FrontEnd::parseLSTMCell(const Model& model, const ie::CNNLayerPtr& _layer,
auto stage = model->addNewStage<LSTMCellStage>(layer->name, StageType::LSTMCell, layer, stageInputs, realOutputs);
stage->attrs().set<bool>("RNNForward", true);
stage->attrs().set<int>("nCells", 1);
stage->attrs().set<int>("nBatches", nBatches);
stage->attrs().set<int>("nBatches", static_cast<int>(nBatches));
}

} // namespace vpu
@ -132,14 +132,14 @@ ncStatus_t MyriadExecutor::bootNextDevice(std::vector<DevicePtr> &devicePool,
configDevName.copy(in_deviceDesc.name, NC_MAX_NAME_SIZE - 1);
}

statusOpen = ncSetDeviceConnectTimeout(config.deviceConnectTimeout().count());
statusOpen = ncSetDeviceConnectTimeout(static_cast<int>(config.deviceConnectTimeout().count()));
if (statusOpen) {
return statusOpen;
}

ncDeviceOpenParams_t deviceOpenParams = {};
deviceOpenParams.watchdogHndl = _mvnc->watchdogHndl();
deviceOpenParams.watchdogInterval = config.watchdogInterval().count();
deviceOpenParams.watchdogInterval = static_cast<int>(config.watchdogInterval().count());
deviceOpenParams.memoryType = checked_cast<char>(config.memoryType());
deviceOpenParams.customFirmwareDirectory = dirName.c_str();
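bootNextDevice() narrows std::chrono durations down to the int the mvnc C API expects: count() returns the duration's representation type, typically long long, so the conversion must be explicit. A minimal sketch assuming millisecond-based settings and a stubbed C entry point:

#include <chrono>

// Stub standing in for the C API, which takes plain int milliseconds.
int setTimeoutMs(int ms) { return ms; }

int applyTimeout(std::chrono::milliseconds timeout) {
    // timeout.count() is a long long; passing it through unchanged
    // raises C4244 on MSVC, hence the explicit narrowing.
    return setTimeoutMs(static_cast<int>(timeout.count()));
}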
@ -308,7 +308,7 @@ void MyriadExecutor::allocateGraph(DevicePtr &device, GraphDesc &graphDesc,
const std::pair<const char*, size_t> &graphHeaderDesc,
size_t numStages, const std::string & networkName, int executors) {
VPU_PROFILE(allocateGraph);
_numStages = numStages;
_numStages = static_cast<int>(numStages);
graphDesc._name = networkName;
if (device->_deviceHandle == nullptr) {
THROW_IE_EXCEPTION << "Failed to allocate graph: MYRIAD device is not opened.";
@ -331,7 +331,7 @@ void MyriadExecutor::allocateGraph(DevicePtr &device, GraphDesc &graphDesc,
graphFileContent.data(),
static_cast<unsigned int>(graphFileContent.size()),
graphHeaderDesc.first,
graphHeaderDesc.second);
static_cast<unsigned>(graphHeaderDesc.second));
if (status != NC_OK) {
THROW_IE_EXCEPTION << "Failed to allocate graph: " << ncStatusToStr(nullptr, status);
}
@ -418,7 +418,7 @@ void MyriadExecutor::queueInference(GraphDesc &graphDesc, void *input_data, size
}

if (result_data != nullptr && result_bytes != 0) {
getResult(graphDesc, result_data, result_bytes);
getResult(graphDesc, result_data, static_cast<unsigned>(result_bytes));
}
}

@ -208,12 +208,12 @@ void MyriadInferRequest::GetResult() {
const auto& blob = (*it).second;

if (blob->getTensorDesc().getLayout() == getVpuLayout(name)) {
_executor->getResult(_graphDesc, blob->buffer(), blob->byteSize());
_executor->getResult(_graphDesc, blob->buffer(), static_cast<unsigned>(blob->byteSize()));
return;
}
}

_executor->getResult(_graphDesc, resultBuffer.data(), resultBuffer.size());
_executor->getResult(_graphDesc, resultBuffer.data(), static_cast<unsigned>(resultBuffer.size()));

for (const auto& output : _outputs) {
const auto& ieBlobName = output.first;
@ -291,6 +291,6 @@ void MyriadInferRequest::GetPerformanceCounts(std::map<std::string, InferenceEng

perfMap = vpu::parsePerformanceReport(
_stagesMetaData,
perfInfo.data(), perfInfo.size(),
perfInfo.data(), static_cast<int>(perfInfo.size()),
_config.perfReport(), _config.printReceiveTensorTime());
}
@ -347,7 +347,7 @@ int usb_read(libusb_device_handle *f, void *data, size_t size)
const int chunk_size = DEFAULT_CHUNKSZ;
while(size > 0)
{
int bt, ss = size;
int bt, ss = (int)size;
if(ss > chunk_size)
ss = chunk_size;
#if (defined(_WIN32) || defined(_WIN64))
@ -368,7 +368,7 @@ int usb_write(libusb_device_handle *f, const void *data, size_t size)
const int chunk_size = DEFAULT_CHUNKSZ;
while(size > 0)
{
int bt, ss = size;
int bt, ss = (int)size;
if(ss > chunk_size)
ss = chunk_size;
#if (defined(_WIN32) || defined(_WIN64) )
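usb_read()/usb_write() squeeze a size_t byte count into an int chunk counter; the (int)size cast is tolerable only because ss is clamped to chunk_size immediately afterwards. A sketch of the same chunking idiom in C++ that clamps before narrowing, which sidesteps the implementation-defined result of casting an oversized size_t:

#include <cstddef>

// Hypothetical chunk helper mirroring the XLink transfer loop.
int nextChunk(std::size_t remaining, int chunkSize) {
    const std::size_t chunk = static_cast<std::size_t>(chunkSize);
    return static_cast<int>(remaining < chunk ? remaining : chunk);
}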
@ -158,7 +158,7 @@ int XLinkPlatformBootFirmware(deviceDesc_t* deviceDesc, const char* firmware, si
printf("Path to your boot util is too long for the char array here!\n");
}
// Boot it
int rc = usb_boot(deviceDesc->name, firmware, length);
int rc = usb_boot(deviceDesc->name, firmware, (unsigned)length);

if(!rc) {
mvLog(MVLOG_DEBUG, "Boot successful, device address %s", deviceDesc->name);
@ -228,7 +228,7 @@ libusb_device_handle *usbLinkOpen(const char *path)
libusb_device *dev = NULL;
double waittm = seconds() + statuswaittimeout;
while(seconds() < waittm){
int size = strlen(path);
int size = (int)strlen(path);

#if (!defined(_WIN32) && !defined(_WIN64))
uint16_t bcdusb = -1;
@ -110,7 +110,7 @@ int pthread_create(pthread_t *thread, pthread_attr_t *attr,
if (attr)
{
thread->pthread_state = attr->pthread_state;
stack_size = attr->stack_size;
stack_size = (unsigned)attr->stack_size;
}

thread->handle = (HANDLE)_beginthreadex((void *)NULL, stack_size, _pthread_start_routine, thread, 0, NULL);
@ -158,7 +158,7 @@ int pcie_write(HANDLE fd, void * buf, size_t bufSize)

Overlapped.hEvent = Event;
ResetEvent(Overlapped.hEvent);
OutputCode = WriteFile(dev, buf, bufSize, NULL, &Overlapped);
OutputCode = WriteFile(dev, buf, (DWORD)bufSize, NULL, &Overlapped);

if (OutputCode == FALSE) {
if (GetLastError() == ERROR_IO_PENDING) {
@ -235,7 +235,7 @@ int pcie_read(HANDLE fd, void * buf, size_t bufSize)

Overlapped.hEvent = Event;
ResetEvent(Overlapped.hEvent);
OutputCode = ReadFile(dev, buf, bufSize, NULL, &Overlapped);
OutputCode = ReadFile(dev, buf, (DWORD)bufSize, NULL, &Overlapped);

if (OutputCode == FALSE) {
if (GetLastError() == ERROR_IO_PENDING) {
@ -583,7 +583,7 @@ pcieHostError_t pcie_boot_device(HANDLE fd, const char *buffer, size_t length)

bResult = DeviceIoControl(fd, // device to be queried
MXLK_BOOT_DEV, // operation to perform
(void*)buffer, length,
(void*)buffer, (DWORD)length,
&output_buffer, sizeof(output_buffer), // output buffer
&junk, // # bytes returned
(LPOVERLAPPED) NULL); // synchronous I/O
@ -544,7 +544,7 @@ static int wait_findopen(const char *device_address, int timeout, libusb_device
for(;;)
{
highres_gettime(&t1);
int addr_size = strlen(device_address);
int addr_size = (int)strlen(device_address);
#if (!defined(_WIN32) && !defined(_WIN64) )
rc = usb_find_device_with_bcd(0, (char*)device_address, addr_size, (void**)dev,
DEFAULT_VID, get_pid_by_name(device_address), bcdusb);
@ -91,7 +91,9 @@ static int global_lock_fd = -1;


// To suppress warning in the macro below
#if defined __GNUC__ || defined __clang__
#pragma GCC diagnostic ignored "-Wformat-extra-args"
#endif

/**
* @brief The macro checks a stream id passed to it
@ -286,7 +288,7 @@ static void resetAll()

// Try to reboot them
int i;
for (i = 0; i < stalled_count; ++i) {
for (i = 0; i < (int)stalled_count; ++i) {
mvLog(MVLOG_DEBUG, "Found stalled device %s", stalledDevices[i].name);

XLinkHandler_t* handler = calloc(1, sizeof(XLinkHandler_t));
@ -388,7 +390,7 @@ static char getPathSeparator() {
*/

static void addEndPathSeparator(char* buffer, const int buffer_length) {
const int filePathLen = strnlen(buffer, buffer_length);
const int filePathLen = (int)strnlen(buffer, buffer_length);
if ((filePathLen > 1) && (filePathLen < buffer_length - 1) &&
buffer[filePathLen - 1] != getPathSeparator()) {
buffer[filePathLen] = getPathSeparator();
@ -969,9 +971,9 @@ ncStatus_t ncDeviceOpen(struct ncDeviceHandle_t **deviceHandlePtr,
device_disappear = 1;
}
int i, j;
for (i = 0; i < numberOfDevicesAfterBoot; ++i) {
for (i = 0; i < (int)numberOfDevicesAfterBoot; ++i) {
int found_in_before_boot_list = 0;
for (j = 0; j < numberOfDevicesBeforeBoot; ++j) {
for (j = 0; j < (int)numberOfDevicesBeforeBoot; ++j) {
if(strcmp(afterBootDevices[i].name, beforeBootDevices[j].name) == 0) {
found_in_before_boot_list = 1;
}
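The mvnc hunks keep the C code's signed loop indices and instead cast the unsigned device counts inside the conditions. That is reasonable here because the counts are bounded by NC_MAX_DEVICES, far below INT_MAX. A minimal sketch of the pattern:

#include <cstdio>

void listDevices(const char* const* names, unsigned count) {
    // Cast once in the condition: 'i' stays signed like the rest of
    // the surrounding C code, and count is known to be small.
    for (int i = 0; i < (int)count; ++i)
        std::printf("%s\n", names[i]);
}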
@ -1183,7 +1185,7 @@ ncStatus_t ncAvailableDevices(struct ncDeviceDescr_t *deviceDescrPtr,
XLinkFindAllSuitableDevices(
X_LINK_UNBOOTED, in_deviceDsc, deviceDescArray, NC_MAX_DEVICES, &amountOfFoundDevices);
int i;
for (i = 0; i < amountOfFoundDevices; ++i) {
for (i = 0; i < (int)amountOfFoundDevices; ++i) {
copyXLinkDeviceDescrToNc(&deviceDescArray[i], &deviceDescrPtr[i]);
}

@ -1834,9 +1836,9 @@ ncStatus_t ncDeviceClose(struct ncDeviceHandle_t **deviceHandlePtr, WatchdogHndl
booted_disappeared = 1;
}
int i, j;
for (i = 0; i < foundDevicesAfterReset; ++i) {
for (i = 0; i < (int)foundDevicesAfterReset; ++i) {
int found_in_before_reset_list = 0;
for (j = 0; j < foundDevicesBeforeReset; ++j) {
for (j = 0; j < (int)foundDevicesBeforeReset; ++j) {
if(strcmp(beforeResetDevices[i].name, afterResetDevices[j].name) == 0) {
found_in_before_reset_list = 1;
}
@ -2008,7 +2010,7 @@ ncStatus_t ncGraphAllocate(struct ncDeviceHandle_t * deviceHandle,
g->id = graphIdCount++;
streamId_t streamId;

if (g->executors_number > d->dev_attr.max_executors) {
if (g->executors_number > (int)d->dev_attr.max_executors) {
mvLog(MVLOG_ERROR, "Executors number is greater than max allowed!");
unlockAllInferences();
return NC_INVALID_PARAMETERS;
@ -2473,7 +2475,7 @@ static ncStatus_t getGraphOption(struct _graphPrivate_t *g,
break;
}
case NC_RW_GRAPH_EXECUTORS_NUM:{
int size = sizeof(int);
unsigned size = sizeof(int);
if (*dataLength < size) {
mvLog(MVLOG_ERROR,
"data length of data (%d) is smaller that required (%d)!\n",
@ -2637,7 +2639,7 @@ static ncStatus_t getDeviceOption(struct _devicePrivate_t *d,
if (rc) {
return rc;
}
d->throttle_happened = d->thermal_stats[0];
d->throttle_happened = (int)d->thermal_stats[0];
*(int *) data = d->throttle_happened;
*dataLength = sizeof(int);
break;
@ -2654,10 +2656,10 @@ static ncStatus_t getDeviceOption(struct _devicePrivate_t *d,
mvLog(MVLOG_ERROR,
"data length of output buffer (%d) is smaller that required (%zu)!\n",
*dataLength, strlen(d->dev_addr) + 1);
*dataLength = strlen(d->dev_addr) + 1;
*dataLength = (unsigned)(strlen(d->dev_addr) + 1);
return NC_INVALID_DATA_LENGTH;
}
*dataLength = strlen(d->dev_addr) + 1;
*dataLength = (unsigned)(strlen(d->dev_addr) + 1);
mv_strncpy((char *) data, *dataLength, d->dev_addr, *dataLength - 1);
break;
case NC_RO_DEVICE_PLATFORM:
@ -3256,7 +3258,7 @@ ncStatus_t ncFifoReadElem(struct ncFifoHandle_t * fifoHandle, void *outputData,
return NC_UNAUTHORIZED;
}

if (*outputDataLen < handle->datasize) {
if (*outputDataLen < (unsigned)handle->datasize) {
mvLog(MVLOG_ERROR,
"This datasize in tensorDesc (%d) is smaller than required (%d)!",
*outputDataLen, handle->datasize);
@ -274,7 +274,7 @@ ncStatus_t bootDevice(deviceDesc_t* deviceDescToBoot,
}
}

XLinkError_t rc = XLinkBootFirmware(deviceDescToBoot, firmware, length);
XLinkError_t rc = XLinkBootFirmware(deviceDescToBoot, firmware, (unsigned long)length);
free(firmware);

if(rc) {