Unused variables (#3963)
* Added -Wunused-variable flag
* Fixes for clang compiler
* Removed wrong -Wno-error from protobuf compilation
* More fixes
This commit is contained in:
parent
86bf2c2bba
commit
9cfe909e1e
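Most of the diffs below apply one of two idioms to satisfy the new flag: delete the dead local outright, or keep the statement for its side effect and explicitly discard the result with a cast to void. A minimal standalone sketch of the second idiom, modeled on the device-ID validation hunk below (illustrative code, not part of the patch):

    #include <stdexcept>
    #include <string>

    void validateDeviceId(const std::string &val) {
        // std::stoi is called only for its throwing side effect; the (void)
        // cast marks the value as deliberately unused, so -Wunused-variable
        // stays quiet while the validation behavior is unchanged.
        int val_i = std::stoi(val);
        (void)val_i;
    }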
@@ -271,6 +271,7 @@ else()
ie_add_compiler_flags(-fdiagnostics-show-option)
ie_add_compiler_flags(-Wundef)
ie_add_compiler_flags(-Wreturn-type)
ie_add_compiler_flags(-Wunused-variable)

# Disable noisy warnings
@@ -3,6 +3,10 @@
#

if(NOT ENABLE_DOCKER)
if(CMAKE_COMPILER_IS_GNUCXX)
ie_add_compiler_flags(-Wall)
endif()

add_subdirectory(snippets)

# Detect nGraph
@@ -54,4 +54,12 @@ if(NGRAPH_ONNX_IMPORT_ENABLE)
target_link_libraries(${TARGET_NAME} PRIVATE onnx_importer)
endif()

target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api ngraph inference_engine_transformations)
if(NOT MSVC)
target_compile_options(${TARGET_NAME} PRIVATE -Wno-unused-variable)
if(CMAKE_COMPILER_IS_GNUCXX)
target_compile_options(${TARGET_NAME} PRIVATE -Wno-unused-but-set-variable)
endif()
endif()

target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api
ngraph inference_engine_transformations)
@@ -8,7 +8,7 @@ int main() {
auto cnnNetwork = ie.ReadNetwork("sample.xml");
std::string allDevices = "MULTI:";
std::vector<std::string> myriadDevices = ie.GetMetric("MYRIAD", METRIC_KEY(AVAILABLE_DEVICES));
for (int i = 0; i < myriadDevices.size(); ++i) {
for (size_t i = 0; i < myriadDevices.size(); ++i) {
allDevices += std::string("MYRIAD.")
+ myriadDevices[i]
+ std::string(i < (myriadDevices.size() -1) ? "," : "");
@@ -11,6 +11,10 @@ set(IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR ${InferenceEngineTemplatePlugin_SOURCE_DI

find_package(InferenceEngineDeveloperPackage REQUIRED)

if(CMAKE_COMPILER_IS_GNUCXX)
ie_add_compiler_flags(-Wall)
endif()

add_subdirectory(src)

if(ENABLE_TESTS)
@@ -54,6 +54,13 @@ find_package(IEDevScripts REQUIRED
NO_CMAKE_FIND_ROOT_PATH
NO_DEFAULT_PATH)

if(NOT MSVC)
ie_add_compiler_flags(-Wno-error=unused-variable)
if(CMAKE_COMPILER_IS_GNUCXX)
ie_add_compiler_flags(-Wno-error=unused-but-set-variable)
endif()
endif()

# Don't treat deprecated API warnings as errors in 3rd party apps
ie_deprecated_no_errors()
@@ -15,10 +15,6 @@ static const char* model_message = "Required. Path to an .xml file with a traine
/// @brief message for images argument
static const char *image_message = "Required. Path to one or more .bmp images.";

/// @brief message for plugin argument
static const char *plugin_message = "Plugin name. For example MKLDNNPlugin. If this parameter is pointed, " \
"the sample will look for this plugin only";

/// @brief message for assigning cnn calculation to device
static const char *target_device_message = "Optional. Specify the target device to infer on (the list of available devices is shown below). " \
"Default value is CPU. Use \"-d HETERO:<comma-separated_devices_list>\" format to specify HETERO plugin. " \
@@ -76,6 +76,8 @@ else()
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") #treating warnings as errors
endif()

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall")
if (APPLE)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-command-line-argument")
elseif(UNIX)
@@ -116,6 +118,10 @@ set (BUILD_TESTING OFF)

if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/gflags")
function(add_gflags)
if(NOT WIN32)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-all")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-all")
endif()
set(BUILD_SHARED_LIBS OFF)
add_subdirectory(thirdparty/gflags EXCLUDE_FROM_ALL)
set_target_properties(gflags_nothreads_static PROPERTIES FOLDER thirdparty)
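A note on the gflags hunk above: wrapping the flag edits in function(add_gflags) relies on CMake functions introducing their own variable scope, so the relaxed -Wno-all settings for CMAKE_CXX_FLAGS and CMAKE_C_FLAGS stay confined to the third-party gflags build and never leak into the project-wide -Wall/-Werror configuration.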
@@ -191,6 +191,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
// Validate if passed value is a positive number.
try {
int val_i = std::stoi(val);
(void)val_i;
} catch (const std::exception&) {
THROW_IE_EXCEPTION << "Wrong value for property key " << PluginConfigParams::KEY_DEVICE_ID
<< ". DeviceIDs are only represented by positive numbers";
@@ -265,7 +265,6 @@ InferenceEngine::CNNNetwork CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(std::ve
ngraph::OutputVector inputs;

auto& deps = prim_info.c_dependencies;
size_t in_size = deps.size();

// Decrease expected dependencies count if there is a const input without original id in the IR
for (auto& dep : deps) {
@@ -321,7 +320,6 @@ InferenceEngine::CNNNetwork CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(std::ve
};

auto create_ngraph_node = [&](const cldnn::primitive_info& prim_info) {
const auto& deps = prim_info.c_dependencies;
const auto& user_ids = prim_info.c_users;
size_t output_size = user_ids.size();
bool is_output = user_ids.empty();
@@ -234,7 +234,6 @@ void CreateDeformableConvolutionOp(Program& p, const std::shared_ptr<ngraph::op:

auto params = GetConvolutionParameters(op->get_pads_begin(), op->get_dilations(), op->get_strides(), op->get_group());
auto outDims = op->get_output_shape(0);
auto outPrecision = op->get_output_element_type(0);

std::vector<cldnn::primitive_id> weights = {inputs[2]};
if (params.groups > 1) {
@@ -302,7 +301,6 @@ void CreateBinaryConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1:

auto params = GetConvolutionParameters(op->get_pads_begin(), op->get_dilations(), op->get_strides(), 1);
auto outDims = op->get_output_shape(0);
auto outPrecision = op->get_output_element_type(0);

std::vector<cldnn::primitive_id> weights = {inputs[1]};
cldnn::data_types calc_precision = DataTypeFromPrecision(op->get_output_element_type(0));
@@ -71,7 +71,6 @@ void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::MatMul>& o
THROW_IE_EXCEPTION << "MatMul " << op->get_friendly_name() << " shapes are inconsistent.";
}
size_t K = *(shape_a_aligned.end() - 1);
size_t O = *(shape_b_aligned.end() - 1);

auto inputName = inputPrimitives[0];
auto weightsName = inputPrimitives[1];
@@ -59,11 +59,6 @@ void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_ptr<ngrap
auto outputIndices = op->get_output_shape(0)[0];

auto boxesShape = op->get_input_shape(0);
int32_t num_batches = boxesShape.at(0);
int32_t num_boxes = boxesShape.at(1);

auto scoresShape = op->get_input_shape(1);
int32_t num_classes = scoresShape.at(1);

std::size_t num_output = op->get_output_size();

@@ -226,7 +226,6 @@ void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v1::Stri
}


const size_t ods = crop_shape.size();
cldnn::tensor refSize = CldnnTensorFromIEDims(crop_shape);
cldnn::tensor offSize = CldnnTensorFromIEDims(offset, 0);

57 inference-engine/src/gna_plugin/backend/dnn_types.cpp Normal file
@@ -0,0 +1,57 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "dnn_types.h"

const char *intel_dnn_activation_name[kActNumType] = {
    "kActNone",
    "kActSigmoid",
    "kActTanh",
    "kActRelu",
    "kActLeakyRelu",
    "kActIdentity",
    "kActKaldiLstmClipping",
    "kActExp",
    "kActLog",
    "kActSign",
    "kActAbs",
    "kActNegLog",
    "kActNegHalfLog",
    "kActCustom",
    "kActSoftSign",
    "kActPow",
    "kActFakeQuantize"
};

const char *intel_dnn_softmax_name[kSoftmaxNumType] = {
    "kSoftmaxNone",
    "kSoftmaxKaldiSumGroup",
    "kSoftmaxKaldiApplyLog",
    "kSoftmaxGoogle"
};

const char* intel_dnn_operation_name[kDnnNumOp] = {
    "kDnnNullOp",
    "kDnnAffineOp",
    "kDnnDiagonalOp",
    "kDnnConvolutional1dOp",
    "kDnnConvolutional2dOp",
    "kDnnPiecewiselinearOp",
    "kDnnMaxPoolOp",
    "kDnnRecurrentOp",
    "kDnnInterleaveOp",
    "kDnnDeinterleaveOp",
    "kDnnCopyOp"
};

const char *intel_dnn_macro_operation_name[kDnnNumMacroOp] = {
    "kDnnMacroOpNone",
    "kDnnMacroOpLstm",
    "kDnnMacroOpBiLstm"
};

const char *intel_dnn_number_type_name[kDnnNumNumberType] = {
    "kDnnFloat",
    "kDnnInt"
};
@@ -71,25 +71,7 @@ struct DnnActivation {

static_assert(std::is_trivial<DnnActivation>::value, "DnnActivation is not trivial type");

static const char *intel_dnn_activation_name[kActNumType] = {
    "kActNone",
    "kActSigmoid",
    "kActTanh",
    "kActRelu",
    "kActLeakyRelu",
    "kActIdentity",
    "kActKaldiLstmClipping",
    "kActExp",
    "kActLog",
    "kActSign",
    "kActAbs",
    "kActNegLog",
    "kActNegHalfLog",
    "kActCustom",
    "kActSoftSign",
    "kActPow",
    "kActFakeQuantize"
};
extern const char *intel_dnn_activation_name[kActNumType];

typedef enum DnnSoftmaxType {
kSoftmaxNone,
@@ -99,12 +81,7 @@ typedef enum DnnSoftmaxType {
kSoftmaxNumType
} intel_dnn_softmax_type_t;

static const char *intel_dnn_softmax_name[kSoftmaxNumType] = {
    "kSoftmaxNone",
    "kSoftmaxKaldiSumGroup",
    "kSoftmaxKaldiApplyLog",
    "kSoftmaxGoogle"
};
extern const char *intel_dnn_softmax_name[kSoftmaxNumType];

typedef enum {
kDnnUnknownOrientation = 100,
@@ -128,19 +105,7 @@ typedef enum {
kDnnNumOp
} intel_dnn_operation_t;

static const char* intel_dnn_operation_name[kDnnNumOp] = {
    "kDnnNullOp",
    "kDnnAffineOp",
    "kDnnDiagonalOp",
    "kDnnConvolutional1dOp",
    "kDnnConvolutional2dOp",
    "kDnnPiecewiselinearOp",
    "kDnnMaxPoolOp",
    "kDnnRecurrentOp",
    "kDnnInterleaveOp",
    "kDnnDeinterleaveOp",
    "kDnnCopyOp"
};
extern const char* intel_dnn_operation_name[kDnnNumOp];

typedef enum {
kDnnMacroOpNone,
@@ -149,11 +114,7 @@ typedef enum {
kDnnNumMacroOp
} intel_dnn_macro_operation_t;

static const char *intel_dnn_macro_operation_name[kDnnNumMacroOp] = {
    "kDnnMacroOpNone",
    "kDnnMacroOpLstm",
    "kDnnMacroOpBiLstm"
};
extern const char *intel_dnn_macro_operation_name[kDnnNumMacroOp];

typedef enum {
kDnnFloat,
@@ -161,10 +122,7 @@ typedef enum {
kDnnNumNumberType
} intel_dnn_number_type_t;

static const char *intel_dnn_number_type_name[kDnnNumNumberType] = {
    "kDnnFloat",
    "kDnnInt"
};
extern const char *intel_dnn_number_type_name[kDnnNumNumberType];

typedef struct {
uint32_t num_bytes_per_weight;
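The dnn_types.h/dnn_types.cpp pair above is the standard cure for static arrays defined in a header: every translation unit that includes the header otherwise receives its own internal-linkage copy, and any TU that never reads a table trips -Wunused-variable (or GCC's -Wunused-but-set-variable). The shape of the change, reduced to a single table as a sketch:

    // dnn_types.h — declaration only; one symbol shared everywhere
    extern const char *intel_dnn_activation_name[kActNumType];

    // dnn_types.cpp — the single definition
    const char *intel_dnn_activation_name[kActNumType] = {
        "kActNone",
        "kActSigmoid",
        // ... remaining entries as in the full diff above
    };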
@@ -50,11 +50,6 @@ class ModelQuantizer {
IE_ASSERT(copiedNet.get() != nullptr);
copiedNet = InferenceEngine::CNNNetCopy(*copiedNet, visitor);

// TODO: probably not the best way of using dynamic cast in order to transform Precision
// one of solution is to create not copyNet overloads, that accepts 2 functors, one for layer copy
// and another one for net copy
auto rawNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(copiedNet.get());

// allow client code to access copied topology, to avoid copies if user would like to chain quantisation with
// another preprocessing
cb(copiedNet, false);

@@ -162,7 +162,6 @@ class ScaleFactorPerLayer<InferenceEngine::CNNLayer *> {

if (CNNNetHasPrevLayer(cnnLayer)) {
auto prevLayer = CNNNetPrevLayer(cnnLayer);
auto prevInfo = LayerInfo(prevLayer);
auto inputQuant = InferenceEngine::getInjectedData<QuantizedLayerParams>(prevLayer);
// locating corresponding memory layers with same ID
for (auto&& input : CNNNetGetAllInputLayers(cnnLayer)) {
@@ -572,11 +572,7 @@ void GNAGraphCompiler::finalizeConvolution2DPrimitive(InferenceEngine::CNNLayerP
uint32_t num_feature_map_rows = (in_channels * in_height * in_width) / num_feature_map_columns;

uint32_t filter_n = convolution._out_depth;
uint32_t num_columns_in = num_inputs;

uint32_t original_num_feature_map_rows = num_feature_map_rows;
uint32_t original_input_padding = num_input_padding;
uint32_t additional_padding = 0;

// if kernel padding to multiple of 8 will cause missed outputs, need to pad further
if (num_input_padding == 0) {
@@ -689,7 +685,6 @@ void GNAGraphCompiler::finalizeConvolution2DPrimitive(InferenceEngine::CNNLayerP
transposedWeights.resize(transposedWeights.size() + kernelPad);
}

const auto t = convolution._weights->byteSize();
gnamem->readonly().push_local_ptr(ptr_weights,
transposedWeights.data(),
transposedWeights.size(),
@@ -2011,6 +2006,7 @@ void GNAGraphCompiler::CreateLayerPrimitive(CNNLayerPtr layer) {
{{"LSTMCell"}, SKIP},
{{"FakeQuantize"}, CREATE(FakeQuantizePrimitive)} // TODO: fakequantize layer should be properly converted to GNA scale factors for integer case
};
(void)layersBuilder;
auto it = LayersBuilder::getStorage().find(layer->type);
if (it != LayersBuilder::getStorage().end()) {
it->second(this, layer);
@@ -1097,7 +1097,6 @@ uint32_t GNAPlugin::QueueInference(const InferenceEngine::BlobMap &inputs, Infer
}
if (CNN2DAtInput) {
auto dims = input.second->getTensorDesc().getDims();
auto layout = input.second->getTensorDesc().getLayout();
auto hwDim = dims[2] * dims[3];
auto chanelsDim = dims[1];
RotateFeatures(reinterpret_cast<uint8_t*>(inputsDesc->getPtrInputsGlobal(input.first)[idx]),
@@ -63,8 +63,6 @@ static const char softSignLayersCounter[] = "numSoftSignLayers";
* @brief helper injections of diagonal layer with certain value
*/

static const char diagonalLayerCounterName[] = "diagonalLayerCounter";

static void insertDiagonalLayerBetween(InferenceEngine::CNNLayerPtr prevLayer,
InferenceEngine::CNNLayerPtr nextLayer,
std::shared_ptr<IPassManager> passmanager,
@@ -550,13 +548,6 @@ void ReversePermutationsPass::run() {
return prev;
};

auto prevLayerSkipReshape = [&prevLayerSkipCertain](CNNLayerPtr layer) -> CNNLayerPtr {
return prevLayerSkipCertain(layer, [] (CNNLayerPtr l2) {
return LayerInfo(l2).isNonFunctional();
});
};


std::function<CNNLayerPtr(CNNLayerPtr)> nextLayerSkipReshape = [&nextLayerSkipReshape](CNNLayerPtr layer) -> CNNLayerPtr {
if (layer->outData.empty()) {
return nullptr;
@@ -1445,7 +1436,6 @@ void SubstituteScaleShiftBroadCastPass::run() {
auto batchSize = dataDims[0];
auto nElements = product(begin(dataDims), end(dataDims)) / batchSize;
auto weightsElements = scaleShift->_weights->size();
auto weightsBytes = scaleShift->_weights->byteSize();

if (!reshape_batch && nElements == weightsElements) {
continue;
@@ -1941,7 +1931,6 @@ void MoveFakeQuantizeLayerIntoQuantParamsPass :: run() {
}

float fqLevels = fqLayer.getLevels();
float scaleInput = (fqLevels - 1) / (inputRange.second[0] - inputRange.first[0]);
float scaleOutputs = (fqLevels - 1) / (outputRange.second[0] - outputRange.first[0]);

// Before FQ layer is removed, the previous layer has to be updated with its quantization data
@@ -68,7 +68,6 @@ void FP::ApplyDiagonalTransform(intel_dnn_component_t *component) {
auto transform = &component->op.affine;
int m = component->num_rows_out;
int n = component->num_columns_in;
int ldb = component->num_columns_in;
int ldc = component->num_columns_out;

auto A = reinterpret_cast<float *>(transform->ptr_weights);
@@ -74,7 +74,6 @@ double pivot_search(std::vector<pwl_t>& result,
double max_epsilon = 0.0;
double max_epsilon_prev;
double min_epsilon;
double min_epsilon2;
double sgn = (negative) ? -1.0 : 1.0;
int j;

@@ -283,8 +283,6 @@ std::shared_ptr<ngraph::Function> CNNNetworkNGraphImpl::cloneFunction(bool const
}

void CNNNetworkNGraphImpl::reshape() {
ResponseDesc desc;

// Disable reshape for generic nodes
::ngraph::op::GenericIE::DisableReshape noReshape(_ngraph_function);
reshape({});
@@ -16,10 +16,6 @@
#include <legacy/ie_layers.h>
#include "ie_layer_validators.hpp"

#ifdef __clang__
#pragma clang diagnostic ignored "-Wunused-variable"
#endif

namespace InferenceEngine {

using namespace details;
@@ -1522,7 +1522,6 @@ void MKLDNNGraphOptimizer::FuseInterpolateAndSimpleOperation(MKLDNNGraph &graph)
}

auto childNode = parentNode->getChildEdgeAt(0)->getChild();
auto interpolateNode = dynamic_cast<MKLDNNInterpolateNode*>(parentNode.get());
if (!isSutableChildNode(parentNode, childNode)) {
parent++;
continue;
@@ -32,7 +32,6 @@ inline void cpu_memcpy(void* dst, const void* src, size_t count) {
}

inline int cpu_memcpy_s(void* dst, size_t dst_size, const void* src, size_t count) {
size_t i;
if (!src ||
count > dst_size ||
count > (dst > src ? ((uintptr_t)dst - (uintptr_t)src) : ((uintptr_t)src - (uintptr_t)dst))) {
@@ -2084,7 +2084,6 @@ void MKLDNNInterpolateNode::buildTblCubic(SizeVector& srcDimPad5d, SizeVector& d
}

void MKLDNNInterpolateNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights) {
int blob_idx = 0;
mkldnn::post_ops ops;

for (auto &node : fusedWith) {
@@ -2643,7 +2642,6 @@ void MKLDNNInterpolateNode::cubicCGathered(const uint8_t *in_ptr_, uint8_t *out_
}

void MKLDNNInterpolateNode::cubicPlanar(const uint8_t *in_ptr_, uint8_t *out_ptr_, int B, int C, int IH, int IW, int OH, int OW) {
const int idxNum = 1;
int tblAdvance = 0;
int *xOrigin = static_cast<int*>(&indexTable[tblAdvance]);
tblAdvance += OW;

@@ -638,7 +638,6 @@ void MKLDNNMVNNode::createPrimitive() {
}

void MKLDNNMVNNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights) {
int blob_idx = 0;
mkldnn::post_ops ops;

for (auto &node : fusedWith) {

@@ -858,7 +858,6 @@ void MKLDNNNormalizeNode::initSupportedPrimitiveDescriptors() {
}

void MKLDNNNormalizeNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights) {
int blob_idx = 0;
mkldnn::post_ops ops;

for (auto &node : fusedWith) {

@@ -1685,7 +1685,7 @@ void MKLDNNReduceNode::reduce_BLK_concern_padding(const uint8_t *in_ptr, uint8_t
size_t ob = ReduceN ? 0 : ib; GET_PTR_N_BLK;
if (!ReduceD && ReduceH && ReduceW) {
for (size_t icb = 0; icb < ICB; icb++) {
size_t ocb = 0; GET_PTR_NC_BLK;
size_t ocb = 0;;
size_t ic = icb * blk_size;
parallel_for(ID, [&](size_t id) {
size_t od = id; GET_PTR_NCD_BASE_PTR_N_BLK;
@@ -151,7 +151,6 @@ template <typename inputType, typename outputType>
void MKLDNNROIAlignNode::executeSpecified() {
auto &srcMemory0 = getParentEdgeAt(0)->getMemory();
auto &srcMemory1 = getParentEdgeAt(1)->getMemory();
auto &srcMemory2 = getParentEdgeAt(2)->getMemory();
auto &dstMemory = getChildEdgeAt(0)->getMemory();

auto srcBlockDesc = srcMemory0.GetDescriptor().data.layout_desc.blocking;

@@ -438,7 +438,6 @@ void MKLDNNScatterUpdateNode::scatterElementsUpdate(uint8_t *indices, uint8_t *u
SizeVector srcDataDim = getParentEdgeAt(DATA_ID)->getDesc().getDims();
SizeVector updateDim = getParentEdgeAt(UPDATE_ID)->getDesc().getDims();
SizeVector indicesDim = getParentEdgeAt(INDICES_ID)->getDesc().getDims();
size_t srcRank = srcDataDim.size();
size_t updateRank = updateDim.size();

std::vector<size_t> srcBlockND = getBlockND(srcDataDim);

@@ -548,7 +548,6 @@ void V10Parser::parsePreProcess(CNNNetwork& network, const pugi::xml_node& root,
if (!meanSegmentPrecision || meanSegmentPrecision == Precision::MIXED)
THROW_IE_EXCEPTION << "mean blob defined without specifying precision.";

ResponseDesc resp;
InferenceEngine::PreProcessChannel::Ptr preProcessChannel;

int lastChanNo = -1;

@@ -19,10 +19,6 @@
#include <legacy/ie_layers.h>
#include "xml_parse_utils.h"

#ifdef __clang__
#pragma clang diagnostic ignored "-Wunused-variable"
#endif

namespace InferenceEngine {

using namespace details;

@@ -16,7 +16,6 @@ function(add_common_target TARGET_NAME STATIC_IE)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
# TODO: enable some day and fix all warnings
# target_compile_options(${TARGET_NAME} PRIVATE "-Wall")
target_compile_options(${TARGET_NAME} PRIVATE "-Werror=unused-variable")
target_compile_options(${TARGET_NAME} PRIVATE "-Werror=unused-function")
target_compile_options(${TARGET_NAME} PRIVATE "-Werror=strict-aliasing")
endif()

@@ -12,7 +12,6 @@ function(add_graph_transformer_target TARGET_NAME STATIC_IE)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
# TODO: enable some day and fix all warnings
# target_compile_options(${TARGET_NAME} PRIVATE "-Wall")
target_compile_options(${TARGET_NAME} PRIVATE "-Werror=unused-variable")
target_compile_options(${TARGET_NAME} PRIVATE "-Werror=unused-function")
target_compile_options(${TARGET_NAME} PRIVATE "-Werror=strict-aliasing")
endif()
@@ -81,7 +81,7 @@ TEST(InferRequestCPPTests, throwsOnUninitializedSetCompletionCallback) {

TEST(InferRequestCPPTests, throwsOnUninitializedCast) {
InferRequest req;
ASSERT_THROW(auto &ireq = static_cast<IInferRequest::Ptr &>(req), InferenceEngine::details::InferenceEngineException);
ASSERT_THROW((void)static_cast<IInferRequest::Ptr &>(req), InferenceEngine::details::InferenceEngineException);
}

TEST(InferRequestCPPTests, throwsOnUninitializedQueryState) {
@@ -107,7 +107,6 @@ template<typename T>
void FillBlobRandom(Blob::Ptr& inputBlob) {
srand(1);
auto inputBlobData = inputBlob->buffer().as<T*>();
unsigned int seed = RAND_MAX;
for (size_t i = 0; i < inputBlob->size(); i++) {
inputBlobData[i] = (T) (GenerateRandom(RAND_MAX) / static_cast<float>(RAND_MAX) * 100);
}

@@ -35,7 +35,6 @@ struct ReshapeMatMulTestCase {
class SmartReshapeMatMulTests : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::tuple<ReshapeMatMulTestCase>> {
public:
void SetUp() override {
const auto& parameters = GetParam();
const auto& test_case = std::get<0>(GetParam());

std::shared_ptr<ngraph::Function> ngraph;
@@ -41,12 +41,12 @@ TEST_F(CNNNetworkTests, throwsOnUninitializedGetName) {

TEST_F(CNNNetworkTests, throwsOnUninitializedCastToICNNNetwork) {
CNNNetwork network;
ASSERT_THROW(auto & net = static_cast<ICNNNetwork&>(network), InferenceEngine::details::InferenceEngineException);
ASSERT_THROW((void)static_cast<ICNNNetwork&>(network), InferenceEngine::details::InferenceEngineException);
}

TEST_F(CNNNetworkTests, throwsOnConstUninitializedCastToICNNNetwork) {
const CNNNetwork network;
ASSERT_THROW(const auto & net = static_cast<const ICNNNetwork&>(network), InferenceEngine::details::InferenceEngineException);
ASSERT_THROW((void)static_cast<const ICNNNetwork&>(network), InferenceEngine::details::InferenceEngineException);
}

TEST_F(CNNNetworkTests, throwsOnUninitializedGetFunction) {

@@ -37,7 +37,7 @@ TEST(ExecutableNetworkTests, throwsOnUninitializedExportStream) {

TEST(ExecutableNetworkTests, nothrowsOnUninitializedCast) {
ExecutableNetwork exec;
ASSERT_NO_THROW(auto &enet = static_cast<IExecutableNetwork::Ptr &>(exec));
ASSERT_NO_THROW((void)static_cast<IExecutableNetwork::Ptr &>(exec));
}

TEST(ExecutableNetworkTests, throwsOnUninitializedGetExecGraphInfo) {
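The (void)static_cast rewrites in these test hunks all address the same diagnostic: binding the result of a throwing cast to a named reference creates a local that is never read, since the statement exists only to provoke the exception, and clang flags it inside the assertion macro's expansion. Discarding the value keeps the side effect without the binding. A standalone sketch with hypothetical types (not from the patch):

    #include <typeinfo>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base {};

    void expectDerived(Base &b) {
        // Before: Derived &d = dynamic_cast<Derived&>(b);  // 'd' is never read -> warning
        // After: evaluate the cast purely for its potential std::bad_cast.
        (void)dynamic_cast<Derived&>(b);
    }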
@@ -116,7 +116,6 @@ public:
}

static std::string getTestCaseName(testing::TestParamInfo<ConcatTransformationParams> obj) {
const ngraph::element::Type precision = std::get<0>(obj.param);
const ngraph::Shape shape = std::get<1>(obj.param);
ConcatTransformationTestValues testValues = std::get<2>(obj.param);

@@ -50,9 +50,7 @@ typedef std::tuple<
class MultiplyTransformation : public LayerTransformation, public testing::WithParamInterface<MultiplyTransformationParams> {
public:
void SetUp() override {
const ngraph::element::Type precision = std::get<0>(GetParam());
const ngraph::Shape shape = std::get<1>(GetParam());
const bool broadcast = std::get<2>(GetParam());
const MultiplyTransformationTestValues testParams = std::get<3>(GetParam());

actualFunction = MultiplyFunction::get(shape, testParams.actual);
@@ -98,8 +98,8 @@ TEST_F(ParameterTests, StringParameterAsInt) {
Parameter p = "4";
ASSERT_FALSE(p.is<int>());
ASSERT_TRUE(p.is<std::string>());
ASSERT_THROW(int test = p, std::bad_cast);
ASSERT_THROW(int test = p.as<int>(), std::bad_cast);
ASSERT_THROW((void)static_cast<int>(p), std::bad_cast);
ASSERT_THROW((void)p.as<int>(), std::bad_cast);
}

TEST_F(ParameterTests, ParameterAsTensorDesc) {
@@ -259,10 +259,10 @@ TEST_F(ParameterTests, CompareParametersWithoutEqualOperator) {
Parameter parB = b;
Parameter parC = c;

ASSERT_THROW(bool equal = parA == parB, details::InferenceEngineException);
ASSERT_THROW(bool equal = parA != parB, details::InferenceEngineException);
ASSERT_THROW(bool equal = parA == parC, details::InferenceEngineException);
ASSERT_THROW(bool equal = parA != parC, details::InferenceEngineException);
ASSERT_THROW((void)(parA == parB), details::InferenceEngineException);
ASSERT_THROW((void)(parA != parB), details::InferenceEngineException);
ASSERT_THROW((void)(parA == parC), details::InferenceEngineException);
ASSERT_THROW((void)(parA != parC), details::InferenceEngineException);
}

TEST_F(ParameterTests, ParameterRemovedRealObject) {
@@ -194,7 +194,6 @@ static auto Executors = ::testing::Values(
streams, threads/streams, IStreamsExecutor::ThreadBindingType::NONE});
},
[] {
auto threads = parallel_get_max_threads();
return std::make_shared<ImmediateExecutor>();
}
);

@@ -283,7 +283,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEDynamic1SixInputs) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto &orig_selected_indices_shape = f->get_output_partial_shape(0);
pass::Manager manager;
manager.register_pass<pass::InitNodeInfo>();
manager.register_pass<pass::ConvertNMS5ToLegacyMatcher>();
@@ -337,7 +336,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEDynamic1FiveInputs) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto &orig_selected_indices_shape = f->get_output_partial_shape(0);
pass::Manager manager;
manager.register_pass<pass::InitNodeInfo>();
manager.register_pass<pass::ConvertNMS5ToLegacyMatcher>();
@@ -386,7 +384,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEDynamic1FourInputs) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto &orig_selected_indices_shape = f->get_output_partial_shape(0);
pass::Manager manager;
manager.register_pass<pass::InitNodeInfo>();
manager.register_pass<pass::ConvertNMS5ToLegacyMatcher>();
@@ -434,7 +431,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEDynamic1ThreeInputs) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto &orig_selected_indices_shape = f->get_output_partial_shape(0);
pass::Manager manager;
manager.register_pass<pass::InitNodeInfo>();
manager.register_pass<pass::ConvertNMS5ToLegacyMatcher>();
@@ -481,7 +477,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEDynamic1TwoInputs) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto &orig_selected_indices_shape = f->get_output_partial_shape(0);
pass::Manager manager;
manager.register_pass<pass::InitNodeInfo>();
manager.register_pass<pass::ConvertNMS5ToLegacyMatcher>();

@@ -38,7 +38,6 @@ TEST(TransformationTests, ConvertNMS1ToNMSIEInternal) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto & orig_shape = f->get_output_partial_shape(0);
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ngraph::pass::ConvertNMS1ToNMS5>();
@@ -80,7 +79,6 @@ TEST(TransformationTests, ConvertNMS3ToNMSIEInternal) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto & orig_shape = f->get_output_partial_shape(0);
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ngraph::pass::ConvertNMS3ToNMS5>();
@@ -121,7 +119,6 @@ TEST(TransformationTests, ConvertNMS4ToNMSIEInternal) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto & orig_shape = f->get_output_partial_shape(0);
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ngraph::pass::ConvertNMS4ToNMS5>();
@@ -163,7 +160,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEInternal) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto & orig_shape = f->get_output_partial_shape(0);
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ngraph::pass::ConvertNMSToNMSIEInternal>();

@@ -37,7 +37,6 @@ TEST(TransformationTests, ConvertNMSToNMSIEStatic) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto & orig_shape = f->get_output_partial_shape(0);
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ngraph::pass::ConvertNMSToNMSIEMatcher>();
@@ -156,7 +155,6 @@ TEST(TransformationTests, ConvertNMST1oNMSIE) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto & orig_shape = f->get_output_partial_shape(0);
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ngraph::pass::ConvertOpSet1ToLegacy>();
@@ -196,7 +194,6 @@ TEST(TransformationTests, ConvertNMST3oNMSIE) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto & orig_shape = f->get_output_partial_shape(0);
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ngraph::pass::ConvertOpSet1ToLegacy>();
@@ -235,7 +232,6 @@ TEST(TransformationTests, ConvertNMST4oNMSIE) {

f = std::make_shared<Function>(NodeVector{nms}, ParameterVector{boxes, scores});

const auto & orig_shape = f->get_output_partial_shape(0);
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ngraph::pass::ConvertOpSet1ToLegacy>();

@@ -36,7 +36,6 @@ public:
std::shared_ptr<ngraph::Function> f, ref_f;

void SetUp() override {
const auto& parameters = GetParam();
const auto& test_case = std::get<0>(GetParam());
f = get_initial_function(test_case);
if (test_case.is_negative)

@@ -22,7 +22,6 @@ public:
static std::string getTestCaseName(testing::TestParamInfo<NormalizeL2LayerCPUTestParamSet> obj) {
LayerTestsDefinitions::NormalizeL2LayerTestParams basicParamsSet;
CPUSpecificParams cpuParams;
Precision inputPrecision, outputPrecision;
std::tie(basicParamsSet, cpuParams) = obj.param;

std::ostringstream result;
@@ -117,9 +117,6 @@ const std::vector<std::vector<int64_t>> masks = {
const std::vector<bool> do_softmax = {true, false};
const std::vector<size_t> classes = {80, 20};
const std::vector<size_t> num_regions = {5, 9};
const size_t coords = 4;
const int start_axis = 1;
const int end_axis = 3;

const regionYoloAttributes yoloV3attr = {80, 4, 9, false, 1, 3};

@@ -35,7 +35,7 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork_RemoteContext) {
runParallel([&] () {
auto value = counter++;
auto remote_context = make_shared_context(ie, CommonTestUtils::DEVICE_GPU, ocl_instance->_context.get());
(void)ie.LoadNetwork(networks[(counter++) % networks.size()], remote_context);
(void)ie.LoadNetwork(networks[value % networks.size()], remote_context);
}, numIterations, numThreads);
}

@@ -23,8 +23,8 @@ class DynamicToStaticShapeClamp : public CommonTestUtils::TestsCommon,
public:
void SetUp() override {
const auto& parameters = GetParam();
const auto& dataType = std::get<0>(GetParam());
const auto& dataDims = std::get<1>(GetParam());
const auto& dataType = std::get<0>(parameters);
const auto& dataDims = std::get<1>(parameters);

ngraph::helpers::CompareFunctions(*transform(dataType, dataDims), *reference(dataType, dataDims));
}

@@ -23,8 +23,8 @@ class DynamicToStaticShapeConvert : public CommonTestUtils::TestsCommon,
public:
void SetUp() override {
const auto& parameters = GetParam();
const auto& dataType = std::get<0>(GetParam());
const auto& dataDims = std::get<1>(GetParam());
const auto& dataType = std::get<0>(parameters);
const auto& dataDims = std::get<1>(parameters);

ngraph::helpers::CompareFunctions(*transform(dataType, dataDims), *reference(dataType, dataDims));
}

@@ -134,8 +134,6 @@ protected:
logical_reduce->set_keep_dims(reduce_setup.keep_dims);
node->validate_and_infer_types();

const auto data_rank_value = reduce_setup.data_shape.size();

ngraph::Output<ngraph::Node> output_shape;
if (reduce_setup.keep_dims) {
output_shape = std::make_shared<ngraph::opset3::ScatterElementsUpdate>(
@@ -43,8 +43,8 @@ class DynamicToStaticShapeTranspose : public CommonTestUtils::TestsCommon, publi
public:
void SetUp() override {
const auto& parameters = GetParam();
const auto& dataType = std::get<0>(GetParam());
const auto& dataDims = std::get<1>(GetParam());
const auto& dataType = std::get<0>(parameters);
const auto& dataDims = std::get<1>(parameters);

auto permutation = std::vector<std::int64_t>(dataDims.size());
std::iota(permutation.begin(), permutation.end(), 0);
@@ -24,9 +24,9 @@ class DynamicToStaticShapeUnaryElementwise : public CommonTestUtils::TestsCommon
public:
void SetUp() override {
const auto& parameters = GetParam();
const auto& dataType = std::get<0>(GetParam());
const auto& dataDims = std::get<1>(GetParam());
const auto& type_info = std::get<2>(GetParam());
const auto& dataType = std::get<0>(parameters);
const auto& dataDims = std::get<1>(parameters);
const auto& type_info = std::get<2>(parameters);

ngraph::helpers::CompareFunctions(*transform(dataType, dataDims, type_info), *reference(dataType, dataDims, type_info));
}
@@ -20,9 +20,9 @@ class DSR_ReshapeWithStaticDescriptor : public testing::WithParamInterface<Param
protected:
std::shared_ptr<ngraph::Node> createTestedOp() override {
const auto& parameters = GetParam();
const auto& inDataType = std::get<0>(GetParam());
const auto& reshapeTestParams = std::get<1>(GetParam());
targetDevice = std::get<2>(GetParam());
const auto& inDataType = std::get<0>(parameters);
const auto& reshapeTestParams = std::get<1>(parameters);
targetDevice = std::get<2>(parameters);

const auto& inDataShapes = std::get<0>(reshapeTestParams);
const auto& specialZero = std::get<1>(reshapeTestParams);
@@ -46,9 +46,9 @@ class DSR_ReshapeWithDynamicDescriptor : public testing::WithParamInterface<Para
protected:
std::shared_ptr<ngraph::Node> createTestedOp() override {
const auto& parameters = GetParam();
const auto& inDataType = std::get<0>(GetParam());
const auto& inDataShapes = std::get<0>(std::get<1>(GetParam()));
targetDevice = std::get<2>(GetParam());
const auto& inDataType = std::get<0>(parameters);
const auto& inDataShapes = std::get<0>(std::get<1>(parameters));
targetDevice = std::get<2>(parameters);

const auto inputSubgraph = createInputSubgraphWithDSR(inDataType, inDataShapes);

@@ -59,7 +59,6 @@ protected:
std::shared_ptr<ngraph::Node> createTestedOp() override {
const auto& parameters = GetParam();
const auto& dataType = std::get<0>(parameters);
const auto& idxType = std::get<1>(parameters);
const auto& topkSetup = std::get<2>(parameters);
targetDevice = std::get<3>(parameters);

@@ -23,9 +23,9 @@ protected:
SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);

const auto& parameters = GetParam();
const auto& dataType = std::get<0>(GetParam());
const auto& dataDims = std::get<1>(GetParam());
targetDevice = std::get<2>(GetParam());
const auto& dataType = std::get<0>(parameters);
const auto& dataDims = std::get<1>(parameters);
targetDevice = std::get<2>(parameters);

const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
const auto nonZero = std::make_shared<ngraph::opset3::NonZero>(data);
@@ -90,7 +90,7 @@ namespace BehaviorTestsDefinitions {
} else {
try {
ie->SetConfig(configuration, targetDevice);
} catch (InferenceEngine::details::InferenceEngineException ex) {}
} catch (InferenceEngine::details::InferenceEngineException &) {}
}
}

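The exception-handler edits here and in the LoadNetwork tests further down replace catch-by-value with catch-by-reference. Catching InferenceEngineException by value copies the exception object (and would slice any derived type), and where the handler body never uses it, the named parameter is dead weight under the stricter warning set; catching by reference, unnamed when the object is unused, avoids both. A minimal sketch (hypothetical mayThrow, not from the patch):

    #include <stdexcept>

    void mayThrow();

    void guarded() {
        try {
            mayThrow();
        } catch (const std::exception &) {
            // unnamed reference: no copy, no slicing, nothing left unused
        }
    }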
@@ -11,7 +11,6 @@
#include <fstream>
#include <ngraph/variant.hpp>
#include <hetero/hetero_plugin_config.hpp>
#include <legacy/graph_tools.hpp>
#include <functional_test_utils/plugin_cache.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ngraph/op/util/op_types.hpp>
@@ -1071,9 +1070,10 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMet
Parameter deviceConfigValue = deviceExeNetwork.GetConfig(deviceConf);

// HETERO returns EXCLUSIVE_ASYNC_REQUESTS as a boolean value
if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf)
if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf) {
ASSERT_EQ(deviceConfigValue, heteroConfigValue);
}
}
}

TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricNoThrow) {
@@ -1109,9 +1109,10 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricN
Parameter deviceMetricValue = deviceExeNetwork.GetMetric(deviceMetricName);

if (std::find(heteroSpecificMetrics.begin(), heteroSpecificMetrics.end(), deviceMetricName) ==
heteroSpecificMetrics.end())
heteroSpecificMetrics.end()) {
ASSERT_TRUE(heteroMetricValue == deviceMetricValue);
}
}
}

@@ -89,7 +89,6 @@ public:
}

static std::string getTestCaseName(testing::TestParamInfo<Params> obj) {
unsigned int numThreads, numIterations;
std::string deviceName;
Config config;
std::tie(deviceName, config) = obj.param;

@@ -422,7 +422,7 @@ TEST_P(InferRequestTests, canStartAsyncInferWithGetInOutWithStatusOnlyWait) {
InferenceEngine::StatusCode sts;
sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY);
ASSERT_TRUE(sts == InferenceEngine::StatusCode::OK ||
InferenceEngine::StatusCode::RESULT_NOT_READY);
sts == InferenceEngine::StatusCode::RESULT_NOT_READY);
}

// Plugin correct infer request with allocating input and result BlobMaps inside plugin
@@ -482,8 +482,6 @@ TEST_P(InferRequestTests, canRun3AsyncRequestsConsistentlyWithWait) {
auto req1 = execNet.CreateInferRequest();
auto req2 = execNet.CreateInferRequest();
auto req3 = execNet.CreateInferRequest();
InferenceEngine::ResponseDesc response1, response2, response3;
InferenceEngine::StatusCode sts1, sts2, sts3;

req1.StartAsync();
ASSERT_NO_THROW(req1.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY));
@@ -644,7 +642,6 @@ TEST_P(InferRequestTestsResultNotReady, ReturnResultNotReadyFromWaitInAsyncModeF
// Create InferRequest
InferenceEngine::InferRequest req;
ASSERT_NO_THROW(req = execNet.CreateInferRequest());
InferenceEngine::ResponseDesc response;
InferenceEngine::StatusCode sts = InferenceEngine::StatusCode::OK;
std::promise<std::chrono::system_clock::time_point> callbackTimeStamp;
auto callbackTimeStampFuture = callbackTimeStamp.get_future();
@@ -13,16 +13,6 @@
#include "ie_preprocess.hpp"
#include "base/behavior_test_utils.hpp"

namespace {
void setInputNetworkPrecision(InferenceEngine::CNNNetwork &network, InferenceEngine::InputsDataMap &inputs_info,
InferenceEngine::Precision input_precision) {
inputs_info = network.getInputsInfo();
ASSERT_EQ(1u, inputs_info.size());
inputs_info.begin()->second->setPrecision(input_precision);
}

}

namespace BehaviorTestsDefinitions {

using PreprocessingPrecisionConvertParams = std::tuple<
@@ -87,7 +87,7 @@ TEST_P(BehaviorTestInput, canSetInputPrecisionForNetwork) {
InferenceEngine::StatusCode sts = InferenceEngine::StatusCode::OK;
try {
ie->LoadNetwork(cnnNet, targetDevice, configuration);
} catch (InferenceEngine::details::InferenceEngineException ex) {
} catch (InferenceEngine::details::InferenceEngineException & ex) {
msg = ex.what();
sts = ex.getStatus();
}
@@ -113,7 +113,7 @@ TEST_P(BehaviorTestOutput, canSetOutputPrecisionForNetwork) {

try {
InferenceEngine::ExecutableNetwork exeNetwork = ie->LoadNetwork(cnnNet, targetDevice, configuration);
} catch (InferenceEngine::details::InferenceEngineException ex) {
} catch (InferenceEngine::details::InferenceEngineException & ex) {
sts = ex.getStatus();
msg = ex.what();
std::cout << "LoadNetwork() threw InferenceEngineException. Status: " << sts << ", message: " << msg << std::endl;

@@ -18,7 +18,7 @@ TEST_P(ConcatQuantization, CompareWithRefImpl) {
InferenceEngine::CNNNetwork cnnNetwork = InferenceEngine::CNNNetwork{ function };
executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
}
catch (InferenceEngine::details::InferenceEngineException ex) {
catch (InferenceEngine::details::InferenceEngineException & ex) {
FAIL() << ex.what();
}
};
@@ -118,7 +118,6 @@ void HeteroSyntheticTest::TearDown() {
}

std::string HeteroSyntheticTest::SetUpAffinity() {
int id = 0;
auto& param = GetParam();
std::string affinities;
auto& pluginParameters = std::get<Plugin>(param);

@@ -43,7 +43,6 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformations::GenerateInput(
const float hight = 255.f / k;

InferenceEngine::Blob::Ptr input = FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), hight - low, static_cast<int32_t>(low), 1ul);
const auto buffer = input->buffer().as<float*>();
return input;
}

@@ -46,7 +46,6 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformationsForConcat::Gener
const float low = 0.f / k;
const float hight = 255.f / k;
InferenceEngine::Blob::Ptr input = FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), hight - low, static_cast<int32_t>(low), 1ul);
const auto buffer = input->buffer().as<float*>();
return input;
}

@@ -74,8 +73,6 @@ void OutputLayersHandlingInTransformationsForConcat::SetUp() {
const auto input1 = std::make_shared<ngraph::opset1::Parameter>(ngPrecision, ngraph::Shape(inputShape1));
input1->set_friendly_name("input1");

const float low = 0.f;
const float hight = 255.f;
const auto fakeQuantize1 = ngraph::builder::makeFakeQuantize(
input1->output(0), ngPrecision, 256ul, { 1ul },
{ 0.f }, { 255.f }, { 0.f }, { 255.f });
@@ -56,7 +56,6 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformationsForConcatMultiCh
const float hight = interval.second / k;

InferenceEngine::Blob::Ptr input = FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), hight - low, static_cast<int32_t>(low), 1ul);
const auto buffer = input->buffer().as<float*>();
return input;
}

@@ -116,6 +116,7 @@ void TestEnvironment::TearDown() {
for (const auto &op : opsInfo) {
std::string name = std::string(op.name) + "-" + std::to_string(op.version);
pugi::xml_node entry = opsNode.append_child(name.c_str());
(void)entry;
}

pugi::xml_node resultsNode = root.child("results");

@@ -13,7 +13,7 @@ std::string ExtractImagePatchesTest::getTestCaseName(const testing::TestParamInf
ngraph::op::PadType pad_type;
InferenceEngine::Precision netPrc;
InferenceEngine::Precision inPrc, outPrc;
InferenceEngine::Layout inLayout, outLayout;
InferenceEngine::Layout inLayout;
std::string targetName;
std::tie(inputShape, kernel, strides, rates, pad_type, netPrc, inPrc, outPrc, inLayout, targetName) = obj.param;
std::ostringstream result;
@@ -25,7 +25,6 @@ void ReorgYoloLayerTest::SetUp() {
size_t stride;
InferenceEngine::Precision netPrecision;
std::tie(inputShape, stride, netPrecision, targetDevice) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, inputShape);
auto reorg_yolo = std::make_shared<ngraph::op::v0::ReorgYolo>(param, stride);
function = std::make_shared<ngraph::Function>(std::make_shared<ngraph::opset1::Result>(reorg_yolo), ngraph::ParameterVector{param}, "ReorgYolo");
@@ -32,7 +32,7 @@ std::string SpaceToBatchLayerTest::getTestCaseName(const testing::TestParamInfo<
void SpaceToBatchLayerTest::SetUp() {
std::vector<size_t> inputShape;
std::vector<int64_t> blockShape, padsBegin, padsEnd;
InferenceEngine::Precision inputPrecision, netPrecision;
InferenceEngine::Precision netPrecision;
std::tie(blockShape, padsBegin, padsEnd, inputShape, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = this->GetParam();

auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
@@ -30,7 +30,6 @@ void Basic_LSTM_S::SetUp() {

InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
hidden_size = 118;
outPrc = InferenceEngine::Precision::FP32;

@@ -76,7 +76,7 @@ void ConcatMultiInput::GenerateConstOnlyModel() {
std::vector<float> res;

std::uniform_real_distribution<float> dist(min, max);
for (int i = 0; i < vec_len; i++)
for (std::size_t i = 0; i < vec_len; i++)
res.emplace_back(static_cast<float>(dist(gen)));

return res;
@@ -16,7 +16,6 @@ std::string ConstantResultSubgraphTest::getTestCaseName(testing::TestParamInfo<c

void ConstantResultSubgraphTest::SetUp() {
InferenceEngine::SizeVector inputShapes;
InferenceEngine::Precision inputPrecision;
std::tie(targetDevice) = this->GetParam();
std::vector<float> data(300);
for (size_t i = 0; i < 300; i++)
@@ -40,7 +40,7 @@ void MemoryEltwiseReshapeConcatTest::SetUp() {
std::vector<float> res;

std::uniform_real_distribution<float> dist(min, max);
for (int i = 0; i < vec_len; i++)
for (std::size_t i = 0; i < vec_len; i++)
res.emplace_back(static_cast<float>(dist(gen)));

return res;
@@ -16,7 +16,6 @@ std::string ParameterResultSubgraphTest::getTestCaseName(testing::TestParamInfo<

void ParameterResultSubgraphTest::SetUp() {
InferenceEngine::SizeVector inputShapes;
InferenceEngine::Precision inputPrecision;
std::tie(targetDevice) = this->GetParam();

auto parameter = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10});
@@ -10,7 +10,6 @@ namespace SubgraphTestsDefinitions {
ShapeAxesTuple squeezeShape;
InferenceEngine::Precision netPrecision;
std::string targetName;
bool is_squeeze;
ngraph::helpers::SqueezeOpType opType;
std::tie(squeezeShape, netPrecision, targetName, opType) = obj.param;
std::ostringstream results;
@@ -7,11 +7,8 @@
namespace SubgraphTestsDefinitions {

std::string TrivialConcatLayerTest::getTestCaseName(const testing::TestParamInfo<trivialConcatParamsTuple> &obj) {
int axis;
std::vector<size_t> inputShapes;
InferenceEngine::Precision netPrecision;
InferenceEngine::Precision inPrc, outPrc;
InferenceEngine::Layout inLayout, outLayout;
std::string targetName;
std::map<std::string, std::string> config;
std::tie(inputShapes, netPrecision, targetName, config) = obj.param;
@@ -75,7 +75,7 @@ void FakeQuantizeSubgraphTest::SetUp() {
std::vector<float> res;

std::uniform_real_distribution<float> dist(min, max);
for (int i = 0; i < vec_len; i++)
for (std::size_t i = 0; i < vec_len; i++)
res.emplace_back(static_cast<float>(dist(gen)));

return res;
@@ -16,7 +16,7 @@

namespace CommonTestUtils {

static void fill_data(float *data, size_t size, size_t duty_ratio = 10) {
inline void fill_data(float *data, size_t size, size_t duty_ratio = 10) {
for (size_t i = 0; i < size; i++) {
if ((i / duty_ratio) % 2 == 1) {
data[i] = 0.0f;
@@ -26,7 +26,7 @@ static void fill_data(float *data, size_t size, size_t duty_ratio = 10) {
}
}

static void fill_data_sine(float *data, size_t size, float center, float ampl, float omega) {
inline void fill_data_sine(float *data, size_t size, float center, float ampl, float omega) {
for (size_t i = 0; i < size; i++) {
data[i] = center + ampl * sin(static_cast<float>(i) * omega);
}
@@ -36,12 +36,12 @@ static void fill_data_sine(float *data, size_t size, float center, float ampl, f
* @brief Create vector of floats with length of vec_len, with values ranging from min to max,
* with initial seed equal to variable seed with default of 0
*/
static inline std::vector<float> generate_float_numbers(std::size_t vec_len, float min, float max, int seed = 0) {
inline std::vector<float> generate_float_numbers(std::size_t vec_len, float min, float max, int seed = 0) {
std::vector<float> res;
std::mt19937 gen(static_cast<float>(seed));

std::uniform_real_distribution<float> dist(min, max);
for (int i = 0; i < vec_len; i++)
for (std::size_t i = 0; i < vec_len; i++)
res.emplace_back(static_cast<float>(dist(gen)));

return res;
@@ -96,7 +96,7 @@ void fill_data_const(InferenceEngine::Blob::Ptr& blob, float val);
*/
size_t byte_size(const InferenceEngine::TensorDesc &tdesc);

static void fill_data_bbox(float *data, size_t size, int height, int width, float omega) {
inline void fill_data_bbox(float *data, size_t size, int height, int width, float omega) {
float center_h = (height - 1.0f) / 2;
float center_w = (width - 1.0f) / 2;
for (size_t i = 0; i < size; i = i + 5) {
@@ -123,7 +123,7 @@ static void fill_data_bbox(float *data, size_t size, int height, int width, floa
}
}

static void fill_data_roi(float *data, size_t size, const uint32_t range, const int height, const int width, const float omega,
inline void fill_data_roi(float *data, size_t size, const uint32_t range, const int height, const int width, const float omega,
const bool is_roi_max_mode, const int seed = 1) {
std::default_random_engine random(seed);
std::uniform_int_distribution<int32_t> distribution(0, range);
@@ -342,21 +342,21 @@ void inline fill_data_random<InferenceEngine::Precision::BF16>(InferenceEngine::

template<typename T>
typename std::enable_if<std::is_signed<T>::value, T>::type
static ie_abs(const T &val) {
inline ie_abs(const T &val) {
return std::abs(val);
}

template<typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
static ie_abs(const T &val) {
inline ie_abs(const T &val) {
return val;
}

static ngraph::bfloat16 ie_abs(const ngraph::bfloat16& val) {
inline ngraph::bfloat16 ie_abs(const ngraph::bfloat16& val) {
return ngraph::bfloat16::from_bits(val.to_bits() & 0x7FFF);
}

static ngraph::float16 ie_abs(const ngraph::float16& val) {
inline ngraph::float16 ie_abs(const ngraph::float16& val) {
return ngraph::float16::from_bits(val.to_bits() ^ 0x8000);
}

@@ -0,0 +1,25 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include "unicode_utils.hpp"

#ifdef ENABLE_UNICODE_PATH_SUPPORT

namespace CommonTestUtils {

const std::vector<std::wstring> test_unicode_postfix_vector = {
    L"unicode_Яㅎあ",
    L"ひらがな日本語",
    L"大家有天分",
    L"עפצקרשתםןףץ",
    L"ث خ ذ ض ظ غ",
    L"그것이정당하다",
    L"АБВГДЕЁЖЗИЙ",
    L"СТУФХЦЧШЩЬЮЯ"
};

} // namespace CommonTestUtils

#endif // ENABLE_UNICODE_PATH_SUPPORT
@ -16,19 +16,19 @@
#ifdef ENABLE_UNICODE_PATH_SUPPORT
namespace CommonTestUtils {

static void fixSlashes(std::string &str) {
inline void fixSlashes(std::string &str) {
std::replace(str.begin(), str.end(), '/', '\\');
}

static void fixSlashes(std::wstring &str) {
inline void fixSlashes(std::wstring &str) {
std::replace(str.begin(), str.end(), L'/', L'\\');
}

static std::wstring stringToWString(std::string input) {
inline std::wstring stringToWString(std::string input) {
return ::FileUtils::multiByteCharToWString(input.c_str());
}

static bool copyFile(std::wstring source_path, std::wstring dest_path) {
inline bool copyFile(std::wstring source_path, std::wstring dest_path) {
#ifndef _WIN32
std::ifstream source(FileUtils::wStringtoMBCSstringChar(source_path), std::ios::binary);
std::ofstream dest(FileUtils::wStringtoMBCSstringChar(dest_path), std::ios::binary);
@ -49,11 +49,11 @@ static bool copyFile(std::wstring source_path, std::wstring dest_path) {
return result;
}

static bool copyFile(std::string source_path, std::wstring dest_path) {
inline bool copyFile(std::string source_path, std::wstring dest_path) {
return copyFile(stringToWString(source_path), dest_path);
}

static std::wstring addUnicodePostfixToPath(std::string source_path, std::wstring postfix) {
inline std::wstring addUnicodePostfixToPath(std::string source_path, std::wstring postfix) {
fixSlashes(source_path);
std::wstring result = stringToWString(source_path);
std::wstring file_name = result.substr(0, result.size() - 4);
@ -62,7 +62,7 @@ static std::wstring addUnicodePostfixToPath(std::string source_path, std::wstrin
return result;
}

static void removeFile(std::wstring path) {
inline void removeFile(std::wstring path) {
int result = 0;
if (!path.empty()) {
#ifdef _WIN32
@ -71,6 +71,7 @@ static void removeFile(std::wstring path) {
result = remove(FileUtils::wStringtoMBCSstringChar(path).c_str());
#endif
}
(void)result;
}
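The added `(void)result;` is the conventional idiom for telling the compiler a value is ignored on purpose: the cast counts as a use, so neither `-Wunused-variable` nor GCC's `-Wunused-but-set-variable` fires, yet no code is generated for it. A minimal sketch with illustrative names; the C++17 attribute shown at the end is an alternative where the language level allows it:

    int cleanup();   // assume a status-returning helper

    void demo() {
        int rc = cleanup();
        (void)rc;                              // explicit "intentionally unused"

        [[maybe_unused]] int rc2 = cleanup();  // C++17 spelling of the same intent
    }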
inline bool endsWith(const std::wstring& source, const std::wstring& expectedSuffix) {
@ -127,7 +128,7 @@ inline int removeFilesWithExt(std::wstring path, std::wstring ext) {
return ret;
}

static int removeDir(std::wstring path) {
inline int removeDir(std::wstring path) {
int result = 0;
if (!path.empty()) {
#ifdef _WIN32
@ -155,16 +156,7 @@ inline bool directoryExists(const std::wstring &path) {
return false;
}

static const std::vector<std::wstring> test_unicode_postfix_vector = {
L"unicode_Яㅎあ",
L"ひらがな日本語",
L"大家有天分",
L"עפצקרשתםןףץ",
L"ث خ ذ ض ظ غ",
L"그것이정당하다",
L"АБВГДЕЁЖЗИЙ",
L"СТУФХЦЧШЩЬЮЯ"
};
extern const std::vector<std::wstring> test_unicode_postfix_vector;

} // namespace CommonTestUtils
#endif // ENABLE_UNICODE_PATH_SUPPORT
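This hunk pairs with the new unicode_utils.cpp above: the wide-string postfix vector used to be a `static const` object defined in the header, so every including translation unit materialized its own copy, and copies in TUs that never read it drew unused-variable diagnostics (clang's `-Wunused-const-variable` in particular). The header now carries only an `extern` declaration and the single definition lives in the .cpp file. The shape of the pattern, reduced to a toy example with illustrative names:

    // constants.hpp
    #pragma once
    #include <string>
    #include <vector>
    extern const std::vector<std::wstring> kPostfixes;  // declaration only

    // constants.cpp
    #include "constants.hpp"
    const std::vector<std::wstring> kPostfixes = {      // the one definition
        L"alpha", L"beta",
    };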
@ -36,6 +36,7 @@
#include <windef.h>
#include <fileapi.h>
#include <Winbase.h>
#include <sys/types.h>
#include <sys/stat.h>

// Copied from linux libc sys/stat.h:
@ -21,8 +21,7 @@

namespace FuncTestUtils {
namespace Bf16TestUtils {
static float reducePrecisionBitwise(const float in);
static short reducePrecisionBitwiseS(const float in);
inline short reducePrecisionBitwiseS(const float in);
} // namespace Bf16TestUtils

enum CompareType{
@ -46,7 +45,7 @@ enum CompareType{
* @param printData A flag if data printing is demanded
*/
template<typename dType>
static void inline compareRawBuffers(const dType *res, const dType *ref,
inline void compareRawBuffers(const dType *res, const dType *ref,
size_t resSize, size_t refSize,
CompareType compareType, float thr1 = 0.01, float thr2 = 0.01,
bool printData = false) {
@ -103,7 +102,7 @@ static void inline compareRawBuffers(const dType *res, const dType *ref,
* @param printData Flag if data printing is demanded
*/
template<typename dType>
static void inline compareRawBuffers(const dType *res, const dType *ref,
inline void compareRawBuffers(const dType *res, const dType *ref,
size_t resSize, size_t refSize,
float thr = 0.01,
bool printData = false) {
@ -125,7 +124,7 @@ static void inline compareRawBuffers(const dType *res, const dType *ref,
* @param printData A flag if data printing is demanded
*/
template<typename dType>
static void inline compareRawBuffers(const std::vector<dType *> res, const std::vector<dType *> ref,
inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<dType *> ref,
const std::vector<size_t> &resSizes, const std::vector<size_t> &refSizes,
CompareType compareType,
float thr1 = 0.01, float thr2 = 0.01, bool printData = false) {
@ -150,7 +149,7 @@ static void inline compareRawBuffers(const std::vector<dType *> res, const std::
* @param printData A flag if data printing is demanded
*/
template<typename dType>
static void inline compareRawBuffers(const std::vector<dType *> res, const std::vector<dType *> ref,
inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<dType *> ref,
const std::vector<size_t> &resSizes, const std::vector<size_t> &refSizes,
float thr = 0.01, bool printData = false) {
compareRawBuffers(res, ref, resSizes, refSizes, CompareType::ABS_AND_REL, thr, thr, printData);
@ -171,7 +170,7 @@ static void inline compareRawBuffers(const std::vector<dType *> res, const std::
* @param printData A flag if data printing is demanded
*/
template<typename dType>
static void inline compareRawBuffers(const std::vector<dType *> res, const std::vector<std::shared_ptr<dType *>> ref,
inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<std::shared_ptr<dType *>> ref,
const std::vector<size_t> &resSizes, const std::vector<size_t> &refSizes,
CompareType compareType,
float thr1 = 0.01, float thr2 = 0.01, bool printData = false) {
@ -196,14 +195,14 @@ static void inline compareRawBuffers(const std::vector<dType *> res, const std::
* @param printData A flag if data printing is demanded
*/
template<typename dType>
static void inline compareRawBuffers(const std::vector<dType *> res, const std::vector<std::shared_ptr<dType *>> ref,
inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<std::shared_ptr<dType *>> ref,
const std::vector<size_t> &resSizes, const std::vector<size_t> &refSizes,
float thr = 0.01, bool printData = false) {
compareRawBuffers(res, ref, resSizes, refSizes, CompareType::ABS_AND_REL, thr, thr, printData);
}

template<InferenceEngine::Precision::ePrecision PRC>
void inline
inline void
compareBlobData(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Blob::Ptr &ref, float max_diff = 0.01,
const std::string &assertDetails = "", bool printData = false) {
using dataType = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
@ -243,13 +242,12 @@ compareBlobData(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Bl


template<InferenceEngine::Precision::ePrecision PRC>
void inline
inline void
compareBlobData(const std::vector<InferenceEngine::Blob::Ptr> &res, const std::vector<InferenceEngine::Blob::Ptr> &ref,
float max_diff = 0.01,
const std::string &assertDetails = "", bool printData = false) {
IE_ASSERT(res.size() == ref.size()) << "Length of comparing and references blobs vector are not equal!"
<< assertDetails;
using dataType = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
for (size_t i = 0; i < res.size(); i++) {
if (printData)
std::cout << "BEGIN CHECK BLOB [" << i << "]" << std::endl;
@ -259,7 +257,7 @@ compareBlobData(const std::vector<InferenceEngine::Blob::Ptr> &res, const std::v
}
}

void inline
inline void
compareBlobs(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Blob::Ptr &ref, float max_diff = 0.01,
const std::string &assertDetails = "", bool printData = false) {
ASSERT_EQ(res->byteSize(), ref->byteSize()) << "Blobs have different byteSize(): "
@ -284,7 +282,7 @@ compareBlobs(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Blob:
}
}

void inline GetComparisonThreshold(InferenceEngine::Precision prc, float &absoluteThreshold, float &relativeThreshold) {
inline void GetComparisonThreshold(InferenceEngine::Precision prc, float &absoluteThreshold, float &relativeThreshold) {
switch (prc) {
case InferenceEngine::Precision::FP32:
absoluteThreshold = relativeThreshold = 1e-4;
@ -302,7 +300,7 @@ void inline GetComparisonThreshold(InferenceEngine::Precision prc, float &absolu
}
}

float inline GetComparisonThreshold(InferenceEngine::Precision prc) {
inline float GetComparisonThreshold(InferenceEngine::Precision prc) {
float res;
GetComparisonThreshold(prc, res, res);
return res;
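The `void inline` to `inline void` reorderings in this header change nothing semantically: C++ allows declaration specifiers in any order, so both forms compile identically. Leading with `inline` simply matches the prevailing style. The functionally meaningful edits in the same file are the ones that drop `static` from `static void inline ...`, since that is what eliminates the per-TU copies. A two-line illustration:

    inline void f() {}  // conventional specifier order
    void inline g() {}  // equally valid, just unidiomatic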
@ -310,7 +308,7 @@ float inline GetComparisonThreshold(InferenceEngine::Precision prc) {

// Copy from net_pass.h
template<InferenceEngine::Precision::ePrecision PREC_FROM, InferenceEngine::Precision::ePrecision PREC_TO>
void inline convertArrayPrecision(typename InferenceEngine::PrecisionTrait<PREC_TO>::value_type *dst,
inline void convertArrayPrecision(typename InferenceEngine::PrecisionTrait<PREC_TO>::value_type *dst,
const typename InferenceEngine::PrecisionTrait<PREC_FROM>::value_type *src,
size_t nelem) {
using dst_type = typename InferenceEngine::PrecisionTrait<PREC_TO>::value_type;
@ -321,15 +319,14 @@ void inline convertArrayPrecision(typename InferenceEngine::PrecisionTrait<PREC_
}

template<>
void inline
inline void
convertArrayPrecision<InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32>(float *dst, const short *src,
size_t nelem) {
uint16_t a = *reinterpret_cast<const uint16_t *>(src);
InferenceEngine::PrecisionUtils::f16tof32Arrays(dst, src, nelem, 1.0f, 0.0f);
}

template<>
void inline
inline void
convertArrayPrecision<InferenceEngine::Precision::BF16, InferenceEngine::Precision::FP32>(float *dst, const short *src,
size_t nelem) {
auto srcBf16 = reinterpret_cast<const ngraph::bfloat16*>(src);
@ -339,7 +336,7 @@ convertArrayPrecision<InferenceEngine::Precision::BF16, InferenceEngine::Precisi
}

template<InferenceEngine::Precision::ePrecision PREC_FROM, InferenceEngine::Precision::ePrecision PREC_TO>
InferenceEngine::Blob::Ptr inline convertBlobPrecision(const InferenceEngine::Blob::Ptr &blob) {
inline InferenceEngine::Blob::Ptr convertBlobPrecision(const InferenceEngine::Blob::Ptr &blob) {
using from_d_type = typename InferenceEngine::PrecisionTrait<PREC_FROM>::value_type;
using to_d_type = typename InferenceEngine::PrecisionTrait<PREC_TO>::value_type;

@ -356,7 +353,7 @@ InferenceEngine::Blob::Ptr inline convertBlobPrecision(const InferenceEngine::Bl


template<InferenceEngine::Precision::ePrecision targetPRC>
InferenceEngine::Blob::Ptr inline copyBlobWithCast(const InferenceEngine::Blob::Ptr &blob) {
inline InferenceEngine::Blob::Ptr copyBlobWithCast(const InferenceEngine::Blob::Ptr &blob) {
InferenceEngine::Blob::Ptr newBlob;
switch (blob->getTensorDesc().getPrecision()) {
case InferenceEngine::Precision::FP32:
@ -387,7 +384,7 @@ InferenceEngine::Blob::Ptr inline copyBlobWithCast(const InferenceEngine::Blob::
return newBlob;
}

InferenceEngine::Blob::Ptr inline createAndFillBlobFloatNormalDistribution(const InferenceEngine::TensorDesc &td,
inline InferenceEngine::Blob::Ptr createAndFillBlobFloatNormalDistribution(const InferenceEngine::TensorDesc &td,
const float mean,
const float stddev,
const int32_t seed = 1) {
@ -412,7 +409,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobFloatNormalDistribution(const
return blob;
}

InferenceEngine::Blob::Ptr inline createAndFillBlobFloat(const InferenceEngine::TensorDesc &td,
inline InferenceEngine::Blob::Ptr createAndFillBlobFloat(const InferenceEngine::TensorDesc &td,
const uint32_t range = 10,
const int32_t start_from = 0,
const int32_t resolution = 1,
@ -439,7 +436,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobFloat(const InferenceEngine::
return blob;
}

InferenceEngine::Blob::Ptr inline createAndFillBlobWithFloatArray(const InferenceEngine::TensorDesc &td,
inline InferenceEngine::Blob::Ptr createAndFillBlobWithFloatArray(const InferenceEngine::TensorDesc &td,
const float values[],
const int size) {
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
@ -463,7 +460,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobWithFloatArray(const Inferenc
return blob;
}

InferenceEngine::Blob::Ptr inline createAndFillBlob(const InferenceEngine::TensorDesc &td,
inline InferenceEngine::Blob::Ptr createAndFillBlob(const InferenceEngine::TensorDesc &td,
const uint32_t range = 10,
const int32_t start_from = 0,
const int32_t resolution = 1,
@ -491,7 +488,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlob(const InferenceEngine::Tenso
return blob;
}

InferenceEngine::Blob::Ptr inline createAndFillBlobConsistently(
inline InferenceEngine::Blob::Ptr createAndFillBlobConsistently(
const InferenceEngine::TensorDesc &td,
const uint32_t range,
const int32_t start_from,
@ -517,7 +514,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobConsistently(
return blob;
}

InferenceEngine::Blob::Ptr inline createAndFillBlobUniqueSequence(
inline InferenceEngine::Blob::Ptr createAndFillBlobUniqueSequence(
const InferenceEngine::TensorDesc &td,
const int32_t start_from = 0,
const int32_t resolution = 1,
@ -543,7 +540,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobUniqueSequence(
return blob;
}

InferenceEngine::Blob::Ptr inline convertBlobLayout(const InferenceEngine::Blob::Ptr& in,
inline InferenceEngine::Blob::Ptr convertBlobLayout(const InferenceEngine::Blob::Ptr& in,
InferenceEngine::Layout layout) {
IE_ASSERT(in != nullptr) << "Got NULL pointer";

@ -564,7 +561,7 @@ InferenceEngine::Blob::Ptr inline convertBlobLayout(const InferenceEngine::Blob:
}

template<typename dType>
static void fillInputsBySinValues(dType* data, size_t size) {
inline void fillInputsBySinValues(dType* data, size_t size) {
if (std::is_same<dType, float>::value) {
for (size_t i = 0; i < size; i++) {
data[i] = sin(static_cast<float>(i));
@ -577,7 +574,7 @@ static void fillInputsBySinValues(dType* data, size_t size) {
}

template<typename dType>
static void fillInputsByCosValues(dType* data, size_t size) {
inline void fillInputsByCosValues(dType* data, size_t size) {
if (std::is_same<dType, float>::value) {
for (size_t i = 0; i < size; i++) {
data[i] = sin(static_cast<float>(i));
@ -589,7 +586,7 @@ static void fillInputsByCosValues(dType* data, size_t size) {
}
}

static int fillInputsBySinValues(InferenceEngine::Blob::Ptr blob) {
inline int fillInputsBySinValues(InferenceEngine::Blob::Ptr blob) {
InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
if (!mblob) {
return -1;
@ -602,7 +599,7 @@ static int fillInputsBySinValues(InferenceEngine::Blob::Ptr blob) {
return 0;
}

static int fillInputsByCosValues(InferenceEngine::Blob::Ptr blob) {
inline int fillInputsByCosValues(InferenceEngine::Blob::Ptr blob) {
InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
if (!mblob) {
return -1;
@ -617,7 +614,13 @@ static int fillInputsByCosValues(InferenceEngine::Blob::Ptr blob) {


namespace Bf16TestUtils {
static float reducePrecisionBitwise(const float in) {

#if defined __GNUC__
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif

inline float reducePrecisionBitwise(const float in) {
float f = in;
int* i = reinterpret_cast<int*>(&f);
int t2 = *i & 0xFFFF0000;
@ -629,13 +632,18 @@ static float reducePrecisionBitwise(const float in) {
return ft1;
}

static short reducePrecisionBitwiseS(const float in) {
inline short reducePrecisionBitwiseS(const float in) {
float f = reducePrecisionBitwise(in);
int intf = *reinterpret_cast<int*>(&f);
intf = intf >> 16;
short s = intf;
return s;
}

#if defined __GNUC__
# pragma GCC diagnostic pop
#endif

} // namespace Bf16TestUtils

enum class BlobKind {
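The Bf16TestUtils hunk wraps the two bit-twiddling helpers in a GCC `diagnostic push`/`pop` pair, presumably because `reinterpret_cast<int*>(&f)` violates strict aliasing and `-Wstrict-aliasing` can fire on it at higher optimization levels; the `push`/`ignored`/`pop` structure keeps the suppression scoped to exactly these definitions. Where suppression is not wanted, the usual alias-safe rewrite goes through `memcpy` (or `std::bit_cast` in C++20). A sketch of that alternative, which is not what the commit does:

    #include <cstring>

    // Alias-safe way to read a float's bit pattern.
    inline int float_bits(float f) {
        int bits;
        static_assert(sizeof bits == sizeof f, "assumes 32-bit int and float");
        std::memcpy(&bits, &f, sizeof bits);  // compilers reduce this to a register move
        return bits;
    }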
@ -16,7 +16,6 @@ std::shared_ptr<ngraph::Function> FakeQuantizeAndConvolutionFunction::getOrigina
const ngraph::Shape& inputShape,
const FakeQuantizeOnData& fqOnData,
const FakeQuantizeOnWeights& fqOnWeights) {
const float k = 50.f;

const auto input = std::make_shared<ngraph::opset1::Parameter>(precision, ngraph::Shape(inputShape));
const auto fakeQuantizeOnActivations = fqOnData.empty() ?
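Here the fix is plain deletion: `const float k = 50.f;` was a local that nothing read, and `-Wunused-variable` flags unused locals whether or not they are `const`. For constants at file scope clang additionally has `-Wunused-const-variable`, which is plausibly part of what the commit message's "fixes for clang compiler" refers to, though the diff itself does not say. A two-line repro of the local case:

    void f() {
        const float k = 50.f;  // warning: unused variable 'k' [-Wunused-variable]
    }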
@ -9,7 +9,7 @@
namespace ngraph {
namespace builder {
namespace subgraph {
static std::shared_ptr<ngraph::Function> makeConvPoolRelu(std::vector<size_t> inputShape = {1, 1, 32, 32},
inline std::shared_ptr<ngraph::Function> makeConvPoolRelu(std::vector<size_t> inputShape = {1, 1, 32, 32},
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
params.front()->set_friendly_name("Param_1");
@ -39,7 +39,7 @@ static std::shared_ptr<ngraph::Function> makeConvPoolRelu(std::vector<size_t> in
return fnPtr;
}

static std::shared_ptr<ngraph::Function> makeSplitConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
inline std::shared_ptr<ngraph::Function> makeSplitConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1);
@ -59,7 +59,7 @@ static std::shared_ptr<ngraph::Function> makeSplitConvConcat(std::vector<size_t>
return fnPtr;
}

static std::shared_ptr<ngraph::Function> makeKSOFunction(std::vector<size_t> inputShape = {1, 4, 20, 20},
inline std::shared_ptr<ngraph::Function> makeKSOFunction(std::vector<size_t> inputShape = {1, 4, 20, 20},
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) {
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});

@ -78,7 +78,7 @@ static std::shared_ptr<ngraph::Function> makeKSOFunction(std::vector<size_t> inp
return fnPtr;
}

static std::shared_ptr<ngraph::Function> makeSplitMultiConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20}) {
inline std::shared_ptr<ngraph::Function> makeSplitMultiConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20}) {
auto ngPrc = ngraph::element::Type_t::f32;
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1);
@ -122,7 +122,7 @@ static std::shared_ptr<ngraph::Function> makeSplitMultiConvConcat(std::vector<si
return fnPtr;
}

static std::shared_ptr<ngraph::Function> makeTIwithLSTMcell(ngraph::element::Type_t ngPRC = ngraph::element::Type_t::f32) {
inline std::shared_ptr<ngraph::Function> makeTIwithLSTMcell(ngraph::element::Type_t ngPRC = ngraph::element::Type_t::f32) {
// That which we iterate over
const size_t N = 32; // Batch size
const size_t L = 10; // Sequence length
@ -180,7 +180,7 @@ static std::shared_ptr<ngraph::Function> makeTIwithLSTMcell(ngraph::element::Typ
return fn_ptr;
}

static std::shared_ptr<ngraph::Function> makeSingleConv(std::vector<size_t> inputShape = {1, 3, 24, 24},
inline std::shared_ptr<ngraph::Function> makeSingleConv(std::vector<size_t> inputShape = {1, 3, 24, 24},
ngraph::element::Type_t type = ngraph::element::Type_t::f32) {
auto param0 = std::make_shared<ngraph::opset1::Parameter>(type, ngraph::Shape(inputShape));

@ -192,7 +192,7 @@ static std::shared_ptr<ngraph::Function> makeSingleConv(std::vector<size_t> inpu
return fn_ptr;
}

static std::shared_ptr<ngraph::Function> makeMultiSingleConv(std::vector<size_t> inputShape = {1, 3, 24, 24}) {
inline std::shared_ptr<ngraph::Function> makeMultiSingleConv(std::vector<size_t> inputShape = {1, 3, 24, 24}) {
ngraph::element::Type type = ngraph::element::Type_t::f32;
auto param0 = std::make_shared<ngraph::opset1::Parameter>(type, ngraph::Shape(inputShape));
auto conv1 = ngraph::builder::makeConvolution(param0, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
@ -221,7 +221,7 @@ static std::shared_ptr<ngraph::Function> makeMultiSingleConv(std::vector<size_t>
return fn_ptr;
}

static std::shared_ptr<ngraph::Function> make2InputSubtract(std::vector<size_t> inputShape = {1, 3, 24, 24},
inline std::shared_ptr<ngraph::Function> make2InputSubtract(std::vector<size_t> inputShape = {1, 3, 24, 24},
ngraph::element::Type_t type = ngraph::element::Type_t::f32) {
auto param0 = std::make_shared<ngraph::opset1::Parameter>(type, ngraph::Shape(inputShape));
auto param1 = std::make_shared<ngraph::opset1::Parameter>(type, ngraph::Shape(inputShape));
@ -232,7 +232,7 @@ static std::shared_ptr<ngraph::Function> make2InputSubtract(std::vector<size_t>
return fn_ptr;
}

static std::shared_ptr<ngraph::Function> makeNestedSplitConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
inline std::shared_ptr<ngraph::Function> makeNestedSplitConvConcat(std::vector<size_t> inputShape = {1, 4, 20, 20},
ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) {
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1);
@ -264,7 +264,7 @@ static std::shared_ptr<ngraph::Function> makeNestedSplitConvConcat(std::vector<s
return fnPtr;
}

static std::shared_ptr<ngraph::Function> makeSplitConvConcatInputInBranch(std::vector<size_t> inputShape = {1, 4, 20, 20},
inline std::shared_ptr<ngraph::Function> makeSplitConvConcatInputInBranch(std::vector<size_t> inputShape = {1, 4, 20, 20},
ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) {
auto params = ngraph::builder::makeParams(ngPrc, {inputShape, inputShape});
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1);
@ -294,7 +294,7 @@ static std::shared_ptr<ngraph::Function> makeSplitConvConcatInputInBranch(std::v
return fnPtr;
}

static std::shared_ptr<ngraph::Function> makeSplitConvConcatNestedInBranch(std::vector<size_t> inputShape = {1, 4, 20, 20},
inline std::shared_ptr<ngraph::Function> makeSplitConvConcatNestedInBranch(std::vector<size_t> inputShape = {1, 4, 20, 20},
ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) {
auto params = ngraph::builder::makeParams(ngPrc, {inputShape, inputShape});
int localId = 0;
@ -355,7 +355,7 @@ static std::shared_ptr<ngraph::Function> makeSplitConvConcatNestedInBranch(std::
return fnPtr;
}

static std::shared_ptr<ngraph::Function> makeSplitConvConcatNestedInBranchNestedOut(
inline std::shared_ptr<ngraph::Function> makeSplitConvConcatNestedInBranchNestedOut(
std::vector<size_t> inputShape = {1, 4, 20, 20},
ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) {
auto params = ngraph::builder::makeParams(ngPrc, {inputShape, inputShape});
@ -457,7 +457,7 @@ static std::shared_ptr<ngraph::Function> makeSplitConvConcatNestedInBranchNested
return fnPtr;
}

static std::shared_ptr<ngraph::Function> makeConvBias(std::vector<size_t> inputShape = {1, 3, 24, 24},
inline std::shared_ptr<ngraph::Function> makeConvBias(std::vector<size_t> inputShape = {1, 3, 24, 24},
ngraph::element::Type type = ngraph::element::Type_t::f32) {
auto parameter = ngraph::builder::makeParams(type, {inputShape});
parameter[0]->set_friendly_name("parameter");
@ -475,7 +475,7 @@ static std::shared_ptr<ngraph::Function> makeConvBias(std::vector<size_t> inputS
return fn_ptr;
}

static std::shared_ptr<ngraph::Function> makeReadConcatSplitAssign(std::vector<size_t> inputShape = {1, 1, 2, 4},
inline std::shared_ptr<ngraph::Function> makeReadConcatSplitAssign(std::vector<size_t> inputShape = {1, 1, 2, 4},
ngraph::element::Type type = ngraph::element::Type_t::f32) {
auto parameter = ngraph::builder::makeParams(type, {inputShape});
parameter[0]->set_friendly_name("parameter");
@ -28,7 +28,7 @@ generateVector(size_t vec_len, uint32_t upTo = 10, uint32_t startFrom = 1, int32
// chose values between this range to avoid type overrun (e.g. in case of I8 precision)
std::uniform_int_distribution<unsigned long> dist(startFrom, upTo);

for (int i = 0; i < vec_len; i++) {
for (size_t i = 0; i < vec_len; i++) {
res.push_back(
static_cast<typename ngraph::helpers::nGraphTypesTrait<dType>::value_type>(dist(gen)));
}
@ -46,7 +46,7 @@ std::vector<ngraph::float16> inline generateF16Vector(size_t vec_len, uint32_t u
// chose values between this range to avoid type overrun (e.g. in case of I8 precision)
std::uniform_int_distribution<unsigned long> dist(startFrom, upTo);

for (int i = 0; i < vec_len; i++) {
for (size_t i = 0; i < vec_len; i++) {
res.emplace_back(ngraph::float16(static_cast<float>(dist(gen))));
}
return res;
@ -62,7 +62,7 @@ std::vector<ngraph::bfloat16> inline generateBF16Vector(size_t vec_len, uint32_t
// chose values between this range to avoid type overrun (e.g. in case of I8 precision)
std::uniform_int_distribution<unsigned long> dist(startFrom, upTo);

for (int i = 0; i < vec_len; i++) {
for (size_t i = 0; i < vec_len; i++) {
res.emplace_back(ngraph::bfloat16(static_cast<float>(dist(gen))));
}
return res;
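The loop-counter changes above address a neighboring `-Wall` diagnostic rather than an unused variable: comparing a signed `int i` against an unsigned length such as `vec_len` (a `size_t` here) triggers `-Wsign-compare`, and on 64-bit targets an `int` counter could in principle overflow before reaching a very large length. Declaring the counter `size_t` keeps the comparison homogeneous. A minimal before/after with illustrative names:

    #include <cstddef>
    #include <vector>

    void walk(const std::vector<int>& v) {
        // for (int i = 0; i < v.size(); ++i)  // -Wsign-compare: int vs size_t
        for (std::size_t i = 0; i < v.size(); ++i) {
            (void)v[i];  // placeholder use of each element
        }
    }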
@ -64,7 +64,6 @@ TEST(ONNX_Importer_Tests, ImportModelWithMultiOutput) {

int count_topk = 0;
int count_constants = 0;
int count_goe = 0;
int count_parameters = 0;

for (auto op : function->get_ops()) {
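The deleted `count_goe` counter shows the two related diagnostics in play: a variable that is initialized and never touched again is reported by `-Wunused-variable`, while one that is later assigned but never read is GCC's `-Wunused-but-set-variable`. Both go away when the value is genuinely used or, as in this test, when the variable is removed outright. Sketch with illustrative names:

    void count_things(int n) {
        int never_touched = 0;  // -Wunused-variable: no further reference
        int set_only = 0;       // -Wunused-but-set-variable: written, never read
        for (int i = 0; i < n; ++i) {
            set_only += 1;
        }
    }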
@ -347,6 +347,8 @@ TEST_F(VPU_AdjustDataBatchTest, DISABLED_BranchedWithBatchAndSplitItemsInTheEnd)
const auto& branch1 = branches[0];
const auto& branch2 = branches[1];
const auto& data4 = CheckSingleConnection(branch1, 3);
(void)data4;
const auto& data5 = CheckSingleConnection(branch2, 4);
const auto& data6 = checkSingleLoopEnd(data5);
(void)data6;
}
@ -4,6 +4,10 @@

enable_testing()

if(NOT MSVC)
ie_add_compiler_flags(-Wno-unused-variable)
endif()

add_subdirectory(helpers)

if (ENABLE_GAPI_TESTS)
@ -25,8 +25,6 @@ std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
return obj.param.device + "_" + obj.param.input_blob_precision.name()
+ (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
}

const int BLOB_VERSION_MAJOR = 3;
}

#if (defined(_WIN32) || defined(_WIN64) )
@ -84,7 +82,7 @@ class AOTBehaviorTests : public BehaviorPluginTest {
{
ret = core.ImportNetwork("local_tmp.fw", GetParam().device);
}
catch (InferenceEngine::details::InferenceEngineException ex)
catch (InferenceEngine::details::InferenceEngineException & ex)
{
return ex.getStatus();
}
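The `catch` change is a genuine fix rather than a warning suppression: catching `InferenceEngineException` by value copies the exception object and slices off any derived part, and newer GCC releases can warn about catching polymorphic types by value via `-Wcatch-value`. Catching by reference binds the handler to the thrown object itself. A sketch with standard-library types:

    #include <iostream>
    #include <stdexcept>

    void handle() {
        try {
            throw std::runtime_error("boom");
        // } catch (std::exception ex) {      // copies and slices the exception
        } catch (const std::exception& ex) {  // binds to the real thrown object
            std::cerr << ex.what() << '\n';
        }
    }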
@ -250,9 +250,6 @@ void Regression::Matchers::CustomMatcher::checkResult() {
*/
if (isSaveOutput) {
if (!config.fetch_result) {

decltype(ctx.allOutputs().begin()) output;

// calculating all outputs size
SizeVector dimsMerged;
for(auto && output : ctx.allOutputs()) {
@ -318,13 +315,12 @@ void Regression::Matchers::CustomMatcher::checkResult() {

if (cmpNear || cmpNearAvg) {
int idx = 0;
float avgDiff = 0.0;
float sz = 0.0;
float maxDiff = 0.0;
float maxAverageDiff = 0.0;
float rms = 0.0;
float avgDiff = 0.0f;
float maxDiff = 0.0f;
float maxAverageDiff = 0.0f;
float rms = 0.0f;
int nFrame = -1;
float avgFrames = 0.0;
float avgFrames = 0.0f;

if (!config.fetch_result) {
decltype(ctx.allOutputs().begin()) output;
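Besides dropping the unused `idx` and `sz`, this hunk normalizes the initializers to `0.0f`: a bare `0.0` is a `double` literal, so `float avgDiff = 0.0;` goes through an implicit double-to-float conversion. That is harmless for zero, but the `f` suffix keeps the types exact and avoids noise from checks like `-Wconversion` in builds that enable them. Minimal contrast:

    float a = 0.0;   // double literal, implicitly narrowed to float
    float b = 0.0f;  // float literal, no conversion involved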
@ -174,7 +174,6 @@ void RawMatcher::match() {
for (auto &&item : out) {
Blob::Ptr output;
auto outputName = item.first;
auto& outBlob = item.second;
if (!inferRequest) {
output = allocateBlob(item.second->getTensorDesc());
} else {
@ -127,9 +127,6 @@ void SegmentationMatcher::match() {
// Load image to blob
ConvertImageToInput(reader->getData().get(), reader->size(), *input);

InferenceEngine::ResponseDesc dsc;
InferenceEngine::StatusCode sts;

auto loadedExecutableNetwork = config.ie_core->LoadNetwork(network, config._device_name, config.plugin_config);
InferenceEngine::ExecutableNetwork executableNetwork;
if (config.useExportImport) {
@ -66,7 +66,6 @@ static void ref_region_yolo(InferenceEngine::TBlob<float> &src, InferenceEngine:

int IW = (src.getTensorDesc().getDims().size() > 3) ? src.getTensorDesc().getDims()[3] : 1;
int IH = (src.getTensorDesc().getDims().size() > 2) ? src.getTensorDesc().getDims()[2] : 1;
int IC = (src.getTensorDesc().getDims().size() > 1) ? src.getTensorDesc().getDims()[1] : 1;
int B = (src.getTensorDesc().getDims().size() > 0) ? src.getTensorDesc().getDims()[0] : 1;

for (int i = 0; i < src.size(); i++) {
Some files were not shown because too many files have changed in this diff.