[GNA] Migrated the deprecated backward compatibility tests to the new infra (#15839)

* Migrated the deprecated backward compatibility tests to the new infra

* clang format

* Fixed build of deprecated tests

* Fixed code style

* Reverted snake_case functions names

* Added dependency on data

* Moved exported test models to the source path

* Fixed old tests
This commit is contained in:
Mikhail Ryzhov
2023-06-29 20:32:37 +02:00
committed by GitHub
parent 8bc880a740
commit 1a60d40bf2
31 changed files with 252 additions and 2169 deletions

View File

@@ -28,6 +28,5 @@ if(ENABLE_TESTS)
endif()
if(ENABLE_FUNCTIONAL_TESTS)
add_subdirectory(deprecated/functional)
add_subdirectory(functional)
endif()

View File

@@ -1,11 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Deprecated functional-test tree: these suites use legacy APIs, so
# deprecation warnings are silenced for everything added below.
ov_disable_deprecated_warnings()
add_subdirectory(shared_tests)
# GNA-specific deprecated tests are only built when the GNA plugin is enabled.
if (ENABLE_INTEL_GNA)
add_subdirectory(gna)
endif()

View File

@@ -1,51 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Legacy GNA functional-test executable (deprecated infrastructure).
set(TARGET_NAME GnaFunctionalTests)
# Collect test sources: local, shared-test instances, and the
# backward-compatibility (export/import blob) tests.
file(GLOB TEST_SRC
${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/input_tests/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/single_layer_tests/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/backward_compatibility/*.cpp)
# Plugins the tests load at runtime; built as dependencies, not linked.
list(APPEND DEPENDENCIES openvino_intel_gna_plugin)
if(ENABLE_HETERO)
list(APPEND DEPENDENCIES openvino_hetero_plugin)
endif()
if(ENABLE_INTEL_CPU)
list(APPEND DEPENDENCIES openvino_intel_cpu_plugin)
endif()
add_executable(${TARGET_NAME} ${TEST_SRC} ${TEST_INCLUDE})
# DATA_PATH / MODELS_PATH point the binary at the test-data repositories.
# NOTE(review): ${ARGV} is only meaningful inside a function/macro body; at
# directory scope it likely expands to nothing — confirm intent.
target_compile_definitions(${TARGET_NAME}
PRIVATE
USE_GNA=ON
PUBLIC ${ARGV}
DATA_PATH=\"${DATA_PATH}\"
MODELS_PATH=\"${MODELS_PATH}\")
target_link_libraries(${TARGET_NAME}
PRIVATE
IESharedTests
funcTestUtils
)
target_include_directories(${TARGET_NAME}
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/include)
add_dependencies(${TARGET_NAME} ${DEPENDENCIES})
# Register with CTest and label so GNA tests can be filtered with -L GNA.
add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME})
set_property(TEST ${TARGET_NAME} PROPERTY LABELS GNA)
install(TARGETS ${TARGET_NAME}
RUNTIME DESTINATION tests
COMPONENT tests
EXCLUDE_FROM_ALL)

View File

@@ -1,131 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <functional_test_utils/precision_utils.hpp>
#include <ie_core.hpp>
#include <ngraph_functions/builders.hpp>
#include <test_model_repo.hpp>
#include <single_layer_common.hpp>
#include "gtest/gtest.h"
// TODO: needs to be moved to the new test infrastructure @IrinaEfode
using namespace InferenceEngine;
// Parameter tuple for the export/import backward-compatibility tests.
// Modern type-alias form (C++11 `using`) instead of the legacy typedef.
using exportImportNetworkParams = std::tuple<
    InferenceEngine::Precision,          // Network Precision
    std::string,                         // Target Device
    std::string,                         // Name Export Model
    std::map<std::string, std::string>,  // Export Configuration
    std::map<std::string, std::string>   // Import Configuration
>;
// Checks that GNA model blobs exported by older releases can still be imported
// and produce bit-exact results: builds a tiny reference network, infers it,
// imports a pre-generated blob from the test-data repo, and compares outputs.
class BackwardCompatibilityTests : public testing::WithParamInterface<exportImportNetworkParams>,
public testing::Test{
public:
// Builds a readable test name from the parameter tuple (precision, device,
// blob file name, export/import configs).
static std::string getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> exportConfiguration;
std::map<std::string, std::string> importConfiguration;
std::string nameExportModel;
std::tie(netPrecision, targetDevice, nameExportModel, exportConfiguration, importConfiguration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice << "_";
result << "nameExportModel=" << nameExportModel << "_";
for (auto const& configItem : exportConfiguration) {
result << "_exportConfigItem=" << configItem.first << "_" << configItem.second;
}
for (auto const& configItem : importConfiguration) {
result << "_importConfigItem=" << configItem.first << "_" << configItem.second;
}
return result.str();
}
// Test body: reference inference on a freshly built network, then inference
// on the imported legacy blob with the same inputs, then exact comparison.
void Run() {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> exportConfiguration;
std::map<std::string, std::string> importConfiguration;
std::string nameExportModel;
std::tie(netPrecision, targetDevice, nameExportModel, exportConfiguration, importConfiguration) = this->GetParam();
GenerateFunction();
Core ie;
CNNNetwork network = CNNNetwork(function);
// Reference run: compile and infer the locally built network on GNA.
ExecutableNetwork executableNetwork = ie.LoadNetwork(network, "GNA", exportConfiguration);
InferRequest inferRequest = executableNetwork.CreateInferRequest();
inferRequest.Infer();
auto refOutputs = std::vector<InferenceEngine::Blob::Ptr>{};
for (const auto& output : executableNetwork.GetOutputsInfo()) {
const auto& name = output.first;
refOutputs.push_back(inferRequest.GetBlob(name));
}
// Import the pre-generated blob from the test-data repository.
auto models = TestDataHelpers::get_data_path() + "/gna/" + nameExportModel;
auto ImportNetwork = ie.ImportNetwork(models, "GNA", importConfiguration);
InferRequest inferRequestImport = ImportNetwork.CreateInferRequest();
// Feed the imported network the same input blobs as the reference run.
auto input_names = executableNetwork.GetInputsInfo();
for (const auto& input_name : input_names) {
auto i_blob = inferRequest.GetBlob(input_name.first);
for (const auto& infer_name : ImportNetwork.GetInputsInfo()) {
inferRequestImport.SetBlob(infer_name.first, i_blob);
}
}
inferRequestImport.Infer();
for (const auto& output : ImportNetwork.GetOutputsInfo()) {
const auto& name = output.first;
refOutputs.push_back(inferRequestImport.GetBlob(name));
}
// NOTE(review): indices [0]/[1] assume each network has exactly one
// output (imported output vs. reference output) — confirm.
CompareCommonExact(refOutputs[1], refOutputs[0]);
}
protected:
void SetUp() override {
}
private:
std::shared_ptr<ngraph::Function> function;
// Builds the reference model: a 1x336 parameter multiplied element-wise by
// the constant -1 (note: the local name `relu` is historical — the op is a
// Multiply, not a ReLU).
void GenerateFunction() {
auto param = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 336});
auto const_eltwise = std::make_shared<ngraph::opset1::Constant>(ngraph::element::f32, ngraph::Shape{1, 336},
std::vector<float>{-1});
auto relu = std::make_shared<ngraph::opset1::Multiply>(param, const_eltwise);
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(relu) };
function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{param}, "ExportBackwordCompatibility");
}
};
TEST_P(BackwardCompatibilityTests, smoke_BackwardCompatibility){
Run();
}
// Precisions the reference network is requested in.
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
// Plugin config for the reference (export-side) compilation.
const std::vector<std::map<std::string, std::string>> exportConfigs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
}
};
// Plugin config used when importing the legacy blob; kept identical to the
// export config so results stay bit-exact.
const std::vector<std::map<std::string, std::string>> importConfigs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
},
};
// Blobs exported by previous GNA plugin versions (2.1 through 2.4).
const std::vector<std::string> nameExportModel = {"export2dot1.blob", "export2dot2.blob", "export2dot3.blob", "export2dot4.blob"};
INSTANTIATE_TEST_SUITE_P(smoke_OldVersion, BackwardCompatibilityTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("GNA"),
::testing::ValuesIn(nameExportModel),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigs)),
BackwardCompatibilityTests::getTestCaseName);

View File

@@ -1,67 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Static library of shared legacy test code, linked into per-plugin test
# executables (e.g. GnaFunctionalTests).
set(TARGET_NAME IESharedTests)
list(APPEND SHARED_LIBRARIES
commonTestUtils
openvino::runtime
ngraphFunctions
ieTestHelpers
)
file(GLOB SHARED_TESTS_SRC
${CMAKE_CURRENT_SOURCE_DIR}/lstm/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/graph_tools/*.cpp
)
add_library(${TARGET_NAME} STATIC ${SHARED_TESTS_SRC})
# Runtime-loaded components the tests expect to exist.
add_dependencies(${TARGET_NAME} openvino_gapi_preproc mock_engine)
if(ENABLE_AUTO OR ENABLE_MULTI)
add_dependencies(${TARGET_NAME} openvino_auto_plugin)
endif()
if(ENABLE_AUTO_BATCH)
add_dependencies(${TARGET_NAME} openvino_auto_batch_plugin)
endif()
set_ie_threading_interface_for(${TARGET_NAME})
ie_faster_build(${TARGET_NAME}
UNITY
PCH PRIVATE "precomp.hpp"
)
# Find OpenCV components if exist
find_package(OpenCV QUIET COMPONENTS core imgproc)
if(OpenCV_FOUND)
target_compile_definitions(${TARGET_NAME} PUBLIC USE_OPENCV)
else()
message(WARNING "No suitable OpenCV version detected, pre-processing tests are skipped in ${TARGET_NAME}")
endif()
# PUBLIC include dirs: consumers of this library include the shared test
# headers directly.
target_include_directories(${TARGET_NAME} PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/input_tests
${CMAKE_CURRENT_SOURCE_DIR}/lstm
${CMAKE_CURRENT_SOURCE_DIR}/common_single_layer_tests
${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests
$<TARGET_PROPERTY:openvino::runtime::dev,INTERFACE_INCLUDE_DIRECTORIES>
)
if(OpenCV_FOUND)
target_include_directories(${TARGET_NAME} PUBLIC ${OpenCV_INCLUDE_DIRS})
list(APPEND SHARED_LIBRARIES ${OpenCV_LIBS})
endif()
target_link_libraries(${TARGET_NAME} PUBLIC ${SHARED_LIBRARIES})
if(ENABLE_HETERO)
add_dependencies(${TARGET_NAME} openvino_hetero_plugin)
endif()
# developer package
openvino_developer_export_targets(COMPONENT tests TARGETS ${TARGET_NAME})

View File

@@ -1,519 +0,0 @@
/*
Copyright 2017 Leon Merten Lohse
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#ifndef NPY_H
#define NPY_H
#include <complex>
#include <fstream>
#include <string>
#include <iostream>
#include <sstream>
#include <cstdint>
#include <cstring>
#include <vector>
#include <stdexcept>
#include <algorithm>
#include <unordered_map>
namespace npy {
/* Compile-time test for byte order.
If your compiler does not define these per default, you may want to define
one of these constants manually.
Defaults to little endian order. */
#if defined(__BYTE_ORDER) && __BYTE_ORDER == __BIG_ENDIAN || \
defined(__BIG_ENDIAN__) || \
defined(__ARMEB__) || \
defined(__THUMBEB__) || \
defined(__AARCH64EB__) || \
defined(_MIBSEB) || defined(__MIBSEB) || defined(__MIBSEB__)
const bool big_endian = true;
#else
const bool big_endian = false;
#endif
const char magic_string[] = "\x93NUMPY";
const size_t magic_string_length = 6;
const char little_endian_char = '<';
const char big_endian_char = '>';
const char no_endian_char = '|';
constexpr char host_endian_char = ( big_endian ?
big_endian_char :
little_endian_char );
/* npy array length */
typedef unsigned long int ndarray_len_t;
/* Writes the 8-byte npy preamble: the magic string followed by the
   major/minor format-version bytes. */
inline void write_magic(std::ostream& ostream, unsigned char v_major=1, unsigned char v_minor=0) {
    ostream.write(magic_string, magic_string_length);
    const char version[2] = {static_cast<char>(v_major), static_cast<char>(v_minor)};
    ostream.write(version, 2);
}
inline void read_magic(std::istream& istream, unsigned char& v_major, unsigned char& v_minor) {
char buf[magic_string_length+2];
istream.read(buf, magic_string_length+2);
if(!istream) {
throw std::runtime_error("io error: failed reading file");
}
if (0 != std::memcmp(buf, magic_string, magic_string_length))
throw std::runtime_error("this file does not have a valid npy format.");
v_major = buf[magic_string_length];
v_minor = buf[magic_string_length+1];
}
// typestring magic
struct Typestring {
private:
char c_endian;
char c_type;
int len;
public:
inline std::string str() {
const size_t max_buflen = 16;
char buf[max_buflen];
std::sprintf(buf, "%c%c%u", c_endian, c_type, len);
return std::string(buf);
}
Typestring(const std::vector<float>& v)
:c_endian {host_endian_char}, c_type {'f'}, len {sizeof(float)} {}
Typestring(const std::vector<double>& v)
:c_endian {host_endian_char}, c_type {'f'}, len {sizeof(double)} {}
Typestring(const std::vector<long double>& v)
:c_endian {host_endian_char}, c_type {'f'}, len {sizeof(long double)} {}
Typestring(const std::vector<char>& v)
:c_endian {no_endian_char}, c_type {'i'}, len {sizeof(char)} {}
Typestring(const std::vector<short>& v)
:c_endian {host_endian_char}, c_type {'i'}, len {sizeof(short)} {}
Typestring(const std::vector<int>& v)
:c_endian {host_endian_char}, c_type {'i'}, len {sizeof(int)} {}
Typestring(const std::vector<long>& v)
:c_endian {host_endian_char}, c_type {'i'}, len {sizeof(long)} {}
Typestring(const std::vector<long long>& v) :c_endian {host_endian_char}, c_type {'i'}, len {sizeof(long long)} {}
Typestring(const std::vector<unsigned char>& v)
:c_endian {no_endian_char}, c_type {'u'}, len {sizeof(unsigned char)} {}
Typestring(const std::vector<unsigned short>& v)
:c_endian {host_endian_char}, c_type {'u'}, len {sizeof(unsigned short)} {}
Typestring(const std::vector<unsigned int>& v)
:c_endian {host_endian_char}, c_type {'u'}, len {sizeof(unsigned int)} {}
Typestring(const std::vector<unsigned long>& v)
:c_endian {host_endian_char}, c_type {'u'}, len {sizeof(unsigned long)} {}
Typestring(const std::vector<unsigned long long>& v)
:c_endian {host_endian_char}, c_type {'u'}, len {sizeof(unsigned long long)} {}
Typestring(const std::vector<std::complex<float>>& v)
:c_endian {host_endian_char}, c_type {'c'}, len {sizeof(std::complex<float>)} {}
Typestring(const std::vector<std::complex<double>>& v)
:c_endian {host_endian_char}, c_type {'c'}, len {sizeof(std::complex<double>)} {}
Typestring(const std::vector<std::complex<long double>>& v)
:c_endian {host_endian_char}, c_type {'c'}, len {sizeof(std::complex<long double>)} {}
};
// Intended to validate a numpy typestring such as "'<f4'". The regex-based
// implementation is commented out, so this is currently a no-op stub kept for
// interface stability; callers treat every descriptor as valid.
inline void parse_typestring( std::string typestring){
// std::regex re ("'([<>|])([ifuc])(\\d+)'");
// std::smatch sm;
//
// std::regex_match(typestring, sm, re );
//
// if ( sm.size() != 4 ) {
// throw std::runtime_error("invalid typestring");
// }
}
namespace pyparse {
/**
Strips leading and trailing spaces/tabs from `str`.
Returns "" when the string is empty or all whitespace.
*/
inline std::string trim(const std::string& str) {
    static const char* const kWhitespace = " \t";
    const auto first = str.find_first_not_of(kWhitespace);
    if (first == std::string::npos) {
        return "";
    }
    const auto last = str.find_last_not_of(kWhitespace);
    return str.substr(first, last - first + 1);
}
/**
Returns the trimmed text after the first ':' in a "key: value" entry,
or "" when no ':' is present.
*/
inline std::string get_value_from_map(const std::string& mapstr) {
    const auto colon_pos = mapstr.find_first_of(":");
    if (colon_pos == std::string::npos) {
        return "";
    }
    return trim(mapstr.substr(colon_pos + 1));
}
/**
Parses the string representation of a Python dict.
The keys need to be known and may not appear anywhere else in the data.
@throws std::runtime_error when the input is not brace-wrapped or a key is missing.
*/
inline std::unordered_map<std::string, std::string> parse_dict(std::string in, std::vector<std::string>& keys) {
std::unordered_map<std::string, std::string> map;
if (keys.size() == 0)
return map;
in = trim(in);
// unwrap dictionary; the empty() guard prevents calling front()/back() on an
// empty string (undefined behavior in the original for empty input)
if (!in.empty() && (in.front() == '{') && (in.back() == '}'))
in = in.substr(1, in.length()-2);
else
throw std::runtime_error("Not a Python dictionary.");
// locate each key so entries can be sliced out in document order
std::vector<std::pair<size_t, std::string>> positions;
for (auto const& value : keys) {
size_t pos = in.find( "'" + value + "'" );
if (pos == std::string::npos)
throw std::runtime_error("Missing '"+value+"' key.");
std::pair<size_t, std::string> position_pair { pos, value };
positions.push_back(position_pair);
}
// sort by position in dict
std::sort(positions.begin(), positions.end() );
for(size_t i = 0; i < positions.size(); ++i) {
std::string raw_value;
size_t begin { positions[i].first };
size_t end { std::string::npos };
std::string key = positions[i].second;
if ( i+1 < positions.size() )
end = positions[i+1].first;
raw_value = in.substr(begin, end-begin);
raw_value = trim(raw_value);
// drop the comma separating this entry from the next (guarded: back() on
// an empty slice would be undefined behavior)
if (!raw_value.empty() && raw_value.back() == ',')
raw_value.pop_back();
map[key] = get_value_from_map(raw_value);
}
return map;
}
/**
Parses the string representation of a Python boolean ("True"/"False").
@throws std::runtime_error for any other input.
(Also fixes the "boolan" typo in the original error message.)
*/
inline bool parse_bool(const std::string& in) {
    if (in == "True")
        return true;
    if (in == "False")
        return false;
    throw std::runtime_error("Invalid python boolean.");
}
/**
Parses the string representation of a Python str: returns the text between
single quotes. Throws when the input is not single-quote delimited.
*/
inline std::string parse_str(const std::string& in) {
    const bool quoted = (in.front() == '\'') && (in.back() == '\'');
    if (!quoted) {
        throw std::runtime_error("Invalid python string.");
    }
    return in.substr(1, in.length()-2);
}
/**
Parses the string representation of a Python tuple into a vector of its
comma-separated items. Items keep their surrounding whitespace (no per-item
trim). Throws when the input is not parenthesis-wrapped.
*/
inline std::vector<std::string> parse_tuple(std::string in) {
    constexpr char kSeparator = ',';
    in = trim(in);
    if ((in.front() != '(') || (in.back() != ')')) {
        throw std::runtime_error("Invalid Python tuple.");
    }
    in = in.substr(1, in.length()-2);
    std::vector<std::string> items;
    std::istringstream stream(in);
    std::string token;
    while (std::getline(stream, token, kSeparator)) {
        items.push_back(token);
    }
    return items;
}
/**
Serializes a vector as a Python tuple literal: "" for empty, "(x,)" for one
element, "(a, b, c)" otherwise. (Loop-based join: the original used
std::ostream_iterator, which depends on <iterator> that this header never
includes directly.)
*/
template <typename T>
inline std::string write_tuple(const std::vector<T>& v) {
    if (v.size() == 0)
        return "";
    std::ostringstream out;
    out << "(";
    if (v.size() == 1) {
        out << v.front() << ",";
    } else {
        for (size_t i = 0; i + 1 < v.size(); ++i)
            out << v[i] << ", ";
        out << v.back();
    }
    out << ")";
    return out.str();
}
/** Serializes a C++ bool as the Python literal "True" or "False". */
inline std::string write_boolean(bool b) {
    return b ? "True" : "False";
}
} // namespace pyparse
// Parses the npy header dictionary text into its three components:
// `descr` (dtype string), `fortran_order`, and `shape`.
inline void parse_header(std::string header, std::string& descr, bool& fortran_order, std::vector<ndarray_len_t>& shape) {
/*
The first 6 bytes are a magic string: exactly "\x93NUMPY".
The next 1 byte is an unsigned byte: the major version number of the file format, e.g. \x01.
The next 1 byte is an unsigned byte: the minor version number of the file format, e.g. \x00. Note: the version of the file format is not tied to the version of the numpy package.
The next 2 bytes form a little-endian unsigned short int: the length of the header data HEADER_LEN.
The next HEADER_LEN bytes form the header data describing the array's format. It is an ASCII string which contains a Python literal expression of a dictionary. It is terminated by a newline ('\n') and padded with spaces ('\x20') to make the total length of the magic string + 4 + HEADER_LEN be evenly divisible by 16 for alignment purposes.
The dictionary contains three keys:
"descr" : dtype.descr
An object that can be passed as an argument to the numpy.dtype() constructor to create the array's dtype.
"fortran_order" : bool
Whether the array data is Fortran-contiguous or not. Since Fortran-contiguous arrays are a common form of non-C-contiguity, we allow them to be written directly to disk for efficiency.
"shape" : tuple of int
The shape of the array.
For repeatability and readability, this dictionary is formatted using pprint.pformat() so the keys are in alphabetic order.
*/
// remove trailing newline
if (header.back() != '\n')
throw std::runtime_error("invalid header");
header.pop_back();
// parse the dictionary
std::vector<std::string> keys { "descr", "fortran_order", "shape" };
auto dict_map = npy::pyparse::parse_dict(header, keys);
if (dict_map.size() == 0)
throw std::runtime_error("invalid dictionary in header");
std::string descr_s = dict_map["descr"];
std::string fortran_s = dict_map["fortran_order"];
std::string shape_s = dict_map["shape"];
// TODO: extract info from typestring
parse_typestring(descr_s);
// strip the surrounding single quotes from the dtype descriptor
descr = npy::pyparse::parse_str(descr_s);
// convert literal Python bool to C++ bool
fortran_order = npy::pyparse::parse_bool(fortran_s);
// parse the shape tuple
auto shape_v = npy::pyparse::parse_tuple(shape_s);
if (shape_v.size() == 0)
throw std::runtime_error("invalid shape tuple in header");
for ( auto item : shape_v ) {
ndarray_len_t dim = static_cast<ndarray_len_t>(std::stoul(item));
shape.push_back(dim);
}
}
/** Serializes the npy header dictionary with its three required keys
    ('descr', 'fortran_order', 'shape') in the format numpy expects. */
inline std::string write_header_dict(const std::string& descr, bool fortran_order, const std::vector<ndarray_len_t>& shape) {
    const std::string order_literal = npy::pyparse::write_boolean(fortran_order);
    const std::string shape_literal = npy::pyparse::write_tuple(shape);
    return "{'descr': '" + descr + "', 'fortran_order': " + order_literal + ", 'shape': " + shape_literal + ", }";
}
// Writes the complete npy header: magic + version, little-endian header
// length (u16 for format 1.0, u32 for 2.0), then the dictionary padded with
// spaces so the total header size is a multiple of 16.
inline void write_header(std::ostream& out, const std::string& descr, bool fortran_order, const std::vector<ndarray_len_t>& shape_v)
{
std::string header_dict = write_header_dict(descr, fortran_order, shape_v);
// total = magic(6) + version(2) + length field(2) + dict + newline(1)
size_t length = magic_string_length + 2 + 2 + header_dict.length() + 1;
unsigned char version[2] = {1, 0};
// headers too long for a u16 length field switch to format 2.0 (u32 field)
if (length >= 255*255) {
length = magic_string_length + 2 + 4 + header_dict.length() + 1;
version[0] = 2;
version[1] = 0;
}
size_t padding_len = 16 - length % 16;
std::string padding (padding_len, ' ');
// write magic
write_magic(out, version[0], version[1]);
// write header length (little-endian, width depends on format version)
if (version[0] == 1 && version[1] == 0) {
char header_len_le16[2];
uint16_t header_len = header_dict.length() + padding.length() + 1;
header_len_le16[0] = (header_len >> 0) & 0xff;
header_len_le16[1] = (header_len >> 8) & 0xff;
out.write(reinterpret_cast<char *>(header_len_le16), 2);
}else{
char header_len_le32[4];
uint32_t header_len = header_dict.length() + padding.length() + 1;
header_len_le32[0] = (header_len >> 0) & 0xff;
header_len_le32[1] = (header_len >> 8) & 0xff;
header_len_le32[2] = (header_len >> 16) & 0xff;
header_len_le32[3] = (header_len >> 24) & 0xff;
out.write(reinterpret_cast<char *>(header_len_le32), 4);
}
// dictionary, space padding, terminating newline
out << header_dict << padding << '\n';
}
// Reads and returns the npy header text that follows the magic/version bytes.
// The header length is a little-endian u16 (format 1.0) or u32 (format 2.0).
inline std::string read_header(std::istream& istream) {
    // check magic bytes and version number
    unsigned char v_major, v_minor;
    read_magic(istream, v_major, v_minor);
    uint32_t header_length;
    if (v_major == 1 && v_minor == 0) {
        // assemble via unsigned char: the original signed-char shifts
        // sign-extended any length byte >= 0x80, corrupting the length
        unsigned char header_len_le16[2];
        istream.read(reinterpret_cast<char*>(header_len_le16), 2);
        header_length = (static_cast<uint32_t>(header_len_le16[0]) << 0) |
                        (static_cast<uint32_t>(header_len_le16[1]) << 8);
        if ((magic_string_length + 2 + 2 + header_length) % 16 != 0) {
            // TODO: display warning
        }
    } else if (v_major == 2 && v_minor == 0) {
        unsigned char header_len_le32[4];
        istream.read(reinterpret_cast<char*>(header_len_le32), 4);
        header_length = (static_cast<uint32_t>(header_len_le32[0]) << 0) |
                        (static_cast<uint32_t>(header_len_le32[1]) << 8) |
                        (static_cast<uint32_t>(header_len_le32[2]) << 16) |
                        (static_cast<uint32_t>(header_len_le32[3]) << 24);
        if ((magic_string_length + 2 + 4 + header_length) % 16 != 0) {
            // TODO: display warning
        }
    } else {
        throw std::runtime_error("unsupported file format version");
    }
    // size the buffer with resize(), not reserve(): reading into a
    // reserved-but-empty vector's data() was undefined behavior
    std::vector<char> buf(header_length);
    istream.read(buf.data(), header_length);
    return std::string(buf.data(), header_length);
}
/** Product of all dimensions in `shape`, i.e. the element count
    (1 for an empty shape). */
inline ndarray_len_t comp_size(const std::vector<ndarray_len_t>& shape) {
    ndarray_len_t total = 1;
    for (size_t idx = 0; idx < shape.size(); ++idx) {
        total *= shape[idx];
    }
    return total;
}
// Saves `data` as an .npy file with the given shape. The dtype descriptor is
// derived from Scalar; fortran_order is recorded in the header as given.
// Throws std::runtime_error when the file cannot be opened.
template<typename Scalar>
inline void SaveArrayAsNumpy( const std::string& filename, bool fortran_order, unsigned int n_dims, const unsigned long shape[], const std::vector<Scalar>& data)
{
Typestring typestring_o(data);
std::string typestring = typestring_o.str();
std::ofstream stream( filename, std::ofstream::binary);
if(!stream) {
throw std::runtime_error("io error: failed to open a file.");
}
std::vector<ndarray_len_t> shape_v(shape, shape+n_dims);
write_header(stream, typestring, fortran_order, shape_v);
// raw element bytes follow the header directly
auto size = static_cast<size_t>(comp_size(shape_v));
stream.write(reinterpret_cast<const char*>(data.data()), sizeof(Scalar) * size);
}
// Loads an .npy file into `shape` and `data`. The file's dtype descriptor
// must exactly match the one derived from Scalar (no conversions).
// Throws std::runtime_error on open failure or a dtype mismatch.
// NOTE(review): fortran_order is parsed but not surfaced to the caller —
// Fortran-ordered files are read as-is; confirm callers only use C order.
template<typename Scalar>
inline void LoadArrayFromNumpy(const std::string& filename, std::vector<unsigned long>& shape, std::vector<Scalar>& data)
{
std::ifstream stream(filename, std::ifstream::binary);
if(!stream) {
throw std::runtime_error("io error: failed to open a file.");
}
std::string header = read_header(stream);
// parse header
bool fortran_order;
std::string typestr;
parse_header(header, typestr, fortran_order, shape);
// check if the typestring matches the given one
Typestring typestring_o {data};
std::string expect_typestr = typestring_o.str();
if (typestr != expect_typestr) {
throw std::runtime_error("formatting error: typestrings not matching");
}
// compute the data size based on the shape
auto size = static_cast<size_t>(comp_size(shape));
data.resize(size);
// read the data
stream.read(reinterpret_cast<char*>(data.data()), sizeof(Scalar)*size);
}
} // namespace npy
#endif // NPY_H

View File

@@ -1,73 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
/**
* @brief Base test class for Per Plugin tests
*
* Helper to handle test cases for all Plugins.
* @file
*/
#include <gtest/gtest.h>
#include <cstddef>
#include <string>
#include <tuple>
#include <tests_common.hpp>
#include <ie_core.hpp>
/**
* @brief Container for plugin_name and test params
*
* plugin_name is mandatory field.
*/
template<typename P>
using PlgTestParam = std::tuple<std::string, P>;
/**
* @brief Base class for per plugin tests.
*
* The first tuple element is the plugin/device name; the second is the
* test-specific parameter pack.
*/
template<typename P = std::nullptr_t>
class PlgTest : public testing::TestWithParam<PlgTestParam<P>> {
protected:
// Extra plugin configuration a derived test may fill in before loading.
std::map<std::string, std::string> config;
void SetUp() override {
device_name = std::get<0>(this->GetParam());
// '_' in the parameterized name is replaced with ':' — presumably to
// form compound device names like "HETERO:CPU"; confirm against callers.
std::transform(device_name.begin(), device_name.end(),
device_name.begin(), [] (char v) { return v == '_' ? ':' : v; });
}
// Accessor for the test-specific part of the parameter tuple.
const P &param() const {
return std::get<1>(this->GetParam());
}
// Resolved device name the test should target.
std::string device_name;
};
/**
* @brief Helper to print a readable test-case name.
*
* Wraps a user callback that converts the test parameter P into a string;
* the plugin-name tuple element is ignored.
*/
template<typename P>
class Named {
public:
Named(std::function<std::string(P)> clb) : _clb(clb) {}
const std::string operator() (const testing::TestParamInfo<PlgTestParam<P>> &p) {
return _clb(std::get<1>(p.param));
}
private:
const std::function<std::string(P)> _clb;
};
/**
* @brief Macros to specify Per Plugin Run Test Case with parameters.
*/
#define RUN_CASE_P_WITH_SUFFIX(_plugin, _suffix, _test, _params) \
INSTANTIATE_TEST_SUITE_P(_plugin##_run##_suffix, _test, ::testing::Combine(::testing::Values(#_plugin), ::testing::ValuesIn(_params) ))
/**
* @brief Macros to specify Per Plugin Run Test Case with Cartesian Product of parameters.
*/
#define RUN_CASE_CP_WITH_SUFFIX(_plugin, _suffix, _test, _params, ...) \
INSTANTIATE_TEST_SUITE_P(_plugin##_run##_suffix, _test, ::testing::Combine(::testing::Values(#_plugin), _params), __VA_ARGS__ )

View File

@@ -1,319 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "rnn_gen.hpp"
#include "rnn_referee.hpp"
#include "rnn_util.hpp"
#include "xml_net_builder.hpp"
#include <ie_core.hpp>
#include <vector>
#include <string>
using namespace InferenceEngine;
using std::map;
using std::pair;
using std::vector;
using std::string;
using Shape = InferenceEngine::SizeVector;
// Builds an RNN test-network generator: derives the input/output/state shapes
// from batch (N), sequence length (T) and the sequence axis, creates the
// reference implementation (referee), and allocates one weights blob that the
// w/b sub-blobs view.
RNNGen::RNNGen(size_t batch, size_t seq, CellDesc cell, Mode mode, Direction dir, int axis) :
N(batch), T(seq), cell(cell), mode(mode), dir(dir),
axis(axis), neg(mode == TI_CSTM) {
// dynamic-sequence mode reserves the last step, so the referee models T-1
size_t effective_T = (mode == DYN_SEQ) ? T - 1 : T;
referee = RNN_Referee::create_referee(cell, N, effective_T, D, S);
st_dim = {N, S};
// axis selects whether data is laid out batch-first {N,T,*} or time-first {T,N,*}
id_dim = (axis == 1) ? Shape{N, T, D} : Shape{T, N, D};
od_dim = (axis == 1) ? Shape{N, T, S} : Shape{T, N, S};
seq_l_dim = {N};
state_num = referee->stateNum();
wSzB = referee->wSize() * sizeof(float);
bSzB = referee->bSize() * sizeof(float);
// single U8 backing blob holding weights followed by biases
weights = std::make_shared<TBlob<uint8_t>>(TensorDesc(Precision::U8, SizeVector{(wSzB + bSzB)}, Layout::C));
weights->allocate();
auto ptr = weights->buffer().as<float *>();
SizeVector w_dims{referee->wSize()};
SizeVector b_dims{referee->bSize()};
// w_blob/b_blob are FP32 views into the shared buffer (no extra allocation)
w_blob = make_shared_blob<float>({Precision::FP32, w_dims, TensorDesc::getLayoutByDims(w_dims)}, ptr);
b_blob = make_shared_blob<float>({Precision::FP32, b_dims, TensorDesc::getLayoutByDims(b_dims)},
ptr + referee->wSize());
}
// Builds the IR XML for the test network: one data input, one input per cell
// state, an optional sequence-length input, then the RNN layer itself (cell,
// sequence or TensorIterator form). Edge strings are "layer,port" pairs.
string RNNGen::model() {
auto net_b = CommonTestUtils::V2NetBuilder::buildNetworkWithOneInput("RNN_Net", id_dim, "FP32");
for (int i = 0; i < state_num; i++)
net_b.addInputLayer("FP32", st_dim);
if (mode == DYN_SEQ)
net_b.addInputLayer("FP32", seq_l_dim);
if (mode == CELL)
add_CELL(net_b);
else if (mode == SEQ || mode == DYN_SEQ)
add_SEQ(net_b);
else {
add_TI(net_b);
}
// one data input + state inputs + optional seq-length input
size_t num_input = 1 + state_num + (mode == DYN_SEQ ? 1 : 0);
vector<pair<string, string>> edges;
// wire every input layer to the consecutive input ports of the RNN layer;
// port numbers depend on how many layers precede it
switch (num_input) {
case 4:
edges = {
{"0,0", "4,4"},
{"1,1", "4,5"},
{"2,2", "4,6"},
{"3,3", "4,7"},
};
break;
case 3:
edges = {
{"0,0", "3,3"},
{"1,1", "3,4"},
{"2,2", "3,5"},
};
break;
case 2:
edges = {
{"0,0", "2,2"},
{"1,1", "2,3"},
};
break;
}
return net_b.finish(&edges);
}
// Maps a Cell enum value to its IR layer-type prefix ("LSTM"/"GRU"/"RNN").
static const std::string cell_type(Cell cell) {
    switch (cell) {
        case LSTM:    return "LSTM";
        case GRU:     return "GRU";
        case GRU_lbr: return "GRU";  // linear-before-reset variant shares the GRU layer type
        case RNN:     return "RNN";
        default:      return "Unknown";
    }
}
// Full IR layer type of a single-step cell, e.g. "LSTMCell" or "GRUCell".
static const std::string cell_layer_type(CellDesc cell) {
return cell_type(cell.type) + "Cell";
}
// Builds the IR attribute map shared by cell and sequence layers:
// comma-joined activation names/alphas/betas, clip value and hidden size.
map<string, string> RNNGen::basic_cell_attr() {
map<string, string> attr{};
// Prepare activations attributes
string algs, alpha, beta;
for (auto &act : cell.acts) {
algs += act.alg + ',';
alpha += std::to_string(act.alpha) + ',';
beta += std::to_string(act.beta) + ',';
}
// NOTE(review): pop_back() assumes cell.acts is non-empty — confirm that
// every CellDesc used by the tests defines at least one activation.
algs.pop_back(); // remove last comma
alpha.pop_back();
beta.pop_back();
attr["activations"] = algs;
attr["activations_alpha"] = alpha;
attr["activations_beta"] = beta;
attr["clip"] = std::to_string(cell.clip);
attr["hidden_size"] = std::to_string(S);
if (cell.type == GRU_lbr)
attr["linear_before_reset"] = std::to_string(true);
return attr;
}
// Adds a TensorIterator layer whose body is Reshape -> cell -> Reshape, with
// an optional negating Power layer (TI_CSTM mode). Port-mapping tables below
// encode hard-coded layer/port indices of the generated body and network.
void RNNGen::add_TI(CommonTestUtils::V2NetBuilder &builder) {
/// Generate TI body
// per-iteration shapes: the iterated axis collapses to 1
Shape id_ti = id_dim;
Shape od_ti = od_dim;
id_ti[axis] = 1;
od_ti[axis] = 1;
std::map<std::string, std::string>
cell_attr = basic_cell_attr(),
rsh1_attr{{"dim", "-1," + std::to_string(D)}},
rsh2_attr{{"dim", (axis == 1 ? "-1,1," : "1,-1,") + std::to_string(S)}},
negt_attr{{"scale", "-1"},
{"shift", "0"},
{"power", "1"}};
CommonTestUtils::InOutShapes cell_inout{{{N, D}},
{}};
for (int i = 0; i < state_num; i++) {
cell_inout.inDims.push_back({N, S});
cell_inout.outDims.push_back({N, S});
}
auto body_bilder = CommonTestUtils::V2NetBuilder::buildBody();
body_bilder.addLayer("Reshape", "FP32", &rsh1_attr, {{id_ti},
{{N, D}}});
body_bilder.addLayer(cell_layer_type(cell), "FP32", &cell_attr, cell_inout, wSzB, bSzB);
body_bilder.addLayer("Reshape", "FP32", &rsh2_attr, {{{N, S}},
{od_ti}});
if (neg)
body_bilder.addLayer("Power", "FP32", &negt_attr, {{od_ti},
{od_ti}});
// body edges
// last_l/last_p track the final body layer and its output port; they shift
// when a second state (LSTM) or the negation layer is present
int last_l = 2, last_p = 6;
vector<pair<string, string>> body_edges{
{"0,1", "1,2"},
{"1,4", "2,5"}};
if (state_num == 2) {
body_edges[1] = {"1,5", "2,7"};
last_p += 2;
}
if (neg) {
using std::to_string;
body_edges.push_back({to_string(last_l) + ',' + to_string(last_p),
to_string(last_l + 1) + ',' + to_string(last_p + 1)});
last_l += 1;
last_p += 2;
}
auto body = body_bilder.finish(&body_edges);
/// body is generated
// iteration direction: forward walks 0..-1 with step 1, backward reversed
bool fwd = (dir == FWD);
int st = fwd ? 1 : -1;
int bgn = fwd ? 0 : -1;
int end = fwd ? -1 : 0;
CommonTestUtils::InOutShapes ti_inout{{id_dim},
{od_dim}};
for (int i = 0; i < state_num; i++) {
ti_inout.inDims.push_back({N, S});
ti_inout.outDims.push_back({N, S});
}
int &ll = last_l, lp = last_p;
// port maps: inputs, outputs, then back-edges carrying state between steps
if (state_num == 2) {
builder.TILayer(ti_inout, body,
/* frm_l | frm_p | to_l | to_p | axis | step | start | end */
{{3, 3, 0, 0, axis, st, bgn, end},
{3, 4, 1, 3, -1},
{3, 5, 1, 4, -1}},
{{3, 6, ll, lp, axis, st, bgn, end},
{3, 7, 1, 5, -1},
{3, 8, 1, 6, -1}},
{{1, 5, 1, 3},
{1, 6, 1, 4}});
} else {
builder.TILayer(ti_inout, body,
/* frm_l | frm_p | to_l | to_p | axis | step | start | end */
{{2, 2, 0, 0, axis, st, bgn, end},
{2, 3, 1, 3, -1}},
{{2, 4, ll, lp, axis, st, bgn, end},
{2, 5, 1, 4, -1}},
{{1, 4, 1, 3}});
}
}
// Adds a whole-sequence RNN layer (e.g. "LSTMSequence") with direction and
// axis attributes; appends the sequence-length input in dynamic-seq mode.
void RNNGen::add_SEQ(CommonTestUtils::V2NetBuilder &builder) {
map<string, string> seq_attr = basic_cell_attr();
string direction = dir == FWD ? "Forward" :
dir == BWD ? "Backward" :
dir == BDR ? "Bidirectional" :
"Unknown";
seq_attr["direction"] = direction;
seq_attr["axis"] = std::to_string(axis);
CommonTestUtils::InOutShapes inout{{id_dim},
{od_dim}};
for (int i = 0; i < state_num; i++) {
inout.inDims.push_back({N, S});
inout.outDims.push_back({N, S});
}
if (mode == DYN_SEQ) {
inout.inDims.push_back(seq_l_dim);
}
auto seq_type = cell_type(cell.type) + "Sequence";
builder.addLayer(seq_type, "FP32", &seq_attr, inout, wSzB, bSzB);
}
// Adds a single-step LSTMCell layer: data input {N,D} plus two {N,S} states
// in, output {N,S} plus two {N,S} states out.
// NOTE(review): the layer type is hard-coded to "LSTMCell" regardless of
// cell.type — confirm CELL mode is only exercised with LSTM.
void RNNGen::add_CELL(CommonTestUtils::V2NetBuilder &builder) {
auto id = Shape{N, D};
auto od = Shape{N, S};
map<string, string> cell_p = {{"hidden_size", std::to_string(S)}};
builder.addLayer("LSTMCell", "FP32", &cell_p,
{{id, {N, S}, {N, S}},
{od, {N, S}, {N, S}}},
wSzB, bSzB);
}
// Materializes the generated topology: the referee fills the weight/bias
// blobs so the network scores exactly as the reference math predicts, then
// the IR string is read back as a CNNNetwork.
CNNNetwork RNNGen::net() {
    referee->wFiller(w_blob);
    referee->bFiller(b_blob);
    Core core;
    const auto xml = model();
    return core.ReadNetwork(xml, weights);
}
// Returns one filler per network input, adapting the referee's canonical
// fillers to the requested direction / sequence axis / mode of this topology.
// Order of adaptations matters: reverse first, then axis permute, then the
// dynamic-sequence zero padding.
const std::vector<Filler> RNNGen::fillers() const {
    auto fillers = referee->getDataFillers();
    if (dir == BWD)
        // Reverse seq dim for input and output
        fillers[0] = reverse(fillers[0], 1);
    if (axis == 0)
        // Swap N and T dims
        fillers[0] = permute(fillers[0], {1, 0, 2});
    // filler for sequence length tensor
    if (mode == DYN_SEQ) {
        using namespace std::placeholders;
        // Declare every sequence one step shorter than T...
        fillers.push_back(std::bind(scalar_filler, _1, SizeVector{N}, T - 1));
        // ...and pad the corresponding last slice of the data input with zeros.
        auto zero_shape = id_dim;
        zero_shape[axis] = 1;
        Filler zero_filler(std::bind(scalar_filler, _1, zero_shape, 0.0f));
        fillers[0] = concat(fillers[0], zero_filler, axis);
    }
    return fillers;
}
// Returns one checker per network output, mirroring the adaptations applied
// to the fillers (negation for TI_CSTM, reverse for BWD, axis permute, and a
// zero-checked tail slice for dynamic sequence length).
const std::vector<Checker> RNNGen::checkers() const {
    auto checkers = referee->getDataChecker();
    if (mode == TI_CSTM)
        // Negative data blob checker. Customization is negative Power layer at the end of TI body
        checkers[0] = negative(checkers[0]);
    if (dir == BWD)
        // Reverse seq dim for input and output
        checkers[0] = reverse(checkers[0], 1);
    if (axis == 0)
        // Swap N and T dims
        checkers[0] = permute(checkers[0], {1, 0, 2});
    if (mode == DYN_SEQ) {
        using namespace std::placeholders;
        // Steps past the declared sequence length must stay zero.
        auto zero_shape = od_dim;
        zero_shape[axis] = 1;
        Checker zero_checker(std::bind(scalar_checker, _1, zero_shape, 0.0f));
        checkers[0] = concat(checkers[0], zero_checker, axis);
    }
    return checkers;
}

View File

@@ -1,75 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "xml_net_builder.hpp"
#include "rnn_referee.hpp"
#include <cpp/ie_cnn_network.h>
#include <vector>
#include <string>
/** Representation mode of the recurrent network under test. */
enum Mode {
    CELL,    /**< single LSTMCell layer */
    SEQ,     /**< single LSTMSeq layer */
    DYN_SEQ, /**< single LSTMSeq layer with seq length input*/
    TI,      /**< TI layer with LSTM body */
    TI_CSTM  /**< TI layer with LSTM plus negative at the body */
};
/** Iteration direction along the sequence dimension. */
enum Direction {
    FWD, /**< Forward. With stride 1 */
    BWD, /**< Backward. With stride -1 */
    BDR  /**< Bidirectional. With stride 1 and -1 */
};
/**
 * Topology generator for some RNN specific cases
 *
 * Builds an IR model (via V2NetBuilder) for one of the Mode variants and
 * provides matching input fillers / output checkers computed by RNN_Referee.
 */
class RNNGen {
public:
    /** Sequence topology */
    RNNGen(size_t batch, size_t seq, CellDesc cell, Mode mode, Direction dir, int axis);
    /** One filler per network input, adapted to dir/axis/mode. */
    const std::vector<Filler> fillers() const;
    /** One checker per network output, adapted to dir/axis/mode. */
    const std::vector<Checker> checkers() const;
    /** Generates the IR and reads it back as a CNNNetwork. */
    InferenceEngine::CNNNetwork net();
private:
    const size_t D = 10;  // Data size
    const size_t S = 5;   // State size
    const size_t G = 4;   // Number of gate
    const size_t N;       // Batch
    const size_t T;       // Sequence
    const int axis;       // Axis of sequence
    const Mode mode;      // Representation mode (cell/seq/TI/...)
    const CellDesc cell;  // Cell type, activations and clip
    const Direction dir;  // Forward/backward/bidirectional
    const bool neg;       // True for TI_CSTM (negation appended to TI body)
    size_t state_num = 0; // Number of state tensors of the cell
    size_t wSzB = 0;      // Weights size, bytes
    size_t bSzB = 0;      // Biases size, bytes
    // seq_l_dim: seq-length input dims; id_dim/od_dim: data in/out dims.
    InferenceEngine::SizeVector seq_l_dim, st_dim, id_dim, od_dim;
    InferenceEngine::TBlob<uint8_t>::Ptr weights;  // packed weights+biases
    InferenceEngine::Blob::Ptr w_blob, b_blob;     // views filled by referee
    std::shared_ptr<RNN_Referee> referee;          // reference-math provider
private:
    std::string model();
    void add_TI(CommonTestUtils::V2NetBuilder &builder);
    void add_SEQ(CommonTestUtils::V2NetBuilder &builder);
    void add_CELL(CommonTestUtils::V2NetBuilder &builder);
    std::map<std::string, std::string> basic_cell_attr();
};

View File

@@ -1,290 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "rnn_referee.hpp"
#include <cmath>
#include <vector>
#include <string>
using namespace InferenceEngine;
using namespace std::placeholders;
using std::vector;
/**
 * Base class for RNN cell reference implementations. Holds analytically
 * derived per-gate weights/biases (W, B) and the per-blob data fillers and
 * checkers that concrete cell subclasses populate.
 */
class RNN_ReferBase : public RNN_Referee {
protected:
    // Init list follows member declaration order (D,S,G,Gb,state_num,clip)
    // to avoid -Wreorder; members are independent, so semantics are unchanged.
    RNN_ReferBase(float clip, size_t D, size_t S, size_t G, size_t Gb, size_t ST_N)
            : D(D), S(S), G(G), Gb(Gb), state_num(ST_N), clip(clip) {}
    const size_t D, S, G, Gb;  // data size, state size, gates, bias gates
    const size_t state_num;    // number of state tensors (LSTM: 2, GRU/RNN: 1)
    const float clip;          // clip threshold; 0 means "no clipping"
    vector<float> W, B;        // per-gate weight/bias scalars set by subclasses
    vector<Filler> _d_filler;
    vector<Checker> _d_checker;
    const vector<Filler>& getDataFillers() override { return _d_filler; }
    const vector<Checker>& getDataChecker() override { return _d_checker; }
    size_t wSize() override { return G*S*(S+D); }
    size_t bSize() override { return Gb*S; }
    size_t stateNum() override { return state_num; }
    using Act = std::function<float(const float)>;
    // Saturate x into [-clip, clip].
    static float _clip (const float x, const float clip) {
        return std::min(std::max(x, -clip), clip);
    }
    // Wrap an activation so its argument is clipped first.
    static Act clip_before(Act act, const float clip) {
        return [=] (const float x) {
            return act(_clip(x, clip));
        };
    }
    // Map an activation descriptor to a callable; throws on an unknown alg.
    Act act(ActivationDesc act) {
        float alpha = act.alpha;
        Act res;
        if (act.alg == "sigmoid")
            res = [=] (const float x) { return 1 / (1 + std::exp(-x)); };
        else if (act.alg == "tanh")
            res = [=] (const float x) { return std::tanh(x); };
        else if (act.alg == "relu")
            res = [=] (const float x) { return (x > 0) ? x : alpha*x; };
        else
            IE_THROW() << "Unknown activation type " << act.alg;
        return res;
    }
public:
    // Fill the weight blob: for each gate g, data columns get W[g]/D and
    // recurrent columns get W[g]/S, so a row-dot-product equals W[g]*(x + h).
    void wFiller(Blob::Ptr blob) override {
        IE_ASSERT(blob->size() == wSize());
        auto ptr = blob->buffer().as<float*>();
        for (size_t g = 0; g < G; g++)
            for (size_t s = 0; s < S; s++) {
                for (size_t i = 0; i < D; i++) *ptr++ = W[g] / D;
                for (size_t i = 0; i < S; i++) *ptr++ = W[g] / S;
            }
    }
    // Fill the bias blob: gate g contributes B[g] to each of its S rows.
    void bFiller(Blob::Ptr blob) override {
        IE_ASSERT(blob->size() == bSize());
        auto ptr = blob->buffer().as<float*>();
        for (size_t g = 0; g < Gb; g++)
            for (size_t s = 0; s < S; s++) *ptr++ = B[g];
    }
};
#define Vals(_name) std::vector<float> _name(T+1)
// Analytic LSTM reference: 4 gates (f,i,c,o), 4 bias rows, 2 states (H, C).
// Weights are constant within each gate, so the tensor recurrence collapses
// to the scalar recurrence computed below.
class LSTMCell_Refer : public RNN_ReferBase {
public:
    LSTMCell_Refer(CellDesc cell, size_t N, size_t T, size_t D, size_t S) : RNN_ReferBase(cell.clip, D, S, 4, 4, 2) {
        // Some random values in range [0,1]
        const float H0 = 0.3, C0 = 0.77;  // initial hidden / cell state
        const float Wf = 0.1, Bf = 0.35;
        const float Wi = 0.2, Bi = 0.25;
        const float Wc = 0.5, Bc = 0.15;
        const float Wo = 0.7, Bo = 0.05;
        auto _f = act(cell.acts[0]);  // gate activation
        auto _g = act(cell.acts[1]);  // candidate activation
        auto _h = act(cell.acts[2]);  // output activation
        if (clip > 0.0f) {
            // Clipping applies to gate pre-activations only, not to _h.
            _f = clip_before(_f, clip);
            _g = clip_before(_g, clip);
        }
        Vals(f); Vals(i); Vals(c); Vals(o);
        Vals(X); Vals(H); Vals(C);
        H[0] = H0;
        C[0] = C0;
        // Standard LSTM recurrence over inputs X[t] = t.
        for (int t = 1; t < T+1; t++) {
            X[t] = t;
            f[t] = _f(Wf*(H[t-1] + X[t]) + Bf);
            i[t] = _f(Wi*(H[t-1] + X[t]) + Bi);
            c[t] = _g(Wc*(H[t-1] + X[t]) + Bc);
            o[t] = _f(Wo*(H[t-1] + X[t]) + Bo);
            C[t] = f[t] * C[t-1] + i[t] * c[t];
            H[t] = o[t] * _h(C[t]);
        }
        W = {Wf, Wi, Wc, Wo};
        B = {Bf, Bi, Bc, Bo};
        X.erase(X.begin()); // remove first element (unused zero element)
        H.erase(H.begin());
        C.erase(C.begin());
        // Inputs: data X {N,T,D}, initial H {N,S}, initial C {N,S}.
        _d_filler.resize(3);
        _d_filler[0] = std::bind(vector_filler, _1, SizeVector {N,T,D}, X, 1);
        _d_filler[1] = std::bind(scalar_filler, _1, SizeVector {N,S}, H0);
        _d_filler[2] = std::bind(scalar_filler, _1, SizeVector {N,S}, C0);
        // Outputs: full H sequence plus the last H and C states.
        _d_checker.resize(3);
        _d_checker[0] = std::bind(vector_checker, _1, SizeVector {N,T,S}, H, 1);
        _d_checker[1] = std::bind(scalar_checker, _1, SizeVector {N,S} , H[T-1]);
        _d_checker[2] = std::bind(scalar_checker, _1, SizeVector {N,S} , C[T-1]);
    }
};
// Analytic GRU reference: 3 gates (z,r,h), 3 bias rows, 1 state (H).
class GRUCell_Refer : public RNN_ReferBase {
public:
    GRUCell_Refer(CellDesc cell, size_t N, size_t T, size_t D, size_t S) : RNN_ReferBase(cell.clip, D, S, 3, 3, 1) {
        // Some random values in range [0,1]
        const float H0 = 0.3;  // initial hidden state
        const float Wz = 0.1, Bz = 0.35;
        const float Wr = 0.2, Br = 0.25;
        const float Wh = 0.5, Bh = 0.15;
        auto _f = act(cell.acts[0]);  // gate activation
        auto _g = act(cell.acts[1]);  // candidate activation
        if (clip > 0.0f) {
            _f = clip_before(_f, clip);
            _g = clip_before(_g, clip);
        }
        Vals(z); Vals(r); Vals(h);
        Vals(X); Vals(H);
        H[0] = H0;
        // Standard GRU recurrence over inputs X[t] = t.
        for (int t = 1; t < T+1; t++) {
            X[t] = t;
            z[t] = _f(Wz*(H[t-1] + X[t]) + Bz);
            r[t] = _f(Wr*(H[t-1] + X[t]) + Br);
            h[t] = _g(Wh*(H[t-1]*r[t] + X[t]) + Bh);
            H[t] = (1 - z[t])*h[t] + z[t]*H[t-1];
        }
        W = {Wz, Wr, Wh};
        B = {Bz, Br, Bh};
        X.erase(X.begin());  // drop unused t=0 entries
        H.erase(H.begin());
        // Inputs: data X {N,T,D} and initial H {N,S}.
        _d_filler.resize(2);
        _d_filler[0] = std::bind(vector_filler, _1, SizeVector {N,T,D}, X, 1);
        _d_filler[1] = std::bind(scalar_filler, _1, SizeVector {N,S} , H0);
        // Outputs: full H sequence and last H state.
        _d_checker.resize(2);
        _d_checker[0] = std::bind(vector_checker, _1, SizeVector {N,T,S}, H, 1);
        _d_checker[1] = std::bind(scalar_checker, _1, SizeVector {N,S} , H[T-1]);
    }
};
// Analytic GRU reference with linear_before_reset: 3 weight gates but 4 bias
// rows (extra recurrent bias Bhr applied before the reset gate multiply).
class GRUlbrCell_Refer : public RNN_ReferBase {
public:
    GRUlbrCell_Refer(CellDesc cell, size_t N, size_t T, size_t D, size_t S) : RNN_ReferBase(cell.clip, D, S, 3, 4, 1) {
        // Some random values in range [0,1]
        const float H0 = 0.3;  // initial hidden state
        const float Wz = 0.1, Bz = 0.35;
        const float Wr = 0.2, Br = 0.25;
        const float Wh = 0.5, Bh = 0.15, Bhr = 0.33;
        auto _f = act(cell.acts[0]);  // gate activation
        auto _g = act(cell.acts[1]);  // candidate activation
        if (clip > 0.0f) {
            _f = clip_before(_f, clip);
            _g = clip_before(_g, clip);
        }
        Vals(z); Vals(r); Vals(h);
        Vals(X); Vals(H);
        H[0] = H0;
        // linear_before_reset variant: r multiplies (Wh*H + Bhr), not H alone.
        for (int t = 1; t < T+1; t++) {
            X[t] = 0.1 * t;
            z[t] = _f(Wz*(H[t-1] + X[t]) + Bz);
            r[t] = _f(Wr*(H[t-1] + X[t]) + Br);
            h[t] = _g(Wh*X[t] + r[t]*(Wh*H[t-1] + Bhr) + Bh);
            H[t] = (1 - z[t])*h[t] + z[t]*H[t-1];
        }
        W = {Wz, Wr, Wh};
        B = {Bz, Br, Bh, Bhr};
        X.erase(X.begin());  // drop unused t=0 entries
        H.erase(H.begin());
        // Inputs: data X {N,T,D} and initial H {N,S}.
        _d_filler.resize(2);
        _d_filler[0] = std::bind(vector_filler, _1, SizeVector {N,T,D}, X, 1);
        _d_filler[1] = std::bind(scalar_filler, _1, SizeVector {N,S} , H0);
        // Outputs: full H sequence and last H state.
        _d_checker.resize(2);
        _d_checker[0] = std::bind(vector_checker, _1, SizeVector {N,T,S}, H, 1);
        _d_checker[1] = std::bind(scalar_checker, _1, SizeVector {N,S} , H[T-1]);
    }
};
// Analytic vanilla-RNN reference: 1 gate, 1 bias row, 1 state (H).
class RNNCell_Refer : public RNN_ReferBase {
public:
    RNNCell_Refer(CellDesc cell, size_t N, size_t T, size_t D, size_t S) : RNN_ReferBase(cell.clip, D, S, 1, 1, 1) {
        // Some random values in range [0,1]
        const float H0 = 0.3;  // initial hidden state
        const float Wh = 0.5, Bh = 0.15;
        auto _f = act(cell.acts[0]);  // the single activation
        if (clip > 0.0f)
            _f = clip_before(_f, clip);
        Vals(X); Vals(H);
        H[0] = H0;
        // H[t] = f(Wh*(H[t-1] + X[t]) + Bh), with X[t] = t.
        for (int t = 1; t < T+1; t++) {
            X[t] = t;
            H[t] = _f(Wh*(H[t-1] + X[t]) + Bh);
        }
        W = {Wh};
        B = {Bh};
        X.erase(X.begin());  // drop unused t=0 entries
        H.erase(H.begin());
        // Inputs: data X {N,T,D} and initial H {N,S}.
        _d_filler.resize(2);
        _d_filler[0] = std::bind(vector_filler, _1, SizeVector {N,T,D}, X, 1);
        _d_filler[1] = std::bind(scalar_filler, _1, SizeVector {N,S} , H0);
        // Outputs: full H sequence and last H state.
        _d_checker.resize(2);
        _d_checker[0] = std::bind(vector_checker, _1, SizeVector {N,T,S}, H, 1);
        _d_checker[1] = std::bind(scalar_checker, _1, SizeVector {N,S} , H[T-1]);
    }
};
// Factory: dispatches on the cell type to the matching reference
// implementation. Uses std::make_shared (single allocation) instead of raw
// `new`; the stray semicolon after the function body is removed.
// Returns nullptr only if an unhandled enum value slips through the switch.
std::shared_ptr<RNN_Referee> RNN_Referee::create_referee(CellDesc cell, size_t N, size_t T, size_t D, size_t S) {
    std::shared_ptr<RNN_Referee> res;
    switch (cell.type) {
        case LSTM:
            res = std::make_shared<LSTMCell_Refer>(cell, N, T, D, S);
            break;
        case GRU:
            res = std::make_shared<GRUCell_Refer>(cell, N, T, D, S);
            break;
        case GRU_lbr:
            res = std::make_shared<GRUlbrCell_Refer>(cell, N, T, D, S);
            break;
        case RNN:
            res = std::make_shared<RNNCell_Refer>(cell, N, T, D, S);
            break;
    }
    return res;
}

View File

@@ -1,58 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ie_blob.h"
#include "rnn_util.hpp"
#include <vector>
enum Cell {
    LSTM,   /**< Vanilla LSTMCell */
    GRU,    /**< Vanilla GRUCell */
    RNN,    /**< Vanilla RNNCell */
    GRU_lbr /**< GRUCell with linear_before_reset */
};
/**
 * Descriptor of activation function
 *   alg : algorithm name ["sigmoid", "tanh", "relu", ...]
 *   alpha, beta : optional algorithm parameters
 */
struct ActivationDesc {
    std::string alg;
    float alpha;
    float beta;
};
using ActivationSet = std::vector<ActivationDesc>;
/**
 * Descriptor of general RNN cell
 */
struct CellDesc {
    Cell type;          /**< Type of RNN cell */
    ActivationSet acts; /**< Activation algorithms */
    float clip;         /**< Clip value. 0 - no clipping */
};
/**
 * Ref scoring for some RNN cells
 * Provide weight filler and in_data filler and out_data checker
 */
class RNN_Referee {
public:
    /** Factory: returns the reference implementation matching cell.type. */
    static std::shared_ptr<RNN_Referee> create_referee(CellDesc cell, size_t N, size_t T, size_t D, size_t S);
    virtual ~RNN_Referee() = default;
    virtual void wFiller(InferenceEngine::Blob::Ptr) = 0;  // fills weight blob
    virtual void bFiller(InferenceEngine::Blob::Ptr) = 0;  // fills bias blob
    virtual size_t wSize() = 0;     // weight element count
    virtual size_t bSize() = 0;     // bias element count
    virtual size_t stateNum() = 0;  // number of state tensors
    virtual const std::vector<Filler>& getDataFillers() = 0;
    virtual const std::vector<Checker>& getDataChecker() = 0;
};

View File

@@ -1,187 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "rnn_gen.hpp"
#include "plg_test.hpp"
#include <cmath>
#include <vector>
#include <string>
using namespace ::testing;
using namespace InferenceEngine;
using std::map;
using std::pair;
using std::vector;
using std::string;
enum Reshape {
RESH_NO = 0, /**< No reshape step */
RESH_B = 1, /**< Reshape for batch dim */
RESH_T = 2, /**< Reshape for time dim */
RESH_BT = 3 /**< Reshape for both batch and time dims */
};
using rnn_param = std::tuple<
CellDesc, /**< cell - Descriptor of RNN cell */
float, /**< clip - Clip value */
Direction, /**< fwd - Direction */
Mode, /**< mode - Modes of LSTM representation */
size_t, /**< N - Batch size */
size_t, /**< T - Sequence length */
size_t, /**< axis - Dimension with T */
Reshape /**< shape - Apply reshape. +1 to original dim */
>;
// Builds a readable test name from the parameter tuple.
// NB: _S is bound to tuple element 5 (the sequence length T), hence "_s".
const Named<rnn_param> test_name( [] (const rnn_param &p) {
    CellDesc _cell; Direction _dir; Mode _mode; Reshape _resh;
    size_t _N, _S, _axis;
    float _clip;
    std::tie(_cell,_clip,_dir,_mode,_N,_S,_axis,_resh) = p;
    // Cell type plus the first letter of each activation algorithm.
    string res = _cell.type == LSTM ? "LSTM_" : _cell.type == GRU ? "GRU__" : "RNN__";
    for (auto &act : _cell.acts) res += act.alg[0];
    res += _dir == FWD ? "_FWD" : _dir == BWD ? "_BWD" : _dir == BDR ? "_BDR" : "_XXX";
    res += _mode == SEQ ? "_SEQ" : _mode == TI ? "__TI" : _mode == TI_CSTM ? "_TIX" : "_XXX";
    res += (_clip == 0.0f) ? "_c0" : "_cX";
    res += "_b" + std::to_string(_N);
    res += "_s" + std::to_string(_S);
    res += "_axis" + std::to_string(_axis);
    res += _resh == RESH_NO ? "_reshNO" :
           _resh == RESH_B ? "__reshB" :
           _resh == RESH_T ? "__reshT" :
           _resh == RESH_BT ? "_reshBT" : "_X";
    return res;
});
using RNNSeqTest = PlgTest<rnn_param>;
// disabled due to transition to ngraph transformation
// DO NOT DELETE, part of the functionality is still needed
//
// End-to-end check: build an RNN topology, optionally reshape it, load on the
// target device, run inference and compare each output against the analytic
// reference checker.
TEST_P(RNNSeqTest, DISABLED_SingleRNN) {
    auto p = param();
    auto cell = std::get<0>(p);
    auto clip = std::get<1>(p);
    auto dir = std::get<2>(p);
    auto mode = std::get<3>(p);
    auto N = std::get<4>(p);
    auto T = std::get<5>(p);
    auto axis = std::get<6>(p);
    auto resh = std::get<7>(p);
    // NOTE(review): GPU path is LSTM-only here — presumably other cells were
    // unsupported at the time; confirm before re-enabling.
    if (device_name == "GPU" && cell.type != LSTM)
        GTEST_SKIP();
    cell.clip = clip;
    /************ Test Body *****************************/
    RNNGen topology(N, T, cell, mode , dir, axis);
    auto net = topology.net();
    auto fillers = topology.fillers();
    auto checkers = topology.checkers();
    // Reshape if requested
    if (resh != RESH_NO) {
        const bool resh_b = resh & RESH_B;
        const bool resh_t = resh & RESH_T;
        auto shapes = net.getInputShapes();
        for (auto &pair : shapes) {
            // Blobs with data
            if (pair.second.size() == 3) {
                // (axis+1)%2 is the batch dim when axis selects the T dim.
                if (resh_b) pair.second[(axis+1)%2]++;
                if (resh_t) pair.second[axis]++;
            }
            // Blobs with state or Seq Len
            if (pair.second.size() == 1 || pair.second.size() == 2) {
                if (resh_b) pair.second[0]++;
            }
        }
        net.reshape(shapes);
        // Also need new fillers/checkers for new shapes
        RNNGen resh_topology(resh_b ? N+1 : N, resh_t ? T+1 : T, cell, mode , dir, axis);
        fillers = resh_topology.fillers();
        checkers = resh_topology.checkers();
    }
    Core ie;
    auto execNet = ie.LoadNetwork(net, device_name);
    auto req = execNet.CreateInferRequest();
    // One filler per input, one checker per output — enforced here.
    ASSERT_TRUE(net.getInputsInfo().size() == fillers.size());
    ASSERT_TRUE(net.getOutputsInfo().size() == checkers.size());
    int i = 0;
    for (auto &info : net.getInputsInfo())
        fillers[i++](req.GetBlob(info.first));
    req.Infer();
    i = 0;
    for (auto &info : net.getOutputsInfo())
        EXPECT_TRUE(checkers[i++](req.GetBlob(info.first))) << "Error with #" << i << " output";
}
// Cell configurations under test: each entry pairs a cell type with an
// activation set; the clip field is overwritten from the Clip test parameter.
const std::vector<CellDesc> cells = {
    /** LSTM modifications */
    {LSTM, {{"sigmoid",0,0}, {"tanh",0,0}, {"tanh",0,0}} },  // default
    {LSTM, {{"tanh",0,0}, {"sigmoid",0,0}, {"relu",0,0}} },
    /** GRU modifications */
    {GRU , {{"sigmoid",0,0}, {"tanh",0,0}} },  // default
    {GRU , {{"tanh",0,0}, {"relu",0,0}} },
    /** GRU linear_before_reset modifications */
    {GRU_lbr , {{"sigmoid",0,0}, {"tanh",0,0}} },  // default
    {GRU_lbr , {{"tanh",0,0}, {"relu",0,0}} },
    /** RNN modifications */
    {RNN , {{"tanh",0,0}} },  // default
    {RNN , {{"sigmoid",0,0}} },
    {RNN , {{"relu",0,0}} },
};
// Test parameter grids. The full grid is kept under "#if 0" for reference;
// the CI build uses the reduced subset below it.
#if 0
// All combination of next parameters
const auto workload = Combine(
        ValuesIn(cells),          // Cell desc
        Values(0.0f, 0.7f),       // Clip arg
        Values(FWD, BWD),         // Direction
        Values(SEQ, DYN_SEQ,      // Representation mode
               TI, TI_CSTM),      //
        Values(1, 3),             // Batch
        Values(3),                // Sequence size
        Values(0, 1),             // Axis of sequence
        Values(RESH_NO, RESH_B,   // Reshape mode for batch, sequence or both
               RESH_T, RESH_BT)   //
);
#else
// All combination of next parameters ( small subset for fast CI testing)
const auto workload = Combine(
        ValuesIn(cells.begin(),   // Cell desc (only first 5)
                 cells.begin()+7), //
        Values(0.0f, 0.7f),       // Clip arg
        Values(FWD, BWD),         // Direction
        Values(SEQ, TI),          // Representation mode
        Values(2),                // Batch
        Values(3),                // Sequence size
        Values(0, 1),             // Axis of sequence
        Values(RESH_NO /*, RESH_B TODO: migrate to ngraph reshape */)  // Reshape mode for batch, sequence or both
);
#endif
// All combination of next parameters ( small subset for fast CI testing)
const auto dyn_seq_workload = Combine(
        ValuesIn(std::vector<CellDesc> {cells[0], cells[2], cells[4], cells[6]}),
        Values(0.0f),             // Clip arg
        Values(FWD, BWD, BDR),    // Direction
        Values(DYN_SEQ),          // Representation mode
        Values(1, 8),             // Batch
        Values(3, 100),           // Sequence size
        Values(0, 1),             // Axis of sequence
        Values(RESH_NO /*, RESH_B TODO: migrate to ngraph reshape */)  // Reshape mode for batch, sequence or both
);

View File

@@ -1,307 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "rnn_util.hpp"
#include <string>
#include <cmath>
using namespace InferenceEngine;
#define T_LOOP_RANK 5
/**
 * @brief Iterate through tensor values and do action for each
 * elements.
 *
 * Signature of action is : (data_t &x, int *i) -> void
 *   x - is reference on tensor element
 *   i - array of logical indexes
 *
 * @tparam T action functor type. Generally is lambda
 * @param blob to iterate through
 * @param act functor to apply for each value in tensor
 */
template <typename T>
void T_LOOP(Blob::Ptr &blob, const T &act) {
    const auto &td = blob->getTensorDesc();
    const auto &dims = td.getDims();
    const auto &blk_d = td.getBlockingDesc();
    const auto &strides = blk_d.getStrides();
    // Promote any rank up to T_LOOP_RANK (5) by left-padding dims with 1
    // and strides with 0, so a single 5-deep loop nest covers all cases.
    int D[] = {1, 1, 1, 1, 1};
    std::copy(dims.begin(), dims.end(), std::end(D) - dims.size());
    int i[] = {0, 0, 0, 0, 0};
    int &i0 = i[0], &i1 = i[1], &i2 = i[2], &i3 = i[3], &i4 = i[4];
    int s[] = {0, 0, 0, 0, 0};
    // (stray macro line-continuation '\' removed from the line below)
    std::copy(strides.begin(), strides.end(), std::end(s) - dims.size());
    int &s0 = s[0], &s1 = s[1], &s2 = s[2], &s3 = s[3], &s4 = s[4];
    size_t off_ = blk_d.getOffsetPadding();
    // NOTE(review): buffer is cast to float* — assumes FP32 blobs; confirm.
    auto *ptr = blob->buffer().as<float*>();
    for (i0 = 0; i0 < D[0]; i0++) { auto off0 = off_ + i0 * s0;
    for (i1 = 0; i1 < D[1]; i1++) { auto off1 = off0 + i1 * s1;
    for (i2 = 0; i2 < D[2]; i2++) { auto off2 = off1 + i2 * s2;
    for (i3 = 0; i3 < D[3]; i3++) { auto off3 = off2 + i3 * s3;
    for (i4 = 0; i4 < D[4]; i4++) { auto off4 = off3 + i4 * s4;
        act(ptr[off4], i);
    }}}}}
}
// Checker adapter: validates the element-wise negation of the blob, so a
// reference checker can be reused when the topology appends a negating layer.
Checker negative(Checker checker) {
    return [=] (Blob::Ptr blob) -> bool {
        const auto &desc = blob->getTensorDesc();
        auto negated = make_shared_blob<float>({Precision::FP32, desc.getDims(), desc.getLayout()});
        negated->allocate();
        const float *src = blob->buffer().as<float*>();
        float *dst = negated->buffer().as<float*>();
        const size_t total = blob->size();
        for (size_t idx = 0; idx < total; idx++)
            dst[idx] = -src[idx];
        return checker(negated);
    };
}
// Copy src into dst with one dimension (axis) traversed in reverse order.
// Both blobs must have identical dims; FP32 data only (buffer cast below).
static void copy_with_reverse(Blob::Ptr &src, Blob::Ptr &dst, int axis) {
    IE_ASSERT(src->getTensorDesc().getDims() == dst->getTensorDesc().getDims());
    const auto &td = src->getTensorDesc();
    const auto &dims = td.getDims();
    const auto &blk_d = td.getBlockingDesc();
    const auto &strides = blk_d.getStrides();
    int D[] = {1, 1, 1, 1, 1};
    std::copy(dims.begin(), dims.end(), std::end(D) - dims.size());
    int s[] = {0, 0, 0, 0, 0};
    // (stray macro line-continuation '\' removed from the line below)
    std::copy(strides.begin(), strides.end(), std::end(s) - dims.size());
    int &s0 = s[0], &s1 = s[1], &s2 = s[2], &s3 = s[3], &s4 = s[4];
    size_t off_ = blk_d.getOffsetPadding();
    // Dims are left-padded to rank T_LOOP_RANK, so shift the axis to match.
    axis += T_LOOP_RANK - dims.size();
    // to iterate through tensor with reversed one dimension we need to
    // make stride negative and update offset.
    int reverse_str = s[axis];
    s[axis] = -reverse_str;
    off_ += (D[axis] - 1)*reverse_str;
    auto src_off = [=] (const int *i) {
        return off_ + i[0]*s0 + i[1]*s1 + i[2]*s2 + i[3]*s3 + i[4]*s4;
    };
    const auto *src_ptr = src->buffer().as<float*>();
    T_LOOP( dst, [&](float &x, const int *i) {
        x = src_ptr[ src_off(i) ];
    });
}
/** Make view blob (ROI) on parent blob. Doesn't hold parent blob */
static Blob::Ptr make_view(const Blob::Ptr &src, const SizeVector dims, const SizeVector offsets) {
    auto src_dims = src->getTensorDesc().getDims();
    // The requested region must fit inside the parent tensor.
    IE_ASSERT(dims.size() == src_dims.size());
    IE_ASSERT(dims.size() == offsets.size());
    for (size_t i = 0; i < dims.size(); i++)
        IE_ASSERT(dims[i] + offsets[i] <= src_dims[i]);
    auto desc = src->getTensorDesc();
    auto b_desc = desc.getBlockingDesc();
    // move T desc to specified offset
    const auto new_off = desc.offset(offsets);
    // Keep the parent's order and strides so the view aliases parent memory.
    TensorDesc new_desc { desc.getPrecision(), dims,
        BlockingDesc { dims,
                       b_desc.getOrder(), new_off,
                       b_desc.getOffsetPaddingToData(),
                       b_desc.getStrides() }
    };
    // TODO: Only FP32 supported here
    IE_ASSERT(desc.getPrecision() == Precision::FP32) << "Current limitation. Only FP32 is supported";
    return make_shared_blob<float>(new_desc, src->buffer());
}
// Checker adapter: un-reverses the given axis before delegating, so a
// forward-order reference checker can validate a reversed-output blob.
Checker reverse(const Checker checker, int axis) {
    return [=] (Blob::Ptr blob) -> bool {
        const auto &desc = blob->getTensorDesc();
        Blob::Ptr flipped = make_shared_blob<float>({Precision::FP32, desc.getDims(), desc.getLayout()});
        flipped->allocate();
        copy_with_reverse(blob, flipped, axis);
        return checker(flipped);
    };
}
// Filler adapter: runs the wrapped filler into a scratch blob, then writes
// it into the target with the given axis reversed.
Filler reverse(const Filler filler, int axis) {
    return [=] (Blob::Ptr blob) {
        const auto &desc = blob->getTensorDesc();
        Blob::Ptr scratch = make_shared_blob<float>({Precision::FP32, desc.getDims(), desc.getLayout()});
        scratch->allocate();
        filler(scratch);
        copy_with_reverse(scratch, blob, axis);
    };
}
// Copy src into dst with the first two dimensions transposed.
// Only the {1,0,2} order is supported (asserted below).
static void copy_with_permute(Blob::Ptr &src, Blob::Ptr &dst, const std::vector<int> order) {
    IE_ASSERT(order == std::vector<int>({1,0,2}));
    IE_ASSERT(src->getTensorDesc().getDims().size() == order.size());
    SizeVector prm_dims, dims = src->getTensorDesc().getDims();
    for (int i : order) prm_dims.push_back(dims[i]);
    IE_ASSERT(prm_dims == dst->getTensorDesc().getDims());
    // Dense (no padding) strides of the destination tensor.
    size_t stride_2 = 1;
    size_t stride_1 = prm_dims[2] * stride_2;
    size_t stride_0 = prm_dims[1] * stride_1;
    float *src_ptr = src->buffer().as<float*>();
    float *dst_ptr = dst->buffer().as<float*>();
    // Read src linearly; scatter into dst with dims 0 and 1 swapped.
    for (int i0 = 0; i0 < dims[0]; i0++)
        for (int i1 = 0; i1 < dims[1]; i1++)
            for (int i2 = 0; i2 < dims[2]; i2++)
                dst_ptr[i1*stride_0 + i0*stride_1 + i2*stride_2] = *src_ptr++;
}
// Filler adapter: fills a scratch blob with permuted dims, then copies it
// into the target applying the permutation.
Filler permute(const Filler filler, const std::vector<int> order) {
    return [=] (Blob::Ptr blob) {
        const SizeVector dims = blob->getTensorDesc().getDims();
        SizeVector permuted;
        for (int d : order)
            permuted.push_back(dims[d]);
        Blob::Ptr scratch = make_shared_blob<float>({Precision::FP32, permuted, blob->getTensorDesc().getLayout()});
        scratch->allocate();
        filler(scratch);
        copy_with_permute(scratch, blob, order);
    };
}
// Checker adapter: copies the blob into permuted layout before delegating,
// so a canonical-layout checker can validate a permuted-output blob.
Checker permute(const Checker checker, const std::vector<int> order) {
    return [=] (Blob::Ptr blob) -> bool {
        const SizeVector dims = blob->getTensorDesc().getDims();
        SizeVector permuted;
        for (int d : order)
            permuted.push_back(dims[d]);
        Blob::Ptr scratch = make_shared_blob<float>({Precision::FP32, permuted, blob->getTensorDesc().getLayout()});
        scratch->allocate();
        copy_with_permute(blob, scratch, order);
        return checker(scratch);
    };
}
// Checker adapter: validates all-but-the-last slice along `axis` with
// checker1 and the trailing slice of size 1 with checker2, using ROI views.
Checker concat(const Checker checker1, const Checker checker2, int axis) {
    return [=] (Blob::Ptr blob) -> bool {
        const auto full = blob->getTensorDesc().getDims();
        const size_t tail = 1;  // slice width, counting from the end
        SizeVector head_dims(full), head_offs(full.size(), 0);
        head_dims[axis] -= tail;
        SizeVector tail_dims(full), tail_offs(full.size(), 0);
        tail_dims[axis] = tail;
        tail_offs[axis] = head_dims[axis];
        return checker1(make_view(blob, head_dims, head_offs)) &&
               checker2(make_view(blob, tail_dims, tail_offs));
    };
}
// Filler adapter: fills all-but-the-last slice along `axis` with filler1 and
// the trailing slice of size 1 with filler2, writing through ROI views.
Filler concat(const Filler filler1, const Filler filler2, int axis) {
    return [=] (Blob::Ptr blob) {
        const auto full = blob->getTensorDesc().getDims();
        const size_t tail = 1;  // slice width, counting from the end
        SizeVector head_dims(full), head_offs(full.size(), 0);
        head_dims[axis] -= tail;
        SizeVector tail_dims(full), tail_offs(full.size(), 0);
        tail_dims[axis] = tail;
        tail_offs[axis] = head_dims[axis];
        filler1(make_view(blob, head_dims, head_offs));
        filler2(make_view(blob, tail_dims, tail_offs));
    };
}
// Approximate float equality: relative tolerance for references larger than
// eps, absolute tolerance near zero. Tolerance is 1e-4 in both regimes.
static inline bool cmp_near(float res, float ref) {
    constexpr float eps = 1e-4;
    const float diff = std::abs(res - ref);
    const float ref_abs = std::abs(ref);
    return (ref_abs > eps) ? (diff / ref_abs < eps) : (diff < eps);
}
// True iff every element of `blob` (whose dims must equal `dims`) is
// approximately equal to `val`.
bool scalar_checker(Blob::Ptr blob, SizeVector dims, float val) {
    IE_ASSERT(blob->getTensorDesc().getDims() == dims);
    bool ok = true;
    T_LOOP(blob, [&](float x, int *) {
        ok = ok && cmp_near(x, val);
    });
    return ok;
}
// True iff along `axis` every element matches val[index-on-axis]; all other
// axes are broadcast. `dims[axis]` must equal val.size().
bool vector_checker(Blob::Ptr blob, SizeVector dims, std::vector<float> val, int axis) {
    IE_ASSERT(blob->getTensorDesc().getDims() == dims);
    IE_ASSERT(dims[axis] == val.size());
    // Dims are left-padded to rank T_LOOP_RANK inside T_LOOP.
    const int idx = axis + T_LOOP_RANK - static_cast<int>(dims.size());
    bool ok = true;
    T_LOOP( blob, [&](float &x, int *i) {
        ok = ok && cmp_near(x, val[ i[idx] ]);
    });
    return ok;
}
// Fill every element of `blob` (whose dims must equal `dims`) with `val`.
void scalar_filler (Blob::Ptr blob, SizeVector dims, float val) {
    IE_ASSERT(blob->getTensorDesc().getDims() == dims);
    T_LOOP( blob, [&](float &x, int *) {
        x = val;
    });
}
// Fill `blob` so that the element at position k along `axis` equals val[k],
// broadcast across all other axes. `dims[axis]` must equal val.size().
void vector_filler (Blob::Ptr blob, SizeVector dims, std::vector<float> val, int axis) {
    IE_ASSERT(blob->getTensorDesc().getDims() == dims);
    IE_ASSERT(dims[axis] == val.size());
    // Dims are left-padded to rank T_LOOP_RANK inside T_LOOP.
    const int idx = axis + T_LOOP_RANK - static_cast<int>(dims.size());
    T_LOOP( blob, [&](float &x, int *i) {
        x = val[ i[idx] ];
    });
}

View File

@@ -1,36 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ie_blob.h"
/**
 * Fillers section (Blob::Ptr) -> void
 */
using Filler = std::function<void(InferenceEngine::Blob::Ptr)>;
/** Fillers conversion */
Filler reverse(const Filler checker, int axis);
Filler permute(const Filler filler, const std::vector<int> order);
Filler concat(const Filler filler1, const Filler filler2, int axis);
/** Some helpful fillers. To use with std::bind() */
void scalar_filler(InferenceEngine::Blob::Ptr blob, InferenceEngine::SizeVector dims, float val);
void vector_filler(InferenceEngine::Blob::Ptr blob, InferenceEngine::SizeVector dims, std::vector<float> val, int axis);
/**
 * Checkers section (Blob::Ptr) -> bool
 */
using Checker = std::function<bool(InferenceEngine::Blob::Ptr)>;
/** Checker conversion */
Checker negative(const Checker checker);
Checker reverse(const Checker checker, int axis);
Checker permute(const Checker checker, const std::vector<int> order);
Checker concat(const Checker checker1, const Checker checker2, int axis);
/** Some helpful checkers. To use with std::bind() */
bool scalar_checker (InferenceEngine::Blob::Ptr blob, InferenceEngine::SizeVector dims, float val);
bool vector_checker (InferenceEngine::Blob::Ptr blob, InferenceEngine::SizeVector dims, std::vector<float> val, int axis);

View File

@@ -1,31 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gtest/gtest.h>
#include <algorithm>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <map>
#include <memory>
#include <numeric>
#include <ostream>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <typeinfo>
#include <unordered_set>
#include <utility>
#include <vector>
#include <cassert>
#include <cctype>
#include <cmath>
#include <cstdlib>
#include <cstring>

View File

@@ -21,6 +21,7 @@ function(add_helpers target_name)
target_include_directories(${target_name}
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}"
$<TARGET_PROPERTY:ov_gna_func_tests,INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:ie_samples_utils,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:inference_engine_legacy,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:inference_engine_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>

View File

@@ -6,7 +6,7 @@
// Created by user on 19.10.18.
//
#include "test_model_repo.hpp"
#include "helpers/test_model_repo.hpp"
#include "test_model_path.hpp"
#ifndef _WIN32
@@ -39,7 +39,7 @@ static std::string getDirname(std::string filePath) {
#endif
static std::string get_models_path() {
const char* models_path = TestDataHelpers::getModelPathNonFatal();
const char* models_path = TestDataHelpers::get_model_path_non_fatal();
if (nullptr == models_path) {
::testing::AssertionFailure() << "MODELS_PATH not defined";

View File

@@ -21,7 +21,7 @@
#include <ie_blob.h>
#include <ie_input_info.hpp>
#include "test_model_repo.hpp"
#include "helpers/test_model_repo.hpp"
#include "test_model_path.hpp"
#include <tests_file_utils.hpp>
#include <chrono>

View File

@@ -11,6 +11,8 @@ set(TARGET_NAME ov_gna_func_tests)
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
INCLUDES
${CMAKE_CURRENT_SOURCE_DIR}
DEPENDENCIES
openvino_intel_gna_plugin
LINK_LIBRARIES
@@ -19,3 +21,11 @@ addIeTargetTest(
LABELS
GNA
)
target_compile_definitions(${TARGET_NAME}
PUBLIC ${ARGV}
GNA_DATA_PATH=\"${CMAKE_CURRENT_SOURCE_DIR}/data\")
if (ENABLE_DATA)
add_dependencies(${TARGET_NAME} data)
endif()

View File

@@ -0,0 +1,228 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <map>
#include <memory>
#include <vector>
#include "helpers/test_model_repo.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/core/model.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/core/type.hpp"
#include "openvino/opsets/opset10.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
using namespace ov::opset10;
typedef std::tuple<ov::element::Type, // Network Precision
std::string, // Target Device
std::string, // Name Export Model
std::map<std::string, std::string>, // Export Configuration
std::map<std::string, std::string> // Import Configuration
>
exportImportNetworkParams;
class BackwardCompatibility : public testing::WithParamInterface<exportImportNetworkParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
// Produces a human-readable gtest case name from the parameter tuple.
static std::string get_test_case_name(testing::TestParamInfo<exportImportNetworkParams> obj) {
    ov::element::Type precision;
    std::string device;
    std::string model_name;
    std::map<std::string, std::string> export_cfg;
    std::map<std::string, std::string> import_cfg;
    std::tie(precision, device, model_name, export_cfg, import_cfg) = obj.param;
    std::ostringstream name;
    name << "input_prc=" << precision << "_"
         << "target_device=" << device << "_"
         << "name_export_model=" << model_name << "_";
    for (const auto& item : export_cfg)
        name << "_exportConfigItem=" << item.first << "_" << item.second;
    for (const auto& item : import_cfg)
        name << "_importConfigItem=" << item.first << "_" << item.second;
    return name.str();
}
// Compiles the model with the export config, infers and checks against the
// reference, then imports a pre-exported blob from the data path with the
// import config and verifies I/O parity and identical outputs.
void Run() override {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()
    functionRefs = ngraph::clone_function(*function);
    // load export configuration and save outputs
    configuration.insert(m_conf_export.begin(), m_conf_export.end());
    LoadNetwork();
    GenerateInputs();
    Infer();
    auto outputs = GetOutputs();
    auto output_refs = CalculateRefs();
    Compare(output_refs, outputs);
    // Switch to the import configuration (overrides export keys in place).
    for (auto const& config_item : m_conf_import) {
        configuration[config_item.first] = config_item.second;
    }
    const auto compiled_network = executableNetwork;
    // Pre-exported model blob shipped with the test data.
    auto model = TestDataHelpers::get_data_path() + "/exported_models/" + m_export_model_name;
    const auto imported_network = core->ImportNetwork(model, targetDevice, configuration);
    // NOTE(review): Infer()/GetOutputs() appear to run on the test's own
    // request, and `outputs` may alias blobs refreshed by this second Infer —
    // presumably intentional in this infra; confirm before changing.
    GenerateInputs();
    Infer();
    // Imported and compiled networks must expose identical I/O sets.
    ASSERT_EQ(imported_network.GetInputsInfo().size(), compiled_network.GetInputsInfo().size());
    ASSERT_EQ(imported_network.GetOutputsInfo().size(), compiled_network.GetOutputsInfo().size());
    for (const auto& next_output : imported_network.GetOutputsInfo()) {
        ASSERT_NO_THROW(compiled_network.GetOutputsInfo()[next_output.first]);
    }
    auto outputs_imported = GetOutputs();
    ASSERT_EQ(outputs.size(), outputs_imported.size());
    for (size_t i = 0; i < outputs.size(); i++) {
        Compare(outputs[i], outputs_imported[i]);
    }
}
protected:
void SetUp() override {
ov::element::Type prc = ov::element::undefined;
std::tie(prc, targetDevice, m_export_model_name, m_conf_export, m_conf_import) = this->GetParam();
ov::Shape input_shape{1, 80};
ov::Shape conv_shape{1, 2, 1, 40};
ov::Shape split_shape = {input_shape[0], 2 * input_shape[1]};
ov::ParameterVector inputs = {std::make_shared<Parameter>(prc, split_shape),
std::make_shared<Parameter>(prc, input_shape)};
// split layer to split inputs and transpose the part connected to convolution only
auto axis_const = std::make_shared<Constant>(ov::element::i32, ov::Shape{}, std::vector<int32_t>{1});
auto split = std::make_shared<Split>(inputs[0], axis_const, 2);
std::vector<int32_t> reshape_pattern{1, 2, 1, -1};
auto reshape_const =
std::make_shared<Constant>(ov::element::i32, ov::Shape{reshape_pattern.size()}, reshape_pattern);
auto split_0_reshape = std::make_shared<Reshape>(split->output(0), reshape_const, true);
auto split_1_reshape = std::make_shared<Reshape>(split->output(1), reshape_const, true);
auto input_1_reshape = std::make_shared<Reshape>(inputs[1], reshape_const, true);
auto add = std::make_shared<Add>(split_0_reshape, input_1_reshape);
auto relu_1 = std::make_shared<Relu>(add);
// Convolution to test nchw->nhwc
size_t num_out_channels = 8;
size_t kernel_size = 8;
std::vector<float> filter_weights =
CommonTestUtils::generate_float_numbers(num_out_channels * reshape_pattern[1] * kernel_size, -0.1f, 0.1f);
auto conv = ngraph::builder::makeConvolution(relu_1,
prc,
{1, kernel_size},
{1, 1},
{0, 0},
{0, 0},
{1, 1},
ngraph::op::PadType::VALID,
num_out_channels,
true,
filter_weights);
auto relu_2 = std::make_shared<Relu>(conv);
// Memory layers
ov::op::util::VariableInfo vi{};
vi.data_shape = ov::PartialShape(conv_shape);
vi.variable_id = "test_variable";
vi.data_type = prc;
const auto var = std::make_shared<ov::op::util::Variable>(vi);
std::vector<float> initial_state =
CommonTestUtils::generate_float_numbers(ov::shape_size(conv_shape), -3.f, 3.f);
auto initial_state_node = std::make_shared<Constant>(prc, conv_shape, initial_state);
auto read = std::make_shared<ReadValue>(initial_state_node, var);
auto mul = std::make_shared<Multiply>(split_1_reshape, read);
auto assign = std::make_shared<Assign>(mul, var);
auto relu_3 = std::make_shared<Relu>(mul);
ov::SinkVector sinks = {assign};
ov::ResultVector results;
results.emplace_back(std::make_shared<Result>(relu_2));
results.emplace_back(std::make_shared<Result>(relu_3));
function = std::make_shared<ov::Model>(results, sinks, inputs, "universal_export_model");
}
std::map<std::string, std::string> m_conf_export;
std::map<std::string, std::string> m_conf_import;
std::string m_export_model_name;
};
class BackwardCompatibilityLegacy : public BackwardCompatibility {
protected:
void SetUp() override {
ov::element::Type prc = ov::element::undefined;
std::tie(prc, targetDevice, m_export_model_name, m_conf_export, m_conf_import) = this->GetParam();
ov::Shape input_shape{1, 336};
auto param = std::make_shared<Parameter>(prc, input_shape);
auto const_eltwise = std::make_shared<Constant>(prc, input_shape, std::vector<float>{-1});
auto mul = std::make_shared<Multiply>(param, const_eltwise);
ov::ResultVector results{std::make_shared<Result>(mul)};
function = std::make_shared<ov::Model>(results, ov::ParameterVector{param}, "ExportBackwordCompatibility");
}
};
// Runs the full export/import round-trip check on the multi-input "universal" model.
TEST_P(BackwardCompatibility, BackwardCompatibility) {
    Run();
}
// Runs the same round-trip check on the minimal legacy model topology.
TEST_P(BackwardCompatibilityLegacy, BackwardCompatibility) {
    Run();
}
// Network precisions exercised by both test suites.
const std::vector<ov::element::Type> input_precisions = {ov::element::f32, ov::element::f16};
// Legacy (single-input) model: one scale factor is enough.
const std::vector<std::map<std::string, std::string>> export_configs_legacy = {
    {{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_SCALE_FACTOR_0", "327.67"}}};
const std::vector<std::map<std::string, std::string>> import_configs_legacy = {
    {{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_SCALE_FACTOR_0", "327.67"}},
};
// Universal (two-input) model: a scale factor per input.
const std::vector<std::map<std::string, std::string>> export_configs = {
    {{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_SCALE_FACTOR_0", "327.67"}, {"GNA_SCALE_FACTOR_1", "327.67"}}};
const std::vector<std::map<std::string, std::string>> import_configs = {
    {{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_SCALE_FACTOR_0", "327.67"}, {"GNA_SCALE_FACTOR_1", "327.67"}}};
// Pre-exported blobs produced by older plugin versions (looked up under
// <data path>/exported_models/; the suffix encodes the exporting version).
const std::vector<std::string> export_models_legacy = {"export2dot1.blob",
                                                       "export2dot2.blob",
                                                       "export2dot3.blob",
                                                       "export2dot4.blob",
                                                       "export2dot5.blob"};
const std::vector<std::string> export_models = {"export2dot6.blob", "export2dot7.blob", "export2dot8.blob"};
// Those tests should not be run in CI due to dependency on model blobs
// Legacy-format blobs (versions 2.1-2.5) imported into the minimal model fixture.
INSTANTIATE_TEST_SUITE_P(OldVersion,
                         BackwardCompatibilityLegacy,
                         ::testing::Combine(::testing::ValuesIn(input_precisions),
                                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
                                            ::testing::ValuesIn(export_models_legacy),
                                            ::testing::ValuesIn(export_configs_legacy),
                                            ::testing::ValuesIn(import_configs_legacy)),
                         BackwardCompatibilityLegacy::get_test_case_name);
// Newer-format blobs (versions 2.6-2.8) imported into the universal model fixture.
// NOTE(review): the suite prefix "OldVersion" appears copy-pasted from the legacy
// instantiation above; renaming would change test IDs used by filters, so it is kept.
INSTANTIATE_TEST_SUITE_P(OldVersion,
                         BackwardCompatibility,
                         ::testing::Combine(::testing::ValuesIn(input_precisions),
                                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
                                            ::testing::ValuesIn(export_models),
                                            ::testing::ValuesIn(export_configs),
                                            ::testing::ValuesIn(import_configs)),
                         BackwardCompatibility::get_test_case_name);

View File

@@ -8,8 +8,8 @@ std::string get_model_repo() {
return "models:";
};
const char* TestDataHelpers::getModelPathNonFatal() noexcept {
return TestDataHelpers::getModelPathNonFatalDefault();
const char* TestDataHelpers::get_model_path_non_fatal() noexcept {
return TestDataHelpers::get_model_path_non_fatal_default();
}
std::string TestDataHelpers::get_data_path() {

View File

@@ -9,13 +9,13 @@ std::string get_model_repo();
namespace TestDataHelpers {
const char *getModelPathNonFatal() noexcept;
const char* get_model_path_non_fatal() noexcept;
std::string get_data_path();
inline const char *getModelPathNonFatalDefault() noexcept {
if (const auto envVar = std::getenv("MODELS_PATH")) {
return envVar;
inline const char* get_model_path_non_fatal_default() noexcept {
if (const auto env_var = std::getenv("MODELS_PATH")) {
return env_var;
}
#ifdef MODELS_PATH
@@ -26,12 +26,12 @@ inline const char *getModelPathNonFatalDefault() noexcept {
};
inline std::string get_data_path_default() {
if (const auto envVar = std::getenv("DATA_PATH")) {
return envVar;
if (const auto env_var = std::getenv("GNA_DATA_PATH")) {
return env_var;
}
#ifdef DATA_PATH
return DATA_PATH;
#ifdef GNA_DATA_PATH
return GNA_DATA_PATH;
#else
return nullptr;
#endif