Commits from PR11438 (#12337)

Draft version of test

Added i8 test

Some fixes

Fix static build issue
This commit is contained in:
Nadezhda Ageeva 2022-08-22 16:34:24 +04:00 committed by GitHub
parent df538196df
commit 7106f4b626
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 231 additions and 139 deletions

View File

@ -6,7 +6,7 @@ set(TARGET_NAME gnaUnitTests)
# TODO: fix CVS-71010 and remove BUILD_SHARED_LIBS
if(NOT BUILD_SHARED_LIBS)
    # Static build: exclude the stub/mock-based test sources from this target
    # (see CVS-71010 above). The previous set() of exclude_path was a dead
    # assignment immediately overwritten by this one, so only the effective
    # value is kept.
    set(exclude_path EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/(gna_api_stub|gna_wait_test|gna_export_import_test).cpp")
endif()
addIeTargetTest(

View File

@ -28,12 +28,12 @@ GNA2_API enum Gna2Status Gna2MemoryAlloc(
uint32_t sizeRequested,
uint32_t *sizeGranted,
void **memoryAddress) {
if (current != nullptr) {
return current->Gna2MemoryAlloc(sizeRequested, sizeGranted, memoryAddress);
}
if (sizeGranted != nullptr) {
*sizeGranted = sizeRequested;
}
if (current != nullptr) {
return current->Gna2MemoryAlloc(sizeRequested, sizeGranted, memoryAddress);
}
*memoryAddress = reinterpret_cast<void*>(1);
return Gna2StatusSuccess;
}
@ -50,8 +50,12 @@ GNA2_API enum Gna2Status Gna2DeviceCreateForExport(
*deviceIndex = 1;
return Gna2StatusSuccess;
}
GNA2_API enum Gna2Status Gna2DeviceOpen(
    uint32_t deviceIndex) {
    // Forward to the registered mock when one is active; the default stub
    // simply reports success.
    return (current == nullptr) ? Gna2StatusSuccess
                                : current->Gna2DeviceOpen(deviceIndex);
}
@ -88,6 +92,9 @@ GNA2_API enum Gna2Status Gna2ModelCreate(
uint32_t deviceIndex,
struct Gna2Model const * model,
uint32_t * modelId) {
if (current != nullptr) {
return current->Gna2ModelCreate(deviceIndex, model, modelId);
}
return Gna2StatusSuccess;
}
@ -103,6 +110,9 @@ GNA2_API enum Gna2Status Gna2ModelGetLastError(struct Gna2ModelError* error) {
GNA2_API enum Gna2Status Gna2RequestConfigCreate(
    uint32_t modelId,
    uint32_t * requestConfigId) {
    // Default stub: report success without touching *requestConfigId.
    if (current == nullptr) {
        return Gna2StatusSuccess;
    }
    // A mock is registered — let it handle the call.
    return current->Gna2RequestConfigCreate(modelId, requestConfigId);
}
@ -170,6 +180,9 @@ GNA2_API enum Gna2Status Gna2ModelExport(
GNA2_API enum Gna2Status Gna2DeviceGetVersion(
    uint32_t deviceIndex,
    enum Gna2DeviceVersion * deviceVersion) {
    // Forward to the registered mock when one is active.
    if (current != nullptr) {
        return current->Gna2DeviceGetVersion(deviceIndex, deviceVersion);
    }
    // Default stub: report the software-emulation device. Guard the out-param
    // against null, matching the sizeGranted check in Gna2MemoryAlloc, so a
    // careless caller cannot crash the stub.
    if (deviceVersion != nullptr) {
        *deviceVersion = Gna2DeviceVersionSoftwareEmulation;
    }
    return Gna2StatusSuccess;
}
@ -179,18 +192,27 @@ GNA2_API enum Gna2Status Gna2InstrumentationConfigCreate(
enum Gna2InstrumentationPoint* selectedInstrumentationPoints,
uint64_t * results,
uint32_t * instrumentationConfigId) {
if (current != nullptr) {
return current->Gna2InstrumentationConfigCreate(numberOfInstrumentationPoints, selectedInstrumentationPoints, results, instrumentationConfigId);
}
return Gna2StatusSuccess;
}
GNA2_API enum Gna2Status Gna2InstrumentationConfigAssignToRequestConfig(
    uint32_t instrumentationConfigId,
    uint32_t requestConfigId) {
    // Forward to the registered mock when one is active; the default stub
    // simply reports success.
    return (current == nullptr)
        ? Gna2StatusSuccess
        : current->Gna2InstrumentationConfigAssignToRequestConfig(instrumentationConfigId, requestConfigId);
}
GNA2_API enum Gna2Status Gna2GetLibraryVersion(
char* versionBuffer,
uint32_t versionBufferSize) {
if (current != nullptr) {
return current->Gna2GetLibraryVersion(versionBuffer, versionBufferSize);
}
if (versionBuffer != nullptr && versionBufferSize > 0) {
versionBuffer[0] = '\0';
return Gna2StatusSuccess;

View File

@ -0,0 +1,172 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <vector>
#include <map>
#include "common_test_utils/ngraph_test_utils.hpp"
#include "gna_mock_api.hpp"
#include "gna_plugin.hpp"
#include "ngraph_functions/builders.hpp"
#include "common_test_utils/data_utils.hpp"
using namespace ::testing;
using GNAPluginNS::GNAPlugin;
using namespace InferenceEngine;
// Fixture for export -> import -> infer round-trip tests of the GNA plugin.
// All GNA2 library calls are intercepted by a GNACppApi gmock instance, so no
// real device or GNA runtime is required.
class GNAExportImportTest : public ::testing::Test {
public:
// Builds the reference MatMul network, compiles it with a fresh GNAPlugin
// (driven by the mocked GNA API) and serializes the result to the given file.
void ExportModel(std::string exportedModelFileName) {
auto function = getFunction();
// NOTE(review): this weights blob is allocated and filled but never passed
// to the network or plugin below — confirm whether it is needed at all.
auto weights = make_shared_blob<uint8_t>({ Precision::U8, {1, 10}, Layout::NC });
weights->allocate();
fillWeights(weights);
CNNNetwork cnnNetwork = CNNNetwork{function};
GNACppApi mockApi;
// Owns the buffers handed out by the mocked Gna2MemoryAlloc; must outlive
// the plugin operations below.
std::vector<std::vector<uint8_t>> data;
ExpectEnqueueCalls(&mockApi, data);
// Uses the fixture member gna_config (std::map<std::string, std::string>).
GNAPlugin plugin(gna_config);
plugin.LoadNetwork(cnnNetwork);
plugin.Export(exportedModelFileName);
}
// Deserializes a previously exported model from modelPath and runs a single
// inference on it (again against the mocked GNA API).
void ImportModel(std::string modelPath) {
GNACppApi mockApi;
std::vector<std::vector<uint8_t>> data;
ExpectEnqueueCalls(&mockApi, data);
GNAPlugin plugin(gna_config);
std::fstream inputStream(modelPath, std::ios_base::in | std::ios_base::binary);
if (inputStream.fail()) {
THROW_GNA_EXCEPTION << "Cannot open file to import model: " << modelPath;
}
auto sp = plugin.ImportNetwork(inputStream);
auto inputsInfo = plugin.GetNetworkInputs();
auto outputsInfo = plugin.GetNetworkOutputs();
BlobMap input, output;
AllocateInput(input, &plugin);
AllocateOutput(output, &plugin);
plugin.Infer(input, output);
}
protected:
// Allocates an FP32 blob (with the network-reported dims/layout) for every
// input the plugin exposes.
void AllocateInput(BlobMap& input, GNAPlugin *plugin) {
auto inputsInfo = plugin->GetNetworkInputs();
for (auto&& info : inputsInfo) {
auto& inputBlob = input[info.first];
inputBlob = make_blob_with_precision({ Precision::FP32, info.second->getTensorDesc().getDims(),
info.second->getLayout() });
inputBlob->allocate();
}
}
// Allocates a flat {1, N} FP32 blob for every output, where N is the product
// of the output's dims.
void AllocateOutput(BlobMap& output, GNAPlugin *plugin) {
auto outputsInfo = plugin->GetNetworkOutputs();
for (auto&& out : outputsInfo) {
auto& outputBlob = output[out.first];
auto dims = out.second->getDims();
auto outsize = details::product(std::begin(dims), std::end(dims));
outputBlob.reset(new TBlob<float>({ Precision::FP32, {1, outsize}, Layout::NC }));
outputBlob->allocate();
}
}
// Fills the blob's float elements by repeating `pattern`.
// NOTE(review): the inner loops already stop at pEnd, so the outer
// `for (; p != pEnd ;)` runs at most one full pass — confirm intent.
void fillWeights(InferenceEngine::Blob::Ptr weights, std::vector<float> pattern = {(1.0F)}) {
float * p = weights->buffer().as<float *>();
float * pEnd = p + weights->byteSize() / sizeof(float);
for (; p != pEnd ;) {
for (int i = 0; i != (weights->byteSize() / sizeof(float) / 3) + 1; i++) {
for (int j = 0; j != pattern.size() && p != pEnd; j++, p++) {
*p = pattern[j];
}
}
}
}
// Reference network: one {1,10} parameter multiplied by a 10x10 random
// constant via MatMul (transposed second input).
std::shared_ptr<ngraph::Function> getFunction() {
auto ngPrc = ngraph::element::f32;
size_t shape = 10;
auto params = ngraph::builder::makeParams(ngPrc, {{1, shape}});
auto mul_const = ngraph::builder::makeConstant<float>(ngPrc, { shape, shape },
CommonTestUtils::generate_float_numbers(shape * shape, -0.5f, 0.5f), false);
auto matmul = std::make_shared<ngraph::op::MatMul>(params[0], mul_const, false, true);
auto res = std::make_shared<ngraph::op::Result>(matmul);
auto function = std::make_shared<ngraph::Function>(res, params, "MatMul");
return function;
}
// Installs the gmock expectations needed for one LoadNetwork/ImportNetwork
// cycle. `data` receives (and owns) the memory returned by the mocked
// Gna2MemoryAlloc, so it must outlive the plugin that uses those buffers.
void ExpectEnqueueCalls(GNACppApi *mockApi, std::vector<std::vector<uint8_t>>& data) {
// Every allocation request is granted in full from a fresh vector.
EXPECT_CALL(*mockApi, Gna2MemoryAlloc(_, _, _)).Times(AtLeast(1)).WillRepeatedly(Invoke([&data](
uint32_t sizeRequested,
uint32_t *sizeGranted,
void **memoryAddress) {
data.push_back(std::vector<uint8_t>(sizeRequested));
*sizeGranted = sizeRequested;
*memoryAddress = data.back().data();
return Gna2StatusSuccess;
}));
// Report a software-emulation device so no hardware path is taken.
EXPECT_CALL(*mockApi, Gna2DeviceGetVersion(_, _)).WillOnce(Invoke([](
uint32_t deviceIndex,
enum Gna2DeviceVersion * deviceVersion) {
*deviceVersion = Gna2DeviceVersionSoftwareEmulation;
return Gna2StatusSuccess;
}));
EXPECT_CALL(*mockApi, Gna2DeviceOpen(_)).WillOnce(Return(Gna2StatusSuccess));
EXPECT_CALL(*mockApi, Gna2GetLibraryVersion(_, _)).Times(AtLeast(0)).WillRepeatedly(Return(Gna2StatusSuccess));
EXPECT_CALL(*mockApi, Gna2InstrumentationConfigCreate(_, _, _, _)).WillOnce(Return(Gna2StatusSuccess));
// Hand out fixed ids for the single model / request config the tests create.
EXPECT_CALL(*mockApi, Gna2ModelCreate(_, _, _)).WillOnce(Invoke([](
uint32_t deviceIndex,
struct Gna2Model const * model,
uint32_t * modelId) {
*modelId = 0;
return Gna2StatusSuccess;
}));
EXPECT_CALL(*mockApi, Gna2RequestConfigCreate(_, _)).WillOnce(Invoke([](
uint32_t modelId,
uint32_t * requestConfigId) {
*requestConfigId = 0;
return Gna2StatusSuccess;
}));
EXPECT_CALL(*mockApi, Gna2InstrumentationConfigAssignToRequestConfig(_, _)).Times(AtLeast(1)).WillRepeatedly(Return(Gna2StatusSuccess));
}
// Deletes the exported model file after each test case.
void TearDown() override {
std::remove(exported_file_name.c_str());
}
// Plugin configuration passed to GNAPlugin's constructor by Export/ImportModel.
std::map<std::string, std::string> gna_config;
// Path of the file produced by ExportModel; removed in TearDown().
std::string exported_file_name;
};
// Export/import round-trip requesting I16 inference precision.
TEST_F(GNAExportImportTest, ExportImportI16) {
// NOTE(review): this local shadows the fixture member `gna_config`
// (std::map<std::string, std::string>) that ExportModel/ImportModel actually
// pass to GNAPlugin, so these settings never reach the plugin — confirm
// whether the fixture should consume an ov::AnyMap instead.
ov::AnyMap gna_config = {
ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT),
ov::hint::inference_precision(ngraph::element::i16)
};
exported_file_name = "export_test.bin";
ExportModel(exported_file_name);
ImportModel(exported_file_name);
}
// Export/import round-trip requesting I8 inference precision.
TEST_F(GNAExportImportTest, ExportImportI8) {
// NOTE(review): as in ExportImportI16, this local shadows the fixture member
// `gna_config` used by ExportModel/ImportModel, so these settings never reach
// the plugin — confirm intended wiring.
ov::AnyMap gna_config = {
ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT),
ov::hint::inference_precision(ngraph::element::i8)
};
exported_file_name = "export_test.bin";
ExportModel(exported_file_name);
ImportModel(exported_file_name);
}

View File

@ -4,6 +4,8 @@
#pragma once
#include <gmock/gmock-generated-function-mockers.h>
#include <gna2-instrumentation-api.h>
#include <gna2-model-export-api.h>
#if defined(_WIN32)
#ifdef libGNAStubs_EXPORTS
@ -24,7 +26,38 @@ public:
uint32_t sizeRequested,
uint32_t * sizeGranted,
void ** memoryAddress));
// Mocked GNA2 C-API entry points. The free-function stubs in the API stub
// translation unit forward to these methods when a GNACppApi instance is
// registered as the current mock.
MOCK_METHOD1(Gna2DeviceOpen, Gna2Status(
uint32_t deviceIndex));
MOCK_METHOD3(Gna2ModelCreate, Gna2Status(
uint32_t deviceIndex,
struct Gna2Model const * model,
uint32_t * modelId));
MOCK_METHOD2(Gna2RequestConfigCreate, Gna2Status(
uint32_t modelId,
uint32_t * requestConfigId));
MOCK_METHOD2(Gna2RequestWait, Gna2Status(
uint32_t requestId,
uint32_t timeoutMilliseconds));
MOCK_METHOD2(Gna2DeviceGetVersion, Gna2Status(
uint32_t deviceIndex,
enum Gna2DeviceVersion * deviceVersion));
MOCK_METHOD4(Gna2InstrumentationConfigCreate, Gna2Status(
uint32_t numberOfInstrumentationPoints,
enum Gna2InstrumentationPoint* selectedInstrumentationPoints,
uint64_t * results,
uint32_t * instrumentationConfigId));
MOCK_METHOD2(Gna2InstrumentationConfigAssignToRequestConfig, Gna2Status(
uint32_t instrumentationConfigId,
uint32_t requestConfigId));
MOCK_METHOD2(Gna2GetLibraryVersion, Gna2Status(
char* versionBuffer,
uint32_t versionBufferSize));
};

View File

@ -1,135 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <thread>
#include <chrono>
#include <gtest/gtest.h>
#include <legacy/layer_transform.hpp>
#include "gna_matcher.hpp"
using namespace InferenceEngine;
using namespace GNAPluginNS;
using namespace GNATestIRs;
// Fixture for ahead-of-time (export/import) GNA tests. Keeps track of the
// files a test creates and removes them when the test finishes.
class GNAAOTTests : public GNATest<>{
protected:
    // Files queued for deletion in TearDown().
    std::list<std::string> files_to_remove;

    // Queue a file for post-test cleanup and hand the same name back so the
    // call can be used inline where the path is needed.
    std::string registerFileForRemove(std::string file_to_remove) {
        files_to_remove.push_back(file_to_remove);
        return file_to_remove;
    }

    // Build a collision-free name: "<thread-id>_<microsecond-timestamp>_<baseName>".
    std::string generateFileName(const std::string& baseName) const {
        using namespace std::chrono;
        const auto stamp = duration_cast<microseconds>(high_resolution_clock::now().time_since_epoch());
        std::stringstream name;
        name << std::this_thread::get_id() << "_" << stamp.count() << "_" << baseName;
        return name.str();
    }

    // Delete every file a test registered.
    void TearDown() override {
        for (auto & file : files_to_remove) {
            std::remove(file.c_str());
        }
    }

    void SetUp() override {
    }
};
// Disabled: export a two-output affine model to a file, re-import it and
// verify forward propagation is invoked exactly once.
TEST_F(GNAAOTTests, DISABLED_AffineWith2AffineOutputs_canbe_export_imported) {
const std::string X = registerFileForRemove(generateFileName("unit_tests.bin"));
// running export to a file
export_network(AffineWith2AffineOutputsModel())
.inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f).as().gna().model().to(X);
// running infer using imported model instead of IR
assert_that().onInferModel().importedFrom(X)
.inNotCompactMode().gna().propagate_forward().called().once();
}
// Disabled: capture the gna_nnet_type_t produced by direct inference, export
// the same model, re-import it, and verify the imported network has exactly
// the same nnet structure.
TEST_F(GNAAOTTests, DISABLED_AffineWith2AffineOutputs_canbe_imported_verify_structure) {
auto & nnet_type = storage<gna_nnet_type_t>();
// saving pointer to nnet - todo probably deep copy required
save_args().onInferModel(AffineWith2AffineOutputsModel())
.inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f).from().gna().propagate_forward().to(&nnet_type);
const std::string X = registerFileForRemove(generateFileName("unit_tests.bin"));
// running export to a file
export_network(AffineWith2AffineOutputsModel())
.inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f).as().gna().model().to(X);
// running infer using imported model instead of IR
assert_that().onInferModel().importedFrom(X)
.inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f).gna()
.propagate_forward().called_with().exact_nnet_structure(&nnet_type);
}
// Disabled: export/import round-trip for a two-input model, with a separate
// scale factor configured per input (suffixes _0 and _1).
TEST_F(GNAAOTTests, DISABLED_TwoInputsModel_canbe_export_imported) {
const std::string X = registerFileForRemove(generateFileName("unit_tests.bin"));
// running export to a file
export_network(TwoInputsModelForIO())
.inNotCompactMode()
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_0"), 1.0f)
.withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_1"), 1.0f)
.as().gna().model().to(X);
// running infer using imported model instead of IR
assert_that().onInferModel().importedFrom(X)
.inNotCompactMode().gna().propagate_forward().called().once();
}
// Disabled: export/import round-trip for a model containing a Permute layer.
TEST_F(GNAAOTTests, DISABLED_PermuteModel_canbe_export_imported) {
const std::string X = registerFileForRemove(generateFileName("unit_tests.bin"));
// running export to a file
export_network(PermuteModelForIO())
.inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f).as().gna().model().to(X);
// running infer using imported model instead of IR
assert_that().onInferModel().importedFrom(X)
.inNotCompactMode().gna().propagate_forward().called().once();
}
// Disabled: export/import round-trip for a max-pooling-after-ReLU model.
TEST_F(GNAAOTTests, DISABLED_PoolingModel_canbe_export_imported) {
const std::string X = registerFileForRemove(generateFileName("unit_tests.bin"));
// running export to a file
export_network(maxpoolAfterRelu())
.inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f).as().gna().model().to(X);
// running infer using imported model instead of IR
assert_that().onInferModel().importedFrom(X)
.inNotCompactMode().gna().propagate_forward().called().once();
}
// Disabled: after an AOT export/import cycle, verify the imported model can be
// dumped as a firmware (SUE) model image.
TEST_F(GNAAOTTests, DISABLED_CanConvertFromAOTtoSueModel) {
auto & nnet_type = storage<gna_nnet_type_t>();
// saving pointer to nnet - todo probably deep copy required
// NOTE(review): .inNotCompactMode() is chained twice here and below —
// presumably redundant; confirm and deduplicate.
save_args().onInferModel(AffineWith2AffineOutputsModel())
.inNotCompactMode().inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.from().gna().propagate_forward().to(&nnet_type);
const std::string X = registerFileForRemove(generateFileName("unit_tests.bin"));
// running export to a file
export_network(AffineWith2AffineOutputsModel())
.inNotCompactMode().inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
.as().gna().model().to(X);
// running infer using imported model instead of IR
assert_that().onInferModel().importedFrom(X)
.inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f).withGNAConfig(GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE), "sue.dump")
.gna().dumpXNN().called();
}