[GNA] Add missing metrics to GNAExecutableNetwork (#14680)
* Added NETWORK_NAME, SUPPORTED_CONFIG_KEYS and SUPPORTED_METRICS metrics to GNAExecutableNetwork
* Added unit tests for the GNAExecutableNetwork metrics
* Moved the model name from GNAExecutableNetwork to GNAPlugin
This commit is contained in:
parent
d75d5f99ec
commit
974af13788
@ -1,4 +1,4 @@
|
||||
// Copyright (C) 2018-2022 Intel Corporation
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
@ -647,6 +647,7 @@ void GNAPlugin::AddDebugProperties(const InferenceEngine::CNNLayerPtr layer,
|
||||
|
||||
void GNAPlugin::LoadNetwork(const CNNNetwork& _network) {
|
||||
OV_ITT_SCOPED_TASK(itt::domains::GNAPlugin, "LoadNetwork");
|
||||
_network_name = _network.getName();
|
||||
std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
|
||||
|
||||
const auto effectiveGnaCompileTargetValue = effectiveGnaCompileTarget();
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright (C) 2018-2022 Intel Corporation
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
@ -67,6 +67,8 @@ protected:
|
||||
InferenceEngine::InputsDataMap inputs_data_map_; //!< Holds information about network inputs info
|
||||
InferenceEngine::OutputsDataMap outputs_data_map_; //!< Holds information about network outputs data
|
||||
|
||||
std::string _network_name;
|
||||
|
||||
std::vector<InferenceEngine::IVariableStateInternal::Ptr> memoryStates;
|
||||
bool trivialTopology = false;
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright (C) 2018-2022 Intel Corporation
|
||||
// Copyright (C) 2018-2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
@ -59,6 +59,8 @@ Parameter GNAPlugin::GetMetric(const std::string& name, const std::map<std::stri
|
||||
return GNADeviceHelper::GetGnaLibraryVersion();
|
||||
} else if (ov::execution_devices == name) {
|
||||
return decltype(ov::execution_devices)::value_type {GetName()};
|
||||
} else if (ov::model_name == name) {
|
||||
return _network_name;
|
||||
} else {
|
||||
const std::unordered_map<std::string, std::function<Parameter()>> queryApiSupported = {
|
||||
{METRIC_KEY(AVAILABLE_DEVICES), [this]() {return GetAvailableDevices();}},
|
||||
|
@ -0,0 +1,69 @@
|
||||
// Copyright (C) 2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "any_copy.hpp"
|
||||
#include <ie_system_conf.h>
|
||||
#include "ngraph_functions/builders.hpp"
|
||||
|
||||
#include "gna_executable_network.hpp"
|
||||
#include "gna_plugin.hpp"
|
||||
#include "memory/gna_memory.hpp"
|
||||
|
||||
using namespace ov::intel_gna;
|
||||
using namespace InferenceEngine;
|
||||
|
||||
|
||||
class GNAPluginForNetworkMetricsTest : public GNAPlugin {
|
||||
public:
|
||||
GNAPluginForNetworkMetricsTest(const std::map<std::string, std::string>& configMap) : GNAPlugin(configMap) {
|
||||
gnamem.reset(new gna_memory_float(memory::GNAFloatAllocator{}));
|
||||
graphCompiler.setGNAMemoryPtr(gnamem);
|
||||
gnadevice.reset();
|
||||
}
|
||||
};
|
||||
|
||||
class GnaExecutableNetworkMetricsTest : public ::testing::Test {
|
||||
public:
|
||||
void Run(const std::string& metricName, const std::string& expectedResult) {
|
||||
ov::AnyMap gnaConfig = {
|
||||
ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT),
|
||||
};
|
||||
auto plugin = std::make_shared<GNAPluginForNetworkMetricsTest>(ov::any_copy(gnaConfig));
|
||||
auto function = getFunction();
|
||||
CNNNetwork cnnNetwork(function);
|
||||
GNAExecutableNetwork gnaNetwork = GNAExecutableNetwork(cnnNetwork, plugin);
|
||||
std::string result = gnaNetwork.GetMetric(metricName);
|
||||
ASSERT_EQ(result, expectedResult);
|
||||
}
|
||||
|
||||
protected:
|
||||
std::shared_ptr<ov::Model> getFunction() {
|
||||
auto firstInput = std::make_shared<ngraph::opset8::Parameter>(net_precision, shape);
|
||||
auto secondInput = std::make_shared<ngraph::opset8::Constant>(net_precision, shape);
|
||||
auto matmul = std::make_shared<ngraph::opset8::MatMul>(firstInput, secondInput, false, true);
|
||||
auto result = std::make_shared<ngraph::opset8::Result>(matmul);
|
||||
auto function =
|
||||
std::make_shared<ov::Model>(ov::ResultVector({result}), ov::ParameterVector({firstInput}), "MatMul");
|
||||
return function;
|
||||
}
|
||||
const ngraph::element::Type net_precision = ngraph::element::f32;
|
||||
const ngraph::Shape shape = {1, 10};
|
||||
};
|
||||
|
||||
|
||||
// The NETWORK_NAME metric must report the name of the loaded model, which the
// fixture's getFunction() sets to "MatMul".
TEST_F(GnaExecutableNetworkMetricsTest, TestNetworkName) {
    Run(ov::model_name.name(), "MatMul");
}
|
||||
|
||||
// SUPPORTED_PROPERTIES must enumerate exactly the GNA plugin's property set,
// serialized as a single space-separated string. The expected value below is a
// runtime literal and must match the plugin's list byte-for-byte.
TEST_F(GnaExecutableNetworkMetricsTest, TestSupportedProperties) {
    std::string supportedProperties =
        "SUPPORTED_PROPERTIES AVAILABLE_DEVICES OPTIMAL_NUMBER_OF_INFER_REQUESTS RANGE_FOR_ASYNC_INFER_REQUESTS "
        "OPTIMIZATION_CAPABILITIES FULL_DEVICE_NAME GNA_LIBRARY_FULL_VERSION GNA_SCALE_FACTOR_PER_INPUT "
        "GNA_FIRMWARE_MODEL_IMAGE GNA_DEVICE_MODE GNA_HW_EXECUTION_TARGET GNA_HW_COMPILE_TARGET "
        "GNA_PWL_DESIGN_ALGORITHM GNA_PWL_MAX_ERROR_PERCENT PERFORMANCE_HINT INFERENCE_PRECISION_HINT "
        "PERFORMANCE_HINT_NUM_REQUESTS LOG_LEVEL EXECUTION_DEVICES";
    Run(ov::supported_properties.name(), supportedProperties);
}
|
Loading…
Reference in New Issue
Block a user