Adding missed conversion logic between layout and string (#15103)

* added missed layout to string conversion

* added a unit test for layout_from_string

* added missing spaces around =

* replaced 95 with SCALAR

* moved layout_to_string into a new file

* changed to parametrized test
This commit is contained in:
Eddy Kim
2023-01-16 22:32:30 +09:00
committed by GitHub
parent 89e2c0e2fa
commit 14a7e443d0
6 changed files with 107 additions and 28 deletions

View File

@@ -120,6 +120,10 @@ inline std::ostream& operator<<(std::ostream& out, const Layout& p) {
PRINT_LAYOUT(NCDHW);
PRINT_LAYOUT(NDHWC);
PRINT_LAYOUT(OIHW);
PRINT_LAYOUT(GOIHW);
PRINT_LAYOUT(OIDHW);
PRINT_LAYOUT(GOIDHW);
PRINT_LAYOUT(SCALAR);
PRINT_LAYOUT(C);
PRINT_LAYOUT(CHW);
PRINT_LAYOUT(HWC);

View File

@@ -67,8 +67,7 @@ std::vector<std::string> disabledTestPatterns() {
// TODO: 57562 No dynamic output shape support
R"(.*NonZeroLayerTest.*)",
// Not expected behavior
R"(.*Behavior.*InferRequestIOBBlobSetLayoutTest.*layout=(95|OIHW).*)",
R"(.*Behavior.*InferRequestIOBBlobSetLayoutTest.*layout=(95|OIHW).*)",
R"(.*Behavior.*InferRequestIOBBlobSetLayoutTest.*layout=(SCALAR|OIHW).*)",
R"(.*Behavior.*InferRequestIOBBlobSetLayoutTest.*CanSetOutBlobWithDifferentLayouts.*layout=HW.*)",
R"(.*Behavior.*InferRequestIOBBlobSetLayoutTest.*CanSetInBlobWithDifferentLayouts.*layout=NHWC.*targetDevice=(AUTO|MULTI).*)",
R"(.*Behavior.*InferRequestIOBBlobSetLayoutTest.*CanSetOutBlobWithDifferentLayouts.*layout=CN.*targetDevice=(AUTO|MULTI).*)",

View File

@@ -0,0 +1,37 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <unordered_map>
#include "ie/ie_common.h"
namespace cldnn {
// Converts a layout name (the exact text produced by streaming an
// InferenceEngine::Layout through operator<<) back to the enum value.
// Throws NetworkNotRead for any name that is not a known layout.
static InferenceEngine::Layout layout_from_string(const std::string& name) {
    // Built once per process; keys mirror the PRINT_LAYOUT spellings used
    // by the stream-insertion operator.
    static const std::unordered_map<std::string, InferenceEngine::Layout> name_to_layout = {
        { "ANY", InferenceEngine::Layout::ANY },
        { "NCHW", InferenceEngine::Layout::NCHW },
        { "NHWC", InferenceEngine::Layout::NHWC },
        { "NCDHW", InferenceEngine::Layout::NCDHW },
        { "NDHWC", InferenceEngine::Layout::NDHWC },
        { "OIHW", InferenceEngine::Layout::OIHW },
        { "GOIHW", InferenceEngine::Layout::GOIHW },
        { "OIDHW", InferenceEngine::Layout::OIDHW },
        { "GOIDHW", InferenceEngine::Layout::GOIDHW },
        { "SCALAR", InferenceEngine::Layout::SCALAR },
        { "C", InferenceEngine::Layout::C },
        { "CHW", InferenceEngine::Layout::CHW },
        { "HWC", InferenceEngine::Layout::HWC },
        { "HW", InferenceEngine::Layout::HW },
        { "NC", InferenceEngine::Layout::NC },
        { "CN", InferenceEngine::Layout::CN },
        { "BLOCKED", InferenceEngine::Layout::BLOCKED }
    };
    const auto found = name_to_layout.find(name);
    if (found == name_to_layout.end()) {
        IE_THROW(NetworkNotRead) << "Unknown layout with name '" << name << "'";
    }
    return found->second;
}
}  // namespace cldnn

View File

@@ -5,6 +5,7 @@
#include "ie_metric_helpers.hpp"
#include "intel_gpu/graph/serialization/binary_buffer.hpp"
#include "intel_gpu/graph/serialization/string_serializer.hpp"
#include "intel_gpu/graph/serialization/utils.hpp"
#include "intel_gpu/plugin/graph.hpp"
#include "intel_gpu/runtime/itt.hpp"
#include "intel_gpu/plugin/infer_request.hpp"
@@ -62,29 +63,6 @@ CompiledModel::CompiledModel(InferenceEngine::CNNNetwork &network,
}
}
// Legacy local string->Layout converter (being removed by this change).
// NOTE(review): superseded by cldnn::layout_from_string in
// serialization/utils.hpp; this copy lacks the GOIHW/OIDHW/GOIDHW/SCALAR
// entries, so deserializing a model with one of those layouts would throw.
static InferenceEngine::Layout layout_from_string(const std::string & name) {
// Lookup table keyed by the names operator<< produces for Layout values.
static const std::unordered_map<std::string, InferenceEngine::Layout> layouts = {
{ "ANY", InferenceEngine::Layout::ANY },
{ "NCHW", InferenceEngine::Layout::NCHW },
{ "NHWC", InferenceEngine::Layout::NHWC },
{ "NCDHW", InferenceEngine::Layout::NCDHW },
{ "NDHWC", InferenceEngine::Layout::NDHWC },
{ "OIHW", InferenceEngine::Layout::OIHW },
{ "C", InferenceEngine::Layout::C },
{ "CHW", InferenceEngine::Layout::CHW },
{ "HWC", InferenceEngine::Layout::HWC },
{ "HW", InferenceEngine::Layout::HW },
{ "NC", InferenceEngine::Layout::NC },
{ "CN", InferenceEngine::Layout::CN },
{ "BLOCKED", InferenceEngine::Layout::BLOCKED }
};
auto it = layouts.find(name);
if (it != layouts.end()) {
return it->second;
}
// Unknown name: fail model deserialization rather than guess a layout.
IE_THROW(NetworkNotRead) << "Unknown layout with name '" << name << "'";
}
CompiledModel::CompiledModel(std::istream& networkModel, InferenceEngine::RemoteContext::Ptr context, const ExecutionConfig& config) :
InferenceEngine::ExecutableNetworkThreadSafeDefault{[&]() -> InferenceEngine::ITaskExecutor::Ptr {
if (config.get_property(ov::intel_gpu::exclusive_async_requests)) {
@@ -122,7 +100,7 @@ CompiledModel::CompiledModel(std::istream& networkModel, InferenceEngine::Remote
ib >> precision;
ib >> layout;
DataPtr input = std::make_shared<Data>(name, Precision::FromStr(precision), layout_from_string(layout));
DataPtr input = std::make_shared<Data>(name, Precision::FromStr(precision), cldnn::layout_from_string(layout));
InputInfo::Ptr infoNew = std::make_shared<InputInfo>();
infoNew->setInputData(input);
inputs.emplace(std::make_pair(name, infoNew));
@@ -141,7 +119,7 @@ CompiledModel::CompiledModel(std::istream& networkModel, InferenceEngine::Remote
ib >> precision;
ib >> layout;
DataPtr output = std::make_shared<Data>(name, Precision::FromStr(precision), layout_from_string(layout));
DataPtr output = std::make_shared<Data>(name, Precision::FromStr(precision), cldnn::layout_from_string(layout));
outputs.emplace(std::make_pair(name, output));
}

View File

@@ -0,0 +1,61 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include "intel_gpu/graph/serialization/binary_buffer.hpp"
#include "intel_gpu/graph/serialization/string_serializer.hpp"
#include "intel_gpu/graph/serialization/utils.hpp"
using namespace cldnn;
using namespace ::tests;
// Round-trip fixture: stream the parameter Layout to its string form via
// operator<<, serialize that string into a binary buffer, read it back,
// and convert it with cldnn::layout_from_string; the result must equal
// the original layout. Catches any name missing from either direction
// of the string<->Layout conversion.
struct ie_layout_serialization_test : testing::TestWithParam<InferenceEngine::Layout> {
void run_test() {
InferenceEngine::Layout test_layout = GetParam();
membuf mem_buf;
{
// Serialize: Layout -> string (operator<<) -> binary buffer.
std::ostream out_mem(&mem_buf);
BinaryOutputBuffer ob = BinaryOutputBuffer(out_mem);
std::stringstream ss;
ss << test_layout;
ob << ss.str();
}
{
// Deserialize: binary buffer -> string -> Layout (layout_from_string).
std::istream in_mem(&mem_buf);
BinaryInputBuffer ib = BinaryInputBuffer(in_mem, get_test_engine());
std::string str_layout;
ib >> str_layout;
EXPECT_EQ(cldnn::layout_from_string(str_layout), test_layout);
}
}
};
// Parameterized entry point; the actual checks live in run_test().
TEST_P(ie_layout_serialization_test, basic) {
run_test();
}
// Exercise every InferenceEngine::Layout enumerator so that a value
// added to the enum without a matching layout_from_string entry (or
// PRINT_LAYOUT case) fails here rather than at model-import time.
INSTANTIATE_TEST_SUITE_P(
gpu_serialization,
ie_layout_serialization_test,
testing::Values(InferenceEngine::Layout::ANY,
InferenceEngine::Layout::NCHW,
InferenceEngine::Layout::NHWC,
InferenceEngine::Layout::NCDHW,
InferenceEngine::Layout::NDHWC,
InferenceEngine::Layout::OIHW,
InferenceEngine::Layout::GOIHW,
InferenceEngine::Layout::OIDHW,
InferenceEngine::Layout::GOIDHW,
InferenceEngine::Layout::SCALAR,
InferenceEngine::Layout::C,
InferenceEngine::Layout::CHW,
InferenceEngine::Layout::HWC,
InferenceEngine::Layout::HW,
InferenceEngine::Layout::NC,
InferenceEngine::Layout::CN,
InferenceEngine::Layout::BLOCKED)
);

View File

@@ -50,7 +50,7 @@ std::vector<std::string> disabledTestPatterns() {
// Not allowed dynamic loop tests on GPU
R"(.*smoke_StaticShapeLoop_dynamic_exit.*)",
// Not expected behavior
R"(.*Behavior.*InferRequestIOBBlobSetLayoutTest.*layout=(95|OIHW).*)",
R"(.*Behavior.*InferRequestIOBBlobSetLayoutTest.*layout=(SCALAR|OIHW).*)",
R"(.*Behavior.*InferRequestIOBBlobSetLayoutTest.*CanSetInBlobWithDifferentLayouts.*layout=NHWC.*)",
R"(.*Behavior.*InferRequestIOBBlobSetLayoutTest.*CanSetOutBlobWithDifferentLayouts.*layout=(CN|HW).*)",
R"(.*Behavior.*(Multi|Auto).*InferRequestSetBlobByType.*Batched.*)",