Remove ov::core dependency after CPU CompiledModel constructed (#17156)

* fix repr

* remove isNewApi

* add isLegacyApi

* remove isNewApi

* fix logic

* add test case

* remove comment

* fix bug

* add configuration

* skip in template

* skip failed case

* skip AUTO and MULTI to test

* resolve conflicts

* fix conflicts

* skip AUTO and MULTI to test

* template on

* Revert "template on"

This reverts commit d82a5a4964.

* get log

* add tmp core

* reset to test

* Revert "reset to test"

This reverts commit 7a39d4e24b.

* try to push

* use createCoreWithTemplate()

* remove AUTO and MULTI in skip_config

* Revert "remove AUTO and MULTI in skip_config"

This reverts commit d42420590f.

* fix conflicts

* fix conflicts

* remove AUTO and MULTI in skip config

* fix conflicts

* remove isLegacyAPI

* remove useless code

---------

Co-authored-by: Chen Peter <peter.chen@intel.com>
This commit is contained in:
Haiqi Pan
2023-06-15 05:50:24 +08:00
committed by GitHub
parent 79683c24ca
commit 3e63ab0dc3
6 changed files with 88 additions and 18 deletions

View File

@@ -82,7 +82,7 @@ struct Config {
std::map<std::string, std::string> _config;
bool isNewApi = true;
bool isLegacyApi = false;
#ifdef CPU_DEBUG_CAPS
DebugCapsConfig debugCaps;

View File

@@ -79,8 +79,12 @@ ExecNetwork::ExecNetwork(const InferenceEngine::CNNNetwork &network,
}
bool isFloatModel = !ov::op::util::has_op_with_type<ngraph::op::FakeQuantize>(function);
_cfg.isNewApi = !isLegacyAPI();
_mutex = std::make_shared<std::mutex>();
const auto& core = _plugin->GetCore();
if (!core)
IE_THROW() << "Unable to get API version. Core is unavailable";
_cfg.isLegacyApi = !core->isNewAPI();
if (cfg.exclusiveAsyncRequests) {
// special case when all InferRequests are muxed into a single queue
@@ -209,14 +213,6 @@ std::shared_ptr<ngraph::Function> ExecNetwork::GetExecGraphInfo() {
return GetGraph()._graph.dump();
}
bool ExecNetwork::isLegacyAPI() const {
const auto& core = _plugin->GetCore();
if (!core)
IE_THROW() << "Unable to get API version. Core is unavailable";
return !core->isNewAPI();
}
Parameter ExecNetwork::GetConfigLegacy(const std::string &name) const {
if (_graphs.empty())
IE_THROW() << "No graph was found";
@@ -279,7 +275,7 @@ InferenceEngine::Parameter ExecNetwork::GetMetric(const std::string &name) const
const auto& graph = graphLock._graph;
const auto& config = graph.getConfig();
if (isLegacyAPI()) {
if (_cfg.isLegacyApi) {
return GetMetricLegacy(name, graph);
}

View File

@@ -76,8 +76,6 @@ protected:
*/
GraphGuard::Lock GetGraph() const;
bool isLegacyAPI() const;
InferenceEngine::Parameter GetConfigLegacy(const std::string &name) const;
InferenceEngine::Parameter GetMetricLegacy(const std::string &name, const GraphGuard& graph) const;

View File

@@ -961,9 +961,9 @@ void Graph::PullOutputData(BlobMap &out) {
auto srcPrec = actualDesc.getPrecision();
auto dstPrec = expectedDesc.getPrecision();
if (getConfig().isNewApi && srcPrec == dstPrec && ext_blob->byteSize() != intr_blob.GetSize())
IE_THROW() << "Output blob byte size is not equal network output byte size ("
<< ext_blob->byteSize() << "!=" << intr_blob.GetSize() << ").";
if (!getConfig().isLegacyApi && srcPrec == dstPrec && ext_blob->byteSize() != intr_blob.GetSize())
IE_THROW() << "Output blob byte size is not equal network output byte size (" << ext_blob->byteSize()
<< "!=" << intr_blob.GetSize() << ").";
void *ext_blob_ptr = ext_blob->buffer();
void *intr_blob_ptr = intr_blob.GetData();

View File

@@ -139,4 +139,4 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*ReferenceConversionLayerTest.CompareWithHardcodedRefs/conversionType=(Convert|ConvertLike)_shape=.*_iType=(f16|f32|bf16)_oType=u4.*)");
#endif
return retVector;
}
}

View File

@@ -157,7 +157,83 @@ TEST_P(OVCompiledModelBaseTest, canCompileModelFromMemory) {
</edges>
</net>
)V0G0N";
EXPECT_NO_THROW(auto execNet = core->compile_model(model, ov::Tensor(), target_device, configuration));
EXPECT_NO_THROW(auto execNet = core ->compile_model(model, ov::Tensor(), target_device, configuration));
}
TEST_P(OVCompiledModelBaseTest, canCompileModelwithBrace) {
std::string model = R"V0G0N(
<net name="Network" version="10">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset8">
<data element_type="f16" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP16" names="data1">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="in2" type="Parameter" id="1" version="opset8">
<data element_type="f16" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP16" names="data2">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="concat" id="2" type="Concat" version="opset8">
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
<port id="1" precision="FP16">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP16" names="r">
<dim>1</dim>
<dim>6</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="3" version="opset8">
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>6</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
</edges>
</net>
)V0G0N";
ov::CompiledModel compiled_model;
{
ov::Core tmp_core = createCoreWithTemplate();
compiled_model = tmp_core.compile_model(model, ov::Tensor(), target_device, configuration);
}
EXPECT_NO_THROW(compiled_model.get_property(ov::optimal_number_of_infer_requests));
}
TEST(OVCompiledModelBaseTest, canCompileModelToDefaultDevice) {