Merge remote-tracking branch 'github/master' into auto-batch-master

commit e3d7a2c5a9
@@ -88,7 +88,7 @@ jobs:
     rm -rf $(BUILD_SAMPLES_DIR) ; mkdir $(BUILD_SAMPLES_DIR)
     sudo rm -rf $(TMP_DIR) ; sudo mkdir $(TMP_DIR) ; sudo chmod 777 -R $(TMP_DIR)
     sudo mkdir -p $(SHARE_DIR)
-    sudo apt --assume-yes install nfs-common
+    sudo apt --assume-yes update && sudo apt --assume-yes install nfs-common
     sudo mount -vvv -t nfs cinfsshare.file.core.windows.net:/cinfsshare/onnxtestdata $(SHARE_DIR) -o vers=4,minorversion=1,sec=sys
     mkdir -p $(CCACHE_DIR)
   displayName: 'Make dir'
@@ -306,7 +306,7 @@ In more advanced cases, when replaced operation has several outputs and we add a

 ###4. Runtime Info

-Runtime info is a map `std::map<std::string, std::shared_ptr<Variant>>` located inside `ngraph::Node` class. It represents additional attributes in `ngraph::Node`.
+Runtime info is a map `std::map<std::string, ov::Any>` located inside `ngraph::Node` class. It represents additional attributes in `ngraph::Node`.
 These attributes can be set by users or by plugins and when executing transformation that changes `ngraph::Function` we need to preserve these attributes as they will not be automatically propagated.
 In most cases, transformations have the following types: 1:1 (replace node with another node), 1:N (replace node with a sub-graph), N:1 (fuse sub-graph into a single node), N:M (any other transformation).
 Currently, there is no mechanism that automatically detects transformation types, so we need to propagate this runtime information manually. See the examples below.
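The hunk above documents the change of the runtime-info value type from `std::shared_ptr<Variant>` to `ov::Any`. As a minimal sketch of the manual propagation the text calls for — here the N:1 case, fusing two nodes into one — assuming the standard `ngraph::copy_runtime_info` helper and hypothetical node variables:

```cpp
#include <ngraph/graph_util.hpp>  // replace_node
#include <ngraph/rt_info.hpp>     // copy_runtime_info

// N:1 fusion: merge the rt_info of both source nodes into the replacement
// node so attributes such as "fused_names" survive the transformation.
void fuse_and_preserve(const std::shared_ptr<ngraph::Node>& old_mul,
                       const std::shared_ptr<ngraph::Node>& old_add,
                       const std::shared_ptr<ngraph::Node>& fused) {
    ngraph::copy_runtime_info({old_mul, old_add}, fused);  // N:1 propagation
    ngraph::replace_node(old_add, fused);                  // rewire consumers
}
```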
@@ -14,7 +14,7 @@ using namespace InferenceEngine;
 auto it = std::find(keys.begin(), keys.end(), METRIC_KEY(IMPORT_EXPORT_SUPPORT));

 // If metric 'IMPORT_EXPORT_SUPPORT' exists, check it's value
-bool cachingSupported = (it != keys.end()) && ie.GetMetric(deviceName, METRIC_KEY(IMPORT_EXPORT_SUPPORT)).as<bool>();
+auto cachingSupported = (it != keys.end()) && ie.GetMetric(deviceName, METRIC_KEY(IMPORT_EXPORT_SUPPORT)).as<bool>();
 //! [part3]
 return 0;
 }
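This snippet belongs to the model-caching documentation: a device that lists the `IMPORT_EXPORT_SUPPORT` metric can serve compiled networks from a cache. A small usage sketch under that assumption, with a hypothetical cache directory and model path:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core ie;
    // Once a cache dir is set, devices that support import/export satisfy
    // repeated LoadNetwork calls from the compiled-blob cache.
    ie.SetConfig({{CONFIG_KEY(CACHE_DIR), "my_cache_dir"}});  // hypothetical path
    auto execNet = ie.LoadNetwork("model.xml", "CPU");        // hypothetical model
    return 0;
}
```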
@@ -45,21 +45,21 @@ struct DetectionOutputParams {
       refData(CreateTensor(iType, oValues)),
       testcaseName(test_name) {
         attrs.num_classes = num_classes;
-        attrs.background_label_id = background_label_id;
-        attrs.top_k = top_k;
-        attrs.variance_encoded_in_target = variance_encoded_in_target;
-        attrs.keep_top_k = keep_top_k;
-        attrs.code_type = code_type;
-        attrs.share_location = share_location;
-        attrs.nms_threshold = nms_threshold;
-        attrs.confidence_threshold = confidence_threshold;
-        attrs.clip_after_nms = clip_after_nms;
-        attrs.clip_before_nms = clip_before_nms;
-        attrs.decrease_label_id = decrease_label_id;
-        attrs.normalized = normalized;
-        attrs.input_height = input_height;
-        attrs.input_width = input_width;
-        attrs.objectness_score = objectness_score;
+        attrs_v8.background_label_id = attrs.background_label_id = background_label_id;
+        attrs_v8.top_k = attrs.top_k = top_k;
+        attrs_v8.variance_encoded_in_target = attrs.variance_encoded_in_target = variance_encoded_in_target;
+        attrs_v8.keep_top_k = attrs.keep_top_k = keep_top_k;
+        attrs_v8.code_type = attrs.code_type = code_type;
+        attrs_v8.share_location = attrs.share_location = share_location;
+        attrs_v8.nms_threshold = attrs.nms_threshold = nms_threshold;
+        attrs_v8.confidence_threshold = attrs.confidence_threshold = confidence_threshold;
+        attrs_v8.clip_after_nms = attrs.clip_after_nms = clip_after_nms;
+        attrs_v8.clip_before_nms = attrs.clip_before_nms = clip_before_nms;
+        attrs_v8.decrease_label_id = attrs.decrease_label_id = decrease_label_id;
+        attrs_v8.normalized = attrs.normalized = normalized;
+        attrs_v8.input_height = attrs.input_height = input_height;
+        attrs_v8.input_width = attrs.input_width = input_width;
+        attrs_v8.objectness_score = attrs.objectness_score = objectness_score;

         size_t num_loc_classes = attrs.share_location ? 1 : attrs.num_classes;
         size_t prior_box_size = attrs.normalized ? 4 : 5;
@@ -107,21 +107,21 @@ template <class IT>
       auxConfData(CreateTensor(iType, auxConfValues)),
       testcaseName(test_name) {
         attrs.num_classes = num_classes;
-        attrs.background_label_id = background_label_id;
-        attrs.top_k = top_k;
-        attrs.variance_encoded_in_target = variance_encoded_in_target;
-        attrs.keep_top_k = keep_top_k;
-        attrs.code_type = code_type;
-        attrs.share_location = share_location;
-        attrs.nms_threshold = nms_threshold;
-        attrs.confidence_threshold = confidence_threshold;
-        attrs.clip_after_nms = clip_after_nms;
-        attrs.clip_before_nms = clip_before_nms;
-        attrs.decrease_label_id = decrease_label_id;
-        attrs.normalized = normalized;
-        attrs.input_height = input_height;
-        attrs.input_width = input_width;
-        attrs.objectness_score = objectness_score;
+        attrs_v8.background_label_id = attrs.background_label_id = background_label_id;
+        attrs_v8.top_k = attrs.top_k = top_k;
+        attrs_v8.variance_encoded_in_target = attrs.variance_encoded_in_target = variance_encoded_in_target;
+        attrs_v8.keep_top_k = attrs.keep_top_k = keep_top_k;
+        attrs_v8.code_type = attrs.code_type = code_type;
+        attrs_v8.share_location = attrs.share_location = share_location;
+        attrs_v8.nms_threshold = attrs.nms_threshold = nms_threshold;
+        attrs_v8.confidence_threshold = attrs.confidence_threshold = confidence_threshold;
+        attrs_v8.clip_after_nms = attrs.clip_after_nms = clip_after_nms;
+        attrs_v8.clip_before_nms = attrs.clip_before_nms = clip_before_nms;
+        attrs_v8.decrease_label_id = attrs.decrease_label_id = decrease_label_id;
+        attrs_v8.normalized = attrs.normalized = normalized;
+        attrs_v8.input_height = attrs.input_height = input_height;
+        attrs_v8.input_width = attrs.input_width = input_width;
+        attrs_v8.objectness_score = attrs.objectness_score = objectness_score;

         size_t num_loc_classes = attrs.share_location ? 1 : attrs.num_classes;
         size_t prior_box_size = attrs.normalized ? 4 : 5;
@@ -135,6 +135,7 @@ template <class IT>
     }

     ov::op::v0::DetectionOutput::Attributes attrs;
+    ov::op::v8::DetectionOutput::Attributes attrs_v8;
     ov::PartialShape locShape;
     ov::PartialShape confShape;
     ov::PartialShape priorBoxesShape;
@@ -194,10 +195,61 @@ private:
     }
 };

+class ReferenceDetectionOutputV8LayerTest : public testing::TestWithParam<DetectionOutputParams>,
+                                            public CommonReferenceTest {
+public:
+    void SetUp() override {
+        auto params = GetParam();
+        function = CreateFunction(params);
+        if ((params.auxLocShape.size() != 0) && (params.auxConfShape.size() != 0))
+            inputData = {params.locData, params.confData, params.priorBoxesData, params.auxConfData, params.auxLocData};
+        else
+            inputData = {params.locData, params.confData, params.priorBoxesData};
+        refOutData = {params.refData};
+    }
+    static std::string getTestCaseName(const testing::TestParamInfo<DetectionOutputParams>& obj) {
+        auto param = obj.param;
+        std::ostringstream result;
+        result << "locShape=" << param.locShape << "_";
+        result << "confShape=" << param.confShape << "_";
+        result << "priorBoxesShape=" << param.priorBoxesShape << "_";
+        if ((param.auxLocShape.size() != 0) && (param.auxConfShape.size() != 0)) {
+            result << "auxLocShape=" << param.locShape << "_";
+            result << "auxConfShape=" << param.confShape << "_";
+        }
+        result << "iType=" << param.inType;
+        if (param.testcaseName != "")
+            result << "_" << param.testcaseName;
+        return result.str();
+    }
+
+private:
+    static std::shared_ptr<Function> CreateFunction(const DetectionOutputParams& params) {
+        const auto loc = std::make_shared<op::v0::Parameter>(params.inType, params.locShape);
+        const auto conf = std::make_shared<op::v0::Parameter>(params.inType, params.confShape);
+        const auto priorBoxes = std::make_shared<op::v0::Parameter>(params.inType, params.priorBoxesShape);
+        if ((params.auxLocShape.size() != 0) && (params.auxConfShape.size() != 0)) {
+            const auto auxConf = std::make_shared<op::v0::Parameter>(params.inType, params.auxConfShape);
+            const auto auxLoc = std::make_shared<op::v0::Parameter>(params.inType, params.auxLocShape);
+            const auto DetectionOutput =
+                std::make_shared<op::v8::DetectionOutput>(loc, conf, priorBoxes, auxConf, auxLoc, params.attrs_v8);
+            return std::make_shared<ov::Function>(NodeVector{DetectionOutput},
+                                                  ParameterVector{loc, conf, priorBoxes, auxConf, auxLoc});
+        } else {
+            const auto DetectionOutput = std::make_shared<op::v8::DetectionOutput>(loc, conf, priorBoxes, params.attrs_v8);
+            return std::make_shared<ov::Function>(NodeVector{DetectionOutput}, ParameterVector{loc, conf, priorBoxes});
+        }
+    }
+};
+
 TEST_P(ReferenceDetectionOutputLayerTest, CompareWithRefs) {
     Exec();
 }

+TEST_P(ReferenceDetectionOutputV8LayerTest, CompareWithRefs) {
+    Exec();
+}
+
 template <element::Type_t IN_ET>
 std::vector<DetectionOutputParams> generateDetectionOutputFloatParams() {
     using T = typename element_type_traits<IN_ET>::value_type;
@@ -517,4 +569,9 @@ std::vector<DetectionOutputParams> generateDetectionOutputCombinedParams() {
 INSTANTIATE_TEST_SUITE_P(smoke_DetectionOutput_With_Hardcoded_Refs, ReferenceDetectionOutputLayerTest,
     testing::ValuesIn(generateDetectionOutputCombinedParams()), ReferenceDetectionOutputLayerTest::getTestCaseName);

+INSTANTIATE_TEST_SUITE_P(smoke_DetectionOutput_With_Hardcoded_Refs,
+                         ReferenceDetectionOutputV8LayerTest,
+                         testing::ValuesIn(generateDetectionOutputCombinedParams()),
+                         ReferenceDetectionOutputV8LayerTest::getTestCaseName);
+
 } // namespace
@@ -130,10 +130,10 @@ TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) {
         std::string defaultDeviceID = ie.GetConfig(deviceName, CONFIG_KEY(DEVICE_ID));
         std::cout << CONFIG_KEY(DEVICE_ID) << " : " << defaultDeviceID << std::endl;
     } else if (CONFIG_KEY(PERF_COUNT) == confKey) {
-        bool defaultPerfCount = ie.GetConfig(deviceName, CONFIG_KEY(PERF_COUNT)).as<bool>();
+        auto defaultPerfCount = ie.GetConfig(deviceName, CONFIG_KEY(PERF_COUNT)).as<bool>();
         std::cout << CONFIG_KEY(PERF_COUNT) << " : " << defaultPerfCount << std::endl;
     } else if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) == confKey) {
-        bool defaultExclusive = ie.GetConfig(deviceName, CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)).as<bool>();
+        auto defaultExclusive = ie.GetConfig(deviceName, CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)).as<bool>();
         std::cout << CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) << " : " << defaultExclusive << std::endl;
     }
 }
@@ -94,12 +94,12 @@ MKLDNNDescriptor::operator std::shared_ptr<mkldnn::lrn_forward::desc>() {
     return typeDesc->getPtr();
 }

-MKLDNNDescriptor::MKLDNNDescriptor(std::shared_ptr<mkldnn::pooling_forward::desc> desc) {
-    this->desc.reset(new DescFwdImpl<mkldnn::pooling_forward::desc>(desc));
+MKLDNNDescriptor::MKLDNNDescriptor(std::shared_ptr<mkldnn::pooling_v2_forward::desc> desc) {
+    this->desc.reset(new DescFwdImpl<mkldnn::pooling_v2_forward::desc>(desc));
 }

-MKLDNNDescriptor::operator std::shared_ptr<mkldnn::pooling_forward::desc>() {
-    auto typeDesc = std::dynamic_pointer_cast<DescFwdImpl<mkldnn::pooling_forward::desc>>(desc);
+MKLDNNDescriptor::operator std::shared_ptr<mkldnn::pooling_v2_forward::desc>() {
+    auto typeDesc = std::dynamic_pointer_cast<DescFwdImpl<mkldnn::pooling_v2_forward::desc>>(desc);
     if (typeDesc == nullptr) {
         IE_THROW() << "Cannot cast descriptor!";
     }
@@ -28,8 +28,8 @@ public:
     explicit MKLDNNDescriptor(std::shared_ptr<mkldnn::lrn_forward::desc> desc);
     operator std::shared_ptr<mkldnn::lrn_forward::desc>();

-    explicit MKLDNNDescriptor(std::shared_ptr<mkldnn::pooling_forward::desc> desc);
-    operator std::shared_ptr<mkldnn::pooling_forward::desc>();
+    explicit MKLDNNDescriptor(std::shared_ptr<mkldnn::pooling_v2_forward::desc> desc);
+    operator std::shared_ptr<mkldnn::pooling_v2_forward::desc>();

     explicit MKLDNNDescriptor(std::shared_ptr<mkldnn::softmax_forward::desc> desc);
     operator std::shared_ptr<mkldnn::softmax_forward::desc>();
@@ -186,7 +186,7 @@ std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const MKLDNNGraph
     }

     for (auto && kvp : meta_data)
-        return_node->get_rt_info()[kvp.first] = std::make_shared<::ngraph::VariantWrapper<std::string>>(kvp.second);
+        return_node->get_rt_info()[kvp.first] = std::make_shared<::ov::RuntimeAttributeWrapper<std::string>>(kvp.second);
     return_node->set_friendly_name(node->getName());

     return return_node;
@@ -526,10 +526,13 @@ void MKLDNNPlugin::MKLDNNInferRequest::changeDefaultPtr() {
             break;
         }

-        if (child->getType() == Concatenation && dynamic_cast<MKLDNNConcatNode*>(child.get())->isOptimized()) {
+        if (child->getType() == Concatenation) {
+            auto concat = dynamic_cast<MKLDNNConcatNode*>(child.get());
+            if (concat && concat->isOptimized()) {
                 canBeInPlace = false;
                 break;
             }
+        }

         // Cannot be in-place before split because split is using different ptrs without offsets
         if (child->getType() == Split) {
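Besides restructuring, the hunk above fixes a latent null dereference: the old condition called `isOptimized()` on the raw result of `dynamic_cast` without checking it. A self-contained sketch of the guarded-downcast pattern, with stand-in types:

```cpp
#include <memory>

struct Node { virtual ~Node() = default; };                       // stand-in base
struct ConcatNode : Node { bool isOptimized() const { return true; } };

// dynamic_cast on a pointer yields nullptr when the runtime type does not
// match, so the result must be tested before any member access.
bool isOptimizedConcat(const std::shared_ptr<Node>& child) {
    auto* concat = dynamic_cast<ConcatNode*>(child.get());
    return concat && concat->isOptimized();
}
```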
@@ -137,7 +137,7 @@ MKLDNNNode::MKLDNNNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::en
     }

     if (op != nullptr) {
-        std::string inputMemoryFormats = ngraph::getMLKDNNInputMemoryFormats(op);
+        std::string inputMemoryFormats = ngraph::getMKLDNNInputMemoryFormats(op);
         if (!inputMemoryFormats.empty()) {
             std::istringstream stream(inputMemoryFormats);
             std::string str;
@@ -148,7 +148,7 @@ MKLDNNNode::MKLDNNNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::en
         }
     }

-    std::string outputMemoryFormats = ngraph::getMLKDNNOutputMemoryFormats(op);
+    std::string outputMemoryFormats = ngraph::getMKLDNNOutputMemoryFormats(op);
     if (!outputMemoryFormats.empty()) {
         std::istringstream stream(outputMemoryFormats);
         std::string str;
@@ -162,7 +162,7 @@ MKLDNNNode::MKLDNNNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::en

     const auto it = rtInfo.find("enforceBF16evenForGraphTail");
     if (it != rtInfo.end()) {
-        if (const auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<int64_t>>(it->second))
+        if (const auto value = std::dynamic_pointer_cast<ov::RuntimeAttributeImpl<int64_t>>(it->second))
             enforceBF16evenForGraphTail = value->get();
     }
 }
@@ -85,7 +85,7 @@ MKLDNNPlugin::MoveEltwiseUpThroughDataMov::MoveEltwiseUpThroughDataMov() {
         }

         // eltwise constant shape should match new input shape
-        if (is_binary_op && current->get_output_shape(0).size() != eltwise->get_input_shape(1).size()) {
+        if (is_binary_op && current->get_output_partial_shape(0).rank().get_length() != eltwise->get_input_partial_shape(1).rank().get_length()) {
             auto old_eltwise_const = std::dynamic_pointer_cast<ngraph::opset8::Constant>(eltwise->get_input_node_shared_ptr(1));
             auto new_constant = std::make_shared<ngraph::opset8::Constant>(*old_eltwise_const.get(), ngraph::Shape{});
             ngraph::replace_node(old_eltwise_const, new_constant);
@@ -67,7 +67,7 @@ namespace {
         ngraph::replace_node(transposeAfter, {reshape2->output(0)});
     }

-    sequenceOp->get_rt_info()["seqAxis"] = std::make_shared<ngraph::VariantWrapper<int64_t>>(seqAxis);
+    sequenceOp->get_rt_info()["seqAxis"] = std::make_shared<ov::RuntimeAttributeWrapper<int64_t>>(seqAxis);

     return true;
 }
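This hunk and several others in the merge (the graph dumper, MKLDNNNode, and MKLDNNRNN hunks) apply the same rename: `ngraph::VariantWrapper`/`ngraph::VariantImpl` become `ov::RuntimeAttributeWrapper`/`ov::RuntimeAttributeImpl`. A write/read sketch of the pattern, assuming those transitional wrapper types from this revision:

```cpp
#include <cstdint>
#include <memory>

// Producer side: attach a typed attribute to a node's rt_info map.
void tagSeqAxis(const std::shared_ptr<ngraph::Node>& node, int64_t seqAxis) {
    node->get_rt_info()["seqAxis"] =
        std::make_shared<ov::RuntimeAttributeWrapper<int64_t>>(seqAxis);
}

// Consumer side: read it back, guarding the downcast because the entry may
// be absent or hold a different type; 0 is the fallback here.
int64_t readSeqAxis(const std::shared_ptr<ngraph::Node>& node) {
    const auto& rtInfo = node->get_rt_info();
    const auto it = rtInfo.find("seqAxis");
    if (it == rtInfo.end())
        return 0;
    if (auto wrapped = std::dynamic_pointer_cast<ov::RuntimeAttributeWrapper<int64_t>>(it->second))
        return wrapped->get();
    return 0;
}
```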
@@ -39,8 +39,8 @@ private:
     const int* offsetsData_ = nullptr;
     const int* defaultIndices_ = nullptr;

-    size_t _indicesLen;
-    size_t _offsetsLen;
+    size_t _indicesLen = 0;
+    size_t _offsetsLen = 0;
 };

 } // namespace MKLDNNPlugin
@@ -215,12 +215,16 @@ static void nms_cf(const float* conf_data,
     detections = (post_nms_topn == -1 ? detections : (std::min)(post_nms_topn, detections));
 }

-bool MKLDNNExperimentalDetectronDetectionOutputNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
-    try {
-        if (isDynamicNgraphNode(op)) {
-            errorMessage = "Doesn't support op with dynamic shapes";
+bool MKLDNNExperimentalDetectronDetectionOutputNode::needShapeInfer() const {
     return false;
 }

+bool MKLDNNExperimentalDetectronDetectionOutputNode::needPrepareParams() const {
+    return false;
+}
+
+bool MKLDNNExperimentalDetectronDetectionOutputNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+    try {
     const auto doOp = ngraph::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronDetectionOutput>(op);
     if (!doOp) {
         errorMessage = "Node is not an instance of the ExperimentalDetectronDetectionOutput from the operations set v6.";
@@ -268,6 +272,12 @@ void MKLDNNExperimentalDetectronDetectionOutputNode::initSupportedPrimitiveDescr
         impl_desc_type::ref_any);
 }

+void MKLDNNExperimentalDetectronDetectionOutputNode::createPrimitive() {
+    if (inputShapesDefined()) {
+        updateLastInputDims();
+    }
+}
+
 void MKLDNNExperimentalDetectronDetectionOutputNode::execute(mkldnn::stream strm) {
     const int rois_num = getParentEdgeAt(INPUT_ROIS)->getMemory().getStaticDims()[0];
     assert(classes_num_ == static_cast<int>(getParentEdgeAt(INPUT_SCORES)->getMemory().getStaticDims()[1]));
@@ -15,10 +15,13 @@ public:

     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
-    void createPrimitive() override {};
+    void createPrimitive() override;
     void execute(mkldnn::stream strm) override;
     bool created() const override;

+    bool needShapeInfer() const override;
+    bool needPrepareParams() const override;
+    void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
     static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;

 private:
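The three ExperimentalDetectron* nodes in this merge adopt dynamic shapes through the same set of overrides shown in the header hunk above. A condensed sketch of the contract as these hunks use it (method names come from the diff; the base-class behaviour is an assumption):

```cpp
// Dynamic-shape contract used by the ExperimentalDetectron* nodes here:
//  - needShapeInfer()/needPrepareParams() return false: the node derives its
//    output sizes itself in execute(), so the common machinery is skipped;
//  - createPrimitive() only snapshots input dims once they are defined;
//  - executeDynamicImpl() forwards to the static-shape execute().
class ExampleDynamicNode : public MKLDNNNode {   // hypothetical node
public:
    void createPrimitive() override {
        if (inputShapesDefined()) {
            updateLastInputDims();
        }
    }
    bool needShapeInfer() const override { return false; }
    bool needPrepareParams() const override { return false; }
    void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
    void execute(mkldnn::stream strm) override;  // reads getStaticDims() per call
};
```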
@@ -275,10 +275,6 @@ void fill_output_blobs(const float* proposals, const int* roi_indices,
 bool MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode::isSupportedOperation
         (const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (isDynamicNgraphNode(op)) {
-            errorMessage = "Doesn't support op with dynamic shapes";
-            return false;
-        }
         const auto proposalOp = ngraph::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(op);
         if (!proposalOp) {
             errorMessage = "Node is not an instance of the Proposal from the operations set v0.";
@@ -324,6 +320,12 @@ void MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode::initSupportedP
         impl_desc_type::ref_any);
 }

+void MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode::createPrimitive() {
+    if (inputShapesDefined()) {
+        updateLastInputDims();
+    }
+}
+
 void MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode::execute(mkldnn::stream strm) {
     try {
         if (inputShapes.size() != 4 || outputShapes.size() != 2) {
@@ -431,4 +433,12 @@ bool MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode::created() cons
     return getType() == ExperimentalDetectronGenerateProposalsSingleImage;
 }

+bool MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode::needShapeInfer() const {
+    return false;
+}
+
+bool MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode::needPrepareParams() const {
+    return false;
+}
+
 REG_MKLDNN_PRIM_FOR(MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode, ExperimentalDetectronGenerateProposalsSingleImage)
@@ -16,10 +16,13 @@ public:

     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
-    void createPrimitive() override {};
+    void createPrimitive() override;
     void execute(mkldnn::stream strm) override;
     bool created() const override;

+    bool needShapeInfer() const override;
+    bool needPrepareParams() const override;
+    void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
     static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;

 private:
@@ -14,10 +14,6 @@ using namespace InferenceEngine;
 bool MKLDNNExperimentalDetectronPriorGridGeneratorNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op,
                                                                              std::string& errorMessage) noexcept {
     try {
-        if (isDynamicNgraphNode(op)) {
-            errorMessage = "Doesn't support op with dynamic shapes";
-            return false;
-        }
         const auto priorGridGen = std::dynamic_pointer_cast<const ngraph::opset6::ExperimentalDetectronPriorGridGenerator>(op);
         if (!priorGridGen) {
             errorMessage = "Only opset6 ExperimentalDetectronPriorGridGenerator operation is supported";
@@ -42,11 +38,6 @@ MKLDNNExperimentalDetectronPriorGridGeneratorNode::MKLDNNExperimentalDetectronPr
     if (getOriginalInputsNumber() != 3 || getOriginalOutputsNumber() != 1)
         IE_THROW() << errorPrefix << " has incorrect number of input/output edges!";

-    if (op->get_input_shape(INPUT_PRIORS).size() != 2 ||
-            op->get_input_shape(INPUT_FEATUREMAP).size() != 4 ||
-            op->get_input_shape(INPUT_IMAGE).size() != 4)
-        IE_THROW() << errorPrefix << " has unsupported input shape";
-
     const auto &attr = priorGridGen->get_attrs();
     grid_w_ = attr.w;
     grid_h_ = attr.h;
@@ -65,6 +56,12 @@ void MKLDNNExperimentalDetectronPriorGridGeneratorNode::initSupportedPrimitiveDe
         impl_desc_type::ref_any);
 }

+void MKLDNNExperimentalDetectronPriorGridGeneratorNode::createPrimitive() {
+    if (inputShapesDefined()) {
+        updateLastInputDims();
+    }
+}
+
 void MKLDNNExperimentalDetectronPriorGridGeneratorNode::execute(mkldnn::stream strm) {
     const int num_priors_ = getParentEdgeAt(INPUT_PRIORS)->getMemory().getStaticDims()[0];
     assert(getParentEdgeAt(INPUT_PRIORS)->getMemory().getStaticDims()[1] == 4);
@@ -95,4 +92,8 @@ bool MKLDNNExperimentalDetectronPriorGridGeneratorNode::created() const {
     return getType() == ExperimentalDetectronPriorGridGenerator;
 }

+bool MKLDNNExperimentalDetectronPriorGridGeneratorNode::needPrepareParams() const {
+    return false;
+}
+
 REG_MKLDNN_PRIM_FOR(MKLDNNExperimentalDetectronPriorGridGeneratorNode, ExperimentalDetectronPriorGridGenerator)
@@ -15,10 +15,12 @@ public:

     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
-    void createPrimitive() override {};
+    void createPrimitive() override;
     void execute(mkldnn::stream strm) override;
     bool created() const override;

+    bool needPrepareParams() const override;
+    void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
     static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;

 private:
@@ -163,26 +163,11 @@ void MKLDNNGenericNode::execLayer() {

     // TODO: use ngraph-based extension mechnism if needed to recompute shape
     isDynBatch = false;
-    // TODO: uncomment after using ngraph-based extension mechnism
-    // if (isDynBatch) {
-    //     for (size_t i = 0; i < inputs.size(); i++) {
-    //         auto td = inputs[i]->getTensorDesc();
-    //         td.setDims(inputDescs[i].getDims());
-    //         inputs[i] = make_blob_with_precision(td, getParentEdgeAt(i)->getMemory().GetData());
-    //     }
-    // }

     std::vector<InferenceEngine::Blob::Ptr> outputs;
     for (size_t i = 0; i < outputShapes.size(); i++) {
-        if (isDynBatch) {
-            auto out_edge = getChildEdgesAtPort(i)[0];
-            auto td = MemoryDescUtils::convertToTensorDesc(out_edge->getMemory().getDesc());
-            td.setDims(execOutputShapes[i]);
-            outputs.push_back(make_blob_with_precision(td, out_edge->getMemory().GetData()));
-        } else {
             outputs.push_back(MemoryDescUtils::interpretAsBlob(getChildEdgesAtPort(i)[0]->getMemory()));
         }
-    }
     InferenceEngine::ResponseDesc resp;
     InferenceEngine::StatusCode rc = impls[0]->execute(inputs, outputs, &resp);
     if (rc != InferenceEngine::OK) {
@@ -48,10 +48,10 @@ private:
     static const size_t NMS_SELECTED_INDICES = 1;
     static const size_t NMS_VALID_OUTPUTS = 2;

-    size_t m_numBatches;
-    size_t m_numBoxes;
-    size_t m_numClasses;
-    size_t m_maxBoxesPerBatch;
+    size_t m_numBatches = 0;
+    size_t m_numBoxes = 0;
+    size_t m_numClasses = 0;
+    size_t m_maxBoxesPerBatch = 0;

     MatrixNmsSortResultType m_sortResultType;
     bool m_sortResultAcrossBatch;
@@ -20,10 +20,15 @@ using namespace mkldnn;
 using namespace MKLDNNPlugin;
 using namespace InferenceEngine;

-bool MKLDNNPoolingNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool MKLDNNPoolingNode::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (!ngraph::as_type_ptr<const ngraph::op::v1::MaxPool>(op) && !ngraph::as_type_ptr<const ngraph::op::v1::AvgPool>(op)) {
-            errorMessage = "Only opset1 MaxPool and AvgPool operations are supported";
+        if (ov::is_type<const ov::op::v8::MaxPool>(op)) {
+            if (!op->get_output_target_inputs(1).empty()) {
+                errorMessage = "MaxPool from opset8 is supported only with one output";
+                return false;
+            }
+        } else if (!ov::is_type<const ov::op::v1::MaxPool>(op) && !ov::is_type<const ov::op::v1::AvgPool>(op)) {
+            errorMessage = "MaxPool and AvgPool from opset1 and MaxPool from opset8 are supported";
             return false;
         }
     } catch (...) {
@@ -32,48 +37,52 @@ bool MKLDNNPoolingNode::isSupportedOperation(const std::shared_ptr<const ngraph:
     return true;
 }

-MKLDNNPoolingNode::MKLDNNPoolingNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
+MKLDNNPoolingNode::MKLDNNPoolingNode(const std::shared_ptr<ov::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
         : MKLDNNNode(op, eng, cache) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {
         IE_THROW(NotImplemented) << errorMessage;
     }

-    auto maxPoolOp = ngraph::as_type_ptr<ngraph::op::v1::MaxPool>(op);
-    auto avgPoolOp = ngraph::as_type_ptr<ngraph::op::v1::AvgPool>(op);
-    if (maxPoolOp) {
+    auto get_attributes = [](std::vector<ptrdiff_t>& internal_attribute, const std::vector<size_t> external_attribute) {
+        for (size_t i = 0; i < external_attribute.size(); i++) {
+            internal_attribute.push_back(static_cast<ptrdiff_t>(external_attribute[i]));
+        }
+    };
+
+    if (auto maxPoolOp_v8 = ov::as_type_ptr<const ov::op::v8::MaxPool>(op)) {
+        isMaxPool8 = true;
         algorithm = PoolingMax;
         exclude_pad = false;

-        for (int i = 0; i < maxPoolOp->get_strides().size(); i++) {
-            stride.push_back(static_cast<ptrdiff_t>(maxPoolOp->get_strides()[i]));
-        }
-        for (int i = 0; i < maxPoolOp->get_kernel().size(); i++) {
-            kernel.push_back(static_cast<ptrdiff_t>(maxPoolOp->get_kernel()[i]));
-        }
-        for (int i = 0; i < maxPoolOp->get_pads_begin().size(); i++) {
-            data_pad_begin.push_back(static_cast<ptrdiff_t>(maxPoolOp->get_pads_begin()[i]));
-        }
-        for (int i = 0; i < maxPoolOp->get_pads_end().size(); i++) {
-            data_pad_end.push_back(static_cast<ptrdiff_t>(maxPoolOp->get_pads_end()[i]));
-        }
-        auto_pad = (maxPoolOp->get_auto_pad() == ov::op::PadType::SAME_LOWER || maxPoolOp->get_auto_pad() == ov::op::PadType::SAME_UPPER);
-    } else if (avgPoolOp) {
+        get_attributes(dilation, maxPoolOp_v8->get_dilations());
+        get_attributes(stride, maxPoolOp_v8->get_strides());
+        get_attributes(kernel, maxPoolOp_v8->get_kernel());
+        get_attributes(data_pad_begin, maxPoolOp_v8->get_pads_begin());
+        get_attributes(data_pad_end, maxPoolOp_v8->get_pads_end());
+        auto_pad = (maxPoolOp_v8->get_auto_pad() == ov::op::PadType::SAME_LOWER || maxPoolOp_v8->get_auto_pad() == ov::op::PadType::SAME_UPPER);
+    } else if (auto maxPoolOp_v1 = ov::as_type_ptr<const ov::op::v1::MaxPool>(op)) {
+        algorithm = PoolingMax;
+        exclude_pad = false;
+        get_attributes(stride, maxPoolOp_v1->get_strides());
+        get_attributes(kernel, maxPoolOp_v1->get_kernel());
+        get_attributes(data_pad_begin, maxPoolOp_v1->get_pads_begin());
+        get_attributes(data_pad_end, maxPoolOp_v1->get_pads_end());
+        dilation.resize(kernel.size(), 1);
+
+        auto_pad = (maxPoolOp_v1->get_auto_pad() == ov::op::PadType::SAME_LOWER || maxPoolOp_v1->get_auto_pad() == ov::op::PadType::SAME_UPPER);
+    } else if (auto avgPoolOp = ov::as_type_ptr<const ov::op::v1::AvgPool>(op)) {
         algorithm = PoolingAvg;
         exclude_pad = avgPoolOp->get_exclude_pad();

-        for (int i = 0; i < avgPoolOp->get_strides().size(); i++) {
-            stride.push_back(static_cast<ptrdiff_t>(avgPoolOp->get_strides()[i]));
-        }
-        for (int i = 0; i < avgPoolOp->get_kernel().size(); i++) {
-            kernel.push_back(static_cast<ptrdiff_t>(avgPoolOp->get_kernel()[i]));
-        }
-        for (int i = 0; i < avgPoolOp->get_pads_begin().size(); i++) {
-            data_pad_begin.push_back(static_cast<ptrdiff_t>(avgPoolOp->get_pads_begin()[i]));
-        }
-        for (int i = 0; i < avgPoolOp->get_pads_end().size(); i++) {
-            data_pad_end.push_back(static_cast<ptrdiff_t>(avgPoolOp->get_pads_end()[i]));
-        }
+        get_attributes(stride, avgPoolOp->get_strides());
+        get_attributes(kernel, avgPoolOp->get_kernel());
+        get_attributes(data_pad_begin, avgPoolOp->get_pads_begin());
+        get_attributes(data_pad_end, avgPoolOp->get_pads_end());
+        dilation.resize(kernel.size(), 1);
         auto_pad = (avgPoolOp->get_auto_pad() == ov::op::PadType::SAME_LOWER || avgPoolOp->get_auto_pad() == ov::op::PadType::SAME_UPPER);
     }
 }
@@ -94,20 +103,23 @@ std::vector<memory::format_tag> MKLDNNPoolingNode::getAvailableFormatsForDims(co
     return {memory::format_tag::any};
 }

-void MKLDNNPoolingNode::initEffectivePad(const Shape &inShape, const Shape &outShape) {
+void MKLDNNPoolingNode::initEffectiveAttributes(const Shape &inShape, const Shape &outShape) {
     effective_pad_begin = data_pad_begin;
     effective_pad_end.resize(data_pad_end.size());
+    effective_dilation.resize(dilation.size(), 0);

     const auto &inDims = inShape.getStaticDims();
     const auto &outDims = outShape.getStaticDims();

     for (int i = 0; i < effective_pad_end.size(); i++) {
         int krn = kernel[i];
+        int dil = dilation[i];
         int src = inDims[2 + i];
         int dst = outDims[2 + i];

-        int calc_dst = (src - krn + data_pad_begin[i]) / stride[i] + 1;
+        int calc_dst = (src - (1 + (krn - 1) * dil) + data_pad_begin[i]) / stride[i] + 1;
         effective_pad_end[i] = (dst - calc_dst) * stride[i];
+        effective_dilation[i] = dil - 1;
     }
 }

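The key line in this hunk is the output-size formula: with dilation `dil`, a kernel of size `krn` spans an effective window of `1 + (krn - 1) * dil` input elements, and OneDNN encodes dilation as `dilation - 1` (0 meaning dense), hence `effective_dilation[i] = dil - 1`. A standalone check of the arithmetic, with made-up sizes:

```cpp
#include <cassert>

// Output length of a 1-D pooling axis (begin padding only), mirroring the
// calc_dst expression from the hunk above.
int pooled_size(int src, int krn, int dil, int stride, int pad_begin) {
    const int effective_kernel = 1 + (krn - 1) * dil;  // window span in input
    return (src - effective_kernel + pad_begin) / stride + 1;
}

int main() {
    assert(pooled_size(10, 3, 2, 1, 0) == 6);  // krn=3, dil=2 covers 5 inputs: x.x.x
    assert(pooled_size(10, 3, 1, 2, 0) == 4);  // dil=1 reduces to (src - krn) / stride + 1
    return 0;
}
```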
@@ -120,8 +132,8 @@ void MKLDNNPoolingNode::getSupportedDescriptors() {
     if (getChildEdges().empty())
         IE_THROW() << "Incorrect number of output edges for layer " << getName();

-    inputPrecision = getOriginalInputPrecisionAtPort(0);
-    outputPrecision = getOriginalOutputPrecisionAtPort(0);
+    InferenceEngine::Precision inputPrecision = getOriginalInputPrecisionAtPort(0);
+    InferenceEngine::Precision outputPrecision = getOriginalOutputPrecisionAtPort(0);

     // WA: LPT transformation has WA which allows average pooling has I8/U8 output precision instead of FP32,
     // so we explicitly set output precision as FP32
@@ -151,7 +163,7 @@ void MKLDNNPoolingNode::getSupportedDescriptors() {
     if ((inputRank < 3) || (inputRank > 5))
         IE_THROW() << "Pooling layer. Unsupported mode. Only 3D, 4D and 5D blobs are supported as input.";

-    initEffectivePad(MemoryDescUtils::makeDummyShape(parentShape),
+    initEffectiveAttributes(MemoryDescUtils::makeDummyShape(parentShape),
                      MemoryDescUtils::makeDummyShape(childShape));

     if (inputPrecision == Precision::I8 || inputPrecision == Precision::U8) {
@@ -185,7 +197,7 @@ void MKLDNNPoolingNode::getSupportedDescriptors() {
     }
 }

-std::pair<std::vector<ptrdiff_t>, std::vector<ptrdiff_t>> MKLDNNPoolingNode::getPaddingFromNode(std::shared_ptr<ngraph::Node> node) const {
+std::pair<std::vector<ptrdiff_t>, std::vector<ptrdiff_t>> MKLDNNPoolingNode::getPaddingFromNode(std::shared_ptr<ov::Node> node) const {
     const auto convertPadding = [](const VectorDims &newPads) {
         std::vector<ptrdiff_t> pads(newPads.size());
         for (int i = 0; i < newPads.size(); i++) {
@@ -195,12 +207,16 @@ std::pair<std::vector<ptrdiff_t>, std::vector<ptrdiff_t>> MKLDNNPoolingNode::get
     };

     VectorDims padsBegin, padsEnd;
-    if (getAlgorithm() == PoolingMax) {
-        const auto pool = ngraph::as_type_ptr<const ngraph::op::v1::MaxPool>(opToShapeInfer);
+    if (isMaxPool8) {
+        const auto pool = ov::as_type_ptr<const ov::op::v8::MaxPool>(opToShapeInfer);
+        padsBegin = pool->get_pads_begin();
+        padsEnd = pool->get_pads_end();
+    } else if (getAlgorithm() == PoolingMax) {
+        const auto pool = ov::as_type_ptr<const ov::op::v1::MaxPool>(opToShapeInfer);
         padsBegin = pool->get_pads_begin();
         padsEnd = pool->get_pads_end();
     } else if (getAlgorithm() == PoolingAvg) {
-        const auto pool = ngraph::as_type_ptr<const ngraph::op::v1::AvgPool>(opToShapeInfer);
+        const auto pool = ov::as_type_ptr<const ov::op::v1::AvgPool>(opToShapeInfer);
         padsBegin = pool->get_pads_begin();
         padsEnd = pool->get_pads_end();
     }
@@ -231,12 +247,12 @@ void MKLDNNPoolingNode::prepareParams() {
         if (auto_pad) {
             std::tie(data_pad_begin, data_pad_end) = getPaddingFromNode(opToShapeInfer);
         }
-        initEffectivePad(inDesc->getShape(), outDesc->getShape());
+        initEffectiveAttributes(inDesc->getShape(), outDesc->getShape());
     }

     mkldnn::algorithm alg = getPoolingAlgorithm();
     MKLDNNDescriptor desc{createDescriptorInternal(in_candidate, out_candidate, alg)};
-    pooling_forward::primitive_desc prim_desc;
+    pooling_v2_forward::primitive_desc prim_desc;
     primitive_desc_iterator itpd = desc.createPrimitiveDescriptorIterator(getEngine(), *attr);

     while (static_cast<bool>(itpd)) {
@@ -250,7 +266,7 @@ void MKLDNNPoolingNode::prepareParams() {
         IE_THROW() << "Primitive descriptor was not found for node " << getName() << ".";
     }

-    prim.reset(new pooling_forward(prim_desc));
+    prim.reset(new pooling_v2_forward(prim_desc));

     auto src = getParentEdgesAtPort(0)[0]->getMemoryPtr()->GetPrimitive();
     auto dst = getChildEdgesAtPort(0)[0]->getMemoryPtr()->GetPrimitive();
@@ -296,7 +312,7 @@ mkldnn::algorithm MKLDNNPoolingNode::getPoolingAlgorithm() const {
     }
 }

-std::shared_ptr<pooling_forward::desc> MKLDNNPoolingNode::createDescriptorInternal(const mkldnn::memory::desc& in_candidate,
+std::shared_ptr<pooling_v2_forward::desc> MKLDNNPoolingNode::createDescriptorInternal(const mkldnn::memory::desc& in_candidate,
                                                                                    const mkldnn::memory::desc& out_candidate,
                                                                                    const mkldnn::algorithm alg) const {
     if (alg == mkldnn::algorithm::undef) {
@@ -306,11 +322,12 @@ std::shared_ptr<pooling_forward::desc> MKLDNNPoolingNode::createDescriptorIntern
     auto convert = [] (std::vector<ptrdiff_t> orig_dims) {
         return memory::dims(orig_dims.begin(), orig_dims.end());
     };
-    std::shared_ptr<pooling_forward::desc> desc_ptr(
-            new pooling_forward::desc(prop_kind::forward_scoring, alg,
+    std::shared_ptr<pooling_v2_forward::desc> desc_ptr(
+            new pooling_v2_forward::desc(prop_kind::forward_scoring, alg,
                                       in_candidate, out_candidate,
                                       convert(stride),
                                       convert(kernel),
+                                      convert(effective_dilation),
                                       convert(effective_pad_begin),
                                       convert(effective_pad_end)));

@@ -343,14 +360,12 @@ void MKLDNNPoolingNode::createDescriptor(const std::vector<MemoryDescPtr> &input
         if (auto_pad) {
             std::tie(data_pad_begin, data_pad_end) = getPaddingFromNode(opToShapeInfer);
         }
-        initEffectivePad(inDesc->getShape(), outDesc->getShape());
+        initEffectiveAttributes(inDesc->getShape(), outDesc->getShape());
     }
     auto dnnlOutDesc = MemoryDescUtils::convertToDnnlBlockedMemoryDesc(*outDesc);
     auto out_candidate = dnnlOutDesc.getDnnlDesc();

-    mkldnn::algorithm alg = getPoolingAlgorithm();
-    auto desc_ptr = createDescriptorInternal(in_candidate, out_candidate, alg);
+    auto desc_ptr = createDescriptorInternal(in_candidate, out_candidate, getPoolingAlgorithm());

     descs.emplace_back(desc_ptr);
 }
@@ -383,6 +398,18 @@ void MKLDNNPoolingNode::initSupportedPrimitiveDescriptors() {

             config.outConfs.push_back(dataConfig);
         }

+        // CPU plugin doesn't support second output of MaxPool-8, but anyway we should have out config for second port as stub
+        if (isMaxPool8) {
+            auto& creatorsMap = BlockedDescCreator::getCommonCreators();
+            PortConfig dataConfig;
+            dataConfig.inPlace = -1;
+            dataConfig.constant = false;
+            dataConfig.desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(config.outConfs.front().desc->getPrecision(), getOutputShapeAtPort(1));
+
+            config.outConfs.push_back(dataConfig);
+        }
+
         impl_desc_type impl_type = parse_impl_name(itpd.impl_info_str());

         supportedPrimitiveDescriptors.emplace_back(config, impl_type);
@@ -434,6 +461,18 @@ void MKLDNNPoolingNode::initDescriptor(const NodeConfig& config) {
             dataConfig.desc = getDstMemDesc(itpd, i);
             cfg.outConfs.push_back(dataConfig);
         }

+        // CPU plugin doesn't support second output of MaxPool-8, but anyway we should have out config for second port as stub
+        if (isMaxPool8) {
+            auto& creatorsMap = BlockedDescCreator::getCommonCreators();
+            PortConfig dataConfig;
+            dataConfig.inPlace = -1;
+            dataConfig.constant = false;
+            dataConfig.desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(cfg.outConfs.front().desc->getPrecision(), getOutputShapeAtPort(1));
+
+            cfg.outConfs.push_back(dataConfig);
+        }
+
         impl_desc_type impl_type = parse_impl_name(itpd.impl_info_str());
         if (selected_count == selectedPrimitiveDescriptorIndex) {
             if (impl_type != selectedPD->getImplementationType()) {
@ -14,7 +14,7 @@ namespace MKLDNNPlugin {

 class MKLDNNPoolingNode : public MKLDNNNode {
 public:
-    MKLDNNPoolingNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
+    MKLDNNPoolingNode(const std::shared_ptr<ov::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);

     void createDescriptor(const std::vector<MemoryDescPtr>& inputDesc,
                           const std::vector<MemoryDescPtr>& outputDesc) override;
@ -31,7 +31,7 @@ public:
     void prepareParams() override;
     void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

 protected:
     AttrPtr initPrimitiveAttr() const override;
@ -39,17 +39,19 @@ protected:
 private:
     void setPostOps(mkldnn::primitive_attr &attr, bool initWeights = false) const;

-    std::pair<std::vector<ptrdiff_t>, std::vector<ptrdiff_t>> getPaddingFromNode(std::shared_ptr<ngraph::Node> node) const;
-    void initEffectivePad(const Shape &inDims, const Shape &outDims);
+    std::pair<std::vector<ptrdiff_t>, std::vector<ptrdiff_t>> getPaddingFromNode(std::shared_ptr<ov::Node> node) const;
+    void initEffectiveAttributes(const Shape &inDims, const Shape &outDims);
     mkldnn::algorithm getPoolingAlgorithm() const;
-    std::shared_ptr<mkldnn::pooling_forward::desc> createDescriptorInternal(const mkldnn::memory::desc& in_candidate,
+    std::shared_ptr<mkldnn::pooling_v2_forward::desc> createDescriptorInternal(const mkldnn::memory::desc& in_candidate,
                                                                             const mkldnn::memory::desc& out_candidate,
                                                                             const mkldnn::algorithm alg) const;

     AttrPtr pAttr;

+    bool isMaxPool8 = false;
     bool auto_pad = false;
     bool exclude_pad = false;
+    std::vector<ptrdiff_t> dilation;
     std::vector<ptrdiff_t> stride;
     std::vector<ptrdiff_t> kernel;

@ -59,15 +61,16 @@ private:
     std::vector<ptrdiff_t> effective_pad_begin;
     std::vector<ptrdiff_t> effective_pad_end;

+    /// Effective dilation, used to pass the correct dilation to oneDNN.
+    /// For oneDNN, the default (no) dilation is a vector of zeros.
+    std::vector<ptrdiff_t> effective_dilation;

     /// Effective pad value. Describes how many zero elements are added to the input
     /// data tensor. May be less than the "effective padding" values.
     /// If the pooling window goes outside this padding, the averaging region
     /// is reduced.
     std::vector<ptrdiff_t> data_pad_begin;
     std::vector<ptrdiff_t> data_pad_end;

-    InferenceEngine::Precision inputPrecision = InferenceEngine::Precision::FP32;
-    InferenceEngine::Precision outputPrecision = InferenceEngine::Precision::FP32;
 };

 } // namespace MKLDNNPlugin
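The new `effective_dilation` member exists because oneDNN and the IR disagree on how "no dilation" is encoded: the IR convention uses ones, while oneDNN uses zeros, as the added comment notes. A standalone sketch of the conversion this implies (the helper name is hypothetical, not from the commit):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical helper: IR-style dilation uses 1 for "dense", oneDNN expects 0,
// so the effective value handed to oneDNN is (dilation - 1) per spatial axis.
std::vector<std::ptrdiff_t> to_onednn_dilation(const std::vector<std::ptrdiff_t>& ir_dilation) {
    std::vector<std::ptrdiff_t> effective;
    effective.reserve(ir_dilation.size());
    for (auto d : ir_dilation)
        effective.push_back(d - 1);
    return effective;
}

int main() {
    for (auto d : to_onednn_dilation({1, 1}))
        std::cout << d << ' ';  // prints: 0 0
    std::cout << '\n';
    return 0;
}
```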
@ -138,7 +138,7 @@ private:
     using Vmm = typename conditional3<isa == cpu::x64::sse41, Xbyak::Xmm, isa == cpu::x64::avx2,
             Xbyak::Ymm, Xbyak::Zmm>::type;
     size_t vlen = cpu_isa_traits<isa>::vlen;
-    bool planar_layout;
+    bool planar_layout = false;

     Xbyak::Address table_val(int index) { return ptr[reg_table + index * vlen]; }

@ -1136,7 +1136,7 @@ private:
     using Vmm = typename conditional3<isa == cpu::x64::sse41, Xbyak::Xmm, isa == cpu::x64::avx2,
             Xbyak::Ymm, Xbyak::Zmm>::type;
     size_t vlen = cpu_isa_traits<isa>::vlen;
-    bool planar_layout;
+    bool planar_layout = false;

     Xbyak::Reg64 reg_dst = r8;
     Xbyak::Reg64 reg_work_amount = r9;
@ -367,7 +367,7 @@ void MKLDNNRNN::initSeq(const std::shared_ptr<ngraph::Node>& op) {
     const auto rtInfo = op->get_rt_info();

     if (rtInfo.count("seqAxis")) {
-        nativeOrder = std::dynamic_pointer_cast<ngraph::VariantWrapper<int64_t>>(rtInfo.at("seqAxis"))->get() == 0;
+        nativeOrder = std::dynamic_pointer_cast<ov::RuntimeAttributeWrapper<int64_t>>(rtInfo.at("seqAxis"))->get() == 0;
     }
     out_data_dims.erase(out_data_dims.begin() + 1);

@ -114,13 +114,16 @@ void MKLDNNScatterUpdateNode::initSupportedPrimitiveDescriptors() {
                   << "which should be smaller than or equal to input tensor rank";
     }

-    SizeVector expectUpdateShape = {};
     size_t tupleRank = indicesRank - 1;
+    SizeVector expectUpdateShape(tupleRank + srcRank - k, 0);
+    int updateAxisIter = 0;
     for (size_t ri = 0; ri < tupleRank; ri++) {
-        expectUpdateShape.push_back(indicesDim[ri]);
+        expectUpdateShape[updateAxisIter] = indicesDim[ri];
+        updateAxisIter++;
     }
     for (size_t rd = k; rd < srcRank; rd++) {
-        expectUpdateShape.push_back(srcDataDim[rd]);
+        expectUpdateShape[updateAxisIter] = srcDataDim[rd];
+        updateAxisIter++;
     }
     if (expectUpdateShape.size() != updateRank) {
         IE_THROW() << errorPrefix << " do not have matched tensor rank relationship for input, indices and update";
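The rewrite above replaces push_back growth with a vector preallocated to its final size (`tupleRank + srcRank - k`) and positional writes. A self-contained sketch of the same pattern with example shapes (not code from the commit):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

using SizeVector = std::vector<std::size_t>;

int main() {
    SizeVector indicesDim{2, 3, 2};    // indices shape; last dim is the coordinate tuple
    SizeVector srcDataDim{5, 6, 7, 8}; // data shape
    std::size_t k = indicesDim.back(); // tuple length
    std::size_t tupleRank = indicesDim.size() - 1;
    std::size_t srcRank = srcDataDim.size();

    // Final size is known up front, so size once and fill positionally.
    SizeVector expectUpdateShape(tupleRank + srcRank - k, 0);
    std::size_t updateAxisIter = 0;
    for (std::size_t ri = 0; ri < tupleRank; ri++)
        expectUpdateShape[updateAxisIter++] = indicesDim[ri];
    for (std::size_t rd = k; rd < srcRank; rd++)
        expectUpdateShape[updateAxisIter++] = srcDataDim[rd];

    for (auto d : expectUpdateShape)
        std::cout << d << ' ';  // prints: 2 3 7 8
    std::cout << '\n';
    return 0;
}
```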
@ -315,13 +318,16 @@ void MKLDNNScatterUpdateNode::execute(mkldnn::stream strm) {
     SizeVector updateDim = getParentEdgeAt(UPDATE_ID)->getMemory().getStaticDims();
     size_t indicesRank = indicesDim.size();
     size_t updateRank = updateDim.size();
-    SizeVector expectUpdateShape = {};
+    SizeVector expectUpdateShape(srcRank + indicesRank - 1, 0);
+    int axisIter = 0;
     for (size_t rs = 0; rs < srcRank; rs++) {
         if (rs != axis) {
-            expectUpdateShape.push_back(srcDataDim[rs]);
+            expectUpdateShape[axisIter] = srcDataDim[rs];
+            axisIter++;
         } else {
             for (size_t ri = 0; ri < indicesRank; ri++) {
-                expectUpdateShape.push_back(indicesDim[ri]);
+                expectUpdateShape[axisIter] = indicesDim[ri];
+                axisIter++;
             }
         }
     }
@ -92,7 +92,8 @@ private:
     bool sort_value = false;
     bool mode_max = true;

-    int dim, before_num;
+    int dim = 0;
+    int before_num = 0;

     std::string errorPrefix;

@ -13,7 +13,7 @@ namespace MKLDNNPlugin {
 inline std::string getRTInfoValue(const std::map<std::string, ov::Any>& rtInfo, std::string paramName) {
     auto it = rtInfo.find(paramName);
     if (it != rtInfo.end()) {
-        auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+        auto value = std::dynamic_pointer_cast<ov::RuntimeAttributeImpl<std::string>>(it->second);
         return value->get();
     } else {
         return "";
@ -23,10 +23,13 @@ inline std::string getRTInfoValue(const std::map<std::string, ov::Any>& rtInfo,
 inline std::string getPrimitivesPriorityValue(const std::shared_ptr<ngraph::Node> &node) {
     const auto &rtInfo = node->get_rt_info();

-    if (!rtInfo.count(ov::PrimitivesPriority::get_type_info_static())) return "";
+    auto it_info = rtInfo.find(ov::PrimitivesPriority::get_type_info_static());

-    const auto &attr = rtInfo.at(ov::PrimitivesPriority::get_type_info_static());
-    return ngraph::as_type_ptr<ov::PrimitivesPriority>(attr)->get();
+    if (it_info == rtInfo.end()) {
+        return {};
+    }
+
+    return it_info->second.as<ov::PrimitivesPriority>().value;
 }

 template <typename T>
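The new lookup style goes through `ov::Any`: find the entry by the attribute's type info, return an empty string when absent, and otherwise unwrap with `as<T>()` and read `.value`. A minimal sketch of that find-then-as shape, assuming the `ov::Any` API from `<openvino/core/any.hpp>` (the function name, key, and stored value here are illustrative):

```cpp
#include <openvino/core/any.hpp>
#include <iostream>
#include <map>
#include <string>

// Generic form of the lookup adopted above: absent keys yield an empty
// result instead of throwing, present ones are unwrapped with as<T>().
std::string find_string(const std::map<std::string, ov::Any>& rt_info, const std::string& key) {
    auto it = rt_info.find(key);
    if (it == rt_info.end()) {
        return {};
    }
    return it->second.as<std::string>();  // typed unwrap of the type-erased value
}

int main() {
    std::map<std::string, ov::Any> rt_info;
    rt_info["PrimitivesPriority"] = std::string("cpu:jit_avx512");
    std::cout << find_string(rt_info, "PrimitivesPriority") << "\n";
    std::cout << find_string(rt_info, "missing").size() << "\n";  // 0
    return 0;
}
```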
@ -11,25 +11,25 @@
 using namespace ngraph;
 using namespace ov;

-MLKDNNInputMemoryFormats::~MLKDNNInputMemoryFormats() = default;
+MKLDNNInputMemoryFormats::~MKLDNNInputMemoryFormats() = default;

-std::string ngraph::getMLKDNNInputMemoryFormats(const std::shared_ptr<ngraph::Node>& node) {
-    auto it_info = node->get_rt_info().find(MLKDNNInputMemoryFormatsAttr);
+std::string ngraph::getMKLDNNInputMemoryFormats(const std::shared_ptr<ngraph::Node>& node) {
+    auto it_info = node->get_rt_info().find(MKLDNNInputMemoryFormats::get_type_info_static());
     if (it_info != node->get_rt_info().end()) {
-        if (auto ptr = it_info->second.as<std::shared_ptr<MLKDNNInputMemoryFormats>>()) {
-            return ptr->getMemoryFormats();
+        if (it_info->second.is<MKLDNNInputMemoryFormats>()) {
+            return it_info->second.as<MKLDNNInputMemoryFormats>().getMemoryFormats();
         }
     }
     return {};
 }

-MLKDNNOutputMemoryFormats::~MLKDNNOutputMemoryFormats() = default;
+MKLDNNOutputMemoryFormats::~MKLDNNOutputMemoryFormats() = default;

-std::string ngraph::getMLKDNNOutputMemoryFormats(const std::shared_ptr<ngraph::Node>& node) {
-    auto it_info = node->get_rt_info().find(MLKDNNOutputMemoryFormatsAttr);
+std::string ngraph::getMKLDNNOutputMemoryFormats(const std::shared_ptr<ngraph::Node>& node) {
+    auto it_info = node->get_rt_info().find(MKLDNNOutputMemoryFormats::get_type_info_static());
     if (it_info != node->get_rt_info().end()) {
-        if (auto ptr = it_info->second.as<std::shared_ptr<MLKDNNOutputMemoryFormats>>()) {
-            return ptr->getMemoryFormats();
+        if (it_info->second.is<MKLDNNOutputMemoryFormats>()) {
+            return it_info->second.as<MKLDNNOutputMemoryFormats>().getMemoryFormats();
         }
     }
     return {};
@ -12,33 +12,31 @@

 namespace ngraph {

-constexpr const char *MLKDNNInputMemoryFormatsAttr = "MLKDNNInputMemoryFormats";
-constexpr const char *MLKDNNOutputMemoryFormatsAttr = "MLKDNNOutputMemoryFormats";
+constexpr const char *MKLDNNInputMemoryFormatsAttr = "MKLDNNInputMemoryFormats";
+constexpr const char *MKLDNNOutputMemoryFormatsAttr = "MKLDNNOutputMemoryFormats";

 template<typename MemoryFormat>
-class MLKDNNMemoryFormats : public Variant {
+class MKLDNNMemoryFormats : public ov::RuntimeAttribute {
 protected:
     std::string memory_format;

 public:
-    MLKDNNMemoryFormats() = default;
-    explicit MLKDNNMemoryFormats(const std::string &_memory_format) : memory_format(_memory_format) {}
+    MKLDNNMemoryFormats() = default;
+    explicit MKLDNNMemoryFormats(const std::string &_memory_format) : memory_format(_memory_format) {}
     std::string getMemoryFormats() const { return memory_format; }

-    ov::Any merge(const ngraph::NodeVector & nodes) override {
+    ov::Any merge(const ngraph::NodeVector & nodes) const override {
         std::set<std::string> unique_mem_format;

         for (auto &node : nodes) {
-            auto it_info = node->get_rt_info().find(MemoryFormat::get_type_info_static().name);
+            auto it_info = node->get_rt_info().find(MemoryFormat::get_type_info_static());
             if (it_info != node->get_rt_info().end()) {
-                if (auto ptr = it_info->second.template as<std::shared_ptr<MemoryFormat>>()) {
-                    std::string mem_format = ptr->getMemoryFormats();
+                std::string mem_format = it_info->second.template as<MemoryFormat>().getMemoryFormats();
                 if (!mem_format.empty()) {
                     unique_mem_format.insert(mem_format);
                 }
             }
         }
-            }

         if (unique_mem_format.size() > 1) {
             throw ngraph::ngraph_error(
@ -50,28 +48,28 @@ public:
         if (unique_mem_format.size() == 1) {
             final_mem_format = *unique_mem_format.begin();
         }
-        return std::make_shared<MemoryFormat>(final_mem_format);
+        return MemoryFormat{final_mem_format};
     }
 };


-class MLKDNNInputMemoryFormats : public MLKDNNMemoryFormats<MLKDNNInputMemoryFormats> {
+class MKLDNNInputMemoryFormats : public MKLDNNMemoryFormats<MKLDNNInputMemoryFormats> {
 public:
-    OPENVINO_RTTI(MLKDNNInputMemoryFormatsAttr);
-    MLKDNNInputMemoryFormats() = default;
-    explicit MLKDNNInputMemoryFormats(const std::string &_memory_format) : MLKDNNMemoryFormats(_memory_format) {}
-    ~MLKDNNInputMemoryFormats() override;
+    OPENVINO_RTTI(MKLDNNInputMemoryFormatsAttr);
+    MKLDNNInputMemoryFormats() = default;
+    explicit MKLDNNInputMemoryFormats(const std::string &_memory_format) : MKLDNNMemoryFormats(_memory_format) {}
+    ~MKLDNNInputMemoryFormats() override;
 };

-std::string getMLKDNNInputMemoryFormats(const std::shared_ptr<ngraph::Node>& node);
+std::string getMKLDNNInputMemoryFormats(const std::shared_ptr<ngraph::Node>& node);

-class MLKDNNOutputMemoryFormats : public MLKDNNMemoryFormats<MLKDNNOutputMemoryFormats> {
+class MKLDNNOutputMemoryFormats : public MKLDNNMemoryFormats<MKLDNNOutputMemoryFormats> {
 public:
-    OPENVINO_RTTI(MLKDNNOutputMemoryFormatsAttr);
-    MLKDNNOutputMemoryFormats() = default;
-    explicit MLKDNNOutputMemoryFormats(const std::string &_memory_format) : MLKDNNMemoryFormats(_memory_format) {}
-    ~MLKDNNOutputMemoryFormats() override;
+    OPENVINO_RTTI(MKLDNNOutputMemoryFormatsAttr);
+    MKLDNNOutputMemoryFormats() = default;
+    explicit MKLDNNOutputMemoryFormats(const std::string &_memory_format) : MKLDNNMemoryFormats(_memory_format) {}
+    ~MKLDNNOutputMemoryFormats() override;
 };
-std::string getMLKDNNOutputMemoryFormats(const std::shared_ptr<ngraph::Node>& node);
+std::string getMKLDNNOutputMemoryFormats(const std::shared_ptr<ngraph::Node>& node);

 } // namespace ngraph
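Summarizing the migration this header illustrates: an attribute now derives from `ov::RuntimeAttribute`, registers RTTI, and is stored in `rt_info` by value, so presence is tested with `is<T>()` and reads use `as<T>()`. A minimal sketch under those assumptions (`DemoFormats` is a hypothetical attribute, not one added here; the string-keyed map and the `DiscreteTypeInfo`-to-string conversion mirror the usage in the diff):

```cpp
#include <openvino/core/runtime_attribute.hpp>
#include <iostream>
#include <map>
#include <string>

// Hypothetical value-semantics runtime attribute in the new style.
class DemoFormats : public ov::RuntimeAttribute {
public:
    OPENVINO_RTTI("DemoFormats");
    DemoFormats() = default;
    explicit DemoFormats(const std::string& fmt) : formats(fmt) {}
    std::string formats;
};

int main() {
    std::map<std::string, ov::Any> rt_info;  // shape of a node's rt_info map
    rt_info[DemoFormats::get_type_info_static()] = DemoFormats{"nChw16c"};  // stored by value

    auto it = rt_info.find(DemoFormats::get_type_info_static());
    if (it != rt_info.end() && it->second.is<DemoFormats>()) {
        std::cout << it->second.as<DemoFormats>().formats << "\n";
    }
    return 0;
}
```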
@ -16,7 +16,6 @@
 #include <set>

 #include <ngraph/node.hpp>
-#include <ngraph/variant.hpp>

 namespace ngraph {

@ -28,6 +27,11 @@ namespace ngraph {
 class Mask : public std::vector<std::set<uint64_t>>,
              public std::enable_shared_from_this<Mask> {
 public:
+    static const ::ov::DiscreteTypeInfo& get_type_info_static() {
+        static const ::ov::DiscreteTypeInfo type_info{"Mask", 0, "0"};
+        return type_info;
+    }
+
     using Ptr = std::shared_ptr<Mask>;

     Mask() = default;
@ -180,6 +184,7 @@ public:
             item.clear();
         }
     }

 private:
     bool m_is_shape_like{false};

@ -199,22 +204,3 @@ Mask::Ptr getMask(const Output<Node> & output);
 void setMask(Output<Node> output, const Mask::Ptr & mask);

 } // namespace ngraph
-
-namespace ov {
-
-extern template class VariantImpl<ngraph::Mask::Ptr>;
-
-template<>
-class VariantWrapper<ngraph::Mask::Ptr> : public VariantImpl<ngraph::Mask::Ptr> {
-public:
-    OPENVINO_RTTI("VariantWrapper<Mask::Ptr>");
-    BWDCMP_RTTI_DECLARATION;
-
-    static std::shared_ptr<VariantWrapper<ngraph::Mask::Ptr>> create(const value_type & value) {
-        return std::make_shared<VariantWrapper<ngraph::Mask::Ptr>>(value);
-    }
-
-    explicit VariantWrapper(const value_type &value) : VariantImpl<value_type>(value) {}
-};
-
-} // namespace ov
@ -14,28 +14,22 @@ namespace ngraph {

 Mask::Ptr getMask(const Output<const Node> & output) {
     auto &rtInfo = output.get_rt_info();
-    using MaskWrapper = VariantWrapper<Mask::Ptr>;
-
-    if (!rtInfo.count(MaskWrapper::get_type_info_static().name)) return nullptr;
-
-    const auto &attr = rtInfo.at(MaskWrapper::get_type_info_static().name);
-    return ov::as_type_ptr<MaskWrapper>(attr)->get();
+    if (!rtInfo.count(Mask::get_type_info_static())) return nullptr;
+
+    const auto &attr = rtInfo.at(Mask::get_type_info_static());
+    return attr.as<Mask::Ptr>();
 }

 Mask::Ptr getMask(const Output<Node> & output) {
     auto &rtInfo = output.get_rt_info();
-    using MaskWrapper = VariantWrapper<Mask::Ptr>;
-
-    if (!rtInfo.count(MaskWrapper::get_type_info_static().name)) return nullptr;
-
-    const auto &attr = rtInfo.at(MaskWrapper::get_type_info_static().name);
-    return ov::as_type_ptr<MaskWrapper>(attr)->get();
+    if (!rtInfo.count(Mask::get_type_info_static())) return nullptr;
+
+    const auto &attr = rtInfo.at(Mask::get_type_info_static());
+    return attr.as<Mask::Ptr>();
 }

 void setMask(Output<Node> output, const Mask::Ptr & mask) {
     auto &rtInfo = output.get_rt_info();
-    using MaskWrapper = VariantWrapper<Mask::Ptr>;
-    rtInfo[MaskWrapper::get_type_info_static().name] = MaskWrapper::create(mask);
+    rtInfo[Mask::get_type_info_static()] = mask;
 }

 std::ostream & operator<< (std::ostream & out, const Mask & mask) {
@ -54,11 +48,3 @@ std::ostream & operator<< (std::ostream & out, const Mask & mask) {
 }

 } // namespace ngraph
-
-namespace ov {
-
-template class ngraph::VariantImpl<ngraph::Mask::Ptr>;
-
-BWDCMP_RTTI_DEFINITION(VariantWrapper<ngraph::Mask::Ptr>);
-
-} // namespace ov
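After this change the pruning mask is stored as a plain `Mask::Ptr` keyed by `Mask::get_type_info_static()`, with no wrapper object in between. A usage sketch of the new `setMask`/`getMask` pair (include paths are illustrative; assumes the declarations from this commit):

```cpp
#include <ngraph/opsets/opset8.hpp>
#include <pruning/mask_attribute.hpp>  // illustrative path to Mask/getMask/setMask
#include <cassert>
#include <memory>

int main() {
    using namespace ngraph;
    auto param = std::make_shared<opset8::Parameter>(element::f32, Shape{4});

    auto mask = std::make_shared<Mask>();  // default-constructed pruning mask
    setMask(param->output(0), mask);       // rtInfo[Mask::get_type_info_static()] = mask

    auto stored = getMask(param->output(0));
    assert(stored == mask);                // same shared object, no wrapper copies
    return 0;
}
```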
@ -40,7 +40,7 @@ protected:
     ov::frontend::FrontEnd::Ptr FE;
     ov::frontend::InputModel::Ptr inputModel;

-    ov::VariantVector params{ov::make_variant(&modelStream)};
+    ov::RuntimeAttributeVector params{&modelStream};

     FE = manager.load_by_model(params);
     if (FE)
@ -119,7 +119,7 @@ TEST_F(RTInfoDeserialization, NodeV10) {
     ASSERT_NE(nullptr, f);

     auto check_rt_info = [](const RTMap& info) {
-        const std::string& key = VariantWrapper<ngraph::FusedNames>::get_type_info_static();
+        const std::string& key = ngraph::FusedNames::get_type_info_static();
         EXPECT_FALSE(info.count(key));

         const std::string& key_old_api_order = ov::OldApiMapOrder::get_type_info_static();
@ -278,7 +278,7 @@ TEST_F(RTInfoDeserialization, InputAndOutputV10) {
     ASSERT_NE(nullptr, f);

     auto check_rt_info = [](const RTMap& info) {
-        const std::string& key = VariantWrapper<ngraph::FusedNames>::get_type_info_static();
+        const std::string& key = ngraph::FusedNames::get_type_info_static();
         ASSERT_FALSE(info.count(key));
     };

@ -421,27 +421,22 @@ TEST_F(RTInfoDeserialization, NodeV11) {
     ASSERT_NE(nullptr, f);

     auto check_fused_names = [](const RTMap& info, const std::string& names) {
-        const std::string& key = VariantWrapper<ngraph::FusedNames>::get_type_info_static();
+        const std::string& key = ngraph::FusedNames::get_type_info_static();
         ASSERT_TRUE(info.count(key));
-        auto fused_names_attr = std::dynamic_pointer_cast<VariantWrapper<ngraph::FusedNames>>(info.at(key));
-        ASSERT_TRUE(fused_names_attr);
-        EXPECT_EQ(fused_names_attr->get().getNames(), names);
+        auto fused_names_attr = info.at(key).as<ngraph::FusedNames>();
+        EXPECT_EQ(fused_names_attr.getNames(), names);
     };

     auto check_old_api_map_order = [](const RTMap & info, const std::vector<uint64_t> & order) {
         const std::string & old_api_map_key = ov::OldApiMapOrder::get_type_info_static();
         ASSERT_TRUE(info.count(old_api_map_key));
-        auto old_api_map_attr = std::dynamic_pointer_cast<ov::OldApiMapOrder>(info.at(old_api_map_key));
-        ASSERT_TRUE(old_api_map_attr);
-        auto old_api_map_attr_val = old_api_map_attr->get();
+        auto old_api_map_attr_val = info.at(old_api_map_key).as<ov::OldApiMapOrder>().value;
         EXPECT_EQ(old_api_map_attr_val, order);
     };
     auto check_old_api_map_type = [](const RTMap & info, const ngraph::element::Type& type) {
         const std::string & old_api_map_key = ov::OldApiMapElementType::get_type_info_static();
         ASSERT_TRUE(info.count(old_api_map_key));
-        auto old_api_map_attr = std::dynamic_pointer_cast<ov::OldApiMapElementType>(info.at(old_api_map_key));
-        ASSERT_TRUE(old_api_map_attr);
-        auto old_api_map_attr_val = old_api_map_attr->get();
+        auto old_api_map_attr_val = info.at(old_api_map_key).as<ov::OldApiMapElementType>().value;
         EXPECT_EQ(old_api_map_attr_val, type);
     };

@ -501,8 +496,7 @@ TEST_F(RTInfoDeserialization, NodeV11) {
     auto round = std::make_shared<opset8::Round>(convert_param,
                                                  ngraph::opset8::Round::RoundMode::HALF_TO_EVEN);
     // TODO: runtime information should migrate as well?
-    round->get_rt_info()[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
-            std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames("Round1,Round2"));
+    round->get_rt_info()[ngraph::FusedNames::get_type_info_static()] = ngraph::FusedNames("Round1,Round2");

     // TODO: No guarantee that exactly 'convert, then transpose' will be added by implicit post-processing
     auto constant_result = std::make_shared<opset8::Constant>(ngraph::element::u64,
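The test changes above all follow one recipe: assign the attribute object directly into `rt_info`, then read it back through `ov::Any::as<T>()`. A condensed round trip for `FusedNames` under the same assumptions (header path is illustrative):

```cpp
#include <transformations/rt_info/fused_names_attribute.hpp>  // illustrative path
#include <ngraph/opsets/opset8.hpp>
#include <iostream>

int main() {
    auto node = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1});

    // Assign by value: no VariantWrapper, no make_shared.
    node->get_rt_info()[ngraph::FusedNames::get_type_info_static()] =
            ngraph::FusedNames("Round1,Round2");

    // Read back: the type info converts to the string key, as<T>() unwraps.
    const std::string& key = ngraph::FusedNames::get_type_info_static();
    std::cout << node->get_rt_info().at(key).as<ngraph::FusedNames>().getNames() << "\n";
    return 0;
}
```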
@ -722,20 +716,20 @@ TEST_F(RTInfoDeserialization, InputAndOutputV11) {
     check_version(f, 11);

     auto check_fused_names = [](const RTMap& info, const std::string& names) {
-        const std::string& key = VariantWrapper<ngraph::FusedNames>::get_type_info_static();
+        const std::string& key = ngraph::FusedNames::get_type_info_static();
         ASSERT_TRUE(info.count(key));
-        auto fused_names_attr = std::dynamic_pointer_cast<VariantWrapper<ngraph::FusedNames>>(info.at(key));
-        ASSERT_TRUE(fused_names_attr);
-        ASSERT_EQ(fused_names_attr->get().getNames(), names);
+        auto fused_names_attr = info.at(key).as<ngraph::FusedNames>();
+        ASSERT_EQ(fused_names_attr.getNames(), names);
     };


     auto param = f->get_parameters()[0];
     check_fused_names(param->output(0).get_rt_info(), "test1,test2");
     EXPECT_EQ(param->get_layout(), "NCHW");
-    auto var0 = std::dynamic_pointer_cast<ov::preprocess::TensorInfoMemoryType>(
-        f->input(0).get_rt_info()[ov::preprocess::TensorInfoMemoryType::get_type_info_static()]);
-    EXPECT_EQ(var0->get(), "test_memory_type");
+    auto var0 = f->input(0).get_rt_info()
+                    .at(ov::preprocess::TensorInfoMemoryType::get_type_info_static())
+                    .as<ov::preprocess::TensorInfoMemoryType>().value;
+    EXPECT_EQ(var0, "test_memory_type");

     auto result = f->get_result();
     check_fused_names(result->input(0).get_rt_info(), "test5,test6");
@ -34,7 +34,7 @@ protected:
     ov::frontend::FrontEnd::Ptr FE;
     ov::frontend::InputModel::Ptr inputModel;

-    ov::VariantVector params{ov::make_variant(model_path), ov::make_variant(weights_path)};
+    ov::RuntimeAttributeVector params{model_path, weights_path};

     FE = manager.load_by_model(params);
     if (FE)
@ -52,15 +52,11 @@ private:

 TEST_F(RTInfoSerializationTest, all_attributes_latest) {
     auto init_info = [](RTMap & info) {
-        info[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
-                std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames("add"));
-        info[ov::PrimitivesPriority::get_type_info_static()] =
-                std::make_shared<ov::PrimitivesPriority>("priority");
-        info[ov::OldApiMapOrder::get_type_info_static()] =
-                std::make_shared<ov::OldApiMapOrder>(std::vector<uint64_t>{0, 2, 3, 1});
-        info[ov::OldApiMapElementType::get_type_info_static()] = std::make_shared<ov::OldApiMapElementType>(
-                ngraph::element::Type_t::f32);
-        info[ov::Decompression::get_type_info_static()] = std::make_shared<ov::Decompression>();
+        info[ngraph::FusedNames::get_type_info_static()] = ngraph::FusedNames("add");
+        info[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority("priority");
+        info[ov::OldApiMapOrder::get_type_info_static()] = ov::OldApiMapOrder(std::vector<uint64_t>{0, 2, 3, 1});
+        info[ov::OldApiMapElementType::get_type_info_static()] = ov::OldApiMapElementType(ngraph::element::Type_t::f32);
+        info[ov::Decompression::get_type_info_static()] = ov::Decompression{};
     };

     std::shared_ptr<ngraph::Function> function;
@ -85,36 +81,29 @@ TEST_F(RTInfoSerializationTest, all_attributes_latest) {
     ASSERT_NE(nullptr, f);

     auto check_info = [](const RTMap & info) {
-        const std::string & key = VariantWrapper<ngraph::FusedNames>::get_type_info_static();
+        const std::string & key = ngraph::FusedNames::get_type_info_static();
         ASSERT_TRUE(info.count(key));
-        auto fused_names_attr = std::dynamic_pointer_cast<VariantWrapper<ngraph::FusedNames>>(info.at(key));
-        ASSERT_TRUE(fused_names_attr);
-        ASSERT_EQ(fused_names_attr->get().getNames(), "add");
+        auto fused_names_attr = info.at(key).as<ngraph::FusedNames>();
+        ASSERT_EQ(fused_names_attr.getNames(), "add");

         const std::string & pkey = ov::PrimitivesPriority::get_type_info_static();
         ASSERT_TRUE(info.count(pkey));
-        auto primitives_priority_attr = std::dynamic_pointer_cast<ov::PrimitivesPriority>(info.at(pkey));
-        ASSERT_TRUE(primitives_priority_attr);
-        ASSERT_EQ(primitives_priority_attr->get(), "priority");
+        auto primitives_priority_attr = info.at(pkey).as<ov::PrimitivesPriority>().value;
+        ASSERT_EQ(primitives_priority_attr, "priority");

         const std::string & old_api_map_key_order = ov::OldApiMapOrder::get_type_info_static();
         ASSERT_TRUE(info.count(old_api_map_key_order));
-        auto old_api_map_attr = std::dynamic_pointer_cast<ov::OldApiMapOrder>(info.at(old_api_map_key_order));
-        ASSERT_TRUE(old_api_map_attr);
-        auto old_api_map_attr_val = old_api_map_attr->get();
+        auto old_api_map_attr_val = info.at(old_api_map_key_order).as<ov::OldApiMapOrder>().value;
         ASSERT_EQ(old_api_map_attr_val, std::vector<uint64_t>({0, 2, 3, 1}));

         const std::string & old_api_map_key = ov::OldApiMapElementType::get_type_info_static();
         ASSERT_TRUE(info.count(old_api_map_key));
-        auto old_api_map_type = std::dynamic_pointer_cast<ov::OldApiMapElementType>(info.at(old_api_map_key));
-        ASSERT_TRUE(old_api_map_type);
-        auto old_api_map_type_val = old_api_map_type->get();
+        auto old_api_map_type_val = info.at(old_api_map_key).as<ov::OldApiMapElementType>().value;
         ASSERT_EQ(old_api_map_type_val, ngraph::element::Type_t::f32);

         const std::string& dkey = ov::Decompression::get_type_info_static();
         ASSERT_TRUE(info.count(dkey));
-        auto decompression_attr = std::dynamic_pointer_cast<ov::Decompression>(info.at(dkey));
-        ASSERT_TRUE(decompression_attr);
+        ASSERT_NO_THROW(info.at(dkey).as<ov::Decompression>());
     };

     auto add = f->get_results()[0]->get_input_node_ptr(0);
|
|||||||
|
|
||||||
TEST_F(RTInfoSerializationTest, all_attributes_v10) {
|
TEST_F(RTInfoSerializationTest, all_attributes_v10) {
|
||||||
auto init_info = [](RTMap & info) {
|
auto init_info = [](RTMap & info) {
|
||||||
info[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
|
info[ngraph::FusedNames::get_type_info_static()] = ngraph::FusedNames("add");
|
||||||
std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames("add"));
|
info["PrimitivesPriority"] = ov::PrimitivesPriority("priority");
|
||||||
info[ov::PrimitivesPriority::get_type_info_static()] =
|
|
||||||
std::make_shared<ov::PrimitivesPriority>("priority");
|
|
||||||
};
|
};
|
||||||
|
|
||||||
std::shared_ptr<ngraph::Function> function;
|
std::shared_ptr<ngraph::Function> function;
|
||||||
@ -154,7 +141,7 @@ TEST_F(RTInfoSerializationTest, all_attributes_v10) {
     ASSERT_NE(nullptr, f);

     auto check_info = [](const RTMap & info) {
-        const std::string & key = VariantWrapper<ngraph::FusedNames>::get_type_info_static();
+        const std::string & key = ngraph::FusedNames::get_type_info_static();
         ASSERT_FALSE(info.count(key));
     };

@ -168,10 +155,8 @@ TEST_F(RTInfoSerializationTest, all_attributes_v10) {

 TEST_F(RTInfoSerializationTest, all_attributes_v11) {
     auto init_info = [](RTMap & info) {
-        info[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
-                std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames("add"));
-        info[ov::PrimitivesPriority::get_type_info_static()] =
-                std::make_shared<ov::PrimitivesPriority>("priority");
+        info[ngraph::FusedNames::get_type_info_static()] = ngraph::FusedNames("add");
+        info[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority("priority");
     };

     std::shared_ptr<ngraph::Function> function;
@ -199,24 +184,23 @@ TEST_F(RTInfoSerializationTest, all_attributes_v11) {
     ASSERT_NE(nullptr, f);

     auto check_info = [](const RTMap & info) {
-        const std::string & key = VariantWrapper<ngraph::FusedNames>::get_type_info_static();
+        const std::string & key = ngraph::FusedNames::get_type_info_static();
         ASSERT_TRUE(info.count(key));
-        auto fused_names_attr = std::dynamic_pointer_cast<VariantWrapper<ngraph::FusedNames>>(info.at(key));
-        ASSERT_TRUE(fused_names_attr);
-        ASSERT_EQ(fused_names_attr->get().getNames(), "add");
+        auto fused_names_attr = info.at(key).as<ngraph::FusedNames>();
+        ASSERT_EQ(fused_names_attr.getNames(), "add");

         const std::string & pkey = ov::PrimitivesPriority::get_type_info_static();
         ASSERT_TRUE(info.count(pkey));
-        auto primitives_priority_attr = std::dynamic_pointer_cast<ov::PrimitivesPriority>(info.at(pkey));
-        ASSERT_TRUE(primitives_priority_attr);
-        ASSERT_EQ(primitives_priority_attr->get(), "priority");
+        auto primitives_priority_attr = info.at(pkey).as<ov::PrimitivesPriority>().value;
+        ASSERT_EQ(primitives_priority_attr, "priority");
     };

     auto add = f->get_results()[0]->get_input_node_ptr(0);
     EXPECT_EQ(f->get_parameters()[0]->get_layout(), "NCHW");
-    auto var0 = std::dynamic_pointer_cast<ov::preprocess::TensorInfoMemoryType>(
-        f->input(0).get_rt_info()[ov::preprocess::TensorInfoMemoryType::get_type_info_static()]);
-    EXPECT_EQ(var0->get(), "test_memory_type");
+    auto var0 = f->input(0).get_rt_info()
+                    .at(ov::preprocess::TensorInfoMemoryType::get_type_info_static())
+                    .as<ov::preprocess::TensorInfoMemoryType>().value;
+    EXPECT_EQ(var0, "test_memory_type");
     EXPECT_EQ(f->get_results()[0]->get_layout(), "????");
     check_info(add->get_rt_info());
     check_info(add->input(0).get_rt_info());
@ -199,9 +199,9 @@ public:
             testValues.result.convert2,
             testValues.result.dequantization2,
             {
-                make_shared_attribute_ptr<PrecisionPreservedAttribute>(true),
-                make_shared_attribute_ptr<IntervalsAlignmentAttribute>(interval, 256),
-                make_shared_attribute_ptr<QuantizationAlignmentAttribute>(false)
+                PrecisionPreservedAttribute(true),
+                IntervalsAlignmentAttribute(interval, 256),
+                QuantizationAlignmentAttribute(false)
            },
            testValues.result.precisionAfterOperation,
            testValues.result.dequantizationAfter,
@ -235,13 +235,13 @@ TEST_P(ConcatTransformation, CompareFunctions) {
     ConcatTransformationTestValues testValues = std::get<2>(GetParam());
     const auto actualFakeQuantizes = LayerTransformation::get<opset1::FakeQuantize>(actualFunction);
     if (testValues.axis == 1) {
-        ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame<std::shared_ptr<PrecisionsAttribute>>(actualFakeQuantizes)) <<
+        ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame<PrecisionsAttribute>(actualFakeQuantizes)) <<
             "PrecisionsAttribute are not the same";

        if (testValues.checkIntervalsAlignmentAttributes) {
            auto operations = LayerTransformation::get<opset1::Concat>(actualFunction);
            operations.insert(operations.end(), actualFakeQuantizes.begin(), actualFakeQuantizes.end());
-            ASSERT_TRUE(checkIfAttributesSharedValuesAreTheSame<std::shared_ptr<IntervalsAlignmentAttribute>>(operations)) <<
+            ASSERT_TRUE(checkIfAttributesSharedValuesAreTheSame<IntervalsAlignmentAttribute>(operations)) <<
                "IntervalsAlignmentAttribute are not the same";
        }
    }
@ -275,13 +275,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2550.f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {0.f}, {0.1f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -304,7 +304,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {2.55f}, ngraph::element::f32,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -331,7 +331,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
            {},
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {2.55f}, ngraph::element::f32,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -352,13 +352,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -390,13 +390,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -428,13 +428,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -462,13 +462,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -496,13 +496,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -526,13 +526,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {{1}, {1}, {}, {}}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {{1}, {1}, {}, {}}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -556,13 +556,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {{1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {{1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -586,13 +586,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {0.f}, {1.275f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -616,13 +616,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {{1}, {1}, {}, {}}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {{1}, {1}, {}, {}}, {0.f}, {1.275f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -657,7 +657,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
                {{1, 3, 1, 1}, {1, 3, 1, 1}, {}, {}},
                {0.f, 0.f, 0.f}, {2.55f, 2.55f, 2.55f}, {0.f}, {255.f},
                ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -666,7 +666,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
                {{1, 3, 1, 1}, {1, 3, 1, 1}, {}, {}},
                {0.f, 0.f, 0.f}, {1.275f, 1.275f, 1.275f}, {0.f}, {255.f},
                ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -692,13 +692,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {-1.28f}, {1.27f}, {-128.f}, {127.f}, ngraph::element::i8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {-1.28f}, {1.27f}, {-128.f}, {127.f}, ngraph::element::i8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -722,13 +722,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {-1.28f}, {1.27f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -752,13 +752,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {-1.28f}, {1.27f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -896,13 +896,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -940,13 +940,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul) }
            },
            {},
            {},
@ -974,13 +974,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
        {
            {
                256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
            {
                256ul, {}, {1.275f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
            },
            {},
            {},
@@ -1012,13 +1012,13 @@ const std::vector<ConcatTransformationTestValues> testValues = {
         {
             {
                 256ul, {}, {1.275f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul) }
             },
             {},
             {},
             {
                 256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8,
-                { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul)
+                { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{0.f, 2.55f}, 256ul)
                 }
             },
             {},
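Note on the pattern above: the expected-attribute lists in these test values drop the make_shared_attribute_ptr<T>(...) helper and construct the attributes by value, which is what storage in ov::Any expects. A minimal sketch of the two styles (illustrative only; the constructor signature is taken from the lines above):

    // Before: heap-allocated attribute, wrapped for the Variant-based rt_info.
    // auto attr = make_shared_attribute_ptr<IntervalsAlignmentAttribute>(
    //     IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul);

    // After: the attribute is a plain value; ov::Any copies it directly.
    IntervalsAlignmentAttribute attr(
        IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul);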
@@ -160,16 +160,16 @@ TEST_P(ConcatWithNeighborsWithConvolutionTransformation, CompareFunctions) {
     auto actualFakeQuantizes = LayerTransformation::get<opset1::FakeQuantize>(actualFunction);
     ASSERT_EQ(3ul, actualFakeQuantizes.size()) << "unexpected FakeQuantize operations count " << actualFakeQuantizes.size();

-    ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame<std::shared_ptr<PrecisionsAttribute>>(actualFakeQuantizes)) <<
+    ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame<PrecisionsAttribute>(actualFakeQuantizes)) <<
         "PrecisionsAttribute shared values are not the same";

     auto actualConcatOperations = LayerTransformation::get<opset1::Concat>(actualFunction);
     ASSERT_EQ(2ul, actualConcatOperations.size()) << "unexpected concat operations";
-    ASSERT_NE(nullptr, ngraph::pass::low_precision::getAttribute<std::shared_ptr<QuantizationAlignmentAttribute>>(actualConcatOperations[0]));
-    ASSERT_NE(nullptr, ngraph::pass::low_precision::getAttribute<std::shared_ptr<QuantizationAlignmentAttribute>>(actualConcatOperations[1]));
+    ASSERT_FALSE(ngraph::pass::low_precision::getAttribute<QuantizationAlignmentAttribute>(actualConcatOperations[0]).empty());
+    ASSERT_FALSE(ngraph::pass::low_precision::getAttribute<QuantizationAlignmentAttribute>(actualConcatOperations[1]).empty());

     actualConcatOperations.insert(actualConcatOperations.end(), actualFakeQuantizes.begin(), actualFakeQuantizes.end());
-    ASSERT_TRUE(checkIfAttributesSharedValuesAreTheSame<std::shared_ptr<IntervalsAlignmentAttribute>>(actualConcatOperations)) <<
+    ASSERT_TRUE(checkIfAttributesSharedValuesAreTheSame<IntervalsAlignmentAttribute>(actualConcatOperations)) <<
         "IntervalsAlignmentAttribute shared values are not the same";

     auto convolutions = LayerTransformation::get<opset1::Convolution>(actualFunction);
@@ -177,8 +177,8 @@ TEST_P(ConcatWithNeighborsWithConvolutionTransformation, CompareFunctions) {
     ASSERT_EQ(2ul, convolutions[0]->input(0).get_rt_info().size()) <<
         "unexpected input 0 attributes count: LowPrecision::PerTensorQuantization & LowPrecision::Precisions";
     ASSERT_EQ(1ul, convolutions[0]->input(1).get_rt_info().size()) << "unexpected input 1 attributes count";
-    auto a1 = std::dynamic_pointer_cast<ngraph::VariantWrapper<std::shared_ptr<PrecisionsAttribute>>>(convolutions[0]->input(1).get_rt_info().begin()->second);
-    ASSERT_EQ(element::i8, *a1->get().get()->sharedValue->precisions.begin());
+    auto& a1 = convolutions[0]->input(1).get_rt_info().begin()->second.as<PrecisionsAttribute>();
+    ASSERT_EQ(element::i8, a1.value().front());
 }

 const std::vector<ngraph::element::Type> precisions = {
@@ -204,15 +204,15 @@ const std::vector<ConcatWithNeighborsWithConvolutionTestValues> testValues = {
     {
         {
             256ul, ngraph::Shape({}), {-1.28f / 3.f}, {1.27f / 3.f}, {0.f}, {255.f}, element::u8,
-            { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{-1.28f, 1.27f}, 256ul) }
+            { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{-1.28f, 1.27f}, 256ul) }
         },
         {
             256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {64.f}, {192.f}, element::u8,
-            { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{-1.28f, 1.27f}, 256ul) }
+            { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{-1.28f, 1.27f}, 256ul) }
        },
        {
            256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {0.f}, {255.f}, element::u8,
-            { make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{-1.28f, 1.27f}, 256ul) }
+            { IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{-1.28f, 1.27f}, 256ul) }
         },
         ngraph::element::u8,
         {{}, {}, {}},
@@ -218,9 +218,9 @@ public:
             testValues.result.dequantization2,
             true,
             {
-                make_shared_attribute_ptr<PrecisionPreservedAttribute>(true),
-                make_shared_attribute_ptr<IntervalsAlignmentAttribute>(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul),
-                make_shared_attribute_ptr<QuantizationAlignmentAttribute>(false)
+                PrecisionPreservedAttribute(true),
+                IntervalsAlignmentAttribute(IntervalsAlignmentSharedValue::Interval{-1.28f, 2.55f}, 256ul),
+                QuantizationAlignmentAttribute(false)
             },
             testValues.result.precisionAfterOperation,
             testValues.result.dequantizationAfter,
@@ -258,14 +258,14 @@ TEST_P(ConcatWithNotQuantizedParentTransformation, CompareFunctions) {
             break;
         }
     }
-    ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame<std::shared_ptr<PrecisionsAttribute>>(actualFakeQuantizes)) <<
+    ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame<PrecisionsAttribute>(actualFakeQuantizes)) <<
         "PrecisionsAttribute are not the same";

    ConcatWithNotQuantizedParentTransformationTestValues testValues = std::get<2>(GetParam());
    if (testValues.checkIntervalsAlignmentAttributes) {
        auto operations = LayerTransformation::get<opset1::Concat>(actualFunction);
        operations.insert(operations.end(), actualFakeQuantizes.begin(), actualFakeQuantizes.end());
-        ASSERT_TRUE(checkIfAttributesSharedValuesAreTheSame<std::shared_ptr<IntervalsAlignmentAttribute>>(operations)) <<
+        ASSERT_TRUE(checkIfAttributesSharedValuesAreTheSame<IntervalsAlignmentAttribute>(operations)) <<
            "IntervalsAlignmentAttribute are not the same";
    }
 }
@@ -169,7 +169,8 @@ const std::vector<ConvertSubtractConstantTransformationTestValues> testValues =
         },
         {
             { ngraph::element::f32, false },
-            { {127.f}, element::f32, {}, false, 1ul, element::i8, true, {}, { "disabled_constant_folding_0" } },
+            { {127.f}, element::f32, {}, false, 1ul, element::i8, true, {},
+              { ov::pass::DisableConstantFolding::get_type_info_static() } },
             { {0.03f}, element::f32, {}, false }
         },
         { std::vector<float>{ 2.f }, ngraph::element::i8},
@@ -284,7 +284,8 @@ const std::vector<ConvolutionBackpropDataTransformationTestValues> testValues =
     {
         ngraph::element::u8,
         {{}, { { 128.f }, ngraph::element::f32, {}, false }, {}},
-        {{}, { { 2.f }, ngraph::element::f32, {1, 2, 1, 1}, true, 1ul, element::i8, false, { "disabled_constant_folding_0" } }, {}},
+        {{}, { { 2.f }, ngraph::element::f32, {1, 2, 1, 1}, true, 1ul, element::i8, false,
+               { ov::pass::DisableConstantFolding::get_type_info_static() } }, {}},
        {{}, {}, {{ 0.0002f }, ngraph::element::f32, {}}},
        op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f }),
        true
@@ -174,7 +174,8 @@ const std::vector<ConvolutionQDqTransformationTestValues> testValues = {
         },
         {
             {},
-            { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "disabled_constant_folding_0" } },
+            { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false,
+              { ov::pass::DisableConstantFolding::get_type_info_static() } },
             {}
         },
         { std::vector<float>{ 100.f }, ngraph::element::i8},
@@ -349,7 +350,8 @@ const std::vector<ConvolutionQDqTransformationTestValues> testValues = {
         },
         {
             {},
-            { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "disabled_constant_folding_0" } },
+            { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false,
+              { ov::pass::DisableConstantFolding::get_type_info_static() } },
             {}
         },
         { std::vector<float>{ 2.f }, ngraph::element::i8},
@@ -417,7 +419,8 @@ const std::vector<ConvolutionQDqTransformationTestValues> testValues = {
         },
         {
             {},
-            { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "disabled_constant_folding_0" } },
+            { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false,
+              { ov::pass::DisableConstantFolding::get_type_info_static() } },
             {}
         },
         { std::vector<float>{ 2.f }, ngraph::element::i8},
@@ -145,12 +145,14 @@ const std::vector<DisableConvertOnConstPathTransformationValues> testValues = {
         ngraph::element::u8,
         {
             {ngraph::element::f32},
-            { {128.f}, element::f32, {}, false, 1ul, element::u8, true, {}, { "disabled_constant_folding_0" } },
+            { {128.f}, element::f32, {}, false, 1ul, element::u8, true, {},
+              {ov::pass::DisableConstantFolding::get_type_info_static() } },
             { {0.02f}, element::f32, {}, false }
         },
         {
             { ngraph::element::f32, false },
-            { {128.f}, element::f32, {}, false, 1ul, element::i8, true, {}, { "disabled_constant_folding_0" } },
+            { {128.f}, element::f32, {}, false, 1ul, element::i8, true, {},
+              {ov::pass::DisableConstantFolding::get_type_info_static() } },
             { {0.03f}, element::f32, {}, false }
         },
         { std::vector<float>{ 1.f }, ngraph::element::f32},
@@ -198,7 +198,9 @@ const std::vector<FakeQuantizeWithNotOptimalTransformationTestValues> fakeQuanti
         {},
         {
             {},
-            { std::vector<float>(64, 127.f), ngraph::element::f32, {64, 1, 1, 1}, false, 1ul, ngraph::element::i8, false, {"disabled_constant_folding_0"}},
+            { std::vector<float>(64, 127.f), ngraph::element::f32,
+              {64, 1, 1, 1}, false, 1ul, ngraph::element::i8, false,
+              {ov::pass::DisableConstantFolding::get_type_info_static()}},
             {}
         },
         {
@@ -473,7 +473,7 @@ const std::vector<GroupConvolutionTestValues> testValuesGroupConv = {
             1,
             ngraph::element::i8,
             false,
-            {"disabled_constant_folding_0"}
+            {ov::pass::DisableConstantFolding::get_type_info_static()}
         },
         {}
     },
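The hunks above replace the magic rt_info key "disabled_constant_folding_0" with the key derived from the DisableConstantFolding pass itself, so tests and transformations agree on one identifier. A minimal sketch of marking a node, assuming the disable_constant_folding helper declared alongside ov::pass::DisableConstantFolding (the get_type_info_static() call is the one used verbatim in the hunks):

    #include <openvino/core/node.hpp>
    #include <openvino/pass/constant_folding.hpp>
    #include <memory>

    void keep_subgraph_unfolded(const std::shared_ptr<ov::Node>& node) {
        // Attaches the DisableConstantFolding runtime attribute to the node,
        // so the ConstantFolding pass skips it.
        ov::pass::disable_constant_folding(node);
        // The rt_info key the tests above now expect:
        const auto key = ov::pass::DisableConstantFolding::get_type_info_static();
        (void)key;
    }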
@@ -83,10 +83,9 @@ public:
         for (size_t nodeIndex = 0ul; nodeIndex < nodes.size(); nodeIndex++) {
             auto& rt = nodes[nodeIndex]->get_rt_info();
             for (auto& it : rt) {
-                auto reference = std::dynamic_pointer_cast<VariantWrapper<std::shared_ptr<IntervalsAlignmentAttribute>>>(it.second);
-                assert(reference != nullptr);
-                if ((reference->get()->sharedValue->combinedInterval.low != intervalLow) &&
-                    (reference->get()->sharedValue->combinedInterval.high != intervalHigh)) {
+                auto& reference = it.second.as<IntervalsAlignmentAttribute>();
+                if ((reference.value().combinedInterval.low != intervalLow) &&
+                    (reference.value().combinedInterval.high != intervalHigh)) {
                     return false;
                 }
             }
@@ -96,10 +95,10 @@ public:
     }

     static bool compare(
-        const std::shared_ptr<IntervalsAlignmentAttribute>& value1,
-        const std::shared_ptr<IntervalsAlignmentAttribute>& value2) {
-        if ((value1->sharedValue->combinedInterval.low != value2->sharedValue->combinedInterval.low) ||
-            (value1->sharedValue->combinedInterval.high != value2->sharedValue->combinedInterval.high)) {
+        const IntervalsAlignmentAttribute& value1,
+        const IntervalsAlignmentAttribute& value2) {
+        if ((value1.value().combinedInterval.low != value2.value().combinedInterval.low) ||
+            (value1.value().combinedInterval.high != value2.value().combinedInterval.high)) {
             return false;
         }
         return true;
@@ -124,10 +123,8 @@ public:
                 return false;
             }

-            auto reference = std::dynamic_pointer_cast<VariantWrapper<Operation>>(referenceIt->second);
-            auto actual = std::dynamic_pointer_cast<VariantWrapper<Operation>>(actualIt.second);
-            if ((actual != nullptr) && (reference != nullptr)) {
-                if (!compare(reference->get(), actual->get())) {
+            if (!referenceIt->second.empty() && !actualIt.second.empty()) {
+                if (!compare(referenceIt->second.template as<Operation>(), actualIt.second.template as<Operation>())) {
                     return false;
                 }
             }
@@ -139,24 +136,22 @@ public:

     template <class Attribute>
     static bool checkIfOutputAttributesAreTheSame(const NodeVector& nodes) {
-        void* first = nullptr;
+        ov::Any first;
         for (auto node : nodes) {
             for (auto output : node->outputs()) {
                 auto& rt = output.get_rt_info();
-                const std::string& name = VariantWrapper<Attribute>::type_info.name;
+                const std::string& name = Attribute::get_type_info_static();
                 auto it = rt.find(name);
                 if (it == rt.end()) {
                     return false;
                 }

                 auto value = it->second;
-                OPENVINO_SUPPRESS_DEPRECATED_START
-                if (first == nullptr) {
-                    first = value.get();
-                } else if (value.get() != first) {
+                if (first.empty()) {
+                    first = value;
+                } else if (value.template as<Attribute>().attribute != first.template as<Attribute>().attribute) {
                     return false;
                 }
-                OPENVINO_SUPPRESS_DEPRECATED_END
             }
         }
         return true;
@@ -164,15 +159,15 @@ public:

     template <class Attribute>
     static bool checkIfOutputAttributesSharedValuesAreTheSame(const NodeVector& nodes) {
-        std::shared_ptr<Variant> first = nullptr;
+        ov::Any first;
         for (auto node : nodes) {
             for (auto output : node->outputs()) {
                 auto value = ngraph::pass::low_precision::getAttributeFromOutput<Attribute>(output);
-                if (first == nullptr) {
+                if (first.empty()) {
                     first = value;
                 } else {
-                    const auto sharedValue1 = std::dynamic_pointer_cast<ngraph::VariantWrapper<Attribute>>(value)->get()->sharedValue;
-                    const auto sharedValue2 = std::dynamic_pointer_cast<ngraph::VariantWrapper<Attribute>>(first)->get()->sharedValue;
+                    const auto sharedValue1 = value.template as<Attribute>().attribute->sharedValue;
+                    const auto sharedValue2 = first.template as<Attribute>().attribute->sharedValue;
                     if (sharedValue1 != sharedValue2) {
                         return false;
                     }
@@ -184,18 +179,18 @@ public:

     template <class Attribute>
     static bool checkIfAttributesSharedValuesAreTheSame(const NodeVector& nodes) {
-        std::shared_ptr<Variant> first = nullptr;
+        ov::Any first;
         for (auto node : nodes) {
             auto value = ngraph::pass::low_precision::getAttribute<Attribute>(node);
-            if (value == nullptr) {
+            if (value.empty()) {
                 return false;
             }

-            if (first == nullptr) {
+            if (first.empty()) {
                 first = value;
             } else {
-                const auto sharedValue1 = std::dynamic_pointer_cast<ngraph::VariantWrapper<Attribute>>(value)->get()->sharedValue;
-                const auto sharedValue2 = std::dynamic_pointer_cast<ngraph::VariantWrapper<Attribute>>(first)->get()->sharedValue;
+                const auto sharedValue1 = value.template as<Attribute>().attribute->sharedValue;
+                const auto sharedValue2 = first.template as<Attribute>().attribute->sharedValue;
                 if (sharedValue1 != sharedValue2) {
                     return false;
                 }
@@ -206,16 +201,16 @@ public:

     template <class Attribute>
     static bool checkIfAttributesAreTheSame(const NodeVector& nodes) {
-        Variant* first = nullptr;
+        ov::Any first;
         for (auto node : nodes) {
             auto value = ngraph::pass::low_precision::getAttribute<Attribute>(node);
-            if (value == nullptr) {
+            if (value.empty()) {
                 return false;
             }

-            if (first == nullptr) {
-                first = value.get();
-            } else if (value.get() != first) {
+            if (first.empty()) {
+                first = value;
+            } else if (value.template as<Attribute>().attribute != first.template as<Attribute>().attribute) {
                 return false;
             }
         }
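All of the helper rewrites above are one migration: runtime attributes move out of the shared_ptr<Variant>/VariantWrapper indirection and are held directly in ov::Any, keyed by the attribute's static type info. A minimal sketch of the resulting access pattern (illustrative helper; Attribute stands for any of the low-precision attribute types, and the .attribute member is the shared payload these checks compare):

    #include <memory>
    #include <string>
    #include <ngraph/node.hpp>

    template <class Attribute>
    bool same_attribute_instance(const std::shared_ptr<ngraph::Node>& a,
                                 const std::shared_ptr<ngraph::Node>& b) {
        auto& rt_a = a->get_rt_info();
        auto& rt_b = b->get_rt_info();
        // rt_info maps std::string -> ov::Any; the key comes from the
        // attribute type itself instead of VariantWrapper<T>::type_info.name.
        const std::string name = Attribute::get_type_info_static();
        auto it_a = rt_a.find(name);
        auto it_b = rt_b.find(name);
        if (it_a == rt_a.end() || it_b == rt_b.end())
            return false;
        // as<T>() unwraps the value stored in ov::Any.
        return it_a->second.template as<Attribute>().attribute ==
               it_b->second.template as<Attribute>().attribute;
    }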
@@ -227,9 +227,7 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationFakeQuantizeDecompositionTransf
     auto shapeOf = std::make_shared<opset1::ShapeOf>(fakeQuantize);

     auto& outInfo = fakeQuantize->output(0).get_rt_info();
-    auto attribute = ngraph::pass::low_precision::make_shared_attribute<PrecisionsAttribute>(element::TypeVector{ element::u8, element::i8 });
-    auto attributeWrapper = std::make_shared<ngraph::VariantWrapper<std::shared_ptr<PrecisionsAttribute>>>(attribute);
-    outInfo.emplace(ngraph::VariantWrapper<std::shared_ptr<PrecisionsAttribute>>::type_info.name, attributeWrapper);
+    outInfo.emplace(PrecisionsAttribute::get_type_info_static(), PrecisionsAttribute(element::TypeVector{ element::u8, element::i8 }));

     auto result1 = std::make_shared<opset1::Result>(fakeQuantize);
     auto result2 = std::make_shared<opset1::Result>(shapeOf);
@@ -140,14 +140,14 @@ TEST_P(MarkupAvgPoolPrecisionsTransformation, CompareFunctions) {
     ASSERT_EQ(1ul, avgPoolOperations.size()) << "unexpected avgPoolOperations size: " << avgPoolOperations.size();

     {
-        auto avgPoolPrecisioinPreservedAttribute = ngraph::pass::low_precision::getAttribute<AvgPoolPrecisionPreservedAttributePtr>(
+        auto avgPoolPrecisioinPreservedAttribute = ngraph::pass::low_precision::getAttribute<AvgPoolPrecisionPreservedAttribute>(
             *avgPoolOperations.begin());
-        ASSERT_NE(nullptr, avgPoolPrecisioinPreservedAttribute);
-        ASSERT_EQ(true, avgPoolPrecisioinPreservedAttribute->get()->sharedValue->value);
+        ASSERT_FALSE(avgPoolPrecisioinPreservedAttribute.empty());
+        ASSERT_EQ(true, avgPoolPrecisioinPreservedAttribute.as<AvgPoolPrecisionPreservedAttribute>().value());
     }

     const auto precisionPreserved = LayerTransformation::get<opset1::MaxPool>(actualFunction);
-    ASSERT_TRUE(checkIfAttributesAreTheSame<std::shared_ptr<AvgPoolPrecisionPreservedAttribute>>(precisionPreserved)) <<
+    ASSERT_TRUE(checkIfAttributesAreTheSame<AvgPoolPrecisionPreservedAttribute>(precisionPreserved)) <<
         "AvgPoolPrecisionPreservedAttribute are not the same";

     //auto res = compare_functions(referenceFunction, actualFunction, true, true);
@@ -162,9 +162,9 @@ public:
             testValues.actual.convertAfter,
             testValues.actual.dequantizationAfter,
             {
-                ngraph::builder::subgraph::make_shared_attribute_ptr<PrecisionPreservedAttribute>(true),
-                ngraph::builder::subgraph::make_shared_attribute_ptr<IntervalsAlignmentAttribute>(interval, 256),
-                ngraph::builder::subgraph::make_shared_attribute_ptr<QuantizationAlignmentAttribute>(false)
+                PrecisionPreservedAttribute(true),
+                IntervalsAlignmentAttribute(interval, 256),
+                QuantizationAlignmentAttribute(false)
             },
             ngraph::element::undefined,
             {},
@@ -209,9 +209,9 @@ public:
             testValues.result.convertAfter,
             testValues.result.dequantizationAfter,
             {
-                ngraph::builder::subgraph::make_shared_attribute_ptr<PrecisionPreservedAttribute>(true),
-                ngraph::builder::subgraph::make_shared_attribute_ptr<IntervalsAlignmentAttribute>(interval, 256),
-                ngraph::builder::subgraph::make_shared_attribute_ptr<QuantizationAlignmentAttribute>(false)
+                PrecisionPreservedAttribute(true),
+                IntervalsAlignmentAttribute(interval, 256),
+                QuantizationAlignmentAttribute(false)
             },
             testValues.result.precisionAfterOperation,
             {},
@@ -242,7 +242,7 @@ TEST_P(MoveFakeQuantizeTransformation, CompareFunctions) {
     ASSERT_TRUE(LayerTransformation::allNamesAreUnique(actualFunction)) << "Not all names are unique";

     const auto actualFakeQuantizes = LayerTransformation::get<opset1::FakeQuantize>(actualFunction);
-    ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame<std::shared_ptr<PrecisionsAttribute>>(actualFakeQuantizes)) <<
+    ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame<PrecisionsAttribute>(actualFakeQuantizes)) <<
         "PrecisionsAttribute are not the same";
 }

@@ -9,7 +9,6 @@
 #include <ngraph/variant.hpp>

 #include <snippets/snippets_isa.hpp>
-#include <snippets/register_info.hpp>
 #include <snippets/pass/assign_registers.hpp>

 #include <transformations/init_node_info.hpp>
@@ -53,7 +52,7 @@ TEST(TransformationTests, AssignRegisters) {

         auto it_rinfo = rt.find("reginfo");
         if (it_rinfo != rt.end()) {
-            auto reginfo = ov::as_type_ptr<VariantWrapper<std::vector<size_t>>>(it_rinfo->second)->get();
+            auto reginfo = it_rinfo->second.as<std::vector<size_t>>();
             auto reg = reginfo[0];
             ASSERT_TRUE(ref_registers[op->get_friendly_name()] == reg);
             total_ops++;
@@ -127,7 +126,7 @@ TEST(TransformationTests, AssignRegisters2) {
         auto& rt = op->get_rt_info();
         auto it_rinfo = rt.find("reginfo");
         if (it_rinfo != rt.end()) {
-            auto reginfo = ov::as_type_ptr<VariantWrapper<std::vector<size_t>>>(it_rinfo->second)->get();
+            auto reginfo = it_rinfo->second.as<std::vector<size_t>>();
             auto reg = reginfo[0];
             ASSERT_TRUE(ref_registers[op->get_friendly_name()] == reg);
             total_ops++;
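The same simplification applies to plain values: the snippets tests now read the "reginfo" vector straight out of ov::Any instead of unwrapping a VariantWrapper. A minimal sketch of the lookup (illustrative helper and sentinel, not part of this commit):

    #include <cstdint>
    #include <memory>
    #include <vector>
    #include <ngraph/node.hpp>

    // Returns the first register assigned to an op, or SIZE_MAX when no
    // "reginfo" entry is present (sentinel chosen for this sketch only).
    size_t first_register(const std::shared_ptr<ngraph::Node>& op) {
        auto& rt = op->get_rt_info();
        const auto it_rinfo = rt.find("reginfo");
        if (it_rinfo == rt.end())
            return SIZE_MAX;
        // The vector is stored by value in ov::Any:
        const auto reginfo = it_rinfo->second.as<std::vector<size_t>>();
        return reginfo.empty() ? SIZE_MAX : reginfo[0];
    }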
@@ -0,0 +1,189 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/op_conversions/detection_output_downgrade.hpp"
+
+#include <gtest/gtest.h>
+
+#include <memory>
+#include <ngraph/function.hpp>
+#include <ngraph/op/util/detection_output_base.hpp>
+#include <ngraph/opsets/opset1.hpp>
+#include <ngraph/opsets/opset7.hpp>
+#include <ngraph/opsets/opset8.hpp>
+#include <ngraph/pass/manager.hpp>
+#include <string>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+#include "transformations/init_node_info.hpp"
+
+using namespace ngraph;
+using namespace testing;
+
+namespace {
+void create_attributes_vectors(std::vector<opset1::DetectionOutput::Attributes>& attrs_v1_vector,
+                               std::vector<opset8::DetectionOutput::Attributes>& attrs_v8_vector) {
+    // initialize attributes affecting shape inference
+    // others remain by default
+    for (int keep_top_k : {10, -1}) {
+        for (int top_k : {5, -1}) {
+            for (bool variance_encoded_in_target : {true, false}) {
+                for (bool share_location : {true, false}) {
+                    for (bool normalized : {true, false}) {
+                        opset1::DetectionOutput::Attributes attrs_v1;
+                        opset8::DetectionOutput::Attributes attrs_v8;
+                        attrs_v1.top_k = attrs_v8.top_k = top_k;
+                        attrs_v1.keep_top_k = attrs_v8.keep_top_k = {keep_top_k};
+                        attrs_v1.variance_encoded_in_target = attrs_v8.variance_encoded_in_target =
+                            variance_encoded_in_target;
+                        attrs_v1.share_location = attrs_v8.share_location = share_location;
+                        attrs_v1.normalized = attrs_v8.normalized = normalized;
+                        attrs_v1_vector.push_back(attrs_v1);
+                        attrs_v8_vector.push_back(attrs_v8);
+                    }
+                }
+            }
+        }
+    }
+}
+}  // namespace
+
+TEST(TransformationTests, DetectionOutput8ToDetectionOutput1) {
+    std::vector<opset1::DetectionOutput::Attributes> attrs_v1_vector;
+    std::vector<opset8::DetectionOutput::Attributes> attrs_v8_vector;
+    Dimension N = 5;
+    Dimension num_prior_boxes = 100;
+    Dimension priors_batch_size = N;
+    Dimension num_classes = 23;
+
+    create_attributes_vectors(attrs_v1_vector, attrs_v8_vector);
+    ASSERT_TRUE(attrs_v1_vector.size() == attrs_v8_vector.size()) << "Sizes of attribute test vectors must be equal";
+    for (size_t ind = 0; ind < attrs_v1_vector.size(); ++ind) {
+        std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+        // this case covers deducing a number of classes value
+        // since this value is not saved in attributes
+        opset8::DetectionOutput::Attributes attributes_v8 = attrs_v8_vector[ind];
+        opset1::DetectionOutput::Attributes attributes_v1 = attrs_v1_vector[ind];
+        if (num_classes.is_static()) {
+            attributes_v1.num_classes = num_classes.get_length();
+        }
+
+        Dimension num_loc_classes = attributes_v8.share_location ? 1 : num_classes;
+        Dimension prior_box_size = attributes_v8.normalized ? 4 : 5;
+
+        PartialShape box_logits_shape = {N, num_prior_boxes * num_loc_classes * 4};
+        PartialShape class_preds_shape = {N, num_prior_boxes * num_classes};
+        PartialShape proposals_shape = {priors_batch_size,
+                                        attributes_v8.variance_encoded_in_target ? 1 : 2,
+                                        num_prior_boxes * prior_box_size};
+
+        {
+            auto box_logits = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, box_logits_shape);
+            auto class_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, class_preds_shape);
+            auto proposals = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, proposals_shape);
+
+            auto detection_output_v8 =
+                std::make_shared<ngraph::opset8::DetectionOutput>(box_logits, class_preds, proposals, attributes_v8);
+
+            f = std::make_shared<ngraph::Function>(ngraph::NodeVector{detection_output_v8},
+                                                   ngraph::ParameterVector{box_logits, class_preds, proposals});
+
+            ngraph::pass::Manager manager;
+            manager.register_pass<ngraph::pass::ConvertDetectionOutput8ToDetectionOutput1>();
+            manager.run_passes(f);
+        }
+
+        {
+            auto box_logits = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, box_logits_shape);
+            auto class_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, class_preds_shape);
+            auto proposals = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, proposals_shape);
+
+            auto detection_output_v1 =
+                std::make_shared<ngraph::opset1::DetectionOutput>(box_logits, class_preds, proposals, attributes_v1);
+
+            f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{detection_output_v1},
+                                                       ngraph::ParameterVector{box_logits, class_preds, proposals});
+        }
+        auto res = compare_functions(f, f_ref);
+        ASSERT_TRUE(res.first) << res.second;
+    }
+}
+
+TEST(TransformationTests, DetectionOutput8ToDetectionOutput1FiveArguments) {
+    // In this case num_classes attribute value is deduced using inputs shapes
+    std::vector<opset1::DetectionOutput::Attributes> attrs_v1_vector;
+    std::vector<opset8::DetectionOutput::Attributes> attrs_v8_vector;
+    Dimension N = 5;
+    Dimension num_prior_boxes = 15;
+    Dimension priors_batch_size = N;
+    Dimension num_classes = 23;
+
+    create_attributes_vectors(attrs_v1_vector, attrs_v8_vector);
+    ASSERT_TRUE(attrs_v1_vector.size() == attrs_v8_vector.size()) << "Sizes of attribute test vectors must be equal";
+    for (size_t ind = 0; ind < attrs_v1_vector.size(); ++ind) {
+        std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+        opset8::DetectionOutput::Attributes attributes_v8 = attrs_v8_vector[ind];
+        opset1::DetectionOutput::Attributes attributes_v1 = attrs_v1_vector[ind];
+        if (num_classes.is_static()) {
+            attributes_v1.num_classes = num_classes.get_length();
+        }
+
+        Dimension num_loc_classes = attributes_v8.share_location ? 1 : num_classes;
+        Dimension prior_box_size = attributes_v8.normalized ? 4 : 5;
+
+        PartialShape box_logits_shape = {N, num_prior_boxes * num_loc_classes * 4};
+        PartialShape class_preds_shape = {N, num_prior_boxes * num_classes};
+        PartialShape proposals_shape = {priors_batch_size,
+                                        attributes_v8.variance_encoded_in_target ? 1 : 2,
+                                        num_prior_boxes * prior_box_size};
+        PartialShape ad_class_preds_shape = {N, num_prior_boxes * 2};
+        PartialShape ad_box_preds_shape = {N, num_prior_boxes * num_loc_classes * 4};
+
+        {
+            auto box_logits = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, box_logits_shape);
+            auto class_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, class_preds_shape);
+            auto proposals = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, proposals_shape);
+            auto ad_class_preds =
+                std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ad_class_preds_shape);
+            auto ad_box_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ad_box_preds_shape);
+
+            auto detection_output_v8 = std::make_shared<ngraph::opset8::DetectionOutput>(box_logits,
+                                                                                         class_preds,
+                                                                                         proposals,
+                                                                                         ad_class_preds,
+                                                                                         ad_box_preds,
+                                                                                         attributes_v8);
+
+            f = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{detection_output_v8},
+                ngraph::ParameterVector{box_logits, class_preds, proposals, ad_class_preds, ad_box_preds});
+
+            ngraph::pass::Manager manager;
+            manager.register_pass<ngraph::pass::ConvertDetectionOutput8ToDetectionOutput1>();
+            manager.run_passes(f);
+        }
+
+        {
+            auto box_logits = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, box_logits_shape);
+            auto class_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, class_preds_shape);
+            auto proposals = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, proposals_shape);
+            auto ad_class_preds =
+                std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ad_class_preds_shape);
+            auto ad_box_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ad_box_preds_shape);
+
+            auto detection_output_v1 = std::make_shared<ngraph::opset1::DetectionOutput>(box_logits,
+                                                                                         class_preds,
+                                                                                         proposals,
+                                                                                         ad_class_preds,
+                                                                                         ad_box_preds,
+                                                                                         attributes_v1);
+
+            f_ref = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{detection_output_v1},
+                ngraph::ParameterVector{box_logits, class_preds, proposals, ad_class_preds, ad_box_preds});
+        }
+        auto res = compare_functions(f, f_ref);
+        ASSERT_TRUE(res.first) << res.second;
+    }
+}
@@ -0,0 +1,189 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/op_conversions/detection_output_upgrade.hpp"
+
+#include <gtest/gtest.h>
+
+#include <memory>
+#include <ngraph/function.hpp>
+#include <ngraph/op/util/detection_output_base.hpp>
+#include <ngraph/opsets/opset1.hpp>
+#include <ngraph/opsets/opset7.hpp>
+#include <ngraph/opsets/opset8.hpp>
+#include <ngraph/pass/manager.hpp>
+#include <string>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+#include "transformations/init_node_info.hpp"
+
+using namespace ngraph;
+using namespace testing;
+
+namespace {
+void create_attributes_vectors(std::vector<opset1::DetectionOutput::Attributes>& attrs_v1_vector,
+                               std::vector<opset8::DetectionOutput::Attributes>& attrs_v8_vector) {
+    // initialize attributes affecting shape inference
+    // others remain by default
+    for (int keep_top_k : {10, -1}) {
+        for (int top_k : {5, -1}) {
+            for (bool variance_encoded_in_target : {true, false}) {
+                for (bool share_location : {true, false}) {
+                    for (bool normalized : {true, false}) {
+                        opset1::DetectionOutput::Attributes attrs_v1;
+                        opset8::DetectionOutput::Attributes attrs_v8;
+                        attrs_v1.top_k = attrs_v8.top_k = top_k;
+                        attrs_v1.keep_top_k = attrs_v8.keep_top_k = {keep_top_k};
+                        attrs_v1.variance_encoded_in_target = attrs_v8.variance_encoded_in_target =
+                            variance_encoded_in_target;
+                        attrs_v1.share_location = attrs_v8.share_location = share_location;
+                        attrs_v1.normalized = attrs_v8.normalized = normalized;
+                        attrs_v1_vector.push_back(attrs_v1);
+                        attrs_v8_vector.push_back(attrs_v8);
+                    }
+                }
+            }
+        }
+    }
+}
+}  // namespace
+
+TEST(TransformationTests, DetectionOutput1ToDetectionOutput8) {
+    std::vector<opset1::DetectionOutput::Attributes> attrs_v1_vector;
+    std::vector<opset8::DetectionOutput::Attributes> attrs_v8_vector;
+    Dimension N = 5;
+    Dimension num_prior_boxes = 100;
+    Dimension priors_batch_size = N;
+    Dimension num_classes = 23;
+
+    create_attributes_vectors(attrs_v1_vector, attrs_v8_vector);
+    ASSERT_TRUE(attrs_v1_vector.size() == attrs_v8_vector.size()) << "Sizes of attribute test vectors must be equal";
+    for (size_t ind = 0; ind < attrs_v1_vector.size(); ++ind) {
+        std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+        // this case covers deducing a number of classes value
+        // since this value is not saved in attributes
+        opset8::DetectionOutput::Attributes attributes_v8 = attrs_v8_vector[ind];
+        opset1::DetectionOutput::Attributes attributes_v1 = attrs_v1_vector[ind];
+        if (num_classes.is_static()) {
+            attributes_v1.num_classes = num_classes.get_length();
+        }
+
+        Dimension num_loc_classes = attributes_v8.share_location ? 1 : num_classes;
+        Dimension prior_box_size = attributes_v8.normalized ? 4 : 5;
+
+        PartialShape box_logits_shape = {N, num_prior_boxes * num_loc_classes * 4};
+        PartialShape class_preds_shape = {N, num_prior_boxes * num_classes};
+        PartialShape proposals_shape = {priors_batch_size,
+                                        attributes_v8.variance_encoded_in_target ? 1 : 2,
+                                        num_prior_boxes * prior_box_size};
+
+        {
+            auto box_logits = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, box_logits_shape);
+            auto class_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, class_preds_shape);
+            auto proposals = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, proposals_shape);
+
+            auto detection_output_v1 =
+                std::make_shared<ngraph::opset1::DetectionOutput>(box_logits, class_preds, proposals, attributes_v1);
+
+            f = std::make_shared<ngraph::Function>(ngraph::NodeVector{detection_output_v1},
+                                                   ngraph::ParameterVector{box_logits, class_preds, proposals});
+
+            ngraph::pass::Manager manager;
+            manager.register_pass<ngraph::pass::ConvertDetectionOutput1ToDetectionOutput8>();
+            manager.run_passes(f);
+        }
+
+        {
+            auto box_logits = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, box_logits_shape);
+            auto class_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, class_preds_shape);
+            auto proposals = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, proposals_shape);
+
+            auto detection_output_v8 =
+                std::make_shared<ngraph::opset8::DetectionOutput>(box_logits, class_preds, proposals, attributes_v8);
+
+            f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{detection_output_v8},
+                                                       ngraph::ParameterVector{box_logits, class_preds, proposals});
+        }
+        auto res = compare_functions(f, f_ref);
+        ASSERT_TRUE(res.first) << res.second;
+    }
+}
+
+TEST(TransformationTests, DetectionOutput1ToDetectionOutput8FiveArguments) {
+    // In this case num_classes attribute value is deduced using inputs shapes
+    std::vector<opset1::DetectionOutput::Attributes> attrs_v1_vector;
+    std::vector<opset8::DetectionOutput::Attributes> attrs_v8_vector;
+    Dimension N = 5;
+    Dimension num_prior_boxes = 15;
+    Dimension priors_batch_size = N;
+    Dimension num_classes = 23;
+
+    create_attributes_vectors(attrs_v1_vector, attrs_v8_vector);
+    ASSERT_TRUE(attrs_v1_vector.size() == attrs_v8_vector.size()) << "Sizes of attribute test vectors must be equal";
+    for (size_t ind = 0; ind < attrs_v1_vector.size(); ++ind) {
+        std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+        opset8::DetectionOutput::Attributes attributes_v8 = attrs_v8_vector[ind];
+        opset1::DetectionOutput::Attributes attributes_v1 = attrs_v1_vector[ind];
+        if (num_classes.is_static()) {
+            attributes_v1.num_classes = num_classes.get_length();
+        }
+
+        Dimension num_loc_classes = attributes_v8.share_location ? 1 : num_classes;
+        Dimension prior_box_size = attributes_v8.normalized ? 4 : 5;
+
+        PartialShape box_logits_shape = {N, num_prior_boxes * num_loc_classes * 4};
+        PartialShape class_preds_shape = {N, num_prior_boxes * num_classes};
+        PartialShape proposals_shape = {priors_batch_size,
+                                        attributes_v8.variance_encoded_in_target ? 1 : 2,
+                                        num_prior_boxes * prior_box_size};
+        PartialShape ad_class_preds_shape = {N, num_prior_boxes * 2};
+        PartialShape ad_box_preds_shape = {N, num_prior_boxes * num_loc_classes * 4};
+
+        {
+            auto box_logits = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, box_logits_shape);
+            auto class_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, class_preds_shape);
+            auto proposals = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, proposals_shape);
+            auto ad_class_preds =
+                std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ad_class_preds_shape);
+            auto ad_box_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ad_box_preds_shape);
+
+            auto detection_output_v1 = std::make_shared<ngraph::opset1::DetectionOutput>(box_logits,
+                                                                                         class_preds,
+                                                                                         proposals,
+                                                                                         ad_class_preds,
+                                                                                         ad_box_preds,
+                                                                                         attributes_v1);
+
+            f = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{detection_output_v1},
+                ngraph::ParameterVector{box_logits, class_preds, proposals, ad_class_preds, ad_box_preds});
+
+            ngraph::pass::Manager manager;
+            manager.register_pass<ngraph::pass::ConvertDetectionOutput1ToDetectionOutput8>();
+            manager.run_passes(f);
+        }
+
+        {
+            auto box_logits = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, box_logits_shape);
+            auto class_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, class_preds_shape);
+            auto proposals = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, proposals_shape);
+            auto ad_class_preds =
+                std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ad_class_preds_shape);
+            auto ad_box_preds = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ad_box_preds_shape);
+
+            auto detection_output_v8 = std::make_shared<ngraph::opset8::DetectionOutput>(box_logits,
+                                                                                         class_preds,
+                                                                                         proposals,
+                                                                                         ad_class_preds,
+                                                                                         ad_box_preds,
+                                                                                         attributes_v8);
+
+            f_ref = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{detection_output_v8},
+                ngraph::ParameterVector{box_logits, class_preds, proposals, ad_class_preds, ad_box_preds});
+        }
+        auto res = compare_functions(f, f_ref);
+        ASSERT_TRUE(res.first) << res.second;
+    }
+}
@ -0,0 +1,158 @@
|
|||||||
|
// Copyright (C) 2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
#include <ngraph/function.hpp>
|
||||||
|
#include <ngraph/opsets/opset8.hpp>
|
||||||
|
#include <ngraph/pass/manager.hpp>
|
||||||
|
#include <ngraph/pass/visualize_tree.hpp>
|
||||||
|
#include <transformations/common_optimizations/nearest_neighbor_upsampling_fusion.hpp>
|
||||||
|
#include <transformations/init_node_info.hpp>
|
||||||
|
#include <transformations/utils/utils.hpp>
|
||||||
|
|
||||||
|
#include "common_test_utils/ngraph_test_utils.hpp"
|
||||||
|
|
||||||
|
using namespace testing;
|
||||||
|
|
||||||
|
TEST_F(TransformationTestsF, NearestNeighborUpsamplingFusionSpatial2D1) {
|
||||||
|
ngraph::Shape input_shape { 1, 120, 150, 32 };
|
||||||
|
size_t input_rank = input_shape.size();
|
||||||
|
std::vector<int64_t> new_spatial_shape { 240, 450 };
|
||||||
|
std::vector<float> scales_as_floats { 2.0f, 3.0f };
|
||||||
|
std::vector<int64_t> constants_for_concat_1 { 1, 120, 1, 150, 1, 32 };
|
||||||
|
std::vector<int64_t> constants_for_concat_2 { 1, 240, 450, 32 };
|
||||||
|
ngraph::Shape mul_const_shape {1, 1, 2, 1, 3, 1};
|
||||||
|
std::vector<float> mul_const_value {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f};
|
||||||
|
{
|
||||||
|
auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, input_shape);
|
||||||
|
auto shape_node = std::make_shared<ngraph::opset8::ShapeOf>(input);
|
||||||
|
|
||||||
|
auto sslice_begin = ngraph::opset8::Constant::create(ngraph::element::i64, {1}, std::vector<int64_t>{0});
|
||||||
|
auto sslice_end = ngraph::opset8::Constant::create(ngraph::element::i64, {1}, std::vector<int64_t>{1});
|
||||||
|
std::vector<int64_t> begin_mask = {0};
|
||||||
|
std::vector<int64_t> end_mask = {0};
|
||||||
|
auto strided_slice_node = std::make_shared<ngraph::opset8::StridedSlice>(shape_node, sslice_begin, sslice_end, begin_mask, end_mask);
|
||||||
|
|
||||||
|
ngraph::OutputVector concat_1_inputs_vec(2 + 2 * (input_rank - 2));
|
||||||
|
concat_1_inputs_vec[0] = strided_slice_node;
|
||||||
|
for (size_t i = 1; i < 2 + 2 * (input_rank - 2); ++i) {
|
||||||
|
const auto unsqueezed_const = ngraph::opset8::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{constants_for_concat_1[i]});
|
||||||
|
const auto unsqueeze_axis = ngraph::opset8::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
|
||||||
|
const auto current_unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(unsqueezed_const, unsqueeze_axis);
|
||||||
|
concat_1_inputs_vec[i] = current_unsqueeze;
|
||||||
|
}
|
||||||
|
auto concat_1 = std::make_shared<ngraph::opset8::Concat>(concat_1_inputs_vec, 0);
|
||||||
|
|
||||||
|
auto reshape_1 = std::make_shared<ngraph::opset8::Reshape>(input, concat_1, true);
|
||||||
|
|
||||||
|
ngraph::OutputVector concat_2_inputs_vec(input_rank);
|
||||||
|
concat_2_inputs_vec[0] = strided_slice_node;
|
||||||
|
for (size_t i = 1; i < input_rank; ++i) {
|
||||||
|
const auto unsqueezed_const = ngraph::opset8::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{constants_for_concat_2[i]});
|
||||||
|
const auto unsqueeze_axis = ngraph::opset8::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
|
||||||
|
const auto current_unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(unsqueezed_const, unsqueeze_axis);
|
||||||
|
concat_2_inputs_vec[i] = current_unsqueeze;
|
||||||
|
}
|
||||||
|
auto concat_2 = std::make_shared<ngraph::opset8::Concat>(concat_2_inputs_vec, 0);
|
||||||
|
|
||||||
|
const auto mul_const = ngraph::opset8::Constant::create(ngraph::element::f32, mul_const_shape, mul_const_value);
|
||||||
|
const auto mul = std::make_shared<ngraph::opset8::Multiply>(reshape_1, mul_const);
|
        auto reshape_2 = std::make_shared<ngraph::opset8::Reshape>(mul, concat_2, true);
        function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ reshape_2 }, ngraph::ParameterVector{ input });
        manager.register_pass<ngraph::pass::NearestNeighborUpsamplingFusion>();
    }
    {
        ngraph::opset8::Interpolate::InterpolateAttrs attrs;

        attrs.mode = ngraph::opset8::Interpolate::InterpolateMode::NEAREST;
        attrs.shape_calculation_mode = ngraph::opset8::Interpolate::ShapeCalcMode::SCALES;
        attrs.nearest_mode = ngraph::opset8::Interpolate::NearestMode::ROUND_PREFER_FLOOR;
        attrs.pads_begin = std::vector<size_t>{0};
        attrs.pads_end = std::vector<size_t>{0};
        attrs.antialias = false;
        attrs.coordinate_transformation_mode = ngraph::opset8::Interpolate::CoordinateTransformMode::HALF_PIXEL;
        attrs.cube_coeff = -0.75f;

        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, input_shape);
        auto sizes_node = ngraph::opset8::Constant::create(ngraph::element::i64, {new_spatial_shape.size()}, new_spatial_shape);
        auto scales_node = ngraph::opset8::Constant::create(ngraph::element::f32, {scales_as_floats.size()}, scales_as_floats);
        auto axes_node = ngraph::opset8::Constant::create(ngraph::element::i64, {2}, std::vector<int64_t>{1, 2});
        auto interpolate = std::make_shared<ngraph::opset8::Interpolate>(input, sizes_node, scales_node, axes_node, attrs);
        function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ interpolate }, ngraph::ParameterVector{ input });
    }
}

TEST_F(TransformationTestsF, NearestNeighborUpsamplingFusionSpatial3D1) {
    ngraph::Shape input_shape { 1, 130, 120, 85, 3 };
    size_t input_rank = input_shape.size();
    std::vector<int64_t> new_spatial_shape { 260, 360, 340 };
    std::vector<float> scales_as_floats { 2.0f, 3.0f, 4.0f };
    std::vector<int64_t> constants_for_concat_1 { 1, 130, 1, 120, 1, 85, 1, 3 };
    std::vector<int64_t> constants_for_concat_2 { 1, 260, 360, 340, 3 };
    ngraph::Shape mul_const_shape {1, 1, 2, 1, 3, 1, 4, 1};
    std::vector<float> mul_const_value(24, 1.0f);
    {
        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, input_shape);
        auto shape_node = std::make_shared<ngraph::opset8::ShapeOf>(input);

        auto sslice_begin = ngraph::opset8::Constant::create(ngraph::element::i64, {1}, std::vector<int64_t>{0});
        auto sslice_end = ngraph::opset8::Constant::create(ngraph::element::i64, {1}, std::vector<int64_t>{1});
        std::vector<int64_t> begin_mask = {0};
        std::vector<int64_t> end_mask = {0};
        auto strided_slice_node = std::make_shared<ngraph::opset8::StridedSlice>(shape_node, sslice_begin, sslice_end, begin_mask, end_mask);

        ngraph::OutputVector concat_1_inputs_vec(2 + 2 * (input_rank - 2));
        concat_1_inputs_vec[0] = strided_slice_node;
        for (size_t i = 1; i < 2 + 2 * (input_rank - 2); ++i) {
            const auto unsqueezed_const = ngraph::opset8::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{constants_for_concat_1[i]});
            const auto unsqueeze_axis = ngraph::opset8::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
            const auto current_unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(unsqueezed_const, unsqueeze_axis);
            concat_1_inputs_vec[i] = current_unsqueeze;
        }
        auto concat_1 = std::make_shared<ngraph::opset8::Concat>(concat_1_inputs_vec, 0);

        auto reshape_1 = std::make_shared<ngraph::opset8::Reshape>(input, concat_1, true);

        ngraph::OutputVector concat_2_inputs_vec(input_rank);
        concat_2_inputs_vec[0] = strided_slice_node;
        for (size_t i = 1; i < input_rank; ++i) {
            const auto unsqueezed_const = ngraph::opset8::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{constants_for_concat_2[i]});
            const auto unsqueeze_axis = ngraph::opset8::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
            const auto current_unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(unsqueezed_const, unsqueeze_axis);
            concat_2_inputs_vec[i] = current_unsqueeze;
        }
        auto concat_2 = std::make_shared<ngraph::opset8::Concat>(concat_2_inputs_vec, 0);

        const auto mul_const = ngraph::opset8::Constant::create(ngraph::element::f32, mul_const_shape, mul_const_value);
        const auto mul = std::make_shared<ngraph::opset8::Multiply>(reshape_1, mul_const);

        auto reshape_2 = std::make_shared<ngraph::opset8::Reshape>(mul, concat_2, true);
        function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ reshape_2 }, ngraph::ParameterVector{ input });
        manager.register_pass<ngraph::pass::NearestNeighborUpsamplingFusion>();
    }
    {
        ngraph::opset8::Interpolate::InterpolateAttrs attrs;

        attrs.mode = ngraph::opset8::Interpolate::InterpolateMode::NEAREST;
        attrs.shape_calculation_mode = ngraph::opset8::Interpolate::ShapeCalcMode::SCALES;
        attrs.nearest_mode = ngraph::opset8::Interpolate::NearestMode::ROUND_PREFER_FLOOR;
        attrs.pads_begin = std::vector<size_t>{0};
        attrs.pads_end = std::vector<size_t>{0};
        attrs.antialias = false;
        attrs.coordinate_transformation_mode = ngraph::opset8::Interpolate::CoordinateTransformMode::HALF_PIXEL;
        attrs.cube_coeff = -0.75f;

        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, input_shape);
        auto sizes_node = ngraph::opset8::Constant::create(ngraph::element::i64, {new_spatial_shape.size()}, new_spatial_shape);
        auto scales_node = ngraph::opset8::Constant::create(ngraph::element::f32, {scales_as_floats.size()}, scales_as_floats);
        auto axes_node = ngraph::opset8::Constant::create(ngraph::element::i64, {3}, std::vector<int64_t>{1, 2, 3});
        auto interpolate = std::make_shared<ngraph::opset8::Interpolate>(input, sizes_node, scales_node, axes_node, attrs);
        function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ interpolate }, ngraph::ParameterVector{ input });
    }
}
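For orientation, the shape flow built by the Spatial3D1 test above is: [1, 130, 120, 85, 3] -> Reshape -> [1, 130, 1, 120, 1, 85, 1, 3] -> Multiply by ones of shape [1, 1, 2, 1, 3, 1, 4, 1] (broadcast) -> [1, 130, 2, 120, 3, 85, 4, 3] -> Reshape -> [1, 260, 360, 340, 3]. Since 130 * 2 = 260, 120 * 3 = 360 and 85 * 4 = 340, this Reshape-Multiply-Reshape chain is exactly nearest-neighbor upsampling of the three spatial axes by scales (2, 3, 4), which is why the pass can replace it with a single Interpolate (mode NEAREST, axes {1, 2, 3}).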
@@ -50,7 +50,7 @@ TEST(TransformationTests, ConvBiasFusion) {
    for (auto & op : nGraph->get_ops()) {
        if (auto conv = std::dynamic_pointer_cast<ngraph::opset1::Convolution>(op)) {
            auto & rtInfo = conv->get_rt_info();
            rtInfo["PrimitivesPriority"] = std::make_shared<ngraph::VariantWrapper<std::string>>("test");
            rtInfo[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority("test");
            pp[op->get_friendly_name()] = "test";
        }
    }
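The replaced pair of lines above captures the runtime-info migration in this merge: attributes move from `Variant` wrappers stored under free-form string keys to typed attribute objects stored as `ov::Any` under the attribute's static type info. A minimal sketch of the new style, assuming the `ov::PrimitivesPriority` attribute used in the hunk (the helper name and include path are illustrative, not part of the change):

```cpp
#include <memory>
#include <string>

#include <openvino/core/node.hpp>
// Assumed location of ov::PrimitivesPriority; adjust to your OpenVINO version.
#include <transformations/rt_info/primitives_priority_attribute.hpp>

// Hypothetical helper: tag a node with an implementation priority and read it back.
void tag_priority(const std::shared_ptr<ov::Node>& node, const std::string& impl) {
    auto& rt = node->get_rt_info();  // std::map<std::string, ov::Any>
    // New style: the key is the attribute's type info, the value is the typed
    // attribute wrapped in ov::Any - no VariantWrapper involved.
    rt[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority(impl);

    auto it = rt.find(ov::PrimitivesPriority::get_type_info_static());
    if (it != rt.end()) {
        // ov::Any::as<T>() recovers the typed attribute.
        const auto& attr = it->second.as<ov::PrimitivesPriority>();
        (void)attr;  // attribute-specific accessors vary; see the attribute's header
    }
}
```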
@@ -0,0 +1,69 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include "single_layer_tests/experimental_detectron_detection_output.hpp"

using namespace ov::test;
using namespace ov::test::subgraph;

namespace {

const std::vector<float> score_threshold = { 0.01000000074505806f };

const std::vector<float> nms_threshold = { 0.2f };

// specifies the maximal delta of logarithms for width and height
const std::vector<float> max_delta_log_wh = { 2.0f };

// specifies the number of detected classes
const std::vector<int64_t> num_classes = { 2 };

// specifies the maximal number of detections per class
const std::vector<int64_t> post_nms_count = { 500 };

// specifies the maximal number of detections per image
const std::vector<size_t> max_detections_per_image = { 5 };

// a flag that specifies whether to delete background classes:
// `true` means background classes should be deleted,
// `false` means background classes should not be deleted.
const std::vector<bool> class_agnostic_box_regression = { true };

// specifies the deltas of weights
const std::vector<std::vector<float>> deltas_weights = { {10.0f, 10.0f, 5.0f, 5.0f} };

const std::vector<std::vector<InputShape>> inputShapes = {
    // inputRois / inputDeltas / inputScores / inputImInfos
    static_shapes_to_test_representation({{16, 4}, {16, 8}, {16, 2}, {1, 3}}),
    {
        {{-1, -1}, {{16, 4}, {16, 4}}},
        {{-1, -1}, {{16, 8}, {16, 8}}},
        {{-1, -1}, {{16, 2}, {16, 2}}},
        {{-1, -1}, {{1, 3}, {1, 3}}}
    },
    {
        {{{16, 32}, {4, 8}}, {{16, 4}, {16, 4}}},
        {{{16, 32}, {8, 16}}, {{16, 8}, {16, 8}}},
        {{{16, 32}, {2, 4}}, {{16, 2}, {16, 2}}},
        {{{1, 2}, {3, 6}}, {{1, 3}, {1, 3}}}
    }
};

INSTANTIATE_TEST_SUITE_P(smoke_ExperimentalDetectronDetectionOutput, ExperimentalDetectronDetectionOutputLayerTest,
        ::testing::Combine(
            ::testing::ValuesIn(inputShapes),
            ::testing::ValuesIn(score_threshold),
            ::testing::ValuesIn(nms_threshold),
            ::testing::ValuesIn(max_delta_log_wh),
            ::testing::ValuesIn(num_classes),
            ::testing::ValuesIn(post_nms_count),
            ::testing::ValuesIn(max_detections_per_image),
            ::testing::ValuesIn(class_agnostic_box_regression),
            ::testing::ValuesIn(deltas_weights),
            ::testing::Values(ov::element::Type_t::f32),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        ExperimentalDetectronDetectionOutputLayerTest::getTestCaseName);

} // namespace
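In these new test files each `InputShape` entry pairs a (possibly dynamic) partial shape with the list of static shapes the test actually infers with: `{{-1, -1}, {{16, 4}, {16, 4}}}` declares a rank-2 tensor with both dimensions dynamic and runs two inferences with concrete 16x4 data, while `{{16, 32}, {4, 8}}` bounds the dimensions to intervals instead. A minimal sketch of the two partial-shape forms, assuming the standard `ov::PartialShape`/`ov::Dimension` API (for orientation only, not part of the test):

```cpp
#include <openvino/core/partial_shape.hpp>

// {{-1, -1}, ...}: rank-2, both dimensions fully dynamic.
const ov::PartialShape fully_dynamic{ov::Dimension::dynamic(), ov::Dimension::dynamic()};

// {{{16, 32}, {4, 8}}, ...}: rank-2, dimensions bounded to [16, 32] and [4, 8].
const ov::PartialShape bounded{ov::Dimension(16, 32), ov::Dimension(4, 8)};
```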
@@ -0,0 +1,128 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include "single_layer_tests/experimental_detectron_generate_proposals_single_image.hpp"
#include "common_test_utils/data_utils.hpp"

using namespace ov::test;
using namespace ov::test::subgraph;

namespace {

const std::vector<float> min_size = { 0 };
const std::vector<float> nms_threshold = { 0.699999988079071 };
const std::vector<int64_t> post_nms_count = { 6 };
const std::vector<int64_t> pre_nms_count = { 1000 };

const std::vector<std::pair<std::string, std::vector<ov::runtime::Tensor>>> inputTensors = {
    {
        "empty",
        {
            // 3
            CommonTestUtils::create_tensor<float>(ov::element::f32, ov::Shape{3}, {1.0f, 1.0f, 1.0f}),
            // 36 x 4 = 144
            CommonTestUtils::create_tensor<float>(ov::element::f32, ov::Shape{36, 4}, {
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,

                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,

                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}),
            // 12 x 2 x 6 = 144
            CommonTestUtils::create_tensor<float>(ov::element::f32, ov::Shape{12, 2, 6}, {
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,

                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,

                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}),
            // 3 x 2 x 6 = 36
            CommonTestUtils::create_tensor<float>(ov::element::f32, ov::Shape{3, 2, 6}, {
                5.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 4.0f, 1.0f, 1.0f, 1.0f,
                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 8.0f, 1.0f})
        }
    },
    {
        "filled",
        {
            CommonTestUtils::create_tensor<float>(ov::element::f32, ov::Shape{3}, {150.0, 150.0, 1.0}),
            CommonTestUtils::create_tensor<float>(ov::element::f32, ov::Shape{36, 4}, {
                12.0, 68.0, 102.0, 123.0, 46.0, 80.0, 79.0, 128.0, 33.0, 71.0, 127.0, 86.0, 33.0, 56.0, 150.0, 73.0,
                5.0, 41.0, 93.0, 150.0, 74.0, 66.0, 106.0, 115.0, 17.0, 37.0, 87.0, 150.0, 31.0, 27.0, 150.0, 39.0,
                29.0, 23.0, 112.0, 123.0, 41.0, 37.0, 103.0, 150.0, 8.0, 46.0, 98.0, 111.0, 7.0, 69.0, 114.0, 150.0,
                70.0, 21.0, 150.0, 125.0, 54.0, 19.0, 132.0, 68.0, 62.0, 8.0, 150.0, 101.0, 57.0, 81.0, 150.0, 97.0,
                79.0, 29.0, 109.0, 130.0, 12.0, 63.0, 100.0, 150.0, 17.0, 33.0, 113.0, 150.0, 90.0, 78.0, 150.0, 111.0,
                47.0, 68.0, 150.0, 71.0, 66.0, 103.0, 111.0, 150.0, 4.0, 17.0, 112.0, 94.0, 12.0, 8.0, 119.0, 98.0,
                54.0, 56.0, 120.0, 150.0, 56.0, 29.0, 150.0, 31.0, 42.0, 3.0, 139.0, 92.0, 41.0, 65.0, 150.0, 130.0,
                49.0, 13.0, 143.0, 30.0, 40.0, 60.0, 150.0, 150.0, 23.0, 73.0, 24.0, 115.0, 56.0, 84.0, 107.0, 108.0,
                63.0, 8.0, 142.0, 125.0, 78.0, 37.0, 93.0, 144.0, 40.0, 34.0, 150.0, 46.0, 30.0, 21.0, 150.0, 120.0}),
            CommonTestUtils::create_tensor<float>(ov::element::f32, ov::Shape{12, 2, 6}, {
                9.062256, 10.883133, 9.8441105, 12.694285, 0.41781136, 8.749107, 14.990341, 6.587644, 1.4206103,
                13.299262, 12.432549, 2.736371, 0.22732796, 6.3361835, 12.268727, 2.1009045, 4.771589, 2.5131326,
                5.610736, 9.3604145, 4.27379, 8.317948, 0.60510135, 6.7446275, 1.0207708, 1.1352817, 1.5785321,
                1.718335, 1.8093798, 0.99247587, 1.3233583, 1.7432803, 1.8534478, 1.2593061, 1.7394226, 1.7686696,
                1.647999, 1.7611449, 1.3119122, 0.03007332, 1.1106564, 0.55669737, 0.2546148, 1.9181818, 0.7134989,
                2.0407224, 1.7211134, 1.8565536, 14.562747, 2.8786168, 0.5927796, 0.2064463, 7.6794515, 8.672126,
                10.139171, 8.002429, 7.002932, 12.6314945, 10.550842, 0.15784842, 0.3194304, 10.752157, 3.709805,
                11.628928, 0.7136225, 14.619964, 15.177284, 2.2824087, 15.381494, 0.16618137, 7.507227, 11.173228,
                0.4923559, 1.8227729, 1.4749299, 1.7833921, 1.2363617, -0.23659119, 1.5737582, 1.779316, 1.9828427,
                1.0482665, 1.4900246, 1.3563544, 1.5341306, 0.7634312, 4.6216766e-05, 1.6161222, 1.7512476, 1.9363779,
                0.9195784, 1.4906164, -0.03244795, 0.681073, 0.6192401, 1.8033613, 14.146055, 3.4043705, 15.292292,
                3.5295358, 11.138999, 9.952057, 5.633434, 12.114562, 9.427372, 12.384038, 9.583308, 8.427233,
                15.293704, 3.288159, 11.64898, 9.350885, 2.0037227, 13.523184, 4.4176426, 6.1057625, 14.400079,
                8.248259, 11.815807, 15.713364, 1.0023532, 1.3203261, 1.7100681, 0.7407832, 1.09448, 1.7188418,
                1.4412547, 1.4862992, 0.74790007, 0.31571656, 0.6398838, 2.0236106, 1.1869069, 1.7265586, 1.2624544,
                0.09934269, 1.3508598, 0.85212964, -0.38968498, 1.7059708, 1.6533034, 1.7400402, 1.8123854, -0.43063712}),
            CommonTestUtils::create_tensor<float>(ov::element::f32, ov::Shape{3, 2, 6}, {
                0.7719922, 0.35906568, 0.29054508, 0.18124384, 0.5604661, 0.84750974, 0.98948747, 0.009793862, 0.7184191,
                0.5560748, 0.6952493, 0.6732593, 0.3306898, 0.6790913, 0.41128764, 0.34593266, 0.94296855, 0.7348507,
                0.24478768, 0.94024557, 0.05405676, 0.06466125, 0.36244348, 0.07942984, 0.10619422, 0.09412837, 0.9053611,
                0.22870538, 0.9237487, 0.20986171, 0.5067282, 0.29709867, 0.53138554, 0.189101, 0.4786443, 0.88421875}),
        }
    }
};

const std::vector<std::vector<InputShape>> dynamicInputShape = {
    // im_info / anchors / deltas / scores
    static_shapes_to_test_representation({{3}, {36, 4}, {12, 2, 6}, {3, 2, 6}}),
    {
        {{-1}, {{3}}},
        {{-1, -1}, {{36, 4}}},
        {{-1, -1, -1}, {{12, 2, 6}}},
        {{-1, -1, -1}, {{3, 2, 6}}}
    },
    {
        {{{3, 6}}, {{3}}},
        {{{36, 72}, {4, 8}}, {{36, 4}}},
        {{{12, 24}, {2, 4}, {6, 12}}, {{12, 2, 6}}},
        {{{3, 6}, {2, 4}, {6, 12}}, {{3, 2, 6}}}
    }
};

INSTANTIATE_TEST_SUITE_P(
        smoke_ExperimentalDetectronGenerateProposalsSingleImageLayerTest,
        ExperimentalDetectronGenerateProposalsSingleImageLayerTest,
        ::testing::Combine(
            ::testing::ValuesIn(dynamicInputShape),
            ::testing::ValuesIn(min_size),
            ::testing::ValuesIn(nms_threshold),
            ::testing::ValuesIn(post_nms_count),
            ::testing::ValuesIn(pre_nms_count),
            ::testing::ValuesIn(inputTensors),
            ::testing::Values(ov::element::Type_t::f32),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        ExperimentalDetectronGenerateProposalsSingleImageLayerTest::getTestCaseName);
} // namespace
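A side note on literals such as `0.699999988079071` above: that is simply `0.7` rounded to the nearest representable float32 value and written back out in full, a common artifact when thresholds round-trip through single precision. A quick stand-alone check:

```cpp
#include <cstdio>

int main() {
    const float f = 0.7f;       // nearest float32 to 0.7
    std::printf("%.15f\n", f);  // prints 0.699999988079071
    return 0;
}
```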
@@ -0,0 +1,106 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include "single_layer_tests/experimental_detectron_prior_grid_generator.hpp"
#include "common_test_utils/data_utils.hpp"

using namespace ov::test;
using namespace ov::test::subgraph;

namespace {

const std::vector<ov::test::subgraph::ExperimentalDetectronPriorGridGeneratorTestParam> params = {
    // flatten = true (output tensor is 2D)
    {
        {true, 0, 0, 4.0f, 4.0f},
        ov::test::static_shapes_to_test_representation({{3, 4}, {1, 16, 4, 5}, {1, 3, 100, 200}})
    },
    // task #72587
    //{
    //    {true, 3, 6, 64.0f, 64.0f},
    //    ov::test::static_shapes_to_test_representation({{3, 4}, {1, 16, 100, 100}, {1, 3, 100, 200}})
    //},
    {
        {true, 0, 0, 4.0f, 4.0f},
        {
            // priors
            {{-1, -1}, {{3, 4}, {3, 4}}},
            // feature_map
            {{-1, -1, -1, -1}, {{1, 16, 4, 5}, {1, 16, 100, 100}}},
            // im_data
            {{-1, -1, -1, -1}, {{1, 3, 100, 200}, {1, 3, 100, 200}}}
        }
    },
    // flatten = false (output tensor is 4D)
    {
        {false, 0, 0, 8.0f, 8.0f},
        ov::test::static_shapes_to_test_representation({{3, 4}, {1, 16, 3, 7}, {1, 3, 100, 200}})
    },
    // task #72587
    //{
    //    {false, 5, 3, 32.0f, 32.0f},
    //    ov::test::static_shapes_to_test_representation({{3, 4}, {1, 16, 100, 100}, {1, 3, 100, 200}})
    //},
    {
        {false, 0, 0, 8.0f, 8.0f},
        {
            // priors
            {{-1, -1}, {{3, 4}, {3, 4}}},
            // feature_map
            {{-1, -1, -1, -1}, {{1, 16, 3, 7}, {1, 16, 100, 100}}},
            // im_data
            {{-1, -1, -1, -1}, {{1, 3, 100, 200}, {1, 3, 100, 200}}}
        }
    }
};

std::vector<std::pair<std::string, std::vector<ov::runtime::Tensor>>> inputTensors = {
    {
        "test#1",
        {
            CommonTestUtils::create_tensor<float>(
                ov::element::f32,
                ov::Shape{3, 4},
                {-24.5, -12.5, 24.5, 12.5, -16.5, -16.5, 16.5, 16.5, -12.5, -24.5, 12.5, 24.5})
        }
    },
    {
        "test#2",
        {
            CommonTestUtils::create_tensor<float>(
                ov::element::f32,
                ov::Shape{3, 4},
                {-44.5, -24.5, 44.5, 24.5, -32.5, -32.5, 32.5, 32.5, -24.5, -44.5, 24.5, 44.5})
        }
    },
    {
        "test#3",
        {
            CommonTestUtils::create_tensor<float>(
                ov::element::f32,
                ov::Shape{3, 4},
                {-364.5, -184.5, 364.5, 184.5, -256.5, -256.5, 256.5, 256.5, -180.5, -360.5, 180.5, 360.5})
        }
    },
    {
        "test#4",
        {
            CommonTestUtils::create_tensor<float>(
                ov::element::f32,
                ov::Shape{3, 4},
                {-180.5, -88.5, 180.5, 88.5, -128.5, -128.5, 128.5, 128.5, -92.5, -184.5, 92.5, 184.5})
        }
    }
};

INSTANTIATE_TEST_SUITE_P(smoke_ExperimentalDetectronPriorGridGenerator, ExperimentalDetectronPriorGridGeneratorLayerTest,
        ::testing::Combine(
            ::testing::ValuesIn(params),
            ::testing::ValuesIn(inputTensors),
            ::testing::Values(ov::element::Type_t::f32),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        ExperimentalDetectronPriorGridGeneratorLayerTest::getTestCaseName);

} // namespace
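Each test param above packs what I read as `{flatten, h, w, stride_x, stride_y}` together with the input shapes (priors, feature map, image data). Conceptually the operation tiles the input priors over the feature-map grid, shifting each prior to the center of every cell in image coordinates; `flatten` only chooses between a 2D `[grid_h * grid_w * num_priors, 4]` and a 4D `[grid_h, grid_w, num_priors, 4]` output layout, and the zero `h`/`w` values mean the full feature-map extent is used. A rough reference sketch of that computation as I read the operation's spec (not the plugin kernel):

```cpp
#include <array>
#include <vector>

// Replicates every prior box at each feature-map cell, shifted by the cell
// center scaled by the strides (sketch; assumes [x0, y0, x1, y1] boxes).
std::vector<std::array<float, 4>> prior_grid(
        const std::vector<std::array<float, 4>>& priors,
        size_t featmap_h, size_t featmap_w,
        float stride_x, float stride_y) {
    std::vector<std::array<float, 4>> out;
    out.reserve(featmap_h * featmap_w * priors.size());
    for (size_t h = 0; h < featmap_h; ++h) {
        for (size_t w = 0; w < featmap_w; ++w) {
            const float cx = (w + 0.5f) * stride_x;  // cell center in image coords
            const float cy = (h + 0.5f) * stride_y;
            for (const auto& p : priors)
                out.push_back({p[0] + cx, p[1] + cy, p[2] + cx, p[3] + cy});
        }
    }
    return out;  // the flatten=true view: [featmap_h * featmap_w * num_priors, 4]
}
```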
@@ -351,6 +351,172 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_SameLowerPad_CeilRounding_5Dinput, Poolin
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        PoolingLayerTest::getTestCaseName);

/* ========== Max Pooling V8 ========== */

const std::vector<std::vector<size_t>> dilation = {{1, 1}, {2, 2}};
const std::vector<std::vector<size_t>> dilation3D = {{1, 1, 1}, {2, 2, 2}};

/* ========== Explicit Pad Floor Rounding ========== */
const auto maxPoolv8_ExplicitPad_FloorRounding_Params = ::testing::Combine(
        ::testing::ValuesIn(kernels),
        ::testing::ValuesIn(strides),
        ::testing::ValuesIn(dilation),
        ::testing::ValuesIn(padBegins),
        ::testing::ValuesIn(padEnds),
        ::testing::Values(ngraph::op::RoundingType::FLOOR),
        ::testing::Values(ngraph::op::PadType::EXPLICIT)
);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_ExplicitPad_FloorRounding, MaxPoolingV8LayerTest,
        ::testing::Combine(
            maxPoolv8_ExplicitPad_FloorRounding_Params,
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        MaxPoolingV8LayerTest::getTestCaseName);

/* ========== Same Upper Pad Floor Rounding ========== */
const auto maxPoolv8_SameUpperPad_FloorRounding_Params = ::testing::Combine(
        ::testing::ValuesIn(kernels),
        ::testing::ValuesIn(strides),
        ::testing::ValuesIn(dilation),
        ::testing::ValuesIn(padBegins),
        ::testing::ValuesIn(padEnds),
        ::testing::Values(ngraph::op::RoundingType::FLOOR),
        ::testing::Values(ngraph::op::PadType::SAME_UPPER)
);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_SameUpperPad_FloorRounding, MaxPoolingV8LayerTest,
        ::testing::Combine(
            maxPoolv8_SameUpperPad_FloorRounding_Params,
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        MaxPoolingV8LayerTest::getTestCaseName);

/* ========== Same Lower Pad Floor Rounding ========== */
const auto maxPoolv8_SameLowerPad_FloorRounding_Params = ::testing::Combine(
        ::testing::ValuesIn(kernels),
        ::testing::ValuesIn(strides),
        ::testing::ValuesIn(dilation),
        ::testing::ValuesIn(padBegins),
        ::testing::ValuesIn(padEnds),
        ::testing::Values(ngraph::op::RoundingType::FLOOR),
        ::testing::Values(ngraph::op::PadType::SAME_LOWER)
);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_SameLowerPad_FloorRounding, MaxPoolingV8LayerTest,
        ::testing::Combine(
            maxPoolv8_SameLowerPad_FloorRounding_Params,
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        MaxPoolingV8LayerTest::getTestCaseName);

/* ========== Explicit Pad Floor Rounding 5D input ========== */
const auto maxPoolv8_ExplicitPad_FloorRounding_5Dinput_Params = ::testing::Combine(
        ::testing::ValuesIn(kernel3D),
        ::testing::ValuesIn(strides3D),
        ::testing::Values(dilation3D[0]),
        ::testing::ValuesIn(padBegins3D),
        ::testing::ValuesIn(padEnds3D),
        ::testing::Values(ngraph::op::RoundingType::FLOOR),
        ::testing::Values(ngraph::op::PadType::EXPLICIT)
);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_ExplicitPad_FloorRounding_5Dinput, MaxPoolingV8LayerTest,
        ::testing::Combine(
            maxPoolv8_ExplicitPad_FloorRounding_5Dinput_Params,
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(std::vector<size_t>({32, 32, 2, 2, 2})),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        MaxPoolingV8LayerTest::getTestCaseName);

/* ========== Same Upper Pad Floor Rounding 5D input ========== */
const auto maxPoolv8_SameUpperPad_FloorRounding_5Dinput_Params = ::testing::Combine(
        ::testing::ValuesIn(kernel3D),
        ::testing::ValuesIn(strides3D),
        ::testing::ValuesIn(dilation3D),
        ::testing::ValuesIn(padBegins3D),
        ::testing::ValuesIn(padEnds3D),
        ::testing::Values(ngraph::op::RoundingType::FLOOR),
        ::testing::Values(ngraph::op::PadType::SAME_UPPER)
);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_SameUpperPad_FloorRounding_5Dinput, MaxPoolingV8LayerTest,
        ::testing::Combine(
            maxPoolv8_SameUpperPad_FloorRounding_5Dinput_Params,
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(std::vector<size_t>({32, 32, 2, 2, 2})),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        MaxPoolingV8LayerTest::getTestCaseName);

/* ========== Same Lower Pad Ceil Rounding 5D input ========== */
const auto maxPoolv8_SameLowerPad_CeilRounding_5Dinput_Params = ::testing::Combine(
        ::testing::ValuesIn(kernel3D),
        ::testing::ValuesIn(strides3D),
        ::testing::ValuesIn(dilation3D),
        ::testing::ValuesIn(padBegins3D),
        ::testing::ValuesIn(padEnds3D),
        ::testing::Values(ngraph::op::RoundingType::CEIL),
        ::testing::Values(ngraph::op::PadType::SAME_LOWER)
);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_SameLowerPad_CeilRounding_5Dinput, MaxPoolingV8LayerTest,
        ::testing::Combine(
            maxPoolv8_SameLowerPad_CeilRounding_5Dinput_Params,
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(std::vector<size_t>({32, 32, 2, 2, 2})),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        MaxPoolingV8LayerTest::getTestCaseName);

/* ========== Explicit Pad Ceil Rounding ========== */
const auto maxPoolv8_ExplicitPad_CeilRounding_Params = ::testing::Combine(
        ::testing::ValuesIn(kernels),
        ::testing::ValuesIn(strides),
        ::testing::ValuesIn(dilation),
        ::testing::ValuesIn(padBegins),
        ::testing::ValuesIn(padEnds),
        ::testing::Values(ngraph::op::RoundingType::CEIL),
        ::testing::Values(ngraph::op::PadType::EXPLICIT)
);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_ExplicitPad_CeilRounding, MaxPoolingV8LayerTest,
        ::testing::Combine(
            maxPoolv8_ExplicitPad_CeilRounding_Params,
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        MaxPoolingV8LayerTest::getTestCaseName);

/* ========== Avg and Max Pooling Cases ========== */
/* ========== Valid Pad Rounding Not Applicable ========== */
const auto allPools_ValidPad_Params = ::testing::Combine(
@@ -376,4 +542,27 @@ INSTANTIATE_TEST_SUITE_P(smoke_MAX_and_AVGPool_ValidPad, PoolingLayerTest,
            ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        PoolingLayerTest::getTestCaseName);

const auto maxPoolv8_ValidPad_Params = ::testing::Combine(
        ::testing::ValuesIn(kernels),
        ::testing::ValuesIn(strides),
        ::testing::ValuesIn(dilation),
        ::testing::Values(std::vector<size_t>({0, 0})),
        ::testing::Values(std::vector<size_t>({0, 0})),
        ::testing::Values(ngraph::op::RoundingType::FLOOR),  // placeholder value; rounding type is not applicable for the VALID pad type
        ::testing::Values(ngraph::op::PadType::VALID)
);

INSTANTIATE_TEST_SUITE_P(smoke_MAXPoolv8_ValidPad, MaxPoolingV8LayerTest,
        ::testing::Combine(
            maxPoolv8_ValidPad_Params,
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(InferenceEngine::Layout::ANY),
            ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        MaxPoolingV8LayerTest::getTestCaseName);

} // namespace
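What is new in these suites relative to the v1 pooling tests above is the `dilation` parameter of MaxPool-8: with dilation d, the window taps sit at offsets 0, d, ..., (k - 1) * d, so a kernel of size k spans (k - 1) * d + 1 input elements. A tiny 1-D reference sketch of that behavior (illustrative only, not the plugin kernel; valid padding, floor rounding):

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<float> max_pool_1d(const std::vector<float>& x,
                               size_t kernel, size_t stride, size_t dilation) {
    std::vector<float> y;
    const size_t span = (kernel - 1) * dilation + 1;  // effective window extent
    for (size_t start = 0; start + span <= x.size(); start += stride) {
        float m = x[start];
        for (size_t k = 1; k < kernel; ++k)
            m = std::max(m, x[start + k * dilation]);  // every d-th element
        y.push_back(m);
    }
    return y;
}
```

With `dilation = {1, 1}` the operator degenerates to the familiar MaxPool-1 behavior, which is why both values are exercised.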
@@ -226,6 +226,20 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
    InferenceEngine::Precision::FP32
};

const auto defConvSpecificParams_Smoke = ::testing::Combine(
    ::testing::ValuesIn(std::vector<bool> {
        true,
        false
    }),  // with_bilinear_interpolation_pad
    ::testing::ValuesIn(std::vector<bool> {
        true,
        false
    }),  // with_modulation
    ::testing::ValuesIn(std::vector<OffsetType> {
        OffsetType::REAL_MISC,
    })  // offset type
);

const auto defConvSpecificParams = ::testing::Combine(
    ::testing::ValuesIn(std::vector<bool> {
        true,
@@ -251,8 +265,8 @@ std::vector<ngraph::op::PadType> padTypes = {

const auto spParams1 = ::testing::Combine(
    ::testing::Values(1),  // batch
    ::testing::Values(std::vector<size_t>({68, 68})),  // in. spat. shape
    ::testing::Values(std::vector<size_t>({34, 34})),  // in. spat. shape
    ::testing::Values(std::vector<size_t>({66, 66})),  // off. spat. shape
    ::testing::Values(std::vector<size_t>({32, 32})),  // off. spat. shape
    ::testing::Values(std::vector<size_t>({3, 3})),  // ker. spat. shape
    ::testing::ValuesIn(padTypes),  // pad. type
    ::testing::Values(std::vector<ptrdiff_t>({0, 0})),  // pad. begin
@@ -308,6 +322,52 @@ const auto chParamsMulGr = ::testing::Combine(
    ::testing::ValuesIn(std::vector<size_t> {3, 7}),  // in. ch. per gr.
    ::testing::ValuesIn(std::vector<size_t> {3, 7}));  // out. ch. per gr.

const auto params1_Smoke = ::testing::Combine(
    ::testing::Combine(
        spParams1,
        chParamsSingleGr,
        defConvSpecificParams_Smoke,
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
    ::testing::ValuesIn(filterCPUInfoForDevice()));
const auto params2_Smoke = ::testing::Combine(
    ::testing::Combine(
        spParams2,
        chParamsSingleGr,
        defConvSpecificParams_Smoke,
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
    ::testing::ValuesIn(filterCPUInfoForDevice()));
const auto params3_Smoke = ::testing::Combine(
    ::testing::Combine(
        spParams3,
        chParamsSingleGr,
        defConvSpecificParams_Smoke,
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
    ::testing::ValuesIn(filterCPUInfoForDevice()));
const auto params4_Smoke = ::testing::Combine(
    ::testing::Combine(
        spParams4,
        chParamsSingleGr,
        defConvSpecificParams_Smoke,
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
    ::testing::ValuesIn(filterCPUInfoForDevice()));
const auto params5_Smoke = ::testing::Combine(
    ::testing::Combine(
        spParams4,
        chParamsMulGr,
        defConvSpecificParams_Smoke,
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
    ::testing::ValuesIn(filterCPUInfoForDevice(true)));
INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest1, DefConvLayerCPUTest, params1_Smoke, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest2, DefConvLayerCPUTest, params2_Smoke, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest3, DefConvLayerCPUTest, params3_Smoke, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest4, DefConvLayerCPUTest, params4_Smoke, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest5, DefConvLayerCPUTest, params5_Smoke, DefConvLayerCPUTest::getTestCaseName);

const auto params1 = ::testing::Combine(
    ::testing::Combine(
        spParams1,
@@ -348,10 +408,11 @@ const auto params5 = ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
    ::testing::ValuesIn(filterCPUInfoForDevice(true)));
INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest1, DefConvLayerCPUTest, params1, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(DefConvLayoutTest1, DefConvLayerCPUTest, params1, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest2, DefConvLayerCPUTest, params2, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(DefConvLayoutTest2, DefConvLayerCPUTest, params2, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest3, DefConvLayerCPUTest, params3, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(DefConvLayoutTest3, DefConvLayerCPUTest, params3, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest4, DefConvLayerCPUTest, params4, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(DefConvLayoutTest4, DefConvLayerCPUTest, params4, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest5, DefConvLayerCPUTest, params5, DefConvLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(DefConvLayoutTest5, DefConvLayerCPUTest, params5, DefConvLayerCPUTest::getTestCaseName);

} // namespace
} // namespace CPULayerTestsDefinitions
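For readers new to this layer: the `with_modulation` flag and the offset inputs exercised above come from deformable convolution (the v2 formulation): every kernel tap samples the input at its regular grid position plus a learned fractional offset, via bilinear interpolation, and with modulation each sample is additionally scaled by a learned mask. A single-channel reference sketch of one output value under those definitions (illustrative only; names and data layout are mine, not the test's or the plugin's):

```cpp
#include <cmath>
#include <vector>

// Bilinear read from a single-channel H x W map; out-of-range taps contribute 0.
float bilinear(const std::vector<float>& img, int H, int W, float y, float x) {
    const int y0 = static_cast<int>(std::floor(y));
    const int x0 = static_cast<int>(std::floor(x));
    float v = 0.0f;
    for (int dy = 0; dy <= 1; ++dy)
        for (int dx = 0; dx <= 1; ++dx) {
            const int yy = y0 + dy, xx = x0 + dx;
            if (yy < 0 || yy >= H || xx < 0 || xx >= W)
                continue;
            v += (1.0f - std::fabs(y - yy)) * (1.0f - std::fabs(x - xx)) * img[yy * W + xx];
        }
    return v;
}

// One output value of a K x K deformable convolution (stride 1, no padding):
// y(p) = sum_k w_k * m_k * x(p + p_k + delta_p_k), where delta_p_k are the
// learned offsets for this output position and m_k the modulation values
// (all ones when with_modulation is false).
float defconv_at(const std::vector<float>& x, int H, int W,
                 const std::vector<float>& w,     // K * K kernel weights
                 const std::vector<float>& offs,  // K * K (dy, dx) offset pairs
                 const std::vector<float>& mask,  // K * K modulation values
                 int K, int oy, int ox) {
    float acc = 0.0f;
    for (int ky = 0; ky < K; ++ky)
        for (int kx = 0; kx < K; ++kx) {
            const int k = ky * K + kx;
            const float sy = oy + ky + offs[2 * k];      // fractional sample row
            const float sx = ox + kx + offs[2 * k + 1];  // fractional sample col
            acc += w[k] * mask[k] * bilinear(x, H, W, sy, sx);
        }
    return acc;
}
```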
@@ -289,7 +289,12 @@ std::vector<CPUSpecificParams> filterCPUInfoForDevice() {
    return resCPUParams;
}
/* ========== */
const std::vector<ngraph::op::v4::Interpolate::CoordinateTransformMode> coordinateTransformModes = {
const std::vector<ngraph::op::v4::Interpolate::CoordinateTransformMode> coordinateTransformModes_Smoke = {
        ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL,
        ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC,
};

const std::vector<ngraph::op::v4::Interpolate::CoordinateTransformMode> coordinateTransformModes_Full = {
        ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN,
        ngraph::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL,
        ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL,
@@ -297,12 +302,13 @@ const std::vector<ngraph::op::v4::Interpolate::CoordinateTransformMode> coordina
        ngraph::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS,
};

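The coordinate transformation modes being split into `_Smoke` and `_Full` sets above define how a destination pixel index maps back to a source coordinate before interpolation. For intuition, a sketch of the mappings following the Interpolate-4 / ONNX Resize definitions (this is my reading of the spec, not the plugin code; `scale` is len_resized / len_original):

```cpp
#include <cstdint>
#include <ngraph/op/interpolate.hpp>  // assumed header for the v4 enum

float src_coord(float x_d, float scale, int64_t len_orig, int64_t len_resized,
                ngraph::op::v4::Interpolate::CoordinateTransformMode mode) {
    using M = ngraph::op::v4::Interpolate::CoordinateTransformMode;
    switch (mode) {
    case M::HALF_PIXEL:           return (x_d + 0.5f) / scale - 0.5f;
    case M::PYTORCH_HALF_PIXEL:   return len_resized > 1 ? (x_d + 0.5f) / scale - 0.5f : 0.0f;
    case M::ASYMMETRIC:           return x_d / scale;
    case M::TF_HALF_PIXEL_FOR_NN: return (x_d + 0.5f) / scale;
    case M::ALIGN_CORNERS:        return len_resized == 1 ? 0.0f
                                      : x_d * (len_orig - 1) / static_cast<float>(len_resized - 1);
    }
    return x_d / scale;  // unreachable fallback
}
```

The smoke set keeps the two cheapest, most common mappings; the remaining modes move to the nightly-sized `_Full` set.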
const std::vector<ngraph::op::v4::Interpolate::ShapeCalcMode> shapeCalculationMode = {
|
const std::vector<ngraph::op::v4::Interpolate::NearestMode> nearestModes_Smoke = {
|
||||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
ngraph::op::v4::Interpolate::NearestMode::SIMPLE,
|
||||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
|
||||||
|
ngraph::op::v4::Interpolate::NearestMode::FLOOR,
|
||||||
};
|
};
|
||||||
|
|
||||||
const std::vector<ngraph::op::v4::Interpolate::NearestMode> nearestModes = {
|
const std::vector<ngraph::op::v4::Interpolate::NearestMode> nearestModes_Full = {
|
||||||
ngraph::op::v4::Interpolate::NearestMode::SIMPLE,
|
ngraph::op::v4::Interpolate::NearestMode::SIMPLE,
|
||||||
ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
|
ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
|
||||||
ngraph::op::v4::Interpolate::NearestMode::FLOOR,
|
ngraph::op::v4::Interpolate::NearestMode::FLOOR,
|
||||||
@ -351,7 +357,7 @@ const std::vector<std::vector<int64_t>> defaultAxes4D = {
|
|||||||
{0, 1, 2, 3}
|
{0, 1, 2, 3}
|
||||||
};
|
};
|
||||||
|
|
||||||
const std::vector<ShapeParams> shapeParams4D = {
|
const std::vector<ShapeParams> shapeParams4D_Smoke = {
|
||||||
ShapeParams{
|
ShapeParams{
|
||||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||||
InputShape{{}, {{1, 11, 4, 4}}},
|
InputShape{{}, {{1, 11, 4, 4}}},
|
||||||
@ -366,20 +372,6 @@ const std::vector<ShapeParams> shapeParams4D = {
|
|||||||
{{1, 11, 5, 6}},
|
{{1, 11, 5, 6}},
|
||||||
defaultAxes4D.front()
|
defaultAxes4D.front()
|
||||||
},
|
},
|
||||||
ShapeParams{
|
|
||||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
|
||||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}}},
|
|
||||||
ngraph::helpers::InputLayerType::CONSTANT,
|
|
||||||
{{1.f, 1.f, 1.25f, 1.5f}},
|
|
||||||
defaultAxes4D.front()
|
|
||||||
},
|
|
||||||
ShapeParams{
|
|
||||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
|
||||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {1, 11, 5, 5}}},
|
|
||||||
ngraph::helpers::InputLayerType::CONSTANT,
|
|
||||||
{{1, 11, 5, 6}},
|
|
||||||
defaultAxes4D.front()
|
|
||||||
},
|
|
||||||
ShapeParams{
|
ShapeParams{
|
||||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||||
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}}},
|
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}}},
|
||||||
@ -396,10 +388,36 @@ const std::vector<ShapeParams> shapeParams4D = {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
const auto interpolateCasesNN = ::testing::Combine(
|
const std::vector<ShapeParams> shapeParams4D_Full = {
|
||||||
|
ShapeParams{
|
||||||
|
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||||
|
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}}},
|
||||||
|
ngraph::helpers::InputLayerType::CONSTANT,
|
||||||
|
{{1.f, 1.f, 1.25f, 1.5f}},
|
||||||
|
defaultAxes4D.front()
|
||||||
|
},
|
||||||
|
ShapeParams{
|
||||||
|
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
||||||
|
InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {1, 11, 5, 5}}},
|
||||||
|
ngraph::helpers::InputLayerType::CONSTANT,
|
||||||
|
{{1, 11, 5, 6}},
|
||||||
|
defaultAxes4D.front()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const auto interpolateCasesNN_Smoke = ::testing::Combine(
|
||||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::nearest),
|
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::nearest),
|
||||||
::testing::ValuesIn(coordinateTransformModes),
|
::testing::ValuesIn(coordinateTransformModes_Smoke),
|
||||||
::testing::ValuesIn(nearestModes),
|
::testing::ValuesIn(nearestModes_Smoke),
|
||||||
|
::testing::ValuesIn(antialias),
|
||||||
|
::testing::ValuesIn(pads4D),
|
||||||
|
::testing::ValuesIn(pads4D),
|
||||||
|
::testing::ValuesIn(cubeCoefs));
|
||||||
|
|
||||||
|
const auto interpolateCasesNN_Full = ::testing::Combine(
|
||||||
|
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::nearest),
|
||||||
|
::testing::ValuesIn(coordinateTransformModes_Full),
|
||||||
|
::testing::ValuesIn(nearestModes_Full),
|
||||||
::testing::ValuesIn(antialias),
|
::testing::ValuesIn(antialias),
|
||||||
::testing::ValuesIn(pads4D),
|
::testing::ValuesIn(pads4D),
|
||||||
::testing::ValuesIn(pads4D),
|
::testing::ValuesIn(pads4D),
|
||||||
@ -407,8 +425,18 @@ const auto interpolateCasesNN = ::testing::Combine(
|
|||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN_Layout_Test, InterpolateLayerCPUTest,
|
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN_Layout_Test, InterpolateLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
interpolateCasesNN,
|
interpolateCasesNN_Smoke,
|
||||||
::testing::ValuesIn(shapeParams4D),
|
::testing::ValuesIn(shapeParams4D_Smoke),
|
||||||
|
::testing::Values(ElementType::f32),
|
||||||
|
::testing::ValuesIn(filterCPUInfoForDevice()),
|
||||||
|
::testing::ValuesIn(interpolateFusingParamsSet),
|
||||||
|
::testing::ValuesIn(filterAdditionalConfig())),
|
||||||
|
InterpolateLayerCPUTest::getTestCaseName);
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(InterpolateNN_Layout_Test, InterpolateLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
interpolateCasesNN_Full,
|
||||||
|
::testing::ValuesIn(shapeParams4D_Full),
|
||||||
::testing::Values(ElementType::f32),
|
::testing::Values(ElementType::f32),
|
||||||
::testing::ValuesIn(filterCPUInfoForDevice()),
|
::testing::ValuesIn(filterCPUInfoForDevice()),
|
||||||
::testing::ValuesIn(interpolateFusingParamsSet),
|
::testing::ValuesIn(interpolateFusingParamsSet),
|
||||||
@ -434,7 +462,7 @@ const std::vector<ShapeParams> shapeParams4D_fixed_C = {
|
|||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN_Layout_PerChannelFuse_Test, InterpolateLayerCPUTest,
|
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN_Layout_PerChannelFuse_Test, InterpolateLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
interpolateCasesNN,
|
interpolateCasesNN_Smoke,
|
||||||
::testing::ValuesIn(shapeParams4D_fixed_C),
|
::testing::ValuesIn(shapeParams4D_fixed_C),
|
||||||
::testing::Values(ElementType::f32),
|
::testing::Values(ElementType::f32),
|
||||||
::testing::ValuesIn(filterCPUInfoForDevice()),
|
::testing::ValuesIn(filterCPUInfoForDevice()),
|
||||||
@ -442,9 +470,28 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN_Layout_PerChannelFuse_Test, Interpo
|
|||||||
::testing::ValuesIn(filterAdditionalConfig())),
|
::testing::ValuesIn(filterAdditionalConfig())),
|
||||||
InterpolateLayerCPUTest::getTestCaseName);
|
InterpolateLayerCPUTest::getTestCaseName);
|
||||||
|
|
||||||
const auto interpolateCasesLinearOnnx = ::testing::Combine(
|
INSTANTIATE_TEST_SUITE_P(InterpolateNN_Layout_PerChannelFuse_Test, InterpolateLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
interpolateCasesNN_Full,
|
||||||
|
::testing::ValuesIn(shapeParams4D_fixed_C),
|
||||||
|
::testing::Values(ElementType::f32),
|
||||||
|
::testing::ValuesIn(filterCPUInfoForDevice()),
|
||||||
|
::testing::Values(fusingFakeQuantizePerChannelRelu),
|
||||||
|
::testing::ValuesIn(filterAdditionalConfig())),
|
||||||
|
InterpolateLayerCPUTest::getTestCaseName);
|
||||||
|
|
||||||
|
const auto interpolateCasesLinearOnnx_Smoke = ::testing::Combine(
|
||||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::linear_onnx),
|
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::linear_onnx),
|
||||||
::testing::ValuesIn(coordinateTransformModes),
|
::testing::ValuesIn(coordinateTransformModes_Smoke),
|
||||||
|
::testing::ValuesIn(defNearestModes),
|
||||||
|
::testing::ValuesIn(antialias),
|
||||||
|
::testing::ValuesIn(pads4D),
|
||||||
|
::testing::ValuesIn(pads4D),
|
||||||
|
::testing::ValuesIn(cubeCoefs));
|
||||||
|
|
||||||
|
const auto interpolateCasesLinearOnnx_Full = ::testing::Combine(
|
||||||
|
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::linear_onnx),
|
||||||
|
::testing::ValuesIn(coordinateTransformModes_Full),
|
||||||
::testing::ValuesIn(defNearestModes),
|
::testing::ValuesIn(defNearestModes),
|
||||||
::testing::ValuesIn(antialias),
|
::testing::ValuesIn(antialias),
|
||||||
::testing::ValuesIn(pads4D),
|
::testing::ValuesIn(pads4D),
|
||||||
@ -453,17 +500,36 @@ const auto interpolateCasesLinearOnnx = ::testing::Combine(
|
|||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinearOnnx_Layout_Test, InterpolateLayerCPUTest,
|
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinearOnnx_Layout_Test, InterpolateLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
interpolateCasesLinearOnnx,
|
interpolateCasesLinearOnnx_Smoke,
|
||||||
::testing::ValuesIn(shapeParams4D),
|
::testing::ValuesIn(shapeParams4D_Smoke),
|
||||||
::testing::Values(ElementType::f32),
|
::testing::Values(ElementType::f32),
|
||||||
::testing::ValuesIn(filterCPUInfoForDevice()),
|
::testing::ValuesIn(filterCPUInfoForDevice()),
|
||||||
::testing::ValuesIn(interpolateFusingParamsSet),
|
::testing::ValuesIn(interpolateFusingParamsSet),
|
||||||
::testing::ValuesIn(filterAdditionalConfig())),
|
::testing::ValuesIn(filterAdditionalConfig())),
|
||||||
InterpolateLayerCPUTest::getTestCaseName);
|
InterpolateLayerCPUTest::getTestCaseName);
|
||||||
|
|
||||||
const auto interpolateCasesLinear = ::testing::Combine(
|
INSTANTIATE_TEST_SUITE_P(InterpolateLinearOnnx_Layout_Test, InterpolateLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
interpolateCasesLinearOnnx_Full,
|
||||||
|
::testing::ValuesIn(shapeParams4D_Full),
|
||||||
|
::testing::Values(ElementType::f32),
|
||||||
|
::testing::ValuesIn(filterCPUInfoForDevice()),
|
||||||
|
::testing::ValuesIn(interpolateFusingParamsSet),
|
||||||
|
::testing::ValuesIn(filterAdditionalConfig())),
|
||||||
|
InterpolateLayerCPUTest::getTestCaseName);
|
||||||
|
|
||||||
|
const auto interpolateCasesLinear_Smoke = ::testing::Combine(
|
||||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::linear),
|
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::linear),
|
||||||
::testing::ValuesIn(coordinateTransformModes),
|
::testing::ValuesIn(coordinateTransformModes_Smoke),
|
||||||
|
::testing::ValuesIn(defNearestModes),
|
||||||
|
::testing::ValuesIn(antialias),
|
||||||
|
::testing::ValuesIn(pads4D),
|
||||||
|
::testing::ValuesIn(pads4D),
|
||||||
|
::testing::ValuesIn(cubeCoefs));
|
||||||
|
|
||||||
|
const auto interpolateCasesLinear_Full = ::testing::Combine(
|
||||||
|
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::linear),
|
||||||
|
::testing::ValuesIn(coordinateTransformModes_Full),
|
||||||
::testing::ValuesIn(defNearestModes),
|
::testing::ValuesIn(defNearestModes),
|
||||||
::testing::ValuesIn(antialias),
|
::testing::ValuesIn(antialias),
|
||||||
::testing::ValuesIn(pads4D),
|
::testing::ValuesIn(pads4D),
|
||||||
@ -472,17 +538,36 @@ const auto interpolateCasesLinear = ::testing::Combine(
|
|||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinear_Layout_Test, InterpolateLayerCPUTest,
|
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinear_Layout_Test, InterpolateLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
interpolateCasesLinear,
|
interpolateCasesLinear_Smoke,
|
||||||
::testing::ValuesIn(shapeParams4D),
|
::testing::ValuesIn(shapeParams4D_Smoke),
|
||||||
::testing::Values(ElementType::f32),
|
::testing::Values(ElementType::f32),
|
||||||
::testing::ValuesIn(filterCPUInfoForDevice()),
|
::testing::ValuesIn(filterCPUInfoForDevice()),
|
||||||
::testing::ValuesIn(interpolateFusingParamsSet),
|
::testing::ValuesIn(interpolateFusingParamsSet),
|
||||||
::testing::ValuesIn(filterAdditionalConfig())),
|
::testing::ValuesIn(filterAdditionalConfig())),
|
||||||
InterpolateLayerCPUTest::getTestCaseName);
|
InterpolateLayerCPUTest::getTestCaseName);
|
||||||
|
|
||||||
const auto interpolateCasesCubic = ::testing::Combine(
|
INSTANTIATE_TEST_SUITE_P(InterpolateLinear_Layout_Test, InterpolateLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
interpolateCasesLinear_Full,
|
||||||
|
::testing::ValuesIn(shapeParams4D_Full),
|
||||||
|
::testing::Values(ElementType::f32),
|
||||||
|
::testing::ValuesIn(filterCPUInfoForDevice()),
|
||||||
|
::testing::ValuesIn(interpolateFusingParamsSet),
|
||||||
|
::testing::ValuesIn(filterAdditionalConfig())),
|
||||||
|
InterpolateLayerCPUTest::getTestCaseName);
|
||||||
|
|
||||||
|
const auto interpolateCasesCubic_Smoke = ::testing::Combine(
|
||||||
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::cubic),
|
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::cubic),
|
||||||
::testing::ValuesIn(coordinateTransformModes),
|
::testing::ValuesIn(coordinateTransformModes_Smoke),
|
||||||
|
::testing::ValuesIn(defNearestModes),
|
||||||
|
::testing::ValuesIn(antialias),
|
||||||
|
::testing::ValuesIn(pads4D),
|
||||||
|
::testing::ValuesIn(pads4D),
|
||||||
|
::testing::ValuesIn(cubeCoefs));
|
||||||
|
|
||||||
|
const auto interpolateCasesCubic_Full = ::testing::Combine(
|
||||||
|
::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::cubic),
|
||||||
|
::testing::ValuesIn(coordinateTransformModes_Full),
|
||||||
::testing::ValuesIn(defNearestModes),
|
::testing::ValuesIn(defNearestModes),
|
||||||
::testing::ValuesIn(antialias),
|
::testing::ValuesIn(antialias),
|
||||||
::testing::ValuesIn(pads4D),
|
::testing::ValuesIn(pads4D),
|
||||||
@ -491,8 +576,18 @@ const auto interpolateCasesCubic = ::testing::Combine(
|
|||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateCubic_Layout_Test, InterpolateLayerCPUTest,
|
INSTANTIATE_TEST_SUITE_P(smoke_InterpolateCubic_Layout_Test, InterpolateLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
interpolateCasesCubic,
|
interpolateCasesCubic_Smoke,
|
||||||
::testing::ValuesIn(shapeParams4D),
|
::testing::ValuesIn(shapeParams4D_Smoke),
|
||||||
|
::testing::Values(ElementType::f32),
|
||||||
|
::testing::ValuesIn(filterCPUInfoForDevice()),
|
||||||
|
::testing::ValuesIn(interpolateFusingParamsSet),
|
||||||
|
::testing::ValuesIn(filterAdditionalConfig())),
|
||||||
|
InterpolateLayerCPUTest::getTestCaseName);
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(InterpolateCubic_Layout_Test, InterpolateLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
interpolateCasesCubic_Full,
|
||||||
|
::testing::ValuesIn(shapeParams4D_Full),
|
||||||
::testing::Values(ElementType::f32),
|
::testing::Values(ElementType::f32),
|
||||||
::testing::ValuesIn(filterCPUInfoForDevice()),
|
::testing::ValuesIn(filterCPUInfoForDevice()),
|
||||||
::testing::ValuesIn(interpolateFusingParamsSet),
|
::testing::ValuesIn(interpolateFusingParamsSet),
|
||||||
@ -526,7 +621,7 @@ const std::vector<std::vector<int64_t>> defaultAxes5D = {
|
|||||||
{0, 1, 2, 3, 4}
|
{0, 1, 2, 3, 4}
|
||||||
};
|
};
|
||||||
|
|
||||||
const std::vector<ShapeParams> shapeParams5D = {
|
const std::vector<ShapeParams> shapeParams5D_Smoke = {
|
||||||
ShapeParams{
|
ShapeParams{
|
||||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||||
InputShape{{}, {{1, 11, 4, 4, 4}}},
|
InputShape{{}, {{1, 11, 4, 4, 4}}},
|
||||||
@ -541,20 +636,6 @@ const std::vector<ShapeParams> shapeParams5D = {
|
|||||||
{{1, 11, 5, 6, 2}},
|
{{1, 11, 5, 6, 2}},
|
||||||
defaultAxes5D.front()
|
defaultAxes5D.front()
|
||||||
},
|
},
|
||||||
ShapeParams{
|
|
||||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
|
||||||
InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}}},
|
|
||||||
ngraph::helpers::InputLayerType::CONSTANT,
|
|
||||||
{{1.f, 1.f, 1.25f, 1.5f, 0.5f}},
|
|
||||||
defaultAxes5D.front()
|
|
||||||
},
|
|
||||||
ShapeParams{
|
|
||||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
|
|
||||||
InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {1, 11, 5, 5, 8}}},
|
|
||||||
ngraph::helpers::InputLayerType::CONSTANT,
|
|
||||||
{{1, 11, 5, 6, 4}},
|
|
||||||
defaultAxes5D.front()
|
|
||||||
},
|
|
||||||
ShapeParams{
|
ShapeParams{
|
||||||
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
|
||||||
InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}}},
|
InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}}},
|
||||||
@@ -571,10 +652,35 @@ const std::vector<ShapeParams> shapeParams5D = {
     },
 };
 
-const auto interpolateCasesLinearOnnx5D = ::testing::Combine(
+const std::vector<ShapeParams> shapeParams5D_Full = {
+    ShapeParams{
+        ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
+        InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}}},
+        ngraph::helpers::InputLayerType::CONSTANT,
+        {{1.f, 1.f, 1.25f, 1.5f, 0.5f}},
+        defaultAxes5D.front()
+    },
+    ShapeParams{
+        ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
+        InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {1, 11, 5, 5, 8}}},
+        ngraph::helpers::InputLayerType::CONSTANT,
+        {{1, 11, 5, 6, 4}},
+        defaultAxes5D.front()
+    }
+};
+
+const auto interpolateCasesLinearOnnx5D_Smoke = ::testing::Combine(
         ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::linear_onnx),
-        ::testing::ValuesIn(coordinateTransformModes),
-        ::testing::ValuesIn(nearestModes),
+        ::testing::ValuesIn(coordinateTransformModes_Smoke),
+        ::testing::ValuesIn(defNearestModes),
+        ::testing::ValuesIn(antialias),
+        ::testing::ValuesIn(pads5D),
+        ::testing::ValuesIn(pads5D),
+        ::testing::ValuesIn(cubeCoefs));
+const auto interpolateCasesLinearOnnx5D_Full = ::testing::Combine(
+        ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::linear_onnx),
+        ::testing::ValuesIn(coordinateTransformModes_Full),
+        ::testing::ValuesIn(defNearestModes),
         ::testing::ValuesIn(antialias),
        ::testing::ValuesIn(pads5D),
        ::testing::ValuesIn(pads5D),
@@ -582,18 +688,37 @@ const auto interpolateCasesLinearOnnx5D = ::testing::Combine(
 
 INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinearOnnx5D_Layout_Test, InterpolateLayerCPUTest,
         ::testing::Combine(
-            interpolateCasesLinearOnnx5D,
-            ::testing::ValuesIn(shapeParams5D),
+            interpolateCasesLinearOnnx5D_Smoke,
+            ::testing::ValuesIn(shapeParams5D_Smoke),
             ::testing::Values(ElementType::f32),
             ::testing::ValuesIn(filterCPUInfoForDevice5D()),
             ::testing::ValuesIn(interpolateFusingParamsSet),
             ::testing::ValuesIn(filterAdditionalConfig())),
         InterpolateLayerCPUTest::getTestCaseName);
 
-const auto interpolateCasesNN5D = ::testing::Combine(
+INSTANTIATE_TEST_SUITE_P(InterpolateLinearOnnx5D_Layout_Test, InterpolateLayerCPUTest,
+        ::testing::Combine(
+            interpolateCasesLinearOnnx5D_Full,
+            ::testing::ValuesIn(shapeParams5D_Full),
+            ::testing::Values(ElementType::f32),
+            ::testing::ValuesIn(filterCPUInfoForDevice5D()),
+            ::testing::ValuesIn(interpolateFusingParamsSet),
+            ::testing::ValuesIn(filterAdditionalConfig())),
+        InterpolateLayerCPUTest::getTestCaseName);
+
+const auto interpolateCasesNN5D_Smoke = ::testing::Combine(
         ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::nearest),
-        ::testing::ValuesIn(coordinateTransformModes),
-        ::testing::ValuesIn(defNearestModes),
+        ::testing::ValuesIn(coordinateTransformModes_Smoke),
+        ::testing::ValuesIn(nearestModes_Smoke),
+        ::testing::ValuesIn(antialias),
+        ::testing::ValuesIn(pads5D),
+        ::testing::ValuesIn(pads5D),
+        ::testing::ValuesIn(cubeCoefs));
+
+const auto interpolateCasesNN5D_Full = ::testing::Combine(
+        ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::nearest),
+        ::testing::ValuesIn(coordinateTransformModes_Full),
+        ::testing::ValuesIn(nearestModes_Full),
         ::testing::ValuesIn(antialias),
         ::testing::ValuesIn(pads5D),
         ::testing::ValuesIn(pads5D),
@@ -601,8 +726,18 @@ const auto interpolateCasesNN5D = ::testing::Combine(
 
 INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN5D_Layout_Test, InterpolateLayerCPUTest,
         ::testing::Combine(
-            interpolateCasesNN5D,
-            ::testing::ValuesIn(shapeParams5D),
+            interpolateCasesNN5D_Smoke,
+            ::testing::ValuesIn(shapeParams5D_Smoke),
+            ::testing::Values(ElementType::f32),
+            ::testing::ValuesIn(filterCPUInfoForDevice5D()),
+            ::testing::ValuesIn(interpolateFusingParamsSet),
+            ::testing::ValuesIn(filterAdditionalConfig())),
+        InterpolateLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(InterpolateNN5D_Layout_Test, InterpolateLayerCPUTest,
+        ::testing::Combine(
+            interpolateCasesNN5D_Full,
+            ::testing::ValuesIn(shapeParams5D_Full),
             ::testing::Values(ElementType::f32),
             ::testing::ValuesIn(filterCPUInfoForDevice5D()),
             ::testing::ValuesIn(interpolateFusingParamsSet),
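Every interpolate and pad suite in the hunks above and below is split the same way: a trimmed `_Smoke` parameter set is instantiated under a `smoke_`-prefixed suite, and the exhaustive set is renamed `_Full` and instantiated without the prefix, so a test runner can select the quick tier by name filter. A minimal sketch of the split, assuming plain googletest; the fixture and value sets below are hypothetical, not taken from this diff:

    #include <gtest/gtest.h>
    #include <vector>

    // Hypothetical stand-in for PadLayerCPUTest / InterpolateLayerCPUTest.
    class SplitDemoTest : public ::testing::TestWithParam<int> {};

    TEST_P(SplitDemoTest, Runs) { SUCCEED(); }

    const std::vector<int> params_Smoke = {1, 2};           // trimmed quick subset
    const std::vector<int> params_Full  = {0, 1, 2, 3, 4};  // exhaustive set

    // A runner can keep only the quick tier with, e.g., --gtest_filter=smoke_*.
    INSTANTIATE_TEST_SUITE_P(smoke_SplitDemo, SplitDemoTest, ::testing::ValuesIn(params_Smoke));
    INSTANTIATE_TEST_SUITE_P(SplitDemo, SplitDemoTest, ::testing::ValuesIn(params_Full));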
@@ -102,7 +102,7 @@ const std::vector<ElementType> inputPrecisions = {
     ElementType::i8
 };
 
-const std::vector<float> argPadValue = {0.f, 1.f, 2.5f, -1.f};
+const std::vector<float> argPadValue = {0.f, 2.5f, -1.f};
 
 const std::vector<ngraph::helpers::PadMode> padMode = {
     ngraph::helpers::PadMode::EDGE,
@@ -112,14 +112,23 @@ const std::vector<ngraph::helpers::PadMode> padMode = {
 
 /* *======================* Static Shapes Tests 4D *======================* */
 
-const std::vector<std::vector<int64_t>> padsBegin4DConstBlocked = {{0, 0, 0, 0}, {0, 0, 1, 3}, {2, 16, 1, 0}, {0, 0, 2, 0}};
-const std::vector<std::vector<int64_t>> padsEnd4DConstBlocked = {{0, 0, 0, 0}, {0, 0, 2, 1}, {2, 0, 0, 1}, {1, 32, 2, 0}};
+const std::vector<std::vector<int64_t>> padsBegin4DConstBlocked_Smoke = {{0, 0, 1, 3}, {2, 16, 1, 0}};
+const std::vector<std::vector<int64_t>> padsEnd4DConstBlocked_Smoke = {{0, 0, 2, 1}, {2, 0, 0, 1}};
 
-const std::vector<std::vector<int64_t>> padsBegin4DBlocked = {{0, 0, 0, 0}, {0, 0, 1, 3}, {2, 0, 1, 0}, {0, 0, 2, 0}};
-const std::vector<std::vector<int64_t>> padsEnd4DBlocked = {{0, 0, 0, 0}, {0, 0, 2, 1}, {2, 0, 0, 1}, {1, 0, 2, 0}};
+const std::vector<std::vector<int64_t>> padsBegin4DBlocked_Smoke = {{0, 0, 1, 3}, {2, 0, 1, 0}};
+const std::vector<std::vector<int64_t>> padsEnd4DBlocked_Smoke = {{0, 0, 2, 1}, {2, 0, 0, 1}};
 
-const std::vector<std::vector<int64_t>> padsBegin4D = {{0, 0, 0, 0}, {0, 1, 1, 1}, {0, 2, 1, 0}, {0, 0, 0, 1}};
-const std::vector<std::vector<int64_t>> padsEnd4D = {{0, 0, 0, 0}, {0, 2, 1, 1}, {0, 0, 2, 0}, {1, 1, 0, 0}};
+const std::vector<std::vector<int64_t>> padsBegin4D_Smoke = {{0, 1, 1, 1}, {0, 2, 1, 0}};
+const std::vector<std::vector<int64_t>> padsEnd4D_Smoke = {{0, 2, 1, 1}, {0, 0, 2, 0}};
 
+const std::vector<std::vector<int64_t>> padsBegin4DConstBlocked_Full = {{0, 0, 0, 0}, {0, 0, 1, 3}, {2, 16, 1, 0}, {0, 0, 2, 0}};
+const std::vector<std::vector<int64_t>> padsEnd4DConstBlocked_Full = {{0, 0, 0, 0}, {0, 0, 2, 1}, {2, 0, 0, 1}, {1, 32, 2, 0}};
+
+const std::vector<std::vector<int64_t>> padsBegin4DBlocked_Full = {{0, 0, 0, 0}, {0, 0, 1, 3}, {2, 0, 1, 0}, {0, 0, 2, 0}};
+const std::vector<std::vector<int64_t>> padsEnd4DBlocked_Full = {{0, 0, 0, 0}, {0, 0, 2, 1}, {2, 0, 0, 1}, {1, 0, 2, 0}};
+
+const std::vector<std::vector<int64_t>> padsBegin4D_Full = {{0, 0, 0, 0}, {0, 1, 1, 1}, {0, 2, 1, 0}, {0, 0, 0, 1}};
+const std::vector<std::vector<int64_t>> padsEnd4D_Full = {{0, 0, 0, 0}, {0, 2, 1, 1}, {0, 0, 2, 0}, {1, 1, 0, 0}};
+
 const std::vector<CPUSpecificParams> CPUParams4DBlocked = {
     cpuParams_nChw16c,
@@ -132,8 +141,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5}})),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin4DConstBlocked),
-                ::testing::ValuesIn(padsEnd4DConstBlocked),
+                ::testing::ValuesIn(padsBegin4DConstBlocked_Smoke),
+                ::testing::ValuesIn(padsEnd4DConstBlocked_Smoke),
                 ::testing::ValuesIn(argPadValue),
                 ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
                 ::testing::ValuesIn(CPUParams4DBlocked)),
@@ -146,8 +155,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5}})),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin4D),
-                ::testing::ValuesIn(padsEnd4D),
+                ::testing::ValuesIn(padsBegin4D_Smoke),
+                ::testing::ValuesIn(padsEnd4D_Smoke),
                 ::testing::ValuesIn(argPadValue),
                 ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
                 ::testing::Values(cpuParams_nhwc)),
@@ -160,8 +169,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 10, 5}})),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin4DBlocked),
-                ::testing::ValuesIn(padsEnd4DBlocked),
+                ::testing::ValuesIn(padsBegin4DBlocked_Smoke),
+                ::testing::ValuesIn(padsEnd4DBlocked_Smoke),
                 ::testing::Values(0),
                 ::testing::ValuesIn(padMode),
                 ::testing::ValuesIn(CPUParams4DBlocked)),
@@ -174,8 +183,64 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 10, 5}})),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin4DBlocked),
-                ::testing::ValuesIn(padsEnd4DBlocked),
+                ::testing::ValuesIn(padsBegin4DBlocked_Smoke),
+                ::testing::ValuesIn(padsEnd4DBlocked_Smoke),
+                ::testing::Values(0),
+                ::testing::ValuesIn(padMode),
+                ::testing::Values(cpuParams_nhwc)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPad4DConstBlocked,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5}})),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin4DConstBlocked_Full),
+                ::testing::ValuesIn(padsEnd4DConstBlocked_Full),
+                ::testing::ValuesIn(argPadValue),
+                ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
+                ::testing::ValuesIn(CPUParams4DBlocked)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPad4DConst,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5}})),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin4D_Full),
+                ::testing::ValuesIn(padsEnd4D_Full),
+                ::testing::ValuesIn(argPadValue),
+                ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
+                ::testing::Values(cpuParams_nhwc)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPad4DBlocked,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 10, 5}})),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin4DBlocked_Full),
+                ::testing::ValuesIn(padsEnd4DBlocked_Full),
+                ::testing::Values(0),
+                ::testing::ValuesIn(padMode),
+                ::testing::ValuesIn(CPUParams4DBlocked)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPad4D,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 10, 5}})),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin4DBlocked_Full),
+                ::testing::ValuesIn(padsEnd4DBlocked_Full),
                 ::testing::Values(0),
                 ::testing::ValuesIn(padMode),
                 ::testing::Values(cpuParams_nhwc)),
@@ -208,8 +273,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(inputShapesDynamic4D),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin4D),
-                ::testing::ValuesIn(padsEnd4D),
+                ::testing::ValuesIn(padsBegin4D_Smoke),
+                ::testing::ValuesIn(padsEnd4D_Smoke),
                 ::testing::ValuesIn(argPadValue),
                 ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
                 ::testing::ValuesIn(CPUParams4DDynamic)),
@@ -222,8 +287,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::Values(inputShapesDynamic4D[1]),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin4DConstBlocked),
-                ::testing::ValuesIn(padsEnd4DConstBlocked),
+                ::testing::ValuesIn(padsBegin4DConstBlocked_Smoke),
+                ::testing::ValuesIn(padsEnd4DConstBlocked_Smoke),
                 ::testing::ValuesIn(argPadValue),
                 ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
                 ::testing::ValuesIn(CPUParams4DBlocked)),
@@ -236,8 +301,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(inputShapesDynamic4D),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin4D),
-                ::testing::ValuesIn(padsEnd4D),
+                ::testing::ValuesIn(padsBegin4D_Smoke),
+                ::testing::ValuesIn(padsEnd4D_Smoke),
                 ::testing::Values(0),
                 ::testing::ValuesIn(padMode),
                 ::testing::ValuesIn(CPUParams4DDynamic)),
@@ -250,8 +315,64 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::Values(inputShapesDynamic4D[1]),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin4DBlocked),
-                ::testing::ValuesIn(padsEnd4DBlocked),
+                ::testing::ValuesIn(padsBegin4DBlocked_Smoke),
+                ::testing::ValuesIn(padsEnd4DBlocked_Smoke),
+                ::testing::Values(0),
+                ::testing::ValuesIn(padMode),
+                ::testing::ValuesIn(CPUParams4DBlocked)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPadDynamic4DConst,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(inputShapesDynamic4D),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin4D_Full),
+                ::testing::ValuesIn(padsEnd4D_Full),
+                ::testing::ValuesIn(argPadValue),
+                ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
+                ::testing::ValuesIn(CPUParams4DDynamic)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPadDynamic4DConstBlocked,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::Values(inputShapesDynamic4D[1]),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin4DConstBlocked_Full),
+                ::testing::ValuesIn(padsEnd4DConstBlocked_Full),
+                ::testing::ValuesIn(argPadValue),
+                ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
+                ::testing::ValuesIn(CPUParams4DBlocked)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPadDynamic4D,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(inputShapesDynamic4D),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin4D_Full),
+                ::testing::ValuesIn(padsEnd4D_Full),
+                ::testing::Values(0),
+                ::testing::ValuesIn(padMode),
+                ::testing::ValuesIn(CPUParams4DDynamic)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPadDynamic4DBlocked,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::Values(inputShapesDynamic4D[1]),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin4DBlocked_Full),
+                ::testing::ValuesIn(padsEnd4DBlocked_Full),
                 ::testing::Values(0),
                 ::testing::ValuesIn(padMode),
                 ::testing::ValuesIn(CPUParams4DBlocked)),
@@ -262,14 +383,23 @@ INSTANTIATE_TEST_SUITE_P(
 
 /* *======================* Static Shapes Tests 5D *======================* */
 
-const std::vector<std::vector<int64_t>> padsBegin5DConstBlocked = {{0, 0, 0, 0, 0}, {0, 0, 1, 1, 0}, {2, 32, 1, 1, 0}, {0, 0, 1, 3, 1}, {0, 0, 0, 1, 0}};
-const std::vector<std::vector<int64_t>> padsEnd5DConstBlocked = {{0, 0, 0, 0, 0}, {1, 16, 1, 1, 0}, {0, 0, 0, 1, 0}, {0, 0, 0, 1, 1}, {0, 0, 1, 0, 1}};
+const std::vector<std::vector<int64_t>> padsBegin5DConstBlocked_Smoke = {{0, 0, 1, 1, 0}, {2, 32, 1, 1, 0}};
+const std::vector<std::vector<int64_t>> padsEnd5DConstBlocked_Smoke = {{1, 16, 1, 1, 0}, {0, 0, 0, 1, 0}};
 
-const std::vector<std::vector<int64_t>> padsBegin5DBlocked = {{0, 0, 0, 0, 0}, {0, 0, 1, 1, 0}, {2, 0, 1, 1, 0}, {0, 0, 1, 3, 1}, {0, 0, 0, 1, 0}};
-const std::vector<std::vector<int64_t>> padsEnd5DBlocked = {{0, 0, 0, 0, 0}, {1, 0, 1, 1, 0}, {0, 0, 0, 1, 0}, {0, 0, 0, 1, 1}, {0, 0, 1, 0, 1}};
+const std::vector<std::vector<int64_t>> padsBegin5DBlocked_Smoke = {{0, 0, 1, 1, 0}, {2, 0, 1, 1, 0}};
+const std::vector<std::vector<int64_t>> padsEnd5DBlocked_Smoke = {{1, 0, 1, 1, 0}, {0, 0, 0, 1, 0}};
 
-const std::vector<std::vector<int64_t>> padsBegin5D = {{0, 0, 0, 0, 0}, {0, 0, 2, 0, 0}, {1, 1, 1, 1, 0}, {2, 0, 1, 0, 1}, {0, 2, 1, 3, 1}};
-const std::vector<std::vector<int64_t>> padsEnd5D = {{0, 0, 0, 0, 0}, {0, 0, 1, 0, 0}, {1, 0, 1, 1, 2}, {2, 2, 0, 1, 0}, {1, 1, 2, 0, 1}};
+const std::vector<std::vector<int64_t>> padsBegin5D_Smoke = {{0, 0, 2, 0, 0}, {1, 1, 1, 1, 0}};
+const std::vector<std::vector<int64_t>> padsEnd5D_Smoke = {{0, 0, 1, 0, 0}, {1, 0, 1, 1, 2}};
 
+const std::vector<std::vector<int64_t>> padsBegin5DConstBlocked_Full = {{0, 0, 0, 0, 0}, {0, 0, 1, 1, 0}, {2, 32, 1, 1, 0}, {0, 0, 1, 3, 1}, {0, 0, 0, 1, 0}};
+const std::vector<std::vector<int64_t>> padsEnd5DConstBlocked_Full = {{0, 0, 0, 0, 0}, {1, 16, 1, 1, 0}, {0, 0, 0, 1, 0}, {0, 0, 0, 1, 1}, {0, 0, 1, 0, 1}};
+
+const std::vector<std::vector<int64_t>> padsBegin5DBlocked_Full = {{0, 0, 0, 0, 0}, {0, 0, 1, 1, 0}, {2, 0, 1, 1, 0}, {0, 0, 1, 3, 1}, {0, 0, 0, 1, 0}};
+const std::vector<std::vector<int64_t>> padsEnd5DBlocked_Full = {{0, 0, 0, 0, 0}, {1, 0, 1, 1, 0}, {0, 0, 0, 1, 0}, {0, 0, 0, 1, 1}, {0, 0, 1, 0, 1}};
+
+const std::vector<std::vector<int64_t>> padsBegin5D_Full = {{0, 0, 0, 0, 0}, {0, 0, 2, 0, 0}, {1, 1, 1, 1, 0}, {2, 0, 1, 0, 1}, {0, 2, 1, 3, 1}};
+const std::vector<std::vector<int64_t>> padsEnd5D_Full = {{0, 0, 0, 0, 0}, {0, 0, 1, 0, 0}, {1, 0, 1, 1, 2}, {2, 2, 0, 1, 0}, {1, 1, 2, 0, 1}};
+
 const std::vector<CPUSpecificParams> CPUParams5DBlocked = {
     cpuParams_nCdhw16c,
@@ -282,8 +412,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin5DConstBlocked),
-                ::testing::ValuesIn(padsEnd5DConstBlocked),
+                ::testing::ValuesIn(padsBegin5DConstBlocked_Smoke),
+                ::testing::ValuesIn(padsEnd5DConstBlocked_Smoke),
                 ::testing::ValuesIn(argPadValue),
                 ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
                 ::testing::ValuesIn(CPUParams5DBlocked)),
@@ -296,8 +426,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin5D),
-                ::testing::ValuesIn(padsEnd5D),
+                ::testing::ValuesIn(padsBegin5D_Smoke),
+                ::testing::ValuesIn(padsEnd5D_Smoke),
                 ::testing::ValuesIn(argPadValue),
                 ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
                 ::testing::Values(cpuParams_ndhwc)),
@@ -310,8 +440,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin5DBlocked),
-                ::testing::ValuesIn(padsEnd5DBlocked),
+                ::testing::ValuesIn(padsBegin5DBlocked_Smoke),
+                ::testing::ValuesIn(padsEnd5DBlocked_Smoke),
                 ::testing::Values(0),
                 ::testing::ValuesIn(padMode),
                 ::testing::ValuesIn(CPUParams5DBlocked)),
@@ -324,8 +454,64 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin5D),
-                ::testing::ValuesIn(padsEnd5D),
+                ::testing::ValuesIn(padsBegin5D_Smoke),
+                ::testing::ValuesIn(padsEnd5D_Smoke),
+                ::testing::Values(0),
+                ::testing::ValuesIn(padMode),
+                ::testing::Values(cpuParams_ndhwc)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPad5DConstBlocked,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin5DConstBlocked_Full),
+                ::testing::ValuesIn(padsEnd5DConstBlocked_Full),
+                ::testing::ValuesIn(argPadValue),
+                ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
+                ::testing::ValuesIn(CPUParams5DBlocked)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPad5DConst,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin5D_Full),
+                ::testing::ValuesIn(padsEnd5D_Full),
+                ::testing::ValuesIn(argPadValue),
+                ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
+                ::testing::Values(cpuParams_ndhwc)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPad5DBlocked,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin5DBlocked_Full),
+                ::testing::ValuesIn(padsEnd5DBlocked_Full),
+                ::testing::Values(0),
+                ::testing::ValuesIn(padMode),
+                ::testing::ValuesIn(CPUParams5DBlocked)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPad5D,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin5D_Full),
+                ::testing::ValuesIn(padsEnd5D_Full),
                 ::testing::Values(0),
                 ::testing::ValuesIn(padMode),
                 ::testing::Values(cpuParams_ndhwc)),
@@ -358,8 +544,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(inputShapesDynamic5D),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin5D),
-                ::testing::ValuesIn(padsEnd5D),
+                ::testing::ValuesIn(padsBegin5D_Smoke),
+                ::testing::ValuesIn(padsEnd5D_Smoke),
                 ::testing::ValuesIn(argPadValue),
                 ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
                 ::testing::ValuesIn(CPUParams5DDynamic)),
@@ -372,8 +558,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::Values(inputShapesDynamic5D[1]),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin5DConstBlocked),
-                ::testing::ValuesIn(padsEnd5DConstBlocked),
+                ::testing::ValuesIn(padsBegin5DConstBlocked_Smoke),
+                ::testing::ValuesIn(padsEnd5DConstBlocked_Smoke),
                 ::testing::ValuesIn(argPadValue),
                 ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
                 ::testing::ValuesIn(CPUParams5DBlocked)),
@@ -386,8 +572,8 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::ValuesIn(inputShapesDynamic5D),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin5D),
-                ::testing::ValuesIn(padsEnd5D),
+                ::testing::ValuesIn(padsBegin5D_Smoke),
+                ::testing::ValuesIn(padsEnd5D_Smoke),
                 ::testing::Values(0),
                 ::testing::ValuesIn(padMode),
                 ::testing::ValuesIn(CPUParams5DDynamic)),
@@ -400,8 +586,64 @@ INSTANTIATE_TEST_SUITE_P(
         ::testing::Combine(
                 ::testing::Values(inputShapesDynamic5D[1]),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(padsBegin5DBlocked),
-                ::testing::ValuesIn(padsEnd5DBlocked),
+                ::testing::ValuesIn(padsBegin5DBlocked_Smoke),
+                ::testing::ValuesIn(padsEnd5DBlocked_Smoke),
+                ::testing::Values(0),
+                ::testing::ValuesIn(padMode),
+                ::testing::ValuesIn(CPUParams5DBlocked)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPadDynamic5DConst,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(inputShapesDynamic5D),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin5D_Full),
+                ::testing::ValuesIn(padsEnd5D_Full),
+                ::testing::ValuesIn(argPadValue),
+                ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
+                ::testing::ValuesIn(CPUParams5DDynamic)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPadDynamic5DConstBlocked,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::Values(inputShapesDynamic5D[1]),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin5DConstBlocked_Full),
+                ::testing::ValuesIn(padsEnd5DConstBlocked_Full),
+                ::testing::ValuesIn(argPadValue),
+                ::testing::Values(ngraph::helpers::PadMode::CONSTANT),
+                ::testing::ValuesIn(CPUParams5DBlocked)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPadDynamic5D,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(inputShapesDynamic5D),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin5D_Full),
+                ::testing::ValuesIn(padsEnd5D_Full),
+                ::testing::Values(0),
+                ::testing::ValuesIn(padMode),
+                ::testing::ValuesIn(CPUParams5DDynamic)),
+        PadLayerCPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        CPUPadDynamic5DBlocked,
+        PadLayerCPUTest,
+        ::testing::Combine(
+                ::testing::Values(inputShapesDynamic5D[1]),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::ValuesIn(padsBegin5DBlocked_Full),
+                ::testing::ValuesIn(padsEnd5DBlocked_Full),
                 ::testing::Values(0),
                 ::testing::ValuesIn(padMode),
                 ::testing::ValuesIn(CPUParams5DBlocked)),
@@ -20,6 +20,11 @@ using poolLayerCpuTestParamsSet = std::tuple<LayerTestsDefinitions::poolSpecific
                                              CPUSpecificParams,
                                              fusingSpecificParams>;
 
+using maxPoolV8LayerCpuTestParamsSet = std::tuple<LayerTestsDefinitions::maxPoolV8SpecificParams,
+                                                  InputShape,
+                                                  ElementType,
+                                                  CPUSpecificParams>;
+
 class PoolingLayerCPUTest : public testing::WithParamInterface<poolLayerCpuTestParamsSet>,
                             virtual public SubgraphBaseTest, public CpuTestWithFusing {
 public:
@@ -68,8 +73,6 @@ public:
         results << CPUTestsBase::getTestCaseName(cpuParams);
         results << CpuTestWithFusing::getTestCaseName(fusingParams);
         return results.str();
-
-        return results.str();
     }
 
 protected:
@@ -98,7 +101,10 @@ protected:
         if (selectedType.empty()) {
             selectedType = getPrimitiveType();
         }
-        selectedType = selectedType + "_" + InferenceEngine::details::convertPrecision(inPrc).name();
+        if (isInt8)
+            selectedType = selectedType + "_I8";
+        else
+            selectedType = makeSelectedTypeStr(selectedType, inPrc);
+
         init_input_shapes({inputShapes});
 
@@ -124,11 +130,87 @@ protected:
     }
 };
 
+class MaxPoolingV8LayerCPUTest : public testing::WithParamInterface<maxPoolV8LayerCpuTestParamsSet>,
+                                 virtual public SubgraphBaseTest, public CPUTestsBase {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<maxPoolV8LayerCpuTestParamsSet>& obj) {
+        LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet;
+        InputShape inputShapes;
+        ElementType inPrc;
+        CPUSpecificParams cpuParams;
+        std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = obj.param;
+
+        std::vector<size_t> kernel, stride, dilation;
+        std::vector<size_t> padBegin, padEnd;
+        ngraph::op::PadType padType;
+        ngraph::op::RoundingType roundingType;
+        std::tie(kernel, stride, dilation, padBegin, padEnd, roundingType, padType) = basicParamsSet;
+
+        std::ostringstream results;
+        results << "IS=(";
+        results << CommonTestUtils::partialShape2str({inputShapes.first}) << ")_";
+        results << "TS=";
+        for (const auto& shape : inputShapes.second) {
+            results << CommonTestUtils::vec2str(shape) << "_";
+        }
+        results << "Prc=" << inPrc << "_";
+        results << "MaxPool_";
+        results << "K" << CommonTestUtils::vec2str(kernel) << "_";
+        results << "S" << CommonTestUtils::vec2str(stride) << "_";
+        results << "D" << CommonTestUtils::vec2str(dilation) << "_";
+        results << "PB" << CommonTestUtils::vec2str(padBegin) << "_";
+        results << "PE" << CommonTestUtils::vec2str(padEnd) << "_";
+        results << "Rounding=" << roundingType << "_";
+        results << "AutoPad=" << padType << "_";
+
+        results << CPUTestsBase::getTestCaseName(cpuParams);
+        return results.str();
+    }
+
+protected:
+    void SetUp() override {
+        targetDevice = CommonTestUtils::DEVICE_CPU;
+
+        LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet;
+        InputShape inputShapes;
+        ElementType inPrc;
+        CPUSpecificParams cpuParams;
+        std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = this->GetParam();
+
+        std::vector<size_t> kernel, stride, dilation;
+        std::vector<size_t> padBegin, padEnd;
+        ngraph::op::PadType padType;
+        ngraph::op::RoundingType roundingType;
+        std::tie(kernel, stride, dilation, padBegin, padEnd, roundingType, padType) = basicParamsSet;
+        std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
+        if (selectedType.empty()) {
+            selectedType = getPrimitiveType();
+        }
+        selectedType = makeSelectedTypeStr(selectedType, inPrc);
+
+        init_input_shapes({inputShapes});
+
+        auto params = ngraph::builder::makeDynamicParams(inPrc, inputDynamicShapes);
+        std::shared_ptr<ngraph::Node> pooling = ngraph::builder::makeMaxPoolingV8(params[0], stride, dilation, padBegin, padEnd,
+                                                                                  kernel, roundingType, padType);
+        pooling->get_rt_info() = getCPUInfo();
+        ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pooling->output(0))};
+        function = std::make_shared<ngraph::Function>(results, params, "MaxPooling");
+    }
+};
+
 TEST_P(PoolingLayerCPUTest, CompareWithRefs) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
     run();
-    // CheckPluginRelatedResults(executableNetwork, "Pooling");
+    CheckPluginRelatedResults(executableNetwork, "Pooling");
+}
+
+TEST_P(MaxPoolingV8LayerCPUTest, CompareWithRefs) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+    run();
+    CheckPluginRelatedResults(executableNetwork, "Pooling");
 }
 
 namespace {
@@ -291,6 +373,20 @@ const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax4D = {
                             ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
 };
 
+const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV84D = {
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {1, 1}, {0, 0}, {0, 0},
+                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER },
+};
+
+const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV84D_ref = {
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0},
+                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER },
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 2}, {1, 2}, {0, 0}, {0, 0},
+                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 1}, {2, 2}, {0, 0}, {0, 0},
+                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
+};
+
 const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D = {
         LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
                             ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true },
@@ -321,6 +417,22 @@ INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_4D, PoolingLayerCPUTest,
                          ::testing::Values(emptyFusingSpec)),
                          PoolingLayerCPUTest::getTestCaseName);
 
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D, MaxPoolingV8LayerCPUTest,
+                         ::testing::Combine(
+                         ::testing::ValuesIn(paramsMaxV84D),
+                         ::testing::ValuesIn(inputShapes4D),
+                         ::testing::ValuesIn(inpOutPrecision),
+                         ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs))),
+                         MaxPoolingV8LayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D_ref, MaxPoolingV8LayerCPUTest,
+                         ::testing::Combine(
+                         ::testing::ValuesIn(paramsMaxV84D_ref),
+                         ::testing::ValuesIn(inputShapes4D),
+                         ::testing::ValuesIn(inpOutPrecision),
+                         ::testing::Values(ref)),
+                         MaxPoolingV8LayerCPUTest::getTestCaseName);
+
 INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D, PoolingLayerCPUTest,
                          ::testing::Combine(
                          ::testing::ValuesIn(paramsAvg4D),
@@ -349,10 +461,24 @@ const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax5D = {
                             ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
         LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1},
                             ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
-        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 3, 4}, {2, 2, 2}, {1, 1, 1}, {1, 2, 3},
+        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
                             ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
 };
 
+const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV85D = {
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
+                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER },
+};
+
+const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV85D_ref = {
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
+                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER },
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
+                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 3, 4}, {2, 2, 2}, {2, 1, 1}, {1, 1, 1}, {1, 2, 2},
+                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
+};
+
 const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D = {
         LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
                             ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true },
@@ -366,7 +492,7 @@ const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D = {
                             ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
         LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0},
                             ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
-        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {4, 4, 4}, {2, 2, 2}, {2, 2, 2},
+        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
                             ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
 };
 
@@ -385,6 +511,22 @@ INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_5D, PoolingLayerCPUTest,
                          ::testing::Values(emptyFusingSpec)),
                          PoolingLayerCPUTest::getTestCaseName);
 
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D, MaxPoolingV8LayerCPUTest,
+                         ::testing::Combine(
+                         ::testing::ValuesIn(paramsMaxV85D),
+                         ::testing::ValuesIn(inputShapes5D),
+                         ::testing::ValuesIn(inpOutPrecision),
+                         ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs))),
+                         MaxPoolingV8LayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D_ref, MaxPoolingV8LayerCPUTest,
+                         ::testing::Combine(
+                         ::testing::ValuesIn(paramsMaxV85D_ref),
+                         ::testing::ValuesIn(inputShapes5D),
+                         ::testing::ValuesIn(inpOutPrecision),
+                         ::testing::Values(ref)),
+                         MaxPoolingV8LayerCPUTest::getTestCaseName);
+
 INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D, PoolingLayerCPUTest,
                          ::testing::Combine(
                          ::testing::ValuesIn(paramsAvg5D),
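The MaxPoolingV8LayerCPUTest fixture added above follows the usual layer-test shape: the parameter is a std::tuple unpacked with std::tie, and a static getTestCaseName is passed to INSTANTIATE_TEST_SUITE_P so each combination gets a readable name. A self-contained sketch of that mechanism, assuming plain googletest; the fixture and the two-field parameter tuple are hypothetical simplifications, not the real test classes:

    #include <gtest/gtest.h>
    #include <cstddef>
    #include <sstream>
    #include <string>
    #include <tuple>
    #include <vector>

    // Hypothetical two-field tuple standing in for maxPoolV8LayerCpuTestParamsSet.
    using demoPoolParams = std::tuple<std::vector<std::size_t>, std::vector<std::size_t>>;  // kernel, stride

    class PoolNameDemoTest : public ::testing::TestWithParam<demoPoolParams> {
    public:
        static std::string getTestCaseName(const ::testing::TestParamInfo<demoPoolParams>& obj) {
            std::vector<std::size_t> kernel, stride;
            std::tie(kernel, stride) = obj.param;  // same std::tie unpacking as in the fixture above
            std::ostringstream results;
            results << "K" << kernel[0] << "_S" << stride[0];  // becomes the test-name suffix
            return results.str();
        }
    };

    TEST_P(PoolNameDemoTest, Runs) { SUCCEED(); }

    INSTANTIATE_TEST_SUITE_P(smoke_PoolNameDemo, PoolNameDemoTest,
                             ::testing::Values(demoPoolParams{{2, 2}, {2, 2}},
                                               demoPoolParams{{3, 3}, {1, 1}}),
                             PoolNameDemoTest::getTestCaseName);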
@@ -258,6 +258,12 @@ const std::vector<ngraph::helpers::ReductionType> reductionTypes = {
         ngraph::helpers::ReductionType::L2,
 };
 
+const std::vector<ngraph::helpers::ReductionType> reductionTypesFusing = {
+        ngraph::helpers::ReductionType::Mean,
+        ngraph::helpers::ReductionType::Max,
+        ngraph::helpers::ReductionType::L2,
+};
+
 const std::vector<ngraph::helpers::ReductionType> reductionLogicalTypes = {
         ngraph::helpers::ReductionType::LogicalOr,
         ngraph::helpers::ReductionType::LogicalAnd
@@ -315,13 +321,9 @@ std::vector<CPUSpecificParams> cpuParams_HybridLayout_5D = {
 
 const std::vector<fusingSpecificParams> fusingParamsSet {
         /* activations */
-        fusingRelu,
-        fusingElu,
-        fusingTanh,
         fusingSwish,
 
         /* FQ */
-        fusingFakeQuantizePerChannel,
         fusingFakeQuantizePerChannelRelu,
         fusingFakeQuantizePerTensorRelu,
         /* another patterns */
@@ -576,7 +578,7 @@ const auto params_OneAxis_fusing = testing::Combine(
         testing::ValuesIn(axes),
         testing::ValuesIn(opTypes),
         testing::Values(true),
-        testing::ValuesIn(reductionTypes),
+        testing::ValuesIn(reductionTypesFusing),
         testing::ValuesIn(inpOutPrc),
         testing::Values(ElementType::undefined),
         testing::Values(ElementType::undefined),
@@ -589,7 +591,7 @@ const auto params_MultiAxis_4D_fusing = testing::Combine(
         testing::ValuesIn(axesND),
         testing::Values(CommonTestUtils::OpType::VECTOR),
         testing::Values(true),
-        testing::ValuesIn(reductionTypes),
+        testing::ValuesIn(reductionTypesFusing),
         testing::ValuesIn(inpOutPrc),
         testing::Values(ElementType::undefined),
         testing::Values(ElementType::undefined),
@@ -602,7 +604,7 @@ const auto params_MultiAxis_5D_fusing = testing::Combine(
         testing::ValuesIn(axes5D),
         testing::Values(CommonTestUtils::OpType::VECTOR),
         testing::Values(true),
-        testing::ValuesIn(reductionTypes),
+        testing::ValuesIn(reductionTypesFusing),
         testing::ValuesIn(inpOutPrc),
         testing::Values(ElementType::undefined),
         testing::Values(ElementType::undefined),
@@ -637,7 +639,7 @@ const auto params_OneAxis_fusing_KeepNoDims = testing::Combine(
         testing::ValuesIn(axes),
         testing::ValuesIn(opTypes),
         testing::Values(false),
-        testing::ValuesIn(reductionTypes),
+        testing::ValuesIn(reductionTypesFusing),
         testing::ValuesIn(inpOutPrc),
         testing::Values(ElementType::undefined),
         testing::Values(ElementType::undefined),
@@ -650,7 +652,7 @@ const auto params_MultiAxis_4D_Hybrid_fusing_KeepNoDims = testing::Combine(
         testing::ValuesIn(axesNDFusing),
         testing::Values(CommonTestUtils::OpType::VECTOR),
         testing::Values(false),
-        testing::ValuesIn(reductionTypes),
+        testing::ValuesIn(reductionTypesFusing),
         testing::ValuesIn(inpOutPrc),
         testing::Values(ElementType::undefined),
         testing::Values(ElementType::undefined),
@@ -663,7 +665,7 @@ const auto params_MultiAxis_5D_Hybrid_fusing_KeepNoDims = testing::Combine(
         testing::ValuesIn(axes5DFusing),
         testing::Values(CommonTestUtils::OpType::VECTOR),
         testing::Values(false),
-        testing::ValuesIn(reductionTypes),
+        testing::ValuesIn(reductionTypesFusing),
         testing::ValuesIn(inpOutPrc),
         testing::Values(ElementType::undefined),
         testing::Values(ElementType::undefined),
@@ -265,18 +265,18 @@ CPUTestsBase::makeCPUInfo(std::vector<cpu_memory_format_t> inFmts, std::vector<c
     CPUInfo cpuInfo;
 
     if (!inFmts.empty()) {
-        cpuInfo.insert({std::string(ngraph::MLKDNNInputMemoryFormatsAttr),
-                        std::make_shared<ngraph::MLKDNNInputMemoryFormats>(fmts2str(inFmts, "cpu:"))});
+        cpuInfo.insert({ngraph::MKLDNNInputMemoryFormats::get_type_info_static(),
+                        ngraph::MKLDNNInputMemoryFormats(fmts2str(inFmts, "cpu:"))});
     }
     if (!outFmts.empty()) {
-        cpuInfo.insert({std::string(ngraph::MLKDNNOutputMemoryFormatsAttr),
-                        std::make_shared<ngraph::MLKDNNOutputMemoryFormats>(fmts2str(outFmts, "cpu:"))});
+        cpuInfo.insert({ngraph::MKLDNNOutputMemoryFormats::get_type_info_static(),
+                        ngraph::MKLDNNOutputMemoryFormats(fmts2str(outFmts, "cpu:"))});
     }
     if (!priority.empty()) {
         cpuInfo.insert({"PrimitivesPriority", std::make_shared<ngraph::VariantWrapper<std::string>>(impls2str(priority))});
     }
 
-    cpuInfo.insert({"enforceBF16evenForGraphTail", ov::make_variant<int64_t>(true)});
+    cpuInfo.insert({"enforceBF16evenForGraphTail", ov::make_runtime_attribute<int64_t>(true)});
 
     return cpuInfo;
 }
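The makeCPUInfo change above is part of the same migration running through this merge: runtime attributes stop being std::shared_ptr<Variant> values keyed by strings and become typed attribute objects inserted under their static type info. A toy model of the type-erased side, using std::any in place of the real OpenVINO types; every name here is illustrative, not the actual API:

    #include <any>
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    // Toy stand-in for a node's runtime-info map: values are type-erased
    // objects rather than shared_ptr<Variant> wrappers.
    int main() {
        std::map<std::string, std::any> rt_info;
        rt_info["InputMemoryFormats"] = std::string("cpu:nChw16c");
        rt_info["enforceBF16evenForGraphTail"] = std::int64_t{1};

        // Typed retrieval; a mismatched cast throws std::bad_any_cast.
        std::cout << std::any_cast<std::string>(rt_info["InputMemoryFormats"]) << "\n";
        std::cout << std::any_cast<std::int64_t>(rt_info["enforceBF16evenForGraphTail"]) << "\n";
        return 0;
    }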
@@ -0,0 +1,37 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include "single_layer_tests/experimental_detectron_roifeatureextractor.hpp"
+
+using namespace ov::test;
+using namespace ov::test::subgraph;
+
+namespace {
+const std::vector<int64_t> outputSize = {7, 14};
+const std::vector<int64_t> samplingRatio = {1, 2, 3};
+
+const std::vector<std::vector<int64_t>> pyramidScales = {
+        {8, 16, 32, 64},
+        {4, 8, 16, 32},
+        {2, 4, 8, 16}
+};
+
+const std::vector<std::vector<InputShape>> staticInputShape = {
+        static_shapes_to_test_representation({{1000, 4}, {1, 8, 200, 336}, {1, 8, 100, 168}, {1, 8, 50, 84}, {1, 8, 25, 42}}),
+        static_shapes_to_test_representation({{1000, 4}, {1, 16, 200, 336}, {1, 16, 100, 168}, {1, 16, 50, 84}, {1, 16, 25, 42}}),
+        static_shapes_to_test_representation({{1200, 4}, {1, 8, 200, 42}, {1, 8, 100, 336}, {1, 8, 50, 168}, {1, 8, 25, 84}})
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_ExperimentalROI_static, ExperimentalDetectronROIFeatureExtractorLayerTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(staticInputShape),
+                ::testing::ValuesIn(outputSize),
+                ::testing::ValuesIn(samplingRatio),
+                ::testing::ValuesIn(pyramidScales),
+                ::testing::Values(false),
+                ::testing::Values(ov::element::Type_t::f32),
+                ::testing::Values(CommonTestUtils::DEVICE_GPU)),
+        ExperimentalDetectronROIFeatureExtractorLayerTest::getTestCaseName);
+} // namespace
@ -0,0 +1,62 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "single_layer_tests/roi_align.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

const std::vector<InferenceEngine::Precision> netPRCs = {
    InferenceEngine::Precision::FP32,
    // There is no possibility to test ROIAlign in fp16 precision,
    // because on edge cases where in the fp32 version the ROI value is
    // a little bit smaller than the nearest integer value,
    // it would be bigger than the nearest integer in fp16 precision.
    // Such behavior leads to completely different results of ROIAlign
    // in fp32 and fp16 precisions.
    // In real AI applications this problem is solved by precision-aware training.
    // InferenceEngine::Precision::FP16,
};

const auto ROIAlignCases_average =
    ::testing::Combine(
        ::testing::ValuesIn(
            std::vector<std::vector<size_t>> {
                { 3, 8, 16, 16 },
                { 2, 1, 16, 16 },
                { 2, 1, 8, 16 }}),
        ::testing::Values(std::vector<size_t>{ 2, 4 }),
        ::testing::Values(2),
        ::testing::Values(2),
        ::testing::ValuesIn(std::vector<float> { 1, 0.625 }),
        ::testing::Values(2),
        ::testing::Values("avg"),
        ::testing::ValuesIn(netPRCs),
        ::testing::Values(CommonTestUtils::DEVICE_GPU)
    );

INSTANTIATE_TEST_SUITE_P(smoke_TestsROIAlign_average, ROIAlignLayerTest, ROIAlignCases_average, ROIAlignLayerTest::getTestCaseName);

const auto ROIAlignCases_max =
    ::testing::Combine(
        ::testing::ValuesIn(
            std::vector<std::vector<size_t>> {
                { 2, 8, 20, 20 },
                { 2, 1, 20, 20 },
                { 2, 1, 10, 20 }
            }),
        ::testing::Values(std::vector<size_t>{ 2, 4 }),
        ::testing::Values(2),
        ::testing::Values(2),
        ::testing::ValuesIn(std::vector<float> { 1, 0.625 }),
        ::testing::Values(2),
        ::testing::Values("max"),
        ::testing::ValuesIn(netPRCs),
        ::testing::Values(CommonTestUtils::DEVICE_GPU)
    );

INSTANTIATE_TEST_SUITE_P(smoke_TestsROIAlign_max, ROIAlignLayerTest, ROIAlignCases_max, ROIAlignLayerTest::getTestCaseName);
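The fp16 caveat in the netPRCs comment above comes down to rounding. A standalone sketch, not part of the patch, of how an fp32 ROI coordinate just below an integer can round up to that integer in fp16 and land in a different bin; the frexp-based rounder only approximates IEEE fp16 round-to-nearest (subnormals and overflow are ignored):

// Illustrative only: emulate fp16 by keeping an 11-bit significand.
#include <cmath>
#include <cstdio>

static float round_to_fp16(float x) {
    int e = 0;
    float m = std::frexp(x, &e);            // x = m * 2^e, m in [0.5, 1)
    m = std::round(m * 2048.0f) / 2048.0f;  // 11 significand bits, like fp16
    return std::ldexp(m, e);
}

int main() {
    const float roi = 19.999f;                  // fp32: floor() gives bin 19
    const float roi_fp16 = round_to_fp16(roi);  // fp16: rounds up to 20.0f
    std::printf("fp32 bin = %d, fp16 bin = %d\n",
                static_cast<int>(std::floor(roi)),
                static_cast<int>(std::floor(roi_fp16)));
    return 0;
}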
@ -0,0 +1,19 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <shared_test_classes/single_layer/experimental_detectron_detection_output.hpp>

namespace ov {
namespace test {
namespace subgraph {

TEST_P(ExperimentalDetectronDetectionOutputLayerTest, ExperimentalDetectronDetectionOutputLayerTests) {
    run();
}

} // namespace subgraph
} // namespace test
} // namespace ov
@ -0,0 +1,19 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp>

namespace ov {
namespace test {
namespace subgraph {

TEST_P(ExperimentalDetectronGenerateProposalsSingleImageLayerTest, ExperimentalDetectronGenerateProposalsSingleImageLayerTests) {
    run();
}

} // namespace subgraph
} // namespace test
} // namespace ov
@ -0,0 +1,19 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp>

namespace ov {
namespace test {
namespace subgraph {

TEST_P(ExperimentalDetectronPriorGridGeneratorLayerTest, ExperimentalDetectronPriorGridGeneratorLayerTests) {
    run();
}

} // namespace subgraph
} // namespace test
} // namespace ov
@ -19,4 +19,9 @@ TEST_P(GlobalPoolingLayerTest, CompareWithRefs) {
     PluginCache::get().reset();
 }
 }

+TEST_P(MaxPoolingV8LayerTest, CompareWithRefs) {
+    Run();
+}
+
 } // namespace LayerTestsDefinitions
@ -120,7 +120,7 @@ bool LoadNetworkCacheTestBase::importExportSupported(InferenceEngine::Core& ie)
     std::vector<std::string> supportedMetricKeys = ie.GetMetric(targetDevice, METRIC_KEY(SUPPORTED_METRICS));
     auto it = std::find(supportedMetricKeys.begin(), supportedMetricKeys.end(),
                         METRIC_KEY(IMPORT_EXPORT_SUPPORT));
-    bool supported = (it != supportedMetricKeys.end()) &&
+    auto supported = (it != supportedMetricKeys.end()) &&
                      ie.GetMetric(targetDevice, METRIC_KEY(IMPORT_EXPORT_SUPPORT)).as<bool>();
     return supported;
 }
@ -0,0 +1,39 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "common_test_utils/common_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

namespace ov {
namespace test {
namespace subgraph {

typedef std::tuple<
    std::vector<InputShape>, // inputShapes
    float,                   // score_threshold
    float,                   // nms_threshold
    float,                   // max_delta_log_wh
    int64_t,                 // num_classes
    int64_t,                 // post_nms_count
    size_t,                  // max_detections_per_image
    bool,                    // class_agnostic_box_regression
    std::vector<float>,      // deltas_weights
    ElementType,             // Network precision
    std::string              // Device name
> ExperimentalDetectronDetectionOutputTestParams;

class ExperimentalDetectronDetectionOutputLayerTest :
        public testing::WithParamInterface<ExperimentalDetectronDetectionOutputTestParams>,
        virtual public SubgraphBaseTest {
protected:
    void SetUp() override;
    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override;

public:
    static std::string getTestCaseName(const testing::TestParamInfo<ExperimentalDetectronDetectionOutputTestParams>& obj);
};
} // namespace subgraph
} // namespace test
} // namespace ov
@ -0,0 +1,36 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "common_test_utils/common_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

namespace ov {
namespace test {
namespace subgraph {

typedef std::tuple<
    std::vector<InputShape>,                                  // Input shapes
    float,                                                    // min_size: minimum box width & height
    float,                                                    // nms_threshold: specifies NMS threshold
    int64_t,                                                  // post_nms_count: number of top-n proposals after NMS
    int64_t,                                                  // pre_nms_count: number of top-n proposals before NMS
    std::pair<std::string, std::vector<ov::runtime::Tensor>>, // input tensors
    ElementType,                                              // Network precision
    std::string                                               // Device name
> ExperimentalDetectronGenerateProposalsSingleImageTestParams;

class ExperimentalDetectronGenerateProposalsSingleImageLayerTest :
        public testing::WithParamInterface<ExperimentalDetectronGenerateProposalsSingleImageTestParams>,
        virtual public SubgraphBaseTest {
protected:
    void SetUp() override;
    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override;

public:
    static std::string getTestCaseName(const testing::TestParamInfo<ExperimentalDetectronGenerateProposalsSingleImageTestParams>& obj);
};

} // namespace subgraph
} // namespace test
} // namespace ov
@ -0,0 +1,38 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "common_test_utils/common_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

namespace ov {
namespace test {
namespace subgraph {

class ExperimentalDetectronPriorGridGeneratorTestParam {
public:
    ov::op::v6::ExperimentalDetectronPriorGridGenerator::Attributes attributes;
    std::vector<InputShape> inputShapes;
};

typedef std::tuple<
    ExperimentalDetectronPriorGridGeneratorTestParam,
    std::pair<std::string, std::vector<ov::runtime::Tensor>>,
    ElementType,             // Network precision
    std::string              // Device name
> ExperimentalDetectronPriorGridGeneratorTestParams;

class ExperimentalDetectronPriorGridGeneratorLayerTest :
        public testing::WithParamInterface<ExperimentalDetectronPriorGridGeneratorTestParams>,
        virtual public SubgraphBaseTest {
protected:
    void SetUp() override;
    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override;

public:
    static std::string getTestCaseName(const testing::TestParamInfo<ExperimentalDetectronPriorGridGeneratorTestParams>& obj);
};

} // namespace subgraph
} // namespace test
} // namespace ov
@ -48,6 +48,27 @@ typedef std::tuple<
     std::string              // Device name
 > globalPoolLayerTestParamsSet;

+typedef std::tuple<
+    std::vector<size_t>,        // Kernel size
+    std::vector<size_t>,        // Stride
+    std::vector<size_t>,        // Dilation
+    std::vector<size_t>,        // Pad begin
+    std::vector<size_t>,        // Pad end
+    ngraph::op::RoundingType,   // Rounding type
+    ngraph::op::PadType         // Pad type
+> maxPoolV8SpecificParams;
+
+typedef std::tuple<
+    maxPoolV8SpecificParams,
+    InferenceEngine::Precision, // Net precision
+    InferenceEngine::Precision, // Input precision
+    InferenceEngine::Precision, // Output precision
+    InferenceEngine::Layout,    // Input layout
+    InferenceEngine::Layout,    // Output layout
+    std::vector<size_t>,        // Input shape
+    std::string                 // Device name
+> maxPoolV8LayerTestParamsSet;
+
 class PoolingLayerTest : public testing::WithParamInterface<poolLayerTestParamsSet>,
                          virtual public LayerTestsUtils::LayerTestsCommon {
 public:
@ -66,4 +87,13 @@ protected:
     void SetUp() override;
 };

+class MaxPoolingV8LayerTest : public testing::WithParamInterface<maxPoolV8LayerTestParamsSet>,
+                              virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<maxPoolV8LayerTestParamsSet>& obj);
+
+protected:
+    void SetUp() override;
+};
+
 } // namespace LayerTestsDefinitions
@ -0,0 +1,144 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/experimental_detectron_detection_output.hpp"
#include "ngraph_functions/builders.hpp"
#include "common_test_utils/data_utils.hpp"
#include "functional_test_utils/ov_tensor_utils.hpp"

namespace ov {
namespace test {
namespace subgraph {

namespace {
std::ostream& operator <<(std::ostream& ss, const ngraph::opset6::ExperimentalDetectronDetectionOutput::Attributes& attributes) {
    ss << "score_threshold=" << attributes.score_threshold << "_";
    ss << "nms_threshold=" << attributes.nms_threshold << "_";
    ss << "max_delta_log_wh=" << attributes.max_delta_log_wh << "_";
    ss << "num_classes=" << attributes.num_classes << "_";
    ss << "post_nms_count=" << attributes.post_nms_count << "_";
    ss << "max_detections_per_image=" << attributes.max_detections_per_image << "_";
    ss << "class_agnostic_box_regression=" << (attributes.class_agnostic_box_regression ? "true" : "false") << "_";
    ss << "deltas_weights=" << CommonTestUtils::vec2str(attributes.deltas_weights);
    return ss;
}
} // namespace

std::string ExperimentalDetectronDetectionOutputLayerTest::getTestCaseName(
        const testing::TestParamInfo<ExperimentalDetectronDetectionOutputTestParams>& obj) {
    std::vector<ov::test::InputShape> inputShapes;
    ngraph::opset6::ExperimentalDetectronDetectionOutput::Attributes attributes;
    ElementType netPrecision;
    std::string targetName;
    std::tie(
        inputShapes,
        attributes.score_threshold,
        attributes.nms_threshold,
        attributes.max_delta_log_wh,
        attributes.num_classes,
        attributes.post_nms_count,
        attributes.max_detections_per_image,
        attributes.class_agnostic_box_regression,
        attributes.deltas_weights,
        netPrecision,
        targetName) = obj.param;

    std::ostringstream result;

    using ov::test::operator<<;
    result << "input_rois=" << inputShapes[0] << "_";
    result << "input_deltas=" << inputShapes[1] << "_";
    result << "input_scores=" << inputShapes[2] << "_";
    result << "input_im_info=" << inputShapes[3] << "_";

    using ov::test::subgraph::operator<<;
    result << "attributes={" << attributes << "}_";
    result << "netPRC=" << netPrecision << "_";
    result << "trgDev=" << targetName;
    return result.str();
}

void ExperimentalDetectronDetectionOutputLayerTest::SetUp() {
    std::vector<InputShape> inputShapes;
    ngraph::opset6::ExperimentalDetectronDetectionOutput::Attributes attributes;

    ElementType netPrecision;
    std::string targetName;
    std::tie(
        inputShapes,
        attributes.score_threshold,
        attributes.nms_threshold,
        attributes.max_delta_log_wh,
        attributes.num_classes,
        attributes.post_nms_count,
        attributes.max_detections_per_image,
        attributes.class_agnostic_box_regression,
        attributes.deltas_weights,
        netPrecision,
        targetName) = this->GetParam();

    inType = outType = netPrecision;
    targetDevice = targetName;

    init_input_shapes(inputShapes);

    auto params = ngraph::builder::makeDynamicParams(netPrecision, {inputDynamicShapes});
    auto paramsOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
    auto experimentalDetectron = std::make_shared<ngraph::opset6::ExperimentalDetectronDetectionOutput>(
        params[0], // input_rois
        params[1], // input_deltas
        params[2], // input_scores
        params[3], // input_im_info
        attributes);
    function = std::make_shared<ov::Function>(
        ov::OutputVector{experimentalDetectron->output(0), experimentalDetectron->output(1)},
        "ExperimentalDetectronDetectionOutput");
}

void ExperimentalDetectronDetectionOutputLayerTest::generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) {
    static const std::vector<ov::runtime::Tensor> inputTensors = {
        // 16 x 4 = 64
        CommonTestUtils::create_tensor<float>(ov::element::f32, Shape{16, 4}, {
            1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 4.0f, 1.0f, 8.0f, 5.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f
        }),
        // 16 x 8
        CommonTestUtils::create_tensor<float>(ov::element::f32, Shape{16, 8}, {
            5.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 1.0f, 4.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 8.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,

            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f
        }),
        // 16 x 2 = 32
        CommonTestUtils::create_tensor<float>(ov::element::f32, Shape{16, 2}, {
            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
            1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f
        }),
        // 1 x 3 = 3
        CommonTestUtils::create_tensor<float>(ov::element::f32, Shape{1, 3}, {1.0f, 1.0f, 1.0f})
    };

    inputs.clear();
    const auto& funcInputs = function->inputs();
    for (auto i = 0ul; i < funcInputs.size(); ++i) {
        if (targetInputStaticShapes[i] != inputTensors[i].get_shape()) {
            throw Exception("input shape is different from tensor shape");
        }

        inputs.insert({funcInputs[i].get_node_shared_ptr(), inputTensors[i]});
    }
}

} // namespace subgraph
} // namespace test
} // namespace ov
@ -0,0 +1,107 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp"
#include "ngraph_functions/builders.hpp"
#include "functional_test_utils/ov_tensor_utils.hpp"

namespace ov {
namespace test {
namespace subgraph {

namespace {
std::ostream& operator <<(
        std::ostream& ss,
        const ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage::Attributes& attributes) {
    ss << "min_size=" << attributes.min_size << "_";
    ss << "nms_threshold=" << attributes.nms_threshold << "_";
    ss << "post_nms_count=" << attributes.post_nms_count << "_";
    ss << "pre_nms_count=" << attributes.pre_nms_count;
    return ss;
}
} // namespace

std::string ExperimentalDetectronGenerateProposalsSingleImageLayerTest::getTestCaseName(
        const testing::TestParamInfo<ExperimentalDetectronGenerateProposalsSingleImageTestParams>& obj) {
    std::vector<InputShape> inputShapes;
    ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage::Attributes attributes;
    std::pair<std::string, std::vector<ov::runtime::Tensor>> inputTensors;
    ElementType netPrecision;
    std::string targetName;
    std::tie(
        inputShapes,
        attributes.min_size,
        attributes.nms_threshold,
        attributes.post_nms_count,
        attributes.pre_nms_count,
        inputTensors,
        netPrecision,
        targetName) = obj.param;

    std::ostringstream result;
    using ov::test::operator<<;
    result << "im_info=" << inputShapes[0] << "_";
    result << "anchors=" << inputShapes[1] << "_";
    result << "deltas=" << inputShapes[2] << "_";
    result << "scores=" << inputShapes[3] << "_";

    using ov::test::subgraph::operator<<;
    result << "attributes={" << attributes << "}_";
    result << "inputTensors=" << inputTensors.first << "_";
    result << "netPRC=" << netPrecision << "_";
    result << "trgDev=" << targetName;
    return result.str();
}

void ExperimentalDetectronGenerateProposalsSingleImageLayerTest::SetUp() {
    std::vector<InputShape> inputShapes;
    ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage::Attributes attributes;
    std::pair<std::string, std::vector<ov::runtime::Tensor>> inputTensors;
    ElementType netPrecision;
    std::string targetName;
    std::tie(
        inputShapes,
        attributes.min_size,
        attributes.nms_threshold,
        attributes.post_nms_count,
        attributes.pre_nms_count,
        inputTensors,
        netPrecision,
        targetName) = this->GetParam();

    inType = outType = netPrecision;
    targetDevice = targetName;

    init_input_shapes(inputShapes);

    auto params = ngraph::builder::makeDynamicParams(netPrecision, {inputDynamicShapes});
    auto paramsOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
    auto experimentalDetectron = std::make_shared<ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(
        params[0], // im_info
        params[1], // anchors
        params[2], // deltas
        params[3], // scores
        attributes);
    function = std::make_shared<ov::Function>(
        ov::OutputVector{experimentalDetectron->output(0), experimentalDetectron->output(1)},
        "ExperimentalDetectronGenerateProposalsSingleImage");
}

void ExperimentalDetectronGenerateProposalsSingleImageLayerTest::generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) {
    auto inputTensors = std::get<5>(GetParam());

    inputs.clear();
    const auto& funcInputs = function->inputs();
    for (auto i = 0ul; i < funcInputs.size(); ++i) {
        if (targetInputStaticShapes[i] != inputTensors.second[i].get_shape()) {
            throw Exception("input shape is different from tensor shape");
        }

        inputs.insert({funcInputs[i].get_node_shared_ptr(), inputTensors.second[i]});
    }
}

} // namespace subgraph
} // namespace test
} // namespace ov
@ -0,0 +1,100 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp"
#include "ngraph_functions/builders.hpp"
#include "common_test_utils/data_utils.hpp"
#include "functional_test_utils/ov_tensor_utils.hpp"

namespace ov {
namespace test {
namespace subgraph {

namespace {
std::ostream& operator <<(
        std::ostream& ss,
        const ov::op::v6::ExperimentalDetectronPriorGridGenerator::Attributes& attributes) {
    ss << "flatten=" << attributes.flatten << "_";
    ss << "h=" << attributes.h << "_";
    ss << "w=" << attributes.w << "_";
    ss << "stride_x=" << attributes.stride_x << "_";
    ss << "stride_y=" << attributes.stride_y;
    return ss;
}
} // namespace

std::string ExperimentalDetectronPriorGridGeneratorLayerTest::getTestCaseName(
        const testing::TestParamInfo<ExperimentalDetectronPriorGridGeneratorTestParams>& obj) {
    ExperimentalDetectronPriorGridGeneratorTestParam param;
    std::pair<std::string, std::vector<ov::runtime::Tensor>> inputTensors;
    ElementType netPrecision;
    std::string targetName;
    std::tie(param, inputTensors, netPrecision, targetName) = obj.param;

    std::ostringstream result;
    using ov::test::operator<<;
    result << "priors=" << param.inputShapes[0] << "_";
    result << "feature_map=" << param.inputShapes[1] << "_";
    result << "im_data=" << param.inputShapes[2] << "_";

    using ov::test::subgraph::operator<<;
    result << "attributes=" << param.attributes << "_";
    result << "priorValues=" << inputTensors.first << "_";
    result << "netPRC=" << netPrecision << "_";
    result << "trgDev=" << targetName;
    return result.str();
}

void ExperimentalDetectronPriorGridGeneratorLayerTest::SetUp() {
    ExperimentalDetectronPriorGridGeneratorTestParam param;
    std::pair<std::string, std::vector<ov::runtime::Tensor>> inputTensors;
    ElementType netPrecision;
    std::string targetName;
    std::tie(param, inputTensors, netPrecision, targetName) = this->GetParam();

    inType = outType = netPrecision;
    targetDevice = targetName;

    init_input_shapes(param.inputShapes);

    auto params = ngraph::builder::makeDynamicParams(netPrecision, {inputDynamicShapes});
    auto paramsOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
    auto experimentalDetectron = std::make_shared<op::v6::ExperimentalDetectronPriorGridGenerator>(
        params[0], // priors
        params[1], // feature_map
        params[2], // im_data
        param.attributes);
    function = std::make_shared<ov::Function>(
        ov::OutputVector{experimentalDetectron->output(0)},
        "ExperimentalDetectronPriorGridGenerator");
}

void ExperimentalDetectronPriorGridGeneratorLayerTest::generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) {
    auto inputTensors = std::get<1>(GetParam());

    inputs.clear();
    const auto& funcInputs = function->inputs();

    auto i = 0ul;
    for (; i < inputTensors.second.size(); ++i) {
        if (targetInputStaticShapes[i] != inputTensors.second[i].get_shape()) {
            throw Exception("input shape is different from tensor shape");
        }

        inputs.insert({funcInputs[i].get_node_shared_ptr(), inputTensors.second[i]});
    }

    for (auto j = i; j < funcInputs.size(); ++j) {
        // Fill the remaining inputs with zeros; std::vector takes (count, value).
        ov::runtime::Tensor inputTensor = CommonTestUtils::create_tensor<float>(
            ov::element::f32,
            targetInputStaticShapes[j],
            std::vector<float>(shape_size(targetInputStaticShapes[j]), 0.f));

        inputs.insert({funcInputs[j].get_node_shared_ptr(), inputTensor});
    }
}

} // namespace subgraph
} // namespace test
} // namespace ov
@ -46,9 +46,9 @@ namespace LayerTestsDefinitions {
     }

     auto hostTensor = std::make_shared<HostTensor>(ngPrc, inputShape);
-    auto variable_context = std::make_shared<VariantWrapper<VariableContext>>(VariableContext());
+    auto variable_context = VariableContext();
     auto variable_value = std::make_shared<VariableValue>(hostTensor);
-    variable_context->get().set_variable_value(function->get_variable_by_id("v0"), variable_value);
+    variable_context.set_variable_value(function->get_variable_by_id("v0"), variable_value);
     eval_context["VariableContext"] = variable_context;
 }
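For context, a minimal sketch of how the plain VariableContext stored above could be read back out of the evaluation map. This is not part of the patch; it assumes, as the assignment above implies, that eval_context maps std::string to ov::Any, and that VariableContext exposes a get_variable_value counterpart to the set_variable_value call used in the change:

// hypothetical readback, mirroring the calls in the hunk above
auto& ctx = eval_context["VariableContext"].as<VariableContext>();
auto value = ctx.get_variable_value(function->get_variable_by_id("v0"));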
@ -94,6 +94,38 @@ std::string GlobalPoolingLayerTest::getTestCaseName(const testing::TestParamInfo
     return result.str();
 }

+std::string MaxPoolingV8LayerTest::getTestCaseName(const testing::TestParamInfo<maxPoolV8LayerTestParamsSet>& obj) {
+    maxPoolV8SpecificParams poolParams;
+    InferenceEngine::Precision netPrecision;
+    InferenceEngine::Precision inPrc, outPrc;
+    InferenceEngine::Layout inLayout, outLayout;
+    std::vector<size_t> inputShapes;
+    std::string targetDevice;
+    std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param;
+    std::vector<size_t> kernel, stride, dilation;
+    std::vector<size_t> padBegin, padEnd;
+    ngraph::op::PadType padType;
+    ngraph::op::RoundingType roundingType;
+    std::tie(kernel, stride, dilation, padBegin, padEnd, roundingType, padType) = poolParams;
+
+    std::ostringstream result;
+    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+    result << "K" << CommonTestUtils::vec2str(kernel) << "_";
+    result << "S" << CommonTestUtils::vec2str(stride) << "_";
+    result << "D" << CommonTestUtils::vec2str(dilation) << "_";
+    result << "PB" << CommonTestUtils::vec2str(padBegin) << "_";
+    result << "PE" << CommonTestUtils::vec2str(padEnd) << "_";
+    result << "Rounding=" << roundingType << "_";
+    result << "AutoPad=" << padType << "_";
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "inPRC=" << inPrc.name() << "_";
+    result << "outPRC=" << outPrc.name() << "_";
+    result << "inL=" << inLayout << "_";
+    result << "outL=" << outLayout << "_";
+    result << "trgDev=" << targetDevice;
+    return result.str();
+}
+
 void PoolingLayerTest::SetUp() {
     poolSpecificParams poolParams;
     std::vector<size_t> inputShape;
@ -159,4 +191,28 @@ void GlobalPoolingLayerTest::SetUp() {
     ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pooling)};
     function = std::make_shared<ngraph::Function>(results, params, "pooling");
 }

+void MaxPoolingV8LayerTest::SetUp() {
+    maxPoolV8SpecificParams poolParams;
+    std::vector<size_t> inputShape;
+    InferenceEngine::Precision netPrecision;
+    std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam();
+    std::vector<size_t> kernel, stride, dilation;
+    std::vector<size_t> padBegin, padEnd;
+    ngraph::op::PadType padType;
+    ngraph::op::RoundingType roundingType;
+    std::tie(kernel, stride, dilation, padBegin, padEnd, roundingType, padType) = poolParams;
+
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+    auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+    auto paramOuts = ngraph::helpers::convert2OutputVector(
+            ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+
+    std::shared_ptr<ngraph::Node> maxPool = ngraph::builder::makeMaxPoolingV8(paramOuts[0], stride, dilation, padBegin, padEnd,
+                                                                              kernel, roundingType, padType);
+
+    ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(maxPool->output(0))};
+    function = std::make_shared<ngraph::Function>(results, params, "MaxPoolV8");
+}
+
 } // namespace LayerTestsDefinitions
@ -418,6 +418,18 @@ void inline fill_data_random<InferenceEngine::Precision::BF16>(InferenceEngine::
     fill_data_random_float<InferenceEngine::Precision::BF16>(blob, range, start_from, k, seed);
 }

+template <class T>
+static ov::runtime::Tensor create_tensor(
+    const ov::element::Type& element_type,
+    const ov::Shape& shape,
+    const std::vector<T>& values,
+    const size_t size = 0) {
+    const size_t real_size = size ? size : values.size() * sizeof(T) / element_type.size();
+    ov::runtime::Tensor tensor { element_type, shape };
+    std::memcpy(tensor.data(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size()));
+    return tensor;
+}
+
 template<typename T>
 typename std::enable_if<std::is_signed<T>::value, T>::type
 inline ie_abs(const T &val) {
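A minimal usage sketch of the create_tensor helper added above, matching the calls made elsewhere in this patch; the shape and values are illustrative:

// Builds a 2x3 f32 tensor and copies six floats into it; with size left at 0,
// the copy length is derived from values.size().
auto t = CommonTestUtils::create_tensor<float>(
    ov::element::f32, ov::Shape{2, 3},
    {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f});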
@ -94,46 +94,7 @@ std::shared_ptr<ngraph::opset1::FakeQuantize> makeFakeQuantizeTypeRelaxed(
     const ngraph::element::Type constantPrecision,
     const FakeQuantizeOnDataWithConstant& fqOnData);

-template <typename ... Args>
-void addAttribute(std::vector<std::shared_ptr<ngraph::Node>> nodes, Args&& ... args) {
-    const auto attribute = std::make_shared<ngraph::VariantWrapper<QuantizationAlignmentAttributePtr>>(
-        QuantizationAlignmentAttribute(std::forward<Args>(args)...));
-
-    for (const auto& node : nodes) {
-        node->get_rt_info()[ngraph::VariantWrapper<QuantizationAlignmentAttributePtr>::type_info.name] = attribute;
-    }
-}
-
-template <typename T>
-void addAttribute2(std::vector<std::shared_ptr<ngraph::Node>> nodes, T attribute) {
-    const std::string typeInfoName = attribute->get_type_info().name;
-    for (const auto& node : nodes) {
-        auto& rt = node->get_rt_info();
-        rt[typeInfoName] = attribute;
-    }
-}
-
-template <typename T, typename ... Args>
-void addAttribute3(std::vector<std::shared_ptr<ngraph::Node>> nodes, Args&& ... args) {
-    const auto attribute = std::make_shared<::ngraph::VariantWrapper<T>>(T(std::forward<Args>(args)...));
-    for (const auto& node : nodes) {
-        node->get_rt_info()[ngraph::VariantWrapper<T>::type_info.name] = attribute;
-    }
-}
-
-void addAttributes(std::vector<std::shared_ptr<ngraph::Node>> nodes, std::vector<std::shared_ptr<Variant>> attributes);
-
-template <typename T, typename ... Args>
-std::shared_ptr<Variant> make_shared_attribute(Args&& ... args) {
-    const auto attribute = std::make_shared<::ngraph::VariantWrapper<T>>(T(std::forward<Args>(args)...));
-    return attribute;
-}
-
-template <typename T, typename ... Args>
-std::shared_ptr<Variant> make_shared_attribute_ptr(Args&& ... args) {
-    const auto attribute = std::make_shared<::ngraph::VariantWrapper<std::shared_ptr<T>>>(std::make_shared<T>(std::forward<Args>(args)...));
-    return attribute;
-}
-
+void addAttributes(std::vector<std::shared_ptr<ngraph::Node>> nodes, std::vector<ov::Any> attributes);
+
 std::shared_ptr<Node> makeConvolution(
     const std::shared_ptr<Node>& parent,
@ -24,7 +24,7 @@ public:
     const std::vector<float>& outputLowValues,
     const std::vector<float>& outputHighValues,
     const ngraph::element::Type outputPrecision = ngraph::element::undefined,
-    const std::vector<std::shared_ptr<Variant>>& attributes = {});
+    const std::vector<ov::Any>& attributes = {});

     virtual ~FakeQuantizeOnData();
@ -38,7 +38,7 @@ public:
     std::vector<float> outputLowValues;
     std::vector<float> outputHighValues;
     ngraph::element::Type outputPrecision;
-    std::vector<std::shared_ptr<Variant>> attributes;
+    std::vector<ov::Any> attributes;
 };

 inline std::ostream& operator<<(std::ostream& os, const std::vector<float>& values) {
@ -71,8 +71,8 @@ public:
     const std::vector<float>& outputLowValues,
     const std::vector<float>& outputHighValues,
     const ngraph::element::Type outputPrecision = ngraph::element::undefined,
-    const std::vector<std::shared_ptr<Variant>>& attributes = {});
+    const std::vector<ov::Any>& attributes = {});

     virtual ~FakeQuantizeOnDataWithConstant();

     virtual bool empty() const;
@ -84,7 +84,7 @@ public:
     std::vector<float> outputLowValues;
     std::vector<float> outputHighValues;
     ngraph::element::Type outputPrecision;
-    std::vector<std::shared_ptr<Variant>> attributes;
+    std::vector<ov::Any> attributes;
 };

 inline std::ostream& operator<<(std::ostream& out, const FakeQuantizeOnDataWithConstant& data) {
@ -123,7 +123,7 @@ public:
     const FakeQuantizeOnDataWithConstant& fakeQuantize2,
     const DequantizationOperations::Convert& convert2,
     const DequantizationOperations& dequantization2,
-    const std::vector<std::shared_ptr<Variant>>& concatAttributes,
+    const std::vector<ov::Any>& concatAttributes,
     const ngraph::element::Type precisionAfterOperation,
     const DequantizationOperations& dequantizationAfter,
     const std::int64_t& axis,
@ -141,7 +141,7 @@ public:
     const DequantizationOperations::Convert& convert2,
     const DequantizationOperations& dequantization2,
     const bool addReshape2,
-    const std::vector<std::shared_ptr<Variant>>& concatAttributes,
+    const std::vector<ov::Any>& concatAttributes,
     const ngraph::element::Type precisionAfterOperation,
     const DequantizationOperations& dequantizationAfter,
     const std::int64_t& axis,
@ -30,7 +30,7 @@ public:
     const FakeQuantizeOnDataWithConstant& fqOnData3,
     const DequantizationOperations::Convert& convert3,
     const DequantizationOperations& dequantization3,
-    const std::vector<std::shared_ptr<Variant>>& concatAttributes,
+    const std::vector<ov::Any>& concatAttributes,
     const ngraph::element::Type precisionAfterOperation,
     const DequantizationOperations& dequantizationAfter,
     const std::int64_t& axis);
@ -282,7 +282,9 @@ std::shared_ptr<ngraph::opset1::FakeQuantize> makeFakeQuantize(

     auto& rt = fq->get_rt_info();
     for (auto& attribute : fqOnData.attributes) {
-        rt[attribute->get_type_info().name] = attribute;
+        if (attribute.is<ov::RuntimeAttribute>()) {
+            rt[attribute.as<ov::RuntimeAttribute>().get_type_info()] = attribute;
+        }
     }

     return fq;
@ -298,12 +300,12 @@ std::shared_ptr<ngraph::opset1::FakeQuantize> makeFakeQuantizeTypeRelaxed(
         fqOnData.outputPrecision == ngraph::element::undefined ? constantPrecision : fqOnData.outputPrecision);
 }

-void addAttributes(std::vector<std::shared_ptr<ngraph::Node>> nodes, std::vector<std::shared_ptr<Variant>> attributes) {
+void addAttributes(std::vector<std::shared_ptr<ngraph::Node>> nodes, std::vector<ov::Any> attributes) {
     for (const auto& node : nodes) {
         for (const auto& attribute : attributes) {
-            auto& rt = node->get_rt_info();
-            const std::string typeInfoName = attribute->get_type_info().name;
-            rt[typeInfoName] = attribute;
+            if (attribute.is<ov::RuntimeAttribute>()) {
+                node->get_rt_info()[attribute.as<ov::RuntimeAttribute>().get_type_info()] = attribute;
+            }
         }
     }
 }
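To illustrate the ov::Any-based contract of addAttributes above: only payloads derived from ov::RuntimeAttribute pass the is<>() guard and get keyed into rt_info by their type_info; anything else is silently skipped. A hedged sketch, where DummyAttribute is hypothetical and the OPENVINO_RTTI registration macro is assumed:

// hypothetical attribute type for illustration
class DummyAttribute : public ov::RuntimeAttribute {
public:
    OPENVINO_RTTI("DummyAttribute");
};

// the int payload fails the RuntimeAttribute check and is ignored
addAttributes({node}, {DummyAttribute{}, ov::Any{42}});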
@ -19,7 +19,7 @@ FakeQuantizeOnData::FakeQuantizeOnData(
     const std::vector<float>& outputLowValues,
     const std::vector<float>& outputHighValues,
     const ngraph::element::Type outputPrecision,
-    const std::vector<std::shared_ptr<Variant>>& attributes) :
+    const std::vector<ov::Any>& attributes) :
     quantizationLevel(quantizationLevel),
     constantShape(constantShape),
     inputLowValues(inputLowValues),
@ -58,7 +58,7 @@ FakeQuantizeOnDataWithConstant::FakeQuantizeOnDataWithConstant(
     const std::vector<float>& outputLowValues,
     const std::vector<float>& outputHighValues,
     const ngraph::element::Type outputPrecision,
-    const std::vector<std::shared_ptr<Variant>>& attributes) :
+    const std::vector<ov::Any>& attributes) :
     quantizationLevel(quantizationLevel),
     constantShapes(constantShapes),
     inputLowValues(inputLowValues),
@ -874,7 +874,7 @@ std::shared_ptr<ngraph::Function> ConcatFunction::get(
     const FakeQuantizeOnDataWithConstant& fqOnData2,
     const DequantizationOperations::Convert& convert2,
     const DequantizationOperations& dequantization2,
-    const std::vector<std::shared_ptr<Variant>>& concatAttributes,
+    const std::vector<ov::Any>& concatAttributes,
     const ngraph::element::Type precisionAfterOperation,
     const DequantizationOperations& dequantizationAfter,
     const std::int64_t& axis,
@ -960,7 +960,7 @@ std::shared_ptr<ngraph::Function> ConcatFunction::get(
     const DequantizationOperations::Convert& convert2,
     const DequantizationOperations& dequantization2,
     const bool addReshape2,
-    const std::vector<std::shared_ptr<Variant>>& concatAttributes,
+    const std::vector<ov::Any>& concatAttributes,
     const ngraph::element::Type precisionAfterOperation,
     const DequantizationOperations& dequantizationAfter,
     const std::int64_t& axis,
|
@ -32,7 +32,7 @@ std::shared_ptr<ngraph::Function> MoveFakeQuantize::get(
|
|||||||
const FakeQuantizeOnDataWithConstant& fqOnData3,
|
const FakeQuantizeOnDataWithConstant& fqOnData3,
|
||||||
const DequantizationOperations::Convert& convert3,
|
const DequantizationOperations::Convert& convert3,
|
||||||
const DequantizationOperations& dequantization3,
|
const DequantizationOperations& dequantization3,
|
||||||
const std::vector<std::shared_ptr<Variant>>& concatAttributes,
|
const std::vector<ov::Any>& concatAttributes,
|
||||||
const ngraph::element::Type precisionAfterOperation,
|
const ngraph::element::Type precisionAfterOperation,
|
||||||
const DequantizationOperations& dequantizationAfter,
|
const DequantizationOperations& dequantizationAfter,
|
||||||
const std::int64_t& axis) {
|
const std::int64_t& axis) {
|
||||||
|
@ -428,6 +428,15 @@ std::shared_ptr<Node> makePooling(const ngraph::Output<Node> &in,
                                   bool excludePad,
                                   const ngraph::helpers::PoolingTypes &poolType);

+std::shared_ptr<Node> makeMaxPoolingV8(const ngraph::Output<Node> &in,
+                                       const std::vector<size_t> &strides,
+                                       const std::vector<size_t> &dilation,
+                                       const std::vector<size_t> &padsBegin,
+                                       const std::vector<size_t> &padsEnd,
+                                       const std::vector<size_t> &kernel,
+                                       const op::RoundingType &roundingType,
+                                       const op::PadType &padType);
+
 std::shared_ptr<Node> makeROIPooling(const Output<Node>& input,
                                      const Output<Node>& coords,
                                      const Shape& output_size,
@ -35,5 +35,18 @@ std::shared_ptr<Node> makePooling(const ngraph::Output<Node> &in,
     return pooling;
 }

+std::shared_ptr<Node> makeMaxPoolingV8(const ngraph::Output<Node> &in,
+                                       const std::vector<size_t> &strides,
+                                       const std::vector<size_t> &dilation,
+                                       const std::vector<size_t> &padsBegin,
+                                       const std::vector<size_t> &padsEnd,
+                                       const std::vector<size_t> &kernel,
+                                       const op::RoundingType &roundingType,
+                                       const op::PadType &padType) {
+    std::shared_ptr<ngraph::Node> pooling = std::make_shared<ngraph::opset8::MaxPool>(in, strides, dilation, padsBegin, padsEnd,
+                                                                                      kernel, roundingType, padType);
+    return pooling;
+}
+
 } // namespace builder
 } // namespace ngraph
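A usage sketch of the builder defined above, mirroring the call in MaxPoolingV8LayerTest::SetUp; the parameter values are illustrative:

// 2x2 max pooling, stride 2, no dilation or padding
auto maxPool = ngraph::builder::makeMaxPoolingV8(
    paramOuts[0],
    /*strides*/   {2, 2},
    /*dilation*/  {1, 1},
    /*padsBegin*/ {0, 0},
    /*padsEnd*/   {0, 0},
    /*kernel*/    {2, 2},
    ngraph::op::RoundingType::FLOOR,
    ngraph::op::PadType::EXPLICIT);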
@ -17,6 +17,7 @@
 #include <gmock/gmock.h>
 #include "plugin/mock_auto_device_plugin.hpp"
 #include "cpp/ie_plugin.hpp"
+#include "mock_common.hpp"

 using ::testing::MatcherCast;
 using ::testing::AllOf;
@ -33,15 +34,18 @@ using ::testing::AnyNumber;
 using Config = std::map<std::string, std::string>;
 using namespace MockMultiDevice;

-#define IE_SET_METRIC(key, name, ...) \
-    typename ::InferenceEngine::Metrics::MetricType<::InferenceEngine::Metrics::key>::type name = \
-        __VA_ARGS__;
-
 using DeviceParams = std::tuple<std::string, bool>;

+enum MODEL {
+    GENERAL = 0,
+    LATENCY = 1,
+    THROUGHPUT = 2,
+};
+
 using ConfigParams = std::tuple<
     bool,                      // if can continue to run
     bool,                      // if select throw exception
+    MODEL,                     // config model: general, latency, throughput
     std::vector<DeviceParams>, // {device, loadSuccess}
     unsigned int,              // select count
     unsigned int,              // load count
@ -72,7 +76,8 @@ public:
     std::vector<std::tuple<std::string, bool>> deviceConfigs;
     bool continueRun;
     bool thrExcWheSelect;
-    std::tie(continueRun, thrExcWheSelect, deviceConfigs,
+    MODEL configModel;
+    std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs,
              selectCount, loadCount, loadSuccessCount) = obj.param;
     std::ostringstream result;
     for (auto& item : deviceConfigs) {
@ -87,6 +92,22 @@ public:
     } else {
         result << "select_success_";
     }

+    switch (configModel) {
+        case GENERAL:
+            result << "GENERAL";
+            break;
+        case LATENCY:
+            result << "LATENCY";
+            break;
+        case THROUGHPUT:
+            result << "THROUGHPUT";
+            break;
+        default:
+            LOG_ERROR("should not come here");
+            break;
+    }
+
     result << "select_" << selectCount << "_loadCount_"
            << loadCount << "_loadSuccessCount_" << loadSuccessCount;
     return result.str();
@ -142,7 +163,8 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) {
     std::vector<std::tuple<std::string, bool>> deviceConfigs;
     bool continueRun;
     bool thrExcWheSelect;
-    std::tie(continueRun, thrExcWheSelect, deviceConfigs, selectCount,
+    MODEL configModel;
+    std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, selectCount,
              loadCount, loadSuccessCount) = this->GetParam();

     // test auto plugin
@ -163,7 +185,24 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) {
             ::testing::Matcher<const Config&>(_)))
             .WillByDefault(Throw(InferenceEngine::GeneralError{""}));
     }
-    DeviceInformation devInfo = {deviceName, {}, 2, ""};
+    DeviceInformation devInfo;
+    switch (configModel) {
+        case GENERAL:
+            devInfo = {deviceName, {}, 2, ""};
+            break;
+        case LATENCY:
+            devInfo = {deviceName, {{CONFIG_KEY(PERFORMANCE_HINT),
+                       InferenceEngine::PluginConfigParams::LATENCY}}, 2, ""};
+            break;
+        case THROUGHPUT:
+            devInfo = {deviceName, {{CONFIG_KEY(PERFORMANCE_HINT),
+                       InferenceEngine::PluginConfigParams::THROUGHPUT}}, 2, ""};
+            break;
+        default:
+            LOG_ERROR("should not come here");
+            break;
+    }
+
     metaDevices.push_back(std::move(devInfo));
     // set the return value of SelectDevice
     // for example, if there are three devices, it will return GPU on the first call and then MYRIAD
@ -207,13 +246,13 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) {
     }

     // the test configuration, for example
-    // ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
+    // ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
     //               DeviceParams {CommonTestUtils::DEVICE_MYRIAD, true},
     //               DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 2, 3, 2},
     //
     // every element of ConfigParams:
-    // {continueRun, selectThrowException, deviceLoadSuccessVector, selectCount, loadCount, loadSuccessCount}
+    // {continueRun, selectThrowException, config model, deviceLoadSuccessVector, selectCount, loadCount, loadSuccessCount}
-    // { true, false, 3 devices, 2, 3, 2}
+    // { true, false, GENERAL, 3 devices, 2, 3, 2}
     //
     // there are three devices to load:
     // the CPU load succeeds, but the GPU load fails, so MYRIAD is selected and the load is retried
@ -223,52 +262,62 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) {
|
|||||||
// the inference request num is loadSuccessCount * optimalNum, in this test case optimalNum is 2
|
// the inference request num is loadSuccessCount * optimalNum, in this test case optimalNum is 2
|
||||||
// so inference request num is 4 (CPU 2, MYRIAD 2)
|
// so inference request num is 4 (CPU 2, MYRIAD 2)
|
||||||
//
|
//
|
const std::vector<ConfigParams> testConfigs = {ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
const std::vector<ConfigParams> testConfigs = {ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 1, 2, 2},
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 2, 3, 2},
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 1, 2, 2},
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 1, 2, 1},
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 1, 2, 1},
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 2, 3, 1},
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 3, 4, 2},
ConfigParams {false, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 3, 4, 0},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 1, 2, 2},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 2, 3, 2},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 1, 2, 1},
ConfigParams {false, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 2, 3, 0},
ConfigParams {false, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false}}, 1, 1, 0},
ConfigParams {false, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 1, 1, 0},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, true}}, 1, 1, 1},
ConfigParams {true, false, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 1, 1, 1},
ConfigParams {false, true, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, true}}, 1, 0, 0},
ConfigParams {false, true, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 1, 0, 0},
ConfigParams {true, true, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 2, 2, 1},
ConfigParams {false, true, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 2, 2, 0},
ConfigParams {true, true, GENERAL, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 2, 2, 1},
ConfigParams {true, false, LATENCY, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 3, 3, 1},
ConfigParams {false, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
ConfigParams {true, false, LATENCY, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 2, 2, 1},
ConfigParams {true, false, THROUGHPUT, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 3, 3, 0},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 3, 4, 2},
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
ConfigParams {true, false, THROUGHPUT, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 1, 2, 2},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 2, 3, 2}
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 2, 2, 1},
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 1, 2, 1},
ConfigParams {false, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 2, 2, 0},
ConfigParams {false, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, false}}, 1, 1, 0},
ConfigParams {false, false, {DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 1, 1, 0},
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_GPU, true}}, 1, 1, 1},
ConfigParams {true, false, {DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 1, 1, 1},
ConfigParams {false, true, {DeviceParams {CommonTestUtils::DEVICE_GPU, true}}, 1, 0, 0},
ConfigParams {false, true, {DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 1, 0, 0},
ConfigParams {true, true, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 2, 2, 1},
ConfigParams {false, true, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_MYRIAD, true},
DeviceParams {CommonTestUtils::DEVICE_CPU, false}}, 2, 2, 0},
ConfigParams {true, true, {DeviceParams {CommonTestUtils::DEVICE_GPU, false},
DeviceParams {CommonTestUtils::DEVICE_CPU, true}}, 2, 2, 1}
};

INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, AutoLoadFailedTest,
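For readers less familiar with the pattern driving the table above, here is a minimal, self-contained sketch of a gtest value-parameterized suite; the names and tuple layout below are illustrative, not taken from the test source:

#include <gtest/gtest.h>
#include <tuple>
#include <vector>

using ExampleParams = std::tuple<bool, int>;  // e.g. {continueRun, selectCount}

class ExampleParamTest : public ::testing::TestWithParam<ExampleParams> {};

TEST_P(ExampleParamTest, ReadsOneRow) {
    bool continueRun = false;
    int selectCount = 0;
    std::tie(continueRun, selectCount) = GetParam();  // one row of the config table
    EXPECT_GE(selectCount, 0);
}

static const std::vector<ExampleParams> kExampleConfigs = {{true, 1}, {false, 2}};

INSTANTIATE_TEST_SUITE_P(Smoke, ExampleParamTest,
                         ::testing::ValuesIn(kExampleConfigs));

Each row of testConfigs becomes one test instance, which is why the comments above spell out the meaning of every tuple position.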
269 inference-engine/tests/unit/auto/exec_network_get_metrics.cpp Normal file
@ -0,0 +1,269 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <ie_metric_helpers.hpp>
|
||||||
|
#include <common_test_utils/test_constants.hpp>
|
||||||
|
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp"
|
||||||
|
#include "unit_test_utils/mocks/mock_iinfer_request.hpp"
|
||||||
|
#include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp"
|
||||||
|
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp"
|
||||||
|
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp"
|
||||||
|
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp"
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
#include <multi-device/multi_device_config.hpp>
|
||||||
|
#include <ngraph_functions/subgraph_builders.hpp>
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
#include <gmock/gmock.h>
|
||||||
|
#include "plugin/mock_auto_device_plugin.hpp"
|
||||||
|
#include "cpp/ie_plugin.hpp"
|
||||||
|
#include <chrono>
|
||||||
|
#include <thread>
|
||||||
|
#include "mock_common.hpp"
|
||||||
|
|
||||||
|
using ::testing::MatcherCast;
|
||||||
|
using ::testing::AllOf;
|
||||||
|
using ::testing::Throw;
|
||||||
|
using ::testing::Matches;
|
||||||
|
using ::testing::_;
|
||||||
|
using ::testing::StrEq;
|
||||||
|
using ::testing::Return;
|
||||||
|
using ::testing::Property;
|
||||||
|
using ::testing::Eq;
|
||||||
|
using ::testing::ReturnRef;
|
||||||
|
using ::testing::AtLeast;
|
||||||
|
using ::testing::AnyNumber;
|
||||||
|
using ::testing::InvokeWithoutArgs;
|
||||||
|
using Config = std::map<std::string, std::string>;
|
||||||
|
using namespace MockMultiDevice;
|
||||||
|
|
||||||
|
using ConfigParams = std::tuple<
|
||||||
|
unsigned int, // cpu OPTIMAL_NUMBER_OF_INFER_REQUESTS
|
||||||
|
int, // cpu infer requet num of customer want
|
||||||
|
bool, // if cpu sleep, cpu device will load slow
|
||||||
|
unsigned int, // gpu OPTIMAL_NUMBER_OF_INFER_REQUESTS
|
||||||
|
int, // gpu infer requet num of customer want
|
||||||
|
bool, // if gpu sleep, cpu device will load slow
|
||||||
|
unsigned int // expect OPTIMAL_NUMBER_OF_INFER_REQUESTS
|
||||||
|
>;
|
||||||
|
class ExecNetworkGetMetric : public ::testing::TestWithParam<ConfigParams> {
public:
    std::shared_ptr<ngraph::Function> function;
    InferenceEngine::CNNNetwork cnnNet;
    std::shared_ptr<MockICore> core;
    std::shared_ptr<MockMultiDeviceInferencePlugin> plugin;

    // mock cpu exeNetwork
    std::shared_ptr<MockIExecutableNetworkInternal> cpuMockIExeNet;
    ov::runtime::SoPtr<IExecutableNetworkInternal> cpuMockExeNetwork;
    MockIInferencePlugin* cpuMockIPlugin;
    InferenceEngine::InferencePlugin cpuMockPlugin;
    // mock gpu exeNetwork
    std::shared_ptr<MockIExecutableNetworkInternal> gpuMockIExeNet;
    ov::runtime::SoPtr<IExecutableNetworkInternal> gpuMockExeNetwork;
    MockIInferencePlugin* gpuMockIPlugin;
    InferenceEngine::InferencePlugin gpuMockPlugin;
    // config for Auto device
    std::map<std::string, std::string> config;
    std::vector<DeviceInformation> metaDevices;
    std::shared_ptr<MockIInferRequestInternal> inferReqInternal;

public:
    static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
        unsigned int cpuOptimalNum;
        int cpuCustomerNum;
        unsigned int gpuOptimalNum;
        int gpuCustomerNum;
        unsigned int expectOptimalNum;
        bool cpuSleep;
        bool gpuSleep;
        std::tie(cpuOptimalNum, cpuCustomerNum, cpuSleep,
                 gpuOptimalNum, gpuCustomerNum, gpuSleep, expectOptimalNum) = obj.param;
        std::ostringstream result;
        result << "cpuOptimalNum_" << cpuOptimalNum << "cpuCustomerNum_" << cpuCustomerNum;
        result << "gpuOptimalNum_" << gpuOptimalNum << "gpuCustomerNum_" << gpuCustomerNum;
        result << "expectOptimalNum_" << expectOptimalNum;
        if (cpuSleep) {
            result << "_cpuSleep_" << "true";
        } else {
            result << "_cpuSleep_" << "false";
        }

        if (gpuSleep) {
            result << "_gpuSleep_" << "true";
        } else {
            result << "_gpuSleep_" << "false";
        }

        return result.str();
    }

    void TearDown() override {
        core.reset();
        plugin.reset();
        cpuMockIExeNet.reset();
        cpuMockExeNetwork = {};
        cpuMockPlugin = {};
        gpuMockIExeNet.reset();
        gpuMockExeNetwork = {};
        gpuMockPlugin = {};
        config.clear();
        metaDevices.clear();
        inferReqInternal.reset();
    }

    void SetUp() override {
        // prepare cpuMockExeNetwork
        cpuMockIExeNet = std::make_shared<MockIExecutableNetworkInternal>();
        auto cpuMockIPluginPtr = std::make_shared<MockIInferencePlugin>();
        ON_CALL(*cpuMockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).WillByDefault(Return(cpuMockIExeNet));
        cpuMockPlugin = InferenceEngine::InferencePlugin{{}, cpuMockIPluginPtr};
        // remove the annoying ON_CALL message
        EXPECT_CALL(*cpuMockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).Times(1);
        cpuMockExeNetwork = cpuMockPlugin.LoadNetwork(CNNNetwork{}, {});

        // prepare gpuMockExeNetwork
        gpuMockIExeNet = std::make_shared<MockIExecutableNetworkInternal>();
        auto gpuMockIPluginPtr = std::make_shared<MockIInferencePlugin>();
        ON_CALL(*gpuMockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).WillByDefault(Return(gpuMockIExeNet));
        gpuMockPlugin = InferenceEngine::InferencePlugin{{}, gpuMockIPluginPtr};
        // remove the annoying ON_CALL message
        EXPECT_CALL(*gpuMockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).Times(1);
        gpuMockExeNetwork = gpuMockPlugin.LoadNetwork(CNNNetwork{}, {});

        // prepare the mock ICore and cnnNetwork for loading
        core = std::shared_ptr<MockICore>(new MockICore());
        auto* origin_plugin = new MockMultiDeviceInferencePlugin();
        plugin = std::shared_ptr<MockMultiDeviceInferencePlugin>(origin_plugin);
        function = ngraph::builder::subgraph::makeConvPoolRelu();
        cnnNet = InferenceEngine::CNNNetwork(function);
        // replace core with the mock ICore
        plugin->SetCore(core);
        // make the mock execNetwork usable
        inferReqInternal = std::make_shared<MockIInferRequestInternal>();
        ON_CALL(*cpuMockIExeNet.get(), CreateInferRequest()).WillByDefault(Return(inferReqInternal));
        ON_CALL(*gpuMockIExeNet.get(), CreateInferRequest()).WillByDefault(Return(inferReqInternal));
        EXPECT_CALL(*inferReqInternal, SetCallback).Times(AtLeast(1));
        IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, supportConfigs, {});
        ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_CONFIG_KEYS)), _))
            .WillByDefault(RETURN_MOCK_VALUE(supportConfigs));
        EXPECT_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_CONFIG_KEYS)), _)).Times(AnyNumber());

        // test the auto plugin
        config.insert({CONFIG_KEY_INTERNAL(MULTI_WORK_MODE_AS_AUTO), InferenceEngine::PluginConfigParams::YES});
        config.insert({InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
                       CommonTestUtils::DEVICE_CPU + std::string(",") + CommonTestUtils::DEVICE_GPU});

        ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
            std::cout << stream.str() << std::endl;
        });
    }
};
|
||||||
|
unsigned int cpuOptimalNum;
|
||||||
|
int cpuCustomerNum;
|
||||||
|
unsigned int gpuOptimalNum;
|
||||||
|
int gpuCustomerNum;
|
||||||
|
unsigned int expectOptimalNum;
|
||||||
|
bool cpuSleep;
|
||||||
|
bool gpuSleep;
|
||||||
|
std::tie(cpuOptimalNum, cpuCustomerNum, cpuSleep,
|
||||||
|
gpuOptimalNum, gpuCustomerNum, gpuSleep, expectOptimalNum) = this->GetParam();
|
||||||
|
|
||||||
|
metaDevices.push_back({CommonTestUtils::DEVICE_CPU, {}, cpuCustomerNum, ""});
|
||||||
|
metaDevices.push_back({CommonTestUtils::DEVICE_GPU, {}, gpuCustomerNum, ""});
|
||||||
|
ON_CALL(*plugin, SelectDevice(_, _)).WillByDefault(Return(metaDevices[1]));
|
||||||
|
ON_CALL(*plugin, ParseMetaDevices(_, _)).WillByDefault(Return(metaDevices));
|
||||||
|
EXPECT_CALL(*plugin, ParseMetaDevices(_, _)).Times(1);
|
||||||
|
EXPECT_CALL(*plugin, SelectDevice(_, _)).Times(1);
|
||||||
|
|
||||||
|
if (cpuSleep) {
|
||||||
|
ON_CALL(*core, LoadNetwork(::testing::Matcher<const InferenceEngine::CNNNetwork&>(_),
|
||||||
|
::testing::Matcher<const std::string&>(StrEq(CommonTestUtils::DEVICE_CPU)),
|
||||||
|
::testing::Matcher<const Config&>(_))).WillByDefault(InvokeWithoutArgs([this]() {
|
||||||
|
std::this_thread::sleep_for(std::chrono::milliseconds(100));
|
||||||
|
return cpuMockExeNetwork;
|
||||||
|
}));
|
||||||
|
} else {
|
||||||
|
ON_CALL(*core, LoadNetwork(::testing::Matcher<const InferenceEngine::CNNNetwork&>(_),
|
||||||
|
::testing::Matcher<const std::string&>(StrEq(CommonTestUtils::DEVICE_CPU)),
|
||||||
|
::testing::Matcher<const Config&>(_))).WillByDefault(Return(cpuMockExeNetwork));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (gpuSleep) {
|
||||||
|
ON_CALL(*core, LoadNetwork(::testing::Matcher<const InferenceEngine::CNNNetwork&>(_),
|
||||||
|
::testing::Matcher<const std::string&>(StrEq(CommonTestUtils::DEVICE_GPU)),
|
||||||
|
::testing::Matcher<const Config&>(_))).WillByDefault(InvokeWithoutArgs([this]() {
|
||||||
|
std::this_thread::sleep_for(std::chrono::milliseconds(100));
|
||||||
|
return gpuMockExeNetwork;
|
||||||
|
}));
|
||||||
|
} else {
|
||||||
|
ON_CALL(*core, LoadNetwork(::testing::Matcher<const InferenceEngine::CNNNetwork&>(_),
|
||||||
|
::testing::Matcher<const std::string&>(StrEq(CommonTestUtils::DEVICE_GPU)),
|
||||||
|
::testing::Matcher<const Config&>(_))).WillByDefault(Return(gpuMockExeNetwork));
|
||||||
|
}
|
||||||
|
|
||||||
|
ON_CALL(*cpuMockIExeNet.get(), GetMetric(StrEq(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))))
|
||||||
|
.WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum));
|
||||||
|
ON_CALL(*gpuMockIExeNet.get(), GetMetric(StrEq(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))))
|
||||||
|
.WillByDefault(RETURN_MOCK_VALUE(gpuOptimalNum));
|
||||||
|
|
||||||
|
EXPECT_CALL(*cpuMockIExeNet.get(), GetMetric(StrEq(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))))
|
||||||
|
.Times(AtLeast(1));
|
||||||
|
|
||||||
|
EXPECT_CALL(*gpuMockIExeNet.get(), GetMetric(StrEq(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))))
|
||||||
|
.Times(AtLeast(1));
|
||||||
|
|
||||||
|
EXPECT_CALL(*core, LoadNetwork(::testing::Matcher<const InferenceEngine::CNNNetwork&>(_),
|
||||||
|
::testing::Matcher<const std::string&>(CommonTestUtils::DEVICE_CPU),
|
||||||
|
::testing::Matcher<const Config&>(_))).Times(1);
|
||||||
|
|
||||||
|
EXPECT_CALL(*core, LoadNetwork(::testing::Matcher<const InferenceEngine::CNNNetwork&>(_),
|
||||||
|
::testing::Matcher<const std::string&>(CommonTestUtils::DEVICE_GPU),
|
||||||
|
::testing::Matcher<const Config&>(_))).Times(1);
|
||||||
|
|
||||||
|
if (cpuCustomerNum == -1) {
|
||||||
|
EXPECT_CALL(*cpuMockIExeNet.get(), CreateInferRequest()).Times(cpuOptimalNum);
|
||||||
|
} else {
|
||||||
|
EXPECT_CALL(*cpuMockIExeNet.get(), CreateInferRequest()).Times(cpuCustomerNum);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (gpuCustomerNum == -1) {
|
||||||
|
EXPECT_CALL(*gpuMockIExeNet.get(), CreateInferRequest()).Times(gpuOptimalNum);
|
||||||
|
} else {
|
||||||
|
EXPECT_CALL(*gpuMockIExeNet.get(), CreateInferRequest()).Times(gpuCustomerNum);
|
||||||
|
}
|
||||||
|
|
||||||
|
auto AutoExecNetwork = plugin->LoadExeNetworkImpl(cnnNet, config);
|
||||||
|
auto result = AutoExecNetwork->GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
|
||||||
|
EXPECT_EQ(result, expectOptimalNum);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
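The cpuSleep/gpuSleep branches above simulate a slow-loading device by making the mocked LoadNetwork block before returning. The same technique in isolation (Loader/MockLoader are hypothetical types, shown only to make the sketch self-contained):

#include <gmock/gmock.h>
#include <chrono>
#include <string>
#include <thread>

struct Loader {
    virtual ~Loader() = default;
    virtual std::string Load(const std::string& device) = 0;
};

struct MockLoader : Loader {
    MOCK_METHOD(std::string, Load, (const std::string&), (override));
};

void MakeCpuLoadSlowly(MockLoader& loader) {
    using ::testing::InvokeWithoutArgs;
    ON_CALL(loader, Load("CPU")).WillByDefault(InvokeWithoutArgs([] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));  // emulate a slow plugin
        return std::string("cpu-exec-network");
    }));
}

Because the AUTO plugin loads devices asynchronously, delaying one device changes which executable network is available when the metric is queried, which is what the expectOptimalNum column below encodes.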
// ConfigParams {unsigned int, int, bool,
//               unsigned int, int, bool, unsigned int}
//
// every element for ConfigParams
// {cpuOptimalNum, the cpu infer request num the customer hopes for, whether cpu sleeps when loading,
//  gpuOptimalNum, the gpu infer request num the customer hopes for, whether gpu sleeps when loading,
//  expectOptimalNum of the Auto ExecNetwork}
//
const std::vector<ConfigParams> testConfigs = {
    ConfigParams {1, -1, false, 2, -1, true, 8},
    ConfigParams {1, -1, false, 10, -1, true, 8},
    ConfigParams {12, -1, false, 2, -1, true, 12},
    ConfigParams {12, -1, false, 10, -1, true, 12},
    ConfigParams {1, -1, true, 2, -1, false, 8},
    ConfigParams {1, -1, true, 10, -1, false, 10},
    ConfigParams {6, -1, true, 2, -1, false, 8},
    ConfigParams {6, -1, true, 10, -1, false, 10},
    ConfigParams {6, 4, false, 2, 3, true, 8},
    ConfigParams {6, 4, false, 10, 3, true, 8},
    ConfigParams {1, 4, true, 2, 3, false, 8},
    ConfigParams {1, 4, true, 10, 3, false, 10}
};

INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecNetworkGetMetric,
                ::testing::ValuesIn(testConfigs),
                ExecNetworkGetMetric::getTestCaseName);
16 inference-engine/tests/unit/auto/mock_common.cpp Normal file
@ -0,0 +1,16 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include "mock_common.hpp"
|
||||||
|
|
||||||
|
// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any
|
||||||
|
// it will cause core dump, so add this special implemented
|
||||||
|
namespace testing {
|
||||||
|
namespace internal {
|
||||||
|
template<>
|
||||||
|
void PrintTo<ov::Any>(const ov::Any& a, std::ostream* os) {
|
||||||
|
*os << "using custom PrintTo ov::Any";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
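The specialization above exists because gmock prints argument values when describing expectations and failures. The general, documented way to customize printing for your own type is an ADL-visible PrintTo overload; a minimal sketch with a hypothetical type:

#include <gtest/gtest.h>
#include <ostream>

namespace myns {
struct Opaque { int id; };

// Found via argument-dependent lookup; googletest prefers this over raw byte dumps.
inline void PrintTo(const Opaque& value, std::ostream* os) {
    *os << "Opaque{" << value.id << "}";
}
}  // namespace myns

Specializing testing::internal::PrintTo directly, as the file above does, is a heavier hammer used here because operator<< for ov::Any cannot be invoked safely on the fake values.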
24 inference-engine/tests/unit/auto/mock_common.hpp Normal file
@ -0,0 +1,24 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
#include <gmock/gmock.h>
|
||||||
|
#include <ie_metric_helpers.hpp>
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
|
||||||
|
#define IE_SET_METRIC(key, name, ...) \
|
||||||
|
typename ::InferenceEngine::Metrics::MetricType<::InferenceEngine::Metrics::key>::type name = \
|
||||||
|
__VA_ARGS__;
|
||||||
|
|
||||||
|
#define RETURN_MOCK_VALUE(value) \
|
||||||
|
InvokeWithoutArgs([value](){return value;})
|
||||||
|
|
||||||
|
// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any
|
||||||
|
// it will cause core dump, so add this special implemented
|
||||||
|
namespace testing {
|
||||||
|
namespace internal {
|
||||||
|
template<>
|
||||||
|
void PrintTo<ov::Any>(const ov::Any& a, std::ostream* os);
|
||||||
|
}
|
||||||
|
}
|
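A short note on these helpers: IE_SET_METRIC declares a local whose type is derived from the metric key, and RETURN_MOCK_VALUE wraps the value in InvokeWithoutArgs, apparently so gmock evaluates it lazily instead of storing and pretty-printing it (the same ov::Any printing problem the PrintTo declaration above works around). Usage mirroring the test file earlier in this diff:

// declare a correctly typed metric value:
//   IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, supportConfigs, {});
// and install it as a lazily evaluated default action:
//   ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_CONFIG_KEYS)), _))
//       .WillByDefault(RETURN_MOCK_VALUE(supportConfigs));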
@ -134,13 +134,22 @@ static std::shared_ptr<ngraph::Function> create_simple_function() {
data->get_output_tensor(0).set_names({"parameter"});

auto mul_constant = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {3});
mul_constant->set_friendly_name("mul_constant");
mul_constant->get_output_tensor(0).set_names({"mul_constant"});
auto mul = std::make_shared<ngraph::opset6::Multiply>(data, mul_constant);
mul->set_friendly_name("mul");
mul->get_output_tensor(0).set_names({"mul"});

auto add_constant = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {2});
add_constant->set_friendly_name("add_constant");
add_constant->get_output_tensor(0).set_names({"add_constant"});
auto add = std::make_shared<ngraph::opset6::Add>(mul, add_constant);
add->set_friendly_name("add");
add->get_output_tensor(0).set_names({"add"});

// Create opset3::Result operation
auto res = std::make_shared<ngraph::opset6::Result>(add);
res->set_friendly_name("res");

// Create nGraph function
auto func = std::make_shared<ngraph::Function>(ngraph::ResultVector{res}, ngraph::ParameterVector{data});
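The lines added in this hunk give every node and output tensor explicit names, presumably so the network-hashing tests below exercise name-sensitive hashing. A condensed sketch of the pattern, using the same nGraph calls as the hunk (header paths are an assumption; shapes and values are illustrative):

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset6.hpp>

static void name_node_and_tensor() {
    auto c = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {3});
    c->set_friendly_name("mul_constant");                 // node-level (friendly) name
    c->get_output_tensor(0).set_names({"mul_constant"});  // tensor-level name set
}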
@ -208,7 +217,7 @@ TEST(NetworkContext_CNNNetwork, HashWithPrimitivesPriority) {
auto net2 = createNetwork();
auto net3 = createNetwork();
auto & op2 = net2.getFunction()->get_ops().front()->get_rt_info();
op2["PrimitivesPriority"] = std::make_shared<ngraph::VariantWrapper<std::string> > ("testPriority");
op2[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority("testPriority");

auto & op3 = net3.getFunction()->get_ops().front()->get_rt_info();
op3["PrimitivesPriority"] = std::make_shared<ngraph::VariantWrapper<std::string> > ("testPriority");
@ -222,24 +231,20 @@ TEST(NetworkContext_CNNNetwork, HashWithPrimitivesPriority) {

TEST(NetworkContext_CNNNetwork, HashWithFusedNames) {
auto setFusedEmpty = [&](Node::RTMap& rtInfo) {
rtInfo[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
rtInfo[ngraph::FusedNames::get_type_info_static()] = ngraph::FusedNames();
std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames());
};
auto setFused = [&](Node::RTMap& rtInfo, const std::string& name) {
rtInfo[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
rtInfo[ngraph::FusedNames::get_type_info_static()] = ngraph::FusedNames(name);
std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames(name));
};
checkCustomRt(setFusedEmpty, setFused);
}

TEST(NetworkContext_CNNNetwork, HashWithPrimitivesPriorityType) {
auto setPrimEmpty = [&](Node::RTMap& rtInfo) {
rtInfo[ov::PrimitivesPriority::get_type_info_static()] =
rtInfo[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority("");
std::make_shared<ov::PrimitivesPriority>("");
};
auto setPrim = [&](Node::RTMap& rtInfo, const std::string& name) {
rtInfo[ov::PrimitivesPriority::get_type_info_static()] =
rtInfo[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority(name);
std::make_shared<ov::PrimitivesPriority>(name);
};
checkCustomRt(setPrimEmpty, setPrim);
}
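The replacements in these hunks migrate runtime attributes from the old shared_ptr<Variant>/VariantWrapper storage to the new value-semantics style, where the attribute object itself is stored in the node's RTMap (an ov::Any map) keyed by its static type info. A minimal sketch of the new-style write; the include paths and the priority string are assumptions, while the calls mirror the hunk above:

#include <ngraph/node.hpp>                 // assumed location of Node::RTMap
#include <openvino/core/rt_info.hpp>       // assumed location of ov::PrimitivesPriority

void tag_with_priority(ngraph::Node::RTMap& rt_info) {
    rt_info[ov::PrimitivesPriority::get_type_info_static()] =
        ov::PrimitivesPriority("testPriority");  // stored by value as ov::Any
}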
@ -0,0 +1,56 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
#include "primitive.hpp"
#include <vector>

namespace cldnn {
/// @addtogroup cpp_api C++ API
/// @{
/// @addtogroup cpp_topology Network Topology
/// @{
/// @addtogroup cpp_primitives Primitives
/// @{

/// @brief experimental detectron ROI feature extractor
struct experimental_detectron_roi_feature_extractor : public primitive_base<experimental_detectron_roi_feature_extractor> {
    CLDNN_DECLARE_PRIMITIVE(experimental_detectron_roi_feature_extractor)

    /// @brief Constructs experimental_detectron_roi_feature_extractor primitive
    /// @param id This primitive id
    /// @param inputs Inputs for primitive id (ROIs, {pyramid levels, ...}, second_output)
    /// @param output_dim Attribute specifying the width and height of the output tensor
    /// @param pyramid_scales Scales of pyramid levels
    /// @param sampling_ratio Attribute specifying the number of sampling points per output value
    /// @param aligned Attribute specifying whether to add the offset (-0.5) to the ROI sizes
    experimental_detectron_roi_feature_extractor(const primitive_id& id,
                                                 const std::vector<primitive_id>& inputs,
                                                 int output_dim,
                                                 const std::vector<int64_t>& pyramid_scales,
                                                 int sampling_ratio,
                                                 bool aligned,
                                                 const primitive_id& ext_prim_id = "",
                                                 const padding& output_padding = padding()) :
            primitive_base(id, inputs, ext_prim_id, output_padding),
            output_dim(output_dim),
            pooled_height(output_dim),
            pooled_width(output_dim),
            pyramid_scales(pyramid_scales),
            sampling_ratio(sampling_ratio),
            aligned(aligned) {}

    int output_dim = 0;
    int pooled_height = 0;
    int pooled_width = 0;
    std::vector<int64_t> pyramid_scales;
    int sampling_ratio = 0;
    bool aligned = false;
};

/// @}
/// @}
/// @}
}  // namespace cldnn
69 inference-engine/thirdparty/clDNN/api/cldnn/primitives/roi_align.hpp vendored Normal file
@ -0,0 +1,69 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
#pragma once
|
||||||
|
#include "primitive.hpp"
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
namespace cldnn {
|
||||||
|
/// @addtogroup cpp_api C++ API
|
||||||
|
/// @{
|
||||||
|
/// @addtogroup cpp_topology Network Topology
|
||||||
|
/// @{
|
||||||
|
/// @addtogroup cpp_primitives Primitives
|
||||||
|
/// @{
|
||||||
|
|
||||||
|
/// @brief ROIAlign is a pooling layer used over feature maps of
|
||||||
|
/// non-uniform input sizes and outputs a feature map of a fixed size.
|
||||||
|
struct roi_align : public primitive_base<roi_align> {
|
||||||
|
CLDNN_DECLARE_PRIMITIVE(roi_align)
|
||||||
|
|
||||||
|
/// @brief Pooling mode for the @ref roi_align
|
||||||
|
enum PoolingMode {
|
||||||
|
Max,
|
||||||
|
Avg
|
||||||
|
};
|
||||||
|
|
||||||
|
/// @brief Constructs roi_align primitive.
|
||||||
|
/// @param id This primitive id.
|
||||||
|
/// @param inputs Inputs data primitive ids.
|
||||||
|
/// @param pooled_h Height of the ROI output feature map.
|
||||||
|
/// @param pooled_w Width of the ROI output feature map.
|
||||||
|
/// @param sampling_ratio Number of bins over height and width to use to calculate each output feature map element.
|
||||||
|
/// @param spatial_scale multiplicative spatial scale factor to translate ROI coordinates
|
||||||
|
/// from their input spatial scale to the scale used when pooling.
|
||||||
|
/// @param mode Method to perform pooling to produce output feature map elements.
|
||||||
|
/// @param shrink_axis_mask Array of bits, that provide shrinks the dimensionality by 1, taking on the value at index begin[i].
|
||||||
|
roi_align(const primitive_id& id,
|
||||||
|
const std::vector<primitive_id>& inputs,
|
||||||
|
int pooled_h,
|
||||||
|
int pooled_w,
|
||||||
|
int sampling_ratio,
|
||||||
|
float spatial_scale,
|
||||||
|
PoolingMode mode,
|
||||||
|
const primitive_id& ext_prim_id = "",
|
||||||
|
const padding& output_padding = padding())
|
||||||
|
: primitive_base(id, inputs, ext_prim_id, output_padding),
|
||||||
|
pooled_h {pooled_h},
|
||||||
|
pooled_w {pooled_w},
|
||||||
|
sampling_ratio {sampling_ratio},
|
||||||
|
spatial_scale {spatial_scale},
|
||||||
|
mode {mode}
|
||||||
|
{}
|
||||||
|
|
||||||
|
/// @brief Height of the ROI output feature map.
|
||||||
|
int pooled_h;
|
||||||
|
/// @brief Width of the ROI output feature map.
|
||||||
|
int pooled_w;
|
||||||
|
/// @brief Number of bins over height and width to use to calculate each output feature map element.
|
||||||
|
int sampling_ratio;
|
||||||
|
/// @brief multiplicative spatial scale factor to translate ROI coordinates
|
||||||
|
/// from their input spatial scale to the scale used when pooling.
|
||||||
|
float spatial_scale;
|
||||||
|
/// @brief Method to perform pooling to produce output feature map elements.
|
||||||
|
PoolingMode mode;
|
||||||
|
};
|
||||||
|
/// @}
|
||||||
|
/// @}
|
||||||
|
/// @}
|
||||||
|
} // namespace cldnn
|
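An example instantiation of the primitive declared above; the primitive ids and numeric values are illustrative, while the constructor signature is the one defined in this header (include path assumes the header is on the include path):

#include "roi_align.hpp"

cldnn::roi_align make_roi_align() {
    return cldnn::roi_align("roi_align_prim",
                            {"feature_map", "rois"},             // input primitive ids
                            7, 7,                                // pooled_h, pooled_w
                            2,                                   // sampling_ratio
                            1.0f / 16,                           // spatial_scale
                            cldnn::roi_align::PoolingMode::Avg); // pooling mode
}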
@ -34,8 +34,6 @@ struct format_traits {
size_t feature_num;
/// @brief Number of spatial (x,y) dimensions in a format.
size_t spatial_num;
/// @brief Number of local (x,y) dimensions in a format.
size_t local_num;
/// @brief Number of groups in a format.
size_t group_num;
/// @brief Dimensions changing order from rare to often.
@ -50,8 +48,6 @@ struct format_traits {
static const char* feature_chars() { return "fic"; }
/// @brief Characters representing spatial dimensions in an order.
static const char* spatial_chars() { return "xyzhsw"; }
/// @brief Characters representing local dimensions in an order.
static const char* local_chars() { return "kl"; }
/// @brief Characters representing group dimensions in an order.
static const char* group_chars() { return "g"; }
/// @brief Checks if @p c represents batch dimension.
@ -60,8 +56,6 @@ struct format_traits {
static bool is_feature_char(char c) { return std::string(feature_chars()).find_first_of(c) != std::string::npos; }
/// @brief Checks if @p c represents spatial dimension.
static bool is_spatial_char(char c) { return std::string(spatial_chars()).find_first_of(c) != std::string::npos; }
/// @brief Checks if @p c represents local dimensions.
static bool is_local_char(char c) { return std::string(local_chars()).find_first_of(c) != std::string::npos; }
/// @brief Checks if @p c represents group dimensions.
static bool is_group_char(char c) { return std::string(group_chars()).find_first_of(c) != std::string::npos; }
};
@ -235,139 +229,138 @@ struct format {
// B - number of Batch dimensions
// F - number of Feature dimensions
// S - number of Spatial dimensions
// L - number of Local dimensions
// G - number of Group dimensions
// Order - dims changing order from rare to often
// Inner order - dims order for internal storage in _sizes array
// Block sizes - vector of pairs of dimension number (by inner order) and block size ordered from rare to often
// Format B F S L G Order Inner order Block sizes
// Format B F S G Order Inner order Block sizes
{ yxfb, { 1, 1, 2, 0, 0, "yxfb", "bfxy?", {}}},
{ yxfb, { 1, 1, 2, 0, "yxfb", "bfxy?", {}}},
{ byxf, { 1, 1, 2, 0, 0, "byxf", "bfxy?", {}}},
{ byxf, { 1, 1, 2, 0, "byxf", "bfxy?", {}}},
{ bfyx, { 1, 1, 2, 0, 0, "bfyx", "bfxy?", {}}},
{ bfyx, { 1, 1, 2, 0, "bfyx", "bfxy?", {}}},
{ fyxb, { 1, 1, 2, 0, 0, "fyxb", "bfxy?", {}}},
{ fyxb, { 1, 1, 2, 0, "fyxb", "bfxy?", {}}},
{ b_fs_yx_fsv16, { 1, 1, 2, 0, 0, "bfyx", "bfxy", {{1, 16}}}},
{ b_fs_yx_fsv16, { 1, 1, 2, 0, "bfyx", "bfxy", {{1, 16}}}},
{ b_fs_yx_fsv32, { 1, 1, 2, 0, 0, "bfyx", "bfxy", {{1, 32}}}},
{ b_fs_yx_fsv32, { 1, 1, 2, 0, "bfyx", "bfxy", {{1, 32}}}},
{ b_fs_zyx_fsv32, { 1, 1, 3, 0, 0, "bfzyx", "bfxyz", {{1, 32}}}},
{ b_fs_zyx_fsv32, { 1, 1, 3, 0, "bfzyx", "bfxyz", {{1, 32}}}},
{ bs_xs_xsv8_bsv8, { 1, 1, 1, 0, 0, "bx", "b?x??", {{2, 8}, {0, 8}}}},
{ bs_xs_xsv8_bsv8, { 1, 1, 1, 0, "bx", "b?x??", {{2, 8}, {0, 8}}}},
{ bs_xs_xsv8_bsv16, { 1, 1, 1, 0, 0, "bx", "b?x??", {{2, 8}, {0, 16}}}},
{ bs_xs_xsv8_bsv16, { 1, 1, 1, 0, "bx", "b?x??", {{2, 8}, {0, 16}}}},
{ bs_x_bsv16, { 1, 1, 1, 0, 0, "bx", "b?x??", {{0, 16}}}},
{ bs_x_bsv16, { 1, 1, 1, 0, "bx", "b?x??", {{0, 16}}}},
{ winograd_2x3_s1_data, { 1, 1, 2, 0, 0, "bxyf", "bfxy?", {}}},
{ winograd_2x3_s1_data, { 1, 1, 2, 0, "bxyf", "bfxy?", {}}},
{ b_fs_yx_fsv4, { 1, 1, 2, 0, 0, "bfyx", "bfxy?", {{1, 4}}}},
{ b_fs_yx_fsv4, { 1, 1, 2, 0, "bfyx", "bfxy?", {{1, 4}}}},
{ bfzyx, { 1, 1, 3, 0, 0, "bfzyx", "bfxyz", {}}},
{ bfzyx, { 1, 1, 3, 0, "bfzyx", "bfxyz", {}}},
{ bfwzyx, { 1, 1, 4, 0, 0, "bfwzyx", "bfxyzw", {}}},
{ bfwzyx, { 1, 1, 4, 0, "bfwzyx", "bfxyzw", {}}},
{ fs_b_yx_fsv32, { 1, 1, 2, 0, 0, "fbyx", "bfxy?", {{1, 32}}}},
{ fs_b_yx_fsv32, { 1, 1, 2, 0, "fbyx", "bfxy?", {{1, 32}}}},
{ b_fs_yx_32fp, { 1, 1, 2, 0, 0, "bfyx", "bfxy?", {}}},
{ b_fs_yx_32fp, { 1, 1, 2, 0, "bfyx", "bfxy?", {}}},
{ b_fs_zyx_fsv16, { 1, 1, 3, 0, 0, "bfzyx", "bfxyz", {{1, 16}}}},
{ b_fs_zyx_fsv16, { 1, 1, 3, 0, "bfzyx", "bfxyz", {{1, 16}}}},
{ bs_fs_zyx_bsv16_fsv16, { 1, 1, 3, 0, 0, "bfzyx", "bfxyz", {{0, 16 }, {1, 16}}}},
{ bs_fs_zyx_bsv16_fsv16, { 1, 1, 3, 0, "bfzyx", "bfxyz", {{0, 16 }, {1, 16}}}},
{ bs_fs_yx_bsv16_fsv16, { 1, 1, 2, 0, 0, "bfyx", "bfxy?", {{0, 16 }, {1, 16}}}},
{ bs_fs_yx_bsv16_fsv16, { 1, 1, 2, 0, "bfyx", "bfxy?", {{0, 16 }, {1, 16}}}},
{ bs_fs_yx_bsv4_fsv4, { 1, 1, 2, 0, 0, "bfyx", "bfxy?", {{0, 4 }, {1, 4}}}},
{ bs_fs_yx_bsv4_fsv4, { 1, 1, 2, 0, "bfyx", "bfxy?", {{0, 4 }, {1, 4}}}},
{ bs_fs_yx_bsv4_fsv2, { 1, 1, 2, 0, 0, "bfyx", "bfxy?", {{0, 4 }, {1, 2}}}},
{ bs_fs_yx_bsv4_fsv2, { 1, 1, 2, 0, "bfyx", "bfxy?", {{0, 4 }, {1, 2}}}},
{ bs_fs_zyx_bsv4_fsv4, { 1, 1, 3, 0, 0, "bfzyx", "bfxyz", {{0, 4 }, {1, 4}}}},
{ bs_fs_zyx_bsv4_fsv4, { 1, 1, 3, 0, "bfzyx", "bfxyz", {{0, 4 }, {1, 4}}}},
{ bs_fs_zyx_bsv4_fsv2, { 1, 1, 3, 0, 0, "bfzyx", "bfxyz", {{0, 4 }, {1, 2}}}},
{ bs_fs_zyx_bsv4_fsv2, { 1, 1, 3, 0, "bfzyx", "bfxyz", {{0, 4 }, {1, 2}}}},
{ bs_fs_zyx_bsv32_fsv32, { 1, 1, 3, 0, 0, "bfzyx", "bfxyz", {{0, 32 }, {1, 32}}}},
{ bs_fs_zyx_bsv32_fsv32, { 1, 1, 3, 0, "bfzyx", "bfxyz", {{0, 32 }, {1, 32}}}},
{ bs_fs_zyx_bsv32_fsv16, { 1, 1, 3, 0, 0, "bfzyx", "bfxyz", {{0, 32 }, {1, 16}}}},
{ bs_fs_zyx_bsv32_fsv16, { 1, 1, 3, 0, "bfzyx", "bfxyz", {{0, 32 }, {1, 16}}}},
{ bs_fs_yx_bsv32_fsv32, { 1, 1, 2, 0, 0, "bfyx", "bfxy?", {{0, 32 }, {1, 32}}}},
{ bs_fs_yx_bsv32_fsv32, { 1, 1, 2, 0, "bfyx", "bfxy?", {{0, 32 }, {1, 32}}}},
{ bs_fs_yx_bsv32_fsv16, { 1, 1, 2, 0, 0, "bfyx", "bfxy?", {{0, 32 }, {1, 16}}}},
{ bs_fs_yx_bsv32_fsv16, { 1, 1, 2, 0, "bfyx", "bfxy?", {{0, 32 }, {1, 16}}}},
{ nv12, { 1, 1, 2, 0, 0, "bfyx", "bfxy?", {}}},
{ nv12, { 1, 1, 2, 0, "bfyx", "bfxy?", {}}},
{ image_2d_rgba, { 1, 1, 2, 0, 0, "bfyx", "bfxy?", {}}},
{ image_2d_rgba, { 1, 1, 2, 0, "bfyx", "bfxy?", {}}},

{ oiyx, { 1, 1, 2, 0, 0, "oiyx", "oixy", {}}},
{ oiyx, { 1, 1, 2, 0, "oiyx", "oixy", {}}},
{ ioyx, { 1, 1, 2, 0, 0, "ioyx", "oixy", {}}},
{ ioyx, { 1, 1, 2, 0, "ioyx", "oixy", {}}},
{ iyxo, { 1, 1, 2, 0, 0, "iyxo", "oixy", {}}},
{ iyxo, { 1, 1, 2, 0, "iyxo", "oixy", {}}},
{ yxio, { 1, 1, 2, 0, 0, "yxio", "oixy?", {}}},
{ yxio, { 1, 1, 2, 0, "yxio", "oixy?", {}}},
{ oizyx, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {}}},
{ oizyx, { 1, 1, 3, 0, "oizyx", "oixyz", {}}},
{ iozyx, { 1, 1, 3, 0, 0, "iozyx", "oixyz", {}}},
{ iozyx, { 1, 1, 3, 0, "iozyx", "oixyz", {}}},
{ os_is_yx_isv16_osv16, { 1, 1, 2, 0, 0, "oiyx", "oixy", {{1, 16}, {0, 16}}}},
{ os_is_yx_isv16_osv16, { 1, 1, 2, 0, "oiyx", "oixy", {{1, 16}, {0, 16}}}},
{ o_is_yx_isv16, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {{1, 16}}}},
{ o_is_yx_isv16, { 1, 1, 2, 0, "oiyx", "oixy?", {{1, 16}}}},
{ os_yxi_osv16, { 1, 1, 2, 0, 0, "oyxi", "oixy?", {{0, 16}}}},
{ os_yxi_osv16, { 1, 1, 2, 0, "oyxi", "oixy?", {{0, 16}}}},
{ os_iyx_osv16, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {{0, 16}}}},
{ os_iyx_osv16, { 1, 1, 2, 0, "oiyx", "oixy?", {{0, 16}}}},
{ os_iyx_osv32, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {{0, 32}}}},
{ os_iyx_osv32, { 1, 1, 2, 0, "oiyx", "oixy?", {{0, 32}}}},
{ os_iyx_osv64, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {{0, 64}}}},
{ os_iyx_osv64, { 1, 1, 2, 0, "oiyx", "oixy?", {{0, 64}}}},
{ winograd_2x3_s1_weights, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {}}},
{ winograd_2x3_s1_weights, { 1, 1, 2, 0, "oiyx", "oixy?", {}}},
{ winograd_2x3_s1_fused_weights, { 1, 1, 2, 0, 0, "xyio", "oixy?", {}}},
{ winograd_2x3_s1_fused_weights, { 1, 1, 2, 0, "xyio", "oixy?", {}}},
{ winograd_6x3_s1_fused_weights, { 1, 1, 2, 0, 0, "xyio", "oixy?", {}}},
{ winograd_6x3_s1_fused_weights, { 1, 1, 2, 0, "xyio", "oixy?", {}}},
{ image_2d_weights_winograd_6x3_s1_fbxyb, { 1, 1, 2, 0, 0, "xyio", "oixy?", {}}},
{ image_2d_weights_winograd_6x3_s1_fbxyb, { 1, 1, 2, 0, "xyio", "oixy?", {}}},
{ image_2d_weights_winograd_6x3_s1_xfbyb, { 1, 1, 2, 0, 0, "xyio", "oixy?", {}}},
{ image_2d_weights_winograd_6x3_s1_xfbyb, { 1, 1, 2, 0, "xyio", "oixy?", {}}},
{ image_2d_weights_c4_fyx_b, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {}}},
{ image_2d_weights_c4_fyx_b, { 1, 1, 2, 0, "oiyx", "oixy?", {}}},
{ image_2d_weights_c1_b_fyx, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {}}},
{ image_2d_weights_c1_b_fyx, { 1, 1, 2, 0, "oiyx", "oixy?", {}}},
{ lstm_weights_dio, { 1, 1, 2, 0, 0, "oixy", "oixy?", {}}},
{ lstm_weights_dio, { 1, 1, 2, 0, "oixy", "oixy?", {}}},
{ os_is_yx_isa8_osv8_isv4, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {}}},
{ os_is_yx_isa8_osv8_isv4, { 1, 1, 2, 0, "oiyx", "oixy?", {}}},
{ os_is_yx_isa8_osv16_isv4, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {}}},
{ os_is_yx_isa8_osv16_isv4, { 1, 1, 2, 0, "oiyx", "oixy?", {}}},
{ os_is_yx_isa8_osv8_isv4_swizzled_by_4, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {}}},
{ os_is_yx_isa8_osv8_isv4_swizzled_by_4, { 1, 1, 2, 0, "oiyx", "oixy?", {}}},
{ os_is_yx_osa4_isa8_osv8_isv2, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {{0, 32}, {1, 16}}}},
{ os_is_yx_osa4_isa8_osv8_isv2, { 1, 1, 2, 0, "oiyx", "oixy?", {{0, 32}, {1, 16}}}},
{ os_is_yx_osa4_isa8_osv8_isv4, { 1, 1, 2, 0, 0, "oiyx", "oixy", {{0, 32}, {1, 32}}}},
{ os_is_yx_osa4_isa8_osv8_isv4, { 1, 1, 2, 0, "oiyx", "oixy", {{0, 32}, {1, 32}}}},
{ os_is_zyx_osa4_isa8_osv8_isv2, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{0, 32}, {1, 16}}}},
{ os_is_zyx_osa4_isa8_osv8_isv2, { 1, 1, 3, 0, "oizyx", "oixyz", {{0, 32}, {1, 16}}}},
{ os_is_zyx_osa4_isa8_osv8_isv4, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{0, 32}, {1, 32}}}},
{ os_is_zyx_osa4_isa8_osv8_isv4, { 1, 1, 3, 0, "oizyx", "oixyz", {{0, 32}, {1, 32}}}},
{ os_is_yx_osa2_isa8_osv16_isv2, { 1, 1, 2, 0, 0, "oiyx", "oixy", {{0, 32}, {1, 16}}}},
{ os_is_yx_osa2_isa8_osv16_isv2, { 1, 1, 2, 0, "oiyx", "oixy", {{0, 32}, {1, 16}}}},
{ os_is_yx_osa2_isa8_osv16_isv4, { 1, 1, 2, 0, 0, "oiyx", "oixy", {{0, 32}, {1, 32}}}},
{ os_is_yx_osa2_isa8_osv16_isv4, { 1, 1, 2, 0, "oiyx", "oixy", {{0, 32}, {1, 32}}}},
{ os_is_zyx_isa8_osv8_isv4, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{1, 8}, {0, 8}, {1, 4}}}},
{ os_is_zyx_isa8_osv8_isv4, { 1, 1, 3, 0, "oizyx", "oixyz", {{1, 8}, {0, 8}, {1, 4}}}},
{ os_is_zyx_isa8_osv16_isv4, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{1, 8}, {0, 16}, {1, 4}}}},
{ os_is_zyx_isa8_osv16_isv4, { 1, 1, 3, 0, "oizyx", "oixyz", {{1, 8}, {0, 16}, {1, 4}}}},
{ os_is_yx_osa4_isa8_osv8_isv4_swizzled_by_4, { 1, 1, 2, 0, 0, "oiyx", "oixy?", {{0, 32}, {1, 32}}}},
{ os_is_yx_osa4_isa8_osv8_isv4_swizzled_by_4, { 1, 1, 2, 0, "oiyx", "oixy?", {{0, 32}, {1, 32}}}},
{ os_is_zyx_osa4_isa8_osv8_isv4_swizzled_by_4, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{0, 32}, {1, 32}}}},
{ os_is_zyx_osa4_isa8_osv8_isv4_swizzled_by_4, { 1, 1, 3, 0, "oizyx", "oixyz", {{0, 32}, {1, 32}}}},
{ is_o_yx_isv32, { 1, 1, 2, 0, 0, "oyxi", "oixy?", {{1, 32}}}},
{ is_o_yx_isv32, { 1, 1, 2, 0, "oyxi", "oixy?", {{1, 32}}}},
{ is_o32_yx_isv32_swizzled_by_4, { 1, 1, 2, 0, 0, "oyxi", "oixy?", {}}},
{ is_o32_yx_isv32_swizzled_by_4, { 1, 1, 2, 0, "oyxi", "oixy?", {}}},
{ os_is_y_x8_osv8_isv4, { 1, 1, 2, 0, 0, "oyxi", "oixy?", {}}},
{ os_is_y_x8_osv8_isv4, { 1, 1, 2, 0, "oyxi", "oixy?", {}}},
{ os_is_y_x8_osv8_isv4_swizzled_by_4, { 1, 1, 2, 0, 0, "oyxi", "oixy?", {}}},
{ os_is_y_x8_osv8_isv4_swizzled_by_4, { 1, 1, 2, 0, "oyxi", "oixy?", {}}},
{ os_is_yx_osv16_isv4, { 1, 1, 2, 0, 0, "oixy", "oixy?", {{0, 16}, {1, 4}}}},
{ os_is_yx_osv16_isv4, { 1, 1, 2, 0, "oixy", "oixy?", {{0, 16}, {1, 4}}}},
{ os_is_yx_osv8_isv4, { 1, 1, 2, 0, 0, "oiyx", "oixy", {{1, 4}, {0, 8}}}},
{ os_is_yx_osv8_isv4, { 1, 1, 2, 0, "oiyx", "oixy", {{1, 4}, {0, 8}}}},
{ os_is_yx_osv8_isv2, { 1, 1, 2, 0, 0, "oiyx", "oixy", {{1, 2}, {0, 8}}}},
{ os_is_yx_osv8_isv2, { 1, 1, 2, 0, "oiyx", "oixy", {{1, 2}, {0, 8}}}},
{ os_is_zyx_osv16_isv16, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{0, 16}, {1, 16}}}},
{ os_is_zyx_osv16_isv16, { 1, 1, 3, 0, "oizyx", "oixyz", {{0, 16}, {1, 16}}}},
{ os_is_yx_osv32_isv4_swizzled_by_2, { 1, 1, 2, 0, 0, "oixy", "oixy?", {{0, 32}, {1, 4}}}},
{ os_is_yx_osv32_isv4_swizzled_by_2, { 1, 1, 2, 0, "oixy", "oixy?", {{0, 32}, {1, 4}}}},
{ os_is_yx_osv32_isv4, { 1, 1, 2, 0, 0, "oixy", "oixy?", {{0, 32}, {1, 4}}}},
{ os_is_yx_osv32_isv4, { 1, 1, 2, 0, "oixy", "oixy?", {{0, 32}, {1, 4}}}},
{ os_is_zyx_osv32_isv4, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{0, 32}, {1, 4}}}},
{ os_is_zyx_osv32_isv4, { 1, 1, 3, 0, "oizyx", "oixyz", {{0, 32}, {1, 4}}}},
{ os_is_yx_osv32_isv32p, { 1, 1, 1, 0, 0, "oixy", "oixy?", {}}},
{ os_is_yx_osv32_isv32p, { 1, 1, 1, 0, "oixy", "oixy?", {}}},
{ os_is_zyx_isv16_osv16, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{0, 16}, {1, 16}}}},
{ os_is_zyx_isv16_osv16, { 1, 1, 3, 0, "oizyx", "oixyz", {{0, 16}, {1, 16}}}},
{ is_os_zyx_isv16_osv16, { 1, 1, 3, 0, 0, "iozyx", "oixyz", {{1, 16}, {0, 16}}}},
{ is_os_zyx_isv16_osv16, { 1, 1, 3, 0, "iozyx", "oixyz", {{1, 16}, {0, 16}}}},
{ is_os_yx_isv16_osv16, { 1, 1, 2, 0, 0, "ioyx", "oixyz", {{1, 16}, {0, 16}}}},
{ is_os_yx_isv16_osv16, { 1, 1, 2, 0, "ioyx", "oixyz", {{1, 16}, {0, 16}}}},
{ os_is_osv32_isv32_swizzled_by_4, { 1, 1, 0, 0, 0, "oixy", "oixy?", {{0, 32}, {1, 32}}}},
{ os_is_osv32_isv32_swizzled_by_4, { 1, 1, 0, 0, "oixy", "oixy?", {{0, 32}, {1, 32}}}},
{ os_is_zyx_isv8_osv16_isv2, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{1, 8}, {0, 16}, {1, 2}}}},
{ os_is_zyx_isv8_osv16_isv2, { 1, 1, 3, 0, "oizyx", "oixyz", {{1, 8}, {0, 16}, {1, 2}}}},
{ os_zyxi_osv16, { 1, 1, 3, 0, 0, "ozyxi", "oixyz", {{0, 16}}}},
{ os_zyxi_osv16, { 1, 1, 3, 0, "ozyxi", "oixyz", {{0, 16}}}},
{ os_is_yx_isv8_osv16_isv2, { 1, 1, 2, 0, 0, "oizyx", "oixyz", {{1, 8}, {0, 16}, {1, 2}}}},
{ os_is_yx_isv8_osv16_isv2, { 1, 1, 2, 0, "oizyx", "oixyz", {{1, 8}, {0, 16}, {1, 2}}}},
{ os_is_yx_osv16_isv16, { 1, 1, 2, 0, 0, "oiyx", "oixy", {{1, 16}, {0, 16}}}},
{ os_is_yx_osv16_isv16, { 1, 1, 2, 0, "oiyx", "oixy", {{1, 16}, {0, 16}}}},
{ os_is_zyx_osv32_isv16, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{0, 32}, {1, 16}}}},
{ os_is_zyx_osv32_isv16, { 1, 1, 3, 0, "oizyx", "oixyz", {{0, 32}, {1, 16}}}},
{ os_is_zyx_osv64_isv16, { 1, 1, 3, 0, 0, "oizyx", "oixyz", {{0, 64}, {1, 16}}}},
{ os_is_zyx_osv64_isv16, { 1, 1, 3, 0, "oizyx", "oixyz", {{0, 64}, {1, 16}}}},
{ os_iyx_osv32__ai32, { 1, 1, 2, 0, 0, "oiyx", "oixy", {{0, 32}}}},
{ os_iyx_osv32__ai32, { 1, 1, 2, 0, "oiyx", "oixy", {{0, 32}}}},
{ i_yxs_os_yxsv2_osv16, { 1, 1, 2, 0, 0, "iyxo", "oixy", {{0, 16}}}},
{ i_yxs_os_yxsv2_osv16, { 1, 1, 2, 0, "iyxo", "oixy", {{0, 16}}}},
{ iy_xs_os_xsv2_osv8__ao32, { 1, 1, 2, 0, 0, "iyxo", "oixy", {{2, 2}, {0, 8}}}},
{ iy_xs_os_xsv2_osv8__ao32, { 1, 1, 2, 0, "iyxo", "oixy", {{2, 2}, {0, 8}}}},
{ iy_xs_os_xsv2_osv16__ao32, { 1, 1, 2, 0, 0, "iyxo", "oixy", {{2, 2}, {0, 16}}}},
{ iy_xs_os_xsv2_osv16__ao32, { 1, 1, 2, 0, "iyxo", "oixy", {{2, 2}, {0, 16}}}},
{ os_i_yxs_osv4_yxsv4, { 1, 1, 2, 0, 0, "oiyx", "oixy", {{0, 4}}}},
{ os_i_yxs_osv4_yxsv4, { 1, 1, 2, 0, "oiyx", "oixy", {{0, 4}}}},

{ goiyx, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {}}},
|
{ goiyx, { 1, 1, 2, 1, "goiyx", "oixy??g", {}}},
|
||||||
{ gioyx, { 1, 1, 2, 0, 1, "gioyx", "oixy????g", {}}},
|
{ gioyx, { 1, 1, 2, 1, "gioyx", "oixy??g", {}}},
|
||||||
{ goizyx, { 1, 1, 3, 0, 1, "goizyx", "oixyz???g", {}}},
|
{ goizyx, { 1, 1, 3, 1, "goizyx", "oixyz?g", {}}},
|
||||||
{ giozyx, { 1, 1, 3, 0, 1, "giozyx", "oixyz???g", {}}},
|
{ giozyx, { 1, 1, 3, 1, "giozyx", "oixyz?g", {}}},
|
||||||
{ g_os_iyx_osv16, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{0, 16}}}},
|
{ g_os_iyx_osv16, { 1, 1, 2, 1, "goiyx", "oixy??g", {{0, 16}}}},
|
||||||
{ g_os_iyx_osv32, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{0, 32}}}},
|
{ g_os_iyx_osv32, { 1, 1, 2, 1, "goiyx", "oixy??g", {{0, 32}}}},
|
||||||
{ gs_oiyx_gsv16, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{8, 16}}}},
|
{ gs_oiyx_gsv16, { 1, 1, 2, 1, "goiyx", "oixy??g", {{6, 16}}}},
|
||||||
{ gs_oizyx_gsv16, { 1, 1, 3, 0, 1, "goizyx", "oixyz???g", {{8, 16}}}},
|
{ gs_oizyx_gsv16, { 1, 1, 3, 1, "goizyx", "oixyz?g", {{6, 16}}}},
|
||||||
{ gs_oiyx_gsv32, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{8, 32}}}},
|
{ gs_oiyx_gsv32, { 1, 1, 2, 1, "goiyx", "oixy??g", {{6, 32}}}},
|
||||||
{ gyxio, { 1, 1, 2, 0, 1, "gyxio", "oixy????g", {}}},
|
{ gyxio, { 1, 1, 2, 1, "gyxio", "oixy??g", {}}},
|
||||||
{ g_is_os_zyx_isv16_osv16, { 1, 1, 3, 0, 1, "giozyx", "oixyz???g", {{1, 16}, {0, 16}}}},
|
{ g_is_os_zyx_isv16_osv16, { 1, 1, 3, 1, "giozyx", "oixyz?g", {{1, 16}, {0, 16}}}},
|
||||||
{ g_is_os_yx_isv16_osv16, { 1, 1, 2, 0, 1, "gioyx", "oixy????g", {{1, 16}, {0, 16}}}},
|
{ g_is_os_yx_isv16_osv16, { 1, 1, 2, 1, "gioyx", "oixy??g", {{1, 16}, {0, 16}}}},
|
||||||
{ g_os_is_zyx_isv8_osv16_isv2, { 1, 1, 3, 0, 1, "goizyx", "oixyz???g", {{1, 8}, {0, 16}, {1, 2}}}},
|
{ g_os_is_zyx_isv8_osv16_isv2, { 1, 1, 3, 1, "goizyx", "oixyz?g", {{1, 8}, {0, 16}, {1, 2}}}},
|
||||||
{ g_os_is_yx_isv8_osv16_isv2, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{1, 8}, {0, 16}, {1, 2}}}},
|
{ g_os_is_yx_isv8_osv16_isv2, { 1, 1, 2, 1, "goiyx", "oixy??g", {{1, 8}, {0, 16}, {1, 2}}}},
|
||||||
{ g_os_is_zyx_isv16_osv16, { 1, 1, 3, 0, 1, "goizyx", "oixyz???g", {{0, 16}, {1, 16}}}},
|
{ g_os_is_zyx_isv16_osv16, { 1, 1, 3, 1, "goizyx", "oixyz?g", {{0, 16}, {1, 16}}}},
|
||||||
{ g_os_is_yx_osv16_isv4, { 1, 1, 2, 0, 1, "goixy", "oixy????g", {{0, 16}, {1, 4}}}},
|
+{ g_os_is_yx_osv16_isv4, { 1, 1, 2, 1, "goixy", "oixy??g", {{0, 16}, {1, 4}}}},
-{ g_os_is_zyx_osv16_isv16, { 1, 1, 3, 0, 1, "goizyx", "oixyz???g", {{0, 16}, {1, 16}}}},
+{ g_os_is_zyx_osv16_isv16, { 1, 1, 3, 1, "goizyx", "oixyz?g", {{0, 16}, {1, 16}}}},
-{ g_os_zyx_is_osv16_isv4, { 1, 1, 3, 0, 1, "gozyxi", "oixyz???g", {{0, 16}, {1, 4}}}},
+{ g_os_zyx_is_osv16_isv4, { 1, 1, 3, 1, "gozyxi", "oixyz?g", {{0, 16}, {1, 4}}}},
-{ g_os_zyx_is_osv16_isv16, { 1, 1, 3, 0, 1, "gozyxi", "oixyz???g", {{0, 16}, {1, 16}}}},
+{ g_os_zyx_is_osv16_isv16, { 1, 1, 3, 1, "gozyxi", "oixyz?g", {{0, 16}, {1, 16}}}},
-{ g_os_zyx_is_osv16_isv32, { 1, 1, 3, 0, 1, "gozyxi", "oixyz???g", {{0, 16}, {1, 32}}}},
+{ g_os_zyx_is_osv16_isv32, { 1, 1, 3, 1, "gozyxi", "oixyz?g", {{0, 16}, {1, 32}}}},
-{ g_os_zyx_is_osv32_isv4, { 1, 1, 3, 0, 1, "gozyxi", "oixyz???g", {{0, 32}, {1, 4}}}},
+{ g_os_zyx_is_osv32_isv4, { 1, 1, 3, 1, "gozyxi", "oixyz?g", {{0, 32}, {1, 4}}}},
-{ g_os_zyx_is_osv32_isv16, { 1, 1, 3, 0, 1, "gozyxi", "oixyz???g", {{0, 32}, {1, 16}}}},
+{ g_os_zyx_is_osv32_isv16, { 1, 1, 3, 1, "gozyxi", "oixyz?g", {{0, 32}, {1, 16}}}},
-{ g_os_zyx_is_osv32_isv32, { 1, 1, 3, 0, 1, "gozyxi", "oixyz???g", {{0, 32}, {1, 32}}}},
+{ g_os_zyx_is_osv32_isv32, { 1, 1, 3, 1, "gozyxi", "oixyz?g", {{0, 32}, {1, 32}}}},
-{ g_os_is_yx_osa4_isa8_osv8_isv4, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{0, 32}, {1, 32}}}},
+{ g_os_is_yx_osa4_isa8_osv8_isv4, { 1, 1, 2, 1, "goiyx", "oixy??g", {{0, 32}, {1, 32}}}},
-{ g_os_is_zyx_osa4_isa8_osv8_isv4, { 1, 1, 3, 0, 1, "goizyx", "oixyz???g", {{0, 32}, {1, 32}}}},
+{ g_os_is_zyx_osa4_isa8_osv8_isv4, { 1, 1, 3, 1, "goizyx", "oixyz?g", {{0, 32}, {1, 32}}}},
-{ g_os_is_yx_osa4_isa8_osv8_isv2, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{0, 32}, {1, 16}}}},
+{ g_os_is_yx_osa4_isa8_osv8_isv2, { 1, 1, 2, 1, "goiyx", "oixy??g", {{0, 32}, {1, 16}}}},
-{ g_os_is_zyx_osa4_isa8_osv8_isv2, { 1, 1, 3, 0, 1, "goizyx", "oixyz???g", {{0, 32}, {1, 16}}}},
+{ g_os_is_zyx_osa4_isa8_osv8_isv2, { 1, 1, 3, 1, "goizyx", "oixyz?g", {{0, 32}, {1, 16}}}},
-{ g_os_is_yx_osa2_isa8_osv16_isv4, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{0, 32}, {1, 32}}}},
+{ g_os_is_yx_osa2_isa8_osv16_isv4, { 1, 1, 2, 1, "goiyx", "oixy??g", {{0, 32}, {1, 32}}}},
-{ g_os_is_yx_osa2_isa8_osv16_isv2, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{0, 32}, {1, 16}}}},
+{ g_os_is_yx_osa2_isa8_osv16_isv2, { 1, 1, 2, 1, "goiyx", "oixy??g", {{0, 32}, {1, 16}}}},
-{ gs_oi_yxs_gsv4_yxsv4, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{8, 4}}}},
+{ gs_oi_yxs_gsv4_yxsv4, { 1, 1, 2, 1, "goiyx", "oixy??g", {{6, 4}}}},
-{ gs_oi_yxs_gsv16_yxsv4, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{8, 16}}}},
+{ gs_oi_yxs_gsv16_yxsv4, { 1, 1, 2, 1, "goiyx", "oixy??g", {{6, 16}}}},
-{ gs_oi_yxs_gsv32_yxsv4, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{8, 32}}}},
+{ gs_oi_yxs_gsv32_yxsv4, { 1, 1, 2, 1, "goiyx", "oixy??g", {{6, 32}}}},
-{ g_os_is_yx_isv16_osv16, { 1, 1, 2, 0, 1, "goiyx", "oixy????g", {{1, 16}, {0, 16}}}},
+{ g_os_is_yx_isv16_osv16, { 1, 1, 2, 1, "goiyx", "oixy??g", {{1, 16}, {0, 16}}}},
-{ gi_yxs_os_yxsv2_osv16, { 1, 1, 2, 0, 1, "giyxo", "oixy????g", {{0, 16}}}},
+{ gi_yxs_os_yxsv2_osv16, { 1, 1, 2, 1, "giyxo", "oixy??g", {{0, 16}}}},
-{ giy_xs_os_xsv2_osv8__ao32, { 1, 1, 2, 0, 1, "giyxo", "oixy????g", {{2, 2}, {0, 8}}}},
+{ giy_xs_os_xsv2_osv8__ao32, { 1, 1, 2, 1, "giyxo", "oixy??g", {{2, 2}, {0, 8}}}},
-{ giy_xs_os_xsv2_osv16__ao32, { 1, 1, 2, 0, 1, "giyxo", "oixy????g", {{2, 2}, {0, 16}}}},
+{ giy_xs_os_xsv2_osv16__ao32, { 1, 1, 2, 1, "giyxo", "oixy??g", {{2, 2}, {0, 16}}}},
 };
 if (traits.find(fmt) == traits.end()) {
 throw std::runtime_error("[clDNN] Format description is missing in fmt traits");
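Each trait tuple above reads as the per-kind dimension counts (the removed fifth slot was the local-dimension count, leaving {batch, feature, spatial, group}), followed by the dimension order, the internal order with `?` padding for absent axes, and the {dim index, block size} pairs. Below is a minimal standalone sketch of the trimmed table shape and the lookup-or-throw pattern this hunk keeps; `format_traits_sketch` and `traits_sketch` are illustrative names, not the real clDNN definitions, and the two sample rows mirror grouped 2D/3D entries from the table:

```cpp
#include <cstddef>
#include <map>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-in for the trimmed trait tuple: the two
// local-dimension slots are gone, leaving four dimension counts.
struct format_traits_sketch {
    std::size_t batch_num;
    std::size_t feature_num;
    std::size_t spatial_num;
    std::size_t group_num;                            // no local_num any more
    std::string internal_order;                       // '?' marks an unused axis slot
    std::vector<std::pair<std::size_t, int>> block_sizes;  // {dim index, block}
};

inline format_traits_sketch traits_sketch(int fmt) {
    // Two sample entries in the new four-count shape, keyed by an
    // arbitrary int here instead of a real format enum.
    static const std::map<int, format_traits_sketch> table = {
        {0, {1, 1, 2, 1, "oixy??g", {{0, 16}, {1, 4}}}},   // grouped 2D
        {1, {1, 1, 3, 1, "oixyz?g", {{0, 16}, {1, 16}}}},  // grouped 3D
    };
    auto it = table.find(fmt);
    if (it == table.end())
        throw std::runtime_error("[clDNN] Format description is missing in fmt traits");
    return it->second;
}
```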
@@ -381,8 +374,6 @@ struct format {
 static size_t feature_num(type fmt) { return traits(fmt).feature_num; }
 /// @brief Returns number of spatial dimensions for a @p format.
 static size_t spatial_num(type fmt) { return traits(fmt).spatial_num; }
-/// @brief Returns number of local dimensions for a @p format.
-static size_t local_num(type fmt) { return traits(fmt).local_num; }
 /// @brief Returns number of group dimensions for a @p format.
 static size_t group_num(type fmt) { return traits(fmt).group_num; }
 /// @brief Returns an order of dimensions for a @ format.
@@ -442,8 +433,6 @@ struct format {
 size_t feature_num() const { return traits(value).feature_num; }
 /// @brief Returns number of spatial dimensions.
 size_t spatial_num() const { return traits(value).spatial_num; }
-/// @brief Returns number of local dimensions.
-size_t local_num() const { return traits(value).local_num; }
 /// @brief Returns number of group dimensions.
 size_t group_num() const { return traits(value).group_num; }
 /// @brief Returns an order of dimensions in form of string.
@@ -483,9 +472,8 @@ struct format {
 constexpr int32_t tensor_batch_dim_max = 1;
 constexpr int32_t tensor_feature_dim_max = 1;
 constexpr int32_t tensor_spatial_dim_max = 4;
-constexpr int32_t tensor_local_dim_max = 2;
 constexpr int32_t tensor_group_dim_max = 1;
-constexpr int32_t tensor_dim_max = tensor_batch_dim_max + tensor_feature_dim_max + tensor_spatial_dim_max + tensor_local_dim_max + tensor_group_dim_max;
+constexpr int32_t tensor_dim_max = tensor_batch_dim_max + tensor_feature_dim_max + tensor_spatial_dim_max + tensor_group_dim_max;

 struct tensor;

@@ -496,7 +484,6 @@ enum class dim_vec_kind {
 batch,
 feature,
 spatial,
-local,
 group
 };

@@ -524,16 +511,10 @@ struct dim_vec_limits<dim_vec_kind::spatial> {
 static constexpr int32_t dim_offset = tensor_batch_dim_max + tensor_feature_dim_max;
 };

-template <>
-struct dim_vec_limits<dim_vec_kind::local> {
-static constexpr int32_t max_dimentionality = tensor_local_dim_max;
-static constexpr int32_t dim_offset = tensor_batch_dim_max + tensor_feature_dim_max + tensor_spatial_dim_max;
-};
-
 template <>
 struct dim_vec_limits<dim_vec_kind::group> {
 static constexpr int32_t max_dimentionality = tensor_group_dim_max;
-static constexpr int32_t dim_offset = tensor_batch_dim_max + tensor_feature_dim_max + tensor_spatial_dim_max + tensor_local_dim_max;
+static constexpr int32_t dim_offset = tensor_batch_dim_max + tensor_feature_dim_max + tensor_spatial_dim_max;
 };

 /// @brief Template class used in tensor constructor using dim_vec_kinds
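With the local specialization gone, tensor_dim_max drops from 9 (1 + 1 + 4 + 2 + 1) to 7 (1 + 1 + 4 + 1), and each kind's dim_offset is just the running sum of the preceding kinds' maxima. A hypothetical re-derivation of those offsets; only the constant values correspond to the tensor_*_dim_max declarations above, the function itself is illustrative:

```cpp
#include <cstdint>

constexpr int32_t batch_max = 1, feature_max = 1, spatial_max = 4, group_max = 1;

enum class kind_sketch { batch, feature, spatial, group };

// Dimension slots live in one flat array laid out as
// [batch | feature | spatial | group]; a kind's offset is the total
// extent of everything declared before it.
constexpr int32_t dim_offset_sketch(kind_sketch k) {
    return k == kind_sketch::batch   ? 0
         : k == kind_sketch::feature ? batch_max
         : k == kind_sketch::spatial ? batch_max + feature_max
         :                             batch_max + feature_max + spatial_max;
}

// The group block now starts right after the spatial slots (index 6, not 8).
static_assert(dim_offset_sketch(kind_sketch::group) == 6, "group offset");
static_assert(batch_max + feature_max + spatial_max + group_max == 7, "tensor_dim_max");
```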
@@ -570,11 +551,6 @@ details::dim_vec_kind_init<details::dim_vec_kind::spatial> spatial(InitTys&&...
 return details::dim_vec_kind_init<details::dim_vec_kind::spatial>(std::forward<InitTys>(inits)...);
 }

-template <typename... InitTys>
-details::dim_vec_kind_init<details::dim_vec_kind::local> local(InitTys&&... inits) {
-return details::dim_vec_kind_init<details::dim_vec_kind::local>(std::forward<InitTys>(inits)...);
-}
-
 template <typename... InitTys>
 details::dim_vec_kind_init<details::dim_vec_kind::group> group(InitTys&&... inits) {
 return details::dim_vec_kind_init<details::dim_vec_kind::group>(std::forward<InitTys>(inits)...);
@@ -585,7 +561,6 @@ struct tensor {
 friend class details::dim_vec_kind_init<details::dim_vec_kind::batch>;
 friend class details::dim_vec_kind_init<details::dim_vec_kind::feature>;
 friend class details::dim_vec_kind_init<details::dim_vec_kind::spatial>;
-friend class details::dim_vec_kind_init<details::dim_vec_kind::local>;
 friend class details::dim_vec_kind_init<details::dim_vec_kind::group>;

 typedef int32_t value_type; ///< Values type stored in tensor.
@@ -594,7 +569,6 @@ struct tensor {
 mutable_array_ref<value_type> batch; ///< Batch dimensions.
 mutable_array_ref<value_type> feature; ///< Feature maps.
 mutable_array_ref<value_type> spatial; ///< Spatial dimensions.
-mutable_array_ref<value_type> local; ///< Local dimensions.
 mutable_array_ref<value_type> group; ///< Group dimensions.

 private:
@@ -606,8 +580,7 @@ public:
 batch(_sizes, tensor_batch_dim_max),
 feature(_sizes + tensor_batch_dim_max, tensor_feature_dim_max),
 spatial(_sizes + tensor_batch_dim_max + tensor_feature_dim_max, tensor_spatial_dim_max),
-local(_sizes + tensor_batch_dim_max + tensor_feature_dim_max + tensor_spatial_dim_max, tensor_local_dim_max),
-group(_sizes + tensor_batch_dim_max + tensor_feature_dim_max + tensor_spatial_dim_max + tensor_local_dim_max, tensor_group_dim_max) {
+group(_sizes + tensor_batch_dim_max + tensor_feature_dim_max + tensor_spatial_dim_max, tensor_group_dim_max) {
 std::fill_n(_sizes, tensor_dim_max, default_size);
 }

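The constructor change falls straight out of that layout: the flat `_sizes` array is carved into per-kind views, so deleting the local view moves `group`'s base pointer back by the two former local slots. A compact standalone sketch of that carving pattern; `span_view` and `tensor_sketch` are illustrative stand-ins, not the clDNN types:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

struct span_view {       // stand-in for mutable_array_ref<int32_t>
    int32_t* data;
    std::size_t len;
};

struct tensor_sketch {
    static const std::size_t B = 1, F = 1, S = 4, G = 1;  // per-kind maxima
    int32_t _sizes[B + F + S + G];                        // 7 slots, no longer 9
    span_view batch, feature, spatial, group;

    explicit tensor_sketch(int32_t default_size)
        : batch{_sizes, B},
          feature{_sizes + B, F},
          spatial{_sizes + B + F, S},
          group{_sizes + B + F + S, G} {  // no "+ local extent" in the offset
        std::fill_n(_sizes, B + F + S + G, default_size);
    }
};
```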
@@ -21,6 +21,7 @@ enum class KernelType {
 NORMALIZE,
 POOLING,
 ROI_POOLING,
+ROI_ALIGN,
 FULLY_CONNECTED,
 ACTIVATION,
 SOFT_MAX,
@@ -71,7 +72,8 @@ enum class KernelType {
 EXTRACT_IMAGE_PATCHES,
 LOOP,
 NON_MAX_SUPPRESSION,
-DETECTION_OUTPUT
+DETECTION_OUTPUT,
+EXPERIMENTAL_DETECTRON_ROI_FEATURE_EXTRACTOR
 };

 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

@@ -45,110 +45,110 @@ DataTensor::DataChannelArray DataTensor::dataChannelArray {{
 }};

 WeightsTensor::WeightsChannelArray WeightsTensor::weightsChannelArray {{
-// X, Y, Z, IFM, OFM, LX, LY, G
+// X, Y, Z, IFM, OFM, G
-{ WeightsLayout::oi, { -1, -1, -1, 0, 1, -1, -1, -1 } },
+{ WeightsLayout::oi, { -1, -1, -1, 0, 1, -1 } },
-{ WeightsLayout::io, { -1, -1, -1, 1, 0, -1, -1, -1 } },
+{ WeightsLayout::io, { -1, -1, -1, 1, 0, -1 } },
-{ WeightsLayout::oiyx, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::oiyx, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::ioyx, { 0, 1, -1, 3, 2, -1, -1, -1 } },
+{ WeightsLayout::ioyx, { 0, 1, -1, 3, 2, -1 } },
-{ WeightsLayout::oyxi, { 1, 2, -1, 0, 3, -1, -1, -1 } },
+{ WeightsLayout::oyxi, { 1, 2, -1, 0, 3, -1 } },
-{ WeightsLayout::iyxo, { 1, 2, -1, 3, 0, -1, -1, -1 } },
+{ WeightsLayout::iyxo, { 1, 2, -1, 3, 0, -1 } },
-{ WeightsLayout::yxio, { 2, 3, -1, 1, 0, -1, -1, -1 } },
+{ WeightsLayout::yxio, { 2, 3, -1, 1, 0, -1 } },
-{ WeightsLayout::os_iyx_osv16, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_iyx_osv16, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_iyx_osv32, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_iyx_osv32, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_iyx_osv32__ai32, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_iyx_osv32__ai32, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_iyx_osv64, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_iyx_osv64, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_iyx_osv16_rotate_180, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_iyx_osv16_rotate_180, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::o_is_yx_isv16, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::o_is_yx_isv16, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_yxi_osv16, { 1, 2, -1, 0, 3, -1, -1, -1 } },
+{ WeightsLayout::os_yxi_osv16, { 1, 2, -1, 0, 3, -1 } },
-{ WeightsLayout::os_i_osv8__ai8, { -1, -1, -1, 0, 1, -1, -1, -1 } },
+{ WeightsLayout::os_i_osv8__ai8, { -1, -1, -1, 0, 1, -1 } },
-{ WeightsLayout::os_i_osv16__ai8, { -1, -1, -1, 0, 1, -1, -1, -1 } },
+{ WeightsLayout::os_i_osv16__ai8, { -1, -1, -1, 0, 1, -1 } },
-{ WeightsLayout::os_i_osv16, { -1, -1, -1, 0, 1, -1, -1, -1 } },
+{ WeightsLayout::os_i_osv16, { -1, -1, -1, 0, 1, -1 } },
-{ WeightsLayout::os_is_yx_osv16_isv16, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osv16_isv16, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_zyx_osv16_isv16, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_osv16_isv16, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::g_os_is_zyx_osv16_isv16, { 0, 1, 2, 3, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_is_zyx_osv16_isv16, { 0, 1, 2, 3, 4, 5 } },
-{ WeightsLayout::os_is_zyx_osv32_isv16, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_osv32_isv16, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::os_is_zyx_osv64_isv16, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_osv64_isv16, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::i_yxs_os_yxsv2_osv16, { 1, 2, -1, 3, 0, -1, -1, -1 } },
+{ WeightsLayout::i_yxs_os_yxsv2_osv16, { 1, 2, -1, 3, 0, -1 } },
-{ WeightsLayout::iy_xs_os_xsv2_osv16__ao32, { 1, 2, -1, 3, 0, -1, -1, -1 } },
+{ WeightsLayout::iy_xs_os_xsv2_osv16__ao32, { 1, 2, -1, 3, 0, -1 } },
-{ WeightsLayout::iy_xs_os_xsv2_osv8__ao32, { 1, 2, -1, 3, 0, -1, -1, -1 } },
+{ WeightsLayout::iy_xs_os_xsv2_osv8__ao32, { 1, 2, -1, 3, 0, -1 } },
-{ WeightsLayout::image_2d_weights_c4_fyx_b, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::image_2d_weights_c4_fyx_b, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::image_2d_weights_c1_b_fyx, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::image_2d_weights_c1_b_fyx, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::winograd_2x3_s1_weights, { 3, 2, -1, 1, 0, -1, -1, -1 } },
+{ WeightsLayout::winograd_2x3_s1_weights, { 3, 2, -1, 1, 0, -1 } },
-{ WeightsLayout::winograd_2x3_s1_fused_weights, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::winograd_2x3_s1_fused_weights, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::winograd_6x3_s1_fused_weights, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::winograd_6x3_s1_fused_weights, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::image_2d_weights_winograd_6x3_s1_fbxyb, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::image_2d_weights_winograd_6x3_s1_fbxyb, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::image_2d_weights_winograd_6x3_s1_xfbyb, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::image_2d_weights_winograd_6x3_s1_xfbyb, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::dlstm_dir_io, { 1, 0, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::dlstm_dir_io, { 1, 0, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_yx_isa8_osv8_isv4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_isa8_osv8_isv4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_yx_isa8_osv16_isv4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_isa8_osv16_isv4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_yx_isa8_osv8_isv4_swizzled_by_4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_isa8_osv8_isv4_swizzled_by_4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_yx_osa4_isa8_osv8_isv4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osa4_isa8_osv8_isv4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::g_os_is_yx_osa4_isa8_osv8_isv4, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::g_os_is_yx_osa4_isa8_osv8_isv4, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::g_os_is_zyx_osa4_isa8_osv8_isv4, { 0, 1, 2, 3, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_is_zyx_osa4_isa8_osv8_isv4, { 0, 1, 2, 3, 4, 5 } },
-{ WeightsLayout::os_is_yx_osa4_isa8_osv8_isv2, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osa4_isa8_osv8_isv2, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_zyx_osa4_isa8_osv8_isv2, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_osa4_isa8_osv8_isv2, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::os_is_zyx_osa4_isa8_osv8_isv4, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_osa4_isa8_osv8_isv4, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::g_os_is_yx_osa4_isa8_osv8_isv2, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::g_os_is_yx_osa4_isa8_osv8_isv2, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::g_os_is_zyx_osa4_isa8_osv8_isv2, { 0, 1, 2, 3, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_is_zyx_osa4_isa8_osv8_isv2, { 0, 1, 2, 3, 4, 5 } },
-{ WeightsLayout::os_is_yx_osa2_isa8_osv16_isv4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osa2_isa8_osv16_isv4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::g_os_is_yx_osa2_isa8_osv16_isv4, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::g_os_is_yx_osa2_isa8_osv16_isv4, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::os_is_yx_osa2_isa8_osv16_isv2, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osa2_isa8_osv16_isv2, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::g_os_is_yx_osa2_isa8_osv16_isv2, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::g_os_is_yx_osa2_isa8_osv16_isv2, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::os_is_zyx_isa8_osv8_isv4, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_isa8_osv8_isv4, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::os_is_zyx_isa8_osv16_isv4, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_isa8_osv16_isv4, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::os_is_yx_osa4_isa8_osv8_isv4_swizzled_by_4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osa4_isa8_osv8_isv4_swizzled_by_4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_zyx_osa4_isa8_osv8_isv4_swizzled_by_4, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_osa4_isa8_osv8_isv4_swizzled_by_4, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::is_o_yx_isv32, { 1, 2, -1, 0, 3, -1, -1, -1 } },
+{ WeightsLayout::is_o_yx_isv32, { 1, 2, -1, 0, 3, -1 } },
-{ WeightsLayout::is_o32_yx_isv32_swizzled_by_4, { 1, 2, -1, 0, 3, -1, -1, -1 } },
+{ WeightsLayout::is_o32_yx_isv32_swizzled_by_4, { 1, 2, -1, 0, 3, -1 } },
-{ WeightsLayout::os_is_y_x8_osv8_isv4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_y_x8_osv8_isv4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_y_x8_osv8_isv4_swizzled_by_4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_y_x8_osv8_isv4_swizzled_by_4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_yx_osv8_isv4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osv8_isv4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_yx_osv8_isv2, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osv8_isv2, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_yx_osv16_isv4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osv16_isv4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_yx_osv32_isv4_swizzled_by_2, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osv32_isv4_swizzled_by_2, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_yx_osv32_isv4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osv32_isv4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_zyx_osv32_isv4, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_osv32_isv4, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::oizyx, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::oizyx, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::iozyx, { 0, 1, 2, 4, 3, -1, -1, -1 } },
+{ WeightsLayout::iozyx, { 0, 1, 2, 4, 3, -1 } },
-{ WeightsLayout::os_is_yx_osv32_isv32p, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_osv32_isv32p, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_is_zyx_isv16_osv16, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_isv16_osv16, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::os_is_yx_isv16_osv16, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_isv16_osv16, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::is_os_zyx_isv16_osv16, { 0, 1, 2, 4, 3, -1, -1, -1 } },
+{ WeightsLayout::is_os_zyx_isv16_osv16, { 0, 1, 2, 4, 3, -1 } },
-{ WeightsLayout::is_os_yx_isv16_osv16, { 0, 1, -1, 3, 2, -1, -1, -1 } },
+{ WeightsLayout::is_os_yx_isv16_osv16, { 0, 1, -1, 3, 2, -1 } },
-{ WeightsLayout::os_is_osv32_isv32_swizzled_by_4, { -1, -1, -1, 0, 1, -1, -1, -1 } },
+{ WeightsLayout::os_is_osv32_isv32_swizzled_by_4, { -1, -1, -1, 0, 1, -1 } },
-{ WeightsLayout::os_is_zyx_isv8_osv16_isv2, { 0, 1, 2, 3, 4, -1, -1, -1 } },
+{ WeightsLayout::os_is_zyx_isv8_osv16_isv2, { 0, 1, 2, 3, 4, -1 } },
-{ WeightsLayout::os_is_yx_isv8_osv16_isv2, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_is_yx_isv8_osv16_isv2, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::os_zyxi_osv16, { 1, 2, 3, 0, 4, -1, -1, -1 } },
+{ WeightsLayout::os_zyxi_osv16, { 1, 2, 3, 0, 4, -1 } },
-{ WeightsLayout::os_i_yxs_osv4_yxsv4, { 0, 1, -1, 2, 3, -1, -1, -1 } },
+{ WeightsLayout::os_i_yxs_osv4_yxsv4, { 0, 1, -1, 2, 3, -1 } },
-{ WeightsLayout::goiyx, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::goiyx, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::gioyx, { 0, 1, -1, 3, 2, -1, -1, 4 } },
+{ WeightsLayout::gioyx, { 0, 1, -1, 3, 2, 4 } },
-{ WeightsLayout::goizyx, { 0, 1, 2, 3, 4, -1, -1, 5 } },
+{ WeightsLayout::goizyx, { 0, 1, 2, 3, 4, 5 } },
-{ WeightsLayout::giozyx, { 0, 1, 2, 4, 3, -1, -1, 5 } },
+{ WeightsLayout::giozyx, { 0, 1, 2, 4, 3, 5 } },
-{ WeightsLayout::g_os_iyx_osv16, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::g_os_iyx_osv16, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::g_os_iyx_osv32, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::g_os_iyx_osv32, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::gs_oiyx_gsv16, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::gs_oiyx_gsv16, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::gs_oizyx_gsv16, { 0, 1, 2, 3, 4, -1, -1, 5 } },
+{ WeightsLayout::gs_oizyx_gsv16, { 0, 1, 2, 3, 4, 5 } },
-{ WeightsLayout::gs_oiyx_gsv32, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::gs_oiyx_gsv32, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::gyxio, { 2, 3, -1, 1, 0, -1, -1, 4 } },
+{ WeightsLayout::gyxio, { 2, 3, -1, 1, 0, 4 } },
-{ WeightsLayout::gi_yxs_os_yxsv2_osv16, { 1, 2, -1, 3, 0, -1, -1, 4 } },
+{ WeightsLayout::gi_yxs_os_yxsv2_osv16, { 1, 2, -1, 3, 0, 4 } },
-{ WeightsLayout::g_is_os_zyx_isv16_osv16, { 0, 1, 2, 4, 3, -1, -1, 5 } },
+{ WeightsLayout::g_is_os_zyx_isv16_osv16, { 0, 1, 2, 4, 3, 5 } },
-{ WeightsLayout::g_is_os_yx_isv16_osv16, { 0, 1, -1, 3, 2, -1, -1, 4 } },
+{ WeightsLayout::g_is_os_yx_isv16_osv16, { 0, 1, -1, 3, 2, 4 } },
-{ WeightsLayout::g_os_is_zyx_isv8_osv16_isv2, { 0, 1, 2, 3, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_is_zyx_isv8_osv16_isv2, { 0, 1, 2, 3, 4, 5 } },
-{ WeightsLayout::g_os_is_yx_isv8_osv16_isv2, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::g_os_is_yx_isv8_osv16_isv2, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::g_os_is_zyx_isv16_osv16, { 0, 1, 2, 3, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_is_zyx_isv16_osv16, { 0, 1, 2, 3, 4, 5 } },
-{ WeightsLayout::giy_xs_os_xsv2_osv16__ao32, { 1, 2, -1, 3, 0, -1, -1, 4 } },
+{ WeightsLayout::giy_xs_os_xsv2_osv16__ao32, { 1, 2, -1, 3, 0, 4 } },
-{ WeightsLayout::giy_xs_os_xsv2_osv8__ao32, { 1, 2, -1, 3, 0, -1, -1, 4 } },
+{ WeightsLayout::giy_xs_os_xsv2_osv8__ao32, { 1, 2, -1, 3, 0, 4 } },
-{ WeightsLayout::g_os_is_yx_isv16_osv16, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::g_os_is_yx_isv16_osv16, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::gs_oi_yxs_gsv4_yxsv4, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::gs_oi_yxs_gsv4_yxsv4, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::gs_oi_yxs_gsv16_yxsv4, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::gs_oi_yxs_gsv16_yxsv4, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::gs_oi_yxs_gsv32_yxsv4, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::gs_oi_yxs_gsv32_yxsv4, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::g_os_is_yx_osv16_isv4, { 0, 1, -1, 2, 3, -1, -1, 4 } },
+{ WeightsLayout::g_os_is_yx_osv16_isv4, { 0, 1, -1, 2, 3, 4 } },
-{ WeightsLayout::g_os_zyx_is_osv16_isv4, { 1, 2, 3, 0, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_zyx_is_osv16_isv4, { 1, 2, 3, 0, 4, 5 } },
-{ WeightsLayout::g_os_zyx_is_osv16_isv16, { 1, 2, 3, 0, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_zyx_is_osv16_isv16, { 1, 2, 3, 0, 4, 5 } },
-{ WeightsLayout::g_os_zyx_is_osv16_isv32, { 1, 2, 3, 0, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_zyx_is_osv16_isv32, { 1, 2, 3, 0, 4, 5 } },
-{ WeightsLayout::g_os_zyx_is_osv32_isv4, { 1, 2, 3, 0, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_zyx_is_osv32_isv4, { 1, 2, 3, 0, 4, 5 } },
-{ WeightsLayout::g_os_zyx_is_osv32_isv16, { 1, 2, 3, 0, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_zyx_is_osv32_isv16, { 1, 2, 3, 0, 4, 5 } },
-{ WeightsLayout::g_os_zyx_is_osv32_isv32, { 1, 2, 3, 0, 4, -1, -1, 5 } },
+{ WeightsLayout::g_os_zyx_is_osv32_isv32, { 1, 2, 3, 0, 4, 5 } },
 }};

 NDims DataTensor::GetSimpleDims(const std::vector<size_t>& d, DataLayout l) {
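The channel array is consumed by position: each row stores, per WeightsChannelName, the index of that channel inside the layout's dims, with -1 for a channel the layout does not carry, which is why every row shrinks from eight slots to six. A hedged sketch of that lookup; the enum values match the new WeightsChannelName declaration further down, while the row data and function names are illustrative:

```cpp
#include <array>
#include <cstddef>

enum class WChanSketch { X = 0, Y = 1, Z = 2, IFM = 3, OFM = 4, G = 5, COUNT = 6 };

using ChannelRow = std::array<int, static_cast<std::size_t>(WChanSketch::COUNT)>;

// Mirrors the new goiyx row above: X=0, Y=1, Z absent, IFM=2, OFM=3, G=4.
constexpr ChannelRow goiyx_row = {{0, 1, -1, 2, 3, 4}};

// The core lookup each Channelndex-style helper performs.
constexpr int channel_index_sketch(const ChannelRow& row, WChanSketch c) {
    return row[static_cast<std::size_t>(c)];
}

static_assert(channel_index_sketch(goiyx_row, WChanSketch::G) == 4, "G is the outer goiyx dim");
static_assert(channel_index_sketch(goiyx_row, WChanSketch::Z) == -1, "goiyx has no Z");
```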
@@ -859,13 +859,6 @@ WeightsTensor WeightsTensor::TransformIgnorePadding(WeightsLayout l, WeightsType
 vec[Channelndex(l, WeightsChannelName::IFM)] = IFM().v;
 vec[Channelndex(l, WeightsChannelName::OFM)] = OFM().v;
 vec[Channelndex(l, WeightsChannelName::Z)] = Z().v;
-} else if (src_channels == 6 && dst_channels == 6) {
-vec[Channelndex(l, WeightsChannelName::X)] = X().v;
-vec[Channelndex(l, WeightsChannelName::Y)] = Y().v;
-vec[Channelndex(l, WeightsChannelName::IFM)] = IFM().v;
-vec[Channelndex(l, WeightsChannelName::OFM)] = OFM().v;
-vec[Channelndex(l, WeightsChannelName::LX)] = LX().v;
-vec[Channelndex(l, WeightsChannelName::LY)] = LY().v;
 } else if (src_channels == 4 && dst_channels == 5) {
 vec[Channelndex(l, WeightsChannelName::X)] = X().v;
 vec[Channelndex(l, WeightsChannelName::Y)] = Y().v;
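TransformIgnorePadding dispatches on the (source channel count, destination channel count) pair and copies each named channel's extent through Channelndex; the deleted branch is the 6-to-6 case, which apparently only served the removed LX/LY layouts. A rough sketch of the per-channel copy every surviving branch repeats; names with the _sketch suffix are assumptions, not the kernel_selector API:

```cpp
#include <array>
#include <cstddef>
#include <vector>

enum class WChan2 { X = 0, Y = 1, Z = 2, IFM = 3, OFM = 4, G = 5, COUNT = 6 };

// Per-layout channel positions, -1 when the layout lacks the channel.
using Row2 = std::array<int, static_cast<std::size_t>(WChan2::COUNT)>;

// Copy one named channel's extent into the destination dim vector when the
// destination layout carries that channel.
void copy_channel_sketch(const Row2& dst_layout, WChan2 c, std::size_t extent,
                         std::vector<std::size_t>& vec) {
    int idx = dst_layout[static_cast<std::size_t>(c)];
    if (idx >= 0)
        vec[static_cast<std::size_t>(idx)] = extent;
}
```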
@@ -203,7 +203,7 @@ using NDims = std::vector<Dim>;
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 enum class DataChannelName { X = 0, Y = 1, Z = 2, W = 3, FEATURE = 4, BATCH = 5, COUNT = 6 };

-enum class WeightsChannelName { X = 0, Y = 1, Z = 2, IFM = 3, OFM = 4, LX = 5, LY = 6, G = 7, COUNT = 8 };
+enum class WeightsChannelName { X = 0, Y = 1, Z = 2, IFM = 3, OFM = 4, G = 5, COUNT = 6 };

 inline bool SimpleLayout(WeightsLayout l) {
 switch (l) {
@@ -568,8 +568,6 @@ struct WeightsTensor : TensorBaseT<WeightsType, WeightsLayout> {
 Dim Z() const { return Extract(layout, WeightsChannelName::Z, dims); }
 Dim IFM() const { return Extract(layout, WeightsChannelName::IFM, dims); }
 Dim OFM() const { return Extract(layout, WeightsChannelName::OFM, dims); }
-Dim LX() const { return Extract(layout, WeightsChannelName::LX, dims); }
-Dim LY() const { return Extract(layout, WeightsChannelName::LY, dims); }
 Dim G() const { return Extract(layout, WeightsChannelName::G, dims); }

 static inline Dim Extract(WeightsLayout l, WeightsChannelName channel, const NDims& d) {