[IE][VPU]: Bidirectional mode for broadcast on vpu (#2627)
* Support for bidirectional broadcast mode
parent 77365bcb4c
commit 0040d47b00
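Background on the semantics: in bidirectional mode the output shape is the elementwise maximum of the input shape and the target shape, after the lower-rank shape is left-padded with 1s. A minimal standalone sketch of that rule (bidirectionalShape is a hypothetical helper written for illustration, not part of the patch):

#include <algorithm>
#include <cstddef>
#include <vector>

// Compute the bidirectional broadcast output shape, mirroring the logic
// this patch adds to StaticShapeBroadcast::validate_and_infer_types().
std::vector<size_t> bidirectionalShape(std::vector<size_t> a, std::vector<size_t> b) {
    // Left-pad the lower-rank shape with ones so the ranks match.
    auto& low = a.size() < b.size() ? a : b;
    const auto& high = a.size() < b.size() ? b : a;
    while (low.size() < high.size()) {
        low.insert(low.begin(), 1);
    }
    // Every output dimension is the elementwise maximum of the pair.
    std::vector<size_t> out(a.size());
    for (size_t i = 0; i < a.size(); ++i) {
        out[i] = std::max(a[i], b[i]);
    }
    return out;
}
// e.g. bidirectionalShape({16, 1, 1}, {2, 16, 50, 50}) returns {2, 16, 50, 50}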
@@ -19,7 +19,7 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
 # Default packages
 #
 
-set(FIRMWARE_PACKAGE_VERSION 1426)
+set(FIRMWARE_PACKAGE_VERSION 1430)
 set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.09.1")
 
 #
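The firmware package bump above (1426 to 1430) presumably pulls in firmware-side support for the new bidirectional mode value that the Broadcast stage serializes later in this patch.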
@@ -8,13 +8,14 @@
 #include "ngraph/op/op.hpp"
-#include "ngraph/op/util/broadcast_base.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/op/broadcast.hpp"
 
 #include <memory>
 #include <vector>
 
 namespace ngraph { namespace vpu { namespace op {
 
-class StaticShapeBroadcast : public ::ngraph::op::util::BroadcastBase {
+class StaticShapeBroadcast : public ::ngraph::op::v3::Broadcast {
 public:
     static constexpr NodeTypeInfo type_info{"StaticShapeBroadcast", 0};
@@ -18,7 +18,7 @@ StaticShapeBroadcast::StaticShapeBroadcast(const Output<Node>& arg,
                                            const Output<Node>& targetShape,
                                            const Output<Node>& axesMapping,
                                            const ngraph::op::BroadcastModeSpec& broadcastSpec)
-        : ::ngraph::op::util::BroadcastBase{arg, targetShape, axesMapping, broadcastSpec},
+        : ::ngraph::op::v3::Broadcast{arg, targetShape, axesMapping, broadcastSpec},
           m_evaluatedOutputShape{PartialShape::dynamic()} {
     constructor_validate_and_infer_types();
 }
@@ -26,7 +26,7 @@ StaticShapeBroadcast::StaticShapeBroadcast(const Output<Node>& arg,
 StaticShapeBroadcast::StaticShapeBroadcast(const Output<Node>& arg,
                                            const Output<Node>& targetShape,
                                            const ngraph::op::BroadcastModeSpec& broadcastSpec)
-        : ::ngraph::op::util::BroadcastBase{arg, targetShape, broadcastSpec},
+        : ::ngraph::op::v3::Broadcast{arg, targetShape, broadcastSpec},
           m_evaluatedOutputShape{PartialShape::dynamic()} {
     constructor_validate_and_infer_types();
 }
@@ -37,10 +37,10 @@ void StaticShapeBroadcast::validate_and_infer_types() {
                               "StaticShapeBroadcast (", get_friendly_name(), ") ",
                               "with explicit mode must have 3 inputs, provided: ",
                               get_input_size());
-    } else if (m_mode.m_type == ngraph::op::BroadcastType::NUMPY) {
+    } else if (m_mode.m_type == ngraph::op::BroadcastType::NUMPY || m_mode.m_type == ngraph::op::BroadcastType::BIDIRECTIONAL) {
         NODE_VALIDATION_CHECK(this, get_input_size() == 2,
                               "StaticShapeBroadcast (", get_friendly_name(), ") ",
-                              "with numpy mode must have 2 inputs, provided: ",
+                              "with ", m_mode.m_type, " mode must have 2 inputs, provided: ",
                               get_input_size());
     } else {
         NODE_VALIDATION_CHECK(this, false,
@@ -49,7 +49,7 @@ void StaticShapeBroadcast::validate_and_infer_types() {
     }
 
     if (get_output_partial_shape(0).is_dynamic()) {
-        ::ngraph::op::util::BroadcastBase::validate_and_infer_types();
+        ::ngraph::op::v3::Broadcast::validate_and_infer_types();
         // Try to evaluate the output shape. After further transformations we may not be able
         // to evaluate the target shape again; in that case the evaluated shape is left unchanged.
         // For example, EliminateShapeOfAfterDSR removes ShapeOf and passes the second input of DSR.
@@ -58,7 +58,25 @@ void StaticShapeBroadcast::validate_and_infer_types() {
 
         const auto evaluatedTargetShape = ngraph::PartialShape(evaluatedDimensionValues);
         if (evaluatedTargetShape.is_static()) {
-            m_evaluatedOutputShape = evaluatedTargetShape;
+            if (m_mode.m_type == ngraph::op::BroadcastType::BIDIRECTIONAL) {
+                auto targetShape = evaluatedTargetShape.get_shape();
+                auto inputShape = get_input_partial_shape(0).get_shape();
+
+                auto& lowRankShape = targetShape.size() < inputShape.size() ? targetShape : inputShape;
+                auto& highRankShape = lowRankShape == targetShape ? inputShape : targetShape;
+
+                while (lowRankShape.size() < highRankShape.size()) {
+                    lowRankShape.insert(lowRankShape.begin(), 1);
+                }
+
+                for (size_t i = 0; i < targetShape.size(); i++) {
+                    targetShape[i] = std::max(targetShape[i], inputShape[i]);
+                }
+
+                m_evaluatedOutputShape = targetShape;
+            } else {
+                m_evaluatedOutputShape = evaluatedTargetShape;
+            }
         }
         NODE_VALIDATION_CHECK(this, m_evaluatedOutputShape.is_static(),
                               "StaticShapeBroadcast (", get_friendly_name(), ") ",
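Worked example for the block above, matching the test data added later in this patch: an input shape of {2, 16, 1, 50} with an evaluated target shape of {16, 50, 1} pads the lower-rank target to {1, 16, 50, 1}, and the elementwise maximum then yields m_evaluatedOutputShape = {2, 16, 50, 50}.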
@@ -87,6 +105,12 @@ bool StaticShapeBroadcast::visit_attributes(ngraph::AttributeVisitor& visitor) {
         mode = "explicit";
     } else if (m_mode.m_type == ngraph::op::BroadcastType::NUMPY) {
         mode = "numpy";
+    } else if (m_mode.m_type == ngraph::op::BroadcastType::BIDIRECTIONAL) {
+        mode = "bidirectional";
+    } else {
+        NODE_VALIDATION_CHECK(this, false,
+                              "StaticShapeBroadcast (", get_friendly_name(), ") ",
+                              "has ", m_mode.m_type, " mode which isn't supported");
     }
     visitor.on_attribute("mode", mode);
@@ -94,7 +118,7 @@ bool StaticShapeBroadcast::visit_attributes(ngraph::AttributeVisitor& visitor) {
 }
 
 bool StaticShapeBroadcast::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
-    return ::ngraph::op::util::BroadcastBase::evaluate(outputs, inputs);
+    return ::ngraph::op::v3::Broadcast::evaluate(outputs, inputs);
 }
 
 }  // namespace op
@@ -261,7 +261,8 @@ VPU_DECLARE_ENUM(ConcatInferRequirement,
 // Modes for Broadcast operation according to specification
 VPU_DECLARE_ENUM(BroadcastMode,
                  NUMPY = 0,
-                 EXPLICIT = 1)
+                 EXPLICIT = 1,
+                 BIDIRECTIONAL = 2)
 
 //
 // StageDataInfo
@@ -60,27 +60,26 @@ protected:
         VPU_THROW_UNLESS(numOutputs() == 1,
                          "{} stage with name {} must have only 1 output, actually provided {} outputs",
                          type(), name(), numOutputs());
-        if (mode == BroadcastMode::NUMPY) {
-            VPU_THROW_UNLESS(numInputs() == 2,
-                             "{} stage with name {} and numpy mode must have 2 inputs, actually "
-                             "provided {} inputs", type(), name(), numInputs());
-            assertInputsOutputsTypes(this,
-                                     {{dataPrecision}, {DataType::S32}},
-                                     {{dataPrecision}});
-
-        } else {
+        if (mode == BroadcastMode::EXPLICIT) {
             VPU_THROW_UNLESS(numInputs() == 3,
                              "{} stage with name {} and explicit mode must have 3 inputs, actually "
                              "provided {} inputs", type(), name(), numInputs());
             assertInputsOutputsTypes(this,
                                      {{dataPrecision}, {DataType::S32}, {DataType::S32}},
                                      {{dataPrecision}});
+        } else {
+            VPU_THROW_UNLESS(numInputs() == 2,
+                             "{} stage with name {} and numpy or bidirectional mode must have 2 inputs, actually "
+                             "provided {} inputs", type(), name(), numInputs());
+            assertInputsOutputsTypes(this,
+                                     {{dataPrecision}, {DataType::S32}},
+                                     {{dataPrecision}});
         }
     }
 
     void serializeParamsImpl(BlobSerializer& serializer) const override {
         const auto mode = attrs().getOrDefault<BroadcastMode>("mode", BroadcastMode::NUMPY);
-        serializer.append(static_cast<uint32_t>(mode == BroadcastMode::NUMPY ? 0 : 1));
+        serializer.append(mode);
     }
 
     void serializeDataImpl(BlobSerializer& serializer) const override {
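A note on serializeParamsImpl above: appending the mode enum directly (instead of mapping NUMPY to 0 and everything else to 1) stays wire-compatible for the two pre-existing modes because BroadcastMode now pins explicit values (NUMPY = 0, EXPLICIT = 1), while BIDIRECTIONAL serializes as the new value 2.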
@@ -104,18 +103,26 @@ void FrontEnd::parseBroadcast(
         const DataVector& outputs) const {
     VPU_THROW_UNLESS(layer != nullptr,
                      "parseBroadcast expects valid CNNLayerPtr, got nullptr");
 
     VPU_THROW_UNLESS(outputs.size() == 1,
                      "{} layer with name {} must have only 1 output, actually provided {} outputs",
                      layer->type, layer->name, outputs.size());
     const auto output = outputs[0];
 
     const auto modeString = layer->GetParamAsString("mode", "numpy");
-    if (modeString == "numpy") {
+    const std::map<std::string, BroadcastMode> modeFromString = {
+        {"numpy", BroadcastMode::NUMPY},
+        {"explicit", BroadcastMode::EXPLICIT},
+        {"bidirectional", BroadcastMode::BIDIRECTIONAL}
+    };
+    const auto& modeFind = modeFromString.find(modeString);
+    VPU_THROW_UNLESS(modeFind != modeFromString.end(),
+                     "{} layer with name {}: Graph Transformer doesn't support {} mode",
+                     layer->type, layer->name, modeString);
+    const auto mode = modeFind->second;
+    if (mode == BroadcastMode::NUMPY || mode == BroadcastMode::BIDIRECTIONAL) {
         VPU_THROW_UNLESS(inputs.size() == 2,
-                         "{} layer with name {} and numpy mode must have 2 inputs, actually "
-                         "provided {} inputs", layer->type, layer->name, inputs.size());
-    } else if (modeString == "explicit") {
+                         "{} layer with name {} and {} mode must have 2 inputs, actually "
+                         "provided {} inputs", layer->type, layer->name, modeString, inputs.size());
+    } else if (mode == BroadcastMode::EXPLICIT) {
         VPU_THROW_UNLESS(inputs.size() == 3,
                          "{} layer with name {} and explicit mode must have 3 inputs, actually "
                          "provided {} inputs", layer->type, layer->name, inputs.size());
@@ -143,13 +150,11 @@ void FrontEnd::parseBroadcast(
                          "{} layer with name {} and explicit mode must have 1D target shape tensor, "
                          "actually provided {}D tensor",
                          layer->type, layer->name, shapeDesc.numDims());
-        VPU_THROW_UNLESS(shapeDim == output->desc().numDims(),
+        VPU_THROW_UNLESS(shapeDim == output->desc().numDims() || mode != BroadcastMode::EXPLICIT,
                          "{} layer with name {} and explicit mode must have target shape tensor with "
                          "size equals to number of output dims, expected [{}], provided [{}]",
                          layer->type, layer->name, output->desc().numDims(), shapeDim);
 
-    const auto mode = modeString == "numpy" ? BroadcastMode::NUMPY : BroadcastMode::EXPLICIT;
-
     auto stage = model->addNewStage<BroadcastStage>(
         layer->name,
         StageType::Broadcast,
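The relaxed check above reflects that only explicit mode requires the target-shape tensor length to equal the output rank; in bidirectional mode the target shape may have a lower rank than the output (for example, target {16, 50, 1} against a 4D output in the tests below), so the condition short-circuits for non-explicit modes.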
@@ -30,8 +30,14 @@ struct BroadcastExplicitShapes {
     AxesMapping axesMapping;
 };
 
+struct BroadcastBidirectionalShapes {
+    TensorShape srcShape;
+    TensorShape targetShape;
+    TensorShape outputShape;
+};
 using BroadcastNumpyTestParams = std::tuple<TensorType, BroadcastNumpyShapes>;
 using BroadcastExplicitTestParams = std::tuple<TensorType, BroadcastExplicitShapes>;
+using BroadcastBidirectionalTestParams = std::tuple<TensorType, BroadcastBidirectionalShapes>;
 
 class StaticShapeBroadcastNumpyTests
         : public CommonTestUtils::TestsCommon,
@@ -73,6 +79,27 @@ protected:
     std::shared_ptr<ngraph::opset3::Constant> m_axesMapping;
 };
 
+class StaticShapeBroadcastBidirectionalTests
+        : public CommonTestUtils::TestsCommon,
+          public testing::WithParamInterface<BroadcastBidirectionalTestParams> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& tensorType = std::get<0>(parameters);
+        const auto& tensorShape = std::get<1>(parameters).srcShape;
+        const auto& targetShape = std::get<1>(parameters).targetShape;
+        const auto& outputShape = std::get<1>(parameters).outputShape;
+
+        m_tensor = std::make_shared<ngraph::opset3::Parameter>(tensorType, tensorShape);
+        m_tensorWithTargetShape = std::make_shared<ngraph::opset3::Parameter>(tensorType, targetShape);
+        m_tensorWithOutput = std::make_shared<ngraph::opset3::Parameter>(tensorType, outputShape);
+    }
+
+protected:
+    std::shared_ptr<ngraph::opset3::Parameter> m_tensor;
+    std::shared_ptr<ngraph::opset3::Parameter> m_tensorWithTargetShape;
+    std::shared_ptr<ngraph::opset3::Parameter> m_tensorWithOutput;
+};
 
 std::vector<BroadcastNumpyShapes> testNumpyStaticShapes {
     BroadcastNumpyShapes{TensorShape{1, 100}, TensorShape{4, 100}},
     BroadcastNumpyShapes{TensorShape{1, 100}, TensorShape{2, 4, 100}},
@@ -84,6 +111,15 @@ std::vector<BroadcastExplicitShapes> testExplicitStaticShapes {
     BroadcastExplicitShapes{TensorShape{50, 50}, TensorShape{1, 50, 50, 16}, AxesMapping{1, 2}},
 };
 
+std::vector<BroadcastBidirectionalShapes> testBidirectionalStaticShapes {
+    BroadcastBidirectionalShapes{TensorShape{1, 100}, TensorShape{4, 100}, TensorShape{4, 100}},
+    BroadcastBidirectionalShapes{TensorShape{1, 100}, TensorShape{2, 4, 100}, TensorShape{2, 4, 100}},
+    BroadcastBidirectionalShapes{TensorShape{16, 1, 1}, TensorShape{2, 16, 50, 50}, TensorShape{2, 16, 50, 50}},
+    BroadcastBidirectionalShapes{TensorShape{4, 100}, TensorShape{1, 100}, TensorShape{4, 100}},
+    BroadcastBidirectionalShapes{TensorShape{2, 4, 100}, TensorShape{1, 100}, TensorShape{2, 4, 100}},
+    BroadcastBidirectionalShapes{TensorShape{2, 16, 1, 50}, TensorShape{16, 50, 1}, TensorShape{2, 16, 50, 50}},
+};
 
 std::vector<ngraph::element::Type> testNGraphNumericTypes {
     ngraph::element::dynamic,
     ngraph::element::bf16,
@@ -129,7 +165,7 @@ TEST_P(StaticShapeBroadcastExplicitTests, CanValidateAndInferTypes) {
     ASSERT_NO_THROW(std::make_shared<ngraph::Function>(
         ngraph::OutputVector{op->output(0)},
         ngraph::ParameterVector{m_tensor, m_tensorWithTargetShape}));
-    ASSERT_EQ(m_tensorWithTargetShape->get_shape(), op->output(0).get_shape());
+    ASSERT_EQ(m_tensorWithTargetShape->get_shape(), op->get_output_shape(0));
 }
 
 INSTANTIATE_TEST_CASE_P(smoke_NGraph, StaticShapeBroadcastExplicitTests, testing::Combine(
@@ -137,6 +173,23 @@ INSTANTIATE_TEST_CASE_P(smoke_NGraph, StaticShapeBroadcastExplicitTests, testing
     testing::ValuesIn(testExplicitStaticShapes))
 );
 
+TEST_P(StaticShapeBroadcastBidirectionalTests, CanValidateAndInferTypes) {
+    const auto shapeOf = std::make_shared<ngraph::opset3::ShapeOf>(m_tensorWithTargetShape);
+    std::shared_ptr<ngraph::vpu::op::StaticShapeBroadcast> op;
+    ASSERT_NO_THROW(op = std::make_shared<ngraph::vpu::op::StaticShapeBroadcast>(
+            m_tensor, shapeOf, ngraph::op::BroadcastType::BIDIRECTIONAL));
+    ASSERT_NO_THROW(std::make_shared<ngraph::Function>(
+        ngraph::OutputVector{op->output(0)},
+        ngraph::ParameterVector{m_tensor, m_tensorWithTargetShape}));
+    ASSERT_EQ(m_tensorWithOutput->get_shape(), op->output(0).get_shape());
+}
+
+INSTANTIATE_TEST_CASE_P(smoke_NGraph, StaticShapeBroadcastBidirectionalTests, testing::Combine(
+    testing::ValuesIn(testNGraphNumericTypes),
+    testing::ValuesIn(testBidirectionalStaticShapes))
+);
+
 
 //
 // Negative tests
 //
@@ -171,15 +224,18 @@ INSTANTIATE_TEST_CASE_P(smoke_NGraph, StaticShapeBroadcastExplicitTestsNegativeN
     testing::Values(testExplicitStaticShapes[0]))
 );
 
-using StaticShapeBroadcastTestsNegativeMode = StaticShapeBroadcastNumpyTests;
-TEST_P(StaticShapeBroadcastTestsNegativeMode, ThrowsOnInvalidMode) {
+using StaticShapeBroadcastBidirectionalTestsNegativeNumInputs = StaticShapeBroadcastBidirectionalTests;
+TEST_P(StaticShapeBroadcastBidirectionalTestsNegativeNumInputs, ThrowsOnInvalidNumInputs) {
     const auto shapeOf = std::make_shared<ngraph::opset3::ShapeOf>(m_tensorWithTargetShape);
+    const auto axesMapping = std::make_shared<ngraph::opset3::Constant>(
+        ngraph::element::u64, ngraph::Shape{1}, 0);
     std::shared_ptr<ngraph::vpu::op::StaticShapeBroadcast> op;
     ASSERT_THROW(op = std::make_shared<ngraph::vpu::op::StaticShapeBroadcast>(
-            m_tensor, shapeOf, ngraph::op::BroadcastType::BIDIRECTIONAL),
+            m_tensor, shapeOf, axesMapping, ngraph::op::BroadcastType::BIDIRECTIONAL),
         ngraph::NodeValidationFailure);
 }
 
+using StaticShapeBroadcastTestsNegativeMode = StaticShapeBroadcastNumpyTests;
 INSTANTIATE_TEST_CASE_P(smoke_NGraph, StaticShapeBroadcastTestsNegativeMode, testing::Combine(
     testing::Values(ngraph::element::f16),
     testing::Values(testNumpyStaticShapes[0]))
@@ -20,7 +20,8 @@ using TensorShape = InferenceEngine::SizeVector;
 using StaticShapeBroadcastParam = std::tuple<
         TensorShape,   // Input shape
         TensorShape,   // Target shape
-        TensorShape>;  // Axes mapping
+        TensorShape,   // Axes mapping
+        std::string>;  // mode
 
 using StaticShapeBroadcastTestParam = std::tuple<
         StaticShapeBroadcastParam, // Shapes param
@@ -41,11 +42,13 @@ public:
         const auto inputShape = std::get<0>(shapes);
         const auto targetShape = std::get<1>(shapes);
         const auto axesMapping = std::get<2>(shapes);
+        const auto mode = std::get<3>(shapes);
 
         std::ostringstream result;
         result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
         result << "TS=" << CommonTestUtils::vec2str(targetShape) << "_";
-        if (!axesMapping.empty()) {
+        result << "mode=" << mode << "_";
+        if (mode == "explicit") {
             result << "AM=" << CommonTestUtils::vec2str(axesMapping) << "_";
         }
         result << "inPrc=" << inputPrecision.name() << "_";
@@ -64,6 +67,7 @@ protected:
         const auto inputShape = std::get<0>(shapes);
         const auto targetShape = std::get<1>(shapes);
         const auto axesMapping = std::get<2>(shapes);
+        const auto mode = std::get<3>(shapes);
 
         auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc);
 
@@ -73,14 +77,17 @@ protected:
                 ngraph::element::i64, ngraph::Shape{targetShape.size()}, targetShape);
 
         std::shared_ptr<ngraph::vpu::op::StaticShapeBroadcast> staticShapeBroadcast;
-        if (axesMapping.empty()) {
+        if (mode == "numpy") {
             staticShapeBroadcast = std::make_shared<ngraph::vpu::op::StaticShapeBroadcast>(
                     inputParam, targetShapeConst);
-        } else {
+        } else if (mode == "explicit") {
             const auto axesMappingConst = std::make_shared<ngraph::opset3::Constant>(
                     ngraph::element::i64, ngraph::Shape{axesMapping.size()}, axesMapping);
             staticShapeBroadcast = std::make_shared<ngraph::vpu::op::StaticShapeBroadcast>(
                     inputParam, targetShapeConst, axesMappingConst);
+        } else {
+            staticShapeBroadcast = std::make_shared<ngraph::vpu::op::StaticShapeBroadcast>(
+                    inputParam, targetShapeConst, ngraph::op::BroadcastType::BIDIRECTIONAL);
         }
 
         ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(staticShapeBroadcast->output(0))};
@@ -93,16 +100,23 @@ TEST_P(StaticShapeBroadcastLayerTest, accuracy) {
 }
 
 std::vector<StaticShapeBroadcastParam> broadcastParam = {
-    std::make_tuple(TensorShape{ 14 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
-    std::make_tuple(TensorShape{ 15, 1 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
-    std::make_tuple(TensorShape{ 15, 14 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
-    std::make_tuple(TensorShape{ 16, 1, 1 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
-    std::make_tuple(TensorShape{ 16, 1, 14 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
-    std::make_tuple(TensorShape{ 16, 15, 1 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
+    std::make_tuple(TensorShape{ 14 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}, "numpy"),
+    std::make_tuple(TensorShape{ 15, 1 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}, "numpy"),
+    std::make_tuple(TensorShape{ 15, 14 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}, "numpy"),
+    std::make_tuple(TensorShape{ 16, 1, 1 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}, "numpy"),
+    std::make_tuple(TensorShape{ 16, 1, 14 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}, "numpy"),
+    std::make_tuple(TensorShape{ 16, 15, 1 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}, "numpy"),
 
-    std::make_tuple(TensorShape{ 80 }, TensorShape{ 80, 1 }, TensorShape{ 0 }),
-    std::make_tuple(TensorShape{ 16 }, TensorShape{ 1, 16, 50, 50 }, TensorShape{ 1 }),
-    std::make_tuple(TensorShape{ 50, 50 }, TensorShape{ 1, 50, 50, 16 }, TensorShape{ 1, 2 }),
+    std::make_tuple(TensorShape{ 80 }, TensorShape{ 80, 1 }, TensorShape{ 0 }, "explicit"),
+    std::make_tuple(TensorShape{ 16 }, TensorShape{ 1, 16, 50, 50 }, TensorShape{ 1 }, "explicit"),
+    std::make_tuple(TensorShape{ 50, 50 }, TensorShape{ 1, 50, 50, 16 }, TensorShape{ 1, 2 }, "explicit"),
+
+    std::make_tuple(TensorShape{ 14 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}, "bidirectional"),
+    std::make_tuple(TensorShape{ 15, 1 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}, "bidirectional"),
+    std::make_tuple(TensorShape{ 2, 16, 15, 14 }, TensorShape{ 15, 14 }, TensorShape{}, "bidirectional"),
+    std::make_tuple(TensorShape{ 2, 16, 15, 14 }, TensorShape{ 16, 1, 1 }, TensorShape{}, "bidirectional"),
+    std::make_tuple(TensorShape{ 2, 16, 15, 14 }, TensorShape{ 16, 1, 14 }, TensorShape{}, "bidirectional"),
+    std::make_tuple(TensorShape{ 16, 15, 1 }, TensorShape{ 2, 1, 15, 14 }, TensorShape{}, "bidirectional"),
 };
 
 std::vector<InferenceEngine::Precision> broadcastPrecisions = {