[IE CLDNN] WA to 1d input for concat (#1040)
This commit is contained in:
@@ -3518,10 +3518,25 @@ void Program::AddConstantBlobInput(cldnn::topology& topology, InferenceEngine::C
         return false;
     };
 
+    // WA to inconsistency between input and const 1d tensors
+    // For Concat along batch we go with batch interpretation
+    bool concatAlongBatch = false;
+    if (constDims.size() == 1) {
+        for (auto next : GetNextLayers(layer->outData[0])) {
+            if (LayerTypeFromStr(next->type) == Concatenate) {
+                auto nextConcat = as<InferenceEngine::ConcatLayer*>(next);
+                if (nextConcat->_axis == cldnn::concatenation::concatenation_axis::along_b) {
+                    concatAlongBatch = true;
+                    break;
+                }
+            }
+        }
+    }
+
     // If quantize on weights has per-channel ranges, we have to swap channel and batch dimensions, because
     // quantization should be applied per output channel of weights
     // TODO: Check if it's still needed once LowPrecisionTransformations ready
-    if (inputToConstQuantize(layer)) {
+    if (inputToConstQuantize(layer) || concatAlongBatch) {
         constTensor.batch[0] = constTensor.count();
         constTensor.feature[0] = 1;
     }
Reference in New Issue
Block a user