#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
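
// Owned backing store for tensor data that has been swizzled (re-laid-out),
// kept alive for as long as the ConstTensorPin that references it.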
std::vector<uint8_t> m_SwizzledTensorData;
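
// Logs the formatted message and always returns false, so converters can
// write "return Fail(...)" when bailing out.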
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
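
// Convenience macro: queries an IsXxxSupported function on each requested
// backend in turn, logging the caller name together with the reason whenever
// a backend rejects the layer.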
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else if (reasonIfUnsupported.size() > 0) \
            { \
                VLOG(DRIVER) << funcName << ": not supported by armnn: " << reasonIfUnsupported.c_str(); \
            } \
            else \
            { \
                VLOG(DRIVER) << funcName << ": not supported by armnn"; \
            } \
        } \
        else \
        { \
            VLOG(DRIVER) << funcName << ": backend not registered: " << backendId.Get().c_str(); \
        } \
    } \
    if (!supported) \
    { \
        VLOG(DRIVER) << funcName << ": not supported by any specified backend"; \
    } \
} \
catch (const armnn::InvalidArgumentException& e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
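
// Typical invocation, mirroring the IsTransposeSupported query further down
// in this file:
//
//     bool isSupported = false;
//     armnn::BackendId setBackend;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsReshapeSupported,
//                                data.m_Backends,
//                                isSupported,
//                                setBackend,
//                                reshapedInfo,
//                                reshapeDescriptor);
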
inline bool IsOperandTypeSupportedForTensors(OperandType type)
{
    return type == OperandType::BOOL ||
           type == OperandType::TENSOR_BOOL8 ||
           type == OperandType::TENSOR_FLOAT16 ||
           type == OperandType::TENSOR_FLOAT32 ||
           type == OperandType::TENSOR_QUANT8_ASYMM ||
           type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == OperandType::TENSOR_QUANT8_SYMM ||
           type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == OperandType::TENSOR_QUANT16_SYMM ||
           type == OperandType::TENSOR_INT32;
}

inline bool IsBool(Operand operand)
{
    return operand.type == OperandType::BOOL;
}

inline bool Is12OrLaterOperand(Operand)
{
    return true;
}
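
// Adds a reshape layer in front of the given input and returns the new layer,
// throwing if the network refuses to create it.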
template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    if (reshapeLayer == nullptr)
    {
        throw armnn::RuntimeException("ReshapeLayer is null");
    }

    // Attach the input layer to the reshape layer.
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}
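
// Collapses an N-D input to the 2-D { batchSize, inputSize } shape expected
// by a FullyConnected layer, deducing batchSize from the weights.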
inline armnn::TensorShape FlattenFullyConnectedInput(const armnn::TensorShape& inputShape,
                                                     const armnn::TensorShape& weightsShape)
{
    if (inputShape.GetNumDimensions() > 2U)
    {
        unsigned int totalInputElements = inputShape.GetNumElements();
        unsigned int inputSize = weightsShape[1];
        unsigned int batchSize = totalInputElements / inputSize;

        // The flattened element count must be an exact multiple of the
        // weights' input size, or the deduced shape would drop elements.
        if (totalInputElements % inputSize != 0)
        {
            throw std::runtime_error("Failed to deduce tensor shape");
        }
        return armnn::TensorShape({batchSize, inputSize});
    }
    return inputShape;
}

inline bool VerifyFullyConnectedShapes(const armnn::TensorShape& inputShape,
                                       const armnn::TensorShape& weightsShape,
                                       const armnn::TensorShape& outputShape,
                                       bool transposeWeightMatrix)
{
    unsigned int dimIdx = transposeWeightMatrix ? 0 : 1;
    return (inputShape[0] == outputShape[0] && weightsShape[dimIdx] == outputShape[1]);
}
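
// Connects two inputs of possibly different rank to a binary layer,
// reshaping the lower-rank input so that broadcasting works.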
inline bool BroadcastTensor(LayerInputHandle& input0,
                            LayerInputHandle& input1,
                            armnn::IConnectableLayer* startLayer,
                            ConversionData& data)
{
    if (startLayer == nullptr)
    {
        throw armnn::InvalidArgumentException("BroadcastTensor: startLayer pointer handed in is null");
    }

    unsigned int inputDimensions0 = input0.GetTensorInfo().GetNumDimensions();
    unsigned int inputDimensions1 = input1.GetTensorInfo().GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // Same rank: connect both inputs to the layer as they are.
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
        return true;
    }

    // Different ranks: pad the smaller tensor's shape with leading degenerate
    // (size 1) dimensions until both tensors have the same rank.
    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorShape& smallShape = smallInputHandle.GetTensorInfo().GetShape();

    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInputHandle.GetTensorInfo();
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    bool isSupported = false;
    // ... (query IsReshapeSupported for the padded shape via
    //      FORWARD_LAYER_SUPPORT_FUNC and return false if no backend accepts it)

    if (data.m_Network == nullptr)
    {
        throw armnn::InvalidArgumentException(
            "BroadcastTensor: the conversion data handed in has a null network pointer");
    }
    // ... (add the reshape layer and connect both tensors to startLayer)
    return true;
}
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, ::android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

inline void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                                     int32_t& outPadTail, ::android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}
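
// Copies an NNAPI operand's type, dimensions and quantization parameters into
// an android::nn::Shape.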
inline Shape GetOperandShape(const Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}
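
// Arm NN requires bias scale == input scale * weights scale. This helper
// patches up bias TensorInfos that do not already satisfy that, including the
// per-axis quantized case where each output channel has its own scale.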
inline void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                          const armnn::TensorInfo& weightInfo,
                                          const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // The bias scales must be recomputed as bias_scale[i] = input_scale * weight_scale[i].
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);
        biasInfo.SetQuantizationScales(biasScales);
        // ...
        VLOG(DRIVER) << "Bias quantization params have been updated for per-axis quantization";
    }
    else
    {
        // ... (for per-tensor quantization, snap the bias scale to
        //      input_scale * weights_scale when it is within tolerance)
        VLOG(DRIVER) << "Bias quantization scale has been modified to match input * weights";
    }
}

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network,
                                            OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add the transpose (swizzle) layer.
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
    if (layer == nullptr)
    {
        throw armnn::RuntimeException("TransposeLayer is null");
    }
    // ... (connect the input and set the permuted TensorInfo on the output slot)
    return *layer;
}
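
// Checks that the given output shape is consistent with concatenating the
// input shapes along concatDim while leaving every other dimension unchanged.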
bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail("%s: Invalid output shape for dimension %d (%d != %d)",
                            __func__, i, outputShape[i], outputSizeAlongConcatenatedDimension);
            }
        }
        else if (outputShape[i] != inputShapes[0][i])
        {
            return Fail("%s: Invalid output shape", __func__);
        }
    }

    return true;
}
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping,
                   std::vector<armnn::BackendId>& setBackends)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // ... (add a transpose layer for inputs[i] and swap the handle for
            //      the swizzled output slot)
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
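
// Checks IsTransposeSupported for every input before actually swizzling them;
// bails out without modifying anything if no backend supports the transpose
// for some input.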
void TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // Identity permutations mean no transpose is needed.
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        std::vector<armnn::BackendId> setBackendsVec;
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            armnn::TensorInfo outputTransposeInfo =
                armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            setBackendsVec.push_back(setBackend);
            if (!isSupported)
            {
                return;
            }
        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping, setBackendsVec);
    }
}
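
// Compute Library subtensors restrict which axis a 4-D (or 3-D) tensor can be
// concatenated along, so some concatenations are rewritten as permute +
// concat + inverse permute. This helper picks the permutation pair and the
// adjusted concat axis.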
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t& concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    bool needPermute = false;

    if (numberOfDimensions < 3)
    {
        throw armnn::InvalidArgumentException(
            "CreateConcatPermutationParameters: numberOfDimensions handed in cannot be less than three");
    }

    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 3;
        permutationPair = std::make_pair(SwapDim2And3, SwapDim2And3);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // A 3-D concat along dimension 2 needs no permute, but the identity
    // permutation must be restricted to 3 dimensions.
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }

    return needPermute;
}
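
// Accessors for an operation's input and output operands. These validate the
// index against the operation and the main model before handing back a
// pointer into the model's operand table.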
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

inline const Operand* GetInputOperand(const Operation& operation,
                                      uint32_t inputIndex,
                                      const Model& model,
                                      bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // The model should have been validated beforehand.
    if (!(operation.inputs[inputIndex] < getMainModel(model).operands.size()))
    {
        std::ostringstream os;
        os << "GetInputOperand: inputIndex [" << inputIndex << "]";
        os << " is too large. The number of main model operands is [";
        os << getMainModel(model).operands.size() << "]";
        throw armnn::InvalidArgumentException(os.str());
    }

    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

inline const Operand* GetOutputOperand(const Operation& operation,
                                       uint32_t outputIndex,
                                       const Model& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // The model should have been validated beforehand.
    if (!(operation.outputs[outputIndex] < getMainModel(model).operands.size()))
    {
        std::ostringstream os;
        os << "GetOutputOperand: outputIndex [" << outputIndex << "]";
        os << " is too large. The number of main model operands is [";
        os << getMainModel(model).operands.size() << "]";
        throw armnn::InvalidArgumentException(os.str());
    }

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

const void* GetOperandValueReadOnlyAddress(const Operand& operand,
                                           const Model& model,
                                           const ConversionData& data,
                                           bool optional = false);

inline bool GetOperandType(const Operation& operation,
                           uint32_t inputIndex,
                           const Model& model,
                           OperandType& type)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

inline bool IsOperandConstant(const Operand& operand)
{
    OperandLifeTime lifetime = operand.lifetime;

    return lifetime == OperandLifeTime::CONSTANT_COPY ||
           lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == OperandLifeTime::POINTER ||
           lifetime == OperandLifeTime::NO_VALUE;
}

ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
                                              const Model& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false,
                                              const armnn::DataType* overrideDataType = nullptr);

inline ConstTensorPin ConvertOperationInputToConstTensorPin(const Operation& operation, uint32_t inputIndex,
                                                            const Model& model, const ConversionData& data,
                                                            const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                            const armnn::TensorShape* overrideTensorShape = nullptr,
                                                            bool optional = false)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin(*operand, model, data, dimensionMappings, overrideTensorShape, optional);
}
template <typename OutputType>
bool GetInputScalar(const Operation& operation,
                    uint32_t inputIndex,
                    OperandType type,
                    OutputType& outValue,
                    const Model& model,
                    const ConversionData& data,
                    bool optional = false)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!optional && !operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        VLOG(DRIVER) << __func__ << ": unexpected operand type: " << operand->type << " should be: " << type;
        return false;
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (operand && valueAddress)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}

inline bool GetInputInt32(const Operation& operation, uint32_t inputIndex, int32_t& outValue,
                          const Model& model, const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
}

inline bool GetInputFloat32(const Operation& operation, uint32_t inputIndex, float& outValue,
                            const Model& model, const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
}

inline bool GetInputActivationFunctionImpl(const Operation& operation,
                                           uint32_t inputIndex,
                                           OperandType type,
                                           ActivationFn& outActivationFunction,
                                           const Model& model,
                                           const ConversionData& data)
{
    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
    {
        VLOG(DRIVER) << __func__ << ": unexpected operand type: " << type
                     << " should be OperandType::INT32 or OperandType::TENSOR_INT32";
        return false;
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

inline bool GetInputActivationFunction(const Operation& operation,
                                       uint32_t inputIndex,
                                       ActivationFn& outActivationFunction,
                                       const Model& model,
                                       const ConversionData& data)
{
    return GetInputActivationFunctionImpl(operation, inputIndex, OperandType::INT32,
                                          outActivationFunction, model, data);
}

inline bool GetInputActivationFunctionFromTensor(const Operation& operation,
                                                 uint32_t inputIndex,
                                                 ActivationFn& outActivationFunction,
                                                 const Model& model,
                                                 const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1.
    return GetInputActivationFunctionImpl(operation, inputIndex, OperandType::TENSOR_INT32,
                                          outActivationFunction, model, data);
}

inline bool GetOptionalInputActivation(const Operation& operation,
                                       uint32_t inputIndex,
                                       ActivationFn& activationFunction,
                                       const Model& model,
                                       const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        // The activation input is optional; default to "none".
        activationFunction = ActivationFn::kActivationNone;
    }
    else if (!GetInputActivationFunction(operation, inputIndex, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
    return true;
}

template<typename ConvolutionDescriptor>
bool GetOptionalConvolutionDilationParams(const Operation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const Model& model,
                                          const ConversionData& data)
{
    bool success = true;
    // The dilation parameters are optional trailing inputs.
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar(operation, dilationXIndex, OperandType::INT32,
                                  descriptor.m_DilationX, model, data);
        success &= GetInputScalar(operation, dilationXIndex + 1, OperandType::INT32,
                                  descriptor.m_DilationY, model, data);
    }
    return success;
}

inline bool GetOptionalBool(const Operation& operation,
                            uint32_t inputIndex,
                            const Model& model,
                            const ConversionData& data)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand || !IsBool(*operand))
    {
        return false;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
    if (!valueAddress)
    {
        return false;
    }

    return *(static_cast<const bool*>(valueAddress));
}

bool GetTensorInt32Values(const Operand& operand,
                          std::vector<int32_t>& outValues,
                          const Model& model,
                          const ConversionData& data);

bool GetInputPaddingScheme(const Operation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const Model& model,
                           const ConversionData& data);
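
// Registers a layer's output slot as the tensor backing an operation output,
// optionally overriding the TensorInfo, validating backend support via
// validateFunc, and appending a fused activation.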
bool SetupAndTrackLayerOutputSlot(const Operation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const Model& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false);

inline bool SetupAndTrackLayerOutputSlot(const Operation& operation,
                                         uint32_t outputIndex,
                                         armnn::IConnectableLayer& layer,
                                         const Model& model,
                                         ConversionData& data,
                                         const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                         const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                         const ActivationFn& activationFunction = ActivationFn::kActivationNone)
{
    // Default to using the same index for the layer output slot as for the operation output.
    return SetupAndTrackLayerOutputSlot(operation, outputIndex, layer, outputIndex,
                                        model, data, overrideOutputInfo, validateFunc, activationFunction);
}

bool ConvertToActivation(const Operation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const Model& model,
                         ConversionData& data);

bool ConvertPooling2d(const Operation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const Model& model,
                      ConversionData& data);

inline bool IsQSymm8(const Operand& operand)
{
    return operand.type == OperandType::TENSOR_QUANT8_SYMM;
}
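
// Dequantizes a constant operand (typically QSymm8 weights) and wraps the
// result in a ConstTensorPin so it can be fed to Arm NN as float data.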
ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation& operation,
                                               const Model& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional = false);