18 : m_OutputSlot(nullptr)
23 : m_OutputSlot(outputSlot)
25 , m_TensorInfo(tensorInfo)
37 throw armnn::Exception("cannot invoke Connect on an invalid LayerInputHandle");
41 m_OutputSlot->Connect(inputSlot);
49 throw armnn::Exception("cannot invoke Disconnect on an invalid LayerInputHandle");
70 SanitizeBiasQuantizationScale(biasInfo, weightInfo, inputInfo);
72 m_TensorInfo = biasInfo;
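// ConstTensorPin: checks that the supplied buffer length matches the TensorInfo and,
// when a non-empty permutation mapping is given, keeps a swizzled copy of the data so
// the original operand memory is left untouched.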
83 : m_Optional(optional)
87 const void* valueStart,
95 VLOG(DRIVER) << "The size of ConstTensor does not match its TensorInfo.";
98 const bool needsSwizzling = (mappings.GetSize() > 0);
101 m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
124 return m_ConstTensor;
131 return &m_ConstTensor;
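// Conversion of a constant operand into a ConstTensorPin (presumably the
// ConvertOperandToConstTensorPin / ConvertOperationInputToConstTensorPin helpers): the
// operand must have a tensor-capable type, a readable address, and a constant lifetime
// (or NO_VALUE when optional); the caller may override the tensor shape and data type
// before the pin is built.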
144 const bool isOptional = true)
149 Fail("%s: failed to get input operand %i", __func__, inputIndex);
153 if (!isOptional && operand->lifetime == OperandLifeTime::NO_VALUE)
157 if (operand->lifetime != OperandLifeTime::CONSTANT_COPY
158 && operand->lifetime != OperandLifeTime::CONSTANT_REFERENCE
159 && operand->lifetime != OperandLifeTime::NO_VALUE)
174 if (!IsOperandTypeSupportedForTensors(operand.type))
176 VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor" << operand.type;
182 VLOG(DRIVER) << __func__ << ": lifetime for input tensor: " << operand.lifetime;
195 Fail("%s: failed to get operand address", __func__);
201 if (overrideTensorShape)
203 tensorInfo.SetShape(*overrideTensorShape);
206 if (overrideDataType)
213 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
227 Fail("%s: failed to get input operand %i", __func__, inputIndex);
231 if (!IsOperandTypeSupportedForTensors(operand->type))
233 VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor: " << operand->type;
245 const uint32_t operandIndex = operation.inputs[inputIndex];
255 Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
260 switch (operand->lifetime)
262 case OperandLifeTime::SUBGRAPH_INPUT:
266 bool isInputSupported = false;
274 if (!isInputSupported)
276 Fail("%s: unsupported input tensor", __func__);
280 [[clang::fallthrough]];
282 case OperandLifeTime::TEMPORARY_VARIABLE:
283 case OperandLifeTime::SUBGRAPH_OUTPUT:
289 const uint32_t operandIndex = operation.inputs[inputIndex];
292 case OperandLifeTime::CONSTANT_COPY:
293 case OperandLifeTime::POINTER:
294 case OperandLifeTime::CONSTANT_REFERENCE:
296 auto constantTensorDataType = operandTensorInfo.GetDataType();
304 &constantTensorDataType);
307 bool isSupported = false;
331 Fail("%s: invalid operand tensor", __func__);
338 VLOG(DRIVER) << __func__ << ": unsupported lifetime for input tensor: " << operand->lifetime;
345 VLOG(DRIVER) << __func__ << ": Operand type: " << e.m_type << " not supported in ArmnnDriver";
357 if (!paddingsOperand)
359 return Fail("%s: Could not read paddings operand", __func__);
365 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
368 std::vector<int32_t> paddings;
371 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
375 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
377 int paddingBeforeInput = paddings[i];
378 int paddingAfterInput = paddings[i + 1];
380 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
382 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
385 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
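// Shared pooling conversion (takes the calling operation's name for its failure
// messages): reads either the explicit-padding or the implicit PaddingScheme input
// variant depending on the input count, queries IsPooling2dSupported on the backends,
// then adds the pooling layer and forwards the fused activation when tracking the
// output slot.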
393 const char* operationName,
399 VLOG(DRIVER) << "Converter::ConvertL2Pool2d()";
404 return Fail("%s: Operation Could not read input 0", operationName);
410 return Fail("%s: Could not read output 0", __func__);
421 ActivationFn activation;
423 auto inputSize = operation.inputs.size();
438 return Fail("%s: Operation has invalid inputs", operationName);
441 if (Is12OrLaterOperand(*output))
449 ::android::nn::PaddingScheme scheme;
457 return Fail("%s: Operation has invalid inputs", operationName);
460 if (Is12OrLaterOperand(*output))
473 bool isSupported = false;
478 IsPooling2dSupported,
494 validateFunc(outputInfo, isSupported);
506 return Fail("%s: AddPooling2dLayer failed", __func__);
517 data, nullptr, validateFunc, activation);
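// Reduce conversion: input 1 supplies the reduction axes, which are normalised into
// [0, rank) and de-duplicated before being assigned to descriptor.m_vAxis; input 2 is
// also required (presumably the keep-dims flag).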
531 return Fail("%s: Operation has invalid inputs", __func__);
538 return Fail("%s: Could not read output 0", __func__);
545 return Fail("%s: Could not read input 1", __func__);
547 std::vector<int32_t> axis;
550 return Fail("%s: Input 1 has invalid values", __func__);
555 std::set<unsigned int> uniqueAxis;
556 std::transform(axis.begin(), axis.end(),
557 std::inserter(uniqueAxis, uniqueAxis.begin()),
558 [rank](int i) -> unsigned int { return (i + rank) % rank; });
559 descriptor.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
564 return Fail("%s: Could not read input 2", __func__);
567 bool isSupported = false;
583 validateFunc(outputInfo, isSupported);
597 assert(layer != nullptr);
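// Generic activation conversion (the specific activation is named by operationName):
// validates input 0, checks IsActivationSupported against the backends, then adds the
// activation layer.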
605 const char* operationName,
613 return Fail("%s: Input 0 is invalid", operationName);
624 bool isSupported = false;
629 IsActivationSupported,
644 validateFunc(outInfo, isSupported);
653 if (layer == nullptr)
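// Dequantize-on-host path for quantized weights: scans the model for a DEQUANTIZE
// operation whose output index matches the weights input, then multiplies each
// quantized byte by the operand's scale into a freshly allocated float buffer and
// returns it together with a float TensorInfo for use as a constant tensor.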
680 const size_t weightsInputIndex = operation.inputs[operand_index];
684 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
687 const auto& operationIt =
getMainModel(model).operations[operationIdx];
688 if (operationIt.type != OperationType::DEQUANTIZE)
693 size_t outOpIndex = weightsInputIndex + 1;
694 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
696 outOpIndex = operationIt.outputs[i];
699 if (outOpIndex != weightsInputIndex)
705 if (operand == nullptr)
724 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
725 size_t dequantizedBufferLength = operand->location.length;
726 const float quantizationScale = operand->scale;
728 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
729 for (size_t i = 0; i < dequantizedBufferLength; ++i)
731 float* dstPtr = dequantizedBuffer.get();
732 if (dstPtr == nullptr)
736 dstPtr[i] = quantizedBuffer[i] * quantizationScale;
741 operand->dimensions.data(),
744 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
745 std::move(tensorInfo),
771 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
777 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
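// Padding-scheme input: read as an int32 operand and cast to ::android::nn::PaddingScheme.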
784 PaddingScheme& outPaddingScheme,
788 int32_t paddingSchemeAsInt;
789 if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
791 return Fail("%s: failed to get padding scheme input value", __func__);
794 outPaddingScheme = static_cast<::android::nn::PaddingScheme>(paddingSchemeAsInt);
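// Read-only operand address resolution by lifetime: CONSTANT_COPY points into
// model.operandValues, POINTER uses the stored pointer directly, and NO_VALUE leaves
// valueStart as nullptr for optional operands; any other lifetime is logged and also
// yields nullptr.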
803 const void* valueStart = nullptr;
804 switch (operand.lifetime)
806 case OperandLifeTime::CONSTANT_COPY:
808 valueStart = model.operandValues.data() + operand.location.offset;
811 case OperandLifeTime::POINTER:
814 valueStart = std::get<const void*>(operand.location.pointer);
817 case OperandLifeTime::CONSTANT_REFERENCE:
823 case OperandLifeTime::NO_VALUE:
828 valueStart = nullptr;
835 VLOG(DRIVER) << __func__ << ": unsupported/invalid operand lifetime: " << operand.lifetime;
836 valueStart = nullptr;
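// Int32 tensor read: the operand must be TENSOR_INT32 and its byte length a multiple
// of sizeof(int32_t); the values are then memcpy'd into outValues.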
844 std::vector<int32_t>& outValues,
848 if (operand.type != OperandType::TENSOR_INT32)
850 VLOG(DRIVER) << __func__ << ": invalid operand type: " << operand.type;
857 VLOG(DRIVER) << __func__ << ": failed to get operand address " << operand.type;
862 const uint32_t numBytes = operand.location.length;
863 if (numBytes % sizeof(int32_t) != 0)
865 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
866 __func__, numBytes, sizeof(int32_t));
869 outValues.resize(numBytes / sizeof(int32_t));
870 memcpy(outValues.data(), startAddress, numBytes);
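// Optional bool input: the operand must pass IsBool and is read through its resolved
// read-only address.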
885 if (!IsBool(*operand))
896 if (*(static_cast<const bool*>(valueAddress)))
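// ProcessActivation: maps the NNAPI ActivationFn onto an armnn::ActivationDescriptor
// (ReLU; bounded ReLU with upper bound m_A and lower bound m_B for RELU1/RELU6;
// Sigmoid; TanH), verifies IsActivationSupported, and appends the activation layer to
// the previous layer's single output slot.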
907 ActivationFn activation,
913 throw armnn::Exception("ProcessActivation: previous layer does not have a single output slot");
920 if (activation != ActivationFn::kActivationNone)
925 case ActivationFn::kActivationRelu:
930 case ActivationFn::kActivationRelu1:
933 activationDesc.m_A = 1.0f;
934 activationDesc.m_B = -1.0f;
937 case ActivationFn::kActivationRelu6:
940 activationDesc.m_A = 6.0f;
943 case ActivationFn::kActivationSigmoid:
948 case ActivationFn::kActivationTanh:
951 activationDesc.m_A = 1.0f;
952 activationDesc.m_B = 1.0f;
957 Fail("%s: Invalid activation enum value %i", __func__, activation);
962 bool isSupported = false;
965 IsActivationSupported,
977 activationLayer = data.m_Network->AddActivationLayer(activationDesc);
984 return activationLayer;
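// Output-slot tracking (presumably SetupAndTrackLayerOutputSlot): validates the output
// operand and slot index, optionally overrides the output TensorInfo, runs the
// validateFunc / shape-inference path over the layer's input slots, and inserts a
// fused activation layer via ProcessActivation when activationFunction is not
// kActivationNone.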
988 uint32_t operationOutputIndex,
990 uint32_t layerOutputIndex,
995 const ActivationFn& activationFunction,
996 bool inferOutputShapes)
999 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1005 if (overrideOutputInfo == nullptr)
1014 bool isSupported = false;
1018 for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1032 for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1040 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1042 if (activationFunction != ActivationFn::kActivationNone)
1050 return Fail("%s: ProcessActivation failed", __func__);
1066 VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize()";
1071 VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() ioutputSlot is valid.";
1076 VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() connected to Dequantize Layer.";
1081 VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer has a connection.";
1086 VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer connected to Constant";