18 #include <fmt/format.h> 28 switch (inputDataType)
30 case DataType::Float16:
31 return DataType::Float16;
33 case DataType::Float32:
34 return DataType::Float32;
35 case DataType::QAsymmS8:
36 return DataType::Signed32;
37 case DataType::QAsymmU8:
38 return DataType::Signed32;
39 case DataType::QSymmS8:
40 return DataType::Signed32;
41 case DataType::QSymmS16:
42 return DataType::Signed32;
45 return DataType::Float32;
//---------------------------------------------------------------
// Minimal std::to_string-like helper that works for any streamable type;
// used to build the validation error messages below.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream os;
    os << value;
    return os.str();
}
63 void ValidatePointer(
const void* ptr, std::string
const& descName, std::string
const& paramName)
68 paramName +
" parameter must be set.");
73 void ValidateTensorShapesMatch(
const TensorInfo& first,
75 std::string
const& descName,
76 std::string
const& firstName,
77 std::string
const& secondName)
82 + firstName +
" & " + secondName +
" must have identical shapes");
87 void ValidateNumInputs(
const WorkloadInfo& workloadInfo, std::string
const& descName,
const unsigned int expectedSize)
92 ": Requires exactly " + to_string(expectedSize) +
"input(s). " +
98 void ValidateNumOutputs(
const WorkloadInfo& workloadInfo, std::string
const& descName,
const unsigned int expectedSize)
103 ": Requires exactly " + to_string(expectedSize) +
" output(s). " +
109 void ValidateTensorNumDimensions(
const TensorInfo& tensor,
110 std::string
const& descName,
111 unsigned int numDimensions,
112 std::string
const& tensorName)
118 tensorName +
" tensor.");
123 void ValidateTensorNumElements(
const TensorInfo& tensor,
124 std::string
const& descName,
125 unsigned int numElements,
126 std::string
const& tensorName)
132 tensorName +
" tensor.");
137 void ValidateTensorNumDimNumElem(
const TensorInfo& tensorInfo,
138 unsigned int numDimension,
139 unsigned int numElements,
140 std::string
const& tensorName)
142 const std::string functionName{
"ValidateTensorNumDimNumElem"};
143 ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
144 ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
149 const std::string& descName, std::string
const& tensorName)
158 void ValidPerAxisQuantizedDataType(
const TensorInfo& tensor,
const std::string& descName,
const std::string& tensorName)
162 tensor.
GetDataType() != DataType::QuantizedSymm8PerAxis)
165 ": Expected data type which supports per-axis quantization scheme but got " +
172 void ValidateTensorQuantizationSpace(
const TensorInfo& first,
174 const std::string& descName,
175 std::string
const& firstName,
176 std::string
const& secondName)
188 if (firstDataType != secondDataType)
191 " must be of the same quantized type, " +
199 " must have the same quantization space, " +
208 void ValidateBiasTensorQuantization(
const TensorInfo& biasTensor,
211 const std::string& descName)
214 auto VerifyBiasQuantizationScale = [&descName](
float biasScale,
float expectedScale) ->
void 216 constexpr
float tolerance = 0.0001f;
217 if (std::abs(biasScale - expectedScale) > tolerance)
220 ARMNN_LOG(
warning) << std::setprecision(6) << descName <<
": Expected " << expectedScale <<
221 " for bias quantization scale (product of input and weight scales), but got " <<
222 biasScale <<
". Using scale provided.";
238 if (weightScales.size() != biasScales.size())
240 std::stringstream msg;
241 msg << descName <<
": Expected matching number of per-axis quantization scales for weights and bias, " 242 <<
"but got different values. This is currently unsupported: weights=" << weightScales.size()
243 <<
", biases=" << biasScales.size();
247 for (
size_t i = 0ul; i < biasScales.size(); ++i)
250 VerifyBiasQuantizationScale(biasScales[i], expectedScale);
262 void ValidateTensors(
const std::vector<ITensorHandle*>& vec,
263 unsigned int numExpected,
264 const std::string& descName,
265 const std::string& varName)
267 if (vec.empty() && numExpected > 0)
272 for (
unsigned int i = 0; i < numExpected; ++i)
282 void ValidateBroadcastTensorShapesMatch(
const TensorInfo& first,
285 std::string
const& descName,
286 std::string
const& firstName,
287 std::string
const& secondName)
294 + firstName +
" & " + secondName
295 +
" must have the same number of dimensions in order to be broadcasted");
298 std::vector<uint32_t> outputDims(numDims, 0u);
299 for (uint32_t i = 0; i < numDims; i++)
302 const bool dimsNotOne = (first.
GetShape()[i] != 1) && (second.
GetShape()[i] != 1);
303 if (dimsNotEqual && dimsNotOne)
309 TensorShape broadcastShape =
TensorShape(armnn::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
310 if (broadcastShape != output.
GetShape())
313 + firstName +
" & " + secondName
314 +
" does not match the output shape");
320 const std::vector<armnn::DataType>& supportedTypes,
321 std::string
const& descName)
323 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.
GetDataType());
324 if (iterator == supportedTypes.end())
331 void ValidateTensorDataTypesMatch(
const TensorInfo& first,
333 std::string
const& descName,
334 std::string
const& firstName,
335 std::string
const& secondName)
340 " must have identical data types.");
345 void ValidateTensorNumElementsMatch(
const TensorInfo& first,
347 std::string
const& descName,
348 std::string
const& firstName,
349 std::string
const& secondName)
354 " must have the same number of elements.");
358 void ValidateWeightDataType(
const TensorInfo& inputInfo,
360 const std::string& descName)
366 const std::vector<DataType> validTypes =
371 DataType::QuantizedSymm8PerAxis
375 ValidateDataTypes(weightInfo, validTypes, descName);
379 ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName,
"input",
"weight");
383 void ValidatePerAxisQuantizationDimension(
const TensorInfo& tensorInfo,
384 const std::string& descName,
385 const std::string& tensorName)
391 "not set on tensor {1}.", descName, tensorName));
394 if (quantizationDim.
value() != 0)
397 "{0}: Quantization dimension for per-axis quantization expected to be 0 on tensor {1}, " 398 "but got: {2}", descName, tensorName, quantizationDim.
value()));
402 void ValidatePerAxisQuantizationOffset(
const TensorInfo& tensorInfo,
403 const std::string& descName,
404 const std::string& tensorName)
407 if (quantizationOffset != 0)
410 "{0}: Quantization offset for per-axis quantization expected to be 0 on tensor {1}, but got: {2}",
411 descName, tensorName, quantizationOffset));
415 void ValidatePerAxisQuantization(
const TensorInfo& inputInfo,
419 const std::string& descName)
426 const bool canHavePerAxisQuantization = (
IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;
428 if (!canHavePerAxisQuantization)
431 "{0}: Per-axis quantization parameters set on tensor {1}, but data type does not support " 432 "per-axis quantization.", descName,
"weight"));
436 ValidPerAxisQuantizedDataType(weightInfo, descName,
"weight");
437 ValidatePerAxisQuantizationDimension(weightInfo, descName,
"weight");
438 ValidatePerAxisQuantizationOffset(weightInfo, descName,
"weight");
446 "{}: Per-axis quantization parameters not set on bias tensor, " 447 "despite being set on weight tensor.", descName));
450 ValidateTensorDataType(biasInfo, DataType::Signed32, descName,
"bias");
451 ValidatePerAxisQuantizationDimension(biasInfo, descName,
"bias");
452 ValidatePerAxisQuantizationOffset(biasInfo, descName,
"bias");
460 unsigned int numExpectedIn,
unsigned int numExpectedOut)
const 462 ValidateTensors(
m_Inputs, numExpectedIn, descName,
"input");
463 ValidateTensors(
m_Outputs, numExpectedOut, descName,
"output");
469 const std::string descriptorName{
"MapQueueDescriptor"};
471 ValidateNumInputs(workloadInfo, descriptorName, 1);
472 ValidateNumOutputs(workloadInfo, descriptorName, 0);
474 for (
unsigned int i = 0; i <
m_Inputs.size(); ++i)
479 fmt::format(
"{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
487 const std::string descriptorName{
"UnmapQueueDescriptor"};
489 ValidateNumInputs(workloadInfo, descriptorName, 1);
490 ValidateNumOutputs(workloadInfo, descriptorName, 0);
492 for (
unsigned int i = 0; i <
m_Inputs.size(); ++i)
497 fmt::format(
"{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
505 const std::string descriptorName{
"MemCopyQueueDescriptor"};
507 ValidateNumInputs(workloadInfo, descriptorName, 1);
508 ValidateNumOutputs(workloadInfo, descriptorName , 1);
513 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
514 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
519 "{0}: Number of inputs ({1}) does not match the number of outputs ({2}).",
523 for (
unsigned int i = 0; i <
m_Inputs.size(); ++i)
528 "{0}: Invalid NULL input {1}.", descriptorName, i));
541 ValidateNumInputs(workloadInfo,
"MemImportQueueDescriptor", 1);
542 ValidateNumOutputs(workloadInfo,
"MemImportQueueDescriptor" , 1);
554 "Number of input infos ({0}) does not match the number of output infos ({1})",
564 "Number of elements for tensor input and output {} does not match", i ));
576 "Number of inputs ({0}) does not match the number of outputs ({1})",
580 for (
unsigned int i = 0; i <
m_Inputs.size(); ++i)
597 ValidateNumInputs(workloadInfo,
"MemSyncQueueDescriptor", 1);
598 ValidateNumOutputs(workloadInfo,
"MemSyncQueueDescriptor" , 1);
619 const std::string descriptorName{
"ActivationQueueDescriptor"};
621 ValidateNumInputs(workloadInfo, descriptorName, 1);
622 ValidateNumOutputs(workloadInfo, descriptorName, 1);
627 std::vector<DataType> supportedTypes =
637 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
638 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
639 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
644 const std::string descriptorName{
"ArgMinMaxQueueDescriptor"};
646 ValidateNumInputs(workloadInfo, descriptorName, 1);
647 ValidateNumOutputs(workloadInfo, descriptorName, 1);
658 std::vector<DataType> supportedInputTypes =
670 ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);
672 auto inputShape = inputTensorInfo.
GetShape();
673 auto outputShape = outputTensorInfo.
GetShape();
678 const std::string outputShapeError{
": Output tensor shape does not match shape inferred from input tensor."};
681 if (inputShape.GetNumDimensions() == 1)
683 if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
690 for (
unsigned int i = 0; i < unsignedAxis; ++i)
692 if (outputShape[i] != inputShape[i])
698 for (
auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
700 if (outputShape[i - 1] != inputShape[i])
710 const std::string descriptorName{
"CastQueueDescriptor"};
712 ValidateNumInputs(workloadInfo, descriptorName, 1);
713 ValidateNumOutputs(workloadInfo, descriptorName, 1);
718 std::vector<DataType> supportedTypes =
731 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
732 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
737 const std::string descriptorName{
"SoftmaxQueueDescriptor"};
739 ValidateNumInputs(workloadInfo, descriptorName, 1);
740 ValidateNumOutputs(workloadInfo, descriptorName, 1);
745 std::vector<DataType> supportedTypes =
755 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
756 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
757 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
762 const std::string descriptorName{
"SplitterQueueDescriptor"};
764 ValidateNumInputs(workloadInfo, descriptorName, 1);
767 std::vector<DataType> supportedTypes =
783 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
785 const std::string outputName =
"output_" + std::to_string(i);
786 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input", outputName);
797 descriptorName +
": Number of split windows " 798 "has to match number of workloadInfo.m_OutputTensorInfos. " 799 "Number of windows: " +
800 to_string(m_ViewOrigins.size()) +
801 ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.
m_OutputTensorInfos.size()));
806 for(
unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
813 "have the same dimensionality as the input tensor. " 814 "Window origin (index: " +
815 to_string(w) +
") has " + to_string(e.
m_Origin.size()) +
816 " dimensions, the input " 818 to_string(inputDims) +
" dimensions.");
820 for (
unsigned int i = 0; i < e.
m_Origin.size(); ++i)
826 "be smaller or equal than the size of the input in that coord.");
834 const std::string descriptorName{
"ConcatQueueDescriptor"};
836 ValidateNumOutputs(workloadInfo, descriptorName, 1);
856 if(m_Parameters.GetConcatAxis() > workloadInfo.
m_InputTensorInfos[0].GetShape().GetNumDimensions())
861 if (workloadInfo.
m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
869 descriptorName +
": Number of split windows " 870 "has to match number of workloadInfo.m_InputTensorInfos. " 871 "Number of windows: " +
872 to_string(m_ViewOrigins.size()) +
873 ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.
m_InputTensorInfos.size()));
878 for(
unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
882 if (e.
m_Origin.size() != outputDims)
885 "have the same dimensionality as the output tensor. " 886 "Window origin (index: " +
887 to_string(w) +
") has " + to_string(e.
m_Origin.size()) +
888 " dimensions, the output " 890 to_string(outputDims) +
" dimensions.");
893 for (
unsigned int i = 0; i < e.
m_Origin.size(); ++i)
899 "be smaller or equal than the size of the output in that coord.");
905 std::vector<DataType> supportedTypes =
921 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
923 const std::string inputName =
"input_" + std::to_string(i);
924 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName,
"output");
930 const std::string descriptorName{
"StackQueueDescriptor"};
932 ValidateNumOutputs(workloadInfo, descriptorName, 1);
940 const TensorShape& inputShape = m_Parameters.m_InputShape;
959 "than the number of input dimensions.");
964 for (
unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
966 if (outputShape[i] != inputShape[i])
969 "match shape inferred from input tensor.");
973 if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
976 "match shape inferred from input tensor.");
979 for (
unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.
GetNumDimensions() + 1; ++i)
981 if (outputShape[i] != inputShape[i-1])
984 "match shape inferred from input tensor.");
994 std::vector<DataType> supportedTypes =
1006 ValidateDataTypes(workloadInfo.
m_InputTensorInfos[0], supportedTypes, descriptorName);
1014 "input_" + std::to_string(i));
1026 const std::string descriptorName{
"FillQueueDescriptor"};
1028 ValidateNumInputs(workloadInfo, descriptorName, 1);
1029 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1034 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 1,
"input");
1036 std::vector<DataType> supportedTypes =
1044 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1049 const std::string descriptorName{
"FullyConnectedQueueDescriptor"};
1051 uint32_t numInputs = 1;
1052 if (!m_Parameters.m_ConstantWeights)
1055 if (m_Parameters.m_BiasEnabled)
1060 ValidateNumInputs(workloadInfo, descriptorName, numInputs);
1061 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1066 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2,
"output");
1074 if (m_Parameters.m_ConstantWeights)
1076 ValidatePointer(m_Weight, descriptorName,
"weight");
1077 weightTensorInfo = m_Weight->GetTensorInfo();
1083 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2,
"weight");
1085 if (m_Parameters.m_BiasEnabled)
1088 if (m_Parameters.m_ConstantWeights)
1090 ValidatePointer(m_Bias, descriptorName,
"bias");
1091 biasTensorInfo = m_Bias->GetTensorInfo();
1098 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1100 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1,
"bias");
1104 std::vector<DataType> supportedTypes =
1114 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1122 "for BFloat16 input.");
1127 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1133 const std::string descriptorName{
"NormalizationQueueDescriptor"};
1135 ValidateNumInputs(workloadInfo, descriptorName, 1);
1136 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1142 std::vector<DataType> supportedTypes =
1152 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1154 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1156 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1161 const std::string descriptorName{
"AdditionQueueDescriptor"};
1163 ValidateNumInputs(workloadInfo, descriptorName, 2);
1164 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1170 std::vector<DataType> supportedTypes =
1181 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1182 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1183 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1185 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
1186 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName,
"input_1",
"output");
1188 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1198 const std::string descriptorName{
"MultiplicationQueueDescriptor"};
1200 ValidateNumInputs(workloadInfo, descriptorName, 2);
1201 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1207 std::vector<DataType> supportedTypes =
1218 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1219 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1220 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1222 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
1223 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName,
"input_1",
"output");
1225 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1235 const std::string descriptorName{
"BatchNormalizationQueueDescriptor"};
1237 ValidateNumInputs(workloadInfo, descriptorName, 1);
1238 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1243 std::vector<DataType> supportedTypes =
1253 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1254 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1256 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1257 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1259 ValidatePointer(m_Mean, descriptorName,
"mean");
1260 ValidatePointer(m_Variance, descriptorName,
"variance");
1261 ValidatePointer(m_Beta, descriptorName,
"beta");
1262 ValidatePointer(m_Gamma, descriptorName,
"gamma");
1264 const TensorInfo& mean = m_Mean->GetTensorInfo();
1265 const TensorInfo& variance = m_Variance->GetTensorInfo();
1266 const TensorInfo& beta = m_Beta->GetTensorInfo();
1267 const TensorInfo& gamma = m_Gamma->GetTensorInfo();
1269 ValidateTensorNumDimensions(mean, descriptorName, 1,
"mean");
1270 ValidateTensorNumDimensions(variance, descriptorName, 1,
"variance");
1271 ValidateTensorNumDimensions(beta, descriptorName, 1,
"beta");
1272 ValidateTensorNumDimensions(gamma, descriptorName, 1,
"gamma");
1274 ValidateTensorShapesMatch(mean, variance, descriptorName,
"mean",
"variance");
1275 ValidateTensorShapesMatch(mean, beta, descriptorName,
"mean",
"beta");
1276 ValidateTensorShapesMatch(mean, gamma, descriptorName,
"mean",
"gamma");
1281 const std::string descriptorName{
"Convolution2dQueueDescriptor"};
1283 ValidateNumInputs(workloadInfo, descriptorName, 1);
1284 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1289 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1290 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1292 ValidatePointer(m_Weight, descriptorName,
"weight");
1294 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1295 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4,
"weight");
1297 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1300 if (m_Parameters.m_BiasEnabled)
1302 ValidatePointer(m_Bias, descriptorName,
"bias");
1304 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1308 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1311 if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1314 fmt::format(
"{}: strideX (provided {}) and strideY (provided {}) " 1315 "cannot be either negative or 0.",
1316 descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1319 ValidatePerAxisQuantization(inputTensorInfo,
1322 optionalBiasTensorInfo,
1325 std::vector<DataType> supportedTypes =
1336 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1344 "for BFloat16 input.");
1349 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1355 const std::string descriptorName{
"DepthwiseConvolution2dQueueDescriptor"};
1357 ValidateNumInputs(workloadInfo, descriptorName, 1);
1358 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1363 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1364 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1366 ValidatePointer(m_Weight, descriptorName,
"weight");
1368 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1369 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4,
"weight");
// Depthwise convolution: both dilation factors must be at least 1.
1371 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1374 fmt::format(
"{}: dilationX (provided {}) and dilationY (provided {}) " 1375 "cannot be smaller than 1.",
// BUG(review): m_Parameters.m_DilationX is passed twice below — the final
// fmt::format argument should be m_Parameters.m_DilationY so the error
// message reports the actual Y dilation value.
1376 descriptorName, m_Parameters.m_DilationX, m_Parameters.m_DilationX));
1379 if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1382 fmt::format(
"{}: strideX (provided {}) and strideY (provided {}) " 1383 "cannot be either negative or 0.",
1384 descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1387 const unsigned int channelIndex = (m_Parameters.m_DataLayout ==
DataLayout::NCHW) ? 1 : 3;
1391 const unsigned int numWeightChannelMultiplier = weightTensorInfo.
GetShape()[0];
1392 const unsigned int numWeightInputChannels = weightTensorInfo.
GetShape()[1];
1393 const unsigned int numWeightOutputChannels = outputTensorInfo.
GetShape()[channelIndex];
1394 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1397 "{0}: output_channels (provided {1}) should be equal to input_channels (provided {2}) " 1398 "multiplied by channel_multiplier (provided {3}).",
1399 descriptorName, numWeightOutputChannels, numWeightInputChannels, numWeightChannelMultiplier));
1402 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1405 if (m_Parameters.m_BiasEnabled)
1407 ValidatePointer(m_Bias, descriptorName,
"bias");
1409 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1412 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1415 ValidatePerAxisQuantization(inputTensorInfo,
1418 optionalBiasTensorInfo,
1421 std::vector<DataType> supportedTypes =
1431 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1432 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1437 const std::string descriptorName{
"PermuteQueueDescriptor"};
1439 ValidateNumInputs(workloadInfo, descriptorName, 1);
1440 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1447 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.
GetSize(),
"input");
1448 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.
GetSize(),
"output");
1450 for (
unsigned int i = 0u; i < mapping.
GetSize(); ++i)
1452 if (inputTensorInfo.
GetShape()[i] != outputTensorInfo.
GetShape()[mapping[i]])
1455 " (=" + to_string(inputTensorInfo.
GetShape()[i]) +
") " +
1456 "must match dst dimension " + to_string(mapping[i]) +
1457 " (=" + to_string(outputTensorInfo.
GetShape()[mapping[i]]) +
")");
1461 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1466 const std::string descriptorName{
"Pooling2dQueueDescriptor"};
1468 ValidateNumInputs(workloadInfo, descriptorName, 1);
1469 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1474 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1475 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1477 std::vector<DataType> supportedTypes =
1487 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1488 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1493 const std::string descriptorName{
"ResizeBilinearQueueDescriptor"};
1495 ValidateNumInputs(workloadInfo, descriptorName, 1);
1496 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1501 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1502 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1504 std::vector<DataType> supportedTypes =
1514 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1515 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1518 const unsigned int inputBatchSize = inputTensorInfo.
GetShape()[0];
1519 const unsigned int outputBatchSize = outputTensorInfo.
GetShape()[0];
1520 if (inputBatchSize != outputBatchSize)
1523 fmt::format(
"{}: Input batch size ({}) does not match output batch size ({})",
1524 descriptorName, inputBatchSize, outputBatchSize));
1530 if (inputChannelCount != outputChannelCount)
1533 fmt::format(
"{}: Input channel count ({}) does not match output channel count ({})",
1534 descriptorName, inputChannelCount, outputChannelCount));
1540 const std::string descriptorName{
"ResizeQueueDescriptor"};
1542 ValidateNumInputs(workloadInfo, descriptorName, 1);
1543 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1548 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1549 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1551 std::vector<DataType> supportedTypes =
1561 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1562 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1565 const unsigned int inputBatchSize = inputTensorInfo.
GetShape()[0];
1566 const unsigned int outputBatchSize = outputTensorInfo.
GetShape()[0];
1567 if (inputBatchSize != outputBatchSize)
1570 fmt::format(
"{}: Input batch size ({}) does not match output batch size ({})",
1571 descriptorName, inputBatchSize, outputBatchSize));
1577 if (inputChannelCount != outputChannelCount)
1580 fmt::format(
"{}: Input channel count ({}) does not match output channel count ({})",
1581 descriptorName, inputChannelCount, outputChannelCount));
1587 const std::string descriptorName{
"FakeQuantizationQueueDescriptor"};
1589 ValidateNumInputs(workloadInfo, descriptorName, 1);
1590 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1595 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2,
"input");
1596 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2,
"output");
1598 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1600 if (m_Parameters.m_Min > m_Parameters.m_Max)
1608 const std::string descriptorName{
"InstanceNormalizationQueueDescriptor"};
1610 ValidateNumInputs(workloadInfo, descriptorName, 1);
1611 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1621 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1624 std::vector<DataType> supportedTypes =
1631 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1632 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1637 const std::string descriptorName{
"L2NormalizationQueueDescriptor"};
1639 ValidateNumInputs(workloadInfo, descriptorName, 1);
1640 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1650 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1653 std::vector<DataType> supportedTypes =
1663 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1664 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1669 const std::string descriptorName{
"LogSoftmaxQueueDescriptor"};
1671 ValidateNumInputs(workloadInfo, descriptorName, 1);
1672 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1677 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1679 std::vector<DataType> supportedTypes =
1686 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1687 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1692 const std::string descriptorName{
"ConstantQueueDescriptor"};
1694 ValidateNumInputs(workloadInfo, descriptorName, 0);
1695 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1703 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName,
"constant",
"output");
1706 std::vector<DataType> supportedTypes =
1718 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1723 const std::string descriptorName{
"ReshapeQueueDescriptor"};
1725 ValidateNumInputs(workloadInfo, descriptorName, 1);
1726 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1731 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1734 std::vector<DataType> supportedTypes =
1746 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1747 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1752 const std::string descriptorName{
"SpaceToBatchNdQueueDescriptor"};
1754 ValidateNumInputs(workloadInfo, descriptorName, 1);
1755 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1760 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1761 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1763 if (m_Parameters.m_BlockShape.size() != 2)
1768 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1771 "dimensions as Block Shape.");
1776 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1777 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
1781 const unsigned int inputWidth = inputShape[dimensionIndices.
GetWidthIndex()] +
1782 widthPad.first + widthPad.second;
1783 const unsigned int inputHeight = inputShape[dimensionIndices.
GetHeightIndex()] +
1784 heightPad.first + heightPad.second;
1786 const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1788 const unsigned int numOutputElements = outputTensorInfo.
GetNumElements();
1790 if (numOutputElements != numInputElements)
1793 to_string(numInputElements) +
" after padding but output tensor has " +
1794 to_string(numOutputElements) +
" elements.");
1797 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1800 "divisible by Block Shape in all spatial dimensions");
1803 std::vector<DataType> supportedTypes =
1813 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1814 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1819 const std::string descriptorName{
"SpaceToDepthQueueDescriptor"};
1821 ValidateNumInputs(workloadInfo, descriptorName, 1);
1822 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1827 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1828 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1830 std::vector<DataType> supportedTypes =
1840 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1841 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1843 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1845 if (m_Parameters.m_BlockSize == 0)
1851 const unsigned int wIndex = dimensionIndices.
GetWidthIndex();
1856 if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
1859 "by block size in all spatial dimensions");
1863 if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
1866 "must be divisible by the square of block size." );
1872 const std::string descriptorName{
"FloorQueueDescriptor"};
1874 ValidateNumInputs(workloadInfo, descriptorName, 1);
1875 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1880 std::vector<DataType> supportedTypes =
1888 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1890 if (inputTensorInfo != outputTensorInfo)
1900 const std::string descriptorName{
"LstmQueueDescriptor"};
1912 std::vector<DataType> supportedTypes =
1921 ValidateDataTypes(workloadInfo.
m_InputTensorInfos[0], supportedTypes, descriptorName);
1930 "input_" + std::to_string(i));
1937 "LstmQueueDescriptor",
1939 "output_" + std::to_string(i));
1945 if (m_Parameters.m_ClippingThresCell < 0.0f)
1949 if (m_Parameters.m_ClippingThresProj < 0.0f)
1958 ValidatePointer(m_InputToOutputWeights,
"Null pointer check",
"InputToOutputWeights");
1959 const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1960 ValidatePointer(m_RecurrentToOutputWeights,
"Null pointer check",
"RecurrentToOutputWeights");
1961 const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1964 ValidateTensorNumDimNumElem(workloadInfo.
m_InputTensorInfos[0], 2, (n_batch * n_input),
1965 descriptorName +
" input_0");
1967 ValidateTensorNumDimNumElem(workloadInfo.
m_InputTensorInfos[1], 2, (n_batch * n_output),
1968 descriptorName +
" input_1");
1970 ValidateTensorNumDimNumElem(workloadInfo.
m_InputTensorInfos[2], 2, (n_batch * n_cell),
1971 descriptorName +
" input_2");
1973 unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1974 ValidateTensorNumDimNumElem(workloadInfo.
m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1975 descriptorName +
" output_0");
1978 descriptorName +
" output_1");
1981 descriptorName +
" output_2");
1984 descriptorName +
" output_3");
1988 if ( m_InputToInputWeights )
1990 ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1991 (n_cell * n_input),
"InputLayerNormWeights");
1994 ValidatePointer(m_InputToForgetWeights,
"Null pointer check",
"InputToForgetWeights");
1995 ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1996 (n_cell * n_input),
"InputToForgetWeights");
1998 ValidatePointer(m_InputToCellWeights,
"Null pointer check",
"InputToCellWeights");
1999 ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
2000 (n_cell * n_input),
"InputToCellWeights");
2002 if ( m_RecurrentToInputWeights )
2004 ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
2005 (n_cell * n_output),
"RecurrentToInputWeights");
2008 ValidatePointer(m_RecurrentToForgetWeights,
"Null pointer check",
"RecurrentToForgetWeights");
2009 ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
2010 (n_cell * n_output),
"RecurrentToForgetWeights");
2012 ValidatePointer(m_RecurrentToCellWeights,
"Null pointer check",
"RecurrentToCellWeights");
2013 ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
2014 (n_cell * n_output),
"RecurrentToCellWeights");
2018 bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
2019 !m_Parameters.m_CifgEnabled) ||
2020 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
2021 m_Parameters.m_CifgEnabled));
2022 if (!cifg_weights_all_or_none)
2025 "RecurrentToInputWeights must either both be present (regular LSTM) " 2026 "or both not present (CIFG-LSTM). In addition CifgEnable must be set " 2030 if ( m_CellToInputWeights )
2032 ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
2033 n_cell,
"CellToInputWeights");
2035 if ( m_CellToForgetWeights )
2037 ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
2038 n_cell,
"CellToForgetWeights");
2040 if ( m_CellToOutputWeights )
2042 ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
2043 n_cell,
"CellToOutputWeights");
2047 bool peephole_weights_all_or_none =
2048 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
2049 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
2050 || ( !m_CellToInputWeights && !m_CellToForgetWeights
2051 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
2052 if (!peephole_weights_all_or_none)
2058 if (m_Parameters.m_CifgEnabled)
2060 if (m_InputGateBias)
2067 if (!m_InputGateBias)
2070 "must be present.");
2072 ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
2073 n_cell,
"InputGateBias");
2076 ValidatePointer(m_ForgetGateBias,
"Null pointer check",
"ForgetGateBias");
2077 ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell,
"ForgetGateBias");
2079 ValidatePointer(m_CellBias,
"Null pointer check",
"CellBias");
2080 ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell,
"CellBias");
2082 ValidatePointer(m_OutputGateBias,
"Null pointer check",
"OutputGateBias");
2083 ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell,
"OutputGateBias");
2085 if (m_ProjectionWeights)
2087 ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
2088 (n_cell * n_output),
"ProjectionWeights");
2090 if (m_ProjectionBias)
2092 ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output,
"ProjectionBias");
2099 bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
2100 !m_Parameters.m_ProjectionEnabled)
2101 || (m_ProjectionWeights && !m_ProjectionBias &&
2102 m_Parameters.m_ProjectionEnabled)
2103 || (m_ProjectionWeights && m_ProjectionBias &&
2104 m_Parameters.m_ProjectionEnabled));
2105 if (!projecton_tensors_consistent)
2114 if (m_InputLayerNormWeights)
2116 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell,
"InputLayerNormWeights");
2118 if (m_ForgetLayerNormWeights)
2120 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell,
"ForgetLayerNormWeights");
2122 if (m_CellLayerNormWeights)
2124 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell,
"CellLayerNormWeights");
2126 if (m_OutputLayerNormWeights)
2128 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell,
"OutputLayerNormWeights");
2131 if (m_Parameters.m_LayerNormEnabled)
2133 if (!m_Parameters.m_CifgEnabled)
2135 if (!m_InputLayerNormWeights)
2138 "disabled but InputLayerNormWeights are not present");
2140 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
2141 1, n_cell,
"InputLayerNormWeights");
2143 else if (m_InputLayerNormWeights)
2149 ValidatePointer(m_ForgetLayerNormWeights,
"Null pointer check layer normalisation enabled",
2150 "ForgetLayerNormWeights");
2151 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell,
"ForgetLayerNormWeights");
2153 ValidatePointer(m_OutputLayerNormWeights,
"Null pointer check layer normalisation enabled",
2154 "OutputLayerNormWeights");
2155 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell,
"OutputLayerNormWeights");
2157 ValidatePointer(m_CellLayerNormWeights,
"Null pointer check layer normalisation enabled",
2158 "CellLayerNormWeights");
2159 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell,
"CellLayerNormWeights");
2161 else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
2164 "normalisation weights are present.");
2170 const std::string descriptorName{
"ConvertBf16ToFp32QueueDescriptor"};
2172 ValidateNumInputs(workloadInfo, descriptorName, 1);
2173 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2188 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2193 const std::string descriptorName{
"ConvertFp32ToBf16QueueDescriptor"};
2195 ValidateNumInputs(workloadInfo, descriptorName, 1);
2196 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2211 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2216 const std::string descriptorName{
"ConvertFp32ToFp16QueueDescriptor"};
2218 ValidateNumInputs(workloadInfo, descriptorName, 1);
2219 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2234 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2239 const std::string descriptorName{
"ConvertFp16ToFp32QueueDescriptor"};
2241 ValidateNumInputs(workloadInfo, descriptorName, 1);
2242 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2257 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2262 const std::string descriptorName{
"DivisionQueueDescriptor"};
2264 ValidateNumInputs(workloadInfo, descriptorName, 2);
2265 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2271 std::vector<DataType> supportedTypes =
2282 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2283 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2284 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2286 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2296 const std::string descriptorName{
"SubtractionQueueDescriptor"};
2298 ValidateNumInputs(workloadInfo, descriptorName, 2);
2299 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2305 std::vector<DataType> supportedTypes =
2316 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2317 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2318 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2320 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2330 const std::string descriptorName{
"MaximumQueueDescriptor"};
2332 ValidateNumInputs(workloadInfo, descriptorName, 2);
2333 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2339 std::vector<DataType> supportedTypes =
2350 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2351 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2352 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2354 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2364 const std::string descriptorName{
"MeanQueueDescriptor"};
2366 ValidateNumInputs(workloadInfo, descriptorName, 1);
2367 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2372 std::vector<DataType> supportedTypes =
2384 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2385 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2387 if (m_Parameters.m_KeepDims)
2389 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.
GetNumDimensions(),
"output");
2391 else if (m_Parameters.m_Axis.empty())
2393 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1,
"output");
2397 unsigned int outputDim =
2399 ValidateTensorNumDimensions(outputTensorInfo,
2401 outputDim > 0 ? outputDim : 1,
2408 const std::string descriptorName{
"PadQueueDescriptor"};
2410 ValidateNumInputs(workloadInfo, descriptorName, 1);
2411 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2417 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.
GetNumDimensions(),
"output");
2422 "as there are dimensions in the input tensor that is " +
2424 " not " + std::to_string(m_Parameters.m_PadList.size()) +
" entries.");
2430 const std::string descriptorName{
"QuantizeQueueDescriptor"};
2432 ValidateNumInputs(workloadInfo, descriptorName, 1);
2433 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2438 std::vector<DataType> supportedTypes =
2449 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2459 const std::string descriptorName{
"BatchToSpaceNdQueueDescriptor"};
2461 ValidateNumInputs(workloadInfo, descriptorName, 1);
2462 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2467 std::vector<DataType> supportedTypes =
2477 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2478 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2483 const std::string descriptorName{
"StridedSliceQueueDescriptor"};
2485 ValidateNumInputs(workloadInfo, descriptorName, 1);
2486 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2491 std::vector<DataType> supportedTypes =
2501 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2502 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2504 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2513 if (m_Parameters.m_Begin.size() != rank)
2518 if (m_Parameters.m_End.size() != rank)
2523 if (m_Parameters.m_Stride.size() != rank)
2529 for (
auto& stride : m_Parameters.m_Stride)
2540 const std::string descriptorName{
"MinimumQueueDescriptor"};
2542 ValidateNumInputs(workloadInfo, descriptorName, 2);
2543 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2549 std::vector<DataType> supportedTypes =
2560 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2561 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2562 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2564 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2574 const std::string descriptorName{
"DebugQueueDescriptor"};
2576 ValidateNumInputs(workloadInfo, descriptorName, 1);
2577 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2582 const std::string descriptorName{
"EqualQueueDescriptor"};
2584 ValidateNumInputs(workloadInfo, descriptorName, 2);
2585 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2591 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2606 const std::string descriptorName{
"GreaterQueueDescriptor"};
2608 ValidateNumInputs(workloadInfo, descriptorName, 2);
2609 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2615 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2630 const std::string descriptorName{
"RsqrtQueueDescriptor"};
2632 ValidateNumInputs(workloadInfo, descriptorName, 1);
2633 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2638 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2640 std::vector<DataType> supportedTypes =
2650 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2651 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2656 const std::string descriptorName{
"GatherQueueDescriptor"};
2658 ValidateNumInputs(workloadInfo, descriptorName, 2);
2659 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2670 std::vector<DataType> supportedTypes =
2681 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2683 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2686 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim,
"output");
2691 const std::string& descriptorName{
"DetectionPostProcessQueueDescriptor"};
2693 ValidateNumInputs(workloadInfo, descriptorName, 2);
2701 if (m_Anchors ==
nullptr)
2715 ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3,
"box encodings");
2716 ValidateTensorNumDimensions(scoresInfo, descriptorName, 3,
"scores");
2717 ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2,
"anchors");
2719 const std::vector<DataType> supportedInputTypes =
2729 ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2730 ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2731 ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2733 ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3,
"detection boxes");
2734 ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2,
"detection scores");
2735 ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2,
"detection classes");
2736 ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1,
"num detections");
2739 ValidateTensorDataType(detectionBoxesInfo,
DataType::Float32, descriptorName,
"detection boxes");
2740 ValidateTensorDataType(detectionScoresInfo,
DataType::Float32, descriptorName,
"detection scores");
2741 ValidateTensorDataType(detectionClassesInfo,
DataType::Float32, descriptorName,
"detection classes");
2742 ValidateTensorDataType(numDetectionsInfo,
DataType::Float32, descriptorName,
"num detections");
2744 if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2747 "must be positive and less than or equal to 1.");
2750 if (scoresInfo.
GetShape()[2] != m_Parameters.m_NumClasses + 1)
2753 "should be equal to number of classes + 1.");
2759 const std::string& descriptorName{
"DequantizeQueueDescriptor"};
2761 ValidateNumInputs(workloadInfo, descriptorName, 1);
2762 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2772 std::vector<DataType> supportedTypes =
2779 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2784 const std::string& descriptorName{
"MergeQueueDescriptor"};
2786 ValidateNumInputs(workloadInfo, descriptorName, 2);
2787 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2793 ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
2794 ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName,
"input_0",
"output");
2796 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
2797 ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName,
"input_0",
"output");
2802 const std::string& descriptorName{
"SwitchQueueDescriptor"};
2804 ValidateNumInputs(workloadInfo, descriptorName, 2);
2805 ValidateNumOutputs(workloadInfo, descriptorName, 2);
2813 std::vector<DataType> supportedTypes =
2822 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2823 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2825 ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2826 ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
2828 ValidateTensorShapesMatch(inputTensorInfo0,
2834 ValidateTensorShapesMatch(inputTensorInfo0,
2848 const std::string& descriptorName{
"PreluQueueDescriptor"};
2850 ValidateNumInputs(workloadInfo, descriptorName, 2);
2851 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2857 std::vector<DataType> supportedTypes
2867 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2868 ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2870 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2872 ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName,
"input",
"alpha");
2873 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"ouptut");
2875 ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2885 const std::string descriptorName{
"TransposeConvolution2dQueueDescriptor"};
2887 ValidateNumInputs(workloadInfo, descriptorName, 1);
2888 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2893 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
2894 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
2896 ValidatePointer(m_Weight, descriptorName,
"weight");
2898 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2899 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4,
"weight");
2901 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
2904 if (m_Parameters.m_BiasEnabled)
2906 ValidatePointer(m_Bias, descriptorName,
"bias");
2908 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
2912 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2915 ValidatePerAxisQuantization(inputTensorInfo,
2918 optionalBiasTensorInfo,
2921 std::vector<DataType> supportedTypes =
2931 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2932 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2937 const std::string descriptorName{
"TransposeQueueDescriptor"};
2939 ValidateNumInputs(workloadInfo, descriptorName, 1);
2940 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2947 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.
GetSize(),
"input");
2948 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.
GetSize(),
"output");
2950 for (
unsigned int i = 0u; i < mapping.
GetSize(); ++i)
2952 if (inputTensorInfo.
GetShape()[mapping[i]] != outputTensorInfo.
GetShape()[i])
2955 " (=" + to_string(inputTensorInfo.
GetShape()[mapping[i]]) +
") " +
2956 "must match dst dimension " + to_string(i) +
2957 " (=" + to_string(outputTensorInfo.
GetShape()[i]) +
")");
2961 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2966 const std::string descriptorName{
"QLstmQueueDescriptor"};
2969 ValidateNumInputs(workloadInfo, descriptorName, 3);
2970 ValidateNumOutputs(workloadInfo, descriptorName, 3);
2982 std::vector<DataType> inputOutputSupportedTypes =
2987 std::vector<DataType> cellStateSupportedTypes =
2992 std::vector<DataType> weightsSupportedTypes =
2997 std::vector<DataType> layerNormPeepholeWeightsSupportedTypes =
3002 std::vector<DataType> biasSupportedTypes =
3008 ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3009 ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3010 ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3012 ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3013 ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3014 ValidateDataTypes(outputInfo, inputOutputSupportedTypes, descriptorName);
3017 ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName,
"input",
"outputStateIn");
3018 ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3019 "outputStateIn",
"outputStateOut");
3020 ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName,
"cellStateIn",
"cellStateOut");
3023 const uint32_t numBatches = inputInfo.GetShape()[0];
3024 const uint32_t inputSize = inputInfo.GetShape()[1];
3025 const uint32_t outputSize = outputStateInInfo.GetShape()[1];
3026 const uint32_t numUnits = cellStateInInfo.GetShape()[1];
3029 ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName +
" input");
3030 ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName +
" outputStateIn");
3031 ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * numUnits), descriptorName +
" cellStateIn");
3033 ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName +
" outputStateOut");
3034 ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * numUnits), descriptorName +
" cellStateOut");
3035 ValidateTensorNumDimNumElem(outputInfo, 2, (numBatches * outputSize), descriptorName +
" output");
3038 ValidatePointer(m_InputToForgetWeights, descriptorName,
"InputToForgetWeights");
3039 auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3040 ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (numUnits * inputSize),
" InputToForgetWeights");
3042 ValidatePointer(m_InputToCellWeights, descriptorName,
"InputToCellWeights");
3043 auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3044 ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (numUnits * inputSize),
" InputToCellWeights");
3046 ValidatePointer(m_InputToOutputWeights, descriptorName,
"InputToOutputWeights");
3047 auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3048 ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (numUnits * inputSize),
" InputToOutputWeights");
3050 ValidatePointer(m_RecurrentToForgetWeights, descriptorName,
"RecurrentToForgetWeights");
3051 auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3052 ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (numUnits * outputSize),
3053 " RecurrentToForgetWeights");
3055 ValidatePointer(m_RecurrentToCellWeights, descriptorName,
"RecurrentToCellWeights");
3056 auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3057 ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (numUnits * outputSize),
" RecurrentToCellWeights");
3059 ValidatePointer(m_RecurrentToOutputWeights, descriptorName,
"RecurrentToOutputWeights");
3060 auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3061 ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (numUnits * outputSize),
" RecurrentToCellWeights");
3064 ValidateDataTypes(inputToForgetWeightsInfo, weightsSupportedTypes, descriptorName);
3066 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToCellWeightsInfo, descriptorName,
3067 "inputToForgetWeights",
"inputToCellWeights");
3068 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3069 "inputToForgetWeights",
"inputToOutputWeights");
3071 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3072 "inputToForgetWeights",
"recurrentToForgeteights");
3073 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3074 "inputToForgetWeights",
"recurrentToCellWeights");
3075 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3076 "inputToForgetWeights",
"recurrentToOutputWeights");
3079 ValidatePointer(m_ForgetGateBias, descriptorName,
"ForgetGateBias");
3080 auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3081 ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, numUnits,
" ForgetGateBias");
3083 ValidatePointer(m_CellBias, descriptorName,
"CellBias");
3084 auto cellBiasInfo = m_CellBias->GetTensorInfo();
3085 ValidateTensorNumDimNumElem(cellBiasInfo, 1, numUnits,
" CellBias");
3087 ValidatePointer(m_OutputGateBias, descriptorName,
"OutputGateBias");
3088 auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3089 ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, numUnits,
" OutputGateBias");
3092 ValidateDataTypes(forgetGateBiasInfo, biasSupportedTypes, descriptorName);
3094 ValidateTensorDataTypesMatch(forgetGateBiasInfo, cellBiasInfo, descriptorName,
3095 "forgetGateBias",
"cellBias");
3096 ValidateTensorDataTypesMatch(forgetGateBiasInfo, outputGateBiasInfo, descriptorName,
3097 "forgetGateBias",
"outputGateBias");
3100 const bool allCifgParamsPresentOrNot = ((m_InputToInputWeights && m_RecurrentToInputWeights && m_InputGateBias &&
3101 !m_Parameters.m_CifgEnabled) ||
3102 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3103 !m_InputGateBias && m_Parameters.m_CifgEnabled));
3105 if (!allCifgParamsPresentOrNot)
3108 ": InputToInputWeights, RecurrentToInputWeights and InputGateBias must either all be present " 3109 "(CIFG disabled) or not be present at all (CIFG enabled). m_Parameters.m_CifgEnabled should be " 3110 "set appropriately.");
3113 if (!m_Parameters.m_CifgEnabled)
3116 auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3117 ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (numUnits * inputSize),
" InputToInputWeights");
3119 auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3120 ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (numUnits * outputSize),
3121 " RecurrentToInputWeights");
3123 auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3124 ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, numUnits,
" InputGateBias");
3127 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToInputWeightsInfo, descriptorName,
3128 "inputToForgetWeights",
"inputToInputWeights");
3129 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3130 "inputToForgetWeights",
"recurrentToInputWeights");
3131 ValidateTensorDataTypesMatch(forgetGateBiasInfo, inputGateBiasInfo, descriptorName,
3132 "forgetGateBias",
"inputGateBias");
3136 bool allPeepholeWeightsPresentOrNot =
3137 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
3138 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
3139 || (!m_CellToInputWeights && !m_CellToForgetWeights
3140 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
3142 if (!allPeepholeWeightsPresentOrNot)
3145 ": CellToInputWeights, CellToForgetWeights and CellToOutputWeights should all be present (Peephole " 3146 "enabled) or not be present at all (Peephole disabled). CellToInputWeights should only be present " 3147 "when Peephole is enabled and CIFG is disabled. m_Parameters.m_PeepholeEnabled should be set " 3151 if (m_Parameters.m_PeepholeEnabled)
3153 auto cellToForgetWeightsInfo = m_CellToForgetWeights->GetTensorInfo();
3154 ValidateTensorNumDimNumElem(cellToForgetWeightsInfo, 1, numUnits,
" cellToForgetWeights");
3155 ValidateDataTypes(cellToForgetWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3157 auto cellToOutputWeightsInfo = m_CellToOutputWeights->GetTensorInfo();
3158 ValidateTensorNumDimNumElem(cellToOutputWeightsInfo, 1, numUnits,
" cellToOutputWeights");
3159 ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToOutputWeightsInfo, descriptorName,
3160 "cellToForgetWeight",
"cellToOutputWeights");
3162 if (!m_Parameters.m_CifgEnabled)
3164 auto cellToInputWeightsInfo = m_CellToInputWeights->GetTensorInfo();
3165 ValidateTensorNumDimNumElem(cellToInputWeightsInfo, 1, numUnits,
" cellToInputWeights");
3166 ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToInputWeightsInfo, descriptorName,
3167 "cellToForgetWeights",
"cellToInputWeights");
3172 bool allLayerNormWeightsPresentOrNot =
3173 (((m_InputLayerNormWeights || m_Parameters.m_CifgEnabled) && m_ForgetLayerNormWeights
3174 && m_CellLayerNormWeights && m_OutputLayerNormWeights && m_Parameters.m_LayerNormEnabled)
3175 || (!m_InputLayerNormWeights && !m_ForgetLayerNormWeights && !m_CellLayerNormWeights
3176 && !m_OutputLayerNormWeights && !m_Parameters.m_LayerNormEnabled));
3178 if (!allLayerNormWeightsPresentOrNot)
3181 ": InputLayerNormWeights, ForgetLayerNormWeights, m_OutputLayerNormWeights " 3182 "and CellLayerNormWeights should all be present (Layer Norm enabled) or not " 3183 "be present at all (Layer Norm disabled). InputLayerNormWeights should " 3184 "only be present when Layer Norm is enabled and CIFG is disabled. " 3185 "m_Parameters.m_LayerNormEnabled should be set appropriately.");
3188 if (m_Parameters.m_LayerNormEnabled)
3190 auto forgetLayerNormWeightsInfo = m_ForgetLayerNormWeights->GetTensorInfo();
3191 ValidateTensorNumDimNumElem(forgetLayerNormWeightsInfo, 1, numUnits,
" forgetLayerNormWeights");
3192 ValidateDataTypes(forgetLayerNormWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3194 auto cellLayerNormWeightsInfo = m_CellLayerNormWeights->GetTensorInfo();
3195 ValidateTensorNumDimNumElem(cellLayerNormWeightsInfo, 1, numUnits,
" cellLayerNormWeights");
3196 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, cellLayerNormWeightsInfo, descriptorName,
3197 "forgetLayerNormWeights",
"cellLayerNormWeights");
3199 auto outputLayerNormWeightsInfo = m_OutputLayerNormWeights->GetTensorInfo();
3200 ValidateTensorNumDimNumElem(outputLayerNormWeightsInfo, 1, numUnits,
" outputLayerNormWeights");
3201 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, outputLayerNormWeightsInfo, descriptorName,
3202 "forgetLayerNormWeights",
"outputLayerNormWeights");
3204 if (!m_Parameters.m_CifgEnabled)
3206 auto inputLayerNormWeightsInfo = m_InputLayerNormWeights->GetTensorInfo();
3207 ValidateTensorNumDimNumElem(inputLayerNormWeightsInfo, 1, numUnits,
" inputLayerNormWeights");
3208 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, inputLayerNormWeightsInfo, descriptorName,
3209 "forgetLayerNormWeights",
"inputLayerNormWeights");
3214 bool correctProjectionTensorsPresent =
3215 ((!m_ProjectionWeights && !m_ProjectionBias && !m_Parameters.m_ProjectionEnabled) ||
3216 (m_ProjectionWeights && !m_ProjectionBias && m_Parameters.m_ProjectionEnabled) ||
3217 (m_ProjectionWeights && m_ProjectionBias && m_Parameters.m_ProjectionEnabled));
3219 if (!correctProjectionTensorsPresent)
3222 ": If projection is enabled, ProjectionWeights should be present and " 3223 "ProjectionBias is optional. If projection is disabled, neither " 3224 "ProjectionWeights nor ProjectionBias should be present.");
3227 if (m_Parameters.m_ProjectionEnabled)
3229 auto projectionWeightsInfo = m_ProjectionWeights->GetTensorInfo();
3230 ValidateTensorNumDimNumElem(projectionWeightsInfo, 2, (numUnits * outputSize),
"ProjectionWeights");
3231 ValidateDataTypes(projectionWeightsInfo, weightsSupportedTypes, descriptorName);
3233 if (m_ProjectionBias)
3235 auto projectionBiasInfo = m_ProjectionBias->GetTensorInfo();
3236 ValidateTensorNumDimNumElem(projectionBiasInfo, 1, outputSize,
"ProjectionBias");
3237 ValidateDataTypes(projectionBiasInfo, biasSupportedTypes, descriptorName);
3241 else if ((outputInfo.GetQuantizationScale() != m_Parameters.m_HiddenStateScale) &&
3242 outputInfo.GetQuantizationOffset() != m_Parameters.m_HiddenStateZeroPoint) {
3244 ": If projection is disabled, output quantization info (scale, offset) " 3245 "should match HiddenStateScale and HiddenStateZeroPoint.");
3252 const std::string descriptorName{
"QuantizedLstmQueueDescriptor"};
3255 ValidateNumInputs(workloadInfo, descriptorName, 3);
3256 ValidateNumOutputs(workloadInfo, descriptorName, 2);
3266 std::vector<DataType> inputOutputSupportedTypes =
3271 std::vector<DataType> cellStateSupportedTypes =
3276 std::vector<DataType> weightsSupportedTypes =
3281 std::vector<DataType> biasSupportedTypes =
3287 ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3288 ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3289 ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3291 ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3292 ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3295 ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName,
"input",
"outputStateIn");
3296 ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3297 "outputStateIn",
"outputStateOut");
3298 ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName,
"cellStateIn",
"cellStateOut");
3301 ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName,
"input",
"outputStateIn");
3302 ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName,
"input",
"outputStateOut");
3303 ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName,
"cellStateIn",
"cellStateOut");
3306 const uint32_t numBatches = inputInfo.GetShape()[0];
3307 const uint32_t inputSize = inputInfo.GetShape()[1];
3308 const uint32_t outputSize = cellStateInInfo.GetShape()[1];
3311 ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName +
" input");
3312 ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName +
" cellStateIn");
3313 ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName +
" outputStateIn");
3314 ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName +
" cellStateOut");
3315 ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName +
" outputStateOut");
3318 ValidatePointer(m_InputToInputWeights, descriptorName,
"InputToInputWeights");
3319 auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3320 ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize),
" InputToInputWeights");
3322 ValidatePointer(m_InputToForgetWeights, descriptorName,
"InputToForgetWeights");
3323 auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3324 ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize),
" InputToForgetWeights");
3326 ValidatePointer(m_InputToCellWeights, descriptorName,
"InputToCellWeights");
3327 auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3328 ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize),
" InputToCellWeights");
3330 ValidatePointer(m_InputToOutputWeights, descriptorName,
"InputToOutputWeights");
3331 auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3332 ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize),
" InputToOutputWeights");
3334 ValidatePointer(m_RecurrentToInputWeights, descriptorName,
"RecurrentToInputWeights");
3335 auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3336 ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize),
" RecurrentToInputWeights");
3338 ValidatePointer(m_RecurrentToForgetWeights, descriptorName,
"RecurrentToForgetWeights");
3339 auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3340 ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
3341 " RecurrentToForgetWeights");
3343 ValidatePointer(m_RecurrentToCellWeights, descriptorName,
"RecurrentToCellWeights");
3344 auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3345 ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize),
" RecurrentToCellWeights");
3347 ValidatePointer(m_RecurrentToOutputWeights, descriptorName,
"RecurrentToOutputWeights");
3348 auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3349 ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize),
" RecurrentToCellWeights");
3352 ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
3354 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
3355 "inputToInputWeights",
"inputToForgetWeights");
3356 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
3357 "inputToInputWeights",
"inputToCellWeights");
3358 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3359 "inputToInputWeights",
"inputToOutputWeights");
3361 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3362 "inputToInputWeights",
"recurrentToInputWeights");
3363 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3364 "inputToInputWeights",
"recurrentToForgeteights");
3365 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3366 "inputToInputWeights",
"recurrentToCellWeights");
3367 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3368 "inputToInputWeights",
"recurrentToOutputWeights");
3371 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
3372 descriptorName,
"inputToInputWeights",
"inputToForgetWeights");
3373 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
3374 descriptorName,
"inputToInputWeights",
"inputToCellWeights");
3375 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
3376 descriptorName,
"inputToInputWeights",
"inputToOutputWeights");
3378 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
3379 descriptorName,
"inputToInputWeights",
"recurrentToInputWeights");
3380 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
3381 descriptorName,
"inputToInputWeights",
"recurrentToForgetWeights");
3382 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
3383 descriptorName,
"inputToInputWeights",
"recurrentToCellWeights");
3384 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
3385 descriptorName,
"inputToInputWeights",
"recurrentToOutputWeights");
3388 ValidatePointer(m_InputGateBias, descriptorName,
"InputGateBias");
3389 auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3390 ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize,
" InputGateBias");
3392 ValidatePointer(m_ForgetGateBias, descriptorName,
"ForgetGateBias");
3393 auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3394 ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize,
" ForgetGateBias");
3396 ValidatePointer(m_CellBias, descriptorName,
"CellBias");
3397 auto cellBiasInfo = m_CellBias->GetTensorInfo();
3398 ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize,
" CellBias");
3400 ValidatePointer(m_OutputGateBias, descriptorName,
"OutputGateBias");
3401 auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3402 ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize,
" OutputGateBias");
3405 ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
3407 ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
3408 "inputGateBias",
"forgetGateBias");
3409 ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
3410 "inputGateBias",
"cellBias");
3411 ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
3412 "inputGateBias",
"outputGateBias");
3415 ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3416 ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3417 ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3418 ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3423 const std::string descriptorName{
"AbsQueueDescriptor"};
3425 ValidateNumInputs(workloadInfo, descriptorName, 1);
3426 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3431 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3433 std::vector<DataType> supportedTypes =
3444 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3445 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3450 const std::string descriptorName{
"SliceQueueDescriptor"};
3452 ValidateNumInputs(workloadInfo, descriptorName, 1);
3453 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3458 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3466 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank,
"output");
3469 if (m_Parameters.m_Begin.size() != rank)
3472 ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
3474 if (m_Parameters.m_Size.size() != rank)
3477 ": Length of size descriptor must equal rank " + std::to_string(rank));
3482 for (
unsigned int i = 0u; i < rank; ++i)
3484 if (m_Parameters.m_Size[i] != outputShape[i])
3493 for(
unsigned int i = 0u; i < rank; ++i)
3495 if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
3498 std::to_string(i) +
" exceeds input size.");
3505 const std::string descriptorName{
"DepthToSpaceQueueDescriptor"};
3507 ValidateNumInputs(workloadInfo, descriptorName, 1);
3508 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3513 ValidateTensorNumDimensions(inputInfo, descriptorName, 4,
"input");
3514 ValidateTensorNumDimensions(outputInfo, descriptorName, 4,
"output");
3516 std::vector<DataType> supportedTypes =
3526 ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
3527 ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
3529 ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName,
"input",
"output");
3531 if (m_Parameters.m_BlockSize == 0)
3537 const unsigned int wIndex = dimensionIndices.
GetWidthIndex();
3542 if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
3545 "must be divisible by block size.");
3549 if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
3552 "must be divisible by the square of block size." );
3558 const std::string descriptorName{
"ComparisonQueueDescriptor"};
3560 ValidateNumInputs(workloadInfo, descriptorName, 2);
3561 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3567 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3582 const std::string descriptorName{
"ElementwiseUnaryQueueDescriptor"};
3584 ValidateNumInputs(workloadInfo, descriptorName, 1);
3585 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3590 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3592 std::vector<DataType> supportedTypes =
3603 std::vector<DataType> logicalSupportedTypes =
3610 ValidateDataTypes(inputTensorInfo, logicalSupportedTypes, descriptorName);
3614 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3618 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3623 const std::string descriptorName{
"RankQueueDescriptor"};
3625 ValidateNumInputs(workloadInfo, descriptorName, 1);
3626 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3631 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1,
"output");
3632 ValidateTensorNumElements(outputTensorInfo, descriptorName, 1,
"output");
3634 std::vector<DataType> supportedTypes =
3646 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3647 ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
3652 const std::string descriptorName{
"LogicalBinaryQueueDescriptor"};
3654 ValidateNumInputs(workloadInfo, descriptorName, 2);
3655 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3661 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3686 const std::string descriptorName{
"ReduceQueueDescriptor"};
3688 ValidateNumInputs(workloadInfo, descriptorName, 1);
3689 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3694 std::vector<DataType> supportedTypes =
3705 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3706 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetWidthIndex() const
std::vector< unsigned int > m_Origin
const TensorShape & GetShape() const
constexpr bool IsQuantizedType()
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasPerAxisQuantization() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
Optional< unsigned int > GetQuantizationDim() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32)
#define ARMNN_LOG(severity)
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Copyright (c) 2021 ARM Limited and Contributors.
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< float > GetQuantizationScales() const
bool HasMultipleQuantizationScales() const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetHeightIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr const char * GetDataTypeName(DataType dataType)
constexpr bool IsQuantized8BitType(DataType dataType)
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_InputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_NO_DEPRECATE_WARN_END
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_ASSERT_MSG(COND, MSG)
void Validate(const WorkloadInfo &workloadInfo) const
int32_t GetQuantizationOffset() const
float GetQuantizationScale() const
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
DataType GetDataType() const
bool has_value() const noexcept
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_OutputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
DataType GetBiasDataType(DataType inputDataType)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Outputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Contains information about inputs and outputs to a layer.
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Inputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
unsigned int GetChannelsIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumElements() const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< unsigned int > m_Origin