#include <armnnUtils/TensorUtils.hpp>

#include <armnn/backends/ITensorHandle.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <fmt/format.h>

#include <cstdlib>
#include <cstring>
#include <memory>

using namespace armnn;

namespace armnnUtils
{
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels,
                                  unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)
{
    switch (dataLayout)
    {
        case DataLayout::NCHW:
            return TensorShape({numberOfBatches, numberOfChannels, height, width});
        case DataLayout::NHWC:
            return TensorShape({numberOfBatches, height, width, numberOfChannels});
        default:
            throw InvalidArgumentException("Unknown data layout ["
                                           + std::to_string(static_cast<int>(dataLayout)) + "]", CHECK_LOCATION());
    }
}
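// A minimal usage sketch (illustration only; ExampleGetTensorShape is a hypothetical helper, not
// part of the original TensorUtils.cpp): the same batch/channel/height/width values produce
// differently ordered shapes depending on the data layout.
static void ExampleGetTensorShape()
{
    TensorShape nchw = GetTensorShape(1, 3, 224, 224, DataLayout::NCHW); // { 1, 3, 224, 224 }
    TensorShape nhwc = GetTensorShape(1, 3, 224, 224, DataLayout::NHWC); // { 1, 224, 224, 3 }
    (void)nchw; (void)nhwc;
}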
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels,
                                unsigned int height, unsigned int width,
                                const armnn::DataLayout dataLayout, const armnn::DataType dataType)
{
    switch (dataLayout)
    {
        case DataLayout::NCHW:
            return TensorInfo({numberOfBatches, numberOfChannels, height, width}, dataType);
        case DataLayout::NHWC:
            return TensorInfo({numberOfBatches, height, width, numberOfChannels}, dataType);
        default:
            throw InvalidArgumentException("Unknown data layout ["
                                           + std::to_string(static_cast<int>(dataLayout)) + "]", CHECK_LOCATION());
    }
}
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels,
                                unsigned int depth, unsigned int height, unsigned int width,
                                const armnn::DataLayout dataLayout, const armnn::DataType dataType)
{
    switch (dataLayout)
    {
        case DataLayout::NDHWC:
            return TensorInfo({numberOfBatches, depth, height, width, numberOfChannels}, dataType);
        case DataLayout::NCDHW:
            return TensorInfo({numberOfBatches, numberOfChannels, depth, height, width}, dataType);
        default:
            throw InvalidArgumentException("Unknown data layout ["
                                           + std::to_string(static_cast<int>(dataLayout)) + "]", CHECK_LOCATION());
    }
}
std::pair<float, float> FindMinMax(armnn::ITensorHandle* tensorHandle)
{
    auto tensor_data = static_cast<const float*>(tensorHandle->Map(true));
    auto tensor_size = tensorHandle->GetShape().GetNumElements();

    // Initialise min/max to the first element, then scan the rest of the tensor
    float min = tensor_data[0];
    float max = tensor_data[0];

    for (unsigned int val = 1; val < tensor_size; val++)
    {
        if (tensor_data[val] < min)
        {
            min = tensor_data[val];
        }
        else if (tensor_data[val] > max)
        {
            max = tensor_data[val];
        }
    }

    tensorHandle->Unmap();

    return std::make_pair(min, max);
}
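// Usage sketch (illustration only; QuantisationRange is a hypothetical helper, not part of the
// original file): FindMinMax is typically used to derive a quantisation range for a Float32
// handle that a backend has already allocated, e.g. a network output.
static std::pair<float, float> QuantisationRange(armnn::ITensorHandle* handle)
{
    // For a tensor holding { -1.5f, 0.f, 2.f, 0.25f } this returns { -1.5f, 2.f }.
    return FindMinMax(handle);
}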
armnn::TensorShape ReduceDims(const armnn::TensorShape& tensorShape, unsigned int dimensions)
{
    if (tensorShape.GetNumDimensions() <= dimensions)
    {
        return tensorShape;
    }
    std::vector<unsigned int> newShape;

    unsigned int dimsToSkip  = tensorShape.GetNumDimensions() - dimensions;
    unsigned int dimsSkipped = 0;
    bool insertRemainder = false;

    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
    {
        // Drop leading 1-sized dimensions until the requested rank is reachable
        if (tensorShape[i] == 1 && dimsSkipped < dimsToSkip && !insertRemainder)
        {
            ++dimsSkipped;
            continue;
        }
        newShape.push_back(tensorShape[i]);
        // Once a dimension has been kept, keep every remaining dimension
        insertRemainder = true;
    }
    return TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data());
}
armnn::TensorInfo ReduceDims(const armnn::TensorInfo& tensorInfo, unsigned int dimensions)
{
    TensorInfo strippedTensor(tensorInfo);
    TensorShape strippedShape = ReduceDims(tensorInfo.GetShape(), dimensions);
    strippedTensor.SetShape(strippedShape);
    return strippedTensor;
}
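// Usage sketch (illustration only; ExampleReduceDims is a hypothetical helper, not part of the
// original file): only leading 1-sized dimensions are stripped, so a shape is left unchanged
// once a non-1 dimension has been kept.
static void ExampleReduceDims()
{
    TensorShape a = ReduceDims(TensorShape({ 1, 1, 2, 3 }), 2); // becomes { 2, 3 }
    TensorShape b = ReduceDims(TensorShape({ 2, 1, 1, 3 }), 2); // stays { 2, 1, 1, 3 }
    (void)a; (void)b;
}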
armnn::TensorShape ExpandDims(const armnn::TensorShape& tensorShape, int axis)
{
    unsigned int outputDim = tensorShape.GetNumDimensions() + 1;

    if (axis < -armnn::numeric_cast<int>(outputDim) || axis > armnn::numeric_cast<int>(tensorShape.GetNumDimensions()))
    {
        throw InvalidArgumentException(fmt::format("Invalid expansion axis {} for {}D input tensor. {}",
                                                   axis, tensorShape.GetNumDimensions(), CHECK_LOCATION().AsString()));
    }

    if (axis < 0)
    {
        // Map a negative axis onto the equivalent positive index of the expanded shape
        axis = armnn::numeric_cast<int>(outputDim) + axis;
    }

    std::vector<unsigned int> outputShape;
    outputShape.reserve(tensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
    {
        outputShape.push_back(tensorShape[i]);
    }
    outputShape.insert(outputShape.begin() + axis, 1);

    return { outputDim, outputShape.data() };
}
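// Usage sketch (illustration only; ExampleExpandDims is a hypothetical helper, not part of the
// original file): a negative axis counts back from the end of the expanded shape.
static void ExampleExpandDims()
{
    TensorShape s({ 2, 3, 4 });
    TensorShape front = ExpandDims(s, 0);  // { 1, 2, 3, 4 }
    TensorShape back  = ExpandDims(s, -1); // { 2, 3, 4, 1 }
    (void)front; (void)back;
}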
armnn::TensorShape ExpandDimsToRank(const armnn::TensorShape& tensorShape, unsigned int rank)
{
    // Nothing to do if the shape already has at least the requested rank
    if (tensorShape.GetNumDimensions() >= rank)
    {
        return tensorShape;
    }

    std::vector<unsigned int> newShape;
    // Prepend 1s until the requested rank is reached...
    for (unsigned int i = 0; i < rank - tensorShape.GetNumDimensions(); ++i)
    {
        newShape.push_back(1);
    }
    // ...then append the original dimensions
    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
    {
        newShape.push_back(tensorShape[i]);
    }

    return TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data());
}
std::vector<unsigned int> SqueezeDims(const armnn::TensorShape& tensorShape)
{
    std::vector<unsigned int> squeezedDims;

    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
    {
        // Drop dimensions of size 1
        if (tensorShape[i] != 1)
        {
            squeezedDims.push_back(tensorShape[i]);
        }
    }
    return squeezedDims;
}
unsigned int GetNumElementsBetween(const armnn::TensorShape& shape,
                                   const unsigned int firstAxisInclusive,
                                   const unsigned int lastAxisExclusive)
{
    if (firstAxisInclusive > lastAxisExclusive)
    {
        throw armnn::InvalidArgumentException(fmt::format(
            "GetNumElementsBetween: firstAxisInclusive [{}D] is greater than lastAxisExclusive [{}D]",
            firstAxisInclusive, lastAxisExclusive));
    }
    if (lastAxisExclusive > shape.GetNumDimensions())
    {
        throw armnn::InvalidArgumentException(fmt::format(
            "{}: lastAxisExclusive [{}D] is greater than the number of dimensions of the tensor shape [{}D]",
            "GetNumElementsBetween", lastAxisExclusive, shape.GetNumDimensions()));
    }
    unsigned int count = 1;
    for (unsigned int i = firstAxisInclusive; i < lastAxisExclusive; i++)
    {
        count *= shape[i];
    }
    return count;
}
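// Worked example (illustration only; ExampleGetNumElementsBetween is a hypothetical helper, not
// part of the original file): the count is the product of the dimensions in the half-open range.
static void ExampleGetNumElementsBetween()
{
    TensorShape s({ 2, 3, 4, 5 });
    unsigned int n = GetNumElementsBetween(s, 1, 3); // 3 * 4 = 12
    unsigned int e = GetNumElementsBetween(s, 2, 2); // empty range -> 1
    (void)n; (void)e;
}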
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
{
    if (axis >= armnn::numeric_cast<int>(inputDimension))
    {
        throw armnn::InvalidArgumentException(fmt::format(
            "{}: axis index [{}] is not less than the number of dimensions [{}D]",
            "GetUnsignedAxis", axis, inputDimension));
    }
    if (axis < -armnn::numeric_cast<int>(inputDimension))
    {
        throw armnn::InvalidArgumentException(fmt::format(
            "{}: axis index [{}] lower than the negative of the number of dimensions [{}]",
            "GetUnsignedAxis", axis, -armnn::numeric_cast<int>(inputDimension)));
    }

    // Convert a negative axis to the equivalent non-negative index
    unsigned int uAxis = axis < 0 ?
                         inputDimension - armnn::numeric_cast<unsigned int>(abs(axis))
                         : armnn::numeric_cast<unsigned int>(axis);
    return uAxis;
}
267 "{}: axis index [{}D] indexes beyond the number of dimesions of the tensor shape [{}D]",
268 "GetNumElementsAfter",
272 unsigned int count = 1;
273 for (
unsigned int i = axis+1; i < numDim; i++)
std::pair<unsigned int, std::vector<float>> GetPerAxisParams(const armnn::TensorInfo& info)
{
    const std::vector<float>& scales = info.GetQuantizationScales();
    armnn::Optional<unsigned int> quantizationDim = info.GetQuantizationDim();
    if (!info.HasPerAxisQuantization())
    {
        throw armnn::InvalidArgumentException(
            std::string("Per-axis quantization params not set for tensor of type ") +
            armnn::GetDataTypeName(info.GetDataType()), CHECK_LOCATION());
    }
    // Number of elements in each slice along the quantization axis
    unsigned int axisFactor = GetNumElementsAfter(info.GetShape(), quantizationDim.value());

    return { axisFactor, scales };
}
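// Usage sketch (illustration only; ExampleGetPerAxisParams is a hypothetical helper, not part of
// the original file, and the per-axis TensorInfo constructor arguments are assumptions): a
// QSymmS8 weight tensor quantized along axis 0 yields one scale per output channel.
static void ExampleGetPerAxisParams()
{
    TensorInfo weights(TensorShape({ 4, 1, 3, 3 }), DataType::QSymmS8,
                       std::vector<float>{ 0.1f, 0.2f, 0.3f, 0.4f }, 0);
    auto params = GetPerAxisParams(weights); // axisFactor == 9, scales.size() == 4
    (void)params;
}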
template<typename PrimitiveType>
void CheckSizes(const std::vector<PrimitiveType>& data, const armnn::TensorInfo& tensorInfo, unsigned int size = 1)
{
    if (data.size() / size != tensorInfo.GetNumElements())
    {
        throw InvalidArgumentException(fmt::format(
            "The data does not contain the expected number of elements {} != {}. {}",
            data.size(), tensorInfo.GetNumElements(), CHECK_LOCATION().AsString()));
    }
}
template<typename PrimitiveType>
std::unique_ptr<float[]> ToFloatArray(const std::vector<PrimitiveType>& data, const armnn::TensorInfo& tensorInfo)
{
    CheckSizes(data, tensorInfo);

    std::unique_ptr<float[]> returnBuffer(new float[tensorInfo.GetNumElements()]);

    if (tensorInfo.HasPerAxisQuantization())
    {
        unsigned int axis = tensorInfo.GetQuantizationDim().value();
        auto axisDimensionality = tensorInfo.GetShape()[axis];
        auto axisFactor = GetNumElementsAfter(tensorInfo.GetShape(), axis);

        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
        {
            // Pick the scale of the slice along the quantization axis that element i belongs to
            unsigned int axisIndex = (i < axisFactor) ? 0 : (i / axisFactor) % axisDimensionality;
            returnBuffer[i] = Dequantize<PrimitiveType>(data[i],
                                                        tensorInfo.GetQuantizationScales()[axisIndex],
                                                        tensorInfo.GetQuantizationOffset());
        }
    }
    else
    {
        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
        {
            returnBuffer[i] = Dequantize<PrimitiveType>(data[i],
                                                        tensorInfo.GetQuantizationScale(),
                                                        tensorInfo.GetQuantizationOffset());
        }
    }
    return returnBuffer;
}
std::unique_ptr<float[]> ToFloatArray(const std::vector<uint8_t>& data, const armnn::TensorInfo& tensorInfo)
{
    if (tensorInfo.GetDataType() == DataType::QAsymmS8 || tensorInfo.GetDataType() == DataType::QSymmS8)
    {
        CheckSizes(data, tensorInfo);
        std::vector<int8_t> buffer(tensorInfo.GetNumElements());
        ::memcpy(buffer.data(), data.data(), data.size());
        return ToFloatArray<int8_t>(buffer, tensorInfo);
    }
    else if (tensorInfo.GetDataType() == DataType::QAsymmU8)
    {
        CheckSizes(data, tensorInfo);
        return ToFloatArray<uint8_t>(data, tensorInfo);
    }
    else if (tensorInfo.GetDataType() == DataType::Signed32)
    {
        CheckSizes(data, tensorInfo, 4);
        std::vector<int32_t> buffer(tensorInfo.GetNumElements());
        ::memcpy(buffer.data(), data.data(), data.size());
        return ToFloatArray<int32_t>(buffer, tensorInfo);
    }
    else if (tensorInfo.GetDataType() == DataType::Signed64)
    {
        CheckSizes(data, tensorInfo, 8);
        std::vector<int64_t> buffer(tensorInfo.GetNumElements());
        ::memcpy(buffer.data(), data.data(), data.size());
        return ToFloatArray<int64_t>(buffer, tensorInfo);
    }
    throw InvalidArgumentException(fmt::format("Unsupported datatype {}. {}",
                                               GetDataTypeName(tensorInfo.GetDataType()),
                                               CHECK_LOCATION().AsString()));
}

} // namespace armnnUtils