#include <arm_compute/runtime/FunctionDescriptors.h>
#include <arm_compute/function_info/FullyConnectedLayerInfo.h>

#if defined(ARMCOMPUTENEON_ENABLED)
// NEON-specific include elided in this excerpt.
#endif

#if defined(ARMCOMPUTECL_ENABLED)
// CL-specific include elided in this excerpt.
#endif

inline arm_compute::NormalizationLayerInfo
CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout)
{
    unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
    const unsigned int depth = tensorInfo.GetShape()[depthDimension];

    // Make sure the normalization window covers the entire depth range; ACL requires the size to be odd.
    const uint32_t normSize = depth * 2u + 1u;

    // See ACL's NormalizationLayerInfo::scale_coeff() definition.
    const float alpha = 1.0f;

    // Don't offset the reduction.
    const float kappa = 0.0f;

    // pow(reduction, -0.5) = 1 / sqrt(reduction)
    const float beta = 0.5f;

    return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa,
                                               false);
}

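// Illustrative usage (a sketch, not part of the original header): building the ACL normalization info
// for an NCHW tensor, where the depth is read from dimension 1. The shape and data type are arbitrary.
//
//   armnn::TensorInfo tensorInfo({ 1, 64, 32, 32 }, armnn::DataType::Float32);
//   arm_compute::NormalizationLayerInfo aclNormInfo =
//       CreateAclNormalizationLayerInfoForL2Normalization(tensorInfo, armnn::DataLayout::NCHW);
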
inline arm_compute::ActivationLayerInfo::ActivationFunction
ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
{
    using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;

    switch (armnnFunction)
    {
        // Cases elided in this excerpt: one per armnn::ActivationFunction value,
        // e.g. ReLu -> AclActivationFunction::RELU, TanH -> AclActivationFunction::TANH,
        // Sigmoid -> AclActivationFunction::LOGISTIC.
        default:
            throw InvalidArgumentException("Unsupported activation function");
    }
}

inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc)
{
    return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
                                            actDesc.m_A, actDesc.m_B);
}

inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor* activationDescPtr)
{
    if (activationDescPtr != nullptr)
    {
        return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
                                                                       *activationDescPtr));
    }
    return arm_compute::ActivationLayerInfo();
}

inline arm_compute::ActivationLayerInfo
ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor& queueDescriptor)
{
    // A fused activation, if any, is attached to the workload as additional information.
    const ActivationDescriptor* activationDescPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();

    if (activationDescPtr != nullptr)
    {
        return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
                                                                       *activationDescPtr));
    }
    return arm_compute::ActivationLayerInfo();
}

inline arm_compute::ActivationLayerInfo
ConvertLstmActivationFuncToAclLayerInfo(uint32_t activationFunction)
{
    switch (activationFunction)
    {
        case 0: // no activation
            return arm_compute::ActivationLayerInfo();
        case 1: // ReLU
            return arm_compute::ActivationLayerInfo(arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
        case 3: // ReLU6
            return arm_compute::ActivationLayerInfo(
                arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
        case 4: // TanH
            return arm_compute::ActivationLayerInfo(
                arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
        case 6: // Sigmoid
            return arm_compute::ActivationLayerInfo(
                arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
        default:
            throw InvalidArgumentException("Unsupported LSTM activation function");
    }
}

inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
{
    using arm_compute::PoolingType;

    switch (poolingAlgorithm)
    {
        case PoolingAlgorithm::Max:     return PoolingType::MAX;
        case PoolingAlgorithm::Average: return PoolingType::AVG;
        case PoolingAlgorithm::L2:      return PoolingType::L2;
        default:                        throw InvalidArgumentException("Unsupported pooling algorithm");
    }
}

inline arm_compute::DimensionRoundingType
ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding rounding)
{
    using arm_compute::DimensionRoundingType;

    switch (rounding)
    {
        case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
        case OutputShapeRounding::Floor:   return DimensionRoundingType::FLOOR;
        default:                           throw InvalidArgumentException("Unsupported output shape rounding type");
    }
}

inline arm_compute::NormType
ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
{
    using arm_compute::NormType;
    switch (channelType)
    {
        case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
        case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
        default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
    }
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
                                                            const ActivationDescriptor* activationDesc)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    fc_info.activation_info = ConvertActivationDescriptorToAclActivationLayerInfo(activationDesc);
    return fc_info;
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
                                                            arm_compute::ActivationLayerInfo activationLayerInfo)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    fc_info.activation_info = activationLayerInfo;
    return fc_info;
}

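// Illustrative usage (a sketch, not part of the original header): converting an armnn descriptor
// together with an already converted ACL activation. The descriptor values here are arbitrary.
//
//   armnn::FullyConnectedDescriptor fcDesc;
//   fcDesc.m_TransposeWeightMatrix = true;
//   arm_compute::FullyConnectedLayerInfo fcInfo =
//       ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(fcDesc, arm_compute::ActivationLayerInfo());
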
inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
{
    switch (resizeMethod)
    {
        case ResizeMethod::Bilinear:
            return arm_compute::InterpolationPolicy::BILINEAR;
        case ResizeMethod::NearestNeighbor:
            return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
        default:
            throw InvalidArgumentException("Unsupported resize method");
    }
}

template<typename T>
inline T ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
{
    // Detect the Android default value of -1 and return the ACL default value of 0.
    if (softmaxDesc.m_Axis == -1)
    {
        return 0;
    }

    unsigned int dim = tensor.GetNumDimensions();

    // Currently ArmNN supports axis 1 only.
    auto aclAxis = (static_cast<T>(dim) - 1);
    aclAxis = aclAxis > 0 ? aclAxis - 1 : aclAxis;

    return aclAxis;
}

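// Worked example: for a rank-4 tensor with m_Axis != -1, dim == 4, so aclAxis starts at 3 and is
// then reduced to 2. This matches ComputeAclAxis(1, tensor) below, i.e. ArmNN axis 1 expressed in
// ACL's right-to-left dimension ordering.
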
inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
{
    unsigned int numSplit = desc.GetNumViews();
    unsigned int numDimensions = desc.GetNumDimensions();
    std::set<unsigned int> splitAxis;

    // A dimension is a split axis if any view's size differs from the input size along that dimension.
    // (The branch handling a descriptor that carries an explicit axis is elided in this excerpt.)
    for (unsigned int i = 0; i < numSplit; ++i)
    {
        for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
        {
            if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
            {
                splitAxis.insert(dimIdx);
            }
        }
    }
    return splitAxis;
}

/// Function to convert an ArmNN axis (left to right) to an ACL axis (right to left) in the range [-rank, rank).
inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
{
    int rank = static_cast<int>(tensor.GetNumDimensions());

    int sign = (armnnAxis < 0) ? -1 : 1;
    int aclAxis = sign * rank - 1 - armnnAxis;

    return aclAxis;
}

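// Worked examples for a rank-4 tensor: ArmNN axis 1 -> 1 * 4 - 1 - 1 = 2; ArmNN axis -1 (the
// innermost dimension) -> -1 * 4 - 1 - (-1) = -4, which stays inside the documented [-rank, rank) range.
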
/// Utility function used to set up an arm_compute::Conv3dInfo object from an armnn::Convolution3dDescriptor.
inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dDescriptor descriptor,
                                                 bool isFastMathEnabled,
                                                 const ActivationDescriptor* activationDescriptor)
{
    const arm_compute::Size3D    stride{descriptor.m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ};
    const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight,
                                         descriptor.m_PadTop, descriptor.m_PadBottom,
                                         descriptor.m_PadFront, descriptor.m_PadBack};
    const arm_compute::Size3D    dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ};

    const arm_compute::ActivationLayerInfo activationInfo =
        ConvertActivationDescriptorToAclActivationLayerInfo(activationDescriptor);
    const auto roundType = arm_compute::DimensionRoundingType::FLOOR;

    return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
}

inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dQueueDescriptor queueDescriptor,
                                                 bool isFastMathEnabled)
{
    auto descriptor = queueDescriptor.m_Parameters;
    const arm_compute::Size3D    stride{descriptor.m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ};
    const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight,
                                         descriptor.m_PadTop, descriptor.m_PadBottom,
                                         descriptor.m_PadFront, descriptor.m_PadBack};
    const arm_compute::Size3D    dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ};

    const arm_compute::ActivationLayerInfo activationInfo =
        ConvertAdditionalInfoToAclActivationLayerInfo(queueDescriptor);
    const auto roundType = arm_compute::DimensionRoundingType::FLOOR;

    return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
}

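// The two ComputeConv3DInfo overloads differ only in where the fused activation comes from: the first
// takes an explicit ActivationDescriptor pointer, while the second pulls it from the queue descriptor's
// additional workload information via ConvertAdditionalInfoToAclActivationLayerInfo.
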
/// Function to compute the output tensor shape based on the axes and whether keepDims is set.
inline const TensorInfo ComputeReductionTensorShape(const armnn::TensorInfo& input,
                                                    const std::vector<uint32_t>& vAxis,
                                                    const bool keepDims)
{
    auto reducedTensorInfo = input;
    unsigned int outputRank = 0;

    // Calculate the output rank.
    if (keepDims)
    {
        outputRank = reducedTensorInfo.GetNumDimensions();
    }
    else if (vAxis.empty())
    {
        outputRank = 1;
    }
    else if (vAxis.size() > reducedTensorInfo.GetNumDimensions())
    {
        throw LayerValidationException("ReduceLayer: Dimensions to reduce can not be bigger than input dimensions");
    }
    else
    {
        outputRank = reducedTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(vAxis.size());
        if (outputRank == 0)
        {
            outputRank = 1;
        }
    }

    // Keep the size of every non-reduced dimension; a reduced dimension contributes 1 only when keepDims is set.
    std::vector<unsigned int> dimSizes(outputRank, 1);
    if (!vAxis.empty())
    {
        unsigned int outputIndex = 0;
        for (unsigned int i = 0; i < reducedTensorInfo.GetNumDimensions(); ++i)
        {
            if (std::find(vAxis.begin(), vAxis.end(), i) == vAxis.end())
            {
                dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(reducedTensorInfo.GetShape()[i]);
                ++outputIndex;
            }
            else if (keepDims)
            {
                dimSizes[outputIndex] = 1;
                ++outputIndex;
            }
        }
    }

    const TensorShape inferredShape = TensorShape(outputRank, dimSizes.data());
    reducedTensorInfo.SetShape(inferredShape);
    return reducedTensorInfo;
}

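// Worked example: an input of shape { 2, 3, 4 } reduced over vAxis = { 1 } yields shape { 2, 4 } with
// keepDims == false, and shape { 2, 1, 4 } with keepDims == true.
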
/// Macro to check whether a Reduce layer with multiple axes is supported on a backend,
/// by validating the equivalent chain of single-axis reductions.
#define IS_MULTI_AXES_REDUCE_SUPPORTED(func, input, desc, status)                   \
    armnn::TensorInfo inputTensorInfo = input;                                       \
    unsigned int recalculatedAxis = 0;                                               \
    std::vector<uint32_t> axes;                                                      \
    for (unsigned int i = 0; i != desc.m_vAxis.size(); ++i)                          \
    {                                                                                \
        axes.emplace_back(desc.m_vAxis[i]);                                          \
        const armnn::TensorInfo& reducedTensorInfo =                                 \
            ComputeReductionTensorShape(input, axes, desc.m_KeepDims);               \
        std::vector<uint32_t> singleAxis(1, desc.m_vAxis[i] - recalculatedAxis);     \
        armnn::ReduceDescriptor newReduceDescriptor = desc;                          \
        newReduceDescriptor.m_vAxis.assign(singleAxis.begin(), singleAxis.end());    \
        status = func(inputTensorInfo, reducedTensorInfo, newReduceDescriptor);      \
        if (!status)                                                                 \
        {                                                                            \
            break;                                                                   \
        }                                                                            \
        if (!desc.m_KeepDims)                                                        \
        {                                                                            \
            recalculatedAxis++;                                                      \
        }                                                                            \
        inputTensorInfo = reducedTensorInfo;                                         \
    }

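// Illustrative usage (a sketch, not part of this header): inside a backend's reduce-support check the
// macro expands in place and validates one single-axis reduction at a time. Any functor of the shape
// func(const TensorInfo& input, const TensorInfo& output, const ReduceDescriptor&) -> arm_compute::Status
// fits; a backend-specific validate function such as NeonReduceWorkloadValidate is assumed here.
//
//   arm_compute::Status status;
//   IS_MULTI_AXES_REDUCE_SUPPORTED(NeonReduceWorkloadValidate, input, descriptor, status);
//   return static_cast<bool>(status);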