ArmNN 25.11
Descriptors.hpp
1//
2// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5#pragma once
6
7#include "Deprecated.hpp"
8#include "DescriptorsFwd.hpp" // Required for class equivalence declarations.
9#include "Tensor.hpp"
10#include "Types.hpp"
11#include <armnn/Exceptions.hpp>
12
13#include <cstdint>
14#include <iterator>
15#include <utility>
16#include <vector>
17
18namespace armnn
19{
20
21/// Base class for all descriptors.
23{
24 virtual bool IsNull() const { return false; }
25 virtual ~BaseDescriptor() = default;
26};
27
28/// Null Descriptor used as a return value from the IConnectableLayer GetParameters method
29/// by layers which do not have a descriptor
31{
32 bool IsNull() const override { return true; }
33};
34
35/// An ActivationDescriptor for the ActivationLayer.
37{
43
45 float a = 0,
46 float b = 0)
47 : m_Function(activation)
48 , m_A(a)
49 , m_B(b)
50 {}
51
52 bool operator ==(const ActivationDescriptor &rhs) const
53 {
54 return m_Function == rhs.m_Function && m_A == rhs.m_A && m_B == rhs.m_B;
55 }
56
57 /// @brief The activation function to use
58 /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
60 /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
61 float m_A;
62 /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
63 float m_B;
64};
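// Usage sketch (illustrative only; the helper name is hypothetical, not part of the ArmNN API):
// builds a bounded ReLU activation clipped to [0, 6] with the (function, a, b) constructor above.
inline ActivationDescriptor MakeBoundedRelu6ExampleDescriptor()
{
    // For BoundedReLu, m_A is the upper bound and m_B the lower bound.
    return ActivationDescriptor(ActivationFunction::BoundedReLu, 6.0f, 0.0f);
}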
65
66/// An ArgMinMaxDescriptor for ArgMinMaxLayer
68{
74
75 bool operator ==(const ArgMinMaxDescriptor &rhs) const
76 {
77 return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
78 }
79
80 /// Specify if the function is to find Min or Max.
82 /// Axis to reduce across the input tensor.
83 int m_Axis;
84 /// Deprecated and will be removed in a future release.
86};
87
88/// A ComparisonDescriptor for the ComparisonLayer
90{
94
96 : m_Operation(operation)
97 {}
98
99 bool operator ==(const ComparisonDescriptor &rhs) const
100 {
101 return m_Operation == rhs.m_Operation;
102 }
103
104 /// Specifies the comparison operation to execute
106};
107
108/// An ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer
110{
114
116 : m_Operation(operation)
117 {}
118
120 {
121 return m_Operation == rhs.m_Operation;
122 }
123
124 /// Specifies the elementwiseBinary operation to execute
126};
127
128/// An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
130{
134
136 : m_Operation(operation)
137 {}
138
140 {
141 return m_Operation == rhs.m_Operation;
142 }
143
144 /// Specifies the elementwiseUnary operation to execute
146};
147
148/// A PermuteDescriptor for the PermuteLayer.
150{
154
156 : m_DimMappings(dimMappings)
157 {}
158
159 bool operator ==(const PermuteDescriptor &rhs) const
160 {
161 return m_DimMappings.IsEqual(rhs.m_DimMappings);
162 }
163
164 /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
165 /// source and target potentially have different memory layouts e.g.
166 /// Input Shape {1, 1, 4, 4}
167 /// Permutation Vector {0, 2, 3, 1}
168 /// Output Shape {1, 4, 1, 4}
169 /// dim "0" goes into index 0 ([ 1, X, X, X ])
170 /// dim "1" goes into index 2 ([ 1, X, 1, X ])
171 /// dim "2" goes into index 3 ([ 1, X, 1, 4 ])
172 /// dim "3" goes into index 1 ([ 1, 4, 1, 4 ])
174};
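// Usage sketch (illustrative only; the helper name is hypothetical): the NCHW -> NHWC mapping
// described in the comment above, assuming PermutationVector (declared in Types.hpp) accepts an
// initializer list.
inline PermuteDescriptor MakeNchwToNhwcPermuteExample()
{
    // Each entry states which output index the corresponding input dimension moves to.
    return PermuteDescriptor(PermutationVector({ 0, 2, 3, 1 }));
}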
175
176/// A SoftmaxDescriptor for the SoftmaxLayer.
178{
180 : m_Beta(1.0f)
181 , m_Axis(-1)
182 {}
183
184 bool operator ==(const SoftmaxDescriptor& rhs) const
185 {
186 return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
187 }
188
189 /// Exponentiation value.
190 float m_Beta;
191 /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
193};
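// Usage sketch (illustrative only; the helper name is hypothetical): softmax over the channel
// dimension (axis 1) of an NCHW tensor, keeping the default temperature m_Beta = 1.
inline SoftmaxDescriptor MakeChannelSoftmaxExample()
{
    SoftmaxDescriptor softmaxDesc;
    softmaxDesc.m_Axis = 1; // override the default last axis (-1)
    return softmaxDesc;
}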
194
195/// A LogSoftmaxDescriptor for the LogSoftmaxLayer
197
198/// @brief An OriginsDescriptor for the ConcatLayer.
199/// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
200/// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
202{
204 OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
207
209
211
212 bool operator ==(const OriginsDescriptor& rhs) const;
213
214 /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
215 /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
216 /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
217 Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
218 /// Get the number of views.
219 uint32_t GetNumViews() const;
220 /// Get the number of dimensions.
221 uint32_t GetNumDimensions() const;
222 /// Return the view origin at index idx.
223 const uint32_t* GetViewOrigin(uint32_t idx) const;
224 /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
225 /// The number of views must match number of elements in the new ordering array.
226 void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
227 /// Swap the OriginsDescriptor values first and second.
228 friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
229 /// Set the concatenation axis value.
230 void SetConcatAxis(unsigned int concatAxis);
231 /// Get the concatenation axis value.
232 unsigned int GetConcatAxis() const;
233
234private:
235 unsigned int m_ConcatAxis;
236 uint32_t m_NumViews;
237 uint32_t m_NumDimensions;
238 uint32_t** m_ViewOrigins;
239};
240
241/// @brief A ViewsDescriptor for the SplitterLayer.
242/// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
243/// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
245{
246 ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
247 ViewsDescriptor(const ViewsDescriptor& other);
250
252
254
255 bool operator ==(const ViewsDescriptor& rhs) const;
256
257 /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
258 /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
259 /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
260 Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
261 /// @brief Set the size of the views. The arguments are: view, dimension, value.
262 /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
263 /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
264 Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
265
266 /// Get the number of views.
267 uint32_t GetNumViews() const;
268 /// Get the number of dimensions.
269 uint32_t GetNumDimensions() const;
271 /// Get the view origin at index idx.
271 const uint32_t* GetViewOrigin(uint32_t idx) const;
273 /// Get the view sizes at index idx.
273 const uint32_t* GetViewSizes(uint32_t idx) const;
274 /// Get the View Origins
275 const OriginsDescriptor& GetOrigins() const;
276
277 /// Swap the ViewsDescriptor values first and second.
278 friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
279
280 /// Set the axis value.
281 void SetAxis(int32_t axis);
282
283 /// Get the axis value.
284 int32_t GetAxis() const;
285
286 /// Returns true if an axis has been set.
287 bool HasAxis() const;
288
289private:
290 OriginsDescriptor m_Origins;
291 uint32_t** m_ViewSizes;
292 bool m_IsAxisSet = false;
293 int32_t m_Axis = 0;
294};
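// Usage sketch (illustrative only; the helper name is hypothetical): a ViewsDescriptor that
// splits a {1, 4, 2, 2} tensor into two {1, 2, 2, 2} views along dimension 1, using the
// SetViewOriginCoord/SetViewSize calls declared above.
inline ViewsDescriptor MakeTwoWaySplitExample()
{
    ViewsDescriptor splitDesc(2, 4); // 2 views over a 4-dimensional tensor
    const uint32_t viewSize[4] = { 1, 2, 2, 2 };
    for (uint32_t view = 0; view < 2; ++view)
    {
        for (uint32_t dim = 0; dim < 4; ++dim)
        {
            // The views differ only in where they start along dimension 1.
            splitDesc.SetViewOriginCoord(view, dim, dim == 1 ? view * 2 : 0);
            splitDesc.SetViewSize(view, dim, viewSize[dim]);
        }
    }
    return splitDesc;
}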
295
296
297/// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
298/// concatenation of a number of input tensors.
299template <typename TensorShapeIt>
300 OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
301 TensorShapeIt last,
302 unsigned int concatenationDimension)
303{
304 auto numInputs = std::distance(first, last);
305
306 if (numInputs < 2)
307 {
308 throw InvalidArgumentException("Concatenation requires at least 2 inputs");
309 }
310
311 const auto& firstInputShape = *first;
312
313 const unsigned int numDimensions = firstInputShape.GetNumDimensions();
314 for (auto it = first + 1; it != last; ++it)
315 {
316 if (it->GetNumDimensions() != numDimensions)
317 {
318 throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
319 }
320 }
321
322 if (concatenationDimension >= numDimensions)
323 {
324 throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
325 }
326
327 for (auto it = first; it != last; ++it)
328 {
329 for (unsigned int d = 0; d < numDimensions; ++d)
330 {
331 const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
332 if (!dimSizeOk)
333 {
334 throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
335 "except the concatenation dimension");
336 }
337 }
338 }
339
340 OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
341 viewsDescriptor.SetConcatAxis(concatenationDimension);
342
343 uint32_t viewIndex = 0u;
344 uint32_t coordAlongConcatDim = 0u;
345 for (auto it = first; it != last; ++it)
346 {
347 const auto& inputShape = *it;
348
349 for (unsigned int i = 0; i < concatenationDimension; ++i)
350 {
351 viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
352 }
353
354 viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
355 unsigned int dimSize = inputShape[concatenationDimension];
356 coordAlongConcatDim += dimSize;
357
358
359 for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
360 {
361 viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
362 }
363
364 ++viewIndex;
365 }
366
367 return viewsDescriptor;
368}
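// Usage sketch (illustrative only; the helper name is hypothetical): concatenating two 4-D
// tensors along the channel dimension. Only the concatenation dimension may differ between
// the input shapes; TensorShape comes from Tensor.hpp included above.
inline OriginsDescriptor MakeChannelConcatExample()
{
    const std::vector<TensorShape> shapes = { TensorShape({ 1, 2, 4, 4 }),
                                              TensorShape({ 1, 3, 4, 4 }) };
    return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);
}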
369
370/// A Pooling2dDescriptor for the Pooling2dLayer.
372{
387
388 bool operator ==(const Pooling2dDescriptor& rhs) const
389 {
390 return m_PoolType == rhs.m_PoolType &&
391 m_PadLeft == rhs.m_PadLeft &&
392 m_PadRight == rhs.m_PadRight &&
393 m_PadTop == rhs.m_PadTop &&
394 m_PadBottom == rhs.m_PadBottom &&
395 m_PoolWidth == rhs.m_PoolWidth &&
396 m_PoolHeight == rhs.m_PoolHeight &&
397 m_StrideX == rhs.m_StrideX &&
398 m_StrideY == rhs.m_StrideY &&
402 }
403
404 /// The pooling algorithm to use (Max, Average, L2).
406 /// Padding left value in the width dimension.
407 uint32_t m_PadLeft;
408 /// Padding right value in the width dimension.
409 uint32_t m_PadRight;
410 /// Padding top value in the height dimension.
411 uint32_t m_PadTop;
412 /// Padding bottom value in the height dimension.
413 uint32_t m_PadBottom;
414 /// Pooling width value.
415 uint32_t m_PoolWidth;
416 /// Pooling height value.
417 uint32_t m_PoolHeight;
418 /// Stride value when proceeding through input for the width dimension.
419 uint32_t m_StrideX;
420 /// Stride value when proceeding through input for the height dimension.
421 uint32_t m_StrideY;
422 /// The rounding method for the output shape. (Floor, Ceiling).
424 /// The padding method to be used. (Exclude, IgnoreValue).
426 /// The data layout to be used (NCHW, NHWC).
428};
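// Usage sketch (illustrative only; the helper name is hypothetical): a 2x2 max pool with
// stride 2 on an NHWC input; fields not set here keep the values from the default constructor.
inline Pooling2dDescriptor MakeMaxPool2x2Example()
{
    Pooling2dDescriptor poolDesc;
    poolDesc.m_PoolType   = PoolingAlgorithm::Max;
    poolDesc.m_PoolWidth  = 2;
    poolDesc.m_PoolHeight = 2;
    poolDesc.m_StrideX    = 2;
    poolDesc.m_StrideY    = 2;
    poolDesc.m_DataLayout = DataLayout::NHWC;
    return poolDesc;
}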
429
430/// A Pooling3dDescriptor for the Pooling3dLayer.
432{
451
452 bool operator ==(const Pooling3dDescriptor& rhs) const
453 {
454 return m_PoolType == rhs.m_PoolType &&
455 m_PadLeft == rhs.m_PadLeft &&
456 m_PadRight == rhs.m_PadRight &&
457 m_PadTop == rhs.m_PadTop &&
458 m_PadBottom == rhs.m_PadBottom &&
459 m_PadFront == rhs.m_PadFront &&
460 m_PadBack == rhs.m_PadBack &&
461 m_PoolWidth == rhs.m_PoolWidth &&
462 m_PoolHeight == rhs.m_PoolHeight &&
463 m_PoolDepth == rhs.m_PoolDepth &&
464 m_StrideX == rhs.m_StrideX &&
465 m_StrideY == rhs.m_StrideY &&
466 m_StrideZ == rhs.m_StrideZ &&
470 }
471
472 /// The pooling algorithm to use (Max, Average, L2).
474 /// Padding left value in the width dimension.
475 uint32_t m_PadLeft;
476 /// Padding right value in the width dimension.
477 uint32_t m_PadRight;
478 /// Padding top value in the height dimension.
479 uint32_t m_PadTop;
480 /// Padding bottom value in the height dimension.
481 uint32_t m_PadBottom;
482 /// Padding front value in the depth dimension.
483 uint32_t m_PadFront;
484 /// Padding back value in the depth dimension.
485 uint32_t m_PadBack;
486 /// Pooling width value.
487 uint32_t m_PoolWidth;
488 /// Pooling height value.
489 uint32_t m_PoolHeight;
490 /// Pooling depth value.
491 uint32_t m_PoolDepth;
492 /// Stride value when proceeding through input for the width dimension.
493 uint32_t m_StrideX;
494 /// Stride value when proceeding through input for the height dimension.
495 uint32_t m_StrideY;
496 /// Stride value when proceeding through input for the depth dimension.
497 uint32_t m_StrideZ;
498 /// The rounding method for the output shape. (Floor, Ceiling).
500 /// The padding method to be used. (Exclude, IgnoreValue).
502 /// The data layout to be used (NCDHW, NDHWC).
504};
505
506/// A FullyConnectedDescriptor for the FullyConnectedLayer.
508{
514
521
522 /// Get the number of inputs.
523 uint32_t GetNumInputs() const;
524
525 /// Enable/disable bias.
527 /// Enable/disable transpose weight matrix.
529 /// Enable/disable constant weights and biases.
531};
532
533/// A Convolution2dDescriptor for the Convolution2dLayer.
535{
537 : m_PadLeft(0)
538 , m_PadRight(0)
539 , m_PadTop(0)
540 , m_PadBottom(0)
541 , m_StrideX(1)
542 , m_StrideY(1)
543 , m_DilationX(1)
544 , m_DilationY(1)
545 , m_BiasEnabled(false)
547 {}
548
550 {
551 return m_PadLeft == rhs.m_PadLeft &&
552 m_PadRight == rhs.m_PadRight &&
553 m_PadTop == rhs.m_PadTop &&
554 m_PadBottom == rhs.m_PadBottom &&
555 m_StrideX == rhs.m_StrideX &&
556 m_StrideY == rhs.m_StrideY &&
557 m_DilationX == rhs.m_DilationX &&
558 m_DilationY == rhs.m_DilationY &&
561 }
562 uint32_t GetNumInputs() const;
563
564
565 /// Padding left value in the width dimension.
566 uint32_t m_PadLeft;
567 /// Padding right value in the width dimension.
568 uint32_t m_PadRight;
569 /// Padding top value in the height dimension.
570 uint32_t m_PadTop;
571 /// Padding bottom value in the height dimension.
572 uint32_t m_PadBottom;
573 /// Stride value when proceeding through input for the width dimension.
574 uint32_t m_StrideX;
575 /// Stride value when proceeding through input for the height dimension.
576 uint32_t m_StrideY;
577 /// Dilation along x axis
578 uint32_t m_DilationX;
579 /// Dilation along y axis
580 uint32_t m_DilationY;
581 /// Enable/disable bias.
583 /// The data layout to be used (NCHW, NHWC).
585};
586
587/// A Convolution3dDescriptor for the Convolution3dLayer.
589{
591 : m_PadLeft(0)
592 , m_PadRight(0)
593 , m_PadTop(0)
594 , m_PadBottom(0)
595 , m_PadFront(0)
596 , m_PadBack(0)
597 , m_StrideX(1)
598 , m_StrideY(1)
599 , m_StrideZ(1)
600 , m_DilationX(1)
601 , m_DilationY(1)
602 , m_DilationZ(1)
603 , m_BiasEnabled(false)
605 {}
606
608 {
609 return m_PadLeft == rhs.m_PadLeft &&
610 m_PadRight == rhs.m_PadRight &&
611 m_PadTop == rhs.m_PadTop &&
612 m_PadBottom == rhs.m_PadBottom &&
613 m_PadFront == rhs.m_PadFront &&
614 m_PadBack == rhs.m_PadBack &&
615 m_StrideX == rhs.m_StrideX &&
616 m_StrideY == rhs.m_StrideY &&
617 m_StrideZ == rhs.m_StrideZ &&
618 m_DilationX == rhs.m_DilationX &&
619 m_DilationY == rhs.m_DilationY &&
620 m_DilationZ == rhs.m_DilationZ &&
623 }
624
625 /// Get the number of views/inputs.
626 uint32_t GetNumInputs() const;
627
628 /// Padding left value in the width dimension.
629 uint32_t m_PadLeft;
630 /// Padding right value in the width dimension.
631 uint32_t m_PadRight;
632 /// Padding top value in the height dimension.
633 uint32_t m_PadTop;
634 /// Padding bottom value in the height dimension.
635 uint32_t m_PadBottom;
636 /// Padding front value in the depth dimension.
637 uint32_t m_PadFront;
638 /// Padding back value in the depth dimension.
639 uint32_t m_PadBack;
640 /// Stride value when proceeding through input for the width dimension.
641 uint32_t m_StrideX;
642 /// Stride value when proceeding through input for the height dimension.
643 uint32_t m_StrideY;
644 /// Stride value when proceeding through input for the depth dimension.
645 uint32_t m_StrideZ;
646 /// Dilation along x axis
647 uint32_t m_DilationX;
648 /// Dilation along y axis
649 uint32_t m_DilationY;
650 /// Dilation along z axis
651 uint32_t m_DilationZ;
652 /// Enable/disable bias.
654 /// The data layout to be used (NDHWC, NCDHW).
656};
657
658/// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
660{
662 : m_PadLeft(0)
663 , m_PadRight(0)
664 , m_PadTop(0)
665 , m_PadBottom(0)
666 , m_StrideX(1)
667 , m_StrideY(1)
668 , m_DilationX(1)
669 , m_DilationY(1)
670 , m_BiasEnabled(false)
672 {}
673
675 {
676 return m_PadLeft == rhs.m_PadLeft &&
677 m_PadRight == rhs.m_PadRight &&
678 m_PadTop == rhs.m_PadTop &&
679 m_PadBottom == rhs.m_PadBottom &&
680 m_StrideX == rhs.m_StrideX &&
681 m_StrideY == rhs.m_StrideY &&
682 m_DilationX == rhs.m_DilationX &&
683 m_DilationY == rhs.m_DilationY &&
686 }
687
688 /// Get the number of views/inputs.
689 uint32_t GetNumInputs() const;
690
691 /// Padding left value in the width dimension.
692 uint32_t m_PadLeft;
693 /// Padding right value in the width dimension.
694 uint32_t m_PadRight;
695 /// Padding top value in the height dimension.
696 uint32_t m_PadTop;
697 /// Padding bottom value in the height dimension.
698 uint32_t m_PadBottom;
699 /// Stride value when proceeding through input for the width dimension.
700 uint32_t m_StrideX;
701 /// Stride value when proceeding through input for the height dimension.
702 uint32_t m_StrideY;
703 /// Dilation factor value for width dimension.
704 uint32_t m_DilationX;
705 /// Dilation factor value for height dimension.
706 uint32_t m_DilationY;
707 /// Enable/disable bias.
709 /// The data layout to be used (NCHW, NHWC).
711};
712
714{
728
743
744 /// Maximum numbers of detections.
746 /// Maximum numbers of classes per detection, used in Fast NMS.
748 /// Detections per class, used in Regular NMS.
750 /// NMS score threshold.
752 /// Intersection over union threshold.
754 /// Number of classes.
755 uint32_t m_NumClasses;
756 /// Use Regular NMS.
758 /// Center size encoding scale x.
759 float m_ScaleX;
760 /// Center size encoding scale y.
761 float m_ScaleY;
762 /// Center size encoding scale weight.
763 float m_ScaleW;
764 /// Center size encoding scale height.
765 float m_ScaleH;
766};
767
768/// A NormalizationDescriptor for the NormalizationLayer.
770{
780
782 {
783 return m_NormChannelType == rhs.m_NormChannelType &&
785 m_NormSize == rhs.m_NormSize &&
786 m_Alpha == rhs.m_Alpha &&
787 m_Beta == rhs.m_Beta &&
788 m_K == rhs.m_K &&
790 }
791
792 /// Normalization channel algorithm to use (Across, Within).
794 /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
796 /// Depth radius value.
797 uint32_t m_NormSize;
798 /// Alpha value for the normalization equation.
799 float m_Alpha;
800 /// Beta value for the normalization equation.
801 float m_Beta;
802 /// Kappa value used for the across channel normalization equation.
803 float m_K;
804 /// The data layout to be used (NCHW, NHWC).
806};
807
808/// A L2NormalizationDescriptor for the L2NormalizationLayer.
810{
815
817 {
818 return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
819 }
820
821 /// Used to avoid dividing by zero.
822 float m_Eps;
823 /// The data layout to be used (NCHW, NHWC).
825};
826
827/// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
829{
834
836 {
837 return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
838 }
839
840 /// Value to add to the variance. Used to avoid dividing by zero.
841 float m_Eps;
842 /// The data layout to be used (NCHW, NHWC).
844};
845
846/// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
848{
850 : m_Gamma(1.0f)
851 , m_Beta(0.0f)
852 , m_Eps(1e-12f)
854 {}
855
857 {
858 return m_Gamma == rhs.m_Gamma &&
859 m_Beta == rhs.m_Beta &&
860 m_Eps == rhs.m_Eps &&
862 }
863
864 /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
865 float m_Gamma;
866 /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
867 float m_Beta;
868 /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
869 float m_Eps;
870 /// The data layout to be used (NCHW, NHWC).
872};
873
874/// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
876{
878 : m_BlockShape({1, 1})
879 , m_Crops({{0, 0}, {0, 0}})
881 {}
882
883 BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
884 std::vector<std::pair<unsigned int, unsigned int>> crops)
885 : m_BlockShape(blockShape)
886 , m_Crops(crops)
888 {}
889
891 {
892 return m_BlockShape == rhs.m_BlockShape &&
893 m_Crops == rhs.m_Crops &&
895 }
896
897 /// Block shape values.
898 std::vector<unsigned int> m_BlockShape;
899 /// The values to crop from the input dimension.
900 std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
901 /// The data layout to be used (NCHW, NHWC).
903};
904
905/// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
907{
909 : m_Min(-6.0f)
910 , m_Max(6.0f)
911 {}
912
914 {
915 return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
916 }
917
918 /// Minimum value.
919 float m_Min;
920 /// Maximum value.
921 float m_Max;
922};
923
924/// A FillDescriptor for the FillLayer
926{
928 : m_Value(0)
929 {}
930
931 FillDescriptor(const float& value)
932 : m_Value(value)
933 {}
934
935 bool operator ==(const FillDescriptor& rhs) const
936 {
937 return m_Value == rhs.m_Value;
938 }
939
940 float m_Value;
941};
942
943/// A FusedDescriptor for the FusedLayer.
945{
946 FusedDescriptor(unsigned int numInputSlots = 4u,
947 unsigned int numOutputSlots = 2u,
949 : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots), m_FusedKernelType(fusedType)
950 {}
951
952 bool operator ==(const FusedDescriptor& rhs) const
953 {
954 return m_NumInputSlots == rhs.m_NumInputSlots &&
957 }
958
959 unsigned int m_NumInputSlots;
960 unsigned int m_NumOutputSlots;
962};
963
964/// A GatherDescriptor for the GatherLayer.
966{
968 : m_Axis(0)
969 {}
970
971 GatherDescriptor(int32_t axis)
972 : m_Axis(axis)
973 {}
974
975 bool operator ==(const GatherDescriptor& rhs) const
976 {
977 return m_Axis == rhs.m_Axis;
978 }
979
980 /// The axis in params to gather indices from
981 int32_t m_Axis;
982};
983
984/// A ResizeDescriptor for the ResizeLayer.
986{
995
996 bool operator ==(const ResizeDescriptor& rhs) const
997 {
998 return m_TargetWidth == rhs.m_TargetWidth &&
1000 m_Method == rhs.m_Method &&
1001 m_DataLayout == rhs.m_DataLayout &&
1004 }
1005
1006 /// Target width value.
1008 /// Target height value.
1010 /// The Interpolation method to use
1011 /// (Bilinear, NearestNeighbor).
1013 /// The data layout to be used (NCHW, NHWC).
1015 /// Aligned corners
1017 /// Half Pixel Centers
1019};
1020
1021
1022/// A ReshapeDescriptor for the ReshapeLayer.
1024{
1028
1030 : m_TargetShape(shape)
1031 {}
1032
1033 bool operator ==(const ReshapeDescriptor& rhs) const
1034 {
1035 return m_TargetShape == rhs.m_TargetShape;
1036 }
1037
1038 /// Target shape value.
1040};
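// Usage sketch (illustrative only; the helper name is hypothetical): flattening to a
// two-dimensional {batch, features} target shape via the TensorShape constructor above.
inline ReshapeDescriptor MakeFlattenReshapeExample()
{
    return ReshapeDescriptor(TensorShape({ 1, 16 }));
}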
1041
1042/// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
1044{
1046 : m_BlockShape({1, 1})
1047 , m_PadList({{0, 0}, {0, 0}})
1049 {}
1050
1051 SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
1052 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1053 : m_BlockShape(blockShape)
1054 , m_PadList(padList)
1056 {}
1057
1059 {
1060 return m_BlockShape == rhs.m_BlockShape &&
1061 m_PadList == rhs.m_PadList &&
1063 }
1064
1065 /// Block shape value.
1066 std::vector<unsigned int> m_BlockShape;
1067 /// @brief Specifies the padding values for the input dimension:
1068 /// heightPad{top, bottom} widthPad{left, right}.
1069 std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1070 /// The data layout to be used (NCHW, NHWC).
1072};
1073
1074/// A SpaceToDepthDescriptor for the SpaceToDepthLayer
1076{
1080
1081 SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
1082 : m_BlockSize(blockSize)
1083 , m_DataLayout(dataLayout)
1084 {}
1085
1087 {
1088 return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
1089 }
1090
1091 /// Scalar specifying the input block size. It must be >= 1
1092 unsigned int m_BlockSize;
1093
1094 /// The data layout to be used (NCHW, NHWC).
1096};
1097
1098/// A DepthToSpaceDescriptor for the DepthToSpaceLayer
1100
1101/// An LstmDescriptor for the LstmLayer.
1103{
1105 : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
1106 , m_ClippingThresCell(0.0)
1107 , m_ClippingThresProj(0.0)
1108 , m_CifgEnabled(true)
1109 , m_PeepholeEnabled(false)
1110 , m_ProjectionEnabled(false)
1111 , m_LayerNormEnabled(false)
1112 , m_TimeMajor(false)
1118 , m_HiddenStateScale(0.0)
1119 {}
1120
1137
1138 /// @brief The activation function to use.
1139 /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
1141 /// Clipping threshold value for the cell state.
1143 /// Clipping threshold value for the projection.
1145 /// Enable/disable cifg (coupled input & forget gate).
1147 /// Enable/disable peephole.
1149 /// Enable/disable the projection layer.
1151 /// Enable/disable layer normalization
1153 /// Enable/disable time major
1155 /// Input intermediate quantization scale
1157 /// Forget intermediate quantization scale
1159 /// Cell intermediate quantization scale
1161 /// Output intermediate quantization scale
1163 /// Hidden State zero point
1165 /// Hidden State quantization scale
1167};
1168
1170
1171/// A MeanDescriptor for the MeanLayer.
1173{
1175 : m_Axis()
1176 , m_KeepDims(false)
1177 {}
1178
1179 MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
1180 : m_Axis(axis)
1181 , m_KeepDims(keepDims)
1182 {}
1183
1184 bool operator ==(const MeanDescriptor& rhs) const
1185 {
1186 return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
1187 }
1188
1189 /// Values for the dimensions to reduce.
1190 std::vector<unsigned int> m_Axis;
1191 /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
1193};
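// Usage sketch (illustrative only; the helper name is hypothetical): mean over the two spatial
// dimensions of an NCHW tensor, keeping the reduced dimensions with size 1.
inline MeanDescriptor MakeSpatialMeanExample()
{
    return MeanDescriptor(std::vector<unsigned int>{ 2, 3 }, /*keepDims=*/true);
}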
1194
1195/// A PadDescriptor for the PadLayer.
1197{
1200
1201 PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList,
1202 const float& padValue = 0,
1203 const PaddingMode& paddingMode = PaddingMode::Constant)
1204 : m_PadList(padList)
1205 , m_PadValue(padValue)
1206 , m_PaddingMode(paddingMode)
1207 {}
1208
1209 bool operator ==(const PadDescriptor& rhs) const
1210 {
1211 return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue && m_PaddingMode == rhs.m_PaddingMode;
1212 }
1213
1214 /// @brief Specifies the padding for input dimension.
1215 /// First is the number of values to add before the tensor in the dimension.
1216 /// Second is the number of values to add after the tensor in the dimension.
1217 /// The number of pairs should match the number of dimensions in the input tensor.
1218 std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1219
1220 /// Optional value to use for padding, defaults to 0
1222
1223 /// Specifies the Padding mode (Constant, Reflect or Symmetric)
1225};
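// Usage sketch (illustrative only; the helper name is hypothetical): zero-padding the spatial
// dimensions of a 4-D NCHW tensor by one element on each side; m_PadValue and m_PaddingMode
// keep their defaults (0 and Constant).
inline PadDescriptor MakeSpatialPadExample()
{
    const std::vector<std::pair<unsigned int, unsigned int>> padList =
        { { 0, 0 }, { 0, 0 }, { 1, 1 }, { 1, 1 } }; // one {before, after} pair per dimension
    return PadDescriptor(padList);
}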
1226
1227/// A SliceDescriptor for the SliceLayer.
1229{
1230 SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
1231 : m_Begin(begin)
1232 , m_Size(size)
1233 {}
1234
1237
1238 bool operator ==(const SliceDescriptor& rhs) const
1239 {
1240 return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
1241 }
1242
1243 /// Beginning indices of the slice in each dimension.
1244 std::vector<unsigned int> m_Begin;
1245
1246 /// Size of the slice in each dimension.
1247 std::vector<unsigned int> m_Size;
1248};
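// Usage sketch (illustrative only; the helper name is hypothetical): a 2x2 patch starting at
// (1, 1) in the spatial dimensions of a {1, 3, 4, 4} tensor.
inline SliceDescriptor MakeSpatialPatchSliceExample()
{
    return SliceDescriptor({ 0, 0, 1, 1 },  // m_Begin per dimension
                           { 1, 3, 2, 2 }); // m_Size per dimension
}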
1249
1250/// A StackDescriptor for the StackLayer.
1252{
1254 : m_Axis(0)
1255 , m_NumInputs(0)
1256 , m_InputShape()
1257 {}
1258
1259 StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1260 : m_Axis(axis)
1261 , m_NumInputs(numInputs)
1262 , m_InputShape(inputShape)
1263 {}
1264
1265 bool operator ==(const StackDescriptor& rhs) const
1266 {
1267 return m_Axis == rhs.m_Axis &&
1268 m_NumInputs == rhs.m_NumInputs &&
1270 }
1271
1272 /// 0-based axis along which to stack the input tensors.
1273 uint32_t m_Axis;
1274 /// Number of input tensors.
1275 uint32_t m_NumInputs;
1276 /// Required shape of all input tensors.
1278};
1279
1280/// A StandInDescriptor for the StandIn layer
1282{
1284
1285 StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
1286 : m_NumInputs(numInputs)
1287 , m_NumOutputs(numOutputs)
1288 {}
1289
1290 bool operator ==(const StandInDescriptor& rhs) const
1291 {
1292 return m_NumInputs == rhs.m_NumInputs &&
1294 }
1295
1296 /// Number of input tensors
1297 uint32_t m_NumInputs = 0;
1298 /// Number of output tensors
1299 uint32_t m_NumOutputs = 0;
1300};
1301
1302/// A StridedSliceDescriptor for the StridedSliceLayer.
1304{
1305 StridedSliceDescriptor(const std::vector<int>& begin,
1306 const std::vector<int>& end,
1307 const std::vector<int>& stride)
1308 : m_Begin(begin)
1309 , m_End(end)
1310 , m_Stride(stride)
1311 , m_BeginMask(0)
1312 , m_EndMask(0)
1313 , m_ShrinkAxisMask(0)
1314 , m_EllipsisMask(0)
1315 , m_NewAxisMask(0)
1317 {}
1318
1322
1324 {
1325 return m_Begin == rhs.m_Begin &&
1326 m_End == rhs.m_End &&
1327 m_Stride == rhs.m_Stride &&
1328 m_BeginMask == rhs.m_BeginMask &&
1329 m_EndMask == rhs.m_EndMask &&
1334 }
1335
1336 int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1337 int GetStopForAxis(const TensorShape& inputShape,
1338 unsigned int axis,
1339 int startForAxis) const;
1340
1341 /// Begin values for the input that will be sliced.
1342 std::vector<int> m_Begin;
1343 /// End values for the input that will be sliced.
1344 std::vector<int> m_End;
1345 /// Stride values for the input that will be sliced.
1346 std::vector<int> m_Stride;
1347
1348 /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1349 /// range is used for the dimension.
1351 /// @brief End mask value. If set, then the end is disregarded and the fullest range
1352 /// is used for the dimension.
1353 int32_t m_EndMask;
1354 /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1356 /// Ellipsis mask value.
1358 /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1359 /// a new 1 dimension is inserted to this location of the output tensor.
1361
1362 /// The data layout to be used (NCHW, NHWC).
1364};
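// Usage sketch (illustrative only; the helper name is hypothetical): take every second element
// of dimension 1 while taking dimension 0 in full by setting its bit in m_BeginMask/m_EndMask.
inline StridedSliceDescriptor MakeStridedSliceExample()
{
    StridedSliceDescriptor stridedDesc({ 0, 0 },  // begin per dimension
                                       { 0, 6 },  // end per dimension
                                       { 1, 2 }); // stride per dimension
    stridedDesc.m_BeginMask = 1 << 0; // begin of dimension 0 is disregarded
    stridedDesc.m_EndMask   = 1 << 0; // end of dimension 0 is disregarded
    return stridedDesc;
}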
1365
1366/// A PreCompiledDescriptor for the PreCompiledLayer.
1368{
1369 PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1370 : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1371 {}
1372
1374
1375 unsigned int m_NumInputSlots;
1376 unsigned int m_NumOutputSlots;
1377};
1378
1379/// A QLstmDescriptor for the QLstmLayer.
1381{
1383 : m_CellClip(0.0)
1384 , m_ProjectionClip(0.0)
1385 , m_CifgEnabled(true)
1386 , m_PeepholeEnabled(false)
1387 , m_ProjectionEnabled(false)
1388 , m_LayerNormEnabled(false)
1394 , m_HiddenStateScale(0.0)
1395 {}
1396
1412
1413 /// Clipping threshold value for the cell state
1415 /// Clipping threshold value for the projection
1417 /// Enable/disable CIFG (coupled input & forget gate).
1419 /// Enable/disable peephole
1421 /// Enable/disable the projection layer
1423 /// Enable/disable layer normalization
1425 /// Input intermediate quantization scale
1427 /// Forget intermediate quantization scale
1429 /// Cell intermediate quantization scale
1431 /// Output intermediate quantization scale
1433 /// Hidden State zero point
1435 /// Hidden State quantization scale
1437};
1438
1439/// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1441{
1443 m_PadLeft(0),
1444 m_PadRight(0),
1445 m_PadTop(0),
1446 m_PadBottom(0),
1447 m_StrideX(0),
1448 m_StrideY(0),
1449 m_BiasEnabled(false),
1452 {}
1453
1455 {
1456 return m_PadLeft == rhs.m_PadLeft &&
1457 m_PadRight == rhs.m_PadRight &&
1458 m_PadTop == rhs.m_PadTop &&
1459 m_PadBottom == rhs.m_PadBottom &&
1460 m_StrideX == rhs.m_StrideX &&
1461 m_StrideY == rhs.m_StrideY &&
1463 m_DataLayout == rhs.m_DataLayout &&
1466 }
1467
1468 /// Padding left value in the width dimension.
1469 uint32_t m_PadLeft;
1470 /// Padding right value in the width dimension.
1471 uint32_t m_PadRight;
1472 /// Padding top value in the height dimension.
1473 uint32_t m_PadTop;
1474 /// Padding bottom value in the height dimension.
1475 uint32_t m_PadBottom;
1476 /// Stride value when proceeding through input for the width dimension.
1477 uint32_t m_StrideX;
1478 /// Stride value when proceeding through input for the height dimension.
1479 uint32_t m_StrideY;
1480 /// Enable/disable bias.
1482 /// The data layout to be used (NCHW, NHWC).
1484 /// Output shape if it has been specified.
1486 std::vector<unsigned int> m_OutputShape;
1487};
1488
1489/// A TransposeDescriptor for the TransposeLayer.
1491{
1495
1497 : m_DimMappings(dimMappings)
1498 {}
1499
1500 bool operator ==(const TransposeDescriptor &rhs) const
1501 {
1502 return m_DimMappings.IsEqual(rhs.m_DimMappings);
1503 }
1504
1505 /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1506 /// source and target potentially have different memory layouts e.g.
1507 /// Input Shape {1, 1, 4, 4}
1508 /// Permutation Vector {0, 2, 3, 1}
1509 /// Output Shape {1, 4, 4, 1}
1510 /// dim "0" of input goes into index 0 ([ 1, X, X, X])
1511 /// dim "2" of input goes into index 1 ([ 1, 4, X, X ])
1512 /// dim "3" of input goes into index 2 ([ 1, 4, 4, X ])
1513 /// dim "1" of input goes into index 3 ([ 1, 4, 4, 1 ])
1515};
1516
1517/// A LogicalBinaryDescriptor for the LogicalBinaryLayer
1519{
1523
1525 : m_Operation(operation)
1526 {}
1527
1529 {
1530 return m_Operation == rhs.m_Operation;
1531 }
1532
1533 /// Specifies the logical operation to execute
1535};
1536
1537/// A ReduceDescriptor for the REDUCE operators.
1539{
1541 : m_KeepDims(false)
1542 , m_vAxis()
1544 {}
1545
1546 bool operator ==(const ReduceDescriptor& rhs) const
1547 {
1548 return m_KeepDims == rhs.m_KeepDims &&
1549 m_vAxis == rhs.m_vAxis &&
1551 }
1552
1553 /// If true, the reduced dimensions are retained with size 1, so the output rank is unchanged.
1555 /// The indices of the dimensions to reduce.
1556 std::vector<uint32_t> m_vAxis;
1557 /// Specifies the reduction operation to execute
1559};
1560
1561/// A ChannelShuffleDescriptor for the ChannelShuffle operator
1563{
1567
1568 ChannelShuffleDescriptor(const uint32_t& numGroups, const uint32_t& axis)
1569 : m_NumGroups(numGroups), m_Axis(axis)
1570 {}
1571
1573 {
1574 return m_NumGroups == rhs.m_NumGroups;
1575 }
1576
1577 /// Number of groups for the channel shuffle operation
1578 uint32_t m_NumGroups;
1579 /// Axis to apply channel shuffle operation on
1580 uint32_t m_Axis;
1581};
1582
1583/// A BatchMatMulDescriptor for the BatchMatMul operator
1585{
1586 BatchMatMulDescriptor(bool transposeX = false,
1587 bool transposeY = false,
1588 bool adjointX = false,
1589 bool adjointY = false,
1590 DataLayout dataLayoutX = DataLayout::NCHW,
1591 DataLayout dataLayoutY = DataLayout::NCHW)
1592 : m_TransposeX(transposeX)
1593 , m_TransposeY(transposeY)
1594 , m_AdjointX(adjointX)
1595 , m_AdjointY(adjointY)
1596 , m_DataLayoutX(dataLayoutX)
1597 , m_DataLayoutY(dataLayoutY)
1598 {}
1599
1600 bool operator ==(const BatchMatMulDescriptor &rhs) const
1601 {
1602 return m_TransposeX == rhs.m_TransposeX &&
1603 m_TransposeY == rhs.m_TransposeY &&
1604 m_AdjointX == rhs.m_AdjointX &&
1605 m_AdjointY == rhs.m_AdjointY &&
1608 }
1609
1610 /// Transpose the slices of each input tensor
1611 /// Transpose and Adjoint cannot both be set to true for the same tensor at the same time.
1614
1615 /// Adjoint the slices of each input tensor
1616 /// Transpose and Adjoint cannot both be set to true for the same tensor at the same time.
1619
1620 /// Data layout of each input tensor, such as NHWC/NDHWC (leave as default for arbitrary layout)
1623
1624 /// Static helper to get the two axes (for each input) for multiplication
1625 static std::pair<unsigned int, unsigned int> GetAxesToMul(
1626 DataLayout dataLayout,
1627 const TensorShape& tensorShape);
1628
1629 /// Static helper to get the axes (for each input) that will not be multiplied together
1630 static std::vector<unsigned int> GetAxesNotMul(
1631 DataLayout dataLayout,
1632 const TensorShape& tensorShape);
1633
1634 /// Static helper to get the axes which will be transposed
1636 DataLayout dataLayout,
1637 const TensorShape& tensorShape);
1638};
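// Usage sketch (illustrative only; the helper name is hypothetical): batched matrix multiply
// where the slices of the second input are transposed before multiplication.
inline BatchMatMulDescriptor MakeTransposedBatchMatMulExample()
{
    return BatchMatMulDescriptor(/*transposeX=*/false, /*transposeY=*/true);
}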
1639
1641{
1643 : m_Multiples()
1644 {}
1645
1646 explicit TileDescriptor(std::vector<uint32_t> multiples)
1647 : m_Multiples(std::move(multiples))
1648 {}
1649
1650 bool operator ==(const TileDescriptor& rhs) const
1651 {
1652 return m_Multiples == rhs.m_Multiples;
1653 }
1654
1655 /// The vector to multiply the input shape by
1656 std::vector<uint32_t> m_Multiples;
1657};
1658
1660{
1664
1665 explicit BroadcastToDescriptor(const TensorShape& shape)
1666 : m_BroadcastToShape(shape)
1667 {}
1668
1669 bool operator ==(const BroadcastToDescriptor& rhs) const
1670 {
1672 }
1673
1674 /// Target shape value.
1676};
1677
1678/// A ScatterNdDescriptor for the ScatterNdLayer.
1680{
1681 // default constructor
1684 , m_InputEnabled(true)
1685 , m_Axis(0)
1686 , m_AxisEnabled(false)
1687 {}
1688
1689 // constructor for operators except for ScatterElement operator
1691 bool inputEnabled)
1692 : m_Function(function)
1693 , m_InputEnabled(inputEnabled)
1694 , m_Axis(0)
1695 , m_AxisEnabled(false)
1696
1697 {}
1698
1699 // constructor for ScatterElement operator
1701 bool inputEnabled,
1702 int32_t axis)
1703 : m_Function(function)
1704 , m_InputEnabled(inputEnabled)
1705 , m_Axis(axis)
1706 , m_AxisEnabled(true)
1707
1708 {}
1709
1710 bool operator ==(const ScatterNdDescriptor &rhs) const
1711 {
1712 return ((m_Function == rhs.m_Function) &&
1713 (m_InputEnabled == rhs.m_InputEnabled) &&
1714 (m_AxisEnabled == rhs.m_AxisEnabled) &&
1715 (m_Axis == rhs.m_Axis));
1716 }
1717
1718 /// Specify if the function is update, add, sub, max or min.
1720
1721 /// Flag to show if input tensor is accepted.
1723
1724 /// Extra attribute for ScatterElement, set to 0 by default; axis != 0 is not currently supported.
1725 int32_t m_Axis;
1726
1727 /// Flag for ScatterElement, set to false by default; m_AxisEnabled = true is not currently supported.
1729};
1730
1731} // namespace armnn