Compute Library
 23.11
activation_float_helpers.h File Reference
#include "helpers.h"

Go to the source code of this file.

Macros

#define MLA(a, b, c)   ((b) * (c) + (a))
 
#define hard_swish_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))
 
#define logistic_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))
 
#define tanh_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x))
 
#define relu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (max((DATA_TYPE)0.0, x))
 
#define brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x)))
 
#define lu_brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
 
#define lrelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))
 
#define srelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (log((DATA_TYPE)1.0 + exp(x)))
 
#define elu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)
 
#define abs_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (fabs(x))
 
#define square_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (x * x)
 
#define sqrt_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (sqrt(x))
 
#define linear_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))
 
#define gelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (x * (DATA_TYPE)0.5 * ((DATA_TYPE)1.0 + erf(x / (DATA_TYPE)1.41421356237)))
 
#define identity_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   (x)
 
#define ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   op##_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)
 
#define ACTIVATION(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)   ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)
 

Macro Definition Documentation

◆ abs_op

#define abs_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (fabs(x))

Definition at line 65 of file activation_float_helpers.h.

◆ ACT_OP

#define ACT_OP (   op,
  DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    op##_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)

Definition at line 83 of file activation_float_helpers.h.

◆ ACTIVATION

#define ACTIVATION (   op,
  DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)

Definition at line 85 of file activation_float_helpers.h.

◆ brelu_op

#define brelu_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x)))

Definition at line 47 of file activation_float_helpers.h.

◆ elu_op

#define elu_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)
Value:
(select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, \
(SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))isgreaterequal(x, (DATA_TYPE)0.0)))

Definition at line 60 of file activation_float_helpers.h.

◆ gelu_op

#define gelu_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (x * (DATA_TYPE)0.5 * ((DATA_TYPE)1.0 + erf(x / (DATA_TYPE)1.41421356237)))

Definition at line 77 of file activation_float_helpers.h.

◆ hard_swish_op

#define hard_swish_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))

Definition at line 34 of file activation_float_helpers.h.

◆ identity_op

#define identity_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (x)

Definition at line 81 of file activation_float_helpers.h.

◆ linear_op

#define linear_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))

Definition at line 74 of file activation_float_helpers.h.

◆ logistic_op

#define logistic_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))

Definition at line 38 of file activation_float_helpers.h.

◆ lrelu_op

#define lrelu_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))

Definition at line 53 of file activation_float_helpers.h.

◆ lu_brelu_op

#define lu_brelu_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))

Definition at line 50 of file activation_float_helpers.h.

◆ MLA

#define MLA (   a,
  b,
  c 
)    ((b) * (c) + (a))

Definition at line 30 of file activation_float_helpers.h.

◆ relu_op

#define relu_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (max((DATA_TYPE)0.0, x))

Definition at line 44 of file activation_float_helpers.h.

◆ sqrt_op

#define sqrt_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (sqrt(x))

Definition at line 71 of file activation_float_helpers.h.

◆ square_op

#define square_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (x * x)

Definition at line 68 of file activation_float_helpers.h.

◆ srelu_op

#define srelu_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    (log((DATA_TYPE)1.0 + exp(x)))

Definition at line 57 of file activation_float_helpers.h.

◆ tanh_op

#define tanh_op (   DATA_TYPE,
  VEC_SIZE,
  x,
  A_VAL,
  B_VAL 
)    ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x))

Definition at line 41 of file activation_float_helpers.h.

VEC_SIZE
#define VEC_SIZE
Definition: qlstm_layer_normalization.cl:54
arm_compute::test::validation::reference::select
SimpleTensor< T > select(const SimpleTensor< uint8_t > &c, const SimpleTensor< T > &x, const SimpleTensor< T > &y)
Definition: Select.cpp:38
SELECT_VEC_DATA_TYPE
#define SELECT_VEC_DATA_TYPE(type, size)
Definition: helpers.h:787