void arm_nn_softmax_common_s8 (const int8_t *input, const int32_t num_rows, const int32_t row_size, const int32_t mult, const int32_t shift, const int32_t diff_min, const bool int16_output, void *output)
    Common softmax function for s8 input and s8 or s16 output.
Support functions for Softmax
◆ arm_nn_softmax_common_s8()
void arm_nn_softmax_common_s8 ( const int8_t *  input,
                                const int32_t   num_rows,
                                const int32_t   row_size,
                                const int32_t   mult,
                                const int32_t   shift,
                                const int32_t   diff_min,
                                const bool      int16_output,
                                void *          output )
- Parameters
  [in]  input         Pointer to the input tensor
  [in]  num_rows      Number of rows in the input tensor
  [in]  row_size      Number of elements in each input row
  [in]  mult          Input quantization multiplier
  [in]  shift         Input quantization shift, within the range [0, 31]
  [in]  diff_min      Minimum difference from the row maximum, used to check whether the quantized exponential operation can be performed
  [in]  int16_output  false for s8 output, true for s16 output
  [out] output        Pointer to the output tensor
- Note
- Supported framework: TensorFlow Lite micro (bit-accurate)
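
A minimal usage sketch follows. It assumes the declaration lives in arm_nnsupportfunctions.h (the CMSIS-NN support-function header); the input data and the quantization parameters mult, shift and diff_min are placeholders chosen for illustration only. In practice these values come from the framework's softmax preparation step (e.g. TensorFlow Lite Micro), not from hard-coded constants.

    #include <stdbool.h>
    #include <stdint.h>

    #include "arm_nnsupportfunctions.h"  /* assumed to declare arm_nn_softmax_common_s8 */

    void example_softmax_s8(void)
    {
        /* Two rows of four s8 activations; the values are placeholders. */
        const int8_t input[2 * 4] = { -10, 0, 5, 20,
                                        3, 3, 3, 3 };
        int8_t output[2 * 4];

        /* Illustrative quantization parameters only: mult, shift and diff_min
         * are normally computed by the framework from the softmax scaling. */
        const int32_t mult     = 1077952576;
        const int32_t shift    = 23;
        const int32_t diff_min = -248;

        /* int16_output = false selects s8 output, written through the void *output. */
        arm_nn_softmax_common_s8(input, 2, 4, mult, shift, diff_min, false, output);
    }

Because output is a void pointer, the caller is responsible for passing a buffer of the correct element type: int8_t storage when int16_output is false, int16_t storage when it is true.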