Compute Library 22.05 - CLSoftmaxLayer.h
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLSOFTMAXLAYER_H
#define ARM_COMPUTE_CLSOFTMAXLAYER_H

#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"

#include <memory>

namespace arm_compute
{
class ICLTensor;
class ITensorInfo;
class CLCompileContext;

/** Basic function to compute a SoftmaxLayer.
 *
 * Softmax is calculated by:
 * @f[ out = exp((x - max(x)) * beta) / sum(exp((x - max(x)) * beta)) @f]
 *
 * Log Softmax is calculated by:
 * @f[ out = (x - max(x)) * beta - log(\sum{e^{(x - max(x)) * beta}}) @f]
 *
 * This function runs the following operators/kernels:
 * -# If axis is not 0:
 * -# @ref opencl::ClPermute
 * -# @ref opencl::kernels::ClLogits1DNormKernel
 * -# @ref opencl::kernels::ClLogits1DMaxShiftExpSumKernel
 */
template <bool IS_LOG = false>
class CLSoftmaxLayerGeneric : public IFunction
{
public:
    /** Constructor */
    CLSoftmaxLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Default destructor */
    ~CLSoftmaxLayerGeneric();
    /** Set the input and output tensors.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src            |dst            |
     * |:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |F16            |F16            |
     * |F32            |F32            |
     *
     * @param[in]  input  Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
     * @param[out] output Destination tensor. Data types supported: same as @p input
     * @param[in]  beta   (Optional) A scaling factor for the exponent. Defaults to 1.f
     * @param[in]  axis   (Optional) The dimension in which to apply the function. E.g. for input of shape 4x5x6 and
     *                    axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
     */
    void configure(const ICLTensor *input, ICLTensor *output, float beta = 1.0f, int32_t axis = 0);
    /** Set the input and output tensors.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
     * @param[out] output          Destination tensor. Data types supported: same as @p input
     * @param[in]  beta            (Optional) A scaling factor for the exponent. Defaults to 1.f
     * @param[in]  axis            (Optional) The dimension in which to apply the function. E.g. for input of shape 4x5x6 and
     *                             axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta = 1.0f, int32_t axis = 0);
    /** Static function to check if given info will lead to a valid configuration of @ref CLSoftmaxLayer
     *
     * @param[in] input  Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
     * @param[in] output Destination tensor. Data types supported: same as @p input
     * @param[in] beta   (Optional) A scaling factor for the exponent. Defaults to 1.f
     * @param[in] axis   (Optional) The dimension in which to apply the function. E.g. for input of shape 4x5x6 and
     *                   axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f, int32_t axis = 0);

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

using CLSoftmaxLayer    = CLSoftmaxLayerGeneric<false>;
using CLLogSoftmaxLayer = CLSoftmaxLayerGeneric<true>;
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLSOFTMAXLAYER_H */
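To make the two formulas in the class comment concrete, here is a small standalone C++ sketch (not part of the library; the helper name softmax_ref and the sample values are illustrative only). It applies the same max-shift trick the kernels use so that the exponentials cannot overflow.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Reference computation over a single vector:
//   softmax:     exp((x - max(x)) * beta) / sum(exp((x - max(x)) * beta))
//   log softmax: (x - max(x)) * beta - log(sum(exp((x - max(x)) * beta)))
std::vector<float> softmax_ref(const std::vector<float> &x, float beta, bool is_log)
{
    const float max_x = *std::max_element(x.begin(), x.end());

    std::vector<float> out(x.size());
    float sum = 0.f;
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        out[i] = std::exp((x[i] - max_x) * beta); // shifted exponential, always <= 1
        sum += out[i];
    }

    for(std::size_t i = 0; i < x.size(); ++i)
    {
        out[i] = is_log ? (x[i] - max_x) * beta - std::log(sum) : out[i] / sum;
    }
    return out;
}

int main()
{
    for(float p : softmax_ref({ 1.f, 2.f, 3.f }, 1.0f, /* is_log */ false))
    {
        std::printf("%f\n", p); // ~0.090031 0.244728 0.665241
    }
    return 0;
}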
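As for how the configure()/validate()/run() interface is typically driven, the following is a minimal usage sketch, not a definitive recipe. It assumes the usual one-time OpenCL setup through CLScheduler::get().default_init(), an illustrative 128x32 F32 tensor shape, and softmax along axis 0; filling the source tensor (e.g. via map()/unmap()) and error handling are left out.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

using namespace arm_compute;

int main()
{
    // One-time initialisation of the OpenCL context/queue shared by all CL functions.
    CLScheduler::get().default_init();

    // Illustrative shape: 128 logits per row, 32 rows; softmax is applied along axis 0.
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));

    // Optional up-front check of the configuration.
    const Status status = CLSoftmaxLayer::validate(src.info(), dst.info(), /* beta */ 1.0f, /* axis */ 0);
    ARM_COMPUTE_ERROR_THROW_ON(status);

    // Configure the function, then allocate the backing CL buffers.
    CLSoftmaxLayer softmax;
    softmax.configure(&src, &dst, 1.0f, 0);
    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src here (e.g. map()/unmap()) ...

    softmax.run();              // enqueue the kernels
    CLScheduler::get().sync();  // wait for completion
    return 0;
}

CLLogSoftmaxLayer (the IS_LOG = true alias) is driven the same way; only the interpretation of the result changes.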