Compute Library
 22.08
CpuActivationKernel.cpp
/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/kernels/CpuActivationKernel.h"

#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "src/core/CPP/Validate.h"
#include "src/core/common/Registrars.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include "src/cpu/kernels/activation/list.h"

#include <array>
namespace arm_compute
{
namespace cpu
{
namespace kernels
{
namespace
{
static const std::vector<CpuActivationKernel::ActivationKernel> available_kernels =
{
#ifdef __aarch64__
    { // Neon LUT implementation takes precedence
        "neon_qu8_activation_lut",
        [](const ActivationDataTypeISASelectorData & data) { return ActivationLayerInfo::is_lut_supported(data.f, data.dt); },
        REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_activation_lut)
    },
#endif // __aarch64__
    {
        "sve2_qu8_activation",
        [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.isa.sve2; },
        REGISTER_QASYMM8_SVE2(arm_compute::cpu::sve2_qasymm8_activation)
    },
    {
        "sve2_qs8_activation",
        [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED && data.isa.sve2; },
        REGISTER_QASYMM8_SIGNED_SVE2(arm_compute::cpu::sve2_qasymm8_signed_activation)
    },
    {
        "sve2_qs16_activation",
        [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QSYMM16 && data.isa.sve2; },
        REGISTER_QSYMM16_SVE2(arm_compute::cpu::sve2_qsymm16_activation)
    },
    {
        "sve_fp16_activation",
        [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.sve && data.isa.fp16; },
        REGISTER_FP16_SVE(arm_compute::cpu::sve_fp16_activation)
    },
    {
        "sve_fp32_activation",
        [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::F32 && data.isa.sve; },
        REGISTER_FP32_SVE(arm_compute::cpu::sve_fp32_activation)
    },
    {
        "neon_fp16_activation",
        [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.fp16; },
        REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_activation)
    },
    {
        "neon_fp32_activation",
        [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::F32; },
        REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_activation)
    },
    {
        "neon_qu8_activation",
        [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8; },
        REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_activation)
    },
    {
        "neon_qs8_activation",
        [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED; },
        REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::neon_qasymm8_signed_activation)
    },
    {
        "neon_qs16_activation",
        [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QSYMM16; },
        REGISTER_QSYMM16_NEON(arm_compute::cpu::neon_qsymm16_activation)
    },
};

/* Supported activations in the 8-bit integer domain */
static const std::array<ActivationLayerInfo::ActivationFunction, 7> qasymm8_activations =
{
    ActivationLayerInfo::ActivationFunction::RELU,
    ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
    ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
    ActivationLayerInfo::ActivationFunction::LOGISTIC,
    ActivationLayerInfo::ActivationFunction::TANH,
    ActivationLayerInfo::ActivationFunction::HARD_SWISH,
    ActivationLayerInfo::ActivationFunction::LEAKY_RELU,
};
/* Supported activations in the 16-bit integer domain */
static const std::array<ActivationLayerInfo::ActivationFunction, 4> qsymm16_activations =
{
    ActivationLayerInfo::ActivationFunction::LOGISTIC,
    ActivationLayerInfo::ActivationFunction::TANH,
    ActivationLayerInfo::ActivationFunction::HARD_SWISH,
    ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
};

Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ActivationLayerInfo &activation_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::QSYMM16, DataType::F16, DataType::F32);

    const auto *uk = CpuActivationKernel::get_implementation(ActivationDataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa(), activation_info.activation() });

    ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);

    const DataType data_type = src->data_type();
    const QuantizationInfo &oq_info = (dst != nullptr) ? dst->quantization_info() : src->quantization_info();
    const ActivationLayerInfo::ActivationFunction f_act = activation_info.activation();

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized_asymmetric(data_type) && (std::find(std::begin(qasymm8_activations), std::end(qasymm8_activations), f_act) == std::end(qasymm8_activations)),
                                    "For QASYMM8 only hard swish, leaky relu, tanh, logistic, relu and lower/upper bounded relu are supported");

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized_symmetric(data_type) && (std::find(std::begin(qsymm16_activations), std::end(qsymm16_activations), f_act) == std::end(qsymm16_activations)),
                                    "For QSYMM16 only tanh and logistic are supported");
    ARM_COMPUTE_RETURN_ERROR_ON(data_type == DataType::QASYMM8 && (f_act == ActivationLayerInfo::ActivationFunction::TANH)
                                && (oq_info != QuantizationInfo(1.f / 128.f, 128)));
    ARM_COMPUTE_RETURN_ERROR_ON(data_type == DataType::QASYMM8 && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
                                && (oq_info != QuantizationInfo(1.f / 256.f, 0)));

    ARM_COMPUTE_RETURN_ERROR_ON(data_type == DataType::QASYMM8_SIGNED && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 128.f, 0)));
    ARM_COMPUTE_RETURN_ERROR_ON(data_type == DataType::QASYMM8_SIGNED && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 256.f, -128)));

    ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_symmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 32768.f, 0)));
    ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_symmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 32768.f, 0)));

    // Checks performed when dst is configured
    if((dst != nullptr) && (dst->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *src, ITensorInfo *dst)
{
    // Configure kernel window
    Window win = calculate_max_window(*src, Steps());

    if(dst != nullptr)
    {
        // dst auto initialization if not yet initialized
        auto_init_if_empty(*dst, *src->clone());
    }

    return std::make_pair(Status{}, win);
}
} // namespace

void CpuActivationKernel::configure(const ITensorInfo *src, ITensorInfo *dst, ActivationLayerInfo activation_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, activation_info));

    const auto uk = CpuActivationKernel::get_implementation(ActivationDataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa(), activation_info.activation() });

    ARM_COMPUTE_ERROR_ON_NULLPTR(uk);

    _run_method = uk->ukernel;
    _name = std::string("CpuActivationKernel").append("/").append(uk->name);

#ifdef __aarch64__
    if(ActivationLayerInfo::is_lut_supported(activation_info.activation(), src->data_type()))
    {
        activation_info.init_lut(src->quantization_info().uniform(), (dst) ? dst->quantization_info().uniform() : src->quantization_info().uniform());
    }
#endif // __aarch64__
    _act_info = activation_info;

    // Configure kernel window
    auto win_config = validate_and_configure_window(src, dst);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    ICPPKernel::configure(win_config.second);
}

Status CpuActivationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_UNUSED(act_info);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, act_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), (dst != nullptr) ? dst->clone().get() : nullptr).first);

    return Status{};
}

size_t CpuActivationKernel::get_mws(const CPUInfo &platform, size_t thread_count) const
{
    ARM_COMPUTE_UNUSED(thread_count);
    ARM_COMPUTE_UNUSED(platform);

    return ICPPKernel::default_mws;
}

void CpuActivationKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    // Early exit on disabled activation
    if(!_act_info.enabled())
    {
        return;
    }

    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    ARM_COMPUTE_ERROR_ON(tensors.empty());
    ARM_COMPUTE_ERROR_ON(_run_method == nullptr);

    const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
    ITensor *dst = tensors.get_tensor(TensorType::ACL_DST);

    _run_method(src, dst, _act_info, window);
}

const char *CpuActivationKernel::name() const
{
    return _name.c_str();
}

const std::vector<CpuActivationKernel::ActivationKernel> &CpuActivationKernel::get_available_kernels()
{
    return available_kernels;
}
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
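
CpuActivationKernel is an internal component (it lives under src/cpu and is normally driven by higher-level operators such as CpuActivation and NEActivationLayer rather than called directly). As a rough illustration of the configure()/validate()/run_op() flow in this file, the following minimal sketch runs the kernel on the calling thread. It is not part of the library sources: it assumes it is compiled inside the library tree so the src/ headers are visible, and the tensor shape, F32 data type and RELU activation are illustrative choices only.

// Minimal sketch (not from the library): drives CpuActivationKernel directly.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/CPP/CPPTypes.h"
#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/cpu/kernels/CpuActivationKernel.h"

using namespace arm_compute;

int main()
{
    // Illustrative 16x16 F32 tensors; any data type handled by available_kernels would do.
    TensorInfo src_info(TensorShape(16U, 16U), 1, DataType::F32);
    TensorInfo dst_info(TensorShape(16U, 16U), 1, DataType::F32);
    const ActivationLayerInfo act(ActivationLayerInfo::ActivationFunction::RELU);

    // validate() runs the same checks as validate_arguments() above.
    ARM_COMPUTE_ERROR_THROW_ON(cpu::kernels::CpuActivationKernel::validate(&src_info, &dst_info, act));

    // configure() selects a ukernel from available_kernels and sets the execution window.
    cpu::kernels::CpuActivationKernel kernel;
    kernel.configure(&src_info, &dst_info, act);

    // Allocate backing memory for the operands.
    Tensor src, dst;
    src.allocator()->init(src_info);
    dst.allocator()->init(dst_info);
    src.allocator()->allocate();
    dst.allocator()->allocate();

    // Pack the operands and execute over the kernel's maximum window on this thread.
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC, &src);
    pack.add_tensor(TensorType::ACL_DST, &dst);
    kernel.run_op(pack, kernel.window(), ThreadInfo{});

    return 0;
}

In the library itself the scheduler splits the kernel's maximum window into per-thread subwindows before calling run_op(); running it over the full window, as above, is simply the single-threaded case.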