Compute Library 21.02
NEInstanceNormalizationLayerKernel.h
/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYERKERNEL_H
#define ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYERKERNEL_H

#include "src/core/NEON/INEKernel.h"

namespace arm_compute
{
class ITensor;
struct InstanceNormalizationLayerKernelInfo;

/** Interface for performing an instance normalization */
class NEInstanceNormalizationLayerKernel : public INEKernel
{
public:
    const char *name() const override
    {
        return "NEInstanceNormalizationLayerKernel";
    }
    /** Default constructor */
    NEInstanceNormalizationLayerKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEInstanceNormalizationLayerKernel(const NEInstanceNormalizationLayerKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEInstanceNormalizationLayerKernel &operator=(const NEInstanceNormalizationLayerKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEInstanceNormalizationLayerKernel(NEInstanceNormalizationLayerKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEInstanceNormalizationLayerKernel &operator=(NEInstanceNormalizationLayerKernel &&) = default;
    /** Default destructor */
    ~NEInstanceNormalizationLayerKernel() = default;
    /** Set the input and output tensors.
     *
     * @param[in, out] input  Source tensor. Data types supported: F16/F32. Data layout supported: NCHW
     *                        In case of @p output tensor = nullptr this tensor will store the result of the normalization.
     * @param[out]     output Destination tensor. Data types and data layouts supported: same as @p input.
     * @param[in]      info   Kernel meta-data descriptor
     */
    void configure(ITensor *input, ITensor *output, const InstanceNormalizationLayerKernelInfo &info);

    /** Static function to check if given info will lead to a valid configuration of @ref NEInstanceNormalizationLayer.
     *
     * @param[in] input  Source tensor info. Data types supported: F16/F32. Data layout supported: NCHW
     * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
     * @param[in] info   Kernel meta-data descriptor
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info);

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    /** Common signature for all the specialized instance normalization functions
     *
     * @param[in, out] input   An input tensor. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
     * @param[out]     output  The output tensor.
     * @param[in]      gamma   The scale scalar value applied to the normalized tensor. Defaults to 1.0
     * @param[in]      beta    The offset scalar value applied to the normalized tensor. Defaults to 0.0
     * @param[in]      epsilon Lower bound value for the normalization. Defaults to 1e-12
     */
    using NormalizationFunction = void(ITensor *input, ITensor *output, float gamma, float beta, float epsilon, const Window &window);
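    // The specialized functions implement instance normalization on NCHW tensors:
    // each sample and channel is normalized independently using the mean and variance
    // computed over the spatial (H, W) dimensions,
    //   y = gamma * (x - mean) / sqrt(variance + epsilon) + beta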

    NormalizationFunction *_func;
    ITensor               *_input;
    ITensor               *_output;
    float                  _gamma;
    float                  _beta;
    float                  _epsilon;
    bool                   _use_mixed_precision{ true };
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYERKERNEL_H */
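
A minimal usage sketch for this kernel, assuming it is driven directly through NEScheduler in the way the NEInstanceNormalizationLayer runtime function drives it internally. The tensor shape, the gamma/beta/epsilon values, the choice of Window::DimZ as the split dimension and the internal kernel include path are illustrative assumptions; application code would normally use the public NEInstanceNormalizationLayer function instead.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h"

using namespace arm_compute;

int main()
{
    // One sample, three channels, 8x8 spatial size, FP32, NCHW (shape order is W, H, C, N).
    Tensor src;
    Tensor dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U, 1U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U, 1U), 1, DataType::F32));

    // gamma = 1, beta = 0, epsilon = 1e-12, mixed precision enabled (illustrative values,
    // constructor arguments as declared in arm_compute/core/KernelDescriptors.h).
    const InstanceNormalizationLayerKernelInfo norm_info(1.0f, 0.0f, 1e-12f, true);

    // Check that the configuration is valid before configuring the kernel.
    ARM_COMPUTE_ERROR_THROW_ON(NEInstanceNormalizationLayerKernel::validate(src.info(), dst.info(), norm_info));

    NEInstanceNormalizationLayerKernel kernel;
    kernel.configure(&src, &dst, norm_info);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with input data here ...

    // Execute the kernel over its maximum window.
    NEScheduler::get().schedule(&kernel, Window::DimZ);
    return 0;
}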