Compute Library
 22.08
NEPixelWiseMultiplication.h
/*
 * Copyright (c) 2016-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H
#define ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"

#include <memory>

namespace arm_compute
{
class ITensor;
class ITensorInfo;

/** Basic function to run @ref cpu::CpuMul */
class NEPixelWiseMultiplication : public IFunction
{
public:
    /** Default Constructor */
    NEPixelWiseMultiplication();
    /** Default Destructor */
    ~NEPixelWiseMultiplication();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEPixelWiseMultiplication(const NEPixelWiseMultiplication &) = delete;
    /** Default move constructor */
    NEPixelWiseMultiplication(NEPixelWiseMultiplication &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEPixelWiseMultiplication &operator=(const NEPixelWiseMultiplication &) = delete;
    /** Default move assignment operator */
    NEPixelWiseMultiplication &operator=(NEPixelWiseMultiplication &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |QSYMM16        |QSYMM16        |S32            |
     * |U8             |U8             |U8             |
     * |U8             |U8             |S16            |
     * |U8             |S16            |S16            |
     * |S16            |U8             |S16            |
     * |S16            |S16            |S16            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
     *
     * @param[in, out] input1          An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          An input tensor. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), QASYMM8_SIGNED (only if @p input1 is QASYMM8_SIGNED), S16, S32, QSYMM16 (only if @p input1 is QSYMM16), F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported:
     *                                 - U8, only if both inputs are U8.
     *                                 - QASYMM8, only if both inputs are QASYMM8.
     *                                 - QASYMM8_SIGNED, only if @p input1 is QASYMM8_SIGNED.
     *                                 - S16.
     *                                 - QSYMM16, only if both inputs are QSYMM16.
     *                                 - S32, only if both inputs are S32 or both are QSYMM16.
     *                                 - F16, only if @p input1 is F16.
     *                                 - F32, only if both inputs are F32.
     * @param[in]      scale           Scale to apply after multiplication.
     *                                 Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     *                                 If @p input1, @p input2 and @p output are all of data type S32, scale cannot be 1/255.
     * @param[in]      overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs is of quantized data type.
     * @param[in]      rounding_policy Rounding policy.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    void configure(const ITensor *input1, const ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplication
     *
     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
     *
     * @param[in] input1          An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     * @param[in] input2          An input tensor info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), QASYMM8_SIGNED (only if @p input1 is QASYMM8_SIGNED), S16, S32, QSYMM16 (only if both inputs are QSYMM16), F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
     * @param[in] output          Output tensor info. Data types supported:
     *                            - U8, only if both inputs are U8.
     *                            - QASYMM8, only if both inputs are QASYMM8.
     *                            - QASYMM8_SIGNED, only if @p input1 is QASYMM8_SIGNED.
     *                            - S16.
     *                            - QSYMM16, only if both inputs are QSYMM16.
     *                            - S32, only if both inputs are S32 or both are QSYMM16.
     *                            - F16, only if @p input1 is F16.
     *                            - F32, only if both inputs are F32.
     * @param[in] scale           Scale to apply after multiplication.
     *                            Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     *                            If @p input1, @p input2 and @p output are all of data type S32, scale cannot be 1/255.
     * @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs is of quantized data type.
     * @param[in] rounding_policy Rounding policy.
     * @param[in] act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

/** Basic function to run @ref cpu::CpuComplexMul. */
class NEComplexPixelWiseMultiplication : public IFunction
{
public:
    /** Default Constructor */
    NEComplexPixelWiseMultiplication();
    /** Default Destructor */
    ~NEComplexPixelWiseMultiplication();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEComplexPixelWiseMultiplication(const NEComplexPixelWiseMultiplication &) = delete;
    /** Default move constructor */
    NEComplexPixelWiseMultiplication(NEComplexPixelWiseMultiplication &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEComplexPixelWiseMultiplication &operator=(const NEComplexPixelWiseMultiplication &) = delete;
    /** Default move assignment operator */
    NEComplexPixelWiseMultiplication &operator=(NEComplexPixelWiseMultiplication &&);
    /** Initialise the kernel's inputs and output.
     *
     * @param[in, out] input1   An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   The output tensor. Data types supported: same as @p input1. Number of channels: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    void configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplication
     *
     * @param[in] input1   An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     * @param[in] input2   An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] output   The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H */
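The header only declares the interface. The short sketch below shows how the function is typically driven from application code: initialise tensor metadata, call configure(), allocate backing memory, then run(). It is not taken from the library documentation; the 32x32 shape, the F32 data type, the scale of 1.0 and the fill step are illustrative assumptions.

// Minimal usage sketch (illustrative, not part of the header).
#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor input1, input2, output;
    const TensorShape shape(32U, 32U);
    input1.allocator()->init(TensorInfo(shape, 1, DataType::F32));
    input2.allocator()->init(TensorInfo(shape, 1, DataType::F32));
    output.allocator()->init(TensorInfo(shape, 1, DataType::F32));

    NEPixelWiseMultiplication mul;
    // scale = 1.0 is 1/2^0, so RoundingPolicy::TO_ZERO applies (see the note above).
    // The overflow policy is irrelevant for float inputs; WRAP is disallowed only for quantized inputs.
    mul.configure(&input1, &input2, &output, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);

    // Allocate backing memory after configure().
    input1.allocator()->allocate();
    input2.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input1 and input2 ...

    mul.run();
    return 0;
}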
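For NEComplexPixelWiseMultiplication the inputs are 2-channel F32 tensors holding interleaved real and imaginary parts. A hedged sketch, assuming the same includes and using-directive as the previous example; the 16x16 shape and the error handling are illustrative, and the static validate() helper is used here to check the configuration before configure():

// Complex elementwise multiplication sketch (illustrative).
Tensor a, b, out;
const TensorShape shape(16U, 16U);
a.allocator()->init(TensorInfo(shape, 2, DataType::F32));
b.allocator()->init(TensorInfo(shape, 2, DataType::F32));
out.allocator()->init(TensorInfo(shape, 2, DataType::F32));

// Check the configuration up front.
const Status status = NEComplexPixelWiseMultiplication::validate(a.info(), b.info(), out.info());
if(status.error_code() != ErrorCode::OK)
{
    // handle the error, e.g. inspect status.error_description()
}

NEComplexPixelWiseMultiplication cmul;
cmul.configure(&a, &b, &out);

a.allocator()->allocate();
b.allocator()->allocate();
out.allocator()->allocate();

// ... fill a and b with interleaved real/imaginary values ...

cmul.run();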