CLDirectConvolutionLayer.h (Compute Library 21.11)
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYER_H
#define ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYER_H

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"

#include <memory>

namespace arm_compute
{
class CLCompileContext;
class ICLTensor;
class ITensorInfo;

/** Basic function to execute a direct convolution. */
class CLDirectConvolutionLayer : public IFunction
{
public:
    /** Constructor */
    CLDirectConvolutionLayer();
    /** Destructor */
    ~CLDirectConvolutionLayer();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLDirectConvolutionLayer(const CLDirectConvolutionLayer &) = delete;
    /** Default move constructor */
    CLDirectConvolutionLayer(CLDirectConvolutionLayer &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLDirectConvolutionLayer &operator=(const CLDirectConvolutionLayer &) = delete;
    /** Default move assignment operator */
    CLDirectConvolutionLayer &operator=(CLDirectConvolutionLayer &&);
    /** Set the input and output tensors.
     *
     * Valid data layouts:
     * - NHWC
     * - NCHW
     *
     * Valid data type configurations:
     * |src0           |src1           |src2   |dst            |
     * |:--------------|:--------------|:------|:--------------|
     * |F16            |F16            |F16    |F16            |
     * |F32            |F32            |F32    |F32            |
     * |QASYMM8        |QASYMM8        |S32    |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |S32    |QASYMM8_SIGNED |
     *
     * @param[in]  input     Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                       while every optional dimension from 4 and above represent a batch of inputs.
     *                       Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
     * @param[in]  weights   Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases    Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                       Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type.
     * @param[out] output    Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                       Data types supported: Same as @p input.
     * @param[in]  conv_info Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  act_info  (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Set the input and output tensors.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                             while every optional dimension from 4 and above represent a batch of inputs.
     *                             Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
     * @param[in]  weights         Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases          Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                             Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type.
     * @param[out] output          Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                             Data types supported: Same as @p input.
     * @param[in]  conv_info       Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayer
     *
     * @param[in] input     Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                      while every optional dimension from 4 and above represent a batch of inputs.
     *                      Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
     * @param[in] weights   Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in] biases    Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                      Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type.
     * @param[in] output    Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                      Data types supported: Same as @p input.
     * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in] act_info  (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYER_H */
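
The snippet below is a minimal usage sketch of this interface, not part of the header itself. It assumes the OpenCL runtime has already been initialised via CLScheduler::get().default_init(), and the tensor shapes, padding/stride values and F32 data type are illustrative placeholders chosen for a 3x3 kernel rather than values taken from this file.

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    // Set up the default OpenCL context and command queue.
    CLScheduler::get().default_init();

    // Illustrative NCHW shapes: a 32x32 input with 3 channels and 16 filters of 3x3 (placeholder values).
    CLTensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));

    // Stride 1 and padding 1 keep the spatial dimensions unchanged for a 3x3 kernel.
    const PadStrideInfo conv_info(1, 1, 1, 1);

    // Optionally check the configuration up front with the static validate() helper,
    // which works on ITensorInfo objects and so needs no OpenCL memory to be allocated.
    const Status status = CLDirectConvolutionLayer::validate(src.info(), weights.info(), biases.info(), dst.info(), conv_info);
    if(!bool(status))
    {
        return 1;
    }

    CLDirectConvolutionLayer conv;
    conv.configure(&src, &weights, &biases, &dst, conv_info);

    // Allocate backing CL buffers, then (after filling the inputs) run the function.
    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    conv.run();
    CLScheduler::get().sync();
    return 0;
}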