Arm Compute Library 22.05 — generated documentation page for CLDeconvolutionLayer.h (the file listing follows).
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_CLDECONVOLUTIONLAYER_H
25 #define ARM_COMPUTE_CLDECONVOLUTIONLAYER_H

#include "arm_compute/core/CL/CLCompileContext.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"

#include <memory>

34 namespace arm_compute
35 {
36 /** Basic function to compute the deconvolution layer. This function calls the following OpenCL kernels/functions:
37  *
38  * -# @ref CLGEMMDeconvolutionLayer
39  * -# @ref CLDirectDeconvolutionLayer
40  */
42 {
43 public:
44  /** Default constructor */
45  CLDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
46 
47  /** Set the input, weights, biases and output tensors.
48  *
49  * Valid data layouts:
50  * - NHWC
51  * - NCHW
52  *
53  * Valid data type configurations:
54  * |src0 |src1 |src2 |dst |
55  * |:--------------|:------------------|:------|:--------------|
56  * |F16 |F16 |F16 |F16 |
57  * |F32 |F32 |F32 |F32 |
58  * |QASYMM8 |QASYMM8 |S32 |QASYMM8 |
59  * |QASYMM8 |QSYMM8_PER_CHANNEL |S32 |QASYMM8 |
60  * |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |QASYMM8_SIGNED |
61  * |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32 |QASYMM8_SIGNED |
62  *
63  * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
64  * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input or QSYMM8_PER_CHANNEL if @p input is QASYMM8/QASYMM8_SIGNED.
65  * @param[in] bias (Optional) The biases have one dimension. Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
66  * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
67  * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
68  * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref opencl::kernels::ClWeightsReshapeKernel.
69  *
70  */
71  void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info, const WeightsInfo &weights_info = WeightsInfo());
72  /** Set the input, weights, biases and output tensors.
73  *
74  * @param[in] compile_context The compile context to be used.
75  * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
76  * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input or QSYMM8_PER_CHANNEL if @p input is QASYMM8/QASYMM8_SIGNED.
77  * @param[in] bias (Optional) The biases have one dimension. Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
78  * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
79  * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
80  * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref opencl::kernels::ClWeightsReshapeKernel.
81  *
82  */
83  void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info,
85  /** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayer
86  *
87  * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
88  * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input or QSYMM8_PER_CHANNEL if @p input is QASYMM8/QASYMM8_SIGNED.
89  * @param[in] bias (Optional) The biases have one dimension. Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
90  * @param[in] output Output tensor info. The output has the same number of dimensions as the @p input.
91  * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
92  * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref opencl::kernels::ClWeightsReshapeKernel.
93  *
94  * @return a status
95  */
96  static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
98 
99  static DeconvolutionMethod get_deconvolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
100  const WeightsInfo &weights_info);
101  // Inherited methods overridden:
102  void run() override;
103  void prepare() override;
104 
105 private:
106  std::shared_ptr<IMemoryManager> _memory_manager;
107  std::unique_ptr<IFunction> _function;
108 };
109 } // namespace arm_compute
110 #endif /* ARM_COMPUTE_CLDECONVOLUTIONLAYER_H */
DeconvolutionMethod
Available DeconvolutionMethod.
Definition: Types.h:151
Base class for all functions.
Definition: IFunction.h:30
Basic function to compute the deconvolution layer.
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info, const WeightsInfo &weights_info=WeightsInfo())
Static function to check if given info will lead to a valid configuration of CLDeconvolutionLayer.
void prepare() override
Prepare the function for executing.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
Status class.
Definition: Error.h:52
void run() override
Run the kernels contained in the function.
Copyright (c) 2017-2022 Arm Limited.
Convolution Layer Weights Information class.
Definition: Types.h:1844
Padding and stride information class.
Definition: Types.h:669
CLCompileContext class.
CLDeconvolutionLayer(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
Default constructor.
Interface for OpenCL tensor.
Definition: ICLTensor.h:42
void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info, const WeightsInfo &weights_info=WeightsInfo())
Set the input, weights, biases and output tensors.
static DeconvolutionMethod get_deconvolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info, const WeightsInfo &weights_info)
const int32_t * bias