ConvolutionLayerNode.h (Compute Library 21.02)
/*
 * Copyright (c) 2018-2019 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_CONVOLUTION_LAYER_NODE_H
#define ARM_COMPUTE_GRAPH_CONVOLUTION_LAYER_NODE_H

#include "arm_compute/graph/INode.h"

namespace arm_compute
{
namespace graph
{
/** Convolution Layer node */
class ConvolutionLayerNode final : public INode
{
public:
    /** Constructor
     *
     * @param[in] info           Convolution layer attributes
     * @param[in] num_groups     (Optional) Number of groups (Defaults to 1)
     * @param[in] method         (Optional) Convolution method to use
     * @param[in] fast_math_hint (Optional) Fast math hint
     * @param[in] out_quant_info (Optional) Output quantization info
     */
    ConvolutionLayerNode(PadStrideInfo     info,
                         unsigned int      num_groups     = 1,
                         ConvolutionMethod method         = ConvolutionMethod::Default,
                         FastMathHint      fast_math_hint = FastMathHint::Disabled,
                         QuantizationInfo  out_quant_info = QuantizationInfo());
    /** Sets the convolution layer method to use
     *
     * @param[in] method Method to use for convolution
     */
    void set_convolution_method(ConvolutionMethod method);
    /** Convolution layer method accessor
     *
     * @note This is an indication of which convolution layer implementation to use;
     *       if it fails to be created, the library's heuristic approach will be used
     *
     * @return Convolution layer method to be used by the node
     */
    ConvolutionMethod convolution_method() const;
    /** Sets the fast math hint
     *
     * @param[in] hint Hint to use for convolution
     */
    void set_fast_math_hint(FastMathHint hint);
    /** Fast math hint accessor
     *
     * @return Fast math hint to be used by the node
     */
    FastMathHint fast_math_hint() const;
    /** Convolution metadata accessor
     *
     * @return Convolution information
     */
    PadStrideInfo convolution_info() const;
    /** Number of groups in convolution accessor
     *
     * @return Number of groups in convolution
     */
    unsigned int num_groups() const;
    /** Returns fused activation
     *
     * @return Fused activation
     */
    ActivationLayerInfo fused_activation() const;
    /** Sets fused activation
     *
     * @param[in] fused_activation Fused activation to set
     */
    void set_fused_activation(ActivationLayerInfo fused_activation);
    /** Computes convolution output descriptor
     *
     * @param[in] input_descriptor   Input descriptor
     * @param[in] weights_descriptor Weights descriptor
     * @param[in] info               Convolution operation attributes
     *
     * @return Output descriptor
     */
    static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
                                                      const TensorDescriptor &weights_descriptor,
                                                      const PadStrideInfo    &info);

    // Inherited overridden methods:
    NodeType         type() const override;
    bool             forward_descriptors() override;
    TensorDescriptor configure_output(size_t idx) const override;
    void             accept(INodeVisitor &v) override;

public:
    static constexpr NodeType node_type = NodeType::ConvolutionLayer;

private:
    PadStrideInfo       _info;
    unsigned int        _num_groups;
    ConvolutionMethod   _method;
    FastMathHint        _fast_math_hint;
    QuantizationInfo    _out_quant_info;
    ActivationLayerInfo _fused_activation;
};
} // namespace graph
} // namespace arm_compute
#endif /* ARM_COMPUTE_GRAPH_CONVOLUTION_LAYER_NODE_H */
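
Usage note: the node above is normally created for you by the graph frontend, but its public constructor and the static compute_output_descriptor() helper can be exercised directly. The following sketch is not part of the library sources; the include paths and the public shape/data_type/layout fields of TensorDescriptor are assumptions based on the arm_compute::graph API. It derives the output shape of a 7x7, stride-2, pad-3 convolution on a 224x224x3 NCHW input, i.e. floor((224 + 3 + 3 - 7) / 2) + 1 = 112 per spatial dimension with the default FLOOR rounding.

// Minimal sketch (assumed include paths and TensorDescriptor field names).
#include "arm_compute/graph/TensorDescriptor.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/nodes/ConvolutionLayerNode.h"

#include <iostream>

int main()
{
    namespace graph = arm_compute::graph;

    // Input feature map: 224x224, 3 channels, batch of 1
    // (TensorShape dimension order is W, H, C, N for NCHW).
    graph::TensorDescriptor input;
    input.shape     = arm_compute::TensorShape(224U, 224U, 3U, 1U);
    input.data_type = arm_compute::DataType::F32;
    input.layout    = arm_compute::DataLayout::NCHW;

    // Weights: 7x7 kernel, 3 input channels, 64 output feature maps.
    graph::TensorDescriptor weights = input;
    weights.shape = arm_compute::TensorShape(7U, 7U, 3U, 64U);

    // Stride 2x2, padding 3 on each side; FLOOR rounding by default.
    const arm_compute::PadStrideInfo conv_info(2, 2, 3, 3);

    const graph::TensorDescriptor output =
        graph::ConvolutionLayerNode::compute_output_descriptor(input, weights, conv_info);

    // Expected output shape: 112 x 112 x 64.
    std::cout << output.shape.x() << " x " << output.shape.y()
              << " x " << output.shape.z() << std::endl;

    // The node can also be constructed directly to inspect its attributes,
    // although the graph frontend normally does this for you.
    graph::ConvolutionLayerNode node(conv_info, /* num_groups = */ 1,
                                     graph::ConvolutionMethod::Default,
                                     graph::FastMathHint::Enabled);
    std::cout << "groups: " << node.num_groups() << std::endl;
    return 0;
}

As the @note on convolution_method() indicates, the method and fast-math settings are hints: the backend attempts to honour them and otherwise falls back to the library's own heuristics, while compute_output_descriptor() only propagates shape and layout metadata.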