Compute Library 20.02.1
ConvolutionLayerNode.cpp
/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph/nodes/ConvolutionLayerNode.h"

#include "arm_compute/core/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
#include "arm_compute/graph/Utils.h"

namespace arm_compute
{
namespace graph
{
ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo     info,
                                           unsigned int      num_groups,
                                           ConvolutionMethod method,
                                           FastMathHint      fast_math_hint,
                                           QuantizationInfo  out_quant_info)
    : _info(std::move(info)), _num_groups(num_groups), _method(method), _fast_math_hint(fast_math_hint), _out_quant_info(std::move(out_quant_info)), _fused_activation()
{
    // Three input edges (source, weights, optional bias) and a single output
    _input_edges.resize(3, EmptyEdgeID);
    _outputs.resize(1, NullTensorID);
}

void ConvolutionLayerNode::set_convolution_method(ConvolutionMethod method)
{
    _method = method;
}

ConvolutionMethod ConvolutionLayerNode::convolution_method() const
{
    return _method;
}

void ConvolutionLayerNode::set_fast_math_hint(FastMathHint hint)
{
    _fast_math_hint = hint;
}

FastMathHint ConvolutionLayerNode::fast_math_hint() const
{
    return _fast_math_hint;
}

PadStrideInfo ConvolutionLayerNode::convolution_info() const
{
    return _info;
}

unsigned int ConvolutionLayerNode::num_groups() const
{
    return _num_groups;
}

ActivationLayerInfo ConvolutionLayerNode::fused_activation() const
{
    return _fused_activation;
}

void ConvolutionLayerNode::set_fused_activation(ActivationLayerInfo fused_activation)
{
    _fused_activation = fused_activation;
}

TensorDescriptor ConvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
                                                                 const TensorDescriptor &weights_descriptor,
                                                                 const PadStrideInfo    &info)
{
    unsigned int output_width  = 0;
    unsigned int output_height = 0;

    const unsigned int input_width   = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
    const unsigned int input_height  = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT);
    const unsigned int kernel_width  = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
    const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);

    std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);

    // Output keeps the input layout; spatial dimensions come from scaled_dimensions
    // and the channel dimension from the number of kernels in the weights tensor.
    const DataLayout data_layout       = input_descriptor.layout;
    TensorDescriptor output_descriptor = input_descriptor;
    output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), output_width);
    output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), output_height);
    output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);

    return output_descriptor;
}

bool ConvolutionLayerNode::forward_descriptors()
{
    // Descriptors can only be forwarded once source, weights and output tensors exist
    if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
    {
        Tensor *dst = output(0);
        ARM_COMPUTE_ERROR_ON(dst == nullptr);
        dst->desc() = configure_output(0);
        return true;
    }
    return false;
}

TensorDescriptor ConvolutionLayerNode::configure_output(size_t idx) const
{
    ARM_COMPUTE_UNUSED(idx);
    const Tensor *src     = input(0);
    const Tensor *weights = input(1);

    ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);

    TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info);
    if(!_out_quant_info.empty())
    {
        // An explicit output quantization overrides the one inherited from the input
        output_info.quant_info = _out_quant_info;
    }

    return output_info;
}

NodeType ConvolutionLayerNode::type() const
{
    return NodeType::ConvolutionLayer;
}

void ConvolutionLayerNode::accept(INodeVisitor &v)
{
    v.visit(*this);
}
} // namespace graph
} // namespace arm_compute
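
For reference, the spatial output size that compute_output_descriptor obtains via scaled_dimensions follows the usual convolution shape arithmetic. The snippet below is a minimal, standalone sketch of that arithmetic assuming floor rounding and no dilation; conv_out_dim and the example numbers are illustrative only and are not part of the library (the real scaled_dimensions also honours the rounding mode, padding pairs and dilation carried by PadStrideInfo).

#include <cstdio>

// Hypothetical helper, for illustration only: output extent of one spatial
// dimension for a convolution with floor rounding and no dilation.
static unsigned int conv_out_dim(unsigned int in, unsigned int kernel,
                                 unsigned int stride, unsigned int pad_begin, unsigned int pad_end)
{
    return (in + pad_begin + pad_end - kernel) / stride + 1;
}

int main()
{
    // Example: 224x224 input, 7x7 kernel, stride 2, padding 3 -> 112x112 output
    // (the familiar first convolution of a ResNet-style network).
    const unsigned int out_w = conv_out_dim(224, 7, 2, 3, 3);
    const unsigned int out_h = conv_out_dim(224, 7, 2, 3, 3);
    std::printf("output: %ux%u\n", out_w, out_h); // prints "output: 112x112"
    return 0;
}

The channel dimension of the output is not computed this way: as the listing above shows, it is taken directly from weights_descriptor.shape[3], i.e. the number of kernels, independent of the data layout.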