Compute Library 22.11
OperatorGraphImpl.cpp
/*
 * Copyright (c) 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifdef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION

namespace arm_compute
{
namespace experimental
{
namespace dynamic_fusion
{
namespace
{
Status add_kernel_tensor(ClKernelGraph &k_graph, const OperatorGraph::Implementation &op_graph, const OpTensorContent &op_tensor, MemoryType memory_type, AuxMemoryInfo memory_info,
                         DependencyGraph::Id &id)
{
    ARM_COMPUTE_UNUSED(op_graph);
    return k_graph.add_kernel_tensor(op_tensor.desc, memory_type, memory_info, id, op_tensor.id);
}

Status add_kernel_tensor(ClKernelGraph &k_graph, const OperatorGraph::Implementation &op_graph, const OpTensorContent &op_tensor, DependencyGraph::Id &id)
{
    // For a tensor t:
    // 1. If t is a src tensor of the entire op graph, then it's Core.
    //    (Optimisation opportunity: if we guarantee that all translate methods are called in topological order, we can always assign t to Core,
    //    because even if the op is non-root (which would mean t should be an Aux tensor), the src tensors would already have been determined by the ancestor ops (topological order), and thus would not be overridden by it.)
    // 2. If t is a dst tensor of the entire op graph, then it's Core.
    // 3. Aux tensors with Persistent or Prepare lifetime are manually specified.
    // 4. All other tensors not captured by the above are assigned Aux, with a lifetime of Temporary.
    // kernel_graph.add_kernel_tensor(input->desc, );
    bool          is_src_tensor_of_graph = is_in(op_tensor.id, op_graph.graph.src_tensors());
    bool          is_dst_tensor_of_graph = is_in(op_tensor.id, op_graph.graph.dst_tensors());
    MemoryType    memory_type;
    AuxMemoryInfo memory_info;
    if(is_src_tensor_of_graph || is_dst_tensor_of_graph)
    {
        memory_type = MemoryType::Core;
    }
    else
    {
        memory_type          = MemoryType::Auxiliary;
        memory_info.lifetime = AuxMemoryLifetime::Temporary;
        memory_info.size     = op_tensor.desc->total_size();
    }
    return add_kernel_tensor(k_graph, op_graph, op_tensor, memory_type, memory_info, id);
}
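// Illustrative example (hypothetical graph, not part of the original source): for an operator graph
//     src -> conv2d -> t0 -> elementwise_add -> dst   (with a second graph input "addend" feeding the add)
// src, addend and dst are src/dst tensors of the entire op graph and are therefore classified as
// MemoryType::Core, while the intermediate t0 falls through to MemoryType::Auxiliary with
// AuxMemoryLifetime::Temporary and size t0.desc->total_size(), exactly as in the overload above.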

/** Get the suitable kernel size for using direct convolution method with NHWC data layout.
 *
 * @note Duplicate of the function with the same name in src/gpu/cl/operators/ClConv2d.cpp
 *
 * @note Direct convolution should be executed when the kernel's spatial dimensions are greater than or equal to the value returned by this function
 *
 * @param[in] gpu_target GPU target
 *
 * @return the suitable kernel size for using direct convolution method with NHWC data layout
 */
size_t get_direct_conv_kernel_threshold_nhwc(arm_compute::GPUTarget gpu_target)
{
    switch(gpu_target)
    {
        // The case labels below are reconstructed following the ClConv2d.cpp function this one duplicates;
        // the exact labels were not legible in this listing.
        case arm_compute::GPUTarget::G76:
        case arm_compute::GPUTarget::G77:
        case arm_compute::GPUTarget::G78:
            return 5;
        case arm_compute::GPUTarget::G71:
        case arm_compute::GPUTarget::G72:
        case arm_compute::GPUTarget::MIDGARD:
        case arm_compute::GPUTarget::BIFROST:
            return 7;
        default:
            return 5;
    }
}
} // namespace

bool operator==(const OpTensor &t0, const OpTensor &t1)
{
    return std::make_tuple(t0.id()) == std::make_tuple(t1.id());
}
bool operator==(const Conv2dDescriptor &conv2d0, const Conv2dDescriptor &conv2d1)
{
    return std::make_tuple(conv2d0.stride, conv2d0.dilation) == std::make_tuple(conv2d1.stride, conv2d1.dilation);
}

bool operator==(const ElementwiseDescriptor &ed0, const ElementwiseDescriptor &ed1)
{
    return ed0.op == ed1.op; // Compare the arithmetic operations of the two ElementwiseDescriptor objects
}

bool operator==(const FloorDescriptor &, const FloorDescriptor &)
{
    return std::make_tuple() == std::make_tuple(); // Currently two Floor ops are always the same
}

bool Conv2dContent::operator==(const OperatorContent &other) const
{
    const auto converted = *utils::cast::polymorphic_downcast<const Conv2dContent *>(&other);
    return desc == converted.desc;
}

bool ElementwiseContent::operator==(const OperatorContent &other) const
{
    const auto converted = *utils::cast::polymorphic_downcast<const ElementwiseContent *>(&other);
    return desc == converted.desc;
}

bool FloorContent::operator==(const OperatorContent &other) const
{
    const auto converted = *utils::cast::polymorphic_downcast<const FloorContent *>(&other);
    return desc == converted.desc;
}

ConvolutionMethod Conv2dContent::select_conv_method(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const Conv2dDescriptor &conv2d_desc, const GPUTarget gpu_target)
{
    // Modified from ClConv2d::get_convolution_method

    ARM_COMPUTE_ERROR_ON_NULLPTR(src);
    ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights);

    const PadStrideInfo legacy_pad_stride(conv2d_desc.stride.x(), conv2d_desc.stride.y(), conv2d_desc.pad.left, conv2d_desc.pad.right, conv2d_desc.pad.top, conv2d_desc.pad.bottom, DimensionRoundingType{});
    const Size2D        dilation = conv2d_desc.dilation;

    const size_t idx_w = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::CHANNEL);

    /* Input spatial dims, kernel size, IFM/OFM, conv info */
    using ConvolutionConfiguration = std::tuple<Size2D, Size2D, Size2D, PadStrideInfo, DataLayout>;
    using ConfigurationMethod      = std::pair<ConvolutionConfiguration, ConvolutionMethod>;

    const std::vector<ConfigurationMethod> known_configs =
    {
        // Alexnet
        ConfigurationMethod(ConvolutionConfiguration(Size2D(27U, 27U), Size2D(5U, 5U), Size2D(48U, 128U), PadStrideInfo(1U, 1U, 2U, 2U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // VGG16 / VGG19
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 64U), PadStrideInfo(1U, 1U, 1U, 1U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // Mobilenet 224
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 160
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 224
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
        // Mobilenet 160
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
    };

    const auto find_config = [&](ConfigurationMethod c)
    {
        const ConvolutionConfiguration config      = c.first;
        const PadStrideInfo            info        = std::get<3>(config);
        const DataLayout               data_layout = std::get<4>(config);

        return std::get<0>(config) == Size2D(src->dimension(idx_w), src->dimension(idx_h)) && std::get<1>(config) == Size2D(weights->dimension(idx_w), weights->dimension(idx_h))
               && std::get<2>(config) == Size2D(weights->dimension(idx_c), weights->dimension(3)) && info.pad_top() == legacy_pad_stride.pad_top() && info.pad_right() == legacy_pad_stride.pad_right()
               && info.pad_bottom() == legacy_pad_stride.pad_bottom() && info.pad_left() == legacy_pad_stride.pad_left() && info.stride() == legacy_pad_stride.stride() && (data_layout == src->data_layout());
    };

    std::vector<ConfigurationMethod>::const_iterator found;
    if((found = std::find_if(known_configs.begin(), known_configs.end(), find_config)) != known_configs.end())
    {
        return (*found).second;
    }

    if(dilation != Size2D(1U, 1U))
    {
        return ConvolutionMethod::GEMM;
    }
    else
    {
        if(src->data_layout() == DataLayout::NCHW)
        {
            ARM_COMPUTE_ERROR("NCHW not supported");
        }
        else
        {
            const bool   is_direct_valid           = bool(ClDirectConv2dKernel::validate(src, weights, nullptr, dst, ClDirectConv2dKernelDescriptor{ conv2d_desc }));
            const size_t kernel_sz_direct_conv_thr = get_direct_conv_kernel_threshold_nhwc(gpu_target);

            // SRGAN case
            if((src->dimension(idx_h) > 720U) && (dst->dimension(idx_h) > 720U) && (weights->dimension(idx_h) == 9) && (conv2d_desc.pad.top < 3)
               && is_direct_valid)
            {
                return ConvolutionMethod::DIRECT;
            }

            // Floating-point case: GeMM/Direct
            if(is_data_type_float(src->data_type()))
            {
                // Get dst shape
                const TensorShape output_shape       = misc::shape_calculator::compute_deep_convolution_shape(src->tensor_shape(), src->data_layout(), weights->tensor_shape(), legacy_pad_stride);
                const bool        is_large_kernel_sz = (weights->dimension(idx_w) >= kernel_sz_direct_conv_thr) && (weights->dimension(idx_h) >= kernel_sz_direct_conv_thr);
                const bool        is_ifm_ge_16       = src->dimension(idx_c) >= 16;
                const bool        is_ofm_lte_8       = weights->dimension(3U) <= 8;
                const bool        workload_gte_8192  = (output_shape[0] * output_shape[1] * output_shape[2]) / 16 >= 8192;
                const bool        is_ifm_gt_ofm      = src->dimension(idx_c) > weights->dimension(3U);

                // Direct convolution case
                if(is_direct_valid)
                {
                    if((gpu_target == arm_compute::GPUTarget::G71 || gpu_target == arm_compute::GPUTarget::G72 || gpu_target == arm_compute::GPUTarget::MIDGARD))
                    {
                        if(is_large_kernel_sz && is_ifm_ge_16 && is_ifm_gt_ofm)
                        {
                            return ConvolutionMethod::DIRECT;
                        }
                    }
                    else
                    {
                        if((is_large_kernel_sz && workload_gte_8192 && is_ifm_ge_16) || (is_ofm_lte_8 && is_ifm_ge_16))
                        {
                            return ConvolutionMethod::DIRECT;
                        }
                    }
                }

                // Default case
                return ConvolutionMethod::GEMM;
            }

            // Generic case for quantized. Only GeMM
            return ConvolutionMethod::GEMM;
        }
    }
    return ConvolutionMethod::DIRECT; // Fallback, unreachable in practice (reconstructed; the exact value was elided in this listing)
}
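// Worked examples of the heuristic above (illustrative only, derived from the code in this function):
//  - Any convolution with dilation != (1, 1) returns ConvolutionMethod::GEMM before the layout checks.
//  - The Alexnet entry in known_configs (27x27 input, 5x5 kernel, 48/128 IFM/OFM, NCHW) is matched by the
//    table lookup and returns ConvolutionMethod::DIRECT without reaching the layout-specific branches.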

Status Conv2dContent::translate(ClKernelGraph &kernel_graph) const
{
    const auto input  = _tensors.get_const_tensor(TensorType::ACL_SRC_0);
    const auto weight = _tensors.get_const_tensor(TensorType::ACL_SRC_1);
    const auto dst    = _tensors.get_const_tensor(TensorType::ACL_DST_0);
    const auto method = forced_method_enabled ? forced_method : Conv2dContent::select_conv_method(input->desc, weight->desc, dst->desc, desc, CLScheduler::get().target());
    switch(method)
    {
        case ConvolutionMethod::DIRECT:
        {
            return translate_direct_conv2d(kernel_graph);
        }
        default:
        {
            ARM_COMPUTE_RETURN_ERROR_MSG("Not implemented");
        }
    }
    return Status{};
}
Status Conv2dContent::translate_direct_conv2d(ClKernelGraph &kernel_graph) const
{
    const auto input  = _tensors.get_const_tensor(TensorType::ACL_SRC_0);
    const auto weight = _tensors.get_const_tensor(TensorType::ACL_SRC_1);
    const auto bias   = _tensors.get_const_tensor(TensorType::ACL_SRC_2);
    const auto dst    = _tensors.get_const_tensor(TensorType::ACL_DST_0);
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weight, dst);

    ITensorDescPack<ClKernelTensor> tensors;

    DependencyGraph::Id input_id;
    auto                st = add_kernel_tensor(kernel_graph, *_graph, *input, input_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);
    tensors.add_const_tensor(ACL_SRC_0, kernel_graph.get_tensor(input_id));

    DependencyGraph::Id weight_id;
    st = add_kernel_tensor(kernel_graph, *_graph, *weight, weight_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);
    tensors.add_const_tensor(ACL_SRC_1, kernel_graph.get_tensor(weight_id));

    if(bias != nullptr)
    {
        DependencyGraph::Id bias_id;
        st = add_kernel_tensor(kernel_graph, *_graph, *bias, bias_id);
        ARM_COMPUTE_RETURN_ON_ERROR(st);
        tensors.add_const_tensor(ACL_SRC_2, kernel_graph.get_tensor(bias_id));
    }

    DependencyGraph::Id dst_id;
    st = add_kernel_tensor(kernel_graph, *_graph, *dst, dst_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);
    tensors.add_const_tensor(ACL_DST_0, kernel_graph.get_tensor(dst_id));

    DependencyGraph::Id direct_conv2d_id;
    const auto          kernel_desc = ClDirectConv2dKernelDescriptor{ desc };

    st = ClDirectConv2dKernel::validate(input->desc, weight->desc, bias == nullptr ? nullptr : bias->desc, dst->desc, kernel_desc);
    ARM_COMPUTE_RETURN_ON_ERROR(st);

    ClKernelConfig config{ UnitWorkloadStage{ UnitWorkloadStage::Stage::Run }, TileDescriptor{}, StoreType::TStoreIndirectWidthSelect }; // Reconstructed; the exact initializer was elided in this listing
    st = kernel_graph.add_kernel<ClDirectConv2dKernel>(config, kernel_desc, tensors, direct_conv2d_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);
    ARM_COMPUTE_UNUSED(direct_conv2d_id);

    return Status{};
}

Status ElementwiseContent::translate(ClKernelGraph &kernel_graph) const
{
    const auto lhs = _tensors.get_const_tensor(TensorType::ACL_SRC_0);
    const auto rhs = _tensors.get_const_tensor(TensorType::ACL_SRC_1);
    const auto dst = _tensors.get_const_tensor(TensorType::ACL_DST_0);
    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst);

    ITensorDescPack<ClKernelTensor> tensors;

    DependencyGraph::Id lhs_id;
    auto                st = add_kernel_tensor(kernel_graph, *_graph, *lhs, lhs_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);
    tensors.add_const_tensor(ACL_SRC_0, kernel_graph.get_tensor(lhs_id));

    DependencyGraph::Id rhs_id;
    st = add_kernel_tensor(kernel_graph, *_graph, *rhs, rhs_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);
    tensors.add_const_tensor(ACL_SRC_1, kernel_graph.get_tensor(rhs_id));

    DependencyGraph::Id dst_id;
    st = add_kernel_tensor(kernel_graph, *_graph, *dst, dst_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);
    tensors.add_const_tensor(ACL_DST_0, kernel_graph.get_tensor(dst_id));

    DependencyGraph::Id add_id;
    ClKernelConfig      config{ UnitWorkloadStage{ UnitWorkloadStage::Stage::Run }, TileDescriptor{}, StoreType::TStoreIndirectWidthSelect }; // Reconstructed; the exact initializer was elided in this listing

    st = ClElementwiseKernel::validate(lhs->desc, rhs->desc, dst->desc);
    ARM_COMPUTE_RETURN_ON_ERROR(st);

    st = kernel_graph.add_kernel<ClElementwiseKernel>(config, ClElementwiseKernelDescriptor{ desc }, tensors, add_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);
    ARM_COMPUTE_UNUSED(add_id);

    return Status{};
}

Status FloorContent::translate(ClKernelGraph &kernel_graph) const
{
    const auto src = _tensors.get_const_tensor(TensorType::ACL_SRC_0);
    const auto dst = _tensors.get_const_tensor(TensorType::ACL_DST_0);
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);

    ITensorDescPack<ClKernelTensor> tensors;

    DependencyGraph::Id src_id;
    auto                st = add_kernel_tensor(kernel_graph, *_graph, *src, src_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);
    tensors.add_const_tensor(ACL_SRC_0, kernel_graph.get_tensor(src_id));

    DependencyGraph::Id dst_id;
    st = add_kernel_tensor(kernel_graph, *_graph, *dst, dst_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);
    tensors.add_const_tensor(ACL_DST_0, kernel_graph.get_tensor(dst_id));

    DependencyGraph::Id add_id;
    ClKernelConfig      config{ UnitWorkloadStage{ UnitWorkloadStage::Stage::Run }, TileDescriptor{}, StoreType::TStoreIndirectWidthSelect }; // Reconstructed; the exact initializer was elided in this listing

    st = ClFloorKernel::validate(src->desc, dst->desc);
    ARM_COMPUTE_RETURN_ON_ERROR(st);

    st = kernel_graph.add_kernel<ClFloorKernel>(config, ClFloorKernelDescriptor{ desc }, tensors, add_id);
    ARM_COMPUTE_RETURN_ON_ERROR(st);

    return Status{};
}

std::vector<const OperatorContent *> traverse(const OperatorGraph::Implementation &graph)
{
    std::vector<const OperatorContent *> ops;
    const auto                           sorted = graph.graph.topological_sort();
    for(const auto &pack : sorted.second)
    {
        ops.push_back(graph.operators.at(pack.op).get());
    }
    return ops;
}

std::vector<OperatorContent *> traverse(OperatorGraph::Implementation &graph)
{
    std::vector<OperatorContent *> ops;
    const auto                     sorted = graph.graph.topological_sort();
    for(const auto &pack : sorted.second)
    {
        ops.push_back(graph.operators.at(pack.op).get());
    }
    return ops;
}

Status translate(ClKernelGraph &kernel_graph, const OperatorGraph::Implementation &op_graph)
{
    for(const auto &op : traverse(op_graph))
    {
        const auto st = op->translate(kernel_graph);
        ARM_COMPUTE_RETURN_ON_ERROR(st);
    }
    return Status{};
}

} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */
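
Usage sketch: the free translate() function at the end of the file is the entry point that turns a built
operator graph into a ClKernelGraph. The fragment below is an illustrative sketch only; build_kernel_graph
is a hypothetical helper, and it assumes an already-populated OperatorGraph::Implementation and a
default-constructible ClKernelGraph, neither of which is shown in this file.

    Status build_kernel_graph(ClKernelGraph &kernel_graph, const OperatorGraph::Implementation &op_graph_impl)
    {
        // Translate every operator, in topological order, into kernels of the kernel graph.
        const auto st = translate(kernel_graph, op_graph_impl);
        ARM_COMPUTE_RETURN_ON_ERROR(st); // Propagates failures such as the "Not implemented" path in Conv2dContent::translate
        return Status{};
    }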