Compute Library 21.02
NEDepthwiseConvolutionLayer.cpp
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"

#include "arm_compute/core/utils/misc/InfoHelpers.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEDepthwiseConvolutionLayerNativeKernel.h"

using namespace arm_compute::misc;
using namespace arm_compute::misc::shape_calculator;
namespace arm_compute
{
namespace
{
Status validate_arguments_optimized(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                    unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    if(!is_data_type_quantized_per_channel(weights->data_type()))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    }
    ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN);
    ARM_COMPUTE_RETURN_ERROR_ON(dilation.x() < 1 || dilation.y() < 1);
    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) + (weights->dimension(idx_w) - 1) * (dilation.x() - 1) > input->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right());
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) + (weights->dimension(idx_h) - 1) * (dilation.y() - 1) > input->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom());

    if(biases != nullptr)
    {
        const unsigned int channel_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(channel_idx));
    }

    ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionAssemblyDispatch::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation));

    // Validate Activation Layer
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }
    return Status{};
}
} // namespace

NEDepthwiseConvolutionLayer::~NEDepthwiseConvolutionLayer() = default;

NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::NEDepthwiseConvolutionLayerOptimizedInternal(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _dwc_optimized_func(memory_manager), _permute_input(), _permute_weights(), _permute_output(), _activationlayer_function(), _accumulator(), _permuted_input(),
      _permuted_weights(), _permuted_output(), _original_weights(nullptr), _has_bias(false), _is_quantized(false), _is_nchw(true), _permute(false), _is_activationlayer_enabled(false), _is_prepared(false)
{
}

void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::configure(ITensor                   *input,
                                                                                          const ITensor             *weights,
                                                                                          const ITensor             *biases,
                                                                                          ITensor *output, const PadStrideInfo &conv_info,
                                                                                          unsigned int               depth_multiplier,
                                                                                          const ActivationLayerInfo &act_info,
                                                                                          const Size2D              &dilation)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(NEDepthwiseConvolutionLayerOptimizedInternal::validate(input->info(), weights->info(), (biases == nullptr) ? nullptr : biases->info(),
                                                                                      output->info(), conv_info, depth_multiplier, act_info, dilation));

    _original_weights           = weights;
    _is_quantized               = is_data_type_quantized_asymmetric(input->info()->data_type());
    _has_bias                   = biases != nullptr;
    _is_nchw                    = input->info()->data_layout() == DataLayout::NCHW;
    _permute                    = _is_nchw;
    _is_prepared                = false;
    _is_activationlayer_enabled = act_info.enabled();

    // Configure pipeline
    ActivationLayerInfo act_info_to_use = ActivationLayerInfo();
    const bool is_relu  = arm_compute::utils::info_helpers::is_relu(act_info);
    const bool is_relu6 = arm_compute::utils::info_helpers::is_relu6(act_info);
    _is_activationlayer_enabled = act_info.enabled() && !(is_relu || is_relu6);
    if(!_is_activationlayer_enabled)
    {
        act_info_to_use = act_info;
    }

    if(_is_nchw)
    {
        _memory_group.manage(&_permuted_input);
        _memory_group.manage(&_permuted_output);

        // Configure the function to transform the input tensor from NCHW -> NHWC
        _permute_input.configure(input, &_permuted_input, PermutationVector(2U, 0U, 1U));
        _permuted_input.info()->set_data_layout(DataLayout::NHWC);

        // Configure the function to transform the weights tensor from IHW -> HWI
        _permute_weights.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
        _permuted_weights.info()->set_data_layout(DataLayout::NHWC);

        _permuted_output.info()->set_data_layout(DataLayout::NHWC);
        _permuted_output.info()->set_quantization_info(output->info()->quantization_info());

        // Configure optimized depthwise
        _dwc_optimized_func.configure(&_permuted_input, &_permuted_weights, biases, &_permuted_output, conv_info, depth_multiplier, act_info_to_use, dilation);

        // Configure the function to transform the convolved output to ACL's native ordering format NCHW
        _permuted_output.info()->set_data_layout(DataLayout::NHWC);
        _permute_output.configure(&_permuted_output, output, PermutationVector(1U, 2U, 0U));

        // Allocate tensors
        _permuted_input.allocator()->allocate();
        _permuted_output.allocator()->allocate();
    }
    else
    {
        _dwc_optimized_func.configure(input, weights, biases, output, conv_info, depth_multiplier, act_info_to_use, dilation);
    }

    // Configure activation
    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.configure(output, nullptr, act_info);
    }
}

Status NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::validate(const ITensorInfo         *input,
                                                                                           const ITensorInfo         *weights,
                                                                                           const ITensorInfo         *biases,
                                                                                           const ITensorInfo         *output,
                                                                                           const PadStrideInfo       &conv_info,
                                                                                           unsigned int               depth_multiplier,
                                                                                           const ActivationLayerInfo &act_info,
                                                                                           const Size2D              &dilation)
{
    return validate_arguments_optimized(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}

void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Permute input
    if(_permute)
    {
        _permute_input.run();
    }

    // Run assembly function
    _dwc_optimized_func.run();

    // Permute output
    if(_is_nchw)
    {
        _permute_output.run();
    }

    // Run activation
    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.run();
    }
}

void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::prepare()
{
    if(!_is_prepared)
    {
        // Permute weights
        if(_permute)
        {
            _permuted_weights.allocator()->allocate();
            _permute_weights.run();
            _original_weights->mark_as_unused();
        }

        // Prepare optimized function
        _dwc_optimized_func.prepare();
        if(!_permuted_weights.is_used())
        {
            _permuted_weights.allocator()->free();
        }

        _is_prepared = true;
    }
}

NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::NEDepthwiseConvolutionLayerGeneric()
    : _depthwise_conv_kernel(), _permute_input(), _permute_weights(), _permute_output(), _activationlayer_function(), _permuted_input(), _permuted_weights(), _permuted_output(), _is_prepared(false),
      _is_nchw(false), _is_activationlayer_enabled(false), _original_weights(nullptr)
{
}

void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
                                                                                unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(NEDepthwiseConvolutionLayer::validate(input->info(), weights->info(), (biases == nullptr) ? nullptr : biases->info(),
                                                                     output->info(), conv_info, depth_multiplier, act_info, dilation));

    _is_nchw     = input->info()->data_layout() == DataLayout::NCHW;
    _is_prepared = !_is_nchw;

    ITensor       *input_to_use   = input;
    const ITensor *weights_to_use = weights;
    ITensor       *output_to_use  = output;
    if(_is_nchw)
    {
        _permute_input.configure(input, &_permuted_input, PermutationVector(2U, 0U, 1U));
        _permuted_input.info()->set_data_layout(DataLayout::NHWC);
        input_to_use = &_permuted_input;

        _permute_weights.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
        _permuted_weights.info()->set_data_layout(DataLayout::NHWC);
        weights_to_use = &_permuted_weights;

        _permuted_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(TensorShape()));
        output_to_use = &_permuted_output;
    }
    _original_weights = weights_to_use;

    _depthwise_conv_kernel = std::make_unique<NEDepthwiseConvolutionLayerNativeKernel>();
    _depthwise_conv_kernel->configure(input_to_use, weights_to_use, biases, output_to_use, conv_info, depth_multiplier, dilation);

    if(_is_nchw)
    {
        _permute_output.configure(&_permuted_output, output, PermutationVector(1U, 2U, 0U));
        _permuted_output.info()->set_data_layout(DataLayout::NHWC);

        _permuted_input.allocator()->allocate();
        _permuted_weights.allocator()->allocate();
        _permuted_output.allocator()->allocate();
    }

    // Configure Activation Layer
    _is_activationlayer_enabled = act_info.enabled();
    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.configure(output, nullptr, act_info);
    }
}

Status NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                                                                 const PadStrideInfo &conv_info,
                                                                                 unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    if(input->data_layout() == DataLayout::NCHW)
    {
        TensorShape permuted_input_shape   = input->tensor_shape();
        TensorShape permuted_weights_shape = weights->tensor_shape();
        TensorShape permuted_output_shape  = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
        permute(permuted_input_shape, PermutationVector(2U, 0U, 1U));
        permute(permuted_weights_shape, PermutationVector(2U, 0U, 1U));
        permute(permuted_output_shape, PermutationVector(2U, 0U, 1U));

        const TensorInfo permuted_input   = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_input_shape).set_data_layout(DataLayout::NHWC));
        const TensorInfo permuted_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_weights_shape).set_data_layout(DataLayout::NHWC));
        const TensorInfo permuted_output  = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_output_shape).set_data_layout(DataLayout::NCHW));

        ARM_COMPUTE_RETURN_ON_ERROR(NEPermute::validate(input, &permuted_input, PermutationVector(2U, 0U, 1U)));
        ARM_COMPUTE_RETURN_ON_ERROR(NEPermute::validate(weights, &permuted_weights, PermutationVector(2U, 0U, 1U)));
        ARM_COMPUTE_RETURN_ON_ERROR(NEPermute::validate(&permuted_output, output, PermutationVector(1U, 2U, 0U)));

        ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionLayerNativeKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output, conv_info, depth_multiplier, dilation));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionLayerNativeKernel::validate(input, weights, biases, output, conv_info, depth_multiplier, dilation));
    }

    // Validate Activation Layer
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::run()
{
    if(_is_nchw)
    {
        prepare();
        _permute_input.run();
    }

    NEScheduler::get().schedule(_depthwise_conv_kernel.get(), Window::DimY);

    if(_is_nchw)
    {
        _permute_output.run();
    }

    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.run();
    }
}

void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        _permute_weights.run();
        _original_weights->mark_as_unused();
        _is_prepared = true;
    }
}

NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _depth_conv_func(DepthwiseConvolutionFunction::GENERIC), _func_optimized(std::move(memory_manager)), _func_generic()
{
}

void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
                                            const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    _depth_conv_func = get_depthwiseconvolution_function(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info, dilation);
    switch(_depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED:
            _func_optimized.configure(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
            break;
        case DepthwiseConvolutionFunction::GENERIC:
            _func_generic.configure(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported DepthwiseConvolutionFunction");
    }
}

Status NEDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                             unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    DepthwiseConvolutionFunction depth_conv_func = get_depthwiseconvolution_function(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
    switch(depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED:
            return NEDepthwiseConvolutionLayerOptimizedInternal::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
            break;
        case DepthwiseConvolutionFunction::GENERIC:
            return NEDepthwiseConvolutionLayerGeneric::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported DepthwiseConvolutionFunction");
    }
}

DepthwiseConvolutionFunction NEDepthwiseConvolutionLayer::get_depthwiseconvolution_function(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                                                                             const PadStrideInfo &conv_info,
                                                                                             unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    if(bool(NEDepthwiseConvolutionLayerOptimizedInternal::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation)))
    {
        return DepthwiseConvolutionFunction::OPTIMIZED;
    }
    else
    {
        return DepthwiseConvolutionFunction::GENERIC;
    }
}

void NEDepthwiseConvolutionLayer::run()
{
    switch(_depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED:
            _func_optimized.run();
            break;
        case DepthwiseConvolutionFunction::GENERIC:
            _func_generic.run();
            break;
        default:
            ARM_COMPUTE_ERROR("DepthwiseConvolutionFunction not properly configured");
    }
}

void NEDepthwiseConvolutionLayer::prepare()
{
    switch(_depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED:
            _func_optimized.prepare();
            break;
        case DepthwiseConvolutionFunction::GENERIC:
            _func_generic.prepare();
            break;
        default:
            ARM_COMPUTE_ERROR("DepthwiseConvolutionFunction not properly configured");
    }
}
} // namespace arm_compute
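
For reference, here is a minimal usage sketch of NEDepthwiseConvolutionLayer assembled from the configure()/run() interface defined in this file. The tensor shapes, the PadStrideInfo and activation values, and the use of the Tensor/TensorInfo runtime classes are illustrative assumptions for the example, not something this file prescribes.

// Minimal usage sketch (illustrative, not part of NEDepthwiseConvolutionLayer.cpp).
// Assumes the public arm_compute runtime API (Tensor, TensorInfo) and NHWC-layout tensors.
#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // NHWC tensors: TensorShape dimensions are ordered (C, W, H, N).
    Tensor src, weights, biases, dst;

    TensorInfo src_info(TensorShape(16U, 32U, 32U, 1U), 1, DataType::F32);
    src_info.set_data_layout(DataLayout::NHWC);
    TensorInfo weights_info(TensorShape(16U, 3U, 3U), 1, DataType::F32); // one 3x3 kernel per channel
    weights_info.set_data_layout(DataLayout::NHWC);
    TensorInfo biases_info(TensorShape(16U), 1, DataType::F32);
    TensorInfo dst_info(TensorShape(16U, 30U, 30U, 1U), 1, DataType::F32); // stride 1, no padding
    dst_info.set_data_layout(DataLayout::NHWC);

    src.allocator()->init(src_info);
    weights.allocator()->init(weights_info);
    biases.allocator()->init(biases_info);
    dst.allocator()->init(dst_info);

    // configure() picks the optimized or generic path internally via
    // get_depthwiseconvolution_function().
    NEDepthwiseConvolutionLayer dwc;
    dwc.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0), 1 /* depth_multiplier */,
                  ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src, weights and biases with data here ...

    dwc.run(); // weight preparation is handled internally on the first run
    return 0;
}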