Compute Library
 23.08
CPPBoxWithNonMaximaSuppressionLimit.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_CPPBOXWITHNONMAXIMASUPPRESSIONLIMIT_H
25 #define ARM_COMPUTE_CPPBOXWITHNONMAXIMASUPPRESSIONLIMIT_H
26 
28 #include "arm_compute/core/Types.h"
33 
34 namespace arm_compute
35 {
36 class ITensor;
37 
38 /** Basic function to run @ref CPPBoxWithNonMaximaSuppressionLimitKernel */
// NOTE(review): the class head (original line 39, presumably
// `class CPPBoxWithNonMaximaSuppressionLimit : public IFunction`) was elided by the
// documentation extractor; the cross-reference index in this page lists IFunction as the base.
40 {
41 public:
42  /** Constructor */
43  CPPBoxWithNonMaximaSuppressionLimit(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
44  /** Prevent instances of this class from being copied (As this class contains pointers) */
// (deleted copy-constructor declaration, original line 45, elided by the extractor)
46  /** Prevent instances of this class from being copied (As this class contains pointers) */
// (deleted copy-assignment declaration, original line 47, elided by the extractor; the
//  index confirms `CPPBoxWithNonMaximaSuppressionLimit &operator=(const ...&) = delete`)
48  /** Configure the BoxWithNonMaximaSuppressionLimit CPP kernel
49  *
50  * @param[in] scores_in The scores input tensor of size [count, num_classes]. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
51  * @param[in] boxes_in The boxes input tensor of size [count, num_classes * 4].
52  * Data types supported: QASYMM16 with 0.125 scale and 0 offset if @p scores_in is QASYMM8/QASYMM8_SIGNED, otherwise same as @p scores_in
53  * @param[in] batch_splits_in The batch splits input tensor of size [batch_size]. Data types supported: Same as @p scores_in
54  * @note Can be a nullptr. If not a nullptr, @p scores_in and @p boxes_in have items from multiple images.
55  * @param[out] scores_out The scores output tensor of size [N]. Data types supported: Same as @p scores_in
56  * @param[out] boxes_out The boxes output tensor of size [N, 4].
57  * Data types supported: QASYMM16 with 0.125 scale and 0 offset if @p scores_in is QASYMM8/QASYMM8_SIGNED, otherwise same as @p scores_in
58  * @param[out] classes The classes output tensor of size [N]. Data types supported: Same as @p scores_in
59  * @param[out] batch_splits_out (Optional) The batch splits output tensor. Data types supported: Same as @p scores_in
60  * @param[out] keeps (Optional) The keeps output tensor of size [N]. Data types supported: Same as @p scores_in
61  * @param[in] keeps_size (Optional) Number of filtered indices per class tensor of size [num_classes]. Data types supported: U32.
62  * @param[in] info (Optional) BoxNMSLimitInfo information.
63  */
64  void configure(const ITensor *scores_in, const ITensor *boxes_in, const ITensor *batch_splits_in, ITensor *scores_out, ITensor *boxes_out, ITensor *classes,
65  ITensor *batch_splits_out = nullptr, ITensor *keeps = nullptr, ITensor *keeps_size = nullptr, const BoxNMSLimitInfo info = BoxNMSLimitInfo());
66  /** Static function to check if given info will lead to a valid configuration of @ref CPPBoxWithNonMaximaSuppressionLimit
67  *
68  * @param[in] scores_in The scores input tensor of size [count, num_classes]. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
69  * @param[in] boxes_in The boxes input tensor of size [count, num_classes * 4].
70  * Data types supported: QASYMM16 with 0.125 scale and 0 offset if @p scores_in is QASYMM8/QASYMM8_SIGNED, otherwise same as @p scores_in
71  * @param[in] batch_splits_in The batch splits input tensor of size [batch_size]. Data types supported: Same as @p scores_in
72  * @note Can be a nullptr. If not a nullptr, @p scores_in and @p boxes_in have items from multiple images.
73  * @param[in] scores_out The scores output tensor of size [N]. Data types supported: Same as @p scores_in
74  * @param[in] boxes_out The boxes output tensor of size [N, 4].
75  * Data types supported: QASYMM16 with 0.125 scale and 0 offset if @p scores_in is QASYMM8/QASYMM8_SIGNED, otherwise same as @p scores_in
76  * @param[in] classes The classes output tensor of size [N]. Data types supported: Same as @p scores_in
77  * @param[in] batch_splits_out (Optional) The batch splits output tensor. Data types supported: Same as @p scores_in
78  * @param[in] keeps (Optional) The keeps output tensor of size [N]. Data types supported: Same as @p scores_in
79  * @param[in] keeps_size (Optional) Number of filtered indices per class tensor of size [num_classes]. Data types supported: U32.
80  * @param[in] info (Optional) BoxNMSLimitInfo information.
81  *
82  * @return a status
83  */
84  static Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out,
85  const ITensorInfo *classes,
86  const ITensorInfo *batch_splits_out = nullptr, const ITensorInfo *keeps = nullptr, const ITensorInfo *keeps_size = nullptr, const BoxNMSLimitInfo info = BoxNMSLimitInfo());
87  // Inherited methods overridden:
88  void run() override;
89 
90 private:
// Owns the lifetime of the intermediate tensors below -- presumably registered with the
// memory manager passed to the constructor; confirm against the .cpp.
91  MemoryGroup _memory_group;
92 
// The CPP kernel that performs the actual box-with-NMS-limit computation.
93  CPPBoxWithNonMaximaSuppressionLimitKernel _box_with_nms_limit_kernel;
94 
// Cached pointers to the user-supplied tensors set in configure().
95  const ITensor *_scores_in;
96  const ITensor *_boxes_in;
97  const ITensor *_batch_splits_in;
98  ITensor *_scores_out;
99  ITensor *_boxes_out;
100  ITensor *_classes;
101  ITensor *_batch_splits_out;
102  ITensor *_keeps;
103 
// Intermediate F32 counterparts of the tensors above -- presumably staging buffers used
// when the inputs are quantized (see _is_qasymm8); TODO confirm in the .cpp.
104  Tensor _scores_in_f32;
105  Tensor _boxes_in_f32;
106  Tensor _batch_splits_in_f32;
107  Tensor _scores_out_f32;
108  Tensor _boxes_out_f32;
109  Tensor _classes_f32;
110  Tensor _batch_splits_out_f32;
111  Tensor _keeps_f32;
112 
// Presumably set in configure() when @p scores_in is QASYMM8/QASYMM8_SIGNED -- TODO confirm.
113  bool _is_qasymm8;
114 };
115 } // namespace arm_compute
116 #endif /* ARM_COMPUTE_CPPBOXWITHNONMAXIMASUPPRESSIONLIMIT_H */
arm_compute::CPPBoxWithNonMaximaSuppressionLimitKernel
CPP kernel to perform computation of BoxWithNonMaximaSuppressionLimit.
Definition: CPPBoxWithNonMaximaSuppressionLimitKernel.h:35
arm_compute::IFunction
Base class for all functions.
Definition: IFunction.h:30
Types.h
arm_compute::ITensor
Interface for CPU tensor.
Definition: ITensor.h:36
arm_compute::BoxNMSLimitInfo
BoxWithNonMaximaSuppressionLimit Information class.
Definition: Types.h:507
arm_compute::CPPBoxWithNonMaximaSuppressionLimit::CPPBoxWithNonMaximaSuppressionLimit
CPPBoxWithNonMaximaSuppressionLimit(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
Constructor.
Definition: CPPBoxWithNonMaximaSuppressionLimit.cpp:112
arm_compute::CPPBoxWithNonMaximaSuppressionLimit
Basic function to run CPPBoxWithNonMaximaSuppressionLimitKernel.
Definition: CPPBoxWithNonMaximaSuppressionLimit.h:39
arm_compute::CPPBoxWithNonMaximaSuppressionLimit::validate
static Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out=nullptr, const ITensorInfo *keeps=nullptr, const ITensorInfo *keeps_size=nullptr, const BoxNMSLimitInfo info=BoxNMSLimitInfo())
Static function to check if given info will lead to a valid configuration of CPPBoxWithNonMaximaSuppressionLimit.
arm_compute::CPPBoxWithNonMaximaSuppressionLimit::configure
void configure(const ITensor *scores_in, const ITensor *boxes_in, const ITensor *batch_splits_in, ITensor *scores_out, ITensor *boxes_out, ITensor *classes, ITensor *batch_splits_out=nullptr, ITensor *keeps=nullptr, ITensor *keeps_size=nullptr, const BoxNMSLimitInfo info=BoxNMSLimitInfo())
Configure the BoxWithNonMaximaSuppressionLimit CPP kernel.
Definition: CPPBoxWithNonMaximaSuppressionLimit.cpp:135
arm_compute::CPPBoxWithNonMaximaSuppressionLimit::run
void run() override
Run the kernels contained in the function.
Definition: CPPBoxWithNonMaximaSuppressionLimit.cpp:235
MemoryGroup.h
IMemoryManager.h
arm_compute::Status
Status class.
Definition: Error.h:52
Tensor.h
arm_compute::CPPBoxWithNonMaximaSuppressionLimit::operator=
CPPBoxWithNonMaximaSuppressionLimit & operator=(const CPPBoxWithNonMaximaSuppressionLimit &)=delete
Prevent instances of this class from being copied (As this class contains pointers)
IFunction.h
arm_compute
Copyright (c) 2017-2023 Arm Limited.
Definition: introduction.dox:24
arm_compute::ITensorInfo
Store the tensor's metadata.
Definition: ITensorInfo.h:43
arm_compute::Tensor
Basic implementation of the tensor interface.
Definition: Tensor.h:37
arm_compute::test::validation::info
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
CPPBoxWithNonMaximaSuppressionLimitKernel.h
arm_compute::MemoryGroup
Memory group.
Definition: MemoryGroup.h:43