Compute Library 21.02
NECannyEdge.cpp
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NECannyEdge.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NESobel3x3.h"
#include "arm_compute/runtime/NEON/functions/NESobel5x5.h"
#include "arm_compute/runtime/NEON/functions/NESobel7x7.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/NEON/kernels/NECannyEdgeKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"

#include <algorithm>
#include <cstring>
#include <inttypes.h>
#include <utility>

namespace arm_compute
{
NECannyEdge::~NECannyEdge() = default;

NECannyEdge::NECannyEdge(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
    : _memory_group(std::move(memory_manager)),
      _sobel(),
      _gradient(),
      _non_max_suppr(),
      _edge_trace(),
      _border_mag_gradient(),
      _border_edge_trace(),
      _gx(),
      _gy(),
      _magnitude(),
      _phase(),
      _nonmax(),
      _output(nullptr)
{
}

void NECannyEdge::configure(ITensor *input, ITensor *output, int32_t upper_thr, int32_t lower_thr, int32_t gradient_size, int32_t norm_type, BorderMode border_mode, uint8_t constant_border_value)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON((1 != norm_type) && (2 != norm_type));
    ARM_COMPUTE_ERROR_ON((gradient_size != 3) && (gradient_size != 5) && (gradient_size != 7));
    ARM_COMPUTE_ERROR_ON((lower_thr < 0) || (lower_thr >= upper_thr));

    _output = output;

    const TensorShape &shape = input->info()->tensor_shape();
    TensorInfo         gradient_info;
    TensorInfo         magnitude_info;

    // Initialize images
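    // 16-bit intermediates are wide enough for the 3x3 and 5x5 Sobel responses
    // on U8 input; the 7x7 kernel can overflow 16 bits, so use 32-bit formats.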
    if(gradient_size < 7)
    {
        gradient_info.init(shape, Format::S16);
        magnitude_info.init(shape, Format::U16);
    }
    else
    {
        gradient_info.init(shape, Format::S32);
        magnitude_info.init(shape, Format::U32);
    }

    _gx.allocator()->init(gradient_info);
    _gy.allocator()->init(gradient_info);
    _magnitude.allocator()->init(magnitude_info);

    TensorInfo info(shape, Format::U8);
    _phase.allocator()->init(info);
    _nonmax.allocator()->init(info);

    // Manage intermediate buffers
    _memory_group.manage(&_gx);
    _memory_group.manage(&_gy);

    // Configure/Init sobelNxN
    if(gradient_size == 3)
    {
        auto k = std::make_unique<NESobel3x3>();
        k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
        _sobel = std::move(k);
    }
    else if(gradient_size == 5)
    {
        auto k = std::make_unique<NESobel5x5>();
        k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
        _sobel = std::move(k);
    }
    else if(gradient_size == 7)
    {
        auto k = std::make_unique<NESobel7x7>();
        k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
        _sobel = std::move(k);
    }
    else
    {
        ARM_COMPUTE_ERROR_VAR("Gradient size %+" PRId32 " not supported\n", gradient_size);
    }

    // Manage intermediate buffers
    _memory_group.manage(&_magnitude);
    _memory_group.manage(&_phase);

    // Configure gradient
    auto k = std::make_unique<NEGradientKernel>();
    k->configure(&_gx, &_gy, &_magnitude, &_phase, norm_type);
    _gradient = std::move(k);

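    // _gx and _gy feed only the gradient kernel configured above, so allocating
    // them here closes their lifetime and lets the memory group reuse their
    // backing memory for the tensors managed below.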
    // Allocate intermediate tensors
    _gx.allocator()->allocate();
    _gy.allocator()->allocate();

    // Manage intermediate buffers
    _memory_group.manage(&_nonmax);

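    // Besides thinning the gradient ridges, this kernel applies the hysteresis
    // thresholds: magnitudes above upper_thr are marked as edges, magnitudes
    // between the two thresholds become candidates resolved by edge tracing.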
    // Configure non-maxima suppression
    _non_max_suppr = std::make_unique<NEEdgeNonMaxSuppressionKernel>();
    _non_max_suppr->configure(&_magnitude, &_phase, &_nonmax, upper_thr, lower_thr, border_mode == BorderMode::UNDEFINED);

    // Fill border around magnitude image as non-maxima suppression will access
    // it. If border mode is undefined filling the border is a nop.
    _border_mag_gradient = std::make_unique<NEFillBorderKernel>();
    _border_mag_gradient->configure(&_magnitude, _non_max_suppr->border_size(), border_mode, constant_border_value);

    // Allocate intermediate tensors
    _phase.allocator()->allocate();
    _magnitude.allocator()->allocate();

    // Configure edge tracing
    _edge_trace = std::make_unique<NEEdgeTraceKernel>();
    _edge_trace->configure(&_nonmax, output);

    // Fill border with "No edge" to stop recursion in edge trace
    _border_edge_trace = std::make_unique<NEFillBorderKernel>();
    _border_edge_trace->configure(&_nonmax, _edge_trace->border_size(), BorderMode::CONSTANT, static_cast<float>(0.f));

    // Allocate intermediate tensors
    _nonmax.allocator()->allocate();
}

void NECannyEdge::run()
{
    ARM_COMPUTE_ERROR_ON_MSG(_sobel == nullptr, "Unconfigured function");

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Run sobelNxN
    _sobel->run();

    // Run gradient
    NEScheduler::get().schedule(_gradient.get(), Window::DimY);

    // Fill border before non-maxima suppression. Nop for border mode undefined.
    NEScheduler::get().schedule(_border_mag_gradient.get(), Window::DimZ);

    // Run non-maxima suppression
    NEScheduler::get().schedule(_non_max_suppr.get(), Window::DimY);

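    // Zero the output so "no edge" is the default; the edge trace kernel only
    // writes the pixels it classifies as edges.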
    ARM_COMPUTE_ERROR_ON(_output->buffer() == nullptr);
    std::fill_n(_output->buffer(), _output->info()->total_size(), 0);

    // Fill border before edge trace
    NEScheduler::get().schedule(_border_edge_trace.get(), Window::DimZ);

    // Run edge tracing
    NEScheduler::get().schedule(_edge_trace.get(), Window::DimY);
}
} // namespace arm_compute
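
For reference, below is a minimal usage sketch of NECannyEdge; it is not part of the file above. It assumes the library's standard Tensor/TensorAllocator workflow, and the image shape, thresholds, and border mode are illustrative values chosen for the example.

#include "arm_compute/runtime/NEON/functions/NECannyEdge.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative single-channel 640x480 U8 images (shape is an arbitrary choice)
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(640U, 480U), Format::U8));
    dst.allocator()->init(TensorInfo(TensorShape(640U, 480U), Format::U8));

    // upper_thr = 100, lower_thr = 50, 3x3 Sobel, L1 norm (norm_type == 1)
    NECannyEdge canny;
    canny.configure(&src, &dst, 100, 50, 3, 1, BorderMode::REPLICATE);

    // Backing memory is allocated after configure() and before run()
    src.allocator()->allocate();
    dst.allocator()->allocate();

    // Fill src with image data here, then execute the whole Canny pipeline
    canny.run();
    return 0;
}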