Compute Library
 20.08
NECannyEdge.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
26 #include "arm_compute/core/Error.h"
37 #include "support/MemorySupport.h"
38 
39 #include <cstring>
40 #include <inttypes.h>
41 #include <utility>
42 
43 using namespace arm_compute;
44 
45 NECannyEdge::NECannyEdge(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
46  : _memory_group(std::move(memory_manager)),
47  _sobel(),
48  _gradient(),
49  _non_max_suppr(),
50  _edge_trace(),
51  _border_mag_gradient(),
52  _border_edge_trace(),
53  _gx(),
54  _gy(),
55  _magnitude(),
56  _phase(),
57  _nonmax(),
58  _output(nullptr)
59 {
60 }
61 
62 void NECannyEdge::configure(ITensor *input, ITensor *output, int32_t upper_thr, int32_t lower_thr, int32_t gradient_size, int32_t norm_type, BorderMode border_mode, uint8_t constant_border_value)
63 {
67  ARM_COMPUTE_ERROR_ON((1 != norm_type) && (2 != norm_type));
68  ARM_COMPUTE_ERROR_ON((gradient_size != 3) && (gradient_size != 5) && (gradient_size != 7));
69  ARM_COMPUTE_ERROR_ON((lower_thr < 0) || (lower_thr >= upper_thr));
70 
71  _output = output;
72 
73  const TensorShape &shape = input->info()->tensor_shape();
74  TensorInfo gradient_info;
75  TensorInfo magnitude_info;
76 
77  // Initialize images
78  if(gradient_size < 7)
79  {
80  gradient_info.init(shape, Format::S16);
81  magnitude_info.init(shape, Format::U16);
82  }
83  else
84  {
85  gradient_info.init(shape, Format::S32);
86  magnitude_info.init(shape, Format::U32);
87  }
88 
89  _gx.allocator()->init(gradient_info);
90  _gy.allocator()->init(gradient_info);
91  _magnitude.allocator()->init(magnitude_info);
92 
94  _phase.allocator()->init(info);
95  _nonmax.allocator()->init(info);
96 
97  // Manage intermediate buffers
98  _memory_group.manage(&_gx);
99  _memory_group.manage(&_gy);
100 
101  // Configure/Init sobelNxN
102  if(gradient_size == 3)
103  {
104  auto k = arm_compute::support::cpp14::make_unique<NESobel3x3>();
105  k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
106  _sobel = std::move(k);
107  }
108  else if(gradient_size == 5)
109  {
110  auto k = arm_compute::support::cpp14::make_unique<NESobel5x5>();
111  k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
112  _sobel = std::move(k);
113  }
114  else if(gradient_size == 7)
115  {
116  auto k = arm_compute::support::cpp14::make_unique<NESobel7x7>();
117  k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
118  _sobel = std::move(k);
119  }
120  else
121  {
122  ARM_COMPUTE_ERROR_VAR("Gradient size %+" PRId32 " not supported\n", gradient_size);
123  }
124 
125  // Manage intermediate buffers
126  _memory_group.manage(&_magnitude);
127  _memory_group.manage(&_phase);
128 
129  // Configure gradient
130  auto k = arm_compute::support::cpp14::make_unique<NEGradientKernel>();
131  k->configure(&_gx, &_gy, &_magnitude, &_phase, norm_type);
132  _gradient = std::move(k);
133 
134  // Allocate intermediate tensors
135  _gx.allocator()->allocate();
136  _gy.allocator()->allocate();
137 
138  // Manage intermediate buffers
139  _memory_group.manage(&_nonmax);
140 
141  // Configure non-maxima suppression
142  _non_max_suppr.configure(&_magnitude, &_phase, &_nonmax, upper_thr, lower_thr, border_mode == BorderMode::UNDEFINED);
143 
144  // Fill border around magnitude image as non-maxima suppression will access
145  // it. If border mode is undefined filling the border is a nop.
146  _border_mag_gradient.configure(&_magnitude, _non_max_suppr.border_size(), border_mode, constant_border_value);
147 
148  // Allocate intermediate tensors
149  _phase.allocator()->allocate();
150  _magnitude.allocator()->allocate();
151 
152  // Configure edge tracing
153  _edge_trace.configure(&_nonmax, output);
154 
155  // Fill border with "No edge" to stop recursion in edge trace
156  _border_edge_trace.configure(&_nonmax, _edge_trace.border_size(), BorderMode::CONSTANT, static_cast<float>(0.f));
157 
158  // Allocate intermediate tensors
159  _nonmax.allocator()->allocate();
160 }
161 
163 {
164  ARM_COMPUTE_ERROR_ON_MSG(_sobel == nullptr, "Unconfigured function");
165 
166  MemoryGroupResourceScope scope_mg(_memory_group);
167 
168  // Run sobelNxN
169  _sobel->run();
170 
171  // Run gradient
172  NEScheduler::get().schedule(_gradient.get(), Window::DimY);
173 
174  // Fill border before non-maxima suppression. Nop for border mode undefined.
175  NEScheduler::get().schedule(&_border_mag_gradient, Window::DimZ);
176 
177  // Run non-maxima suppression
178  NEScheduler::get().schedule(&_non_max_suppr, Window::DimY);
179 
180  ARM_COMPUTE_ERROR_ON(_output->buffer() == nullptr);
181  std::fill_n(_output->buffer(), _output->info()->total_size(), 0);
182 
183  // Fill border before edge trace
184  NEScheduler::get().schedule(&_border_edge_trace, Window::DimZ);
185 
186  // Run edge tracing
187  NEScheduler::get().schedule(&_edge_trace, Window::DimY);
188 }
BorderMode
Methods available to handle borders.
Definition: Types.h:264
Shape of a tensor.
Definition: TensorShape.h:39
void init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo &sub_info)
Shares the same backing memory with another tensor allocator, while the tensor info might be different.
NECannyEdge(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
Constructor.
Definition: NECannyEdge.cpp:45
1 channel, 1 U8 per channel
#define ARM_COMPUTE_ERROR_VAR(msg,...)
Print the given message then throw an std::runtime_error.
Definition: Error.h:346
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
1 channel, 1 U16 per channel
void configure(ITensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value=PixelValue())
Initialise the function.
Interface for NEON tensor.
Definition: ITensor.h:36
void run() override
Run the kernels contained in the function.
Copyright (c) 2017-2020 Arm Limited.
BorderSize border_size() const override
The size of the border for that kernel.
TensorAllocator * allocator()
Return a pointer to the tensor's allocator.
Definition: Tensor.cpp:48
void configure(ITensor *input, ITensor *output, int32_t upper_thr, int32_t lower_thr, int32_t gradient_size, int32_t norm_type, BorderMode border_mode, uint8_t constant_border_value=0)
Initialise the function's source, destination, thresholds, gradient size, normalization type and border mode.
Definition: NECannyEdge.cpp:62
1 channel, 1 S32 per channel
void manage(IMemoryManageable *obj) override
Sets a object to be managed by the given memory group.
Definition: MemoryGroup.h:79
1 channel, 1 U32 per channel
BorderSize border_size() const override
The size of the border for that kernel.
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
virtual uint8_t * buffer() const =0
Interface to be implemented by the child class to return a pointer to CPU memory.
void allocate() override
Allocate size specified by TensorInfo of CPU memory.
void configure(const ITensor *magnitude, const ITensor *phase, ITensor *output, int32_t upper_thr, int32_t lower_thr, bool border_undefined)
Initialise the kernel's sources, destination and border mode.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
1 channel, 1 S16 per channel
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Definition: Validate.h:790
void init(Format format)
Initialize the tensor info with just a format.
Definition: TensorInfo.cpp:107
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
Definition: Window.h:45
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
Definition: Validate.h:161
Memory group resources scope handling class.
Definition: IMemoryGroup.h:82
virtual size_t total_size() const =0
Returns the total size of the tensor in bytes.
virtual void schedule(ICPPKernel *kernel, const Hints &hints)=0
Runs the kernel in the same thread as the caller synchronously.
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
Definition: Window.h:47
void configure(ITensor *input, ITensor *output)
Initialise the kernel's source, destination and border mode.
Borders are left undefined.
Store the tensor's metadata.
Definition: TensorInfo.h:45
static IScheduler & get()
Access the scheduler singleton.
Definition: Scheduler.cpp:95