Compute Library 21.02
NEL2NormalizeLayer.cpp
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEL2NormalizeLayerKernel.h"

namespace arm_compute
{
namespace
{
constexpr int max_input_tensor_dim = 3;
} // namespace

NEL2NormalizeLayer::~NEL2NormalizeLayer() = default;

NEL2NormalizeLayer::NEL2NormalizeLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _reduce_func(), _normalize_kernel(), _sumsq()
{
}

void NEL2NormalizeLayer::configure(ITensor *input, ITensor *output, int axis, float epsilon)
{
    // Manage intermediate buffers
    _memory_group.manage(&_sumsq);

    // Configure kernels
    const uint32_t actual_axis = wrap_around(axis, max_input_tensor_dim);
    _reduce_func.configure(input, &_sumsq, actual_axis, ReductionOperation::SUM_SQUARE);
    _normalize_kernel = std::make_unique<NEL2NormalizeLayerKernel>();
    _normalize_kernel->configure(input, &_sumsq, output, axis, epsilon);

    // Allocate intermediate tensors
    _sumsq.allocator()->allocate();
}

Status NEL2NormalizeLayer::validate(const ITensorInfo *input, const ITensorInfo *output, int axis, float epsilon)
{
    TensorShape shape(input->tensor_shape());

    // Create intermediate tensor info
    TensorInfo sum_sq;
    sum_sq.set_data_type(input->data_type());
    sum_sq.set_tensor_shape(shape);

    const uint32_t actual_axis = wrap_around(axis, max_input_tensor_dim);
    ARM_COMPUTE_RETURN_ON_ERROR(NEReductionOperation::validate(input, &sum_sq, actual_axis, ReductionOperation::SUM_SQUARE));

    // Reduce shape on axis
    shape.set(actual_axis, 1);
    sum_sq.set_tensor_shape(shape);

    ARM_COMPUTE_RETURN_ON_ERROR(NEL2NormalizeLayerKernel::validate(input, &sum_sq, output, axis, epsilon));

    return Status{};
}

void NEL2NormalizeLayer::run()
{
    MemoryGroupResourceScope scope_mg(_memory_group);

    _reduce_func.run();
    NEScheduler::get().schedule(_normalize_kernel.get(), Window::DimY);
}
} // namespace arm_compute
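
For context, here is a minimal usage sketch of the function implemented above. It is not part of the documented file: the tensor shape, axis, and epsilon values are illustrative assumptions, while the configure/allocate/run sequence follows the library's usual pattern for Neon functions.

// Usage sketch (illustrative only, not from NEL2NormalizeLayer.cpp).
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor input;
    Tensor output;

    // Example 2D shape: 16 elements per row, 4 rows (assumed values).
    const TensorShape shape(16U, 4U);
    input.allocator()->init(TensorInfo(shape, 1, DataType::F32));
    output.allocator()->init(TensorInfo(shape, 1, DataType::F32));

    // Normalize along axis 0 (X). Negative axes are wrapped by the function,
    // e.g. axis = -1 maps to 2 because max_input_tensor_dim is 3.
    NEL2NormalizeLayer l2_norm;
    l2_norm.configure(&input, &output, 0, 1e-6f);

    // Allocate backing memory after configuration.
    input.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input with data here ...

    l2_norm.run();
    return 0;
}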