Compute Library 21.11
TensorAllocator.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/TensorAllocator.h"

#include "arm_compute/core/utils/misc/MMappedFile.h"
#include "arm_compute/core/utils/misc/Utility.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "tests/Globals.h"
#include "tests/Utils.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/validation/Validation.h"

#include <memory>
#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
TEST_SUITE(NEON)
TEST_SUITE(UNIT)
TEST_SUITE(TensorAllocator)

TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
{
    // Init tensor info
    const TensorInfo info = TensorInfo(TensorShape(24U, 16U, 3U), 1, DataType::F32);

    // Allocate memory buffer
    const size_t total_size = info.total_size();
    auto data = std::make_unique<uint8_t[]>(total_size);

    // Negative case : Import nullptr
    Tensor t1;
    t1.allocator()->init(info);
    ARM_COMPUTE_ASSERT(!bool(t1.allocator()->import_memory(nullptr)));
    ARM_COMPUTE_ASSERT(t1.info()->is_resizable());

    // Negative case : Import misaligned pointer
    Tensor t2;
    const size_t required_alignment = 339;
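    // 339 is deliberately not a power of two; a pointer returned by new[] is
    // effectively never a multiple of it, so this import is expected to fail.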
    t2.allocator()->init(info, required_alignment);
    ARM_COMPUTE_ASSERT(!bool(t2.allocator()->import_memory(data.get())));
    ARM_COMPUTE_ASSERT(t2.info()->is_resizable());

    // Negative case : Import memory to a tensor that is memory managed
    Tensor t3;
    MemoryGroup mg;
    t3.allocator()->set_associated_memory_group(&mg);
    ARM_COMPUTE_ASSERT(!bool(t3.allocator()->import_memory(data.get())));
    ARM_COMPUTE_ASSERT(t3.info()->is_resizable());

    // Positive case : Set raw pointer
    Tensor t4;
    t4.allocator()->init(info);
    ARM_COMPUTE_ASSERT(bool(t4.allocator()->import_memory(data.get())));
    ARM_COMPUTE_ASSERT(!t4.info()->is_resizable());
    ARM_COMPUTE_ASSERT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()));
    t4.allocator()->free();
    ARM_COMPUTE_ASSERT(t4.info()->is_resizable());
    ARM_COMPUTE_ASSERT(t4.buffer() == nullptr);
}

TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
{
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);
    const TensorShape shape = TensorShape(24U, 16U, 3U);
    const DataType data_type = DataType::F32;

    // Create tensor
    const TensorInfo info(shape, 1, data_type);
    const size_t required_alignment = 64;
    Tensor tensor;
    tensor.allocator()->init(info, required_alignment);

    // Create and configure activation function
    NEActivationLayer act_func;
    act_func.configure(&tensor, nullptr, act_info);
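    // A null output makes the activation run in place on the imported buffer,
    // which is what the "no negative values" check below relies on.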

    // Allocate and import tensor
    const size_t total_size_in_elems = tensor.info()->tensor_shape().total_size();
    const size_t total_size_in_bytes = tensor.info()->total_size();
    size_t space = total_size_in_bytes + required_alignment;
    auto raw_data = std::make_unique<uint8_t[]>(space);

    void *aligned_ptr = raw_data.get();
    std::align(required_alignment, total_size_in_bytes, aligned_ptr, space);
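    // std::align advances aligned_ptr to the next 64-byte boundary inside the
    // over-allocated buffer; the extra required_alignment bytes guarantee it fits.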

    ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(aligned_ptr)));
    ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());

    // Fill tensor
    std::uniform_real_distribution<float> distribution(-5.f, 5.f);
    std::mt19937 gen(library->seed());
    auto *typed_ptr = reinterpret_cast<float *>(aligned_ptr);
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        typed_ptr[i] = distribution(gen);
    }

    // Execute function and sync
    act_func.run();

    // Validate result by checking that the input has no negative values
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        ARM_COMPUTE_EXPECT(typed_ptr[i] >= 0, framework::LogLevel::ERRORS);
    }

    // Release resources
    tensor.allocator()->free();
    ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}

TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
{
    // Create tensor
    Tensor tensor;
    tensor.allocator()->init(TensorInfo(TensorShape(24U, 16U, 3U), 1, DataType::F32));

    // Enforce tensor padding and validate that meta-data were updated
    // Note: Padding might be updated after the function configuration in case of increased padding requirements
    const PaddingSize enforced_padding(3U, 5U, 2U, 4U);
    tensor.info()->extend_padding(enforced_padding);
    validate(tensor.info()->padding(), enforced_padding);
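    // extend_padding() updates the offset, strides and total size of the tensor's
    // metadata, so the buffer imported below must be sized for the padded layout,
    // not just for the element count.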

    // Create and configure activation function
    NEActivationLayer act_func;
    act_func.configure(&tensor, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    // Allocate and import tensor
    const size_t total_size_in_bytes = tensor.info()->total_size();
    auto raw_data = std::make_unique<uint8_t[]>(total_size_in_bytes);

    ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(raw_data.get())));
    ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());

    // Fill tensor while accounting padding
    std::uniform_real_distribution<float> distribution(-5.f, 5.f);
    std::mt19937 gen(library->seed());

    Window tensor_window;
    tensor_window.use_tensor_dimensions(tensor.info()->tensor_shape());
    Iterator tensor_it(&tensor, tensor_window);

    execute_window_loop(tensor_window, [&](const Coordinates &)
    {
        *reinterpret_cast<float *>(tensor_it.ptr()) = distribution(gen);
    },
    tensor_it);
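    // Iterating with a Window/Iterator pair visits only the valid elements and
    // steps over the padding bytes, unlike the flat pointer loops in the tests above.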

    // Execute function and sync
    act_func.run();

    // Validate result by checking that the input has no negative values
    execute_window_loop(tensor_window, [&](const Coordinates &)
    {
        const float val = *reinterpret_cast<float *>(tensor_it.ptr());
        ARM_COMPUTE_EXPECT(val >= 0, framework::LogLevel::ERRORS);
    },
    tensor_it);

    // Release resources
    tensor.allocator()->free();
    ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}

#if !defined(BARE_METAL)
TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)
{
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);
    const TensorShape shape = TensorShape(24U, 16U, 3U);
    const DataType data_type = DataType::F32;

    // Create tensor
    const TensorInfo info(shape, 1, data_type);
    Tensor tensor;
    tensor.allocator()->init(info);

    // Create and configure activation function
    NEActivationLayer act_func;
    act_func.configure(&tensor, nullptr, act_info);

    // Get number of elements
    const size_t total_size_in_elems = tensor.info()->tensor_shape().total_size();
    const size_t total_size_in_bytes = tensor.info()->total_size();

    // Create file
    std::ofstream output_file("test_mmap_import.bin", std::ios::binary | std::ios::out);
    output_file.seekp(total_size_in_bytes - 1);
    output_file.write("", 1);
    output_file.close();
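    // Seeking to the last position and writing a single byte extends the file to
    // exactly total_size_in_bytes, so the mapping below covers the whole tensor.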

    // Map file
    utils::mmap_io::MMappedFile mmapped_file("test_mmap_import.bin", 0 /** Whole file */, 0);
    ARM_COMPUTE_ASSERT(mmapped_file.is_mapped());
    unsigned char *data = mmapped_file.data();

    // Import memory mapped memory
    ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(data)));
    ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());

    // Fill tensor
    std::uniform_real_distribution<float> distribution(-5.f, 5.f);
    std::mt19937 gen(library->seed());
    auto *typed_ptr = reinterpret_cast<float *>(data);
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        typed_ptr[i] = distribution(gen);
    }

    // Execute function and sync
    act_func.run();

    // Validate result by checking that the input has no negative values
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        ARM_COMPUTE_EXPECT(typed_ptr[i] >= 0, framework::LogLevel::ERRORS);
    }

    // Release resources
    tensor.allocator()->free();
    ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}
#endif // !defined(BARE_METAL)

TEST_CASE(AlignedAlloc, framework::DatasetMode::ALL)
{
    // Init tensor info
    const TensorInfo info = TensorInfo(TensorShape(24U, 16U, 3U), 1, DataType::F32);
    const size_t requested_alignment = 1024;

    Tensor t;
    t.allocator()->init(info, requested_alignment);
    t.allocator()->allocate();

    ARM_COMPUTE_ASSERT(t.buffer() != nullptr);
    ARM_COMPUTE_EXPECT(t.allocator()->alignment() == requested_alignment, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(arm_compute::utility::check_aligned(reinterpret_cast<void *>(t.buffer()), requested_alignment),
                       framework::LogLevel::ERRORS);
}

TEST_SUITE_END() // TensorAllocator
TEST_SUITE_END() // UNIT
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute
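The tests above all exercise the same external-memory lifecycle: initialise the allocator with a TensorInfo, hand it a caller-owned pointer through import_memory(), run a function on the tensor, then call free() to drop the association while the caller keeps ownership of the buffer. The standalone sketch below illustrates that flow outside the test framework; it is an illustrative example rather than part of this file, and the 24x16x3 F32 shape and in-place ReLU are simply borrowed from the tests above.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    #include <cstdint>
    #include <memory>

    using namespace arm_compute;

    int main()
    {
        // Describe the tensor: 24x16x3 elements, single plane, 32-bit floats.
        const TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);

        // Caller-owned backing memory; it must outlive the tensor's use of it.
        auto buffer = std::make_unique<uint8_t[]>(info.total_size());

        Tensor tensor;
        tensor.allocator()->init(info);

        // Hand the external buffer to the allocator instead of calling allocate().
        // On success the tensor is no longer resizable and buffer() aliases our memory.
        if(!bool(tensor.allocator()->import_memory(buffer.get())))
        {
            return 1; // import rejected (e.g. nullptr or misaligned pointer)
        }

        // Fill the imported memory through the tensor's buffer() view.
        auto *ptr = reinterpret_cast<float *>(tensor.buffer());
        for(size_t i = 0; i < info.tensor_shape().total_size(); ++i)
        {
            ptr[i] = static_cast<float>(i) - 100.0f; // mix of negative and positive values
        }

        // Configure and run a function on the imported memory (in-place ReLU here).
        NEActivationLayer act;
        act.configure(&tensor, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        act.run();

        // Release the tensor's view of the memory; the caller still owns the buffer.
        tensor.allocator()->free();
        return 0;
    }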