Compute Library
 23.08
RuntimeContext.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2019-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/RuntimeContext.h"
#include "arm_compute/runtime/SchedulerFactory.h"
#include "arm_compute/runtime/Tensor.h"
#include "tests/Globals.h"
#include "tests/NEON/Accessor.h"
#include "tests/Utils.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/validation/Validation.h"
#include "tests/validation/reference/ActivationLayer.h"

#include <memory>
#include <random>
#if !defined(BARE_METAL)
#include <thread>
#endif // !defined(BARE_METAL)
42 
43 namespace arm_compute
44 {
45 namespace test
46 {
47 namespace validation
48 {
49 TEST_SUITE(NEON)
50 TEST_SUITE(UNIT)
51 TEST_SUITE(RuntimeContext)
52 
53 TEST_CASE(Scheduler, framework::DatasetMode::ALL)
54 {
55  using namespace arm_compute;
56  // Create a runtime context object
57  RuntimeContext ctx;
58 
59  // Check if it's been initialised properly
60  ARM_COMPUTE_ASSERT(ctx.scheduler() != nullptr);
61  ARM_COMPUTE_ASSERT(ctx.asset_manager() == nullptr);
62 
63  // Create a Scheduler
64  auto scheduler = SchedulerFactory::create();
65  ctx.set_scheduler(scheduler.get());
66  // Check if the scheduler has been properly setup
67  ARM_COMPUTE_ASSERT(ctx.scheduler() != nullptr);
68 
69  // Create a new activation function
70  NEActivationLayer act_layer(&ctx);
71 
72  Tensor src = create_tensor<Tensor>(TensorShape(32, 32), DataType::F32, 1);
73  Tensor dst = create_tensor<Tensor>(TensorShape(32, 32), DataType::F32, 1);
74 
75  act_layer.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR));
76 
77  ARM_COMPUTE_ASSERT(src.info()->is_resizable());
78  ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
79 
80  // Allocate tensors
81  src.allocator()->allocate();
82  dst.allocator()->allocate();
83 
84  ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
85 
86  float min_bound = 0;
87  float max_bound = 0;
88  std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<float>(ActivationLayerInfo::ActivationFunction::LINEAR, DataType::F32);
89  std::uniform_real_distribution<> distribution(min_bound, max_bound);
90  library->fill(Accessor(src), distribution, 0);
91 
92  // Compute function
93  act_layer.run();
94 }
95 
96 #if !defined(BARE_METAL)
97 // This test tries scheduling work concurrently from two independent threads
98 TEST_CASE(MultipleThreadedScheduller, framework::DatasetMode::ALL)
99 {
100  // Create a runtime context object for thread 1
101  RuntimeContext ctx1;
102 
103  // Create a runtime context object for thread 2
104  RuntimeContext ctx2;
105 
106  // Create a new activation function
107  NEActivationLayer act_layer_thread0(&ctx1);
108  NEActivationLayer act_layer_thread1(&ctx2);
109 
110  const TensorShape tensor_shape(128, 128);
111  Tensor src_t0 = create_tensor<Tensor>(tensor_shape, DataType::F32, 1);
112  Tensor dst_t0 = create_tensor<Tensor>(tensor_shape, DataType::F32, 1);
113  Tensor src_t1 = create_tensor<Tensor>(tensor_shape, DataType::F32, 1);
114  Tensor dst_t1 = create_tensor<Tensor>(tensor_shape, DataType::F32, 1);
115  ActivationLayerInfo activation_info(ActivationLayerInfo::ActivationFunction::LINEAR);
116 
117  act_layer_thread0.configure(&src_t0, &dst_t0, activation_info);
118  act_layer_thread1.configure(&src_t1, &dst_t1, activation_info);
119 
120  ARM_COMPUTE_ASSERT(src_t0.info()->is_resizable());
121  ARM_COMPUTE_ASSERT(dst_t0.info()->is_resizable());
122  ARM_COMPUTE_ASSERT(src_t1.info()->is_resizable());
123  ARM_COMPUTE_ASSERT(dst_t1.info()->is_resizable());
124 
125  // Allocate tensors
126  src_t0.allocator()->allocate();
127  dst_t0.allocator()->allocate();
128  src_t1.allocator()->allocate();
129  dst_t1.allocator()->allocate();
130 
131  ARM_COMPUTE_ASSERT(!src_t0.info()->is_resizable());
132  ARM_COMPUTE_ASSERT(!src_t1.info()->is_resizable());
133 
134  float min_bound = 0;
135  float max_bound = 0;
136  std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<float>(ActivationLayerInfo::ActivationFunction::LINEAR, DataType::F32);
137  std::uniform_real_distribution<> distribution(min_bound, max_bound);
138  library->fill(Accessor(src_t0), distribution, 0);
139  library->fill(Accessor(src_t1), distribution, 0);
140 
141  std::thread neon_thread1([&] { act_layer_thread0.run(); });
142  std::thread neon_thread2([&] { act_layer_thread1.run(); });
143 
144  neon_thread1.join();
145  neon_thread2.join();
146 
147  Window window;
148  window.use_tensor_dimensions(dst_t0.info()->tensor_shape());
149  Iterator t0_it(&dst_t0, window);
150  Iterator t1_it(&dst_t1, window);
151  execute_window_loop(window, [&](const Coordinates &)
152  {
153  const bool match = (*reinterpret_cast<float *>(t0_it.ptr()) == *reinterpret_cast<float *>(t1_it.ptr()));
155  },
156  t0_it, t1_it);
157 }
158 #endif // !defined(BARE_METAL)
159 
160 TEST_SUITE_END() // RuntimeContext
161 TEST_SUITE_END() // UNIT
162 TEST_SUITE_END() // Neon
163 } // namespace validation
164 } // namespace test
165 } // namespace arm_compute
arm_compute::test::validation::TEST_SUITE_END
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
Definition: DequantizationLayer.cpp:111
arm_compute::test::validation::TEST_CASE
TEST_CASE(FusedActivation, framework::DatasetMode::ALL)
Validate fused activation expecting the following behaviours:
Definition: ArithmeticAddition.cpp:93
arm_compute::test::validation::src
SimpleTensor< float > src
Definition: DFT.cpp:155
arm_compute::ITensorInfo::tensor_shape
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
arm_compute::RuntimeContext::asset_manager
IAssetManager * asset_manager() override
Asset manager accessor.
Definition: RuntimeContext.cpp:47
arm_compute::TensorShape
Shape of a tensor.
Definition: TensorShape.h:39
arm_compute::test::validation::dst
auto dst
Definition: DFT.cpp:170
RuntimeContext.h
NEActivationLayer.h
arm_compute::Window::use_tensor_dimensions
void use_tensor_dimensions(const TensorShape &shape, size_t first_dimension=Window::DimX)
Use the tensor's dimensions to fill the window dimensions.
Definition: Window.inl:276
arm_compute::test::Accessor
Accessor implementation for Tensor objects.
Definition: Accessor.h:35
arm_compute::ActivationLayerInfo
Activation Layer Information class.
Definition: ActivationLayerInfo.h:55
ActivationLayer.h
arm_compute::NEActivationLayer::configure
void configure(ITensor *input, ITensor *output, ActivationLayerInfo activation_info)
[NEActivationLayer snippet]
Definition: NEActivationLayer.cpp:48
arm_compute::test::framework::DatasetMode::ALL
@ ALL
arm_compute::Tensor::allocator
TensorAllocator * allocator()
Return a pointer to the tensor's allocator.
Definition: Tensor.cpp:48
arm_compute::test::validation::ARM_COMPUTE_EXPECT
ARM_COMPUTE_EXPECT(has_error==expected, framework::LogLevel::ERRORS)
arm_compute::Iterator
Iterator updated by execute_window_loop for each window element.
Definition: Helpers.h:46
Asserts.h
Accessor.h
arm_compute::NEActivationLayer
Basic function to run cpu::kernels::CpuActivationKernel.
Definition: NEActivationLayer.h:45
ARM_COMPUTE_ASSERT
#define ARM_COMPUTE_ASSERT(cond)
Definition: Validate.h:37
Macros.h
arm_compute::SchedulerFactory::create
static std::unique_ptr< IScheduler > create(Type type=_default_type)
Create a scheduler depending on the scheduler type.
Definition: SchedulerFactory.cpp:49
arm_compute::Coordinates
Coordinates of an item.
Definition: Coordinates.h:37
arm_compute::Tensor::info
ITensorInfo * info() const override
Interface to be implemented by the child class to return the tensor's metadata.
Definition: Tensor.cpp:33
arm_compute::RuntimeContext
Runtime context.
Definition: RuntimeContext.h:34
Tensor.h
Validation.h
arm_compute::test::library
std::unique_ptr< AssetsLibrary > library
Definition: main.cpp:77
arm_compute::Scheduler
Configurable scheduler which supports multiple multithreading APIs and choosing between different sch...
Definition: Scheduler.h:35
Globals.h
arm_compute::ITensorInfo::is_resizable
virtual bool is_resizable() const =0
Flag indicating whether the size of the tensor can be changed.
arm_compute::Window
Describe a multidimensional execution window.
Definition: Window.h:39
arm_compute::TensorAllocator::allocate
void allocate() override
Allocate size specified by TensorInfo of CPU memory.
Definition: TensorAllocator.cpp:132
arm_compute
Copyright (c) 2017-2023 Arm Limited.
Definition: introduction.dox:24
arm_compute::test::validation::TEST_SUITE
TEST_SUITE(QASYMM8_to_F32) FIXTURE_DATA_TEST_CASE(RunSmall
arm_compute::RuntimeContext::scheduler
IScheduler * scheduler() override
Scheduler accessor.
Definition: RuntimeContext.cpp:42
arm_compute::test::validation::distribution
std::uniform_real_distribution< float > distribution(-5.f, 5.f)
SchedulerFactory.h
Utils.h
arm_compute::DataType::F32
@ F32
32-bit floating-point number
arm_compute::Tensor
Basic implementation of the tensor interface.
Definition: Tensor.h:37
arm_compute::execute_window_loop
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...
Definition: Helpers.inl:77
arm_compute::NEActivationLayer::run
void run() override
Run the kernels contained in the function.
Definition: NEActivationLayer.cpp:64
arm_compute::test::framework::DatasetMode
DatasetMode
Possible dataset modes.
Definition: DatasetModes.h:40
arm_compute::test::framework::LogLevel::ERRORS
@ ERRORS
arm_compute::RuntimeContext::set_scheduler
void set_scheduler(IScheduler *scheduler)
CPU Scheduler setter.
Definition: RuntimeContext.cpp:36