Compute Library 22.05
IScheduler.cpp
1 /*
2  * Copyright (c) 2016-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #include "arm_compute/runtime/IScheduler.h"
25 
26 #include "arm_compute/core/CPP/ICPPKernel.h"
27 #include "arm_compute/core/Error.h"
28 #include "arm_compute/core/Log.h"
29 #include "arm_compute/core/Window.h"
30 #include "src/common/cpuinfo/CpuInfo.h"
31 #include "src/runtime/SchedulerUtils.h"
32 
33 namespace arm_compute
34 {
35 IScheduler::IScheduler()
36 {
37  // Work out the best possible number of execution threads
38  _num_threads_hint = cpuinfo::num_threads_hint();
39 }
40 
41 CPUInfo &IScheduler::cpu_info()
42 {
43  return CPUInfo::get();
44 }
45 
46 void IScheduler::set_num_threads_with_affinity(unsigned int num_threads, BindFunc func)
47 {
48  ARM_COMPUTE_UNUSED(num_threads, func);
49  ARM_COMPUTE_ERROR("Feature for affinity setting is not implemented");
50 }
51 
52 unsigned int IScheduler::num_threads_hint() const
53 {
54  return _num_threads_hint;
55 }
56 
57 void IScheduler::schedule_common(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors)
58 {
59  ARM_COMPUTE_ERROR_ON_MSG(!kernel, "The child class didn't set the kernel");
60 #ifndef BARE_METAL
61  const Window &max_window = window;
62  if(hints.split_dimension() == IScheduler::split_dimensions_all)
63  {
64  /*
65  * if the split dim is size_t max then this signals we should parallelise over
66  * all dimensions
67  */
68  const std::size_t m = max_window.num_iterations(Window::DimX);
69  const std::size_t n = max_window.num_iterations(Window::DimY);
70 
71  //in c++17 this can be swapped for auto [ m_threads, n_threads ] = split_2d(...
72  unsigned m_threads, n_threads;
73  std::tie(m_threads, n_threads) = scheduler_utils::split_2d(this->num_threads(), m, n);
74 
75  std::vector<IScheduler::Workload> workloads;
76  for(unsigned int ni = 0; ni != n_threads; ++ni)
77  {
78  for(unsigned int mi = 0; mi != m_threads; ++mi)
79  {
80  workloads.push_back(
81  [ni, mi, m_threads, n_threads, &max_window, &kernel](const ThreadInfo & info)
82  {
83  //narrow the window to our mi-ni workload
84  Window win = max_window.split_window(Window::DimX, mi, m_threads)
85  .split_window(Window::DimY, ni, n_threads);
86 
87  win.validate();
88 
89  Window thread_locator;
90  thread_locator.set(Window::DimX, Window::Dimension(mi, m_threads));
91  thread_locator.set(Window::DimY, Window::Dimension(ni, n_threads));
92 
93  thread_locator.validate();
94 
95  kernel->run_nd(win, info, thread_locator);
96  });
97  }
98  }
99  run_workloads(workloads);
100  }
101  else
102  {
103  const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension());
104  const unsigned int num_threads = std::min(num_iterations, this->num_threads());
105 
106  if(num_iterations == 0)
107  {
108  return;
109  }
110 
111  if(!kernel->is_parallelisable() || num_threads == 1)
112  {
113  ThreadInfo info;
114  info.cpu_info = &cpu_info();
115  if(tensors.empty())
116  {
117  kernel->run(max_window, info);
118  }
119  else
120  {
121  kernel->run_op(tensors, max_window, info);
122  }
123  }
124  else
125  {
126  unsigned int num_windows = 0;
127  switch(hints.strategy())
128  {
129  case StrategyHint::STATIC:
130  num_windows = num_threads;
131  break;
132  case StrategyHint::DYNAMIC:
133  {
134  const unsigned int granule_threshold = (hints.threshold() <= 0) ? num_threads : static_cast<unsigned int>(hints.threshold());
135  // Make sure we don't use some windows which are too small as this might create some contention on the ThreadFeeder
136  num_windows = num_iterations > granule_threshold ? granule_threshold : num_iterations;
137  break;
138  }
139  default:
140  ARM_COMPUTE_ERROR("Unknown strategy");
141  }
142  // Make sure the smallest window is larger than minimum workload size
143  num_windows = adjust_num_of_windows(max_window, hints.split_dimension(), num_windows, *kernel, cpu_info());
144 
145  std::vector<IScheduler::Workload> workloads(num_windows);
146  for(unsigned int t = 0; t < num_windows; ++t)
147  {
148  //Capture 't' by copy, all the other variables by reference:
149  workloads[t] = [t, &hints, &max_window, &num_windows, &kernel, &tensors](const ThreadInfo & info)
150  {
151  Window win = max_window.split_window(hints.split_dimension(), t, num_windows);
152  win.validate();
153 
154  if(tensors.empty())
155  {
156  kernel->run(win, info);
157  }
158  else
159  {
160  kernel->run_op(tensors, win, info);
161  }
162  };
163  }
164  run_workloads(workloads);
165  }
166  }
167 #else /* !BARE_METAL */
168  ARM_COMPUTE_UNUSED(kernel, hints, window, tensors);
169 #endif /* !BARE_METAL */
170 }
171 
172 void IScheduler::run_tagged_workloads(std::vector<Workload> &workloads, const char *tag)
173 {
174  ARM_COMPUTE_UNUSED(tag);
175  run_workloads(workloads);
176 }
177 
178 std::size_t IScheduler::adjust_num_of_windows(const Window &window, std::size_t split_dimension, std::size_t init_num_windows, const ICPPKernel &kernel, const CPUInfo &cpu_info)
179 {
180  // Mitigation of the narrow split issue, which occurs when the split dimension is too small to split (hence "narrow").
181  if(window.num_iterations(split_dimension) < init_num_windows )
182  {
183  auto recommended_split_dim = Window::DimX;
184  for(std::size_t dims = Window::DimY; dims <= Window::DimW; ++dims)
185  {
186  if(window.num_iterations(recommended_split_dim) < window.num_iterations(dims))
187  {
188  recommended_split_dim = dims;
189  }
190  }
191  ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("%lu dimension is not a suitable dimension to split the workload. Recommended: %lu recommended_split_dim", split_dimension,
192  recommended_split_dim);
193  }
194 
195  for(auto t = init_num_windows; t > 0; --t) // Trying the highest number of windows, init_num_windows, first
196  {
197  // Try splitting the workload into t, subject to each subworkload size <= mws.
198  if((window.num_iterations(split_dimension) / kernel.get_mws(cpu_info, t)) >= t)
199  {
200  if(t != init_num_windows)
201  {
202  ARM_COMPUTE_LOG_INFO_MSG_CORE("The scheduler is using a different thread count than the one assigned by the user.");
203  }
204  return t;
205  }
206  }
207  ARM_COMPUTE_LOG_INFO_MSG_CORE("The scheduler is using single thread instead of the thread count assigned by the user.");
208  return 1; // If the workload is so small that it can't be split, we should run a single thread
209 }
210 
211 } // namespace arm_compute
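
The loop in schedule_common() above is the core pattern of this file: the maximal window is carved into num_windows sub-windows with Window::split_window(), each sub-window is wrapped in an IScheduler::Workload lambda, and the batch is handed to run_workloads(). The following standalone sketch mirrors that pattern on a plain 1-D iteration range; split_range and the local Workload alias are hypothetical stand-ins for illustration and are not the library's Window::split_window() implementation.

#include <algorithm>
#include <cstddef>
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

// Hypothetical stand-in for IScheduler::Workload: a closure executed by one thread.
using Workload = std::function<void(unsigned int thread_id)>;

// Split [0, num_iterations) into `total` contiguous chunks and return chunk `id`
// as a half-open [begin, end) pair, spreading any remainder over the leading chunks.
static std::pair<std::size_t, std::size_t> split_range(std::size_t num_iterations, std::size_t id, std::size_t total)
{
    const std::size_t base      = num_iterations / total;
    const std::size_t remainder = num_iterations % total;
    const std::size_t begin     = id * base + std::min(id, remainder);
    const std::size_t size      = base + (id < remainder ? 1 : 0);
    return { begin, begin + size };
}

int main()
{
    const std::size_t num_iterations = 10;
    const std::size_t num_windows    = 4; // analogous to the STATIC strategy: one window per thread

    std::vector<Workload> workloads(num_windows);
    for(std::size_t t = 0; t < num_windows; ++t)
    {
        // Capture 't' by copy, as schedule_common() does, so each closure owns its chunk index.
        workloads[t] = [t, num_iterations, num_windows](unsigned int thread_id)
        {
            const auto range = split_range(num_iterations, t, num_windows);
            std::cout << "thread " << thread_id << " runs iterations [" << range.first << ", " << range.second << ")\n";
        };
    }

    // A real scheduler would pass these to run_workloads(); here they simply run inline.
    for(std::size_t t = 0; t < num_windows; ++t)
    {
        workloads[t](static_cast<unsigned int>(t));
    }
    return 0;
}
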
virtual size_t get_mws(const CPUInfo &platform, size_t thread_count) const
Return minimum workload size of the relevant kernel.
Definition: ICPPKernel.h:100
std::pair< unsigned, unsigned > split_2d(unsigned max_threads, std::size_t m, std::size_t n)
Given two dimensions and a maximum number of threads to utilise, calculate the best combination of th...
IScheduler()
Default constructor.
Definition: IScheduler.cpp:35
bool empty() const
Checks if pack is empty.
Definition: ITensorPack.cpp:80
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
Common interface for all kernels implemented in C++.
Definition: ICPPKernel.h:38
Split the workload evenly among the threads.
static constexpr unsigned int split_dimensions_all
When arm_compute::IScheduler::Hints::_split_dimension is initialized with this value then the schedul...
Definition: IScheduler.h:62
Window split_window(size_t dimension, size_t id, size_t total) const
Split a window into a set of sub windows along a given dimension.
Definition: Window.inl:189
Describe one of the image's dimensions with a start, end and step.
Definition: Window.h:79
virtual void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
Execute the kernel on the passed window.
Definition: ICPPKernel.h:88
uint32_t num_threads_hint()
Some systems have both big and small cores; this function computes the minimum number of cores that ar...
Definition: CpuInfo.cpp:391
CPUInfo & cpu_info()
Get CPU info.
Definition: IScheduler.cpp:41
virtual void run_tagged_workloads(std::vector< Workload > &workloads, const char *tag)
Execute all the passed workloads.
Definition: IScheduler.cpp:172
void validate() const
Will validate all the window's dimensions' values when asserts are enabled.
Definition: Window.inl:173
constexpr size_t num_iterations(size_t dimension) const
Return the number of iterations needed to iterate through a given dimension.
Definition: Window.inl:182
int threshold() const
Return the granule capping threshold to be used by dynamic scheduling.
Definition: IScheduler.h:124
Split the workload dynamically using a bucket system.
std::function< int(int, int)> BindFunc
Function to be used and map a given thread id to a logical core id.
Definition: IScheduler.h:56
#define ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE(fmt,...)
Log information level formatted message to the core system logger.
Definition: Log.h:99
virtual void run_nd(const Window &window, const ThreadInfo &info, const Window &thread_locator)
Legacy compatibility layer for implementations which do not support thread_locator. In these cases we si...
Definition: ICPPKernel.h:70
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
Definition: Window.h:43
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
virtual void run(const Window &window, const ThreadInfo &info)
Execute the kernel on the passed window.
Definition: ICPPKernel.h:57
StrategyHint strategy() const
Return the preferred strategy to use to split the workload.
Definition: IScheduler.h:116
virtual bool is_parallelisable() const
Indicates whether or not the kernel is parallelisable.
Definition: IKernel.cpp:41
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
Definition: Window.inl:49
static constexpr size_t DimW
Alias for dimension 3 also known as W dimension.
Definition: Window.h:49
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
Definition: Window.h:45
Information about executing thread and CPU.
Definition: CPPTypes.h:169
unsigned int num_threads_hint() const
Get a hint for the best possible number of execution threads.
Definition: IScheduler.cpp:52
Tensor packing service.
Definition: ITensorPack.h:39
unsigned int split_dimension() const
Return the preferred split dimension.
Definition: IScheduler.h:96
#define ARM_COMPUTE_LOG_INFO_MSG_CORE(msg)
Log information level message to the core system logger.
Definition: Log.h:87
static CPUInfo & get()
Access the CPUInfo singleton.
Definition: CPPTypes.cpp:40
virtual unsigned int num_threads() const =0
Returns the number of threads that the scheduler has in its pool.
virtual void set_num_threads_with_affinity(unsigned int num_threads, BindFunc func)
Sets the number of threads the scheduler will use to run the kernels but also using a binding functio...
Definition: IScheduler.cpp:46
const CPUInfo * cpu_info
Definition: CPPTypes.h:173
Describe a multidimensional execution window.
Definition: Window.h:39
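
For context on how this interface is normally driven: a concrete scheduler obtained through Scheduler::get() (for example CPPScheduler or OMPScheduler) is given a kernel together with Hints describing the split dimension, strategy and granule threshold, and its schedule_op() ultimately reaches the schedule_common() shown in this file. The sketch below is a hedged illustration, assuming the Hints(split_dimension, strategy, threshold) constructor order and the schedule_op() signature listed above; my_kernel, my_pack and my_window are placeholders, so verify the exact signatures against IScheduler.h and Scheduler.h before reusing it.

#include "arm_compute/core/CPP/ICPPKernel.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/Scheduler.h"

using namespace arm_compute;

// Sketch only: my_kernel, my_pack and my_window stand for a concrete ICPPKernel,
// its tensor pack and its maximal execution window.
void run_kernel_sketch(ICPPKernel *my_kernel, ITensorPack &my_pack, const Window &my_window)
{
    // Split along Y, let the scheduler bucket the work dynamically, and cap the window count at 64 granules.
    const IScheduler::Hints hints(Window::DimY, IScheduler::StrategyHint::DYNAMIC, 64);

    // Scheduler::get() returns the process-wide IScheduler; schedule_op() funnels into schedule_common().
    Scheduler::get().schedule_op(my_kernel, hints, my_window, my_pack);
}
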