Compute Library 22.08
IWorkload.h
/*
 * Copyright (c) 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifdef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION
#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IWORKLOAD_H
#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IWORKLOAD_H

#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/experimental/Types.h"

#include "arm_compute/core/experimental/DependencyGraph.h"

namespace arm_compute
{
namespace experimental
{
namespace dynamic_fusion
{
/** Describes when a Unit Workload is run.
 *
 */
struct UnitWorkloadStage
{
    enum class Stage
    {
        Prepare, /**< Only run once at the beginning. */
        Run,     /**< Run every time after the first time. */
    };
    Stage stage{};
    friend bool operator==(const UnitWorkloadStage &stage0, const UnitWorkloadStage &stage1)
    {
        return stage0.stage == stage1.stage;
    }
};
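// Illustrative usage sketch, not part of the original header, assuming the reconstructed
// definitions above: a unit workload is tagged with the stage it runs in, and two stage
// descriptors compare equal when their underlying Stage values match.
//
//   UnitWorkloadStage prepare_stage{};
//   prepare_stage.stage = UnitWorkloadStage::Stage::Prepare; // run once, up front
//
//   UnitWorkloadStage run_stage{};
//   run_stage.stage = UnitWorkloadStage::Stage::Run;         // run on every subsequent execution
//
//   bool same_stage = (prepare_stage == run_stage);          // false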
/** Type of memory used by a Workload Tensor
 *
 */
enum class MemoryType
{
    Core      = 0, /**< Core memory used by the Workload Tensor, e.g. for argument tensors */
    Auxiliary = 1, /**< Auxiliary memory required by the Workload Tensor, e.g. for temporary tensors */
};

using AuxMemoryLifetime = MemoryLifetime;

/** Memory Info for a @ref WorkloadTensor of Auxiliary memory type. This communicates to the user how much additional
 *  memory is required for auxiliary tensors.
 */
struct AuxMemoryInfo
{
    AuxMemoryInfo() = default;

    AuxMemoryInfo(size_t size, size_t alignment = 0) noexcept
        : size(size),
          alignment(alignment)
    {
    }

    AuxMemoryInfo(AuxMemoryLifetime lifetime, size_t size, size_t alignment = 0) noexcept
        : lifetime(lifetime),
          size(size),
          alignment(alignment)
    {
    }
    friend bool operator==(const AuxMemoryInfo &info0, const AuxMemoryInfo &info1)
    {
        return info0.lifetime == info1.lifetime && info0.size == info1.size && info0.alignment == info1.alignment;
    }

    AuxMemoryLifetime lifetime{ AuxMemoryLifetime::Temporary }; /**< Memory lifetime */
    size_t            size{ 0 };                                /**< Total memory size in bytes */
    size_t            alignment{ 64 };                          /**< Memory alignment in bytes */
};
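// Illustrative usage sketch, not part of the original header: an auxiliary tensor that
// needs 1024 bytes of temporary scratch memory aligned to 64 bytes could be described as
// below. Note that the convenience constructors default alignment to 0 when it is not
// passed explicitly; the 64-byte default applies only to a default-constructed AuxMemoryInfo.
//
//   AuxMemoryInfo scratch{ AuxMemoryLifetime::Temporary, 1024, 64 };
//   AuxMemoryInfo same_scratch{ AuxMemoryLifetime::Temporary, 1024, 64 };
//   bool equal = (scratch == same_scratch); // true: lifetime, size and alignment all match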

/** A descriptor for IWorkload Tensors.
 */
struct WorkloadTensor
{
    using Id = DependencyGraph::Id;
    Id            id{};          /**< Id of the workload tensor */
    ITensorInfo  *info{};        /**< TensorInfo associated with the workload tensor */
    MemoryType    memory_type{}; /**< Memory type */
    AuxMemoryInfo memory_info{}; /**< Auxiliary memory information. This can be ignored if the memory type is Core */
};
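// Illustrative usage sketch, not part of the original header: a temporary intermediate
// tensor of a fused workload might be described like this (tensor_id and tensor_info are
// hypothetical values obtained from the surrounding workload / dependency-graph code):
//
//   WorkloadTensor tensor{};
//   tensor.id          = tensor_id;             // id registered with the dependency graph
//   tensor.info        = &tensor_info;          // ITensorInfo describing shape and data type
//   tensor.memory_type = MemoryType::Auxiliary; // backed by extra, workload-owned memory
//   tensor.memory_info = AuxMemoryInfo{ AuxMemoryLifetime::Temporary, 1024 };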
/** The basic atomic unit in an @ref IWorkload. It contains exactly one kernel to run.
 *
 */
struct UnitWorkload
{
    using Id = DependencyGraph::Id;
    Id                id{};    /**< Id of the unit workload */
    UnitWorkloadStage stage{}; /**< Stage */
};

/** Run-time-agnostic, platform-specific graph that describes everything required to run a workload.
 *  It can be configured into an Arm Compute Library runtime, integrated into the runtime of another framework, or integrated into the compilation flow
 */
struct IWorkload
{
    using UnitWorkId = UnitWorkload::Id;
    using Tid        = WorkloadTensor::Id;
    IWorkload()          = default;
    virtual ~IWorkload() = default;
    DependencyGraph graph{}; /**< Dependency graph of the workload tensors and the unit workloads */
};
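// Illustrative sketch, not part of the original header: a backend-specific workload is
// expected to derive from IWorkload and add its own tensor and unit-workload containers
// (the real derived types, e.g. ClWorkload, live elsewhere in the library). The names
// below are purely hypothetical.
//
//   struct MyBackendWorkload : public IWorkload
//   {
//       std::vector<WorkloadTensor> tensors{};        // descriptors of all workload tensors
//       std::vector<UnitWorkload>   unit_workloads{}; // one entry per kernel to be run
//   };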

} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
#endif //ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IWORKLOAD_H
#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */