NEGEMM.cpp (Compute Library 23.05)
/*
 * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/runtime/NEON/functions/NEGEMM.h"

#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/operators/CpuGemm.h"

using namespace arm_compute::experimental;

namespace arm_compute
{
struct NEGEMM::Impl
{
    MemoryGroup      memory_group{};
    IWeightsManager *weights_manager{ nullptr };

    std::unique_ptr<cpu::CpuGemm> op{ nullptr };

    const ITensor *original_b{ nullptr };
    bool           is_prepared{ false };

    ITensorPack                      run_pack{};
    ITensorPack                      prep_pack{};
    WorkspaceData<Tensor>            workspace{};
    experimental::MemoryRequirements aux_mem_req{};
};

NEGEMM::NEGEMM(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _impl(std::make_unique<Impl>())
{
    _impl->memory_group    = MemoryGroup(std::move(memory_manager));
    _impl->weights_manager = weights_manager;
}

NEGEMM::~NEGEMM() = default;

void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
    ARM_COMPUTE_ERROR_THROW_ON(cpu::CpuGemm::validate(a->info(), b->info(), (c != nullptr) ? c->info() : nullptr, d->info(), alpha, beta, gemm_info));

    // Matrix B may only need to be reshaped on the first run
    _impl->is_prepared = false;
    _impl->original_b  = b;
    _impl->op          = std::make_unique<cpu::CpuGemm>();

    // If B is reshaped on every run, mark its values as non-constant
    auto b_info_to_use = b->info()->clone();
    if(!gemm_info.reshape_b_only_on_first_run())
    {
        b_info_to_use->set_are_values_constant(false);
    }

    _impl->op->configure(a->info(), b_info_to_use.get(), (c != nullptr) ? c->info() : nullptr, d->info(), alpha, beta, gemm_info);

    _impl->aux_mem_req = _impl->op->workspace();
    _impl->run_pack    = { { ACL_SRC_0, a }, { ACL_SRC_1, b }, { ACL_SRC_2, c }, { ACL_DST, d } };
    _impl->prep_pack   = { { ACL_SRC_1, b }, { ACL_SRC_2, c } };
    _impl->workspace   = manage_workspace<Tensor>(_impl->aux_mem_req, _impl->memory_group, _impl->run_pack, _impl->prep_pack);
}

Status NEGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info)
{
    // If B is reshaped on every run, mark its values as non-constant
    auto b_to_use = b->clone();
    if(!gemm_info.reshape_b_only_on_first_run())
    {
        b_to_use->set_are_values_constant(false);
    }

    return cpu::CpuGemm::validate(a, b_to_use.get(), c, output, alpha, beta, gemm_info);
}

Status NEGEMM::has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output,
                            float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(alpha, beta);
    return cpu::CpuGemm::has_opt_impl(expected_weight_format, a, b, c, output, gemm_info);
}

void NEGEMM::run()
{
    prepare();

    // Acquire the workspace memory for the duration of this run
    MemoryGroupResourceScope scope_mg(_impl->memory_group);
    _impl->op->run(_impl->run_pack);
}

void NEGEMM::prepare()
{
    if(!_impl->is_prepared)
    {
        _impl->op->prepare(_impl->prep_pack);

        auto has_reshape = std::find_if(_impl->aux_mem_req.begin(),
                                        _impl->aux_mem_req.end(),
                                        [](const MemoryInfo &m) -> bool { return m.lifetime == MemoryLifetime::Persistent; });

        if(has_reshape != std::end(_impl->aux_mem_req))
        {
            // The reshaped B is kept in a persistent workspace tensor, so the original B is no longer needed
            _impl->original_b->mark_as_unused();
        }
        else
        {
            _impl->run_pack.add_const_tensor(ACL_SRC_1, _impl->original_b);
        }

        // Release temporary tensors that are only used in the prepare stage
        release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace);
        _impl->is_prepared = true;
    }
}
} // namespace arm_compute
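
For context, the snippet below is a minimal usage sketch of the function implemented above: it configures a float32 GEMM computing D = alpha * A * B + beta * C (with the bias tensor C omitted here), allocates the tensors, and runs it. The matrix sizes, the alpha/beta values, and the main() scaffolding are illustrative assumptions, not part of this file; only the NEGEMM/Tensor API calls come from the library.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative sizes: A is M x K, B is K x N, D is M x N.
    constexpr unsigned int M = 32, K = 64, N = 16;

    // TensorShape is (width, height), i.e. (columns, rows).
    Tensor a{}, b{}, d{};
    a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32));
    b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));
    d.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32));

    // Configure D = 1.0f * A * B (no bias tensor, default GEMMInfo).
    NEGEMM gemm{};
    gemm.configure(&a, &b, nullptr, &d, 1.0f, 0.0f);

    // Allocate backing memory, then fill a and b before running.
    a.allocator()->allocate();
    b.allocator()->allocate();
    d.allocator()->allocate();

    // ... fill a.buffer() and b.buffer() with input data ...

    // The first run triggers prepare(): B may be reshaped once and reused on later runs.
    gemm.run();
    return 0;
}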