    : FloatWorkload<UnidirectionalSequenceLstmQueueDescriptor>(descriptor, info)
{
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClUnidirectionalSequenceLstmFloatWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         GetGuid());

    const arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();

    TensorInfo inputInfo = info.m_InputTensorInfos[0];
    TensorInfo outputInfo = info.m_OutputTensorInfos[2];

    arm_compute::DataType armComputeDataType = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetDataType();
    armnn::DataType armnnDataType = GetArmNNDataType(armComputeDataType);

    TensorShape inputLayerShape = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetShape();
    TensorShape cellStateLayerShape = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetShape();
    TensorShape outputLayerShape = static_cast<IClTensorHandle*>(m_Data.m_Outputs[2])->GetShape();

    unsigned int maxTime = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1];
    unsigned int batchSize = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0];
    unsigned int inputSize = inputLayerShape[2];
    unsigned int outputSize = outputLayerShape[2];
    unsigned int numUnits = cellStateLayerShape[1];

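    // The input is [maxTime, batchSize, inputSize] in time major format and
    // [batchSize, maxTime, inputSize] in batch major format; the shapes below are the
    // time major equivalents the workload uses internally.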
    const TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});
    const TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});

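    //
    // Permute: performed if Unidirectional Sequence Layer inputs/outputs are in batch major format.
    //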
    if (!m_Data.m_Parameters.m_TimeMajor)
    {
        std::unique_ptr<arm_compute::CLPermute> layer(new arm_compute::CLPermute());

        TensorInfo permuteOutInfo = inputInfo;
        permuteOutInfo.SetShape(timeMajorShapeInput);
        BuildArmComputeTensor(m_PermuteFirstOut, permuteOutInfo);
        armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_PermuteFirstOut);

        layer->configure(clCompileContext, &input, &m_PermuteFirstOut, arm_compute::PermutationVector(0U, 2U, 1U));
        m_Permute1.reset(layer.release());
    }

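    //
    // Split and Concat Tensors: one intermediate 2D tensor per timestep, used as the
    // LSTM input (from the splitter) and the LSTM output (into the concat) respectively.
    //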
    for (unsigned int i = 0; i < maxTime; ++i)
    {
        arm_compute::CLTensor splitter_out;
        arm_compute::CLTensor concat_in;

        auto splitterTensorInfo = inputInfo;
        auto concatTensorInfo = outputInfo;
        splitterTensorInfo.SetShape({batchSize, inputSize});
        concatTensorInfo.SetShape({batchSize, outputSize});
        BuildArmComputeTensor(splitter_out, splitterTensorInfo);
        BuildArmComputeTensor(concat_in, concatTensorInfo);

        armcomputetensorutils::InitialiseArmComputeTensorEmpty(splitter_out);
        armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_in);

        m_SplitterOutputsTensors.push_back(std::move(splitter_out));
        m_ConcatInputsTensors.push_back(std::move(concat_in));
    }

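    // Take the raw pointers only after both vectors are fully populated: push_back can
    // reallocate a vector's storage, which would invalidate pointers taken earlier.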
    for (unsigned int i = 0; i < maxTime; ++i)
    {
        m_SplitterOutputs.push_back(&m_SplitterOutputsTensors[i]);
        m_ConcatInputs.push_back(&m_ConcatInputsTensors[i]);
    }

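    //
    // Split: slice the time major input along the time axis, one view per timestep.
    //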
    unsigned int numberDimensions = 3;
    unsigned int dimension = 0;

    if (maxTime != 1)
    {
        ViewsDescriptor splitterDesc(maxTime, numberDimensions);
        unsigned int splitterDimSizes[3] = {1, batchSize, inputSize};
        for (unsigned int outputIdx = 0u; outputIdx < maxTime; ++outputIdx)
        {
            splitterDesc.SetViewOriginCoord(outputIdx, dimension, splitterDimSizes[dimension] * outputIdx);
            for (unsigned int dimIdx = 0u; dimIdx < numberDimensions; ++dimIdx)
            {
                splitterDesc.SetViewSize(outputIdx, dimIdx, splitterDimSizes[dimIdx]);
            }
        }

        std::set<unsigned int> splitAxis = ComputeSplitAxis(splitterDesc, timeMajorShapeInput);

        std::unique_ptr<arm_compute::CLSplit> split_layer(new arm_compute::CLSplit());
        unsigned int aclAxisSplit = CalcAclAxis(splitterDesc.GetNumDimensions(), *splitAxis.begin());
        if (!m_Data.m_Parameters.m_TimeMajor)
        {
            split_layer->configure(&m_PermuteFirstOut, m_SplitterOutputs, aclAxisSplit);
        }
        else
        {
            split_layer->configure(&input, m_SplitterOutputs, aclAxisSplit);
        }

        split_layer->prepare();
        m_Splitter.reset(split_layer.release());
    }

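    //
    // LSTM: build CL tensors for every weight and bias and register the optional ones
    // (CIFG, projection, peephole, layer norm) with LSTMParams.
    //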
    arm_compute::LSTMParams<arm_compute::ICLTensor> lstm_param;

    m_InputToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());

    m_InputToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());

    m_InputToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());

    m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());

    m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());

    m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());

    m_ForgetGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());

    m_CellBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());

    m_OutputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());

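    // With CIFG (coupled input and forget gate) the input gate is derived from the forget
    // gate, so separate input gate weights and bias only exist when CIFG is disabled.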
    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        m_InputToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());

        m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());

        m_CellToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
        }

        m_InputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());

        lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
                                   m_RecurrentToInputWeightsTensor.get(),
                                   m_Data.m_CellToInputWeights ? m_CellToInputWeightsTensor.get() : nullptr,
                                   m_InputGateBiasTensor.get());
    }

    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        m_ProjectionWeightsTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());

        m_ProjectionBiasTensor = std::make_unique<arm_compute::CLTensor>();
        if (m_Data.m_ProjectionBias != nullptr)
        {
            BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
        }

        lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
                                         m_Data.m_ProjectionBias ? m_ProjectionBiasTensor.get() : nullptr);
    }

    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        m_CellToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());

        m_CellToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());

        lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
    }

    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
        }

        m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());

        m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());

        m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());

        auto inputNormWeightTensor = m_Data.m_Parameters.m_CifgEnabled ? nullptr : m_InputLayerNormWeightsTensor.get();
        lstm_param.set_layer_normalization_params(inputNormWeightTensor,
                                                  m_ForgetLayerNormWeightsTensor.get(),
                                                  m_CellLayerNormWeightsTensor.get(),
                                                  m_OutputLayerNormWeightsTensor.get());
    }

    arm_compute::ICLTensor& output_state_in = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    arm_compute::ICLTensor& cell_state_in = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();

    arm_compute::ICLTensor& output_state_out = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    arm_compute::ICLTensor& cell_state_out = static_cast<IClTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();

    m_ScratchBuffer = std::make_unique<arm_compute::CLTensor>();
    if (m_Data.m_Parameters.m_CifgEnabled)
    {
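        // scratch_buffer has shape [batchSize, numUnits * 3] when CIFG is enabled.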
        BuildArmComputeTensor(*m_ScratchBuffer, TensorInfo({batchSize, numUnits * 3}, armnnDataType));
    }
    else
    {
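        // scratch_buffer has shape [batchSize, numUnits * 4] when CIFG is disabled.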
        BuildArmComputeTensor(*m_ScratchBuffer, TensorInfo({batchSize, numUnits * 4}, armnnDataType));
    }

    float cell_threshold = m_Data.m_Parameters.m_ClippingThresCell;
    float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;

    arm_compute::ActivationLayerInfo activationLayerInfo =
        ConvertLstmActivationFuncToAclLayerInfo(m_Data.m_Parameters.m_ActivationFunc);

    for (unsigned int i = 0; i != maxTime; ++i)
    {
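        // Pick the LSTM input and output ITensors for this timestep depending on whether
        // there is a single timestep (no split/concat needed) and on the data layout.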
        arm_compute::ICLTensor* outputLSTM;
        arm_compute::ICLTensor* inputLSTM;

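        // Single timestep, time major: wire the workload input and output tensors directly
        // to the LSTM. CLLSTMLayer expects 2D tensors, so the shapes are shrunk from
        // [1, batchSize, size] to [batchSize, size].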
        if (maxTime == 1 && m_Data.m_Parameters.m_TimeMajor)
        {
            TensorShape inputShape = GetTensorShape(input.info()->tensor_shape(), 1U);
            TensorShape outputShape = GetTensorShape(output.info()->tensor_shape(), 1U);
            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
            TensorShape outputShapeShrink({outputShape[1], outputShape[2]});
            auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
            auto acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);
            (&input)->info()->set_tensor_shape(acl_input_shape_shrink);
            inputLSTM = const_cast<arm_compute::ICLTensor*>(&input);
            (&output)->info()->set_tensor_shape(acl_output_shape_shrink);
            outputLSTM = &output;
        }
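        // Single timestep, batch major: the permuted (time major) tensor feeds the LSTM,
        // again shrunk to 2D, and the result lands in the first concat input, which is
        // permuted back to batch major format after the loop.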
        else if (maxTime == 1 && !m_Data.m_Parameters.m_TimeMajor)
        {
            TensorShape inputShape = GetTensorShape(m_PermuteFirstOut.info()->tensor_shape(), 1U);
            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
            auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
            m_PermuteFirstOut.info()->set_tensor_shape(acl_input_shape_shrink);
            inputLSTM = &m_PermuteFirstOut;
            outputLSTM = const_cast<arm_compute::ICLTensor*>(m_ConcatInputs[i]);
        }
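        // Multiple timesteps: this timestep's splitter output feeds the LSTM and its
        // result goes into the matching concat input.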
        else
        {
            inputLSTM = m_SplitterOutputs[i];
            outputLSTM = const_cast<arm_compute::ICLTensor*>(m_ConcatInputs[i]);
        }

        std::unique_ptr<arm_compute::CLLSTMLayer> lstm_layer(new arm_compute::CLLSTMLayer());
        lstm_layer->configure(clCompileContext,
                              inputLSTM,
                              m_InputToForgetWeightsTensor.get(),
                              m_InputToCellWeightsTensor.get(),
                              m_InputToOutputWeightsTensor.get(),
                              m_RecurrentToForgetWeightsTensor.get(),
                              m_RecurrentToCellWeightsTensor.get(),
                              m_RecurrentToOutputWeightsTensor.get(),
                              m_ForgetGateBiasTensor.get(),
                              m_CellBiasTensor.get(),
                              m_OutputGateBiasTensor.get(),
                              &output_state_in,
                              &cell_state_in,
                              m_ScratchBuffer.get(),
                              &output_state_out,
                              &cell_state_out,
                              outputLSTM,
                              lstm_param,
                              activationLayerInfo,
                              cell_threshold,
                              projection_threshold);

        m_Layers.emplace_back(std::move(lstm_layer));
    }

    armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);

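    // Copy the constant weight and bias data from the ConstTensorHandles into the CL tensors.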
    InitializeArmComputeClTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
    InitializeArmComputeClTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
    InitializeArmComputeClTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
    InitializeArmComputeClTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
    InitializeArmComputeClTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
    InitializeArmComputeClTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
    InitializeArmComputeClTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
    InitializeArmComputeClTensorData(*m_CellBiasTensor, m_Data.m_CellBias);
    InitializeArmComputeClTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);

    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        InitializeArmComputeClTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
        InitializeArmComputeClTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            InitializeArmComputeClTensorData(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights);
        }
        InitializeArmComputeClTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
    }

    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        InitializeArmComputeClTensorData(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights);
        if (m_Data.m_ProjectionBias != nullptr)
        {
            InitializeArmComputeClTensorData(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias);
        }
    }

    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        InitializeArmComputeClTensorData(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights);
        InitializeArmComputeClTensorData(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights);
    }

    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            InitializeArmComputeClTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
        }
        InitializeArmComputeClTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
        InitializeArmComputeClTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
        InitializeArmComputeClTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
    }

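    // Force Compute Library to perform the necessary copying and reshaping of the constant
    // tensors up front, so Execute() only has to run the configured layers.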
    for (uint32_t i = 0; i < m_Layers.size(); ++i)
    {
        m_Layers[i]->prepare();
    }

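    //
    // Concat: stack the per-timestep LSTM outputs back into one 3D sequence tensor.
    //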
    TensorShape shape = GetTensorShape(m_ConcatInputs[0]->info()->tensor_shape(), 1U);
    TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});
    TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});

    if (maxTime != 1)
    {
        for (unsigned int i = 0; i < maxTime; ++i)
        {
            m_ConcatInputs[i]->info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
        }

        ConcatDescriptor concatDescriptor(maxTime, numberDimensions);
        for (unsigned int inputIdx = 0u; inputIdx < maxTime; ++inputIdx)
        {
            concatDescriptor.SetViewOriginCoord(inputIdx, dimension, inputIdx);
            concatDescriptor.SetConcatAxis(dimension);
        }

        m_Concat.reset(new arm_compute::CLConcatenateLayer());
        unsigned int aclAxisConcat = CalcAclAxis(concatDescriptor.GetNumDimensions(),
                                                 concatDescriptor.GetConcatAxis());
        if (!m_Data.m_Parameters.m_TimeMajor)
        {
            TensorInfo concatOutputTensorInfo = outputInfo;
            concatOutputTensorInfo.SetShape(timeMajorShapeOutput);
            BuildArmComputeTensor(concat_out, concatOutputTensorInfo);
            armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_out);

            m_Concat->configure(m_ConcatInputs, &concat_out, aclAxisConcat);
        }
        else
        {
            m_Concat->configure(m_ConcatInputs, &output, aclAxisConcat);
        }

        m_Concat->prepare();
    }
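    // Single timestep: nothing to concat; just expand the output TensorInfo back to the
    // correct 3D batch major or time major shape.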
    else
    {
        if (!m_Data.m_Parameters.m_TimeMajor)
        {
            (&output)->info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandBatchMajor));
        }
        else
        {
            (&output)->info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
        }
    }

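    //
    // Permute: only done if input/output are in batch major format; maps the time major
    // result back to batch major.
    //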
    if (!m_Data.m_Parameters.m_TimeMajor)
    {
        std::unique_ptr<arm_compute::CLPermute> layer(new arm_compute::CLPermute());
        if (maxTime != 1)
        {
            layer->configure(clCompileContext, &concat_out, &output, arm_compute::PermutationVector(0U, 2U, 1U));
        }
        else
        {
            layer->configure(clCompileContext, m_ConcatInputs[0], &output, arm_compute::PermutationVector(0U, 2U, 1U));
        }
        m_Permute2.reset(layer.release());
    }

    FreeUnusedTensors();
}