NeonUnidirectionalSequenceLstmFloatWorkload::NeonUnidirectionalSequenceLstmFloatWorkload(
    const UnidirectionalSequenceLstmQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : FloatWorkload<UnidirectionalSequenceLstmQueueDescriptor>(descriptor, info)
{
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonUnidirectionalSequenceLstmFloatWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         GetGuid());

    const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();

    TensorInfo inputInfo = info.m_InputTensorInfos[0];
    TensorInfo outputInfo = info.m_OutputTensorInfos[0];

    arm_compute::DataType armComputeDataType = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetDataType();
    armnn::DataType armnnDataType = GetArmNNDataType(armComputeDataType);

    TensorShape inputLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetShape();
    TensorShape cellStateLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetShape();
    TensorShape outputLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetShape();

    unsigned int maxTime = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1];
    unsigned int batchSize = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0];
    unsigned int inputSize = inputLayerShape[2];
    unsigned int outputSize = outputLayerShape[2];
    unsigned int numUnits = cellStateLayerShape[1];

    const TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});
    const TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});

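    //
    // Permute: performed if Unidirectional Sequence Layer inputs/outputs are in batch major format.
    //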
    if (!m_Data.m_Parameters.m_TimeMajor)
    {
        std::unique_ptr<arm_compute::NEPermute> layer(new arm_compute::NEPermute());

        TensorInfo permuteOutInfo = inputInfo;
        permuteOutInfo.SetShape(timeMajorShapeInput);
        BuildArmComputeTensor(m_PermuteFirstOut, permuteOutInfo);
        armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_PermuteFirstOut);

        layer->configure(&input, &m_PermuteFirstOut, arm_compute::PermutationVector(0U, 2U, 1U));
        m_Permute1.reset(layer.release());
    }
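    //
    // Split and Concat Tensors to/from maxTime fixed sizes.
    //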
    for (unsigned int i = 0; i < maxTime; ++i)
    {
        arm_compute::Tensor splitter_out;
        arm_compute::Tensor concat_in;

        auto splitterTensorInfo = inputInfo;
        auto concatTensorInfo = outputInfo;
        splitterTensorInfo.SetShape({batchSize, inputSize});
        concatTensorInfo.SetShape({batchSize, outputSize});
        BuildArmComputeTensor(splitter_out, splitterTensorInfo);
        BuildArmComputeTensor(concat_in, concatTensorInfo);

        armcomputetensorutils::InitialiseArmComputeTensorEmpty(splitter_out);
        armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_in);

        m_SplitterOutputsTensors.push_back(std::move(splitter_out));
        m_ConcatInputsTensors.push_back(std::move(concat_in));
    }

    for (unsigned int i = 0; i < maxTime; ++i)
    {
        m_SplitterOutputs.push_back(&m_SplitterOutputsTensors[i]);
        m_ConcatInputs.push_back(&m_ConcatInputsTensors[i]);
    }
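    //
    // Split.
    //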
    unsigned int numberDimensions = 3;
    unsigned int dimension = 0;

    if (maxTime != 1)
    {
        ViewsDescriptor splitterDesc(maxTime, numberDimensions);
        unsigned int splitterDimSizes[3] = {1, batchSize, inputSize};
        for (unsigned int outputIdx = 0u; outputIdx < maxTime; ++outputIdx)
        {
            splitterDesc.SetViewOriginCoord(outputIdx, dimension, splitterDimSizes[dimension] * outputIdx);
            for (unsigned int dimIdx = 0u; dimIdx < numberDimensions; ++dimIdx)
            {
                splitterDesc.SetViewSize(outputIdx, dimIdx, splitterDimSizes[dimIdx]);
            }
        }

        std::set<unsigned int> splitAxis = ComputeSplitAxis(splitterDesc, timeMajorShapeInput);

        std::unique_ptr<arm_compute::NESplit> split_layer(new arm_compute::NESplit());
        unsigned int aclAxisSplit = CalcAclAxis(splitterDesc.GetNumDimensions(), *splitAxis.begin());
        if (!m_Data.m_Parameters.m_TimeMajor)
        {
            split_layer->configure(&m_PermuteFirstOut, m_SplitterOutputs, aclAxisSplit);
        }
        else
        {
            split_layer->configure(&input, m_SplitterOutputs, aclAxisSplit);
        }

        split_layer->prepare();
        m_Splitter.reset(split_layer.release());
    }
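    //
    // Lstm.
    //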
    arm_compute::LSTMParams<arm_compute::ITensor> lstm_param;

    m_InputToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());

    m_InputToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());

    m_InputToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());

    m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());

    m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());

    m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());

    m_ForgetGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());

    m_CellBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());

    m_OutputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());

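    // When CIFG is disabled the input gate has its own parameters: input-to-input and
    // recurrent-to-input weights, an optional cell-to-input (peephole) weight, and an input gate bias.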
    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        m_InputToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());

        m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());

        m_CellToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
        }

        m_InputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());

        lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
                                   m_RecurrentToInputWeightsTensor.get(),
                                   m_Data.m_CellToInputWeights ? m_CellToInputWeightsTensor.get() : nullptr,
                                   m_InputGateBiasTensor.get());
    }

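    // Optional projection layer; the projection bias may be absent.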
    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        m_ProjectionWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());

        m_ProjectionBiasTensor = std::make_unique<arm_compute::Tensor>();
        if (m_Data.m_ProjectionBias != nullptr)
        {
            BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
        }

        lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
                                         m_Data.m_ProjectionBias ? m_ProjectionBiasTensor.get() : nullptr);
    }

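    // Optional peephole connections from the cell state to the forget and output gates.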
    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        m_CellToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());

        m_CellToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());

        lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
    }

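    // Optional layer normalisation weights; the input-gate set is only used when CIFG is disabled.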
    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
        }

        m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());

        m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());

        m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());

        auto inputNormWeightTensor = m_Data.m_Parameters.m_CifgEnabled ? nullptr : m_InputLayerNormWeightsTensor.get();
        lstm_param.set_layer_normalization_params(inputNormWeightTensor,
                                                  m_ForgetLayerNormWeightsTensor.get(),
                                                  m_CellLayerNormWeightsTensor.get(),
                                                  m_OutputLayerNormWeightsTensor.get());
    }

    arm_compute::ITensor& output_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    arm_compute::ITensor& cell_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();

    arm_compute::ITensor& output_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    arm_compute::ITensor& cell_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();

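    // The scratch buffer is [batchSize, numUnits * 3] when CIFG is enabled, otherwise [batchSize, numUnits * 4].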
    m_ScratchBuffer = std::make_unique<arm_compute::Tensor>();
    if (m_Data.m_Parameters.m_CifgEnabled)
    {
        BuildArmComputeTensor(*m_ScratchBuffer, TensorInfo({batchSize, numUnits * 3}, armnnDataType));
    }
    else
    {
        BuildArmComputeTensor(*m_ScratchBuffer, TensorInfo({batchSize, numUnits * 4}, armnnDataType));
    }

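    // Clipping thresholds and the activation function, passed to NELSTMLayer::configure() below.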
    float cell_threshold = m_Data.m_Parameters.m_ClippingThresCell;
    float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;

    arm_compute::ActivationLayerInfo activationLayerInfo =
        ConvertLstmActivationFuncToAclLayerInfo(m_Data.m_Parameters.m_ActivationFunc);

    for (unsigned int i = 0; i != maxTime; ++i)
    {
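        // Pick the LSTM input and output ITensors for this time step, depending on
        // the data layout (timeMajor) and the number of time steps (maxTime).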
        arm_compute::ITensor* outputLSTM;
        arm_compute::ITensor* inputLSTM;

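        // Single time step, time major: the LSTM can read the workload input and write the
        // workload output directly, with no split/concat/permute. NELSTMLayer expects 2D
        // tensors, so the 3D TensorInfos are shrunk to 2D first.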
        if (maxTime == 1 && m_Data.m_Parameters.m_TimeMajor)
        {
            TensorShape inputShape = GetTensorShape(input.info()->tensor_shape(), 1U);
            TensorShape outputShape = GetTensorShape(output.info()->tensor_shape(), 1U);

            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
            TensorShape outputShapeShrink({outputShape[1], outputShape[2]});

            auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
            auto acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);

            input.info()->set_tensor_shape(acl_input_shape_shrink);
            inputLSTM = const_cast<arm_compute::ITensor*>(&input);

            output.info()->set_tensor_shape(acl_output_shape_shrink);
            outputLSTM = &output;
        }
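        // Single time step, batch major: no concat is needed, only the permutes.
        // The LSTM reads the output of the initial permute and writes m_ConcatInputs[0],
        // which the final permute copies back to batch major.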
        else if (maxTime == 1 && !m_Data.m_Parameters.m_TimeMajor)
        {
            TensorShape inputShape = GetTensorShape(m_PermuteFirstOut.info()->tensor_shape(), 1U);
            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
            auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
            m_PermuteFirstOut.info()->set_tensor_shape(acl_input_shape_shrink);
            inputLSTM = &m_PermuteFirstOut;

            outputLSTM = const_cast<arm_compute::ITensor*>(m_ConcatInputs[i]);
        }
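        // Multiple time steps: the LSTM reads one split output and writes one concat input per step.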
        else
        {
            inputLSTM = m_SplitterOutputs[i];
            outputLSTM = const_cast<arm_compute::ITensor*>(m_ConcatInputs[i]);
        }

        std::unique_ptr<arm_compute::NELSTMLayer> lstm_layer(new arm_compute::NELSTMLayer());
        lstm_layer->configure(inputLSTM,
                              m_InputToForgetWeightsTensor.get(),
                              m_InputToCellWeightsTensor.get(),
                              m_InputToOutputWeightsTensor.get(),
                              m_RecurrentToForgetWeightsTensor.get(),
                              m_RecurrentToCellWeightsTensor.get(),
                              m_RecurrentToOutputWeightsTensor.get(),
                              m_ForgetGateBiasTensor.get(),
                              m_CellBiasTensor.get(),
                              m_OutputGateBiasTensor.get(),
                              &output_state_in,
                              &cell_state_in,
                              m_ScratchBuffer.get(),
                              &output_state_out,
                              &cell_state_out,
                              outputLSTM,
                              lstm_param,
                              activationLayerInfo,
                              cell_threshold,
                              projection_threshold);

        m_Layers.emplace_back(std::move(lstm_layer));
    }

    armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
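
    // Copy the constant weight and bias data from their handles into the ACL tensors.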
    InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
    InitializeArmComputeTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
    InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
    InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
    InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
    InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
    InitializeArmComputeTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
    InitializeArmComputeTensorData(*m_CellBiasTensor, m_Data.m_CellBias);
    InitializeArmComputeTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);

    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        InitializeArmComputeTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
        InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            InitializeArmComputeTensorData(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights);
        }
        InitializeArmComputeTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
    }
    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        InitializeArmComputeTensorData(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights);
        if (m_Data.m_ProjectionBias != nullptr)
        {
            InitializeArmComputeTensorData(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias);
        }
    }
    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        InitializeArmComputeTensorData(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights);
        InitializeArmComputeTensorData(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights);
    }
    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            InitializeArmComputeTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
        }
        InitializeArmComputeTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
        InitializeArmComputeTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
        InitializeArmComputeTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
    }
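    // Prepare the LSTM layers now that the weight data is in place.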
    for (uint32_t i = 0; i < m_Layers.size(); ++i)
    {
        m_Layers[i]->prepare();
    }

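    //
    // Concat: the 2D outputs of each time step are expanded to 3D and joined along the time axis.
    //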
    TensorShape shape = GetTensorShape(m_ConcatInputs[0]->info()->tensor_shape(), 1U);
    TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});
    TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});

    if (maxTime != 1)
    {
        for (unsigned int i = 0; i < maxTime; ++i)
        {
            m_ConcatInputs[i]->info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
        }

        ConcatDescriptor concatDescriptor(maxTime, numberDimensions);
        for (unsigned int inputIdx = 0u; inputIdx < maxTime; ++inputIdx)
        {
            concatDescriptor.SetViewOriginCoord(inputIdx, dimension, inputIdx);
            concatDescriptor.SetConcatAxis(dimension);
        }

        m_Concat.reset(new arm_compute::NEConcatenateLayer());
        unsigned int aclAxisConcat = CalcAclAxis(concatDescriptor.GetNumDimensions(), concatDescriptor.GetConcatAxis());
        if (!m_Data.m_Parameters.m_TimeMajor)
        {
            TensorInfo concatOutputTensorInfo = outputInfo;
            concatOutputTensorInfo.SetShape(timeMajorShapeOutput);
            BuildArmComputeTensor(concat_out, concatOutputTensorInfo);
            armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_out);

            m_Concat->configure(m_ConcatInputs, &concat_out, aclAxisConcat);
        }
        else
        {
            m_Concat->configure(m_ConcatInputs, &output, aclAxisConcat);
        }

        m_Concat->prepare();
    }
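    // Only one time step: nothing to concatenate, but the output info must still be
    // expanded back to the correct 3D dimensions.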
    else
    {
        if (!m_Data.m_Parameters.m_TimeMajor)
        {
            output.info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandBatchMajor));
        }
        else
        {
            output.info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
        }
    }

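    //
    // Permute: only done if input/output are in batch major format.
    //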
    if (!m_Data.m_Parameters.m_TimeMajor)
    {
        std::unique_ptr<arm_compute::NEPermute> layer(new arm_compute::NEPermute());
        if (maxTime != 1)
        {
            layer->configure(&concat_out, &output, arm_compute::PermutationVector(0U, 2U, 1U));
        }
        else
        {
            layer->configure(m_ConcatInputs[0], &output, arm_compute::PermutationVector(0U, 2U, 1U));
        }
        m_Permute2.reset(layer.release());
    }

    FreeUnusedTensors();
}