ArmNN 24.08
NeonUnidirectionalSequenceLstmWorkload.cpp
//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonUnidirectionalSequenceLstmWorkload.hpp"
#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnnUtils/Permute.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
#include <neon/NeonTensorHandle.hpp>

namespace
{
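// ArmNN orders tensor dimensions slowest-varying first, while ACL indexes
// dimensions from the fastest-varying one, so an ArmNN axis must be mirrored
// to obtain the ACL axis. For example, splitting a 3-D time-major tensor on
// ArmNN axis 0 (the maxTime dimension) maps to ACL axis CalcAclAxis(3, 0) == 2.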
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int axis)
{
    return (numDimensions - axis) - 1;
}
} //namespace

namespace armnn
{
using namespace armcomputetensorutils;

NeonUnidirectionalSequenceLstmWorkload::NeonUnidirectionalSequenceLstmWorkload(
    const UnidirectionalSequenceLstmQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : NeonBaseWorkload<UnidirectionalSequenceLstmQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonUnidirectionalSequenceLstmWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         GetGuid());

    // Input/Output tensors
    const arm_compute::ITensor& input       = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& outputStateIn     = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    const arm_compute::ITensor& cellStateIn = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();

    arm_compute::ITensor& outputStateOut = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    arm_compute::ITensor& cellStateOut   = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
    arm_compute::ITensor& output         = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();

    TensorInfo inputInfo  = info.m_InputTensorInfos[0];
    TensorInfo outputInfo = info.m_OutputTensorInfos[2];

    TensorShape inputLayerShape  = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetShape();
    TensorShape outputLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetShape();

    unsigned int maxTime    = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1];
    unsigned int batchSize  = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0];
    unsigned int inputSize  = inputLayerShape[2];
    unsigned int outputSize = outputLayerShape[2];

    const TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});
    const TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});

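    // Shapes follow the ArmNN convention: time-major data is
    // [maxTime, batchSize, size], batch-major data is [batchSize, maxTime, size].
    // E.g. with maxTime = 5, batchSize = 2 and inputSize = 3, a batch-major
    // input of shape [2, 5, 3] is processed internally as time-major [5, 2, 3].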
    //
    // Permute: performed if Unidirectional Sequence Layer inputs/outputs are in batch major format.
    //
    if (!m_Data.m_Parameters.m_TimeMajor)
    {
        std::unique_ptr<arm_compute::NEPermute> layer(new arm_compute::NEPermute());

        TensorInfo permuteOutInfo = inputInfo;
        permuteOutInfo.SetShape(timeMajorShapeInput);
        BuildArmComputeTensor(m_PermuteFirstOut, permuteOutInfo);
        armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_PermuteFirstOut);

        // Permute to time major format.
        layer->configure(&input, &m_PermuteFirstOut, arm_compute::PermutationVector(0U, 2U, 1U));
        m_Permute1.reset(layer.release());
    }
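    // The permutation vector above is expressed in ACL dimension order
    // (fastest-varying dimension first): (0U, 2U, 1U) keeps the innermost
    // inputSize dimension in place and swaps the batch and time dimensions,
    // producing the time-major layout the per-timestep LSTM expects.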

    //
    // Split and Concat Tensors
    //
    for (unsigned int i = 0; i < maxTime; ++i)
    {
        arm_compute::Tensor splitter_out;
        arm_compute::Tensor concat_in;

        auto splitterTensorInfo = inputInfo;
        auto concatTensorInfo   = outputInfo;
        splitterTensorInfo.SetShape({batchSize, inputSize});
        concatTensorInfo.SetShape({batchSize, outputSize});
        BuildArmComputeTensor(splitter_out, splitterTensorInfo);
        BuildArmComputeTensor(concat_in, concatTensorInfo);

        armcomputetensorutils::InitialiseArmComputeTensorEmpty(splitter_out);
        armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_in);

        // Append to std::vector<arm_compute::Tensor>.
        m_SplitterOutputsTensors.push_back(std::move(splitter_out));
        m_ConcatInputsTensors.push_back(std::move(concat_in));
    }

    for (unsigned int i = 0; i < maxTime; ++i)
    {
        // Append to std::vector<arm_compute::ITensor*>.
        m_SplitterOutputs.push_back(&m_SplitterOutputsTensors[i]);
        m_ConcatInputs.push_back(&m_ConcatInputsTensors[i]);
    }

    //
    // Split
    //
    unsigned int numberDimensions = 3;
    unsigned int dimension = 0; // splitting on 0-dimension (i.e. maxTime dimension)

    if (maxTime != 1) // ACL split does not work with only one element to split.
    {
        ViewsDescriptor splitterDesc(maxTime, numberDimensions);
        unsigned int splitterDimSizes[3] = {1, batchSize, inputSize};
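        // Each view selects one timestep: view t has origin {t, 0, 0} and
        // size {1, batchSize, inputSize} within the time-major input.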
        for (unsigned int outputIdx = 0u; outputIdx < maxTime; ++outputIdx)
        {
            splitterDesc.SetViewOriginCoord(outputIdx, dimension, splitterDimSizes[dimension] * outputIdx);
            for (unsigned int dimIdx = 0u; dimIdx < numberDimensions; ++dimIdx)
            {
                splitterDesc.SetViewSize(outputIdx, dimIdx, splitterDimSizes[dimIdx]);
            }
        }

        std::set<unsigned int> splitAxis = ComputeSplitAxis(splitterDesc, timeMajorShapeInput);

        std::unique_ptr<arm_compute::NESplit> split_layer(new arm_compute::NESplit());
        unsigned int aclAxisSplit = CalcAclAxis(splitterDesc.GetNumDimensions(), *splitAxis.begin());
        if (!m_Data.m_Parameters.m_TimeMajor)
        {
            split_layer->configure(&m_PermuteFirstOut, m_SplitterOutputs, aclAxisSplit);
        }
        else
        {
            split_layer->configure(&input, m_SplitterOutputs, aclAxisSplit);
        }

        split_layer->prepare();
        m_Splitter.reset(split_layer.release());
    }

    //
    // Lstm
    //
    arm_compute::LSTMParams<arm_compute::ITensor> lstm_param;

    lstm_param.set_cell_clip_params(descriptor.m_Parameters.m_ClippingThresCell);
    lstm_param.set_projection_clip_params(descriptor.m_Parameters.m_ClippingThresProj);

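    // NEQLSTMLayer implements a quantized LSTM, so beyond the clipping
    // thresholds it also needs the quantization scale of each gate's
    // intermediate result and the hidden state's zero point and scale.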
    lstm_param.set_matmul_scale_params(descriptor.m_Parameters.m_InputIntermediateScale,
                                       descriptor.m_Parameters.m_ForgetIntermediateScale,
                                       descriptor.m_Parameters.m_CellIntermediateScale,
                                       descriptor.m_Parameters.m_OutputIntermediateScale);

    lstm_param.set_hidden_state_params(descriptor.m_Parameters.m_HiddenStateZeroPoint,
                                       descriptor.m_Parameters.m_HiddenStateScale);

    m_InputToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());

    m_InputToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());

    m_InputToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());

    m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());

    m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());

    m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());

    m_ForgetGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());

    m_CellBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());

    m_OutputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());

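    // CIFG (coupled input & forget gate) derives the input gate from the
    // forget gate, so the explicit input-gate weights and bias below are only
    // required when CIFG is disabled.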
    // for future reference: check the AndroidNN API for the logic here
    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        m_InputToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());

        m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());

        m_CellToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
        }

        m_InputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());
        lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
                                   m_RecurrentToInputWeightsTensor.get(),
                                   m_Data.m_CellToInputWeights ? m_CellToInputWeightsTensor.get() : nullptr,
                                   m_InputGateBiasTensor.get());
    }

    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        m_ProjectionWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());

        m_ProjectionBiasTensor = std::make_unique<arm_compute::Tensor>();
        if (m_Data.m_ProjectionBias != nullptr)
        {
            BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
        }

        lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
                                         m_Data.m_ProjectionBias ? m_ProjectionBiasTensor.get() : nullptr);
    }

    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        m_CellToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());

        m_CellToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());

        lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
    }

    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
        }

        m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());

        m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());

        m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());

        auto inputNormWeightTensor = m_Data.m_Parameters.m_CifgEnabled ? nullptr : m_InputLayerNormWeightsTensor.get();
        lstm_param.set_layer_normalization_params(inputNormWeightTensor,
                                                  m_ForgetLayerNormWeightsTensor.get(),
                                                  m_CellLayerNormWeightsTensor.get(),
                                                  m_OutputLayerNormWeightsTensor.get());
    }

    for (unsigned int i = 0; i != maxTime; ++i)
    {
        // Set LSTM input and output ITensors depending on:
        // input format (timeMajor) & number of LSTM batches (maxTime).
        arm_compute::ITensor* outputLSTM;
        arm_compute::ITensor* inputLSTM;

        // If there is only one LSTM time major batch, we will not concat OR permute.
        // Set input of LSTM to be first input ITensor.
        // Set output of LSTM to be final output ITensor.
        // LSTM input/output cannot be > 2 dimensions so need to resize its TensorInfo.
        if (maxTime == 1 && m_Data.m_Parameters.m_TimeMajor)
        {
            TensorShape inputShape  = GetTensorShape(input.info()->tensor_shape(), 1U);
            TensorShape outputShape = GetTensorShape(output.info()->tensor_shape(), 1U);

            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
            TensorShape outputShapeShrink({outputShape[1], outputShape[2]});

            auto acl_input_shape_shrink  = BuildArmComputeTensorShape(inputShapeShrink);
            auto acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);

            input.info()->set_tensor_shape(acl_input_shape_shrink);
            inputLSTM = const_cast<arm_compute::ITensor*>(&input);

            output.info()->set_tensor_shape(acl_output_shape_shrink);
            outputLSTM = &output;
        }
        // If there is only one LSTM batch major batch, we will not concat, only permute.
        // Set input of LSTM to be output of initial permute.
        // Set output of LSTM to be first element of m_ConcatInputs & use that value later in permute.
        // LSTM output cannot be > 2 dimensions so need to resize its TensorInfo.
        else if (maxTime == 1 && !m_Data.m_Parameters.m_TimeMajor)
        {
            TensorShape inputShape = GetTensorShape(m_PermuteFirstOut.info()->tensor_shape(), 1U);
            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
            auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
            m_PermuteFirstOut.info()->set_tensor_shape(acl_input_shape_shrink);
            inputLSTM = &m_PermuteFirstOut;

            outputLSTM = const_cast<arm_compute::ITensor*>(m_ConcatInputs[i]);
        }
        // Batch major AND/OR 2+ LSTM batches so will use concat AND/OR permute later on.
        else
        {
            inputLSTM  = m_SplitterOutputs[i];
            outputLSTM = const_cast<arm_compute::ITensor*>(m_ConcatInputs[i]);
        }

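        // One NEQLSTMLayer instance is configured per timestep. Every instance
        // shares the same weight and bias tensors, so the constant data only
        // has to be loaded once (see the single InitializeArmComputeTensorData
        // pass after this loop).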
        std::unique_ptr<arm_compute::NEQLSTMLayer> lstm_layer(new arm_compute::NEQLSTMLayer());

        lstm_layer->configure(inputLSTM,
                              m_InputToForgetWeightsTensor.get(),
                              m_InputToCellWeightsTensor.get(),
                              m_InputToOutputWeightsTensor.get(),
                              m_RecurrentToForgetWeightsTensor.get(),
                              m_RecurrentToCellWeightsTensor.get(),
                              m_RecurrentToOutputWeightsTensor.get(),
                              m_ForgetGateBiasTensor.get(),
                              m_CellBiasTensor.get(),
                              m_OutputGateBiasTensor.get(),
                              &cellStateIn,
                              &outputStateIn,
                              &cellStateOut,
                              &outputStateOut,
                              outputLSTM,
                              lstm_param);

        m_Layers.emplace_back(std::move(lstm_layer));
    }

    InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
    InitializeArmComputeTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
    InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
    InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
    InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
    InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
    InitializeArmComputeTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
    InitializeArmComputeTensorData(*m_CellBiasTensor, m_Data.m_CellBias);
    InitializeArmComputeTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);

    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        InitializeArmComputeTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
        InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            InitializeArmComputeTensorData(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights);
        }
        InitializeArmComputeTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
    }

    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        InitializeArmComputeTensorData(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights);
        if (m_Data.m_ProjectionBias != nullptr)
        {
            InitializeArmComputeTensorData(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias);
        }
    }

    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        InitializeArmComputeTensorData(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights);
        InitializeArmComputeTensorData(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights);
    }

    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            InitializeArmComputeTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
        }
        InitializeArmComputeTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
        InitializeArmComputeTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
        InitializeArmComputeTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
    }

    // Force Compute Library to perform the necessary copying and reshaping;
    // afterwards, the input tensors that are no longer needed will be freed.
    for (uint32_t i = 0; i < m_Layers.size(); ++i)
    {
        m_Layers[i]->prepare();
    }

    //
    // Concat
    //

    // Expand dimensions of LSTM outputs adding one empty dimension to fit concatenate inputs.
    TensorShape shape = GetTensorShape(m_ConcatInputs[0]->info()->tensor_shape(), 1U);
    TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});
    TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});
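    // E.g. a per-timestep LSTM output of [batchSize, outputSize] = [2, 4]
    // is expanded to [1, 2, 4] (time-major) or [2, 1, 4] (batch-major).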

    if (maxTime != 1) // ACL concat does not work with only one element to concatenate.
    {
        for (unsigned int i = 0; i < maxTime; ++i)
        {
            m_ConcatInputs[i]->info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
        }
        ConcatDescriptor concatDescriptor(maxTime, numberDimensions); // maxTime = num inputs (aka. number of views).

        for (unsigned int inputIdx = 0u; inputIdx < maxTime; ++inputIdx)
        {
            concatDescriptor.SetViewOriginCoord(inputIdx, dimension, inputIdx);
            concatDescriptor.SetConcatAxis(dimension);
        }
        m_Concat.reset(new arm_compute::NEConcatenateLayer());

        unsigned int aclAxisConcat = CalcAclAxis(concatDescriptor.GetNumDimensions(), concatDescriptor.GetConcatAxis());
        if (!m_Data.m_Parameters.m_TimeMajor)
        {
            TensorInfo concatOutputTensorInfo = outputInfo;
            concatOutputTensorInfo.SetShape(timeMajorShapeOutput);
            BuildArmComputeTensor(concat_out, concatOutputTensorInfo);
            armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_out);

            m_Concat->configure(m_ConcatInputs, &concat_out, aclAxisConcat);
        }
        else
        {
            m_Concat->configure(m_ConcatInputs, &output, aclAxisConcat);
        }

        m_Concat->prepare();
    }
    // If only one LSTM batch, we do not concat and/or permute.
    // Must ensure final output info is expanded to correct batch major dimensions.
    else
    {
        if (!m_Data.m_Parameters.m_TimeMajor)
        {
            output.info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandBatchMajor));
        }
        else
        {
            output.info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
        }
    }

    //
    // Permute: only done if input/output are in batch major format.
    //
    if (!m_Data.m_Parameters.m_TimeMajor)
    {
        // Output now time major. Permute output back to batch major.
        std::unique_ptr<arm_compute::NEPermute> layer(new arm_compute::NEPermute());
        if (maxTime != 1)
        {
            layer->configure(&concat_out, &output, arm_compute::PermutationVector(0U, 2U, 1U));
        }
        else
        {
            layer->configure(m_ConcatInputs[0], &output, arm_compute::PermutationVector(0U, 2U, 1U));
        }
        m_Permute2.reset(layer.release());
    }

    FreeUnusedTensors();
}

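// Runs the assembled pipeline in order: the optional batch-to-time permute,
// the optional split into timesteps, one NEQLSTMLayer per timestep, the
// optional concat of the per-timestep results, and the optional
// time-to-batch permute.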
void NeonUnidirectionalSequenceLstmWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonUnidirectionalSequenceLstmWorkload_Execute");
    if (m_Permute1)
    {
        m_Permute1->run();
    }
    if (m_Splitter)
    {
        m_Splitter->run();
    }
    for (uint32_t i = 0; i < m_Layers.size(); ++i)
    {
        m_Layers[i]->run();
    }
    if (m_Concat)
    {
        m_Concat->run();
    }
    if (m_Permute2)
    {
        m_Permute2->run();
    }
}

arm_compute::Status
NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo& input,
                                               const TensorInfo& outputStateIn,
                                               const TensorInfo& cellStateIn,
                                               const TensorInfo& outputStateOut,
                                               const TensorInfo& cellStateOut,
                                               const TensorInfo& output,
                                               const UnidirectionalSequenceLstmDescriptor& descriptor,
                                               const LstmInputParamsInfo& paramsInfo)
{
    TensorShape inputLayerShape  = input.GetShape();
    TensorShape outputLayerShape = output.GetShape();

    if (inputLayerShape.GetNumDimensions() != 3)
    {
        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
                                   "Unidirectional Sequence LSTM layer validate status failed.");
    }

    unsigned int maxTime    = descriptor.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1];
    unsigned int batchSize  = descriptor.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0];
    unsigned int inputSize  = inputLayerShape[2];
    unsigned int outputSize = outputLayerShape[2];

    const TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});
    const TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});

    arm_compute::Status statusPermute1 = arm_compute::Status(arm_compute::ErrorCode::OK, "Permute1 status");
    arm_compute::Status statusSplit    = arm_compute::Status(arm_compute::ErrorCode::OK, "Split status");
    arm_compute::Status statusLSTM     = arm_compute::Status(arm_compute::ErrorCode::OK, "LSTM status");
    arm_compute::Status statusConcat   = arm_compute::Status(arm_compute::ErrorCode::OK, "Concat status");
    arm_compute::Status statusPermute2 = arm_compute::Status(arm_compute::ErrorCode::OK, "Permute2 status");

    const arm_compute::TensorInfo aclInputInfo  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    //
    // Permute validate
    //
    TensorInfo permuteOutInfo = armnnUtils::Permuted(input, { 1U, 0U, 2U });
    arm_compute::TensorInfo aclPermuteOutInfo = armcomputetensorutils::BuildArmComputeTensorInfo(permuteOutInfo);
    if (!descriptor.m_TimeMajor)
    {
        statusPermute1 = arm_compute::NEPermute::validate(&aclInputInfo,
                                                          &aclPermuteOutInfo,
                                                          arm_compute::PermutationVector(0U, 2U, 1U));
    }

    //
    // Split and Concat Tensors validate
    //
    std::vector<arm_compute::TensorInfo> splitterOutputsTensorInfos;
    std::vector<arm_compute::TensorInfo> concatInputsTensorInfos;
    std::vector<arm_compute::ITensorInfo*> splitterOutputsTensorInfosPtr;
    std::vector<const arm_compute::ITensorInfo*> concatInputsTensorInfosPtr;
    splitterOutputsTensorInfos.reserve(maxTime);
    concatInputsTensorInfos.reserve(maxTime);
    for (unsigned int i = 0; i < maxTime; ++i)
    {
        arm_compute::TensorInfo splitter_out;
        arm_compute::TensorInfo concat_in;

        auto splitterTensorInfo = TensorInfo(input);
        auto concatTensorInfo   = TensorInfo(output);
        splitterTensorInfo.SetShape({batchSize, inputSize});
        concatTensorInfo.SetShape({batchSize, outputSize});

        arm_compute::TensorInfo aclSplitterTensorInfo
            = armcomputetensorutils::BuildArmComputeTensorInfo(splitterTensorInfo);
        arm_compute::TensorInfo aclConcatTensorInfo
            = armcomputetensorutils::BuildArmComputeTensorInfo(concatTensorInfo);

        splitterOutputsTensorInfos.emplace_back(aclSplitterTensorInfo);
        concatInputsTensorInfos.emplace_back(aclConcatTensorInfo);
        splitterOutputsTensorInfosPtr.emplace_back(&splitterOutputsTensorInfos[i]);
        concatInputsTensorInfosPtr.emplace_back(&concatInputsTensorInfos[i]);
    }

    //
    // Split validate
    //
    unsigned int numberDimensions = 3;
    unsigned int dimension = 0; // splitting on 0-dimension (i.e. maxTime dimension)
    unsigned int aclAxisSplit = CalcAclAxis(numberDimensions, dimension);

    if (maxTime != 1) // ACL split does not work with only one element to split.
    {
        if (!descriptor.m_TimeMajor)
        {
            statusSplit = arm_compute::NESplit::validate(&aclPermuteOutInfo,
                                                         splitterOutputsTensorInfosPtr,
                                                         aclAxisSplit);
        }
        else
        {
            statusSplit = arm_compute::NESplit::validate(&aclInputInfo, splitterOutputsTensorInfosPtr, aclAxisSplit);
        }
    }

    //
    // LSTM validate
    //

    arm_compute::LSTMParams<arm_compute::ITensorInfo> lstm_params_info;

    unsigned int numUnits = cellStateIn.GetShape()[1];
    unsigned int scratchBufferFactor = 4;
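    // scratchBuffer = { batchSize, numUnits * 4 } without CIFG,
    // i.e. one slice per gate (input, forget, cell and output).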

    if (descriptor.m_CifgEnabled)
    {
        // scratchBuffer = { batchSize, numUnits * 3 } with CIFG
        scratchBufferFactor = 3;
    }

    const TensorInfo& scratchBuffer = TensorInfo({ batchSize, numUnits * scratchBufferFactor }, input.GetDataType());

    lstm_params_info.set_cell_clip_params(descriptor.m_ClippingThresCell);
    lstm_params_info.set_projection_clip_params(descriptor.m_ClippingThresProj);

    // The inputs and outputs
    const arm_compute::TensorInfo aclOutputStateInInfo  = BuildArmComputeTensorInfo(outputStateIn);
    const arm_compute::TensorInfo aclCellStateInInfo    = BuildArmComputeTensorInfo(cellStateIn);
    const arm_compute::TensorInfo aclScratchBufferInfo  = BuildArmComputeTensorInfo(scratchBuffer);
    const arm_compute::TensorInfo aclOutputStateOutInfo = BuildArmComputeTensorInfo(outputStateOut);
    const arm_compute::TensorInfo aclCellStateOutInfo   = BuildArmComputeTensorInfo(cellStateOut);

    // Basic parameters
    const arm_compute::TensorInfo aclInputToForgetWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetInputToForgetWeights());
    const arm_compute::TensorInfo aclInputToCellWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetInputToCellWeights());
    const arm_compute::TensorInfo aclInputToOutputWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetInputToOutputWeights());
    const arm_compute::TensorInfo aclRecurrentToForgetWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToForgetWeights());
    const arm_compute::TensorInfo aclRecurrentToCellWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToCellWeights());
    const arm_compute::TensorInfo aclRecurrentToOutputWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToOutputWeights());
    const arm_compute::TensorInfo aclForgetGateBiasInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetForgetGateBias());
    const arm_compute::TensorInfo aclCellBiasInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetCellBias());
    const arm_compute::TensorInfo aclOutputGateBiasInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetOutputGateBias());

    arm_compute::TensorInfo aclInputToInputWeightsInfo;
    arm_compute::TensorInfo aclRecurrentToInputWeightsInfo;
    arm_compute::TensorInfo aclCellToInputWeightsInfo;
    arm_compute::TensorInfo aclInputGateBiasInfo;
    arm_compute::TensorInfo aclProjectionWeightsInfo;
    arm_compute::TensorInfo aclProjectionBiasInfo;
    arm_compute::TensorInfo aclCellToForgetWeightsInfo;
    arm_compute::TensorInfo aclCellToOutputWeightsInfo;

    arm_compute::TensorInfo aclInputLayerNormWeightsInfo;
    arm_compute::TensorInfo aclForgetLayerNormWeightsInfo;
    arm_compute::TensorInfo aclCellLayerNormWeightsInfo;
    arm_compute::TensorInfo aclOutputLayerNormWeightsInfo;

    if (!descriptor.m_CifgEnabled)
    {
        if (descriptor.m_PeepholeEnabled)
        {
            aclCellToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToInputWeights());
        }
        aclInputToInputWeightsInfo     = BuildArmComputeTensorInfo(paramsInfo.GetInputToInputWeights());
        aclRecurrentToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToInputWeights());
        aclInputGateBiasInfo           = BuildArmComputeTensorInfo(paramsInfo.GetInputGateBias());

        lstm_params_info.set_cifg_params(&aclInputToInputWeightsInfo,
                                         &aclRecurrentToInputWeightsInfo,
                                         descriptor.m_PeepholeEnabled ? &aclCellToInputWeightsInfo : nullptr,
                                         &aclInputGateBiasInfo);
    }

    if (descriptor.m_ProjectionEnabled)
    {
        if (paramsInfo.m_ProjectionBias != nullptr)
        {
            aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionBias());
        }
        aclProjectionWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionWeights());

        lstm_params_info.set_projection_params(&aclProjectionWeightsInfo,
                                               paramsInfo.m_ProjectionBias ? &aclProjectionBiasInfo : nullptr);
    }

    if (descriptor.m_PeepholeEnabled)
    {
        aclCellToForgetWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToForgetWeights());
        aclCellToOutputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToOutputWeights());

        lstm_params_info.set_peephole_params(&aclCellToForgetWeightsInfo, &aclCellToOutputWeightsInfo);
    }

    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            aclInputLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputLayerNormWeights());
        }
        aclForgetLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetForgetLayerNormWeights());
        aclCellLayerNormWeightsInfo   = BuildArmComputeTensorInfo(paramsInfo.GetCellLayerNormWeights());
        aclOutputLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetOutputLayerNormWeights());

        lstm_params_info.set_layer_normalization_params(descriptor.m_CifgEnabled ? nullptr :
                                                            &aclInputLayerNormWeightsInfo,
                                                        &aclForgetLayerNormWeightsInfo,
                                                        &aclCellLayerNormWeightsInfo,
                                                        &aclOutputLayerNormWeightsInfo);
    }

    lstm_params_info.set_matmul_scale_params(descriptor.m_InputIntermediateScale,
                                             descriptor.m_ForgetIntermediateScale,
                                             descriptor.m_CellIntermediateScale,
                                             descriptor.m_OutputIntermediateScale);

    lstm_params_info.set_hidden_state_params(descriptor.m_HiddenStateZeroPoint, descriptor.m_HiddenStateScale);

    for (unsigned int i = 0; i != maxTime; ++i)
    {
        // Set LSTM input and output ITensorInfos depending on:
        // input format (timeMajor) & number of LSTM batches (maxTime).
        arm_compute::ITensorInfo* outputLSTM;
        arm_compute::ITensorInfo* inputLSTM;

        // If there is only one LSTM time major batch, we will not concat OR permute.
        // Set input of LSTM to be first input ITensor.
        // Set output of LSTM to be final output ITensor.
        // LSTM input/output cannot be > 2 dimensions so need to resize its TensorInfo.
        if (maxTime == 1 && descriptor.m_TimeMajor)
        {
            TensorShape inputShape  = GetTensorShape(aclInputInfo.tensor_shape(), 1U);
            TensorShape outputShape = GetTensorShape(aclOutputInfo.tensor_shape(), 1U);

            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
            TensorShape outputShapeShrink({outputShape[1], outputShape[2]});

            auto acl_input_shape_shrink  = BuildArmComputeTensorShape(inputShapeShrink);
            auto acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);

            const_cast<arm_compute::TensorInfo*>(&aclInputInfo)->set_tensor_shape(acl_input_shape_shrink);
            inputLSTM = const_cast<arm_compute::TensorInfo*>(&aclInputInfo);

            const_cast<arm_compute::TensorInfo*>(&aclOutputInfo)->set_tensor_shape(acl_output_shape_shrink);
            outputLSTM = const_cast<arm_compute::TensorInfo*>(&aclOutputInfo);
        }
        // If there is only one LSTM batch major batch, we will not concat, only permute.
        // Set input of LSTM to be output of initial permute.
        // Set output of LSTM to be first element of m_ConcatInputs & use that value later in permute.
        // LSTM output cannot be > 2 dimensions so need to resize its TensorInfo.
        else if (maxTime == 1 && !descriptor.m_TimeMajor)
        {
            TensorShape inputShape = GetTensorShape(aclPermuteOutInfo.tensor_shape(), 1U);
            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
            auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
            aclPermuteOutInfo.set_tensor_shape(acl_input_shape_shrink);
            inputLSTM = &aclPermuteOutInfo;

            outputLSTM = const_cast<arm_compute::ITensorInfo*>(concatInputsTensorInfosPtr[i]);
        }
        // Batch major AND/OR 2+ LSTM batches so will use concat AND/OR permute later on.
        else
        {
            inputLSTM  = splitterOutputsTensorInfosPtr[i];
            outputLSTM = const_cast<arm_compute::ITensorInfo*>(concatInputsTensorInfosPtr[i]);
        }

        statusLSTM = arm_compute::NEQLSTMLayer::validate(inputLSTM,
                                                         &aclInputToForgetWeightsInfo,
                                                         &aclInputToCellWeightsInfo,
                                                         &aclInputToOutputWeightsInfo,
                                                         &aclRecurrentToForgetWeightsInfo,
                                                         &aclRecurrentToCellWeightsInfo,
                                                         &aclRecurrentToOutputWeightsInfo,
                                                         &aclForgetGateBiasInfo,
                                                         &aclCellBiasInfo,
                                                         &aclOutputGateBiasInfo,
                                                         &aclCellStateInInfo,
                                                         &aclOutputStateInInfo,
                                                         &aclCellStateOutInfo,
                                                         &aclOutputStateOutInfo,
                                                         outputLSTM,
                                                         lstm_params_info);
    }

    //
    // Concat validate
    //

    // Expand dimensions of LSTM outputs adding one empty dimension to fit concatenate inputs.
    TensorShape shape = GetTensorShape(concatInputsTensorInfosPtr[0]->tensor_shape(), 1U);
    TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});
    TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});

    TensorInfo concatOutputTensorInfo = TensorInfo(output);
    concatOutputTensorInfo.SetShape(timeMajorShapeOutput);
    arm_compute::TensorInfo aclConcatOutputTensorInfo = BuildArmComputeTensorInfo(concatOutputTensorInfo);

    if (maxTime != 1) // ACL concat does not work with only one element to concatenate.
    {
        for (unsigned int i = 0; i < maxTime; ++i)
        {
            auto acl_shape_expand = BuildArmComputeTensorShape(shapeExpandTimeMajor);
            concatInputsTensorInfos[i].set_tensor_shape(acl_shape_expand);
        }

        unsigned int aclAxisConcat = CalcAclAxis(numberDimensions, dimension);
        if (!descriptor.m_TimeMajor)
        {
            statusConcat = arm_compute::NEConcatenateLayer::validate(concatInputsTensorInfosPtr,
                                                                     &aclConcatOutputTensorInfo,
                                                                     aclAxisConcat);
        }
        else
        {
            statusConcat = arm_compute::NEConcatenateLayer::validate(concatInputsTensorInfosPtr,
                                                                     &aclOutputInfo,
                                                                     aclAxisConcat);
        }
    }
    // If only one LSTM batch, we do not concat and/or permute.
    // Must ensure final output info is expanded to correct batch major dimensions.
    else
    {
        if (!descriptor.m_TimeMajor)
        {
            const_cast<arm_compute::TensorInfo*>(&aclInputInfo)->set_tensor_shape(
                BuildArmComputeTensorShape(shapeExpandBatchMajor));
        }
        else
        {
            const_cast<arm_compute::TensorInfo*>(&aclInputInfo)->set_tensor_shape(
                BuildArmComputeTensorShape(shapeExpandTimeMajor));
        }
    }

    //
    // Permute validate
    //
    if (!descriptor.m_TimeMajor)
    {
        // Output now time major. Permute output back to batch major.
        if (maxTime != 1)
        {
            statusPermute2 = arm_compute::NEPermute::validate(&aclConcatOutputTensorInfo,
                                                              &aclOutputInfo,
                                                              arm_compute::PermutationVector(0U, 2U, 1U));
        }
        else
        {
            statusPermute2 = arm_compute::NEPermute::validate(concatInputsTensorInfosPtr[0],
                                                              &aclOutputInfo,
                                                              arm_compute::PermutationVector(0U, 2U, 1U));
        }
    }

    auto okCode = arm_compute::ErrorCode::OK;
    if (statusPermute1.error_code() == okCode &&
        statusSplit.error_code()    == okCode &&
        statusLSTM.error_code()     == okCode &&
        statusConcat.error_code()   == okCode &&
        statusPermute2.error_code() == okCode)
    {
        return arm_compute::Status(arm_compute::ErrorCode::OK,
                                   "All Unidirectional Sequence LSTM layer validate status OK.");
    }
    else
    {
        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
                                   "Unidirectional Sequence LSTM layer validate status failed.");
    }
}

void NeonUnidirectionalSequenceLstmWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_InputToInputWeightsTensor);
    FreeTensorIfUnused(m_InputToForgetWeightsTensor);
    FreeTensorIfUnused(m_InputToCellWeightsTensor);
    FreeTensorIfUnused(m_InputToOutputWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToInputWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToForgetWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToCellWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToOutputWeightsTensor);
    FreeTensorIfUnused(m_CellToInputWeightsTensor);
    FreeTensorIfUnused(m_CellToForgetWeightsTensor);
    FreeTensorIfUnused(m_CellToOutputWeightsTensor);
    FreeTensorIfUnused(m_InputGateBiasTensor);
    FreeTensorIfUnused(m_ForgetGateBiasTensor);
    FreeTensorIfUnused(m_CellBiasTensor);
    FreeTensorIfUnused(m_OutputGateBiasTensor);
    FreeTensorIfUnused(m_ProjectionWeightsTensor);
    FreeTensorIfUnused(m_ProjectionBiasTensor);
    FreeTensorIfUnused(m_InputLayerNormWeightsTensor);
    FreeTensorIfUnused(m_ForgetLayerNormWeightsTensor);
    FreeTensorIfUnused(m_CellLayerNormWeightsTensor);
    FreeTensorIfUnused(m_OutputLayerNormWeightsTensor);
}

} //namespace armnn