ArmNN
 24.08
NeonRankWorkload.hpp
//
// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "NeonBaseWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <cstring>

namespace armnn
{

struct NeonRankWorkload : public NeonBaseWorkload<RankQueueDescriptor>
{
public:
    // Inherit the base workload constructor (queue descriptor + workload info).
    using NeonBaseWorkload<RankQueueDescriptor>::NeonBaseWorkload;

    virtual void Execute() const override
    {
        ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonRankWorkload_Execute");

        // The rank of the input tensor is simply its number of dimensions.
        const NeonTensorHandle* neonTensorHandle = PolymorphicDowncast<const NeonTensorHandle*>(m_Data.m_Inputs[0]);
        const int32_t rank = static_cast<int32_t>(neonTensorHandle->GetShape().GetNumDimensions());

        // Copy the rank into the scalar output tensor and release the mapping.
        std::memcpy(GetOutputTensorData<void>(0, m_Data), &rank, sizeof(int32_t));
        m_Data.m_Outputs[0]->Unmap();
    }
};

} // namespace armnn
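
In short, Execute() reads the input tensor's dimension count and writes it into the output as a single int32_t scalar. Below is a minimal standalone sketch of that idea in plain C++, with no ArmNN types; ComputeRank and the raw output buffer are hypothetical stand-ins for the tensor handle machinery shown above.

// Standalone illustration of the Rank operation: the output is one int32_t
// holding the number of dimensions of the input shape.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Hypothetical helper standing in for GetShape().GetNumDimensions().
int32_t ComputeRank(const std::vector<unsigned int>& shape)
{
    return static_cast<int32_t>(shape.size());
}

int main()
{
    const std::vector<unsigned int> inputShape = {2, 3, 4}; // a 3D tensor
    const int32_t rank = ComputeRank(inputShape);

    // Mirrors the memcpy into the mapped output tensor handle.
    int32_t outputBuffer = 0;
    std::memcpy(&outputBuffer, &rank, sizeof(int32_t));

    std::cout << "Rank: " << outputBuffer << std::endl; // prints "Rank: 3"
    return 0;
}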