ArmNN
 24.08
RefArgMinMaxWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
#include "RefArgMinMaxWorkload.hpp"

#include "ArgMinMax.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
#include "Profiling.hpp"
#include "RefWorkloadUtils.hpp"
13 
14 namespace armnn
15 {
17  const ArgMinMaxQueueDescriptor& descriptor,
18  const WorkloadInfo& info)
20 
21 
23 {
25 }
26 
28 {
29  WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
30  Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
31 }
32 
33 void RefArgMinMaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
34 {
35  ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefArgMinMaxWorkload_Execute");
36 
37  const TensorInfo &inputTensorInfo = GetTensorInfo(inputs[0]);
38 
39  std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, inputs[0]->Map());
40  Decoder<float> &decoder = *decoderPtr;
41 
42  const TensorInfo &outputTensorInfo = GetTensorInfo(outputs[0]);
43 
44  if (outputTensorInfo.GetDataType() == armnn::DataType::Signed32) {
45  int32_t *output = GetOutputTensorData<int32_t>(outputs[0]);
46  ArgMinMax(decoder, output, inputTensorInfo, outputTensorInfo, m_Data.m_Parameters.m_Function,
48  } else {
49  int64_t *output = GetOutputTensorData<int64_t>(outputs[0]);
50  ArgMinMax(decoder, output, inputTensorInfo, outputTensorInfo, m_Data.m_Parameters.m_Function,
52  }
53 }
54 
55 } //namespace armnn
armnn::RefArgMinMaxWorkload::ExecuteAsync
void ExecuteAsync(ExecutionData &executionData) override
Definition: RefArgMinMaxWorkload.cpp:27
armnn::Decoder< float >
armnn::experimental::ExecutionData::m_Data
void * m_Data
Definition: ExecutionData.hpp:16
armnn::TensorInfo
Definition: Tensor.hpp:152
Profiling.hpp
armnn::ArgMinMaxDescriptor::m_Function
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID
#define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
Definition: RefWorkloadUtils.hpp:22
armnn::RefArgMinMaxWorkload::RefArgMinMaxWorkload
RefArgMinMaxWorkload(const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info)
Definition: RefArgMinMaxWorkload.cpp:16
armnn::QueueDescriptorWithParameters::m_Parameters
LayerDescriptor m_Parameters
Definition: WorkloadData.hpp:66
armnn::ArgMinMax
void ArgMinMax(Decoder< float > &in, OUT *out, const TensorInfo &inputTensorInfo, const TensorInfo &outputTensorInfo, ArgMinMaxFunction function, int axis)
Definition: ArgMinMax.cpp:17
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
armnn::GetTensorInfo
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Definition: RefWorkloadUtils.hpp:33
armnn::RefArgMinMaxWorkload::Execute
void Execute() const override
Definition: RefArgMinMaxWorkload.cpp:22
RefArgMinMaxWorkload.hpp
armnn::BoostLogSeverityMapping::info
@ info
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkloadData.hpp:27
armnn::DataType::Signed32
@ Signed32
ArgMinMax.hpp
RefWorkloadUtils.hpp
armnn::BaseWorkload< ArgMinMaxQueueDescriptor >::m_Data
ArgMinMaxQueueDescriptor m_Data
Definition: Workload.hpp:89
Decoders.hpp
armnn::LayerType::Map
@ Map
armnn::experimental::WorkingMemDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkingMemDescriptor.hpp:20
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnn::experimental::WorkingMemDescriptor
Definition: WorkingMemDescriptor.hpp:18
armnn::ArgMinMaxQueueDescriptor
Definition: WorkloadData.hpp:163
Encoders.hpp
armnn::RefBaseWorkload
Definition: RefBaseWorkload.hpp:13
armnn::experimental::WorkingMemDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkingMemDescriptor.hpp:21
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkloadData.hpp:26
armnn::experimental::ExecutionData
Definition: ExecutionData.hpp:14