ArmNN
 25.11
NeonSoftmaxWorkload.hpp
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "NeonBaseWorkload.hpp"
#include <armnn/Descriptors.hpp>

#include <arm_compute/core/Error.h>
#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>

#include <memory>
namespace armnn
{

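// Asks the Arm Compute Library whether a softmax with the given input and output
// TensorInfos and the given SoftmaxDescriptor can run on the Neon (CPU) backend.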
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const SoftmaxDescriptor& descriptor);

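// Workload that executes a softmax on the Neon (CPU) backend: the configured
// Arm Compute function is held in m_SoftmaxLayer and run by Execute().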
class NeonSoftmaxWorkload : public NeonBaseWorkload<SoftmaxQueueDescriptor>
{
public:
    NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor,
                        const WorkloadInfo& info,
                        std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
    virtual void Execute() const override;

private:
    std::unique_ptr<arm_compute::IFunction> m_SoftmaxLayer;
};

} //namespace armnn
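For orientation, the following is a minimal sketch of how backend code might call NeonSoftmaxWorkloadValidate before constructing the workload. It is not taken from this header: the include paths, the 2x4 tensor shape, the descriptor values, and the iostream reporting are illustrative assumptions, and compiling it requires the Arm NN Neon backend sources on the include path and link line.

// Hedged usage sketch (assumptions noted above): check whether the Neon
// backend supports a particular softmax configuration before building the workload.
#include "NeonSoftmaxWorkload.hpp"   // backend-internal header, assumed to be on the include path

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

#include <iostream>

int main()
{
    // Describe a 2x4 float tensor; the softmax output has the same shape and type.
    const armnn::TensorShape shape({2, 4});
    const armnn::TensorInfo input(shape, armnn::DataType::Float32);
    const armnn::TensorInfo output(shape, armnn::DataType::Float32);

    // Softmax parameters: scaling factor and the axis the softmax is applied over.
    armnn::SoftmaxDescriptor descriptor;
    descriptor.m_Beta = 1.0f;
    descriptor.m_Axis = -1;   // last dimension

    const arm_compute::Status status = armnn::NeonSoftmaxWorkloadValidate(input, output, descriptor);
    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        std::cout << "Neon softmax is supported for this configuration\n";
    }
    else
    {
        std::cout << "Not supported: " << status.error_description() << "\n";
    }
    return 0;
}

Constructing the NeonSoftmaxWorkload itself additionally needs a populated SoftmaxQueueDescriptor, a WorkloadInfo, and an arm_compute::MemoryManagerOnDemand, as the constructor declared above indicates; the Neon backend normally supplies these.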