ArmNN
 25.11
Loading...
Searching...
No Matches
NeonWorkloadFactory.hpp
Go to the documentation of this file.
1//
2// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5#pragma once
6
7#include <armnn/Optional.hpp>
8#include <armnn/backends/WorkloadFactoryBase.hpp>
9
10#include <aclCommon/BaseMemoryManager.hpp>
11#include <armnn/backends/IBackendInternal.hpp>
12#include <neon/NeonBackendModelContext.hpp>
13
14#include <arm_compute/runtime/IScheduler.h>
15
16namespace armnn
17{
18
19// Neon workload factory.
20class NeonWorkloadFactory : public WorkloadFactoryBase
21{
22public:
23 NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager);
24
25 NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager,
26 const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr);
27
28 const BackendId& GetBackendId() const override;
29
30 static bool IsLayerSupported(const Layer& layer,
31 Optional<DataType> dataType,
32 std::string& outReasonIfUnsupported);
33
34 static bool IsLayerSupported(const IConnectableLayer& layer,
35 Optional<DataType> dataType,
36 std::string& outReasonIfUnsupported,
37 const ModelOptions& modelOptions);
38
39 bool SupportsSubTensors() const override { return true; }
40
41 ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateSubTensorHandle instead")
42 std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
43 TensorShape const& subTensorShape,
44 unsigned int const* subTensorOrigin) const override;
45
46 ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateTensorHandle instead")
47 std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
48 const bool IsMemoryManaged = true) const override;
49
50 ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateTensorHandle instead")
51 std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
52 DataLayout dataLayout,
53 const bool IsMemoryManaged = true) const override;
54
55 std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
56 const QueueDescriptor& descriptor,
57 const WorkloadInfo& info) const override;
58private:
59 void SetNumberOfThreads();
60
61 mutable std::shared_ptr<NeonMemoryManager> m_MemoryManager;
62 const IBackendInternal::IBackendSpecificModelContextPtr m_ModelContextPtr;
63};
64
65} // namespace armnn
#define ARMNN_DEPRECATED_MSG(message)
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition INetwork.hpp:81
Workload interface to enqueue a layer computation.
Definition IWorkload.hpp:14
NeonWorkloadFactory(const std::shared_ptr< NeonMemoryManager > &memoryManager)
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
Backends should implement their own CreateWorkload function with a switch statement.
static bool IsLayerSupported(const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
const BackendId & GetBackendId() const override
bool SupportsSubTensors() const override
Copyright (c) 2021 ARM Limited and Contributors.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
std::vector< BackendOptions > ModelOptions
DataLayout
Definition Types.hpp:63
Contains information about TensorInfos of a layer.