Arm NN 24.02
SampleDynamicWorkloadFactory.hpp
    static bool IsLayerSupported(const armnn::IConnectableLayer& layer,
                                 armnn::Optional<armnn::DataType> dataType,
                                 std::string& outReasonIfUnsupported);

    ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateSubTensorHandle instead")
    std::unique_ptr<armnn::ITensorHandle> CreateSubTensorHandle(armnn::ITensorHandle& parent,
                                                                armnn::TensorShape const& subTensorShape,
                                                                unsigned int const* subTensorOrigin) const override
    {
        IgnoreUnused(parent, subTensorShape, subTensorOrigin);
        return nullptr;
    }

    std::unique_ptr<armnn::ITensorHandle> CreateTensorHandle(const armnn::TensorInfo& tensorInfo,
                                                             const bool IsMemoryManaged = true) const override;

    std::unique_ptr<armnn::ITensorHandle> CreateTensorHandle(const armnn::TensorInfo& tensorInfo,
                                                             armnn::DataLayout dataLayout,
                                                             const bool IsMemoryManaged = true) const override;

    std::unique_ptr<armnn::IWorkload> CreateAddition(const armnn::AdditionQueueDescriptor& descriptor,
                                                     const armnn::WorkloadInfo& info) const;

    std::unique_ptr<armnn::IWorkload> CreateInput(const armnn::InputQueueDescriptor& descriptor,
                                                  const armnn::WorkloadInfo& info) const;

    std::unique_ptr<armnn::IWorkload> CreateOutput(const armnn::OutputQueueDescriptor& descriptor,
                                                   const armnn::WorkloadInfo& info) const;

    std::unique_ptr<armnn::IWorkload> CreateWorkload(armnn::LayerType type,
                                                     const armnn::QueueDescriptor& descriptor,
                                                     const armnn::WorkloadInfo& info) const override;
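The CreateWorkload overload at the end of the listing is the dispatch point; as the member summary below notes, backends should implement their own CreateWorkload function with a switch statement. The following is a minimal sketch of such a dispatch, not the sample backend's actual implementation: the header name in the include, the set of handled layer types, and the use of armnn::PolymorphicDowncast to recover the concrete descriptor types are assumptions for illustration.

#include <armnn/utility/PolymorphicDowncast.hpp>
#include "SampleDynamicWorkloadFactory.hpp"  // assumed name of the header documented above

std::unique_ptr<armnn::IWorkload> SampleDynamicWorkloadFactory::CreateWorkload(
    armnn::LayerType type,
    const armnn::QueueDescriptor& descriptor,
    const armnn::WorkloadInfo& info) const
{
    switch (type)
    {
        case armnn::LayerType::Addition:
        {
            // The runtime passes an AdditionQueueDescriptor for this layer type.
            auto additionDescriptor =
                armnn::PolymorphicDowncast<const armnn::AdditionQueueDescriptor*>(&descriptor);
            return CreateAddition(*additionDescriptor, info);
        }
        case armnn::LayerType::Input:
        {
            auto inputDescriptor =
                armnn::PolymorphicDowncast<const armnn::InputQueueDescriptor*>(&descriptor);
            return CreateInput(*inputDescriptor, info);
        }
        case armnn::LayerType::Output:
        {
            auto outputDescriptor =
                armnn::PolymorphicDowncast<const armnn::OutputQueueDescriptor*>(&descriptor);
            return CreateOutput(*outputDescriptor, info);
        }
        default:
            // Layer types this sample backend does not handle: return no workload.
            return nullptr;
    }
}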
SampleDynamicWorkloadFactory members:

SampleDynamicWorkloadFactory()
~SampleDynamicWorkloadFactory()
const armnn::BackendId& GetBackendId() const override
static bool IsLayerSupported(const armnn::IConnectableLayer& layer, armnn::Optional<armnn::DataType> dataType, std::string& outReasonIfUnsupported)
bool SupportsSubTensors() const override
std::unique_ptr<armnn::ITensorHandle> CreateSubTensorHandle(armnn::ITensorHandle& parent, armnn::TensorShape const& subTensorShape, unsigned int const* subTensorOrigin) const override
std::unique_ptr<armnn::ITensorHandle> CreateTensorHandle(const armnn::TensorInfo& tensorInfo, const bool IsMemoryManaged = true) const override
std::unique_ptr<armnn::IWorkload> CreateInput(const armnn::InputQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
std::unique_ptr<armnn::IWorkload> CreateOutput(const armnn::OutputQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
std::unique_ptr<armnn::IWorkload> CreateAddition(const armnn::AdditionQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
std::unique_ptr<armnn::IWorkload> CreateWorkload(armnn::LayerType type, const armnn::QueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const override
    Backends should implement their own CreateWorkload function with a switch statement.

Referenced armnn symbols:

armnn::InputQueueDescriptor: typedef of MemCopyQueueDescriptor.
armnn::OutputQueueDescriptor: typedef of MemCopyQueueDescriptor.
armnn::IConnectableLayer: interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
armnn::LayerType: enumeration of layer types; when adding a new layer, adapt also the LastLayer enum value in the enum class LayerType.
ARMNN_DEPRECATED_MSG(message)
void IgnoreUnused(Ts&&...)

Copyright (c) 2021 ARM Limited and Contributors.
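As a rough illustration of how these members fit together, the sketch below drives the factory directly, the way a small unit test might: it creates tensor handles for two inputs and one output, fills in an AdditionQueueDescriptor and WorkloadInfo, and asks the factory for the addition workload. The include paths, the absence of a namespace qualifier on SampleDynamicWorkloadFactory, and the unmanaged-allocation path (IsMemoryManaged = false followed by Allocate()) are assumptions, not taken from the sample backend; uploading input data via ITensorHandle::Map()/Unmap() is omitted.

#include <armnn/Tensor.hpp>
#include <armnn/backends/ITensorHandle.hpp>
#include <armnn/backends/IWorkload.hpp>
#include <armnn/backends/WorkloadData.hpp>   // armnn::AdditionQueueDescriptor
#include <armnn/backends/WorkloadInfo.hpp>   // armnn::WorkloadInfo
#include "SampleDynamicWorkloadFactory.hpp"  // assumed header name for the class above

void RunSampleAddition()
{
    SampleDynamicWorkloadFactory factory;

    // One shape and data type shared by both inputs and the output; values are illustrative.
    armnn::TensorInfo tensorInfo(armnn::TensorShape({ 1, 4 }), armnn::DataType::Float32);

    // Ask the factory for unmanaged handles and allocate their storage directly.
    std::unique_ptr<armnn::ITensorHandle> input0 = factory.CreateTensorHandle(tensorInfo, false);
    std::unique_ptr<armnn::ITensorHandle> input1 = factory.CreateTensorHandle(tensorInfo, false);
    std::unique_ptr<armnn::ITensorHandle> output = factory.CreateTensorHandle(tensorInfo, false);
    input0->Allocate();
    input1->Allocate();
    output->Allocate();

    // Wire the handles and the tensor metadata into the descriptor/info pair.
    armnn::AdditionQueueDescriptor descriptor;
    descriptor.m_Inputs  = { input0.get(), input1.get() };
    descriptor.m_Outputs = { output.get() };

    armnn::WorkloadInfo info;
    info.m_InputTensorInfos  = { tensorInfo, tensorInfo };
    info.m_OutputTensorInfos = { tensorInfo };

    // Build and run the addition workload.
    std::unique_ptr<armnn::IWorkload> addition = factory.CreateAddition(descriptor, info);
    addition->Execute();
}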