Arm NN 24.08
#include <arm_compute/core/CL/CLCompileContext.h>

static bool IsLayerSupported(const Layer& layer, Optional<DataType> dataType,
                             std::string& outReasonIfUnsupported);

std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle&, TensorShape const&,
                                                     unsigned int const*) const override;

std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                  const bool IsMemoryManaged = true) const override;

std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                  DataLayout dataLayout, // overload taking an explicit data layout
                                                  const bool IsMemoryManaged = true) const override;

template <typename QueueDescriptorType>
// ... private helper declaration elided in this extract ...

arm_compute::CLCompileContext m_CLCompileContext;
Referenced declarations and related symbols:

std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle&, TensorShape const&, unsigned int const*) const override
static bool IsLayerSupported(const Layer& layer, Optional<DataType> dataType, std::string& outReasonIfUnsupported)
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo, const bool IsMemoryManaged = true) const override
bool SupportsSubTensors() const override
void InitializeCLCompileContext()
const BackendId& GetBackendId() const override
std::unique_ptr<IWorkload> CreateWorkload(LayerType type, const QueueDescriptor& descriptor, const WorkloadInfo& info) const override
    Backends should implement their own CreateWorkload function with a switch statement over the layer type; see the second sketch after this list.
IWorkload: Workload interface to enqueue a layer computation.
WorkloadInfo: Contains information about the TensorInfos of a layer.
LayerType: When adding a new layer, also adapt the LastLayer enum value in the enum class LayerType; see the first sketch after this list.
#define ARMNN_DEPRECATED_MSG(message)

Copyright (c) 2021 ARM Limited and Contributors.
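
The LayerType note above describes a sentinel convention: the final enumerator LastLayer aliases the last real layer entry, so generic code can iterate or size tables over every layer type. The following is a minimal, self-contained sketch of that convention; the layer names are an illustrative subset, not the real Arm NN list.

#include <cstddef>

// Illustrative subset only; the real armnn::LayerType lists many more layers.
enum class LayerType
{
    Activation,
    Addition,
    Convolution2d,
    FullyConnected,
    // When adding a new layer, add it above and repoint LastLayer at it so that
    // code iterating over [FirstLayer, LastLayer] keeps covering every entry.
    FirstLayer = Activation,
    LastLayer  = FullyConnected
};

constexpr std::size_t LayerTypeCount =
    static_cast<std::size_t>(LayerType::LastLayer) -
    static_cast<std::size_t>(LayerType::FirstLayer) + 1;   // 4 in this sketch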
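
The CreateWorkload note says each backend should implement its own CreateWorkload as a switch over the layer type, dispatching to that backend's workload classes; it also illustrates the IWorkload brief above, where a workload exposes an Execute call that enqueues the layer computation. The sketch below shows only that shape; QueueDescriptor, WorkloadInfo, and the two workload classes are toy placeholders, not the real CL workloads.

#include <memory>

// Toy stand-ins for the types named in the signatures above; illustrative only.
enum class LayerType { Activation, Addition, LastLayer = Addition };
struct QueueDescriptor { /* inputs, outputs, layer parameters ... */ };
struct WorkloadInfo    { /* TensorInfos of the layer ... */ };

struct IWorkload
{
    virtual ~IWorkload() = default;
    virtual void Execute() const = 0;   // enqueues the layer computation
};

struct ActivationWorkload final : IWorkload { void Execute() const override {} };
struct AdditionWorkload   final : IWorkload { void Execute() const override {} };

// Switch-based factory method, the shape the note above recommends per backend.
std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
                                          const QueueDescriptor& /*descriptor*/,
                                          const WorkloadInfo& /*info*/)
{
    switch (type)
    {
        case LayerType::Activation: return std::make_unique<ActivationWorkload>();
        case LayerType::Addition:   return std::make_unique<AdditionWorkload>();
        default:                    return nullptr;   // layer type not supported by this backend
    }
}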