24 template <
typename F32Workload,
typename U8Workload,
typename QueueDescriptorType>
25 std::unique_ptr<IWorkload> TosaRefWorkloadFactory::MakeWorkload(
const QueueDescriptorType& descriptor,
26 const WorkloadInfo& info)
const
28 return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
32 template <DataType ArmnnType>
35 auto checkType = [](
const TensorInfo& tensorInfo) {
return tensorInfo.GetDataType() == ArmnnType;};
36 auto it = std::find_if(std::begin(
info.m_InputTensorInfos), std::end(
info.m_InputTensorInfos), checkType);
37 if (it != std::end(
info.m_InputTensorInfos))
41 it = std::find_if(std::begin(
info.m_OutputTensorInfos), std::end(
info.m_OutputTensorInfos), checkType);
42 if (it != std::end(
info.m_OutputTensorInfos))
50 : m_MemoryManager(memoryManager)
66 std::string& outReasonIfUnsupported)
73 std::string& outReasonIfUnsupported,
80 const bool isMemoryManaged)
const
84 return std::make_unique<TosaRefTensorHandle>(tensorInfo, m_MemoryManager);
88 return std::make_unique<TosaRefTensorHandle>(tensorInfo,
static_cast<unsigned int>(
MemorySource::Malloc));
94 const bool isMemoryManaged)
const
102 return std::make_unique<TosaRefTensorHandle>(tensorInfo, m_MemoryManager);
106 return std::make_unique<TosaRefTensorHandle>(tensorInfo,
static_cast<unsigned int>(
MemorySource::Malloc));
118 auto precompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
119 return std::make_unique<TosaRefPreCompiledWorkload>(*precompiledQueueDescriptor,
info);
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
Backends should implement their own CreateWorkload function with a switch statement.
static bool IsLayerSupported(const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
const BackendId & GetBackendId() const override
Copyright (c) 2021 ARM Limited and Contributors.
constexpr const char * TosaRefBackendId()
bool IsDataType(const WorkloadInfo &info)
void IgnoreUnused(Ts &&...)
LayerType
When adding a new layer, also update the LastLayer enum value in the enum class LayerType below.
std::vector< BackendOptions > ModelOptions
Contains information about TensorInfos of a layer.