ArmNN 24.08
ClBackend Class Reference

#include <ClBackend.hpp>

Inheritance diagram for ClBackend:
Collaboration diagram for ClBackend:

Classes

class  ClBackendCustomAllocatorMemoryRegion
 
class  ClBackendCustomAllocatorWrapper
 

Public Member Functions

 ClBackend ()
 
 ClBackend (std::shared_ptr< ICustomAllocator > allocator)
 
 ~ClBackend ()=default
 
const BackendId & GetId () const override
 
IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager () const override
 
IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory (const IBackendInternal::IMemoryManagerSharedPtr &memoryManager=nullptr) const override
 
IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory (TensorHandleFactoryRegistry &registry) const override
 
IWorkloadFactoryPtr CreateWorkloadFactory (const IMemoryManagerSharedPtr &memoryManager, const ModelOptions &modelOptions) const override
 
IWorkloadFactoryPtr CreateWorkloadFactory (class TensorHandleFactoryRegistry &tensorHandleFactoryRegistry, const ModelOptions &modelOptions) const override
 
IWorkloadFactoryPtr CreateWorkloadFactory (class TensorHandleFactoryRegistry &tensorHandleFactoryRegistry, const ModelOptions &modelOptions, MemorySourceFlags inputFlags, MemorySourceFlags outputFlags) const override
 
std::vector< ITensorHandleFactory::FactoryId > GetHandleFactoryPreferences () const override
 (Optional) Returns a vector of supported TensorHandleFactory ids in preference order. More...
 
void RegisterTensorHandleFactories (TensorHandleFactoryRegistry &registry) override
 (Optional) Register TensorHandleFactories. Either this method, or CreateMemoryManager() together with IWorkloadFactory::CreateTensor() and IWorkloadFactory::CreateSubtensor(), must be implemented. More...
 
void RegisterTensorHandleFactories (TensorHandleFactoryRegistry &registry, MemorySourceFlags inputFlags, MemorySourceFlags outputFlags) override
 (Optional) Register TensorHandleFactories. Either this method, or CreateMemoryManager() together with IWorkloadFactory::CreateTensor() and IWorkloadFactory::CreateSubtensor(), must be implemented. More...
 
IBackendInternal::IBackendContextPtr CreateBackendContext (const IRuntime::CreationOptions &) const override
 Create the runtime context of the backend. More...
 
IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext (const IRuntime::CreationOptions &, IBackendProfilingPtr &backendProfiling) override
 Create context specifically used for profiling interaction from backends. More...
 
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport () const override
 
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport (const ModelOptions &modelOptions) const override
 
OptimizationViews OptimizeSubgraphView (const SubgraphView &subgraph, const ModelOptions &modelOptions) const override
 
IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext (const ModelOptions &modelOptions) const override
 
std::unique_ptr< ICustomAllocator > GetDefaultAllocator () const override
 Returns the default memory allocator for the backend. More...
 
BackendCapabilities GetCapabilities () const override
 Returns a BackendCapability if the backend lists the capability; the BackendCapability must then be inspected to check whether it is supported. Otherwise returns an EmptyOptional if the BackendCapability is unlisted. More...
 
virtual bool UseCustomMemoryAllocator (std::shared_ptr< ICustomAllocator > allocator, armnn::Optional< std::string & > errMsg) override
 Signals the backend to use a custom memory allocator provided by the user. More...
 
virtual unsigned int GetNumberOfCacheFiles () const override
 Returns the number of files cached if backend supports caching. More...
 
- Public Member Functions inherited from IBackendInternal
 ~IBackendInternal () override=default
 Allow backends created by the factory function to be destroyed through IBackendInternal. More...
 
virtual OptimizationViews OptimizeSubgraphView (const SubgraphView &subgraph) const
 
bool SupportsTensorAllocatorAPI () const
 
ITensorHandleFactory::FactoryId GetBackwardCompatibleFavoriteHandleFactory ()
 
virtual ExecutionData CreateExecutionData (WorkingMemDescriptor &workingMemDescriptor) const
 Returns ExecutionData for the backend. More...
 
virtual void UpdateExecutionData (ExecutionData &executionData, WorkingMemDescriptor &workingMemDescriptor) const
 Update the ExecutionData for a layer. More...
 

Static Public Member Functions

static const BackendId & GetIdStatic ()
 
- Static Public Member Functions inherited from IBackendInternal
static constexpr BackendVersion GetApiVersion ()
 Returns the version of the Backend API. More...
 

Public Attributes

std::shared_ptr< ClBackendCustomAllocatorWrapper > m_CustomAllocator
 
bool m_UsingCustomAllocator = false
 

Additional Inherited Members

- Public Types inherited from IBackendInternal
using IWorkloadFactoryPtr = std::unique_ptr< IWorkloadFactory >
 
using IBackendContextPtr = std::unique_ptr< IBackendContext >
 
using IBackendProfilingContextPtr = std::shared_ptr< arm::pipe::IBackendProfilingContext >
 This is the bridge between backend and backend profiling; we'll keep it in the backend namespace. More...
 
using IBackendProfilingPtr = std::unique_ptr< arm::pipe::IBackendProfiling >
 
using ILayerSupportSharedPtr = std::shared_ptr< ILayerSupport >
 
using IBackendSpecificModelContextPtr = std::shared_ptr< IBackendModelContext >
 
using IMemoryManagerUniquePtr = std::unique_ptr< IMemoryManager >
 
using IMemoryManagerSharedPtr = std::shared_ptr< IMemoryManager >
 
- Protected Member Functions inherited from IBackendInternal
 IBackendInternal ()=default
 Creation must be done through a specific backend interface. More...
 
- Protected Member Functions inherited from IBackend
 IBackend ()
 
virtual ~IBackend ()
 

Detailed Description

Definition at line 24 of file ClBackend.hpp.

Constructor & Destructor Documentation

◆ ClBackend() [1/2]

ClBackend ( )
inline

Definition at line 27 of file ClBackend.hpp.

ClBackend() : m_CustomAllocator(nullptr) {};

◆ ClBackend() [2/2]

ClBackend ( std::shared_ptr< ICustomAllocator > allocator)
inline

Definition at line 28 of file ClBackend.hpp.

{
    std::string err;
    UseCustomMemoryAllocator(allocator, err);
}

References ClBackend::UseCustomMemoryAllocator().

◆ ~ClBackend()

~ClBackend ( )
default

Member Function Documentation

◆ CreateBackendContext()

IBackendInternal::IBackendContextPtr CreateBackendContext ( const IRuntime::CreationOptions & ) const
overridevirtual

Create the runtime context of the backend.

Implementations may return a default-constructed IBackendContextPtr if no context is needed at runtime. Implementations must throw BackendUnavailableException if the backend cannot be used (for example, necessary accelerator hardware is not present). The default implementation always returns a default-constructed pointer.

Reimplemented from IBackendInternal.

Definition at line 235 of file ClBackend.cpp.

{
    return IBackendContextPtr{new ClBackendContext{options}};
}
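
The context is instantiated when an IRuntime is created with the GpuAcc backend available. A minimal sketch of triggering this from application code (standard runtime setup; the option shown is just one of several in IRuntime::CreationOptions):

#include <armnn/IRuntime.hpp>

int main()
{
    // Creating the runtime constructs a backend context (here a ClBackendContext)
    // for every registered backend. BackendUnavailableException is thrown if a
    // required accelerator is not present.
    armnn::IRuntime::CreationOptions options;
    options.m_EnableGpuProfiling = false;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
    return 0;
}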

◆ CreateBackendProfilingContext()

IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext ( const IRuntime::CreationOptions & creationOptions,
IBackendProfilingPtr & backendProfiling 
)
overridevirtual

Create context specifically used for profiling interaction from backends.

Reimplemented from IBackendInternal.

Definition at line 240 of file ClBackend.cpp.

{
    return IBackendProfilingContextPtr{};
}

◆ CreateBackendSpecificModelContext()

IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext ( const ModelOptions & modelOptions) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 246 of file ClBackend.cpp.

{
    return IBackendSpecificModelContextPtr{new ClBackendModelContext{modelOptions}};
}

Referenced by ClBackend::CreateWorkloadFactory(), ClBackend::GetLayerSupport(), and ClBackend::OptimizeSubgraphView().

◆ CreateMemoryManager()

IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager ( ) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 50 of file ClBackend.cpp.

{
    if (m_UsingCustomAllocator)
    {
        return std::make_unique<ClMemoryManager>(m_CustomAllocator);
    }
    return std::make_unique<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
}

References ClBackend::m_CustomAllocator, and ClBackend::m_UsingCustomAllocator.

◆ CreateWorkloadFactory() [1/5]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( class TensorHandleFactoryRegistry & tensorHandleFactoryRegistry,
const ModelOptions & modelOptions 
) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 101 of file ClBackend.cpp.

{
    std::shared_ptr<ClMemoryManager> memoryManager;
    if (m_UsingCustomAllocator)
    {
        memoryManager = std::make_shared<ClMemoryManager>(m_CustomAllocator);
    }
    else
    {
        memoryManager = std::make_shared<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
    }

    std::unique_ptr<ITensorHandleFactory> factory = std::make_unique<ClTensorHandleFactory>(memoryManager);
    std::unique_ptr<ITensorHandleFactory> importFactory = std::make_unique<ClImportTensorHandleFactory>(
        static_cast<MemorySourceFlags>(MemorySource::Malloc), static_cast<MemorySourceFlags>(MemorySource::Malloc));

    registry.RegisterCopyAndImportFactoryPair(factory->GetId(), importFactory->GetId());
    registry.RegisterCopyAndImportFactoryPair(importFactory->GetId(), factory->GetId());

    registry.RegisterMemoryManager(memoryManager);
    registry.RegisterFactory(std::move(factory));
    registry.RegisterFactory(std::move(importFactory));

    return std::make_unique<ClWorkloadFactory>(
        PolymorphicPointerDowncast<ClMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
}

References ClBackend::CreateBackendSpecificModelContext(), ClBackend::m_CustomAllocator, ClBackend::m_UsingCustomAllocator, armnn::Malloc, TensorHandleFactoryRegistry::RegisterCopyAndImportFactoryPair(), TensorHandleFactoryRegistry::RegisterFactory(), and TensorHandleFactoryRegistry::RegisterMemoryManager().

◆ CreateWorkloadFactory() [2/5]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( class TensorHandleFactoryRegistry & tensorHandleFactoryRegistry,
const ModelOptions & modelOptions,
MemorySourceFlags  inputFlags,
MemorySourceFlags  outputFlags 
) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 129 of file ClBackend.cpp.

{
    // To allow force import if inputFlags/outputFlags are Undefined, set it as Malloc
    if (inputFlags == static_cast<MemorySourceFlags>(MemorySource::Undefined))
    {
        inputFlags = static_cast<MemorySourceFlags>(MemorySource::Malloc);
    }
    if (outputFlags == static_cast<MemorySourceFlags>(MemorySource::Undefined))
    {
        outputFlags = static_cast<MemorySourceFlags>(MemorySource::Malloc);
    }
    std::shared_ptr<ClMemoryManager> memoryManager;
    if (m_UsingCustomAllocator)
    {
        memoryManager = std::make_shared<ClMemoryManager>(m_CustomAllocator);
    }
    else
    {
        memoryManager = std::make_shared<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
    }

    std::unique_ptr<ITensorHandleFactory> factory = std::make_unique<ClTensorHandleFactory>(memoryManager);
    std::unique_ptr<ITensorHandleFactory> importFactory = std::make_unique<ClImportTensorHandleFactory>(
        inputFlags, outputFlags);

    registry.RegisterCopyAndImportFactoryPair(factory->GetId(), importFactory->GetId());
    registry.RegisterCopyAndImportFactoryPair(importFactory->GetId(), factory->GetId());

    registry.RegisterMemoryManager(memoryManager);
    registry.RegisterFactory(std::move(factory));
    registry.RegisterFactory(std::move(importFactory));

    return std::make_unique<ClWorkloadFactory>(
        PolymorphicPointerDowncast<ClMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
}

References ClBackend::CreateBackendSpecificModelContext(), ClBackend::m_CustomAllocator, ClBackend::m_UsingCustomAllocator, armnn::Malloc, TensorHandleFactoryRegistry::RegisterCopyAndImportFactoryPair(), TensorHandleFactoryRegistry::RegisterFactory(), TensorHandleFactoryRegistry::RegisterMemoryManager(), and armnn::Undefined.

◆ CreateWorkloadFactory() [3/5]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( const IBackendInternal::IMemoryManagerSharedPtr & memoryManager = nullptr) const
overridevirtual

Implements IBackendInternal.

Definition at line 59 of file ClBackend.cpp.

{
    return std::make_unique<ClWorkloadFactory>(
        PolymorphicPointerDowncast<ClMemoryManager>(memoryManager));
}

◆ CreateWorkloadFactory() [4/5]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( const IMemoryManagerSharedPtr & memoryManager,
const ModelOptions & modelOptions 
) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 66 of file ClBackend.cpp.

{
    return std::make_unique<ClWorkloadFactory>(
        PolymorphicPointerDowncast<ClMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
}

References ClBackend::CreateBackendSpecificModelContext().

◆ CreateWorkloadFactory() [5/5]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( TensorHandleFactoryRegistry & registry) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 73 of file ClBackend.cpp.

{
    std::shared_ptr<ClMemoryManager> memoryManager;
    if (m_UsingCustomAllocator)
    {
        memoryManager = std::make_shared<ClMemoryManager>(m_CustomAllocator);
    }
    else
    {
        memoryManager = std::make_shared<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
    }

    std::unique_ptr<ITensorHandleFactory> factory = std::make_unique<ClTensorHandleFactory>(memoryManager);
    std::unique_ptr<ITensorHandleFactory> importFactory = std::make_unique<ClImportTensorHandleFactory>(
        static_cast<MemorySourceFlags>(MemorySource::Malloc), static_cast<MemorySourceFlags>(MemorySource::Malloc));

    registry.RegisterCopyAndImportFactoryPair(factory->GetId(), importFactory->GetId());
    registry.RegisterCopyAndImportFactoryPair(importFactory->GetId(), factory->GetId());

    registry.RegisterMemoryManager(memoryManager);
    registry.RegisterFactory(std::move(factory));
    registry.RegisterFactory(std::move(importFactory));

    return std::make_unique<ClWorkloadFactory>(
        PolymorphicPointerDowncast<ClMemoryManager>(memoryManager));
}

References ClBackend::m_CustomAllocator, ClBackend::m_UsingCustomAllocator, armnn::Malloc, TensorHandleFactoryRegistry::RegisterCopyAndImportFactoryPair(), TensorHandleFactoryRegistry::RegisterFactory(), and TensorHandleFactoryRegistry::RegisterMemoryManager().

◆ GetCapabilities()

BackendCapabilities GetCapabilities ( ) const
overridevirtual

Returns a BackendCapability if the backend lists the capability; the BackendCapability must then be inspected to check whether it is supported. Otherwise returns an EmptyOptional if the BackendCapability is unlisted.

Reimplemented from IBackendInternal.

Definition at line 275 of file ClBackend.cpp.

{
    // add new capabilities here..
    return BackendCapabilities ("GpuAcc",
    {
        {"NonConstWeights", true},
        {"AsyncExecution", false},
        {"ProtectedContentAllocation", true},
        {"ConstantTensorsAsInputs", true},
        {"PreImportIOTensors", false},
        {"ExternallyManagedMemory", true},
        {"MultiAxisPacking", false},
        {"SingleAxisPacking", true},
        {"HasFp16", arm_compute::CLKernelLibrary::get().fp16_supported()}
    });
}
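
A hedged sketch of inspecting these entries from application code, assuming the armnn::HasCapability helper declared in armnn/BackendHelper.hpp:

#include <armnn/BackendHelper.hpp>
#include <iostream>

int main()
{
    // Look up a capability by name against the GpuAcc backend's list.
    // Names the backend does not list simply report as unsupported.
    armnn::BackendId gpuAcc("GpuAcc");
    if (armnn::HasCapability("NonConstWeights", gpuAcc))
    {
        std::cout << "GpuAcc supports non-constant weights\n";
    }
    return 0;
}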

◆ GetDefaultAllocator()

std::unique_ptr< ICustomAllocator > GetDefaultAllocator ( ) const
overridevirtual

Returns the default memory allocator for the backend.

Returns
- Returns unique pointer to the Default Allocator of the Backend

Reimplemented from IBackendInternal.

Definition at line 270 of file ClBackend.cpp.

{
    return std::make_unique<ClBackendDefaultAllocator>();
}

◆ GetHandleFactoryPreferences()

std::vector< ITensorHandleFactory::FactoryId > GetHandleFactoryPreferences ( ) const
overridevirtual

(Optional) Returns a vector of supported TensorHandleFactory ids in preference order.

Reimplemented from IBackendInternal.

Definition at line 169 of file ClBackend.cpp.

{
    return std::vector<ITensorHandleFactory::FactoryId> {ClTensorHandleFactory::GetIdStatic(),
                                                         ClImportTensorHandleFactory::GetIdStatic()};
}

References ClTensorHandleFactory::GetIdStatic(), and ClImportTensorHandleFactory::GetIdStatic().

◆ GetId()

const BackendId& GetId ( ) const
inlineoverridevirtual

Implements IBackend.

Definition at line 36 of file ClBackend.hpp.

{ return GetIdStatic(); }

References ClBackend::GetIdStatic().

◆ GetIdStatic()

const BackendId & GetIdStatic ( )
static

Definition at line 44 of file ClBackend.cpp.

{
    static const BackendId s_Id{ClBackendId()};
    return s_Id;
}

References armnn::ClBackendId().

Referenced by ClBackend::GetId().

◆ GetLayerSupport() [1/2]

IBackendInternal::ILayerSupportSharedPtr GetLayerSupport ( ) const
overridevirtual

Implements IBackendInternal.

Definition at line 252 of file ClBackend.cpp.

{
    static ILayerSupportSharedPtr layerSupport
    {
        new ClLayerSupport(IBackendInternal::IBackendSpecificModelContextPtr{})
    };
    return layerSupport;
}

◆ GetLayerSupport() [2/2]

IBackendInternal::ILayerSupportSharedPtr GetLayerSupport ( const ModelOptions & modelOptions) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 261 of file ClBackend.cpp.

{
    static ILayerSupportSharedPtr layerSupport
    {
        new ClLayerSupport(CreateBackendSpecificModelContext(modelOptions))
    };
    return layerSupport;
}

References ClBackend::CreateBackendSpecificModelContext().
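
Applications usually reach this interface through the BackendHelper wrappers rather than calling it directly. A sketch, assuming armnn::GetILayerSupportByBackendId and LayerSupportHandle from armnn/BackendHelper.hpp:

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

int main()
{
    armnn::LayerSupportHandle gpuAcc = armnn::GetILayerSupportByBackendId("GpuAcc");
    if (gpuAcc.IsBackendRegistered())
    {
        // Ask whether a Float32 activation layer is supported on GpuAcc.
        armnn::TensorInfo info({1, 16, 16, 8}, armnn::DataType::Float32);
        armnn::ActivationDescriptor desc; // default activation function
        bool supported = gpuAcc.IsActivationSupported(info, info, desc);
        (void)supported;
    }
    return 0;
}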

◆ GetNumberOfCacheFiles()

virtual unsigned int GetNumberOfCacheFiles ( ) const
inlineoverridevirtual

Returns the number of files cached if backend supports caching.

Returns
- Returns 0 if the backend does not support caching, otherwise the number of files cached

Reimplemented from IBackendInternal.

Definition at line 94 of file ClBackend.hpp.

{ return 1; }
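
The single cache file corresponds to the GpuAcc cached-network feature, driven by model options at optimize time. A sketch (the MakeGpuAccCacheOptions helper and the file path are illustrative; the option names are those parsed by ClBackendModelContext):

#include <armnn/BackendOptions.hpp>
#include <armnn/INetwork.hpp>

armnn::OptimizerOptionsOpaque MakeGpuAccCacheOptions()
{
    // Ask the GpuAcc backend to serialise its compiled OpenCL program to one file.
    armnn::BackendOptions cacheOptions("GpuAcc",
    {
        { "SaveCachedNetwork", true },
        { "CachedNetworkFilePath", "/tmp/gpuacc.cache" } // placeholder path
    });

    armnn::OptimizerOptionsOpaque optimizerOptions;
    optimizerOptions.AddModelOption(cacheOptions);
    return optimizerOptions;
}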

◆ OptimizeSubgraphView()

OptimizationViews OptimizeSubgraphView ( const SubgraphView & subgraph,
const ModelOptions & modelOptions 
) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 292 of file ClBackend.cpp.

{
    OptimizationViews optimizationViews(modelOptions);

    auto it = subgraph.end();
    bool isFastMathEnabled = false;
    std::map<LayerGuid, Layer*> untouched;

    while (it != subgraph.begin())
    {
        --it;
        Layer& base = *(PolymorphicDowncast<Layer*>(*it));
        untouched.insert({base.GetGuid(), &base});
    }

    it = subgraph.end();
#if defined(ARMCOMPUTECL_ENABLED)
    IBackendInternal::IBackendSpecificModelContextPtr modelContextPtr = CreateBackendSpecificModelContext(modelOptions);

    if (modelContextPtr)
    {
        auto clModelOptions = dynamic_cast<ClBackendModelContext*>(modelContextPtr.get());
        if (clModelOptions)
        {
            isFastMathEnabled = clModelOptions->IsFastMathEnabled();
        }
    }
#endif
    while (it != subgraph.begin())
    {
        --it;
        Layer& base = *(PolymorphicDowncast<Layer*>(*it));

        // Fuse activation into previous layer if supported by backend
        if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
            || base.GetType() == LayerType::BatchNormalization || base.GetType() == LayerType::FullyConnected
            || base.GetType() == LayerType::Addition || base.GetType() == LayerType::Multiplication
            || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division
            || base.GetType() == LayerType::ElementwiseBinary)
            && (base.GetAdditionalInformation<ActivationDescriptor>() == nullptr))
        {
            for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
            {
                if (output->GetNumConnections() == 1)
                {
                    for (auto&& childInput : output->GetConnections())
                    {
                        if ((childInput->GetOwningLayer().GetType() == LayerType::Activation) &&
                            (checkDataTypeInputandOutput(childInput->GetOwningLayer())))
                        {
                            Layer& child = childInput->GetOwningLayer();

                            auto* activationLayer = PolymorphicDowncast<ActivationLayer*>(&child);

                            const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") +
                                                     base.GetName();

                            // Get params from activation layer
                            ActivationDescriptor activationDesc = activationLayer->GetParameters();

                            if (base.GetType() == LayerType::Convolution2d)
                            {
                                Convolution2dLayer* baseLayer = PolymorphicDowncast<Convolution2dLayer*>(&base);

                                Optional<TensorInfo> biases;

                                if (baseLayer->GetParameters().m_BiasEnabled)
                                {
                                    biases = baseLayer->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
                                }

                                arm_compute::Status status = ClConvolution2dWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetParameters(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        biases,
                                        isFastMathEnabled,
                                        &activationDesc);

                                if (status)
                                {
                                    FuseConvolution2dLayer<Convolution2dLayer>(optimizationViews,
                                                                               baseLayer,
                                                                               activationLayer,
                                                                               activationDesc,
                                                                               name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::DepthwiseConvolution2d)
                            {
                                DepthwiseConvolution2dLayer* baseLayer =
                                        PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&base);

                                Optional<TensorInfo> biases;

                                if (baseLayer->GetParameters().m_BiasEnabled)
                                {
                                    biases = baseLayer->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
                                }

                                arm_compute::Status status = ClDepthwiseConvolutionWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetParameters(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        biases,
                                        &activationDesc);

                                if (status)
                                {
                                    FuseDepthwiseConvolution2dLayer<DepthwiseConvolution2dLayer>(optimizationViews,
                                                                                                 baseLayer,
                                                                                                 activationLayer,
                                                                                                 activationDesc,
                                                                                                 name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::FullyConnected)
                            {
                                FullyConnectedLayer* baseLayer = PolymorphicDowncast<FullyConnectedLayer*>(&base);
                                FullyConnectedDescriptor descriptor = baseLayer->GetParameters();

                                // As bias is optional only try to get TensorInfo from input if bias is enabled.
                                Optional<TensorInfo> biases;
                                if (descriptor.m_BiasEnabled)
                                {
                                    biases = baseLayer->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
                                }

                                arm_compute::Status status = ClFullyConnectedWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        biases,
                                        baseLayer->GetParameters(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseFullyConnectedLayer<FullyConnectedLayer>(optimizationViews,
                                                                                 baseLayer,
                                                                                 activationLayer,
                                                                                 activationDesc,
                                                                                 name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::BatchNormalization)
                            {
                                BatchNormalizationLayer* baseLayer =
                                        PolymorphicDowncast<BatchNormalizationLayer*>(&base);

                                arm_compute::Status status = ClBatchNormalizationValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->m_Mean->GetTensorInfo(),
                                        baseLayer->m_Variance->GetTensorInfo(),
                                        baseLayer->m_Beta->GetTensorInfo(),
                                        baseLayer->m_Gamma->GetTensorInfo(),
                                        baseLayer->GetParameters(),
                                        &activationDesc);

                                if (status)
                                {
                                    BatchNormalizationLayer* replacementLayer =
                                            FuseBatchNormalizationLayer<BatchNormalizationLayer>(optimizationViews,
                                                                                                 baseLayer,
                                                                                                 activationLayer,
                                                                                                 activationDesc,
                                                                                                 name);

                                    replacementLayer->m_Beta = std::move(baseLayer->m_Beta);
                                    replacementLayer->m_Gamma = std::move(baseLayer->m_Gamma);
                                    replacementLayer->m_Mean = std::move(baseLayer->m_Mean);
                                    replacementLayer->m_Variance = std::move(baseLayer->m_Variance);

                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Addition)
                            {
                                AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);

                                arm_compute::Status status = ClAdditionValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseAdditionLayer<AdditionLayer>(optimizationViews,
                                                                     baseLayer,
                                                                     activationLayer,
                                                                     activationDesc,
                                                                     name);

                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Division)
                            {
                                DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);

                                arm_compute::Status status = ClDivisionWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseDivisionLayer<DivisionLayer>(optimizationViews,
                                                                     baseLayer,
                                                                     activationLayer,
                                                                     activationDesc,
                                                                     name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Multiplication)
                            {
                                MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);

                                arm_compute::Status status = ClMultiplicationWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseMultiplicationLayer<MultiplicationLayer>(optimizationViews,
                                                                                 baseLayer,
                                                                                 activationLayer,
                                                                                 activationDesc,
                                                                                 name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Subtraction)
                            {
                                SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);

                                arm_compute::Status status = ClSubtractionValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseSubtractionLayer<SubtractionLayer>(optimizationViews,
                                                                           baseLayer,
                                                                           activationLayer,
                                                                           activationDesc,
                                                                           name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::ElementwiseBinary)
                            {
                                ElementwiseBinaryLayer* baseLayer = PolymorphicDowncast<ElementwiseBinaryLayer*>(&base);

                                if (baseLayer->GetParameters().m_Operation == BinaryOperation::Add)
                                {
                                    arm_compute::Status status = ClAdditionValidate(
                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                            &activationDesc);

                                    if (status)
                                    {
                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
                                                                                           baseLayer,
                                                                                           activationLayer,
                                                                                           activationDesc,
                                                                                           BinaryOperation::Add,
                                                                                           name);
                                        untouched.erase(baseLayer->GetGuid());
                                        untouched.erase(activationLayer->GetGuid());
                                    }
                                }
                                else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Div)
                                {
                                    arm_compute::Status status = ClDivisionWorkloadValidate(
                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                            &activationDesc);

                                    if (status)
                                    {
                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
                                                                                           baseLayer,
                                                                                           activationLayer,
                                                                                           activationDesc,
                                                                                           BinaryOperation::Div,
                                                                                           name);
                                        untouched.erase(baseLayer->GetGuid());
                                        untouched.erase(activationLayer->GetGuid());
                                    }
                                }
                                else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Mul)
                                {
                                    arm_compute::Status status = ClMultiplicationWorkloadValidate(
                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                            &activationDesc);

                                    if (status)
                                    {
                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
                                                                                           baseLayer,
                                                                                           activationLayer,
                                                                                           activationDesc,
                                                                                           BinaryOperation::Mul,
                                                                                           name);
                                        untouched.erase(baseLayer->GetGuid());
                                        untouched.erase(activationLayer->GetGuid());
                                    }
                                }
                                else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Sub)
                                {
                                    arm_compute::Status status = ClSubtractionValidate(
                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                            &activationDesc);

                                    if (status)
                                    {
                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
                                                                                           baseLayer,
                                                                                           activationLayer,
                                                                                           activationDesc,
                                                                                           BinaryOperation::Sub,
                                                                                           name);
                                        untouched.erase(baseLayer->GetGuid());
                                        untouched.erase(activationLayer->GetGuid());
                                    }
                                }
                                // No fusion available for other BinaryOperations
                            }
                        }
                    }
                }
            }
        }

        // Separate reduce layer with multiple axes into multiple reduce layers with 1 axis.
        if (base.GetType() == LayerType::Reduce)
        {
            ReduceLayer* baseLayer = PolymorphicDowncast<ReduceLayer*>(&base);
            ReduceDescriptor reduceDescriptor = baseLayer->GetParameters();

            if (!reduceDescriptor.m_vAxis.empty() && reduceDescriptor.m_vAxis.size() > 1)
            {
                // Add new layers to the graph and connect them.
                std::vector<IConnectableLayer*> layers = ChainReduceLayers<ReduceLayer>(optimizationViews,
                                                                                        baseLayer,
                                                                                        reduceDescriptor);

                // Replace existing baselayer with new subgraph.
                ReplaceLayers<ReduceLayer>(optimizationViews, baseLayer, layers);
                untouched.erase(baseLayer->GetGuid());
            }
        }

        // Remove Reshape where possible
        if (base.GetType() == LayerType::Reshape)
        {
            ReshapeLayer* baseLayer = PolymorphicDowncast<ReshapeLayer*>(&base);

            // Cannot remove a Reshape if it's connected to any layer that has an NCHW layout
            if (ConnectedToLayerWithNCHW(baseLayer))
            {
                continue;
            }
            RemoveReshapeLayer(baseLayer, untouched, optimizationViews);
        }

        // Special case to fuse padding into average pooling 2d for quantized datatype.
        // Required to be done as a backend specific optimization as Neon does not support this special case.
        if (base.GetType() == LayerType::Pooling2d)
        {
            Pooling2dLayer* baseLayer = PolymorphicDowncast<Pooling2dLayer*>(&base);
            Pooling2dDescriptor poolingDescriptor = baseLayer->GetParameters();

            if (baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer().GetType() == LayerType::Pad)
            {
                PadLayer* padLayer = PolymorphicDowncast<PadLayer*>(
                        &baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer());
                if (padLayer->GetOutputSlot(0).GetNumConnections() == 1 &&
                    optimizations::pad_fold::TryFoldPadIntoLayer2d(padLayer->GetParameters(),
                                                                   poolingDescriptor,
                                                                   padLayer->GetOutputSlot().GetTensorInfo(),
                                                                   true))
                {
                    FoldPadIntoAveragePool2d<Pooling2dLayer>(optimizationViews, baseLayer,
                                                             poolingDescriptor, padLayer);
                    untouched.erase(baseLayer->GetGuid());
                    untouched.erase(padLayer->GetGuid());
                }
            }
        }
    }

    if (optimizationViews.GetSubstitutions().empty() && optimizationViews.GetDeletedSubgraphs().empty())
    {
        optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
    }
    else
    {
        ReportUntouchedLayers(optimizationViews, untouched);
    }

    return optimizationViews;
}

References armnn::Activation, armnn::Add, armnn::Addition, OptimizationViews::AddUntouchedSubgraph(), armnn::BatchNormalization, SubgraphView::begin(), Layer::BeginOutputSlots(), armnn::ClAdditionValidate(), armnn::ClBatchNormalizationValidate(), armnn::ClConvolution2dWorkloadValidate(), armnn::ClDepthwiseConvolutionWorkloadValidate(), armnn::ClDivisionWorkloadValidate(), armnn::ClFullyConnectedWorkloadValidate(), armnn::ClMultiplicationWorkloadValidate(), armnn::ClSubtractionValidate(), armnn::ConnectedToLayerWithNCHW(), armnn::Convolution2d, ClBackend::CreateBackendSpecificModelContext(), armnn::DepthwiseConvolution2d, armnn::Div, armnn::Division, armnn::ElementwiseBinary, SubgraphView::end(), Layer::EndOutputSlots(), armnn::FullyConnected, Layer::GetAdditionalInformation(), InputSlot::GetConnectedOutputSlot(), OptimizationViews::GetDeletedSubgraphs(), Layer::GetGuid(), Layer::GetInputSlot(), Layer::GetName(), OutputSlot::GetNumConnections(), Layer::GetOutputSlot(), OutputSlot::GetOwningLayer(), LayerWithParameters< Parameters >::GetParameters(), OptimizationViews::GetSubstitutions(), OutputSlot::GetTensorInfo(), Layer::GetType(), ClBackendModelContext::IsFastMathEnabled(), BatchNormalizationLayer::m_Beta, FullyConnectedDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, BatchNormalizationLayer::m_Gamma, BatchNormalizationLayer::m_Mean, ElementwiseBinaryDescriptor::m_Operation, BatchNormalizationLayer::m_Variance, ReduceDescriptor::m_vAxis, armnn::Mul, armnn::Multiplication, armnn::Pad, armnn::Pooling2d, armnn::Reduce, armnn::RemoveReshapeLayer(), armnn::ReportUntouchedLayers(), armnn::Reshape, armnn::Sub, armnn::Subtraction, and armnn::optimizations::pad_fold::TryFoldPadIntoLayer2d().
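
The isFastMathEnabled flag consulted above comes from the "FastMathEnabled" GpuAcc model option supplied at optimize time. A sketch of enabling it (the OptimizeForGpuAcc helper name is illustrative; BackendOptions, OptimizerOptionsOpaque and armnn::Optimize are standard ArmNN API):

#include <armnn/BackendOptions.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/IRuntime.hpp>

armnn::IOptimizedNetworkPtr OptimizeForGpuAcc(armnn::INetwork& network, armnn::IRuntime& runtime)
{
    // Read by ClBackendModelContext::IsFastMathEnabled() and forwarded to
    // ClConvolution2dWorkloadValidate() during subgraph optimization.
    armnn::BackendOptions gpuAccOptions("GpuAcc", {{ "FastMathEnabled", true }});

    armnn::OptimizerOptionsOpaque options;
    options.AddModelOption(gpuAccOptions);

    return armnn::Optimize(network, { "GpuAcc" }, runtime.GetDeviceSpec(), options);
}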

◆ RegisterTensorHandleFactories() [1/2]

void RegisterTensorHandleFactories ( TensorHandleFactoryRegistry & registry)
overridevirtual

(Optional) Register TensorHandleFactories. Either this method, or CreateMemoryManager() together with IWorkloadFactory::CreateTensor() and IWorkloadFactory::CreateSubtensor(), must be implemented.

Reimplemented from IBackendInternal.

Definition at line 175 of file ClBackend.cpp.

{
    std::shared_ptr<ClMemoryManager> memoryManager;
    if (m_UsingCustomAllocator)
    {
        memoryManager = std::make_shared<ClMemoryManager>(m_CustomAllocator);
    }
    else
    {
        memoryManager = std::make_shared<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
    }

    std::unique_ptr<ITensorHandleFactory> factory = std::make_unique<ClTensorHandleFactory>(memoryManager);
    std::unique_ptr<ITensorHandleFactory> importFactory = std::make_unique<ClImportTensorHandleFactory>(
        static_cast<MemorySourceFlags>(MemorySource::Malloc), static_cast<MemorySourceFlags>(MemorySource::Malloc));

    registry.RegisterCopyAndImportFactoryPair(factory->GetId(), importFactory->GetId());
    registry.RegisterCopyAndImportFactoryPair(importFactory->GetId(), factory->GetId());

    registry.RegisterMemoryManager(memoryManager);
    registry.RegisterFactory(std::move(factory));
    registry.RegisterFactory(std::move(importFactory));
}

References ClBackend::m_CustomAllocator, ClBackend::m_UsingCustomAllocator, armnn::Malloc, TensorHandleFactoryRegistry::RegisterCopyAndImportFactoryPair(), TensorHandleFactoryRegistry::RegisterFactory(), and TensorHandleFactoryRegistry::RegisterMemoryManager().

◆ RegisterTensorHandleFactories() [2/2]

void RegisterTensorHandleFactories ( TensorHandleFactoryRegistry & registry,
MemorySourceFlags  inputFlags,
MemorySourceFlags  outputFlags 
)
overridevirtual

(Optional) Register TensorHandleFactories. Either this method, or CreateMemoryManager() together with IWorkloadFactory::CreateTensor() and IWorkloadFactory::CreateSubtensor(), must be implemented.

Reimplemented from IBackendInternal.

Definition at line 200 of file ClBackend.cpp.

{
    // To allow force import if inputFlags/outputFlags are Undefined, set it as Malloc
    if (inputFlags == static_cast<MemorySourceFlags>(MemorySource::Undefined))
    {
        inputFlags = static_cast<MemorySourceFlags>(MemorySource::Malloc);
    }
    if (outputFlags == static_cast<MemorySourceFlags>(MemorySource::Undefined))
    {
        outputFlags = static_cast<MemorySourceFlags>(MemorySource::Malloc);
    }
    std::shared_ptr<ClMemoryManager> memoryManager;
    if (m_UsingCustomAllocator)
    {
        memoryManager = std::make_shared<ClMemoryManager>(m_CustomAllocator);
    }
    else
    {
        memoryManager = std::make_shared<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
    }

    std::unique_ptr<ITensorHandleFactory> factory = std::make_unique<ClTensorHandleFactory>(memoryManager);
    std::unique_ptr<ITensorHandleFactory> importFactory = std::make_unique<ClImportTensorHandleFactory>(
        inputFlags, outputFlags);

    registry.RegisterCopyAndImportFactoryPair(factory->GetId(), importFactory->GetId());
    registry.RegisterCopyAndImportFactoryPair(importFactory->GetId(), factory->GetId());

    registry.RegisterMemoryManager(memoryManager);
    registry.RegisterFactory(std::move(factory));
    registry.RegisterFactory(std::move(importFactory));
}

References ClBackend::m_CustomAllocator, ClBackend::m_UsingCustomAllocator, armnn::Malloc, TensorHandleFactoryRegistry::RegisterCopyAndImportFactoryPair(), TensorHandleFactoryRegistry::RegisterFactory(), TensorHandleFactoryRegistry::RegisterMemoryManager(), and armnn::Undefined.
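
The inputFlags/outputFlags mirror the memory sources an application declares when loading a network. A hedged sketch, assuming the INetworkProperties constructor that takes an async flag plus input/output MemorySource values (verify against the headers of your release; the LoadWithImportedIO helper name is illustrative):

#include <armnn/IRuntime.hpp>

armnn::NetworkId LoadWithImportedIO(armnn::IRuntime& runtime, armnn::IOptimizedNetworkPtr optNet)
{
    // Declare that input and output buffers will be imported host (Malloc) memory;
    // these choices end up as the MemorySourceFlags seen by the backend.
    armnn::INetworkProperties properties(false,                        // asyncEnabled
                                         armnn::MemorySource::Malloc,  // inputSource
                                         armnn::MemorySource::Malloc); // outputSource
    armnn::NetworkId networkId{};
    std::string errorMessage;
    runtime.LoadNetwork(networkId, std::move(optNet), errorMessage, properties);
    return networkId;
}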

◆ UseCustomMemoryAllocator()

virtual bool UseCustomMemoryAllocator ( std::shared_ptr< ICustomAllocator > allocator,
armnn::Optional< std::string & >  errMsg 
)
inlineoverridevirtual

Signals the backend to use a custom memory allocator provided by the user.

Parameters
allocator - a pointer to the provided ICustomAllocator to use with this backend
errMsg - Optional string variable to return error messages
Returns
- Returns true if switching to custom allocator was successful

Reimplemented from IBackendInternal.

Definition at line 82 of file ClBackend.hpp.

{
    IgnoreUnused(errMsg);
    ARMNN_LOG(info) << "Using Custom Allocator for ClBackend";

    // Set flag to signal the backend to use a custom memory allocator
    m_CustomAllocator = std::make_shared<ClBackendCustomAllocatorWrapper>(std::move(allocator));
    m_UsingCustomAllocator = true;
    return m_UsingCustomAllocator;
}

References ARMNN_LOG, armnn::IgnoreUnused(), armnn::info, ClBackend::m_CustomAllocator, and ClBackend::m_UsingCustomAllocator.

Referenced by ClBackend::ClBackend().
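
A minimal sketch of a user-supplied allocator, assuming the ICustomAllocator interface from armnn/backends/ICustomAllocator.hpp (the SampleAllocator class and its plain host-memory scheme are illustrative; real allocators typically return protected or DMA-capable memory):

#include <armnn/IRuntime.hpp>
#include <armnn/MemorySources.hpp>
#include <armnn/backends/ICustomAllocator.hpp>
#include <cstddef>
#include <cstdlib>
#include <memory>

// Illustrative allocator handing out ordinary aligned host memory.
// MemorySource::Malloc tells the backend the pointers are host-mappable.
class SampleAllocator : public armnn::ICustomAllocator
{
public:
    void* allocate(size_t size, size_t alignment) override
    {
        size_t align = alignment == 0 ? alignof(std::max_align_t) : alignment;
        size_t rounded = ((size + align - 1) / align) * align; // aligned_alloc needs size % align == 0
        return std::aligned_alloc(align, rounded);
    }

    void free(void* ptr) override
    {
        std::free(ptr);
    }

    armnn::MemorySource GetMemorySourceType() override
    {
        return armnn::MemorySource::Malloc;
    }
};

int main()
{
    // Hand the allocator to the GpuAcc backend through the runtime options;
    // the runtime then constructs ClBackend with it, as in the constructor above.
    armnn::IRuntime::CreationOptions options;
    options.m_CustomAllocatorMap["GpuAcc"] = std::make_shared<SampleAllocator>();
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
    return 0;
}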

Member Data Documentation

◆ m_CustomAllocator

std::shared_ptr< ClBackendCustomAllocatorWrapper > m_CustomAllocator

Definition at line 283 of file ClBackend.hpp.

◆ m_UsingCustomAllocator

bool m_UsingCustomAllocator = false

Definition at line 284 of file ClBackend.hpp.

The documentation for this class was generated from the following files:
ClBackend.hpp
ClBackend.cpp