276 TEST_CASE(
"RegisterFactories")
278 TestBackendA backendA;
279 TestBackendB backendB;
281 CHECK(backendA.GetHandleFactoryPreferences()[0] ==
"TestHandleFactoryA1");
282 CHECK(backendA.GetHandleFactoryPreferences()[1] ==
"TestHandleFactoryA2");
283 CHECK(backendA.GetHandleFactoryPreferences()[2] ==
"TestHandleFactoryB1");
284 CHECK(backendA.GetHandleFactoryPreferences()[3] ==
"TestHandleFactoryD1");
287 backendA.RegisterTensorHandleFactories(registry);
288 backendB.RegisterTensorHandleFactories(registry);
290 CHECK((registry.GetFactory(
"Non-existing Backend") ==
nullptr));
291 CHECK((registry.GetFactory(
"TestHandleFactoryA1") !=
nullptr));
292 CHECK((registry.GetFactory(
"TestHandleFactoryA2") !=
nullptr));
293 CHECK((registry.GetFactory(
"TestHandleFactoryB1") !=
nullptr));
296 TEST_CASE(
"TensorHandleSelectionStrategy")
298 auto backendA = std::make_unique<TestBackendA>();
299 auto backendB = std::make_unique<TestBackendB>();
300 auto backendC = std::make_unique<TestBackendC>();
301 auto backendD = std::make_unique<TestBackendD>();
304 backendA->RegisterTensorHandleFactories(registry);
305 backendB->RegisterTensorHandleFactories(registry);
306 backendC->RegisterTensorHandleFactories(registry);
307 backendD->RegisterTensorHandleFactories(registry);
310 backends[
"BackendA"] = std::move(backendA);
311 backends[
"BackendB"] = std::move(backendB);
312 backends[
"BackendC"] = std::move(backendC);
313 backends[
"BackendD"] = std::move(backendD);
344 std::vector<std::string> errors;
347 CHECK(result.m_Error ==
false);
348 CHECK(result.m_Warning ==
false);
376 if (layer->
GetType() == LayerType::MemCopy)
381 CHECK(copyCount == 1);
387 if (layer->
GetType() == LayerType::MemImport)
392 CHECK(importCount == 1);
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
int Connect(InputSlot &destination)
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
void SetBackendId(const BackendId &id)
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
A layer user-provided data can be bound to (e.g. inputs, outputs).
void ForEachLayer(Func func) const
This layer represents a softmax operation.
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
OptimizationResult SelectTensorHandleStrategy(Graph &optGraph, BackendsMap &backends, TensorHandleFactoryRegistry &registry, bool importEnabled, Optional< std::vector< std::string > &> errMessages)
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Graph & TopologicalSort()
Sorts layers in topological order and return this.
A SoftmaxDescriptor for the SoftmaxLayer.
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
std::map< BackendId, std::unique_ptr< class IBackendInternal > > BackendsMap