14 #if defined(ARMCOMPUTECL_ENABLED) 18 #if defined(ARMCOMPUTENEON_ENABLED) 22 using namespace armnn;
29 template<
typename IComputeTensorHandle>
30 boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
31 std::initializer_list<unsigned int> expectedDimensions)
33 arm_compute::ITensorInfo*
info = tensorHandle->GetTensor().info();
35 auto infoNumDims = info->num_dimensions();
36 auto numExpectedDims = expectedDimensions.size();
37 if (infoNumDims != numExpectedDims)
39 boost::test_tools::predicate_result res(
false);
40 res.message() <<
"Different number of dimensions [" << info->num_dimensions()
41 <<
"!=" << expectedDimensions.size() <<
"]";
45 size_t i = info->num_dimensions() - 1;
47 for (
unsigned int expectedDimension : expectedDimensions)
49 if (info->dimension(i) != expectedDimension)
51 boost::test_tools::predicate_result res(
false);
52 res.message() <<
"For dimension " << i <<
53 " expected size " << expectedDimension <<
54 " got " << info->dimension(i);
64 template<
typename IComputeTensorHandle>
81 Connect(input, layer1, tensorInfo);
82 Connect(layer1, layer2, tensorInfo);
83 Connect(layer2, output, tensorInfo);
85 input->CreateTensorHandles(registry, refFactory);
87 layer2->CreateTensorHandles(registry, refFactory);
88 output->CreateTensorHandles(registry, refFactory);
91 auto workload1 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer1, factory);
92 auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, refFactory);
95 BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1);
96 BOOST_TEST(queueDescriptor1.m_Outputs.size() == 1);
97 auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
98 auto outputHandle1 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
100 BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3}));
104 BOOST_TEST(queueDescriptor2.
m_Inputs.size() == 1);
105 BOOST_TEST(queueDescriptor2.
m_Outputs.size() == 1);
106 auto inputHandle2 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor2.
m_Inputs[0]);
107 auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor2.
m_Outputs[0]);
108 BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3}));
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Copyright (c) 2021 ARM Limited and Contributors.
A layer user-provided data can be bound to (e.g. inputs, outputs).
This layer represents a memory copy operation.
std::vector< ITensorHandle * > m_Outputs
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
std::vector< ITensorHandle * > m_Inputs
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)