template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::TensorInfo inputTensorInfo, armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc, armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    // Create backend-specific handles for the data input and the output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // ... (queue descriptor and constant weight/bias tensors are set up here) ...

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // ... (workload is created from the descriptor here) ...

    inputHandle->Allocate();
    outputHandle->Allocate();

    // ... (input data is copied into inputHandle here) ...

    ExecuteWorkload(*workload, memoryManager);

    // ... (output is copied out of outputHandle and returned) ...
}
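// NOTE (editorial sketch, not verbatim source): the elided middle of
// SimpleFullyConnectedTestImpl almost certainly follows the standard ArmNN
// test-helper pattern: fill a FullyConnectedQueueDescriptor, attach the
// constant weight and bias tensors, then ask the factory for the workload.
// Roughly (the scoped-handle type name varies between ArmNN releases):
//
//     armnn::FullyConnectedQueueDescriptor data;
//     armnn::WorkloadInfo info;
//     armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);  // assumed name
//     armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
//     AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
//     AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
//     data.m_Weight = &weightsTensor;
//     data.m_Bias = &biasTensor;
//     data.m_Parameters.m_BiasEnabled = biasEnabled;
//     data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
//     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);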
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestWeightsAsInputsImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::TensorInfo inputTensorInfo, armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsTensorInfo, armnn::TensorInfo biasesTensorInfo,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    // In this variant the weights (and, optionally, the bias) are fed to the
    // workload as additional inputs rather than as constant tensors.
    std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // ... (queue descriptor set up here) ...

    AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
    AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The bias handle is only created and wired up when biasEnabled is true.
    std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
    // ...
    AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());

    // ... (workload is created here) ...

    input0Handle->Allocate();
    input1Handle->Allocate();
    outputHandle->Allocate();
    // ...
    input2Handle->Allocate();

    // ... (input, weight and bias data are copied into their handles here) ...

    ExecuteWorkload(*workload, memoryManager);

    // ... (output is copied out and returned) ...
}
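// NOTE (editorial sketch): the "// ..." gaps around input2Handle above are
// most plausibly biasEnabled guards, along the lines of:
//
//     if (biasEnabled)
//     {
//         input2Handle = tensorHandleFactory.CreateTensorHandle(biasesTensorInfo);
//         AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
//     }
//     ...
//     if (biasEnabled)
//     {
//         input2Handle->Allocate();
//         CopyDataToITensorHandle(input2Handle.get(), &bias[0]);  // assumed, mirroring the other inputs
//     }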
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool constantWeights)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;
    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    // ...
    inputTensorInfo.SetQuantizationOffset(63);
    // ...
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
    // ...
    weightsDesc.SetQuantizationOffset(93);

    // The bias uses the standard quantized convention: scale = inputScale * weightScale, offset = 0.
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        // ... (input values elided in this listing) ...

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8.0f, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        // ...

    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{ 9250, 67500 });

    // ... (LayerTestResult<T, 2> result declared here) ...
    if (constantWeights)
    {
        result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
            // ...
    }
    else
    {
        result = SimpleFullyConnectedTestWeightsAsInputsImpl<T>(workloadFactory,
            // ...
    }

    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
            ConvertToDataType<ArmnnType>({ 80.f, 1460.f }, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
            ConvertToDataType<ArmnnType>({ -107.04f, 110.f }, outputTensorInfo));
    }
    return result;
}
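// NOTE (editorial): the two expected vectors are mutually consistent. The bias
// scale is inputScale * weightScale; assuming the elided scales are 0.1f for
// the input and 0.2f for the weights (they are not shown in this listing), the
// dequantized bias is { 9250 * 0.02f, 67500 * 0.02f } = { 185.f, 1350.f }, and
// { -107.04f + 185.f, 110.f + 1350.f } rounds to { 80.f, 1460.f } once
// requantized with the output tensor's scale and offset.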
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;
    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // ... (TensorInfo objects declared here) ...

    unsigned int inputShape[]   = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[]  = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }
    unsigned int biasShape[] = { outputChannels };

    // ...

    if (armnn::IsQuantizedType<T>())
    {
        // ... (qScale/qOffset applied to the tensor infos here) ...
    }

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        armnnUtils::QuantizedVector<T>({
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        },
        qScale, qOffset));

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        armnnUtils::QuantizedVector<T>({
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        },
        qScale, qOffset));

    std::vector<T> biasValues({ 900000.f });
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    // ... (LayerTestResult<T, 2> result declared here) ...
    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory, memoryManager, tensorHandleFactory,
        inputTensorInfo, outputTensorInfo, weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));

    return result;
}
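// NOTE (editorial): a compile-time spot check of the expected value above,
// assuming the layer computes a plain dot product plus bias:
// 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.
static_assert(1 * 2 + 10 * 3 + 100 * 4 + 1000 * 5 + 10000 * 6 + 900000 == 965432,
              "FullyConnectedLargeTestCommon expected output");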
// Explicit instantiations for the quantized data types exercised by the tests.
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
FullyConnectedTest<armnn::DataType::QAsymmU8>(/* ... */);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
FullyConnectedTest<armnn::DataType::QSymmS16>(/* ... */);
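// NOTE (editorial): explicit instantiation keeps the FullyConnectedTest
// template definition inside this translation unit while still letting test
// code in other translation units link against the QAsymmU8 and QSymmS16
// variants; the parameter lists are elided in this listing.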
LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;
    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // ... (TensorInfo objects declared here) ...

    unsigned int inputShape[]   = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[]  = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }
    unsigned int biasShape[] = { outputChannels };

    // ... (TensorInfos and the result object initialised here) ...

    // Two batches of five input values each.
    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        }));

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            // ... (the same weights in { inputChannels, outputChannels } order, elided here) ...
        }));

    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, .5f, .5f, .5f, .5f,
            2.f, 2.f, 2.f, 2.f, 2.f,
            .5f, 1.f, 2.f, 3.f, 4.f
        }));
    }

    std::vector<float> biasValues({ 0.f, 0.f, 0.f });
    if (biasEnabled)
    {
        biasValues = std::vector<float>({ 10.f, 20.f, 30.f });
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory, memoryManager, tensorHandleFactory,
        inputTensorInfo, outputTensorInfo, weightsDesc, biasesDesc,
        weights, bias, input, biasEnabled, transposeWeights);

    // Each expected element is the dot product of an input batch with one
    // weight column, plus the corresponding bias.
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        }));

    return result;
}
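// NOTE (editorial): spot-checking the first expected element: input batch
// { 1, 2, 3, 4, 5 } against the first weight column { .5f, .5f, .5f, .5f, .5f }
// gives 0.5 + 1.0 + 1.5 + 2.0 + 2.5 = 7.5 before bias, matching the sum above.
static_assert(1 * 0.5 + 2 * 0.5 + 3 * 0.5 + 4 * 0.5 + 5 * 0.5 == 7.5,
              "FullyConnectedFloat32Test first expected element (pre-bias)");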
LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, transposeWeights);
}
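// NOTE (editorial, hypothetical usage): a backend unit test would drive these
// helpers with its own workload factory and tensor-handle factory, roughly:
//
//     armnn::RefWorkloadFactory factory;                          // reference backend, for illustration
//     armnn::ITensorHandleFactory& handleFactory = /* backend-specific */;
//     auto result = FullyConnectedFloat32Test(factory, nullptr /* memory manager */,
//                                             handleFactory,
//                                             /*biasEnabled=*/true,
//                                             /*transposeWeights=*/true);
//     // The harness then compares result.output against result.outputExpected.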