ArmNN 24.08
FullyConnected.cpp
//
// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FullyConnected.hpp"

#include "RefWorkloadUtils.hpp"

namespace armnn
{

void FullyConnected(const TensorShape& rInputShape,
                    Decoder<float>& rInputDecoder,
                    const TensorShape& rOutputShape,
                    Encoder<float>& rOutputEncoder,
                    const TensorShape& rWeightsShape,
                    Decoder<float>& rWeightDecoder,
                    Decoder<float>* pBiasDecoder,
                    const bool biasEnabled,
                    const unsigned int K,
                    const bool transposeWeights)
{
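    // Tensor layouts implied by the indexing below (all buffers are flattened, row-major):
    //   inputs  : [batchSize, K]
    //   weights : [outputSize, K] when transposeWeights is true, otherwise [K, outputSize]
    //   biases  : [outputSize], read only when biasEnabled is true
    //   outputs : [batchSize, outputSize]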
    // Perform FullyConnected: a matrix multiplication of the inputs and weights,
    // optionally adding a bias.
    unsigned int outputSize = rOutputShape[1];

    // Decode the full input and weight tensors into flat float buffers up front.
    const std::vector<float> decodedInputs  = rInputDecoder.DecodeTensor(rInputShape);
    const std::vector<float> decodedWeights = rWeightDecoder.DecodeTensor(rWeightsShape);

    const TensorShape biasShape{outputSize};

    const std::vector<float> decodedBiases =
        biasEnabled ? pBiasDecoder->DecodeTensor(biasShape) : std::vector<float>();

    for (unsigned int n = 0; n < rInputShape[0]; n++)
    {
        for (unsigned int channelOutput = 0; channelOutput < outputSize; channelOutput++)
        {
            float outval = 0.f;

            for (unsigned int channelInput = 0; channelInput < K; channelInput++)
            {
                float weight;
                if (transposeWeights)
                {
                    weight = decodedWeights[channelOutput * K + channelInput];
                }
                else
                {
                    weight = decodedWeights[channelInput * outputSize + channelOutput];
                }

                outval += weight * decodedInputs[n * K + channelInput];
            }

            if (biasEnabled)
            {
                outval += decodedBiases[channelOutput];
            }

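            // Position the encoder at this output element, then write the accumulated value.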
            rOutputEncoder[n * outputSize + channelOutput];
            rOutputEncoder.Set(outval);
        }
    }
}

} //namespace armnn
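
For reference, the loops above compute, for each batch element n and output channel o, outputs[n][o] = sum over k of weight(k, o) * inputs[n][k], plus biases[o] when the bias is enabled. The following is a minimal standalone sketch of the same arithmetic on plain std::vector<float> buffers; the names (FullyConnectedRef and the variables in main) are introduced here for illustration only and are not part of the Arm NN API, and the Decoder/Encoder abstractions are deliberately left out.

#include <cassert>
#include <iostream>
#include <vector>

// Same indexing as the reference workload above, on plain float buffers.
std::vector<float> FullyConnectedRef(const std::vector<float>& inputs,   // [batch, K]
                                     const std::vector<float>& weights,  // layout depends on transposeWeights
                                     const std::vector<float>& biases,   // [outputSize] or empty
                                     unsigned int batch,
                                     unsigned int K,
                                     unsigned int outputSize,
                                     bool transposeWeights)
{
    std::vector<float> outputs(batch * outputSize, 0.f);
    for (unsigned int n = 0; n < batch; ++n)
    {
        for (unsigned int o = 0; o < outputSize; ++o)
        {
            float acc = 0.f;
            for (unsigned int k = 0; k < K; ++k)
            {
                // [outputSize, K] layout when transposed, [K, outputSize] otherwise.
                const float w = transposeWeights ? weights[o * K + k]
                                                 : weights[k * outputSize + o];
                acc += w * inputs[n * K + k];
            }
            if (!biases.empty())
            {
                acc += biases[o];
            }
            outputs[n * outputSize + o] = acc;
        }
    }
    return outputs;
}

int main()
{
    // One batch element, K = 3 inputs, 2 outputs, weights stored as [outputSize, K].
    const std::vector<float> inputs  = {1.f, 2.f, 3.f};
    const std::vector<float> weights = {1.f, 0.f, 1.f,   // output 0
                                        0.f, 1.f, 0.f};  // output 1
    const std::vector<float> biases  = {0.5f, -0.5f};

    const auto out = FullyConnectedRef(inputs, weights, biases, 1, 3, 2, true);
    assert(out.size() == 2);
    std::cout << out[0] << " " << out[1] << std::endl;  // 4.5 1.5
    return 0;
}

Compiled as a standalone program, the sketch prints 4.5 1.5: output 0 sums inputs 0 and 2 plus a bias of 0.5, and output 1 takes input 1 minus 0.5.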