void FullyConnected(const TensorShape& rInputShape, Decoder<float>& rInputDecoder,
                    const TensorShape& rOutputShape, Encoder<float>& rOutputEncoder,
                    const TensorShape& rWeightsShape, Decoder<float>& rWeightDecoder,
                    Decoder<float>* pBiasDecoder, const bool biasEnabled,
                    const unsigned int K, const bool transposeWeights)
{
    unsigned int outputSize = rOutputShape[1];

    const std::vector<float> decodedInputs  = rInputDecoder.DecodeTensor(rInputShape);
    const std::vector<float> decodedWeights = rWeightDecoder.DecodeTensor(rWeightsShape);

    // The bias (when present) is a 1-D tensor with one value per output channel.
    const TensorShape biasShape{outputSize};
    const std::vector<float> decodedBiases =
        biasEnabled ? pBiasDecoder->DecodeTensor(biasShape) : std::vector<float>();

    for (unsigned int n = 0; n < rInputShape[0]; n++)
    {
        for (unsigned int channelOutput = 0; channelOutput < outputSize; channelOutput++)
        {
            float outval = 0.f;

            for (unsigned int channelInput = 0; channelInput < K; channelInput++)
            {
                // Weights are read as [outputSize, K] when transposeWeights is true,
                // and as [K, outputSize] otherwise.
                float weight = transposeWeights
                             ? decodedWeights[channelOutput * K + channelInput]
                             : decodedWeights[channelInput * outputSize + channelOutput];

                outval += weight * decodedInputs[n * K + channelInput];
            }

            if (biasEnabled)
            {
                outval += decodedBiases[channelOutput];
            }

            // operator[] positions the encoder on the output element; Set() writes it.
            rOutputEncoder[n * outputSize + channelOutput];
            rOutputEncoder.Set(outval);
        }
    }
}
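The only subtle part of the inner loop is the weight indexing: with transposeWeights set, the weights are addressed as an [outputSize, K] matrix (one contiguous row per output channel), otherwise as [K, outputSize]. The following is a minimal standalone sketch of that indexing using plain std::vector<float> data instead of the ArmNN Decoder/Encoder machinery; the vector names and the six weight values are invented for illustration.

#include <cassert>
#include <vector>

int main()
{
    const unsigned int K = 3;          // input channels
    const unsigned int outputSize = 2; // output channels

    // The same six (illustrative) weights stored in the two layouts the loop supports.
    // transposeWeights == true : [outputSize, K], one row per output channel.
    const std::vector<float> weightsOxK = { 1.f, 2.f, 3.f,
                                            4.f, 5.f, 6.f };
    // transposeWeights == false: [K, outputSize], one row per input channel.
    const std::vector<float> weightsKxO = { 1.f, 4.f,
                                            2.f, 5.f,
                                            3.f, 6.f };

    for (unsigned int channelOutput = 0; channelOutput < outputSize; channelOutput++)
    {
        for (unsigned int channelInput = 0; channelInput < K; channelInput++)
        {
            // Both index expressions pick out the same logical weight
            // W[channelOutput][channelInput], matching the two branches above.
            assert(weightsOxK[channelOutput * K + channelInput] ==
                   weightsKxO[channelInput * outputSize + channelOutput]);
        }
    }
    return 0;
}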
References:
    Decoder<float>::DecodeTensor: virtual std::vector<float> DecodeTensor(const TensorShape& tensorShape, bool isDepthwise = false) = 0
    Encoder<float>::Set:          virtual void Set(IType right) = 0

Copyright (c) 2021 ARM Limited and Contributors.

void FullyConnected(const TensorShape& rInputShape, Decoder<float>& rInputDecoder, const TensorShape& rOutputShape, Encoder<float>& rOutputEncoder, const TensorShape& rWeightsShape, Decoder<float>& rWeightDecoder, Decoder<float>* pBiasDecoder, const bool biasEnabled, const unsigned int K, const bool transposeWeights)
    Performs a matrix multiplication and optionally adds a bias.
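Concretely, for each batch element n the routine computes output[n][o] = sum over k of weight[o][k] * input[n][k], plus bias[o] when biasEnabled. Below is a self-contained numeric sketch of that arithmetic; the input, weight, and bias values (and the variable names) are invented for illustration and are not part of ArmNN.

#include <cstdio>
#include <vector>

int main()
{
    const unsigned int N = 1, K = 3, outputSize = 2;

    const std::vector<float> input   = { 1.f, 2.f, 3.f };  // [N, K]
    const std::vector<float> weights = { 1.f, 0.f, 1.f,    // [outputSize, K]
                                         0.f, 1.f, 0.f };
    const std::vector<float> bias    = { 0.5f, -1.f };     // [outputSize]

    std::vector<float> output(N * outputSize, 0.f);
    for (unsigned int n = 0; n < N; n++)
    {
        for (unsigned int o = 0; o < outputSize; o++)
        {
            float acc = 0.f;
            for (unsigned int k = 0; k < K; k++)
            {
                acc += weights[o * K + k] * input[n * K + k];
            }
            output[n * outputSize + o] = acc + bias[o];
        }
    }

    // Expected: output[0] = 1*1 + 0*2 + 1*3 + 0.5 = 4.5
    //           output[1] = 0*1 + 1*2 + 0*3 - 1.0 = 1.0
    std::printf("%.1f %.1f\n", output[0], output[1]);
    return 0;
}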