// RefFullyConnectedWorkload.cpp
// Copyright (c) 2021 ARM Limited and Contributors.

unsigned int GetNumActivations(const TensorInfo& inputInfo)
{
    unsigned int numActivations = 1; // Number of activations per batch element.
    for (unsigned int i = 1; i < inputInfo.GetNumDimensions(); i++)
    {
        numActivations *= inputInfo.GetShape()[i];
    }
    return numActivations;
}

RefFullyConnectedWorkload::RefFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info)
    : RefBaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
    , m_InputShape(info.m_InputTensorInfos[0].GetShape())
    , m_WeightShape(info.m_InputTensorInfos[1].GetShape())
    , m_OutputShape(info.m_OutputTensorInfos[0].GetShape())
    , m_NumActivations(GetNumActivations(info.m_InputTensorInfos[0]))
{
}

void RefFullyConnectedWorkload::Execute(std::vector<ITensorHandle*> inputs,
                                        std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefFullyConnectedWorkload_Execute");

    // Decoders/encoders convert between each tensor's backing data type and float.
    std::unique_ptr<Decoder<float>> inputDecoder  = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
    std::unique_ptr<Encoder<float>> OutputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());

    std::unique_ptr<Decoder<float>> weightsDecoder = MakeDecoder<float>(GetTensorInfo(inputs[1]), inputs[1]->Map());
    std::unique_ptr<Decoder<float>> biasDecoder;

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        biasDecoder = MakeDecoder<float>(GetTensorInfo(inputs[2]), inputs[2]->Map());
    }

    FullyConnected(m_InputShape,
                   *inputDecoder,
                   m_OutputShape,
                   *OutputEncoder,
                   m_WeightShape,
                   *weightsDecoder,
                   biasDecoder.get(),
                   m_Data.m_Parameters.m_BiasEnabled,
                   m_NumActivations,
                   m_Data.m_Parameters.m_TransposeWeightMatrix);
}
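The FullyConnected call above amounts to a matrix multiplication with an optional bias add, as described by the referenced declaration below. The following is a minimal, self-contained sketch of that computation, assuming plain float vectors in place of the Decoder/Encoder streams; the function name SimpleFullyConnected and the row-major weight layouts are illustrative assumptions, not part of the Arm NN API.

#include <vector>

// Illustrative stand-in for the reference fully connected kernel.
// Assumed layouts: inputs [batchSize, K], outputs [batchSize, outputSize],
// bias [outputSize], weights [outputSize, K] when transposeWeights is true,
// otherwise [K, outputSize].
void SimpleFullyConnected(const std::vector<float>& inputs,
                          const std::vector<float>& weights,
                          const std::vector<float>& bias,
                          std::vector<float>& outputs,
                          unsigned int batchSize,
                          unsigned int K,          // activations per batch element
                          unsigned int outputSize,
                          bool biasEnabled,
                          bool transposeWeights)
{
    for (unsigned int n = 0; n < batchSize; ++n)
    {
        for (unsigned int o = 0; o < outputSize; ++o)
        {
            float acc = 0.0f;
            for (unsigned int k = 0; k < K; ++k)
            {
                // Pick the weight according to the assumed storage layout.
                const float w = transposeWeights ? weights[o * K + k]
                                                 : weights[k * outputSize + o];
                acc += w * inputs[n * K + k];
            }
            if (biasEnabled)
            {
                acc += bias[o];   // Optional bias add.
            }
            outputs[n * outputSize + o] = acc;
        }
    }
}

With batchSize = 1, K = 3 and outputSize = 2, for example, the loops reduce to two 3-element dot products plus one bias term each.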
Referenced declarations and brief descriptions:

#define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label)
    Creates a profiling event that uses GetGuid() and GetName() from the calling class.

RefFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info)
void Execute() const override

unsigned int GetNumDimensions() const
const TensorShape& GetShape() const

unsigned int GetNumActivations(const TensorInfo& inputInfo)
    Computes the number of activations per batch element (a worked example follows this list).

void FullyConnected(const TensorShape& rInputShape, Decoder<float>& rInputDecoder,
                    const TensorShape& rOutputShape, Encoder<float>& rOutputEncoder,
                    const TensorShape& rWeightsShape, Decoder<float>& rWeightDecoder,
                    Decoder<float>* pBiasDecoder, const bool biasEnabled,
                    const unsigned int K, const bool transposeWeights)
    Performs a matrix multiplication and optionally adds a bias.

const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
    float32 helpers.

std::vector<ITensorHandle*> m_Inputs
std::vector<ITensorHandle*> m_Outputs

WorkloadInfo
    Contains information about TensorInfos of a layer.
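GetNumActivations determines the K argument passed to FullyConnected: the product of every input dimension except dimension 0, which is treated as the batch. A small sketch of the same computation, assuming Arm NN's public TensorInfo API and an arbitrary [2, 3, 4] input shape chosen purely for illustration:

#include <iostream>
#include <armnn/Tensor.hpp>

int main()
{
    // Hypothetical input: batch of 2, with 3 * 4 = 12 activations per element.
    armnn::TensorInfo inputInfo({2, 3, 4}, armnn::DataType::Float32);

    // Same logic as GetNumActivations in the listing above: skip dimension 0
    // (the batch) and multiply the remaining dimensions together.
    unsigned int numActivations = 1;
    for (unsigned int i = 1; i < inputInfo.GetNumDimensions(); i++)
    {
        numActivations *= inputInfo.GetShape()[i];
    }

    std::cout << numActivations << "\n";   // prints 12
}

Here the result is 12, so each of the two batch elements contributes 12 input activations to every output neuron of the fully connected layer.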