24.08
|
Go to the documentation of this file.
12 namespace optimizations
32 Layer& connectedLayer = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOwningLayer();
53 Layer& base = input->GetConnectedOutputSlot()->GetOwningLayer();
56 TensorInfo convertInfo = input->GetConnection()->GetTensorInfo();
60 input->GetConnection()->SetTensorInfo(convertInfo);
71 TensorInfo convertInfo = output->GetTensorInfo();
75 output->SetTensorInfo(convertInfo);
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
void SetDataType(DataType type)
const std::vector< InputSlot > & GetInputSlots() const
void Run(Graph &graph, Layer &layer) const
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
std::vector< InputSlot >::iterator EndInputSlots()
std::vector< InputSlot >::iterator BeginInputSlots()
DataType GetDataType() const
DataType GetDataType() const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
std::vector< OutputSlot >::iterator BeginOutputSlots()
Copyright (c) 2021 ARM Limited and Contributors.
ConvertFp32NetworkToFp16Impl()=default
~ConvertFp32NetworkToFp16Impl()=default
std::vector< OutputSlot >::iterator EndOutputSlots()