24.02
|
#include <INetwork.hpp>
Definition at line 901 of file INetwork.hpp.
◆ IOptimizedNetwork() [1/4]
◆ IOptimizedNetwork() [2/4]
◆ IOptimizedNetwork() [3/4]
◆ ~IOptimizedNetwork()
◆ IOptimizedNetwork() [4/4]
◆ Destroy()
◆ ExecuteStrategy()
void ExecuteStrategy |
( |
IStrategy & |
strategy | ) |
const |
◆ GetGuid()
arm::pipe::ProfilingGuid GetGuid |
( |
| ) |
const |
◆ GetNumInputs()
size_t GetNumInputs |
( |
| ) |
const |
◆ GetNumOutputs()
size_t GetNumOutputs |
( |
| ) |
const |
◆ GetProfiler()
const std::shared_ptr< IProfiler > & GetProfiler |
( |
| ) |
const |
◆ PrintGraph()
◆ SerializeToDot()
Status SerializeToDot |
( |
std::ostream & |
stream | ) |
const |
◆ experimental::AsyncNetworkImpl
friend class experimental::AsyncNetworkImpl |
|
friend |
◆ experimental::WorkingMemHandle
◆ GetGraphForTesting
Definition at line 49 of file TestUtils.cpp.
51 return optNet->pOptimizedNetworkImpl->GetGraph();
◆ GetModelOptionsForTesting
Definition at line 54 of file TestUtils.cpp.
56 return optNet->pOptimizedNetworkImpl->GetModelOptions();
◆ LoadedNetwork
◆ Optimize [1/2]
Create an optimized version of the network.
- Parameters
-
inGraph | Graph to be optimized. |
backendPreferences | The choice of the backend ordered by user preferences. |
deviceSpec | DeviceSpec object as queried from the runtime. See IRuntime::GetDeviceSpec() |
messages | If there are failures or warnings, a string describing each will be added to the vector. |
options | OptimizerOptions object with optimizer configuration options |
- Returns
- An IOptimizedNetworkPtr interface to the optimized network; throws an exception derived from armnn::Exception if the process fails.
Definition at line 1896 of file Network.cpp.
1905 auto profiler = inGraph.GetProfiler();
1907 profiler->EnableProfiling(options.GetProfilingEnabled());
1911 std::set<BackendId> backendSet(backendPreferences.begin(), backendPreferences.end());
1913 if (backendSet.find(
"GpuFsa") != backendSet.end() &&
1914 backendSet.find(
"GpuAcc") != backendSet.end())
1916 throw InvalidArgumentException(
"The backends \"GpuAcc\" and \"GpuFsa\" cannot be specified "
1917 "for the same optimized network.");
1922 if (backendPreferences.empty())
1924 throw InvalidArgumentException(
"Invoked Optimize with no backends specified");
1927 if (options.GetReduceFp32ToBf16())
1929 throw InvalidArgumentException(
"BFloat16 optimization is currently ignored. In order to use Bf16 optimization "
1930 "Please use the FastMathEnabled backend option for CpuAcc or GpuAcc.");
1933 if (options.GetReduceFp32ToFp16() && options.GetReduceFp32ToBf16())
1935 throw InvalidArgumentException(
"BFloat16 and Float16 optimization cannot be enabled at the same time.");
1939 inGraph.VerifyConstantLayerSetTensorInfo();
1941 std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);
1946 {{
"ImportEnabled", options.GetImportEnabled()},
1947 {
"ExportEnabled", options.GetExportEnabled()}});
1948 ModelOptions optimizedOptions(options.GetModelOptions());
1949 optimizedOptions.push_back(importExport);
1957 Graph& optGraph = optNetObjPtr->pOptimizedNetworkImpl->GetGraph();
1962 optGraph.InferTensorInfos();
1966 using namespace optimizations;
1974 optGraph.InferTensorInfos();
2008 BackendSettings backendSettings(backendPreferences, deviceSpec);
2009 auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
2010 if (availablePreferredBackends.empty())
2012 std::stringstream failureMsg;
2013 failureMsg <<
"None of the preferred backends " << backendPreferences
2014 <<
" are supported. Current platform provides " << backendSettings.m_SupportedBackends;
2016 throw InvalidArgumentException(failureMsg.str());
2020 TensorHandleFactoryRegistry tensorHandleFactoryRegistry;
2023 if (options.GetReduceFp32ToFp16())
2037 OptimizationResult assignBackendsResult =
AssignBackends(optNetObjPtr->pOptimizedNetworkImpl.get(),
2042 if (assignBackendsResult.m_Error)
2045 throw InvalidArgumentException(
"Failed to assign a backend to each layer");
2055 options.GetModelOptions(),
2057 if (backendOptimizationResult.m_Error)
2060 throw InvalidArgumentException(
"Failed to apply the backend-specific optimizations");
2073 if (options.GetDebugEnabled() && !options.GetDebugToFileEnabled())
2077 else if (options.GetDebugToFileEnabled())
2082 #if !defined(ARMNN_DISABLE_FILESYSTEM)
2084 ARMNN_LOG(info) <<
"Intermediate tensors will be written to: " << result;
2091 ARMNN_LOG(warning) <<
"Unable to print intermediate layer outputs : " << e.
what();
2098 tensorHandleFactoryRegistry,
2099 options.GetImportEnabled(),
2100 options.GetExportEnabled(),
2103 if (strategyResult.m_Error)
2112 optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);
◆ Optimize [2/2]
Create an optimized version of the network.
- Parameters
-
network | INetwork description of the network to be optimized. |
backendPreferences | The choice of the backend ordered by user preferences. |
deviceSpec | DeviceSpec object as queried from the runtime. See IRuntime::GetDeviceSpec() |
messages | If there are failures or warnings, a string describing each will be added to the vector. |
options | OptimizerOptions object with optimizer configuration options |
- Returns
- An IOptimizedNetworkPtr interface to the optimized network; throws an exception derived from armnn::Exception if the process fails.
Definition at line 2132 of file Network.cpp.
2138 return Optimize(inNetwork.pNetworkImpl->GetGraph(),
◆ pOptimizedNetworkImpl
The documentation for this class was generated from the following files:
OptimizeForType< Layer, AddDebugToFileImpl > InsertDebugToFileLayer
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl *optNetObjPtr, BackendSettings &backendSettings, BackendsMap &backends, const ModelOptions &modelOptions, Optional< std::vector< std::string > & > errMessages)
OptimizeForType< Layer, AddDebugImpl > InsertDebugLayer
OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float32 > > FuseBatchNormIntoConvolution2DFloat32
void RegisterProfiler(IProfiler *profiler)
OptimizeForConnection< PermuteLayer, PermuteLayer, OptimizeInversePermutesImpl< PermuteLayer > > OptimizeInversePermutes
OptimizeForConnection< TransposeLayer, BatchToSpaceNdLayer, PermuteAndBatchToSpaceAsDepthToSpaceImpl< TransposeLayer > > TransposeAndBatchToSpaceAsDepthToSpace
OptimizeForExclusiveConnection< PadLayer, Pooling2dLayer, pad_fold::FoldPadIntoPooling2dImpl > FoldPadIntoPooling2d
OptimizeForType< Layer, ConvertFp32NetworkToFp16Impl > Fp32NetworkToFp16Converter
OptimizeForExclusiveConnection< PadLayer, Convolution2dLayer, pad_fold::FoldPadIntoConvolution2dImpl > FoldPadIntoConvolution2d
OptimizeForConnection< ConstantLayer, DequantizeLayer, ConvertConstDequantisationLayersToConstLayersImpl > ConvertConstDequantisationLayersToConstLayers
OptimizeForConnection< Layer, TransposeLayer, MoveTransposeUpImpl > MoveTransposeUp
OptimizationResult AssignBackends(OptimizedNetworkImpl *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > & > errMessages)
OptimizeForType< BroadcastToLayer, DeleteBroadcastToImpl > BroadcastToOptimizationLayer
LayerList::const_iterator Iterator
OptimizeForType< PermuteLayer, PermuteAsReshapeImpl > PermuteAsReshape
OptimizeForConnection< PermuteLayer, BatchToSpaceNdLayer, PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteLayer > > PermuteAndBatchToSpaceAsDepthToSpace
OptimizationResult SelectTensorHandleStrategy(Graph &optGraph, BackendsMap &backends, TensorHandleFactoryRegistry ®istry, bool importEnabled, bool exportEnabled, Optional< std::vector< std::string > & > errMessages)
OptimizeForConnection< Layer, PermuteLayer, MovePermuteUpImpl > MovePermuteUp
OptimizeForConnection< ConvertFp16ToFp32Layer, ConvertFp32ToFp16Layer, OptimizeInverseConversionsImpl > OptimizeInverseConversionsFp16
virtual const char * what() const noexcept override
#define ARMNN_LOG(severity)
BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry &handleFactoryRegistry, BackendSettings &backendSettings)
ConvertConstants< Float32ToFloat16, IsFloat16Layer > ConvertConstantsFloatToHalf
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
void ReportError(const std::string &errorMessage, Optional< std::vector< std::string > & > errorMessages)
bool CheckFp16Support(BackendsMap &backends, const std::vector< BackendId > &availablePreferredBackends)
OptimizeForConnection< ConstantLayer, PermuteLayer, ConvertConstPermuteLayersToConstLayers > FusePermuteIntoConstLayer
Optimizer::Optimizations MakeOptimizations(Args &&... args)
friend IOptimizedNetworkPtr Optimize(const INetwork &inNetwork, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptionsOpaque &options, Optional< std::vector< std::string > & > messages)
Create an optimized version of the network.
@ ValidateOnly
Validate all output shapes.
@ InferAndValidate
Infer missing output shapes and validate all output shapes.
OptimizeForConnection< ConvertFp32ToFp16Layer, ConvertFp16ToFp32Layer, OptimizeInverseConversionsImpl > OptimizeInverseConversionsFp32
Struct for the users to pass backend specific options.
OptimizeForType< TransposeLayer, TransposeAsReshapeImpl > TransposeAsReshape
static ProfilerManager & GetInstance()
OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float16 > > FuseBatchNormIntoConvolution2DFloat16
static void Destroy(IOptimizedNetwork *network)
OptimizeForExclusiveConnection< DepthwiseConvolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< DepthwiseConvolution2dLayer, armnn::DataType::Float16 > > FuseBatchNormIntoDepthwiseConvolution2DFloat16
std::map< BackendId, std::unique_ptr< class IBackendInternal > > BackendsMap
OptimizeForConnection< Layer, TransposeLayer, SquashEqualSiblingsImpl< TransposeLayer > > SquashEqualTransposeSiblings
ConvertConstants< Float16ToFloat32, IsFloat32Layer > ConvertConstantsHalfToFloat
IOptimizedNetwork(const IOptimizedNetwork &other, const ModelOptions &modelOptions)
Creates a copy of the IOptimizedNetwork.
OptimizeForConnection< Layer, PermuteLayer, SquashEqualSiblingsImpl< PermuteLayer > > SquashEqualPermuteSiblings
static void Pass(Graph &graph, const Optimizations &optimizations)
std::vector< BackendOptions > ModelOptions
OptimizeForExclusiveConnection< PadLayer, DepthwiseConvolution2dLayer, pad_fold::FoldPadIntoDepthwiseConvolution2dImpl > FoldPadIntoDepthwiseConvolution2d
std::string CreateDirectory(std::string sPath)
Returns the full path to the temporary folder.
OptimizeForConnection< Layer, ReshapeLayer, SquashEqualSiblingsImpl< ReshapeLayer > > SquashEqualReshapeSiblings
OptimizeForConnection< TransposeLayer, TransposeLayer, OptimizeInversePermutesImpl< TransposeLayer > > OptimizeInverseTransposes
OptimizeForType< Layer, AddBroadcastReshapeLayerImpl > AddBroadcastReshapeLayer
OptimizeForExclusiveConnection< DepthwiseConvolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< DepthwiseConvolution2dLayer, armnn::DataType::Float32 > > FuseBatchNormIntoDepthwiseConvolution2DFloat32
OptimizeForConnection< ReshapeLayer, ReshapeLayer, OptimizeConsecutiveReshapesImpl > OptimizeConsecutiveReshapes
std::unique_ptr< OptimizedNetworkImpl > pOptimizedNetworkImpl