24.08
#include <INetwork.hpp>
Definition at line 908 of file INetwork.hpp.
◆ IOptimizedNetwork() [1/4]
◆ IOptimizedNetwork() [2/4]
◆ IOptimizedNetwork() [3/4]
◆ ~IOptimizedNetwork()
◆ IOptimizedNetwork() [4/4]
◆ Destroy()
static void Destroy(IOptimizedNetwork * network)
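IOptimizedNetworkPtr is declared as std::unique_ptr<IOptimizedNetwork, void(*)(IOptimizedNetwork*)>, with Destroy installed as the deleter, so networks returned by armnn::Optimize are destroyed automatically. A minimal sketch of the usual ownership pattern (the function name is illustrative):

void RunOnce(armnn::INetwork& network, armnn::IRuntime& runtime)
{
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(network, {"CpuRef"}, runtime.GetDeviceSpec());
    // ... use optNet ...
}   // the unique_ptr deleter invokes IOptimizedNetwork::Destroy here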
◆ ExecuteStrategy()
void ExecuteStrategy(IStrategy & strategy) const
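ExecuteStrategy applies a visitor to every layer of the optimized graph. A minimal sketch, assuming IStrategy's pure-virtual ExecuteStrategy(layer, descriptor, constants, name, id) overload; LayerCounter is a hypothetical visitor:

#include <armnn/IStrategy.hpp>

class LayerCounter : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* /*layer*/,
                         const armnn::BaseDescriptor& /*descriptor*/,
                         const std::vector<armnn::ConstTensor>& /*constants*/,
                         const char* /*name*/,
                         const armnn::LayerBindingId /*id*/ = 0) override
    {
        ++m_LayerCount;   // count each layer the optimized network visits
    }
    size_t m_LayerCount = 0;
};

// Usage: LayerCounter counter; optNet->ExecuteStrategy(counter);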
◆ GetGuid()
arm::pipe::ProfilingGuid GetGuid() const
◆ GetNumInputs()
size_t GetNumInputs() const
◆ GetNumOutputs()
size_t GetNumOutputs() const
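A minimal sketch of querying the network's I/O arity, e.g. to sanity-check input and output bindings before loading the network (optNet is assumed to come from armnn::Optimize):

size_t numInputs  = optNet->GetNumInputs();    // number of network inputs
size_t numOutputs = optNet->GetNumOutputs();   // number of network outputs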
◆ GetProfiler()
const std::shared_ptr< IProfiler > & GetProfiler() const
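A minimal sketch, assuming IProfiler's EnableProfiling and Print members, of collecting and dumping profiling data for this network:

optNet->GetProfiler()->EnableProfiling(true);
// ... load the network into a runtime and run inference ...
optNet->GetProfiler()->Print(std::cout);   // write the recorded profiling events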
◆ PrintGraph()
Status PrintGraph()
◆ SerializeToDot()
Status SerializeToDot(std::ostream & stream) const
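A minimal sketch of exporting the optimized graph to Graphviz dot format (the file name is illustrative):

#include <fstream>

std::ofstream dotStream("optimized_graph.dot");
if (optNet->SerializeToDot(dotStream) != armnn::Status::Success)
{
    // serialization failed; the Status return value signals the error
}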
◆ experimental::AsyncNetworkImpl
friend class experimental::AsyncNetworkImpl
◆ experimental::WorkingMemHandle
◆ GetGraphForTesting
Definition at line 49 of file TestUtils.cpp.
return optNet->pOptimizedNetworkImpl->GetGraph();
◆ GetModelOptionsForTesting
Definition at line 54 of file TestUtils.cpp.
return optNet->pOptimizedNetworkImpl->GetModelOptions();
◆ LoadedNetwork
◆ Optimize [1/2]
Create an optimized version of the network.
- Parameters
  - inGraph: Graph to be optimized.
  - backendPreferences: The choice of backends, ordered by user preference.
  - deviceSpec: DeviceSpec object as queried from the runtime. See IRuntime::GetDeviceSpec().
  - messages: If there are failures or warnings, a string describing them will be added to the vector.
  - options: OptimizerOptions object with optimizer configuration options.
- Returns
  - An IOptimizedNetworkPtr interface to the optimized network; throws an exception derived from armnn::Exception if the process fails.
Definition at line 1906 of file Network.cpp.
auto profiler = inGraph.GetProfiler();
profiler->EnableProfiling(options.GetProfilingEnabled());

std::set<BackendId> backendSet(backendPreferences.begin(), backendPreferences.end());
if (backendSet.find("GpuFsa") != backendSet.end() &&
    backendSet.find("GpuAcc") != backendSet.end())
{
    throw InvalidArgumentException("The backends \"GpuAcc\" and \"GpuFsa\" cannot be specified "
                                   "for the same optimized network.");
}

if (backendPreferences.empty())
{
    throw InvalidArgumentException("Invoked Optimize with no backends specified");
}

if (options.GetReduceFp32ToBf16())
{
    throw InvalidArgumentException("BFloat16 optimization is currently ignored. In order to use Bf16 optimization "
                                   "Please use the FastMathEnabled backend option for CpuAcc or GpuAcc.");
}

if (options.GetReduceFp32ToFp16() && options.GetReduceFp32ToBf16())
{
    throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
}

inGraph.VerifyConstantLayerSetTensorInfo();

std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);

// ...
BackendOptions importExport("Global",
                            {{"ImportEnabled", options.GetImportEnabled()},
                             {"ExportEnabled", options.GetExportEnabled()}});
ModelOptions optimizedOptions(options.GetModelOptions());
optimizedOptions.push_back(importExport);

// ...
Graph& optGraph = optNetObjPtr->pOptimizedNetworkImpl->GetGraph();

// ...
optGraph.InferTensorInfos();

using namespace optimizations;
// ...
optGraph.InferTensorInfos();

// ...
BackendSettings backendSettings(backendPreferences, deviceSpec);
auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
if (availablePreferredBackends.empty())
{
    std::stringstream failureMsg;
    failureMsg << "None of the preferred backends " << backendPreferences
               << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
    throw InvalidArgumentException(failureMsg.str());
}

TensorHandleFactoryRegistry tensorHandleFactoryRegistry;

if (options.GetReduceFp32ToFp16())
{
    // ...
}

OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr->pOptimizedNetworkImpl.get(),
                                                         /* ... */);
if (assignBackendsResult.m_Error)
{
    throw InvalidArgumentException("Failed to assign a backend to each layer");
}

// ...
OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(/* ... */,
                                                                         options.GetModelOptions(),
                                                                         /* ... */);
if (backendOptimizationResult.m_Error)
{
    throw InvalidArgumentException("Failed to apply the backend-specific optimizations");
}

if (options.GetDebugEnabled() && !options.GetDebugToFileEnabled())
{
    // ...
}
else if (options.GetDebugToFileEnabled())
{
#if !defined(ARMNN_DISABLE_FILESYSTEM)
    // ...
    ARMNN_LOG(info) << "Intermediate tensors will be written to: " << result;
    // ...
    ARMNN_LOG(warning) << "Unable to print intermediate layer outputs : " << e.what();
    // ...
#endif
}

// ...
OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
                                                               backends,
                                                               tensorHandleFactoryRegistry,
                                                               options.GetImportEnabled(),
                                                               options.GetExportEnabled(),
                                                               /* ... */);
if (strategyResult.m_Error)
{
    // ...
}

// ...
optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);
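As the excerpt shows, Optimize rejects invalid configurations (an empty backend list, GpuAcc combined with GpuFsa, Bf16 reduction) by throwing InvalidArgumentException. A minimal sketch of guarding a call against this, using the public INetwork overload documented below with hypothetical network and runtime objects:

try
{
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, {"GpuAcc", "GpuFsa"}, runtime->GetDeviceSpec());
}
catch (const armnn::InvalidArgumentException& e)
{
    // rejected: GpuAcc and GpuFsa cannot be used in the same optimized network
    std::cerr << e.what() << "\n";
}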
◆ Optimize [2/2]
Create an optimized version of the network.
- Parameters
  - network: INetwork description of the network to be optimized.
  - backendPreferences: The choice of backends, ordered by user preference.
  - deviceSpec: DeviceSpec object as queried from the runtime. See IRuntime::GetDeviceSpec().
  - messages: If there are failures or warnings, a string describing them will be added to the vector.
  - options: OptimizerOptions object with optimizer configuration options.
- Returns
  - An IOptimizedNetworkPtr interface to the optimized network; throws an exception derived from armnn::Exception if the process fails.
Definition at line 2145 of file Network.cpp.
return Optimize(inNetwork.pNetworkImpl->GetGraph(), backendPreferences, deviceSpec, options, messages);
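A minimal end-to-end sketch of the typical call sequence; the backend list and message handling are illustrative:

#include <armnn/ArmNN.hpp>
#include <iostream>

armnn::IRuntime::CreationOptions runtimeOptions;
armnn::IRuntimePtr runtime = armnn::IRuntime::Create(runtimeOptions);

armnn::INetworkPtr network = armnn::INetwork::Create();
// ... add input, hidden and output layers to 'network' ...

std::vector<std::string> messages;
armnn::IOptimizedNetworkPtr optNet =
    armnn::Optimize(*network,
                    {"CpuAcc", "CpuRef"},             // backend preference order
                    runtime->GetDeviceSpec(),
                    armnn::OptimizerOptionsOpaque(),  // default optimizer settings
                    armnn::Optional<std::vector<std::string>&>(messages));

for (const auto& message : messages)
{
    std::cerr << message << "\n";                     // surface optimizer diagnostics
}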
◆ pOptimizedNetworkImpl
std::unique_ptr< OptimizedNetworkImpl > pOptimizedNetworkImpl
The documentation for this class was generated from the following files:
- INetwork.hpp
- Network.cpp