24.02
MemSyncWorkload.cpp
Go to the documentation of this file.
1
//
2
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3
// SPDX-License-Identifier: MIT
4
//
5
6
#include <
ResolveType.hpp
>
7
8
#include <
backendsCommon/MemSyncWorkload.hpp
>
9
#include <
armnn/backends/TensorHandle.hpp
>
10
11
#include <cstring>
12
13
namespace
armnn
14
{
15
16
/// Builds a memory-sync workload from its queue descriptor.
/// Forwards descriptor/info to the BaseWorkload and caches the single
/// input tensor handle that Execute() will later synchronise.
/// @param descriptor  Queue descriptor; m_Inputs[0] holds the tensor handle to sync.
/// @param info        Layer tensor metadata, passed through to BaseWorkload.
SyncMemGenericWorkload::SyncMemGenericWorkload(const MemSyncQueueDescriptor& descriptor,
                                               const WorkloadInfo& info)
    : BaseWorkload<MemSyncQueueDescriptor>(descriptor, info)
{
    // Non-owning pointer: the descriptor's first (and only expected) input.
    m_TensorHandle = descriptor.m_Inputs[0];
}
22
23
void
SyncMemGenericWorkload::Execute
()
const
24
{
25
ARMNN_SCOPED_PROFILING_EVENT
(
Compute::Undefined
,
"SyncMemGeneric_Execute"
);
26
m_TensorHandle->
Map
(
true
);
27
m_TensorHandle->
Unmap
();
28
}
29
30
/// Async-execution variant of the memory sync.
/// @param executionData  Carries a type-erased pointer (m_Data) which, on this
///                       code path, is a WorkingMemDescriptor* — TODO confirm
///                       callers always populate it as such before invoking.
void SyncMemGenericWorkload::ExecuteAsync(ExecutionData& executionData)
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "SyncMemGeneric_Execute_WorkingMemDescriptor");

    // Recover the working-memory descriptor from the opaque payload.
    auto* memDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    ITensorHandle* inputHandle = memDescriptor->m_Inputs[0];
    inputHandle->Map(true);  // blocking map forces the pending transfer to finish
    inputHandle->Unmap();    // no data access needed; drop the mapping
}
38
39
}
//namespace armnn
armnn::Compute::Undefined
@ Undefined
armnn::experimental::ExecutionData::m_Data
void * m_Data
Definition:
ExecutionData.hpp:16
armnn::SyncMemGenericWorkload::ExecuteAsync
void ExecuteAsync(ExecutionData &executionData) override
Definition:
MemSyncWorkload.cpp:30
MemSyncWorkload.hpp
ResolveType.hpp
ARMNN_SCOPED_PROFILING_EVENT
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition:
Profiling.hpp:220
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition:
WorkloadInfo.hpp:16
armnn::ITensorHandle::Unmap
virtual void Unmap() const =0
Unmap the tensor data.
armnn::BoostLogSeverityMapping::info
@ info
armnn::BaseWorkload
Definition:
Workload.hpp:33
armnn::SyncMemGenericWorkload::SyncMemGenericWorkload
SyncMemGenericWorkload(const MemSyncQueueDescriptor &descriptor, const WorkloadInfo &info)
Definition:
MemSyncWorkload.cpp:16
armnn::SyncMemGenericWorkload::Execute
void Execute() const override
Definition:
MemSyncWorkload.cpp:23
TensorHandle.hpp
armnn::experimental::WorkingMemDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition:
WorkingMemDescriptor.hpp:20
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition:
01_00_quick_start.dox:6
armnn::experimental::WorkingMemDescriptor
Definition:
WorkingMemDescriptor.hpp:18
armnn::MemSyncQueueDescriptor
Definition:
WorkloadData.hpp:99
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition:
WorkloadData.hpp:26
armnn::ITensorHandle::Map
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
armnn::experimental::ExecutionData
Definition:
ExecutionData.hpp:14
src
backends
backendsCommon
MemSyncWorkload.cpp
Generated on Wed Feb 14 2024 16:36:15 for Arm NN by
1.8.17