17 #include <boost/test/unit_test.hpp> 24 namespace experimental
// AsyncThreadedEndToEndTestImpl: loads an (already optimised) network into the
// runtime, then launches `numberOfInferences` concurrent inferences — one
// std::thread and one IWorkingMemHandle per inference — and finally checks
// every produced output element against `expectedOutputData` within
// `tolerance` via BOOST_CHECK.
// NOTE(review): this listing is an extraction of the original file; several
// source lines (function name line, network optimisation, braces, thread
// joins) are missing between the numbered fragments below — do not assume
// the gaps are empty.
28 typename TInput = ResolveType <ArmnnIType>,
typename TOutput = ResolveType <ArmnnOType>>
// Per-inference input/expected-output maps, keyed by LayerBindingId.
30 const std::vector<std::map<
int, std::vector<TInput>>>& inputTensorData,
31 const std::vector<std::map<
int, std::vector<TOutput>>>& expectedOutputData,
32 std::vector<BackendId> backends,
33 const size_t numberOfInferences,
34 float tolerance = 0.000001f)
46 std::string errorMessage;
48 runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, networkProperties);
// One InputTensors/OutputTensors/output-storage/working-mem-handle slot per
// inference, so the threads below never share mutable state.
50 std::vector<InputTensors> inputTensorsVec;
51 std::vector<OutputTensors> outputTensorsVec;
52 std::vector<std::map<int, std::vector<TOutput>>> outputStorageVec;
53 std::vector<std::unique_ptr<IWorkingMemHandle>> workingMemHandles;
55 for (
unsigned int i = 0; i < numberOfInferences; ++i)
59 outputStorageVec.emplace_back(std::map<
int, std::vector<TOutput>>());
61 inputTensors.reserve(inputTensorData.size());
62 for (
auto&& it : inputTensorData[i])
// Wrap the caller's input vectors as ConstTensors (no copy: uses .data()).
64 inputTensors.push_back({it.first,
65 ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())});
68 outputTensors.reserve(expectedOutputData.size());
69 for (
auto&& it : expectedOutputData[i])
// Allocate backing storage sized like the expected data; the Tensor below
// aliases outputStorageVec[i]'s buffer so results land there.
71 std::vector<TOutput> out(it.second.size());
72 outputStorageVec[i].emplace(it.first, out);
73 outputTensors.push_back({it.first,
74 Tensor(runtime->GetOutputTensorInfo(networkId, it.first),
75 outputStorageVec[i].at(it.first).data())});
78 inputTensorsVec.push_back(inputTensors);
79 outputTensorsVec.push_back(outputTensors);
// Each async inference needs its own working-memory handle.
81 workingMemHandles.push_back(runtime->CreateWorkingMemHandle(networkId));
84 std::vector<std::thread> threads;
85 for (
unsigned int i = 0; i < numberOfInferences; ++i)
// NOTE(review): the lambda captures by reference; the per-inference locals it
// uses (workingMemHandle/inputTensors/outputTensors) are bound on lines not
// shown here — confirm in the full source that they outlive the thread.
92 threads.emplace_back([&]()
95 runtime->Execute(workingMemHandle, inputTensors, outputTensors);
// Presumably this loop joins the threads (join call elided by extraction).
99 for (
unsigned int i = 0; i < numberOfInferences; ++i)
// Verification: compare every output element of every inference.
105 for (
unsigned int i = 0; i < numberOfInferences; ++i)
107 for (
auto &&it : expectedOutputData[i])
109 std::vector<TOutput> out = outputStorageVec[i].at(it.first);
110 for (
unsigned int j = 0; j < out.size(); ++j)
112 BOOST_CHECK(Compare<ArmnnOType>(it.second[j], out[j], tolerance) ==
true);
// AsyncEndToEndTestImpl: loads a network and runs a single inference's worth
// of input data. The fragment shows two execution paths: a direct
// runtime->Execute(...) with a working-memory handle, and a scheduled path
// that queues 1000 runs via runtime->Schedule(...) with random QoS priority
// and AsyncExecutionCallback completion objects.
// NOTE(review): the function name line and the branch selecting between the
// two paths (presumably on `numThreads`) are elided by extraction — confirm
// against the full source.
122 const std::map<
int, std::vector<TInput>>& inputTensorData,
123 const std::map<
int, std::vector<TOutput>>& expectedOutputData,
124 std::vector<BackendId> backends,
125 float tolerance = 0.000001f,
126 size_t numThreads = 0)
138 std::string errorMessage;
142 runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, networkProperties);
// Wrap caller-owned input vectors as ConstTensors (aliases .data(), no copy).
145 inputTensors.reserve(inputTensorData.size());
146 for (
auto&& it : inputTensorData)
148 inputTensors.push_back({it.first,
149 ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())});
153 outputTensors.reserve(expectedOutputData.size());
154 std::map<int, std::vector<TOutput>> outputStorage;
155 for (
auto&& it : expectedOutputData)
// Output Tensors alias outputStorage's buffers so results are readable below.
157 std::vector<TOutput> out(it.second.size());
158 outputStorage.emplace(it.first, out);
159 outputTensors.push_back({it.first,
160 Tensor(runtime->GetOutputTensorInfo(networkId, it.first),
161 outputStorage.at(it.first).data())});
// Direct (non-scheduled) asynchronous execution path.
167 std::unique_ptr<IWorkingMemHandle> workingMemHandle = runtime->CreateWorkingMemHandle(networkId);
171 runtime->Execute(workingMemHandleRef, inputTensors, outputTensors);
// Scheduled execution path: 1000 queued runs, each with its own callback and
// a pseudo-random priority out of the three QosExecPriority levels.
175 std::vector<IAsyncExecutionCallbackPtr> callbacks;
178 for (
size_t i = 0; i < 1000; ++i)
180 callbacks.emplace_back(std::make_shared<AsyncExecutionCallback>());
187 runtime->Schedule(networkId,
190 static_cast<QosExecPriority>(rand()%3),
// Verification: element-wise comparison within `tolerance`.
204 for (
auto&& it : expectedOutputData)
206 std::vector<TOutput> out = outputStorage.at(it.first);
208 for (
unsigned int i = 0; i < out.size(); ++i)
210 BOOST_CHECK(Compare<ArmnnOType>(it.second[i], out[i], tolerance) ==
true);
// CreateStridedSliceNetwork: builds a minimal INetwork of the form
// Input -> StridedSlice -> Output, configured from the given begin/end/stride
// vectors and mask parameters, and returns it for the end-to-end tests above.
// NOTE(review): creation of `net`, the input/output layers and the
// TensorInfos (including use of qScale/qOffset) is elided by extraction.
215 template<
typename armnn::DataType DataType>
218 const std::vector<int>& beginData,
219 const std::vector<int>& endData,
220 const std::vector<int>& stridesData,
223 int shrinkAxisMask = 0,
224 int ellipsisMask = 0,
// Quantization parameters for the tensor infos (defaults = no quantization).
226 const float qScale = 1.0f,
227 const int32_t qOffset = 0)
229 using namespace armnn;
// Populate the StridedSliceDescriptor from the caller's slice specification.
237 stridedSliceDescriptor.
m_Begin = beginData;
238 stridedSliceDescriptor.
m_End = endData;
239 stridedSliceDescriptor.
m_Stride = stridesData;
241 stridedSliceDescriptor.
m_EndMask = endMask;
// NOTE(review): the layer name "splitter" looks like a copy-paste from a
// splitter test — it is a runtime string, so it is left untouched here.
247 IConnectableLayer* stridedSlice = net->AddStridedSliceLayer(stridedSliceDescriptor,
"splitter");
// Wire Input(slot 0) -> StridedSlice(slot 0) -> Output(slot 0).
250 Connect(input, stridedSlice, inputTensorInfo, 0, 0);
251 Connect(stridedSlice, output, outputTensorInfo, 0, 0);
// StridedSlicedEndToEndTest: end-to-end check of a 3x2x3x1 strided-slice.
// Slices out the middle "row" (begin {1,0,0,0}, end {2,2,3,1}, stride 1)
// and runs it through AsyncEndToEndTestImpl with the default numThreads (0,
// i.e. the direct-Execute path).
256 template<armnn::DataType ArmnnType>
259 using namespace armnn;
// Slice specification: take sub-tensor [1:2, 0:2, 0:3, 0:1].
264 const std::vector<int>& beginData = {1, 0, 0, 0};
265 const std::vector<int>& endData = {2, 2, 3, 1};
266 const std::vector<int>& stridesData = {1, 1, 1, 1};
269 int shrinkAxisMask = 0;
270 int ellipsisMask = 0;
274 INetworkPtr net = CreateStridedSliceNetwork<ArmnnType>(inputShape,
285 BOOST_TEST_CHECKPOINT(
"create a network");
288 std::vector<T> inputData{
289 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
291 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
293 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f
// Expected output: the middle six elements selected by the slice.
296 std::vector<T> outputExpected{
297 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f
// Single input/output binding, id 0.
300 std::map<int, std::vector<T>> inputTensorData = {{0, inputData}};
301 std::map<int, std::vector<T>> expectedOutputData = {{0, outputExpected}};
303 AsyncEndToEndTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends, 0.000001f);
// AsyncScheduledStridedSlicedEndToEndTest: same strided-slice network and
// data as StridedSlicedEndToEndTest, but executed via the scheduled path —
// the trailing argument 1 is numThreads for AsyncEndToEndTestImpl.
306 template<armnn::DataType ArmnnType>
309 using namespace armnn;
// Slice specification: take sub-tensor [1:2, 0:2, 0:3, 0:1].
314 const std::vector<int>& beginData = {1, 0, 0, 0};
315 const std::vector<int>& endData = {2, 2, 3, 1};
316 const std::vector<int>& stridesData = {1, 1, 1, 1};
319 int shrinkAxisMask = 0;
320 int ellipsisMask = 0;
324 INetworkPtr net = CreateStridedSliceNetwork<ArmnnType>(inputShape,
336 std::vector<T> inputData{
337 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
339 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
341 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f
// Expected output: the middle six elements selected by the slice.
344 std::vector<T> outputExpected{
345 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f
348 std::map<int, std::vector<T>> inputTensorData = {{0, inputData}};
349 std::map<int, std::vector<T>> expectedOutputData = {{0, outputExpected}};
// numThreads = 1 -> scheduled execution with a single thread.
351 AsyncEndToEndTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends, 0.000001f, 1);
// AsyncScheduledStridedSlicedMultiThreadedEndToEndTest: identical scenario to
// AsyncScheduledStridedSlicedEndToEndTest but with numThreads = 3, exercising
// the scheduler with multiple worker threads.
354 template<armnn::DataType ArmnnType>
357 using namespace armnn;
// Slice specification: take sub-tensor [1:2, 0:2, 0:3, 0:1].
362 const std::vector<int>& beginData = {1, 0, 0, 0};
363 const std::vector<int>& endData = {2, 2, 3, 1};
364 const std::vector<int>& stridesData = {1, 1, 1, 1};
367 int shrinkAxisMask = 0;
368 int ellipsisMask = 0;
372 INetworkPtr net = CreateStridedSliceNetwork<ArmnnType>(inputShape,
384 std::vector<T> inputData{
385 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
387 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
389 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f
// Expected output: the middle six elements selected by the slice.
392 std::vector<T> outputExpected{
393 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f
396 std::map<int, std::vector<T>> inputTensorData = {{0, inputData}};
397 std::map<int, std::vector<T>> expectedOutputData = {{0, outputExpected}};
// numThreads = 3 -> scheduled execution with three threads.
399 AsyncEndToEndTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends, 0.000001f, 3);
// StridedSlicedMultiThreadedEndToEndTest: runs TWO inferences of the same
// strided-slice network concurrently via AsyncThreadedEndToEndTestImpl
// (trailing argument 2 = numberOfInferences), each with its own input and
// expected output.
402 template<armnn::DataType ArmnnType>
405 using namespace armnn;
// Slice specification: take sub-tensor [1:2, 0:2, 0:3, 0:1].
410 const std::vector<int>& beginData = {1, 0, 0, 0};
411 const std::vector<int>& endData = {2, 2, 3, 1};
412 const std::vector<int>& stridesData = {1, 1, 1, 1};
415 int shrinkAxisMask = 0;
416 int ellipsisMask = 0;
420 INetworkPtr net = CreateStridedSliceNetwork<ArmnnType>(inputShape,
431 BOOST_TEST_CHECKPOINT(
"create a network");
// Inference #1: middle row is 3s and 4s.
434 std::vector<T> inputData1{
435 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
437 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
439 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f
442 std::vector<T> outputExpected1{ 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f };
// Inference #2: middle row differs (8s and 7s) so the two inferences are
// distinguishable — a cross-wired result would fail the comparison.
445 std::vector<T> inputData2{
446 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
448 8.0f, 8.0f, 8.0f, 7.0f, 7.0f, 7.0f,
450 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f
453 std::vector<T> outputExpected2{ 8.0f, 8.0f, 8.0f, 7.0f, 7.0f, 7.0f };
455 std::vector<std::map<int, std::vector<T>>> inputTensors;
456 std::vector<std::map<int, std::vector<T>>> outputTensors;
458 inputTensors.push_back(std::map<
int, std::vector<T>> {{0, inputData1}});
459 inputTensors.push_back(std::map<
int, std::vector<T>> {{0, inputData2}});
460 outputTensors.push_back(std::map<
int, std::vector<T>> {{0, outputExpected1}});
461 outputTensors.push_back(std::map<
int, std::vector<T>> {{0, outputExpected2}});
// Final argument 2 = number of concurrent inferences.
463 AsyncThreadedEndToEndTestImpl<ArmnnType, ArmnnType>(move(net), inputTensors, outputTensors, backends, 2);
static IRuntimePtr Create(const CreationOptions &options)
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
void AsyncThreadedEndToEndTestImpl(INetworkPtr network, const std::vector< std::map< int, std::vector< TInput >>> &inputTensorData, const std::vector< std::map< int, std::vector< TOutput >>> &expectedOutputData, std::vector< BackendId > backends, const size_t numberOfInferences, float tolerance=0.000001f)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
typename ResolveTypeImpl< DT >::Type ResolveType
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Copyright (c) 2021 ARM Limited and Contributors.
int32_t m_BeginMask
Begin mask value.
int32_t m_EndMask
End mask value.
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
void StridedSlicedMultiThreadedEndToEndTest(const std::vector< BackendId > &backends)
std::shared_ptr< IAsyncExecutionCallback > IAsyncExecutionCallbackPtr
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
int32_t m_NewAxisMask
New axis mask value.
void StridedSlicedEndToEndTest(const std::vector< BackendId > &backends)
int32_t m_EllipsisMask
Ellipsis mask value.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
void AsyncScheduledStridedSlicedEndToEndTest(const std::vector< BackendId > &backends)
std::vector< int > m_Stride
Stride values for the input that will be sliced.
INetworkPtr CreateStridedSliceNetwork(const TensorShape &inputShape, const TensorShape &outputShape, const std::vector< int > &beginData, const std::vector< int > &endData, const std::vector< int > &stridesData, int beginMask=0, int endMask=0, int shrinkAxisMask=0, int ellipsisMask=0, int newAxisMask=0, const float qScale=1.0f, const int32_t qOffset=0)
std::vector< int > m_End
End values for the input that will be sliced.
void AsyncScheduledStridedSlicedMultiThreadedEndToEndTest(const std::vector< BackendId > &backends)
A StridedSliceDescriptor for the StridedSliceLayer.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
static INetworkPtr Create(NetworkOptions networkOptions={})
void AsyncEndToEndTestImpl(INetworkPtr network, const std::map< int, std::vector< TInput >> &inputTensorData, const std::map< int, std::vector< TOutput >> &expectedOutputData, std::vector< BackendId > backends, float tolerance=0.000001f, size_t numThreads=0)