13 #include <boost/test/unit_test.hpp> 18 using namespace armnn;
// Builds a vector of `size` values of type T forming an arithmetic sequence:
// vector[i] = T(initial + increment * i).
//
// The computation is done in float and then converted to T, so T may be a
// narrow or quantized type (e.g. Half, int8) constructible from float.
//
// Fix: the original also created `std::vector<float> typeVector(size, initial)`
// which was never read — a dead allocation and fill on every call; removed.
template<typename T>
std::vector<T> GetVector(unsigned int size, float initial, float increment)
{
    std::vector<T> vector(size);

    for (unsigned int i = 0; i < size; ++i)
    {
        vector[i] = T(initial + (increment * static_cast<float>(i)));
    }
    return vector;
}
41 template<DataType ArmnnType,
typename T = ResolveType<ArmnnType>>
42 struct Convolution2dTest
45 static const bool isElementWise =
false;
51 constexpr
static const unsigned int inputSize = 48;
52 constexpr
static const unsigned int outputSize = 36;
64 std::vector<float> weightsData = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
65 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
66 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
67 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
68 std::vector<T> weightsVector = armnnUtils::QuantizedVector<T>(weightsData, scale, offset);
69 TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset);
77 template<DataType ArmnnType,
typename T = ResolveType<ArmnnType>>
78 struct DWConvolution2dTest
82 static const bool isElementWise =
false;
88 constexpr
static const unsigned int inputSize = 48;
89 constexpr
static const unsigned int outputSize = 108;
102 std::vector<float> weightsData = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
103 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
104 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
105 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
106 std::vector<T> weightsVector = armnnUtils::QuantizedVector<T>(weightsData, scale, offset);
107 TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset);
115 template<DataType ArmnnType,
typename T = ResolveType<ArmnnType>>
120 static const bool isElementWise =
false;
126 constexpr
static const unsigned int inputSize = 10;
127 constexpr
static const unsigned int outputSize = 6;
137 std::vector<float> weightsData = { 1, 2, 3, 4, 5,
140 std::vector<T> weightsVector = armnnUtils::QuantizedVector<T>(weightsData, scale, offset);
141 TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset);
149 template<DataType ArmnnType,
typename T = ResolveType<ArmnnType>>
154 static const bool isElementWise =
false;
159 constexpr
static const unsigned int inputSize = 48;
160 constexpr
static const unsigned int outputSize = 48;
173 std::vector<T> betaVector = GetVector<T>(GetOutputShape()[3], 0.0f, 0.2f);
174 std::vector<T> gammaVector = GetVector<T>(GetOutputShape()[3], 0.5f, 0.1f);
175 std::vector<T> meanVector = GetVector<T>(GetOutputShape()[3], 0.1f, 0.1f);
176 std::vector<T> varianceVector = GetVector<T>(GetOutputShape()[3], 1.0f, 0.1f);
178 const unsigned int outputChannelSize[] = { GetOutputShape()[3] };
188 template<DataType ArmnnType,
typename T = ResolveType<ArmnnType>>
192 static const bool isElementWise =
true;
197 constexpr
static const unsigned int inputSize = 48;
198 constexpr
static const unsigned int outputSize = 48;
212 template<DataType ArmnnType,
typename T = ResolveType<ArmnnType>>
216 static const bool isElementWise =
true;
221 constexpr
static const unsigned int inputSize = 48;
222 constexpr
static const unsigned int outputSize = 48;
236 template<DataType ArmnnType,
typename T = ResolveType<ArmnnType>>
240 static const bool isElementWise =
true;
245 constexpr
static const unsigned int inputSize = 48;
246 constexpr
static const unsigned int outputSize = 48;
260 template<DataType ArmnnType,
typename T = ResolveType<ArmnnType>>
264 static const bool isElementWise =
true;
269 constexpr
static const unsigned int inputSize = 48;
270 constexpr
static const unsigned int outputSize = 48;
284 template<
typename LayerTest,
287 float scale, int32_t offset)
299 IConnectableLayer* activationLayer = network->AddActivationLayer(activationDescriptor,
303 IConnectableLayer* output2Layer = preventFusing?network->AddOutputLayer(1):
nullptr;
306 TensorInfo inputInfo(LayerTest::GetInputShape(), ArmnnType, scale, offset);
307 TensorInfo outputInfo(LayerTest::GetOutputShape(), ArmnnType, scale, offset);
312 activationLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
317 activationLayer->GetOutputSlot(0).Connect(outputLayer->
GetInputSlot(0));
319 if (LayerTest::isElementWise)
331 template<
typename LayerTest,
336 float scale = 1.f, int32_t offset=0)
340 INetworkPtr networkFused = CreatNetwork<LayerTest, ArmnnType>(activationDescriptor,
false, scale, offset);
350 auto checkFusedConv2d = [](
const Layer*
const layer)->
bool {
351 return IsLayerOfType<LayerType>(layer) &&
352 (layer->GetNameStr() ==
"fused-activation-into-receiverLayer");
355 BOOST_CHECK(3 == graphFused.GetNumLayers());
358 &IsLayerOfType<InputLayer>,
360 &IsLayerOfType<OutputLayer>));
364 BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) ==
Status::Success);
367 std::vector<float> data = GetVector<float>(LayerTest::inputSize, 1.0f, 0.1f);
368 std::vector<T> inputDataFused = armnnUtils::QuantizedVector<T>(data, scale, offset);
369 std::vector<T> outputDataFused(LayerTest::outputSize);
372 {0,
ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputDataFused.data())}};
374 {0,
Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
377 BOOST_TEST(run->EnqueueWorkload(networkIdentifier, inputTensorsFused, outputTensorsFused) ==
Status::Success);
381 INetworkPtr networkNotFused = CreatNetwork<LayerTest, ArmnnType>(activationDescriptor,
true, scale, offset);
391 BOOST_CHECK(5 == graphNotFused.GetNumLayers());
393 graphNotFused.cend(),
394 &IsLayerOfType<InputLayer>,
395 &IsLayerOfType<LayerType>,
396 &IsLayerOfType<ActivationLayer>,
397 &IsLayerOfType<OutputLayer>,
398 &IsLayerOfType<OutputLayer>));
402 BOOST_TEST(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) ==
Status::Success);
405 std::vector<T> inputDataNotFused = armnnUtils::QuantizedVector<T>(data, scale, offset);
406 std::vector<T> outputDataNotFused(LayerTest::outputSize);
407 std::vector<T> outputData2NotFused(LayerTest::outputSize);
410 {0,
ConstTensor(runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0), inputDataNotFused.data())}};
412 {0,
Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 0), outputDataNotFused.data())},
413 {1,
Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
416 BOOST_TEST(runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused)
420 for (
unsigned int n = 0; n < outputDataFused.size(); ++n)
422 BOOST_CHECK_CLOSE(static_cast<float>(outputDataFused[n]), static_cast<float>(outputDataNotFused[n]),
427 template<
typename LayerTest,
432 float scale = 1.f, int32_t offset = 0)
438 INetworkPtr networkFused = CreatNetwork<LayerTest, ArmnnType>(activationDescriptor,
false, scale, offset);
448 BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) ==
Status::Success);
451 std::vector<float> data = GetVector<float>(LayerTest::inputSize, 1.0f, 0.1f);
452 std::vector<T> inputDataFused = armnnUtils::QuantizedVector<T>(data, scale, offset);
453 std::vector<T> outputDataFused(LayerTest::outputSize);
456 {0,
ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputDataFused.data())}};
458 {0,
Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
461 run->EnqueueWorkload(networkIdentifier, inputTensorsFused, outputTensorsFused);
465 catch (
const std::exception& e)
467 std::cerr << e.what() << std::endl;
476 using namespace armnn;
477 #if defined(ARMCOMPUTENEON_ENABLED) 484 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>,
DataType::Float32>
492 FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>,
DataType::Float32>
500 FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>,
DataType::Float32>
508 FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>,
DataType::Float32>
517 activationDescriptor.
m_A = 1.0f;
518 activationDescriptor.
m_B = -1.0f;
520 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>,
DataType::Float32>
527 activationDescriptor.
m_A = 1.0f;
528 activationDescriptor.
m_B = -1.0f;
530 FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::Float32 > ,
DataType::Float32 >
537 activationDescriptor.
m_A = 1.0f;
538 activationDescriptor.
m_B = -1.0f;
540 FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>,
DataType::Float32>
547 activationDescriptor.
m_A = 1.0f;
548 activationDescriptor.
m_B = -1.0f;
550 FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>,
DataType::Float32>
560 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmU8>,
DataType::QAsymmU8>
568 FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::QAsymmU8>,
DataType::QAsymmU8>
576 FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::QAsymmU8>,
DataType::QAsymmU8>
585 activationDescriptor.
m_A = 6.0f;
586 activationDescriptor.
m_B = 0.0f;
588 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmS8>,
DataType::QAsymmS8>
595 activationDescriptor.
m_A = 6.0f;
596 activationDescriptor.
m_B = 0.0f;
598 FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::QAsymmS8 > ,
DataType::QAsymmS8 >
605 activationDescriptor.
m_A = 6.0f;
606 activationDescriptor.
m_B = 0.0f;
608 FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::QAsymmS8>,
DataType::QAsymmS8>
618 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>,
DataType::Float32>
628 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>,
DataType::Float32>
636 for (
int i = 0; i != 12; ++i)
639 activationDescriptor.
m_A = 1.0f;
640 activationDescriptor.
m_B = -1.0f;
642 (activationDescriptor,
Compute::CpuAcc)),
"Convolution + Activation function " << i);
644 (activationDescriptor,
Compute::CpuAcc)),
"DepthwiseConvolution + Activation function " << i);
646 (activationDescriptor,
Compute::CpuAcc)),
"FullyConnected + Activation function " << i);
648 (activationDescriptor,
Compute::CpuAcc)),
"BatchNorm + Activation function " << i);
654 for (
int i = 0; i != 12; ++i)
657 activationDescriptor.
m_A = 1.0f;
658 activationDescriptor.
m_B = -1.0f;
660 (activationDescriptor,
Compute::CpuAcc)),
"Convolution + Activation function " << i);
662 (activationDescriptor,
Compute::CpuAcc)),
"DepthwiseConvolution + Activation function " << i);
664 (activationDescriptor,
Compute::CpuAcc)),
"FullyConnected + Activation function " << i);
666 (activationDescriptor,
Compute::CpuAcc)),
"BatchNorm + Activation function " << i);
675 (activationDescriptor,
Compute::CpuAcc, 1.f / 256.f, 0)),
"Convolution + Activation function " <<
676 static_cast<int>(activationDescriptor.
m_Function));
678 (activationDescriptor,
Compute::CpuAcc, 1.f / 256.f, 0)),
"FullyConnected + Activation function " <<
679 static_cast<int>(activationDescriptor.
m_Function));
683 (activationDescriptor,
Compute::CpuAcc, 1.f / 128.f, 128)),
"Convolution + Activation function " <<
684 static_cast<int>(activationDescriptor.
m_Function));
686 (activationDescriptor,
Compute::CpuAcc, 1.f / 128.f, 128)),
"FullyConnected + Activation function " <<
687 static_cast<int>(activationDescriptor.
m_Function));
691 (activationDescriptor,
Compute::CpuAcc)),
"Convolution + Activation function " <<
692 static_cast<int>(activationDescriptor.
m_Function));
694 (activationDescriptor,
Compute::CpuAcc)),
"FullyConnected + Activation function " <<
695 static_cast<int>(activationDescriptor.
m_Function));
698 activationDescriptor.
m_A = 1.0f;
699 activationDescriptor.
m_B = -1.0f;
701 (activationDescriptor,
Compute::CpuAcc)),
"Convolution + Activation function " <<
702 static_cast<int>(activationDescriptor.
m_Function));
704 (activationDescriptor,
Compute::CpuAcc)),
"FullyConnected + Activation function " <<
705 static_cast<int>(activationDescriptor.
m_Function));
709 (activationDescriptor,
Compute::CpuAcc)),
"Convolution + Activation function " <<
710 static_cast<int>(activationDescriptor.
m_Function));
712 (activationDescriptor,
Compute::CpuAcc)),
"FullyConnected + Activation function " <<
713 static_cast<int>(activationDescriptor.
m_Function));
717 #if defined(ARMCOMPUTECL_ENABLED) 724 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>,
DataType::Float32>
732 FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>,
DataType::Float32>
740 FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>,
DataType::Float32>
748 FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>,
DataType::Float32>
756 FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>,
DataType::Float32>
764 FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>,
DataType::Float32>
772 FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>,
DataType::Float32>
780 FuseActivationIntoPreviousLayerTest<DivisionTest<DataType::Float32>,
DataType::Float32>
789 activationDescriptor.
m_A = 1.0f;
790 activationDescriptor.
m_B = -1.0f;
792 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>,
DataType::Float32>
799 activationDescriptor.
m_A = 1.0f;
800 activationDescriptor.
m_B = -1.0f;
802 FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>,
DataType::Float32>
809 activationDescriptor.
m_A = 1.0f;
810 activationDescriptor.
m_B = -1.0f;
812 FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>,
DataType::Float32>
819 activationDescriptor.
m_A = 1.0f;
820 activationDescriptor.
m_B = -1.0f;
822 FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>,
DataType::Float32>
829 activationDescriptor.
m_A = 1.0f;
830 activationDescriptor.
m_B = -1.0f;
832 FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>,
DataType::Float32>
839 activationDescriptor.
m_A = 1.0f;
840 activationDescriptor.
m_B = -1.0f;
842 FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>,
DataType::Float32>
849 activationDescriptor.
m_A = 1.0f;
850 activationDescriptor.
m_B = -1.0f;
852 FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>,
DataType::Float32>
859 activationDescriptor.
m_A = 1.0f;
860 activationDescriptor.
m_B = -1.0f;
862 FuseActivationIntoPreviousLayerTest<DivisionTest<DataType::Float32>,
DataType::Float32>
872 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float16>,
DataType::Float16>
880 FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float16>,
DataType::Float16>
888 FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float16>,
DataType::Float16>
896 FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float16>,
DataType::Float16>
904 FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float16>,
DataType::Float16>
912 FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float16>,
DataType::Float16>
920 FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float16>,
DataType::Float16>
928 FuseActivationIntoPreviousLayerTest<DivisionTest<DataType::Float16>,
DataType::Float16>
938 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmU8>,
DataType::QAsymmU8>
946 FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::QAsymmU8>,
DataType::QAsymmU8>
954 FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::QAsymmU8>,
DataType::QAsymmU8>
963 activationDescriptor.
m_A = 6.0f;
964 activationDescriptor.
m_B = 0.0f;
966 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmS8>,
DataType::QAsymmS8>
973 activationDescriptor.
m_A = 6.0f;
974 activationDescriptor.
m_B = 0.0f;
976 FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::QAsymmS8 > ,
DataType::QAsymmS8 >
983 activationDescriptor.
m_A = 6.0f;
984 activationDescriptor.
m_B = 0.0f;
986 FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::QAsymmS8>,
DataType::QAsymmS8>
996 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>,
DataType::Float32>
1004 FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>,
DataType::Float32>
1012 FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>,
DataType::Float32>
1020 FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>,
DataType::Float32>
1028 FuseActivationIntoPreviousLayerTest<DivisionTest<DataType::Float32>,
DataType::Float32>
1038 FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>,
DataType::Float32>
1046 FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>,
DataType::Float32>
1054 FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>,
DataType::Float32>
1062 FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>,
DataType::Float32>
1070 FuseActivationIntoPreviousLayerTest<DivisionTest<DataType::Float32>,
DataType::Float32>
1078 for (
int i = 0; i != 12; ++i)
1081 activationDescriptor.
m_A = 1.0f;
1082 activationDescriptor.
m_B = -1.0f;
1086 (activationDescriptor,
Compute::GpuAcc)),
"Convolution + Activation function " << i);
1088 (activationDescriptor,
Compute::GpuAcc)),
"DepthwiseConvolution + Activation function " << i);
1090 (activationDescriptor,
Compute::GpuAcc)),
"FullyConnected + Activation function " << i);
1092 (activationDescriptor,
Compute::GpuAcc)),
"BatchNorm + Activation function " << i);
1094 (activationDescriptor,
Compute::GpuAcc)),
"Multiplication + Activation function " << i);
1096 (activationDescriptor,
Compute::GpuAcc)),
"Addition + Activation function " << i);
1098 (activationDescriptor,
Compute::GpuAcc)),
"Subtraction + Activation function " << i);
1100 (activationDescriptor,
Compute::GpuAcc)),
"Division + Activation function " << i);
1107 for (
int i = 0; i != 12; ++i)
1110 activationDescriptor.
m_A = 1.0f;
1111 activationDescriptor.
m_B = -1.0f;
1115 (activationDescriptor,
Compute::GpuAcc)),
"Convolution + Activation function " << i);
1117 (activationDescriptor,
Compute::GpuAcc)),
"Depthwise + Activation function " << i);
1119 (activationDescriptor,
Compute::GpuAcc)),
"FullyConnected + Activation function " << i);
1121 (activationDescriptor,
Compute::GpuAcc)),
"BatchNorm + Activation function " << i);
1123 (activationDescriptor,
Compute::GpuAcc)),
"Multiplication + Activation function " << i);
1125 (activationDescriptor,
Compute::GpuAcc)),
"Addition + Activation function " << i);
1127 (activationDescriptor,
Compute::GpuAcc)),
"Subtraction + Activation function " << i);
1129 (activationDescriptor,
Compute::GpuAcc)),
"Division + Activation function " << i);
1139 (activationDescriptor,
Compute::GpuAcc, 1.f / 256.f, 0)),
"Convolution + Activation function " <<
1140 static_cast<int>(activationDescriptor.
m_Function));
1142 (activationDescriptor,
Compute::GpuAcc, 1.f / 256.f, 0)),
"FullyConnected + Activation function " <<
1143 static_cast<int>(activationDescriptor.
m_Function));
1147 (activationDescriptor,
Compute::GpuAcc, 1.f / 128.f, 128)),
"Convolution + Activation function " <<
1148 static_cast<int>(activationDescriptor.
m_Function));
1150 (activationDescriptor,
Compute::GpuAcc, 1.f / 128.f, 128)),
"FullyConnected + Activation function " <<
1151 static_cast<int>(activationDescriptor.
m_Function));
1155 (activationDescriptor,
Compute::GpuAcc)),
"Convolution + Activation function " <<
1156 static_cast<int>(activationDescriptor.
m_Function));
1158 (activationDescriptor,
Compute::GpuAcc)),
"FullyConnected + Activation function " <<
1159 static_cast<int>(activationDescriptor.
m_Function));
1162 activationDescriptor.
m_A = 1.0f;
1163 activationDescriptor.
m_B = -1.0f;
1165 (activationDescriptor,
Compute::GpuAcc)),
"Convolution + Activation function " <<
1166 static_cast<int>(activationDescriptor.
m_Function));
1168 (activationDescriptor,
Compute::GpuAcc)),
"FullyConnected + Activation function " <<
1169 static_cast<int>(activationDescriptor.
m_Function));
1173 (activationDescriptor,
Compute::GpuAcc)),
"Convolution + Activation function " <<
1174 static_cast<int>(activationDescriptor.
m_Function));
1176 (activationDescriptor,
Compute::GpuAcc)),
"FullyConnected + Activation function " <<
1177 static_cast<int>(activationDescriptor.
m_Function));
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
IConnectableLayer * AddSubtractionLayer(const char *name=nullptr)
Adds a subtraction layer to the network.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
static IRuntimePtr Create(const CreationOptions &options)
This layer represents a batch normalization operation.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
bool m_BiasEnabled
Enable/disable bias.
IConnectableLayer * AddFullyConnectedLayer(const FullyConnectedDescriptor &fullyConnectedDescriptor, const Optional< ConstTensor > &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)
Adds a fully connected layer to the network.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)
Adds a 2D depthwise convolution layer to the network.
This layer represents a depthwise convolution 2d operation.
A Convolution2dDescriptor for the Convolution2dLayer.
LayerTestResult< float, 4 > DivisionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
typename ResolveTypeImpl< DT >::Type ResolveType
Main network class which provides the interface for building up a neural network. ...
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)
Adds a 2D convolution layer to the network.
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
IConnectableLayer * AddDivisionLayer(const char *name=nullptr)
Adds a division layer to the network.
LayerTestResult< float, 4 > AdditionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 2 > FullyConnectedTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constantWeights)
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Compute
The Compute enum is now deprecated and it is now being replaced by BackendId.
This layer represents a fully connected operation.
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
std::vector< T > GetVector(unsigned int size, float initial, float increment)
void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescriptor, float tolerance, Compute backendId, float scale=1.f, int32_t offset=0)
GPU Execution: OpenCL: ArmCompute.
BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
An ActivationDescriptor for the ActivationLayer.
min(a, max(b, input)) ReLu1 & ReLu6.
IConnectableLayer * AddBatchNormalizationLayer(const BatchNormalizationDescriptor &desc, const ConstTensor &mean, const ConstTensor &variance, const ConstTensor &beta, const ConstTensor &gamma, const char *name=nullptr)
Adds a batch normalization layer to the network.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
This layer represents an addition operation.
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
IConnectableLayer * AddAdditionLayer(const char *name=nullptr)
Adds an addition layer to the network.
BOOST_AUTO_TEST_SUITE_END()
This layer represents a subtraction operation.
bool FuseActivationSimpleTest(ActivationDescriptor activationDescriptor, Compute backendId, float scale=1.f, int32_t offset=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
LayerTestResult< float, 4 > SubtractionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
CPU Execution: NEON: ArmCompute.
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
This layer represents a division operation.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
This layer represents a convolution 2d operation.
IConnectableLayer * AddMultiplicationLayer(const char *name=nullptr)
Adds a multiplication layer to the network.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
virtual int Connect(IInputSlot &destination)=0
This layer represents a multiplication operation.
INetworkPtr CreatNetwork(ActivationDescriptor activationDescriptor, bool preventFusing, float scale, int32_t offset)
static INetworkPtr Create(NetworkOptions networkOptions={})
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
LayerTestResult< float, 4 > MultiplicationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...