|
template<> |
void | configure_conv_function< NEGEMMConv2d, Tensor > (NEGEMMConv2d &func, Tensor *src, const Tensor *weights, const Tensor *bias, Tensor *dst, const PadStrideInfo &info, const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups) |
|
| DATA_TEST_CASE (ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32), TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32), TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32), TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32) }), make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32), TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32), TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32), TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16) })), make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32), TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32), TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32) })), make("ConvInfo", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(3, 2, 1, 0) })), make("FastMath", { true, true, false, false })), make("Expected", { ConvolutionMethod::WINOGRAD, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM })), input_info, weights_info, output_info, conv_info, fast_math, expected) |
|
| TEST_SUITE_END ()

| FIXTURE_DATA_TEST_CASE (RunSmall, ActivationValidationQuantizedFixture< CLTensor, CLAccessor, CLActivationLayer, T >)
| [CLActivationLayer Test snippet] More...
|
|
| TEST_CASE (MemoryInjection, framework::DatasetMode::ALL) |
| Test case for memory injection in cpu::CpuWinogradConv2d. More...
|
|
| TEST_CASE (MultipleExecutionWithConfigure, framework::DatasetMode::ALL) |
| Test case verifying that NEWinogradConvolutionLayer produces correct results when configured once and executed multiple times. More...
|
|
| DATA_TEST_CASE (SupportedKernels, framework::DatasetMode::ALL, zip(make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 8U), 1, DataType::F32, DataLayout::NHWC), TensorInfo(TensorShape(1U, 3U, 2U, 8U), 1, DataType::F32, DataLayout::NHWC), TensorInfo(TensorShape(3U, 1U, 2U, 8U), 1, DataType::F32, DataLayout::NCHW), TensorInfo(TensorShape(5U, 5U, 2U, 8U), 1, DataType::F32, DataLayout::NCHW), TensorInfo(TensorShape(1U, 5U, 2U, 8U), 1, DataType::F32, DataLayout::NHWC), TensorInfo(TensorShape(5U, 1U, 2U, 8U), 1, DataType::F32, DataLayout::NCHW), TensorInfo(TensorShape(7U, 7U, 2U, 8U), 1, DataType::F32, DataLayout::NCHW), TensorInfo(TensorShape(1U, 7U, 2U, 8U), 1, DataType::F32, DataLayout::NHWC), TensorInfo(TensorShape(7U, 1U, 2U, 8U), 1, DataType::F32, DataLayout::NHWC), TensorInfo(TensorShape(2U, 2U, 2U, 8U), 1, DataType::F32, DataLayout::NHWC), TensorInfo(TensorShape(5U, 2U, 2U, 8U), 1, DataType::F32, DataLayout::NHWC), TensorInfo(TensorShape(3U, 6U, 2U, 8U), 1, DataType::F32, DataLayout::NCHW), TensorInfo(TensorShape(3U, 3U, 2U, 8U), 1, DataType::F16, DataLayout::NHWC), TensorInfo(TensorShape(1U, 3U, 2U, 8U), 1, DataType::F16, DataLayout::NHWC), TensorInfo(TensorShape(3U, 1U, 2U, 8U), 1, DataType::F16, DataLayout::NCHW), TensorInfo(TensorShape(5U, 5U, 2U, 8U), 1, DataType::F16, DataLayout::NCHW), TensorInfo(TensorShape(1U, 5U, 2U, 8U), 1, DataType::F16, DataLayout::NHWC), TensorInfo(TensorShape(5U, 1U, 2U, 8U), 1, DataType::F16, DataLayout::NCHW), TensorInfo(TensorShape(7U, 7U, 2U, 8U), 1, DataType::F16, DataLayout::NCHW), TensorInfo(TensorShape(1U, 7U, 2U, 8U), 1, DataType::F16, DataLayout::NHWC), TensorInfo(TensorShape(7U, 1U, 2U, 8U), 1, DataType::F16, DataLayout::NHWC), TensorInfo(TensorShape(2U, 2U, 2U, 8U), 1, DataType::F16, DataLayout::NHWC), TensorInfo(TensorShape(5U, 2U, 2U, 8U), 1, DataType::F16, DataLayout::NHWC), TensorInfo(TensorShape(3U, 6U, 2U, 8U), 1, DataType::F16, DataLayout::NCHW), }), make("Expected", { true, true, true, true, true, true, 
false, true, true, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, })), weights_info_const, expected_const) |
|
| FIXTURE_DATA_TEST_CASE (RunSmall, NEWinogradConvolutionLayerFixture< float >, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallWinogradConvolutionLayer1x3Dataset(), make("DataType", { DataType::F32 }), ActivationFunctionsDataset, make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) |
|
| FIXTURE_DATA_TEST_CASE (RunMixedDataLayout, NEWinogradConvolutionLayerMixedDataLayoutFixture< float >, framework::DatasetMode::PRECOMMIT, combine(make("Input", TensorShape(8U, 8U, 32U)), make("Weight", TensorShape(1U, 3U, 32U, 1U)), make("Bias", TensorShape(1U)), make("Output", TensorShape(8U, 6U, 1U)), make("PadStrideInfo", PadStrideInfo(1, 1, 0, 0)), make("Dilation", Size2D(1U, 1U)), make("DataType", { DataType::F32 }), ActivationFunctionsDataset, make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) |
|
| FIXTURE_DATA_TEST_CASE (RunLarge, NEWinogradConvolutionLayerFixture< float >, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradConvolutionLayer1x3Dataset(), make("DataType", { DataType::F32 }), make("ActivationInfo", { ActivationLayerInfo() }), make("DataLayout", { DataLayout::NHWC }))) |
|
| combine (datasets::SmallWinogradConvolutionLayer3x1Dataset(), make("DataType", { DataType::F32 }), ActivationFunctionsDataset, make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) |
|
| FIXTURE_DATA_TEST_CASE (RunLarge, NEWinogradConvolutionLayerFixture< float >, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradConvolutionLayer3x1Dataset(), make("DataType", { DataType::F32 }), make("ActivationInfo", { ActivationLayerInfo() }), make("DataLayout", { DataLayout::NHWC }))) |
|
| combine (datasets::SmallWinogradConvolutionLayer1x5Dataset(), make("DataType", { DataType::F32 }), ActivationFunctionsDataset, make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) |
|
| FIXTURE_DATA_TEST_CASE (RunLarge, NEWinogradConvolutionLayerFixture< float >, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradConvolutionLayer1x5Dataset(), make("DataType", { DataType::F32 }), make("ActivationInfo", { ActivationLayerInfo() }), make("DataLayout", { DataLayout::NHWC }))) |
|
| combine (datasets::SmallWinogradConvolutionLayer5x1Dataset(), make("DataType", { DataType::F32 }), ActivationFunctionsDataset, make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) |
|
| FIXTURE_DATA_TEST_CASE (RunLarge, NEWinogradConvolutionLayerFixture< float >, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradConvolutionLayer5x1Dataset(), make("DataType", { DataType::F32 }), make("ActivationInfo", { ActivationLayerInfo() }), make("DataLayout", { DataLayout::NHWC }))) |
|
| combine (datasets::SmallWinogradConvolutionLayer7x1Dataset(), make("DataType", { DataType::F32 }), ActivationFunctionsDataset, make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) |
|
| FIXTURE_DATA_TEST_CASE (RunLarge, NEWinogradConvolutionLayerFixture< float >, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeWinogradConvolutionLayer7x1Dataset(), make("DataType", { DataType::F32 })), make("ActivationInfo", { ActivationLayerInfo() })), make("DataLayout", { DataLayout::NHWC }))) |
|
| combine (datasets::SmallWinogradConvolutionLayer1x7Dataset(), make("DataType", { DataType::F32 }), ActivationFunctionsDataset, make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) |
|
| FIXTURE_DATA_TEST_CASE (RunLarge, NEWinogradConvolutionLayerFixture< float >, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradConvolutionLayer1x7Dataset(), make("DataType", { DataType::F32 }), make("ActivationInfo", { ActivationLayerInfo() }), make("DataLayout", { DataLayout::NHWC }))) |
|
| combine (datasets::SmallWinogradConvolutionLayer3x3Dataset(), make("DataType", { DataType::F32 }), ActivationFunctionsDataset, make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) |
|
| FIXTURE_DATA_TEST_CASE (RunActivations, NEWinogradConvolutionLayerFixture< float >, framework::DatasetMode::DISABLED, combine(make("Input", TensorShape(3U, 3U, 32U)), make("Weight", TensorShape(3U, 3U, 32U, 4U)), make("Bias", TensorShape(4U)), make("Output", TensorShape(1U, 1U, 4U)), make("PadStrideInfo", PadStrideInfo(1, 1, 0, 0)), make("Dilation", Size2D(1U, 1U)), make("DataType", { DataType::F32 }), ActivationFunctionsDatasetNightly, make("DataLayout", { DataLayout::NHWC }))) |
| It's enough to run the activations for a single weight/input combination and data type, because the activation function is applied on top of the winograd output as a separate operator. TODO: Enable after COMPMID-6573 is resolved. More...
|
|
| FIXTURE_DATA_TEST_CASE (RunLarge, NEWinogradConvolutionLayerFixture< float >, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(), make("DataType", { DataType::F32 }), make("ActivationInfo", { ActivationLayerInfo() }), make("DataLayout", { DataLayout::NHWC }))) |
|
| combine (datasets::SmallWinogradConvolutionLayer5x5Dataset(), make("DataType", { DataType::F32 }), ActivationFunctionsDataset, make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) |
|
| FIXTURE_DATA_TEST_CASE (RunLarge, NEWinogradConvolutionLayerFixture< float >, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradConvolutionLayer5x5Dataset(), make("DataType", { DataType::F32 }), make("ActivationInfo", { ActivationLayerInfo() }), make("DataLayout", { DataLayout::NHWC }))) |
|
| combine (framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(), datasets::SmallWinogradConvolutionLayer5x5Dataset()), make("DataType", { DataType::F32 }), ActivationFunctionsDataset, make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) |
|
| FIXTURE_DATA_TEST_CASE (RunSmall, NEGEMMConvolutionLayerFixture< float >, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), ActivationFunctionsDataset)) |
|
| FIXTURE_DATA_TEST_CASE (RunMixedDataLayout, NEGEMMConvolutionLayerMixedDataLayoutFixture< float >, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine(framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), framework::dataset::make("Bias", TensorShape(2U))), framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), framework::dataset::make("Dilation", Size2D(1, 1))), framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), ActivationFunctionsDataset)) |
|
| FIXTURE_DATA_TEST_CASE (RunPaddedWeights, NEGEMMConvolutionLayerPaddedWeightsFixture< float >, framework::DatasetMode::ALL, combine(datasets::SmallConvolutionLayerDataset(), framework::dataset::make("ReshapeWeights", { true }), framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataLayout", { DataLayout::NHWC }))) |
| Padded weights CpuGemmConv2d uses two different paths for reshaping the weights based on if the weight tensor has holes (a common way to have "holes" in tensor is via extended paddings) More...
|
|
| FIXTURE_DATA_TEST_CASE (RunSmall, NEGEMMConvolutionLayerQuantizedFixture< uint8_t >, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), framework::dataset::make("QuantizationInfoIfActivationEnabled", { QuantizationInfo(2.f/255.f, 10) })), QuantizedActivationFunctionsDataset)) |
|
| FIXTURE_DATA_TEST_CASE (RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixture< uint8_t >, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), framework::dataset::make("Bias", TensorShape(2U))), framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), framework::dataset::make("Dilation", Size2D(1, 1))), framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), framework::dataset::make("QuantizationInfoIfActivationEnabled", { QuantizationInfo(2.f/255.f, 10) })), QuantizedActivationFunctionsDataset)) |
|
| combine (combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), framework::dataset::make("QuantizationInfoIfActivationEnabled", { QuantizationInfo(0.01f, -10) })), QuantizedActivationFunctionsDataset)) |
|
| FIXTURE_DATA_TEST_CASE (RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixture< int8_t >, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), framework::dataset::make("Bias", TensorShape(2U))), framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), framework::dataset::make("Dilation", Size2D(1, 1))), framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), framework::dataset::make("QuantizationInfoIfActivationEnabled", { QuantizationInfo(2.f/255.f, 10) })), QuantizedActivationFunctionsDataset)) |
|
| combine (combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", { DataType::QASYMM8 })), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), QuantizationData), QuantizedActivationFunctionsDataset), framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL }))) |
|
| FIXTURE_DATA_TEST_CASE (RunSmallSigned, NEGEMMConvolutionLayerQuantizedPerChannelFixture< int8_t >, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), QuantizationData), QuantizedActivationFunctionsDataset), framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL }))) |
|
| FIXTURE_DATA_TEST_CASE (RunSmall, NEDirectGEMMConv2dLayerFixture< float >, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NHWC })), ActivationFunctionsDataset)) |
|