// NOTE(review): this file is a garbled extract of an Arm Compute Library
// DetectionPostProcessLayer test; many original source lines are missing
// (see the gaps in the embedded line numbers), so the code below is
// fragmentary. Comments describe intent only — confirm against upstream.
30 #include "tests/datasets/ShapeDatasets.h" 44 template <
typename U,
typename T>
// Copy the raw values of `v` directly into the tensor's backing memory,
// with no quantization. The tensor must already be allocated and large
// enough to hold sizeof(T) * v.size() bytes.
45 inline void fill_tensor(U &&tensor,
const std::vector<T> &v)
47 std::memcpy(tensor.data(), v.data(),
sizeof(T) * v.size());
49 template <
typename U,
typename T>
// Quantize each float value in `v` using the tensor's own QuantizationInfo
// and copy the resulting uint8 buffer into the tensor's backing memory.
// NOTE(review): the per-element quantization loop (original lines 55-58) is
// missing from this extract — presumably it appends quantize_qasymm8(value,
// qi) for each element of `v`; confirm against the upstream test file.
50 inline void quantize_and_fill_tensor(U &&tensor,
const std::vector<T> &v)
52 QuantizationInfo qi = tensor.quantization_info();
53 std::vector<uint8_t> quantized;
54 quantized.reserve(v.size());
59 std::memcpy(tensor.data(), quantized.data(),
sizeof(uint8_t) * quantized.size());
// Build a QASYMM8 QuantizationInfo whose (scale, offset) map the real value
// range [min, max] onto the full unsigned 8-bit range [0, 255].
61 inline QuantizationInfo qinfo_scaleoffset_from_minmax(
const float min,
const float max)
65 const uint8_t qmin = std::numeric_limits<uint8_t>::min();
66 const uint8_t qmax = std::numeric_limits<uint8_t>::max();
67 const float f_qmin = qmin;
68 const float f_qmax = qmax;
// NOTE(review): the declarations of `scale`/`offset` and the degenerate
// min == max handling (original lines 69-72) are missing from this extract.
// Scale so that (max - min) spans the whole quantized range.
73 scale = (max - min) / (f_qmax - f_qmin);
// Two candidate zero-point offsets: one anchors the real `min` onto qmin,
// the other anchors the real `max` onto qmax.
74 const float offset_from_min = f_qmin - min /
scale;
75 const float offset_from_max = f_qmax - max /
scale;
// Choose the candidate whose associated magnitude error is smaller.
77 const float offset_from_min_error = std::abs(f_qmin) + std::abs(min / scale);
78 const float offset_from_max_error = std::abs(f_qmax) + std::abs(max / scale);
79 const float f_offset = offset_from_min_error < offset_from_max_error ? offset_from_min : offset_from_max;
// Clamp the chosen float offset into [qmin, qmax] before narrowing to
// uint8. NOTE(review): the clamp branches (original lines 82-93) are
// missing from this extract.
81 uint8_t uint8_offset = 0;
86 else if(f_offset > f_qmax)
94 offset = uint8_offset;
96 return QuantizationInfo(scale, offset);
// base_test_case: shared driver for the DetectionPostProcessLayer tests.
// Creates the three inputs (box encodings 4x6x1, class predictions 3x6x1,
// anchors 4x6), fills them with fixed fixture data (quantizing first when
// data_type is QASYMM8), configures and runs NEDetectionPostProcessLayer,
// then validates the four outputs against the expected SimpleTensors within
// the given absolute tolerances.
// NOTE(review): the function header, the `info`/`data_type`/expected-tensor
// parameters and the run() call are missing from this extract — only the
// trailing default tolerance parameters of the signature survive below.
101 AbsoluteTolerance<float> tolerance_boxes = AbsoluteTolerance<float>(0.1f), AbsoluteTolerance<float> tolerance_others = AbsoluteTolerance<float>(0.1f))
// Input tensors; each gets a quantization info covering its expected real
// value range (boxes in [-1,1], scores in [0,1], anchors in [0,100.5]).
103 Tensor box_encoding = create_tensor<Tensor>(TensorShape(4U, 6U, 1U),
data_type, 1, qinfo_scaleoffset_from_minmax(-1.0f, 1.0f));
104 Tensor class_prediction = create_tensor<Tensor>(TensorShape(3U, 6U, 1U),
data_type, 1, qinfo_scaleoffset_from_minmax(0.0f, 1.0f));
105 Tensor anchors = create_tensor<Tensor>(TensorShape(4U, 6U),
data_type, 1, qinfo_scaleoffset_from_minmax(0.0f, 100.5f));
107 box_encoding.allocator()->allocate();
108 class_prediction.allocator()->allocate();
109 anchors.allocator()->allocate();
// Fixed fixture data: 6 box encodings of 4 values each.
111 std::vector<float> box_encoding_vector =
113 0.0f, 1.0f, 0.0f, 0.0f,
114 0.0f, -1.0f, 0.0f, 0.0f,
115 0.0f, 0.0f, 0.0f, 0.0f,
116 0.0f, 0.0f, 0.0f, 0.0f,
117 0.0f, 1.0f, 0.0f, 0.0f,
118 0.0f, 0.0f, 0.0f, 0.0f
// NOTE(review): the class_prediction_vector initializer (original lines
// 121-128) is missing from this extract.
120 std::vector<float> class_prediction_vector =
// 6 anchors of 4 values each (y, x, h, w per Arm Compute convention —
// TODO confirm ordering against DetectionPostProcessLayerInfo docs).
129 std::vector<float> anchors_vector =
131 0.4f, 0.4f, 1.1f, 1.1f,
132 0.4f, 0.4f, 1.1f, 1.1f,
133 0.4f, 0.4f, 1.1f, 1.1f,
134 0.4f, 10.4f, 1.1f, 1.1f,
135 0.4f, 10.4f, 1.1f, 1.1f,
136 0.4f, 100.4f, 1.1f, 1.1f
// Float path: copy the raw values straight into the tensors.
// NOTE(review): the branch on data_type (original lines ~137-147) is
// missing; presumably fill_tensor runs for F32 and quantize_and_fill_tensor
// for QASYMM8.
142 fill_tensor(Accessor(box_encoding), box_encoding_vector);
143 fill_tensor(Accessor(class_prediction), class_prediction_vector);
// Quantized path: quantize with each tensor's own QuantizationInfo first.
148 quantize_and_fill_tensor(Accessor(box_encoding), box_encoding_vector);
149 quantize_and_fill_tensor(Accessor(class_prediction), class_prediction_vector);
150 quantize_and_fill_tensor(Accessor(anchors), anchors_vector);
// Outputs are shape-inferred by configure(); allocate after configuration.
155 Tensor output_classes;
156 Tensor output_scores;
157 Tensor num_detection;
158 NEDetectionPostProcessLayer detection;
159 detection.configure(&box_encoding, &class_prediction, &anchors, &output_boxes, &output_classes, &output_scores, &num_detection, info);
161 output_boxes.allocator()->allocate();
162 output_classes.allocator()->allocate();
163 output_scores.allocator()->allocate();
164 num_detection.allocator()->allocate();
// Boxes get their own tolerance; classes/scores/count share the other one.
171 validate(Accessor(output_boxes), expected_output_boxes, tolerance_boxes);
173 validate(Accessor(output_classes), expected_output_classes, tolerance_others);
175 validate(Accessor(output_scores), expected_output_scores, tolerance_others);
177 validate(Accessor(num_detection), expected_num_detection, tolerance_others);
// Fragment of the Validate DATA_TEST_CASE: zips tensor-info datasets with a
// DetectionPostProcessLayerInfo and an Expected flag, then calls the static
// NEDetectionPostProcessLayer::validate() with non-resizable clones of every
// TensorInfo and checks the returned Status against `expected`.
// NOTE(review): the DATA_TEST_CASE header, the other TensorInfo datasets and
// the ARM_COMPUTE_EXPECT line are missing from this extract.
229 framework::dataset::make(
"DetectionPostProcessLayerInfo",{
DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
236 box_encodings_info, classes_info, anchors_info, output_boxes_info, output_classes_info,output_scores_info, num_detection_info, detect_info,
expected)
239 &classes_info.clone()->set_is_resizable(
false),
240 &anchors_info.clone()->set_is_resizable(
false),
241 &output_boxes_info.clone()->set_is_resizable(
false),
242 &output_classes_info.clone()->set_is_resizable(
false), &num_detection_info.clone()->set_is_resizable(
false), detect_info);
// Fragments of six TEST_CASE bodies (three F32, three QASYMM8 — the
// TEST_CASE/TEST_SUITE headers and the DetectionPostProcessLayerInfo
// constructor heads are missing from this extract). Each case builds the
// expected boxes/classes/scores/count SimpleTensors, then delegates to
// base_test_case(). The F32 and QASYMM8 variants use identical expected
// values; the quantized runs pass a looser tolerance (0.3f per the doxygen
// cross-reference at the bottom of this extract).
// Case 1: general behaviour — all three top detections survive NMS.
253 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } );
256 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
259 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
262 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
265 fill_tensor(expected_num_detection, std::vector<float> { 3.f });
267 base_test_case(info,
DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
// Case 2: same expected output as case 1 under a different layer config
// (constructor differs in the missing header — TODO confirm upstream).
273 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } ,
278 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
281 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
284 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
287 fill_tensor(expected_num_detection, std::vector<float> { 3.f });
290 base_test_case(info,
DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
// Case 3: only two detections survive; trailing slots are zero-filled.
296 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } ,
301 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
304 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
307 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.91f, 0.0f });
310 fill_tensor(expected_num_detection, std::vector<float> { 2.f });
313 base_test_case(info,
DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
// Quantized (QASYMM8) counterparts of the three cases above; the trailing
// base_test_case(..., DataType::QASYMM8, ...) calls are missing from this
// extract.
321 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } );
325 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
328 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
331 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
334 fill_tensor(expected_num_detection, std::vector<float> { 3.f });
342 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } ,
347 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
350 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
353 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
356 fill_tensor(expected_num_detection, std::vector<float> { 3.f });
365 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } ,
369 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
372 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
375 fill_tensor(expected_output_scores, std::vector<float> { 0.95f, 0.91f, 0.0f });
378 fill_tensor(expected_num_detection, std::vector<float> { 2.f });
__global uchar * offset(const Image *img, int x, int y)
Get the pointer position of an Image.
Class representing an absolute tolerance value.
uint8_t quantize_qasymm8(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given an unsigned 8-bit asymmetric quantization scheme.
1 channel, 1 F32 per channel
base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance< float >(0.3f))
ARM_COMPUTE_EXPECT(has_error==expected, framework::LogLevel::ERRORS)
std::enable_if< is_container< T >::value, ContainerDataset< T > >::type make(std::string name, T &&values)
Helper function to create a ContainerDataset.
Copyright (c) 2017-2021 Arm Limited.
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT), })), 
framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })), input_info, output_info, act_info, expected)
DatasetMode
Possible dataset modes.
fill_tensor(input_to_input_weights, std::vector< uint8_t >{ 122, 130, 124, 134, 120, 122, 134, 134 })
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
quantized, unsigned asymmetric fixed-point 8-bit number
TEST_SUITE(U8_to_S8) FIXTURE_DATA_TEST_CASE(RunSmall
validate(CLAccessor(output_state), expected_output)
Simple tensor object that stores elements in a consecutive chunk of memory.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
SimpleTensor< float > expected_output_scores(TensorShape(3U), DataType::F32)
T round(T value)
Round floating-point value with half value rounding away from zero.
Detection Output layer info.
SimpleTensor< float > expected_output_boxes(TensorShape(4U, 3U), DataType::F32)
Store the tensor's metadata.
SimpleTensor< float > expected_num_detection(TensorShape(1U), DataType::F32)
TEST_CASE(FusedActivation, framework::DatasetMode::ALL)
Validate fused activation expecting the following behaviours:
zip(zip(framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), }), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(5U), 1, DataType::F32), })), framework::dataset::make("Expected", { true, false, false}))
DataType
Available data types.
SimpleTensor< float > expected_output_classes(TensorShape(3U), DataType::F32)
static Status validate(const ITensorInfo *input_box_encoding, const ITensorInfo *input_class_score, const ITensorInfo *input_anchors, ITensorInfo *output_boxes, ITensorInfo *output_classes, ITensorInfo *output_scores, ITensorInfo *num_detection, DetectionPostProcessLayerInfo info=DetectionPostProcessLayerInfo())
Static function to check if the given info will lead to a valid configuration of NEDetectionPostProcessLayer.