30 #include "tests/datasets/ShapeDatasets.h" 44 template <
typename U,
typename T>
45 inline void fill_tensor(U &&tensor,
const std::vector<T> &v)
47 std::memcpy(tensor.data(), v.data(),
sizeof(T) * v.size());
// Quantize float reference values to integral type D using the destination
// tensor's own QuantizationInfo, then memcpy the quantized bytes into the
// tensor's backing buffer.
// NOTE(review): original lines 55-58 were lost by the extraction —
// presumably the per-element loop that quantizes `v` into `quantized`;
// confirm against the original test file before relying on this listing.
49 template <
typename D,
typename U,
typename T>
50 inline void quantize_and_fill_tensor(U &&tensor,
const std::vector<T> &v)
// Quantization parameters come from the destination tensor itself.
52 QuantizationInfo qi = tensor.quantization_info();
53 std::vector<D> quantized;
54 quantized.reserve(v.size());
// Raw byte copy of the quantized values into the tensor's backing store.
59 std::memcpy(tensor.data(), quantized.data(),
sizeof(D) * quantized.size());
62 inline int calc_qinfo(
const float min,
const float max,
float &
scale)
64 const auto qmin = std::numeric_limits<T>::min();
65 const auto qmax = std::numeric_limits<T>::max();
66 const float f_qmin = qmin;
67 const float f_qmax = qmax;
69 scale = (max - min) / (f_qmax - f_qmin);
70 const float offset_from_min = f_qmin - min /
scale;
71 const float offset_from_max = f_qmax - max /
scale;
73 const float offset_from_min_error = std::abs(f_qmin) + std::abs(min / scale);
74 const float offset_from_max_error = std::abs(f_qmax) + std::abs(max / scale);
75 const float f_offset = offset_from_min_error < offset_from_max_error ? offset_from_min : offset_from_max;
81 else if(f_offset > f_qmax)
89 return static_cast<int>(tmp_offset);
91 inline QuantizationInfo qinfo_scaleoffset_from_minmax(
DataType data_type,
const float min,
const float max)
101 offset = calc_qinfo<int8_t>(min, max,
scale);
105 offset = calc_qinfo<uint8_t>(min, max,
scale);
108 return QuantizationInfo(scale, offset);
// base_test_case (tail — the extraction lost the signature head at the
// original lines before 113): builds the three input tensors for
// CPPDetectionPostProcessLayer in float or quantized form, runs the layer,
// and validates the four outputs against the expected SimpleTensors within
// the given tolerances.
113 AbsoluteTolerance<float> tolerance_boxes = AbsoluteTolerance<float>(0.1f), AbsoluteTolerance<float> tolerance_others = AbsoluteTolerance<float>(0.1f))
// Inputs: 6 boxes x 4 coords, 3 class scores per box, 6 anchors.
// Each tensor's quantization info is derived from its expected value range.
115 Tensor box_encoding = create_tensor<Tensor>(TensorShape(4U, 6U, 1U),
data_type, 1, qinfo_scaleoffset_from_minmax(data_type, -1.0f, 1.0f));
116 Tensor class_prediction = create_tensor<Tensor>(TensorShape(3U, 6U, 1U),
data_type, 1, qinfo_scaleoffset_from_minmax(data_type, 0.0f, 1.0f));
117 Tensor anchors = create_tensor<Tensor>(TensorShape(4U, 6U),
data_type, 1, qinfo_scaleoffset_from_minmax(data_type, 0.0f, 100.5f));
119 box_encoding.allocator()->allocate();
120 class_prediction.allocator()->allocate();
121 anchors.allocator()->allocate();
// Reference input data as floats; quantized below for the QASYMM8* paths.
123 std::vector<float> box_encoding_vector =
125 0.0f, 1.0f, 0.0f, 0.0f,
126 0.0f, -1.0f, 0.0f, 0.0f,
127 0.0f, 0.0f, 0.0f, 0.0f,
128 0.0f, 0.0f, 0.0f, 0.0f,
129 0.0f, 1.0f, 0.0f, 0.0f,
130 0.0f, 0.0f, 0.0f, 0.0f
// NOTE(review): class_prediction_vector's initializer (original lines
// 133-140) was lost by the extraction.
132 std::vector<float> class_prediction_vector =
141 std::vector<float> anchors_vector =
143 0.4f, 0.4f, 1.1f, 1.1f,
144 0.4f, 0.4f, 1.1f, 1.1f,
145 0.4f, 0.4f, 1.1f, 1.1f,
146 0.4f, 10.4f, 1.1f, 1.1f,
147 0.4f, 10.4f, 1.1f, 1.1f,
148 0.4f, 100.4f, 1.1f, 1.1f
// F32 path: copy the reference vectors in directly.
156 fill_tensor(Accessor(box_encoding), box_encoding_vector);
157 fill_tensor(Accessor(class_prediction), class_prediction_vector);
// QASYMM8 (unsigned) path: quantize each input with its own qinfo first.
163 quantize_and_fill_tensor<uint8_t>(Accessor(box_encoding), box_encoding_vector);
164 quantize_and_fill_tensor<uint8_t>(Accessor(class_prediction), class_prediction_vector);
165 quantize_and_fill_tensor<uint8_t>(Accessor(anchors), anchors_vector);
// QASYMM8_SIGNED path.
170 quantize_and_fill_tensor<int8_t>(Accessor(box_encoding), box_encoding_vector);
171 quantize_and_fill_tensor<int8_t>(Accessor(class_prediction), class_prediction_vector);
172 quantize_and_fill_tensor<int8_t>(Accessor(anchors), anchors_vector);
// Configure the layer; output tensor shapes are set by configure().
181 Tensor output_classes;
182 Tensor output_scores;
183 Tensor num_detection;
184 CPPDetectionPostProcessLayer detection;
185 detection.configure(&box_encoding, &class_prediction, &anchors, &output_boxes, &output_classes, &output_scores, &num_detection, info);
187 output_boxes.allocator()->allocate();
188 output_classes.allocator()->allocate();
189 output_scores.allocator()->allocate();
190 num_detection.allocator()->allocate();
// Validate all four outputs; boxes use their own (box) tolerance.
197 validate(Accessor(output_boxes), expected_output_boxes, tolerance_boxes);
199 validate(Accessor(output_classes), expected_output_classes, tolerance_others);
201 validate(Accessor(output_scores), expected_output_scores, tolerance_others);
203 validate(Accessor(num_detection), expected_num_detection, tolerance_others);
// Validate DATA_TEST_CASE fragment: zips TensorInfo datasets with a
// DetectionPostProcessLayerInfo and an "Expected" flag, then calls
// CPPDetectionPostProcessLayer::validate on non-resizable clones of the
// infos. NOTE(review): the DATA_TEST_CASE header, the zipped input TensorInfo
// datasets and the has_error / ARM_COMPUTE_EXPECT lines were lost by the
// extraction.
258 framework::dataset::make(
"DetectionPostProcessLayerInfo",{
DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
265 box_encodings_info, classes_info, anchors_info, output_boxes_info, output_classes_info,output_scores_info, num_detection_info, detect_info,
expected)
// validate() receives cloned, non-resizable TensorInfos.
268 &classes_info.clone()->set_is_resizable(
false),
269 &anchors_info.clone()->set_is_resizable(
false),
270 &output_boxes_info.clone()->set_is_resizable(
false),
271 &output_classes_info.clone()->set_is_resizable(
false),
272 &output_scores_info.clone()->set_is_resizable(
false), &num_detection_info.clone()->set_is_resizable(
false), detect_info);
// TEST_CASE fragments for F32 and quantized runs. Each fragment constructs a
// DetectionPostProcessLayerInfo (only the tail of the constructor call
// survived the extraction), fills the four expected SimpleTensors, and
// invokes base_test_case. The TEST_CASE/TEST_SUITE macro heads and the
// DetectionPostProcessLayerInfo argument heads were lost by the extraction;
// numeric prefixes are the original file's line numbers.
// Case 1 (F32): 3 detections survive.
282 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } );
285 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
288 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
291 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
294 fill_tensor(expected_num_detection, std::vector<float> { 3.f });
296 base_test_case(info,
DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
// Case 2 (F32): same expectations under a different layer-info variant.
302 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } ,
307 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
310 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
313 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
316 fill_tensor(expected_num_detection, std::vector<float> { 3.f });
319 base_test_case(info,
DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
// Case 3 (F32): only 2 detections survive; trailing slots are zero-filled.
325 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } ,
330 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
333 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
336 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.91f, 0.0f });
339 fill_tensor(expected_num_detection, std::vector<float> { 2.f });
342 base_test_case(info,
DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
// Quantized cases 1-3: identical expectations to the F32 cases; the
// base_test_case calls (with a looser box tolerance) were lost by the
// extraction.
350 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } );
354 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
357 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
360 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
363 fill_tensor(expected_num_detection, std::vector<float> { 3.f });
371 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } ,
376 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
379 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
382 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
385 fill_tensor(expected_num_detection, std::vector<float> { 3.f });
394 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } ,
398 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
401 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
404 fill_tensor(expected_output_scores, std::vector<float> { 0.95f, 0.91f, 0.0f });
407 fill_tensor(expected_num_detection, std::vector<float> { 2.f });
// Second quantized variant (presumably QASYMM8_SIGNED), cases 1-3.
419 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } );
423 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
426 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
429 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
432 fill_tensor(expected_num_detection, std::vector<float> { 3.f });
440 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } ,
445 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
448 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
451 fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
454 fill_tensor(expected_num_detection, std::vector<float> { 3.f });
463 0.5 , 2 , { 11.0, 11.0, 6.0, 6.0 } ,
467 fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
470 fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
473 fill_tensor(expected_output_scores, std::vector<float> { 0.95f, 0.91f, 0.0f });
476 fill_tensor(expected_num_detection, std::vector<float> { 2.f });
__global uchar * offset(const Image *img, int x, int y)
Get the pointer position of an Image.
Class representing an absolute tolerance value.
static Status validate(const ITensorInfo *input_box_encoding, const ITensorInfo *input_class_score, const ITensorInfo *input_anchors, ITensorInfo *output_boxes, ITensorInfo *output_classes, ITensorInfo *output_scores, ITensorInfo *num_detection, DetectionPostProcessLayerInfo info=DetectionPostProcessLayerInfo())
Static function to check if given info will lead to a valid configuration of CPPDetectionPostProcessL...
1 channel, 1 F32 per channel
base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance< float >(0.3f))
ARM_COMPUTE_EXPECT(has_error==expected, framework::LogLevel::ERRORS)
std::enable_if< is_container< T >::value, ContainerDataset< T > >::type make(std::string name, T &&values)
Helper function to create a ContainerDataset.
Copyright (c) 2017-2021 Arm Limited.
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT), })), 
framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })), input_info, output_info, act_info, expected)
DatasetMode
Possible dataset modes.
fill_tensor(input_to_input_weights, std::vector< uint8_t >{ 122, 130, 124, 134, 120, 122, 134, 134 })
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
quantized, asymmetric fixed-point 8-bit number unsigned
TEST_SUITE(U8_to_S8) FIXTURE_DATA_TEST_CASE(RunSmall
validate(CLAccessor(output_state), expected_output)
Simple tensor object that stores elements in a consecutive chunk of memory.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
SimpleTensor< float > expected_output_scores(TensorShape(3U), DataType::F32)
T round(T value)
Round floating-point value with half value rounding away from zero.
Detection Output layer info.
SimpleTensor< float > expected_output_boxes(TensorShape(4U, 3U), DataType::F32)
Store the tensor's metadata.
SimpleTensor< float > expected_num_detection(TensorShape(1U), DataType::F32)
TEST_CASE(FusedActivation, framework::DatasetMode::ALL)
Validate fused activation expecting the following behaviours:
quantized, asymmetric fixed-point 8-bit number signed
zip(zip(framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), }), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(5U), 1, DataType::F32), })), framework::dataset::make("Expected", { true, false, false}))
DataType
Available data types.
static QUANTIZED_TYPE quantize(float value, const UniformQuantizationInfo &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given a 8-bit asymmetric quantization scheme.
SimpleTensor< float > expected_output_classes(TensorShape(3U), DataType::F32)