24.02.1
|
Go to the documentation of this file.
43 struct ComputeAllAnchorsData
48 using ComputeAllAnchorsSelectorPtr = std::add_pointer<bool(
const ComputeAllAnchorsData &data)>
::type;
49 using ComputeAllAnchorsUKernelPtr = std::add_pointer<void(
50 const ITensor *anchors, ITensor *all_anchors, ComputeAnchorsInfo anchors_info,
const Window &window)>
::type;
52 struct ComputeAllAnchorsKernel
59 static const ComputeAllAnchorsKernel available_kernels[] = {
60 #if defined(ARM_COMPUTE_ENABLE_NEON)
61 {
"neon_qu16_computeallanchors", [](
const ComputeAllAnchorsData &data) {
return data.dt ==
DataType::QSYMM16; },
63 #endif //defined(ARM_COMPUTE_ENABLE_NEON)
64 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
65 {
"neon_fp16_computeallanchors", [](
const ComputeAllAnchorsData &data) {
return data.dt ==
DataType::F16; },
67 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
68 {
"neon_fp32_computeallanchors", [](
const ComputeAllAnchorsData &data) {
return data.dt ==
DataType::F32; },
78 const ComputeAllAnchorsKernel *get_implementation(
const ComputeAllAnchorsData &data)
80 for (
const auto &uk : available_kernels)
82 if (uk.is_selected(data))
90 Status
validate_arguments(
const ITensorInfo *anchors,
const ITensorInfo *all_anchors,
const ComputeAnchorsInfo &
info)
97 if (all_anchors->total_size() > 0)
99 const size_t feature_height =
info.feat_height();
100 const size_t feature_width =
info.feat_width();
101 const size_t num_anchors = anchors->dimension(1);
118 : _anchors(nullptr), _all_anchors(nullptr), _anchors_info(0.f, 0.f, 0.f)
130 const float width =
info.feat_width();
131 const float height =
info.feat_height();
140 _all_anchors = all_anchors;
141 _anchors_info =
info;
145 INEKernel::configure(win);
162 const auto *uk = get_implementation(ComputeAllAnchorsData{_anchors->
info()->
data_type()});
165 uk->ukernel(_anchors, _all_anchors, _anchors_info,
window);
Class to describe a number of elements in each dimension.
decltype(strategy::transforms) typedef type
ComputeAllAnchorsUKernelPtr ukernel
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
Interface for CPU tensor.
#define REGISTER_FP16_NEON(func_name)
const ComputeAllAnchorsSelectorPtr is_selected
ComputeAnchors information class.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
@ QSYMM16
quantized, symmetric fixed-point 16-bit number
void neon_fp32_computeallanchors(const ITensor *anchors, ITensor *all_anchors, ComputeAnchorsInfo anchors_info, const Window &window)
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
#define REGISTER_FP32_NEON(func_name)
#define REGISTER_QSYMM16_NEON(func_name)
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
void neon_fp16_computeallanchors(const ITensor *anchors, ITensor *all_anchors, ComputeAnchorsInfo anchors_info, const Window &window)
#define ARM_COMPUTE_ERROR_THROW_ON(status)
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(t,...)
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
#define ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(tensor)
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
NEComputeAllAnchorsKernel()
Default constructor.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
virtual DataType data_type() const =0
Data type used for each element of the tensor.
static Status validate(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
Static function to check if the given info will lead to a valid configuration of NEComputeAllAnchorsKernel.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
void neon_qu16_computeallanchors(const ITensor *anchors, ITensor *all_anchors, ComputeAnchorsInfo anchors_info, const Window &window)
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
const Window & window() const
The maximum window the kernel can be executed on.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(...)
Information about executing thread and CPU.
Store the tensor's metadata.
void configure(const ITensor *anchors, ITensor *all_anchors, const ComputeAnchorsInfo &info)
Set the input and output tensors.
Describe a multidimensional execution window.
Copyright (c) 2017-2024 Arm Limited.
@ F16
16-bit floating-point number
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
bool is_data_type_quantized(DataType dt)
Check if a given data type is of quantized type.
Store the tensor's metadata.
@ F32
32-bit floating-point number
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
DataType
Available data types.