40 PadStrideInfo compute_upsample_info(
const PadStrideInfo &
info, uint32_t deconv_pad_x, uint32_t deconv_pad_y)
42 const unsigned int pad_left = info.pad_left();
43 const unsigned int pad_right = info.pad_right();
44 const unsigned int pad_top = info.pad_top();
45 const unsigned int pad_bottom = info.pad_bottom();
46 const unsigned int stride_x = info.stride().first;
47 const unsigned int stride_y = info.stride().second;
50 unsigned int deconv_pad_left = pad_right > pad_left ? pad_right - pad_left : 0;
51 unsigned int deconv_pad_right = pad_left > pad_right ? pad_left - pad_right : 0;
52 deconv_pad_x -= deconv_pad_left + deconv_pad_right;
54 deconv_pad_left += deconv_pad_x / 2;
55 deconv_pad_right += deconv_pad_x / 2;
57 unsigned int deconv_pad_top = pad_bottom > pad_top ? pad_bottom - pad_top : 0;
58 unsigned int deconv_pad_bottom = pad_top > pad_bottom ? pad_top - pad_bottom : 0;
59 deconv_pad_y -= deconv_pad_top + deconv_pad_bottom;
61 deconv_pad_top += deconv_pad_y / 2;
62 deconv_pad_bottom += deconv_pad_y / 2;
64 return PadStrideInfo(stride_x, stride_y, deconv_pad_left, deconv_pad_right, deconv_pad_top, deconv_pad_bottom,
DimensionRoundingType::FLOOR);
70 : _memory_group(
std::move(memory_manager)),
77 _original_weights(nullptr),
128 uint32_t deconv_pad_x = 0;
129 uint32_t deconv_pad_y = 0;
130 const unsigned int stride_x = info.
stride().first;
131 const unsigned int stride_y = info.
stride().second;
136 const unsigned int out_x = (input->
dimension(idx_w) - 1) * stride_x + 1;
137 const unsigned int out_y = (input->
dimension(idx_h) - 1) * stride_y + 1;
144 TensorInfo scale_out_info(input->
clone()->set_is_resizable(
true).reset_padding().set_tensor_shape(scale_out_shape));
173 _original_weights = weights;
175 _is_prepared =
false;
177 const unsigned int stride_x = info.
stride().first;
178 const unsigned int stride_y = info.
stride().second;
181 _do_upsampling = stride_x != 1 || stride_y != 1 || weights->
info()->
dimension(width_idx) != 1 || weights->
info()->
dimension(height_idx) != 1;
189 _flip_weights.
configure(weights, &_weights_flipped, &_flip_axis);
193 uint32_t deconv_pad_x = 0;
194 uint32_t deconv_pad_y = 0;
198 auto axis_data =
reinterpret_cast<uint32_t *
>(_flip_axis.
buffer());
199 axis_data[0] =
static_cast<uint32_t
>(width_idx);
200 axis_data[1] =
static_cast<uint32_t
>(height_idx);
205 _memory_group.
manage(&_scaled_output);
208 out_dims, deconv_pad_x, deconv_pad_y);
210 const PadStrideInfo upsample_info = compute_upsample_info(info, deconv_pad_x, deconv_pad_y);
216 _upsample_f.
configure(input, &_scaled_output, upsample_info);
bool is_data_type_quantized(DataType dt)
Check if a given data type is of quantized type.
void run() override final
Run the kernels contained in the function.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(...)
void init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo &sub_info)
Shares the same backing memory with another tensor allocator, while the tensor info might be different.
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
std::pair< unsigned int, unsigned int > deconvolution_output_dimensions(unsigned int in_width, unsigned int in_height, unsigned int kernel_width, unsigned int kernel_height, const PadStrideInfo &pad_stride_info)
Returns expected width and height of the deconvolution's output tensor.
void run() override
Run the kernels contained in the function.
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
virtual DataType data_type() const =0
Data type used for each element of the tensor.
bool is_used() const
Flags if the tensor is used or not.
1 channel, 1 F32 per channel
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info=WeightsInfo(), const Size2D &dilation=Size2D(1U, 1U), const ActivationLayerInfo &act_info=ActivationLayerInfo(), bool enable_fast_math=false, unsigned int num_groups=1)
Set the input and output tensors.
Store the tensor's metadata.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &info, bool enable_fast_math=false)
Set the input, weights, biases and output tensors.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Activation Layer Information class.
Interface for CPU tensor.
Copyright (c) 2017-2023 Arm Limited.
1 channel, 1 F16 per channel
void configure(const ITensor *input, ITensor *output, const ITensor *axis)
Initialize the function.
TensorAllocator * allocator()
Return a pointer to the tensor's allocator.
Convolution Layer Weights Information class.
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
void run() override
Run the kernels contained in the function.
TensorShape compute_deconvolution_output_shape(const std::pair< unsigned int, unsigned int > &out_dims, const ITensorInfo &input, const ITensorInfo &weights)
Calculate the output shape of the deconvolution layer.
void mark_as_unused() const
Marks a tensor as unused.
1 channel, 1 S32 per channel
void manage(IMemoryManageable *obj) override
Sets a object to be managed by the given memory group.
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &info, bool enable_fast_math=false)
Static function to check if given info will lead to a valid configuration of NEDeconvolutionLayer.
T x() const
Alias to access the size of the first dimension.
ITensorInfo & set_data_layout(const DataLayout &data_layout) override
Set the data layout of the tensor.
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
1 channel, 1 U32 per channel
bool is_data_type_quantized_per_channel(DataType dt)
Check if a given data type is of per channel type.
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
quantized, asymmetric fixed-point 8-bit number unsigned
T z() const
Alias to access the size of the third dimension.
void allocate() override
Allocate size specified by TensorInfo of CPU memory.
std::pair< unsigned int, unsigned int > stride() const
Get the stride.
size_t total_size() const
Collapses all dimensions to a single linear total size.
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment i...
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
TensorShape compute_deconvolution_upsampled_shape(const ITensorInfo &input, const ITensorInfo &weights, unsigned int sx, unsigned int sy, std::pair< unsigned int, unsigned int > &out_dims, uint32_t &padx, uint32_t &pady)
Calculate the upsampled output shape used for deconvolution.
Padding and stride information class.
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
quantized, symmetric per channel fixed-point 8-bit number
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
NEDeconvolutionLayer(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
Constructor.
Memory group resources scope handling class.
void prepare() override
Prepare the function for executing.
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
size_t get_data_layout_dimension_index(const DataLayout &data_layout, const DataLayoutDimension &data_layout_dimension)
Get the index of the given dimension.
Class for specifying the size of an image or rectangle.
void configure(const ITensor *input, ITensor *output, const PadStrideInfo &info)
Configure the upsample CPP kernel.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
uint8_t * buffer() const override
Interface to be implemented by the child class to return a pointer to CPU memory.
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
#define ARM_COMPUTE_LOG_PARAMS(...)
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
Store the tensor's metadata.
void run() override final
Run the kernels contained in the function.
T y() const
Alias to access the size of the second dimension.
quantized, asymmetric fixed-point 8-bit number signed
DataLayout
[DataLayout enum definition]
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info=WeightsInfo(), const Size2D &dilation=Size2D(1U, 1U), const ActivationLayerInfo &act_info=ActivationLayerInfo(), bool enable_fast_math=false, unsigned int num_groups=1)
Static function to check if given info will lead to a valid configuration of NEConvolutionLayer.
void prepare() override
Prepare the function for executing.
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.