Arm Compute Library 24.02 — NEDeconvolutionLayer.cpp file reference.
Go to the documentation of this file.
41 PadStrideInfo compute_upsample_info(
const PadStrideInfo &
info, uint32_t deconv_pad_x, uint32_t deconv_pad_y)
43 const unsigned int pad_left =
info.pad_left();
44 const unsigned int pad_right =
info.pad_right();
45 const unsigned int pad_top =
info.pad_top();
46 const unsigned int pad_bottom =
info.pad_bottom();
47 const unsigned int stride_x =
info.stride().first;
48 const unsigned int stride_y =
info.stride().second;
51 unsigned int deconv_pad_left = pad_right > pad_left ? pad_right - pad_left : 0;
52 unsigned int deconv_pad_right = pad_left > pad_right ? pad_left - pad_right : 0;
53 deconv_pad_x -= deconv_pad_left + deconv_pad_right;
55 deconv_pad_left += deconv_pad_x / 2;
56 deconv_pad_right += deconv_pad_x / 2;
58 unsigned int deconv_pad_top = pad_bottom > pad_top ? pad_bottom - pad_top : 0;
59 unsigned int deconv_pad_bottom = pad_top > pad_bottom ? pad_top - pad_bottom : 0;
60 deconv_pad_y -= deconv_pad_top + deconv_pad_bottom;
62 deconv_pad_top += deconv_pad_y / 2;
63 deconv_pad_bottom += deconv_pad_y / 2;
65 return PadStrideInfo(stride_x, stride_y, deconv_pad_left, deconv_pad_right, deconv_pad_top, deconv_pad_bottom,
71 : _memory_group(std::move(memory_manager)),
78 _original_weights(nullptr),
91 bool enable_fast_math,
112 const unsigned int pad_left =
info.pad_left();
113 const unsigned int pad_top =
info.pad_top();
114 const unsigned int pad_right =
info.pad_right();
115 const unsigned int pad_bottom =
info.pad_bottom();
145 "Output's width is invalid.");
147 "Output's height is invalid.");
149 "Output's depth is invalid.");
152 uint32_t deconv_pad_x = 0;
153 uint32_t deconv_pad_y = 0;
154 const uint32_t stride_x =
info.stride().first;
155 const uint32_t stride_y =
info.stride().second;
157 static_cast<int32_t
>(stride_y), out_dims);
159 "Negative padding not supported");
162 out_dims, deconv_pad_x, deconv_pad_y);
163 TensorInfo scale_out_info(
input->clone()->set_is_resizable(
true).reset_padding().set_tensor_shape(scale_out_shape));
164 const PadStrideInfo upsample_info = compute_upsample_info(
info, deconv_pad_x, deconv_pad_y);
167 const bool do_upsampling = stride_x != 1 || stride_y != 1;
169 const unsigned int batches_idx =
200 bool enable_fast_math,
206 (
bias ==
nullptr) ?
nullptr :
bias->info(),
220 _original_weights = weights;
222 _is_prepared =
false;
224 const unsigned int stride_x =
info.stride().first;
225 const unsigned int stride_y =
info.stride().second;
229 input->info()->quantization_info());
234 _flip_weights.
configure(weights, &_weights_flipped, &_flip_axis);
237 uint32_t deconv_pad_x = 0;
238 uint32_t deconv_pad_y = 0;
240 *
input->info(), *weights->
info(), stride_x, stride_y, out_dims, deconv_pad_x, deconv_pad_y);
242 const PadStrideInfo upsample_info = compute_upsample_info(
info, deconv_pad_x, deconv_pad_y);
245 _do_upsampling = stride_x != 1 || stride_y != 1;
249 auto axis_data =
reinterpret_cast<uint32_t *
>(_flip_axis.
buffer());
250 axis_data[0] =
static_cast<uint32_t
>(
width_idx);
251 axis_data[1] =
static_cast<uint32_t
>(
height_idx);
256 _memory_group.
manage(&_scaled_output);
259 TensorInfo scale_out_info(scale_out_shape, 1,
input->info()->data_type(),
input->info()->quantization_info());
void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &info, bool enable_fast_math=false, const WeightsInfo &weights_info=WeightsInfo())
Set the input, weights, biases and output tensors.
QSYMM8_PER_CHANNEL: quantized, symmetric per-channel fixed-point 8-bit number
void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info=WeightsInfo(), const Size2D &dilation=Size2D(1U, 1U), const ActivationLayerInfo &act_info=ActivationLayerInfo(), bool enable_fast_math=false, unsigned int num_groups=1)
Set the input and output tensors.
NEDeconvolutionLayer(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
Constructor.
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.
Convolution Layer Weights Information class.
void manage(IMemoryManageable *obj) override
Sets a object to be managed by the given memory group.
std::pair< int32_t, int32_t > compute_deconvolution_padding(const ITensorInfo &input, const ITensorInfo &weights, int32_t sx, int32_t sy, std::pair< uint32_t, uint32_t > out_dims)
Calculate padding required for deconvolution.
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
void init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo &sub_info)
Shares the same backing memory with another tensor allocator, while the tensor info might be different.
DataLayout
[DataLayout enum definition]
unsigned int pad_right() const
Get the right padding.
std::pair< unsigned int, unsigned int > deconvolution_output_dimensions(unsigned int in_width, unsigned int in_height, unsigned int kernel_width, unsigned int kernel_height, const PadStrideInfo &pad_stride_info)
Returns expected width and height of the deconvolution's output tensor.
QASYMM8: quantized, asymmetric fixed-point 8-bit number, unsigned
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
void prepare() override
Prepare the function for executing.
Class for specifying the size of an image or rectangle.
Interface for CPU tensor.
size_t dimension(size_t index) const override
Return the size of the requested dimension.
void configure(const ITensor *input, ITensor *output, const PadStrideInfo &info)
Configure the upsample CPP kernel.
void configure(const ITensor *input, ITensor *output, const ITensor *axis, const bool use_inverted_axis=false)
Initialize the function.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
constexpr auto data_layout
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
T z() const
Alias to access the size of the third dimension.
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
Activation Layer Information class.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
TensorAllocator * allocator()
Return a pointer to the tensor's allocator.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
U32: unsigned 32-bit number
TensorShape compute_deconvolution_output_shape(const std::pair< unsigned int, unsigned int > &out_dims, const ITensorInfo &input, const ITensorInfo &weights)
Calculate the output shape of the deconvolution layer.
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &info, bool enable_fast_math=false, const WeightsInfo &weights_info=WeightsInfo())
Static function to check if given info will lead to a valid configuration of NEDeconvolutionLayer.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
size_t total_size() const
Collapses all dimensions to a single linear total size.
void mark_as_unused() const
Marks a tensor as unused.
ITensorInfo & set_data_layout(const DataLayout &data_layout) override
Set the data layout of the tensor.
uint8_t * buffer() const override
Interface to be implemented by the child class to return a pointer to CPU memory.
QASYMM8_SIGNED: quantized, asymmetric fixed-point 8-bit number, signed
virtual DataType data_type() const =0
Data type used for each element of the tensor.
bool is_data_type_quantized_per_channel(DataType dt)
Check if a given data type is of per channel type.
T x() const
Alias to access the size of the first dimension.
void prepare() override
Prepare the function for executing.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(...)
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
unsigned int pad_left() const
Get the left padding.
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
size_t get_data_layout_dimension_index(const DataLayout &data_layout, const DataLayoutDimension &data_layout_dimension)
Get the index of the given dimension.
Store the tensor's metadata.
unsigned int pad_bottom() const
Get the bottom padding.
bool is_used() const
Flags if the tensor is used or not.
void allocate() override
Allocate size specified by TensorInfo of CPU memory.
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Memory group resources scope handling class.
Copyright (c) 2017-2024 Arm Limited.
F16: 16-bit floating-point number
void run() override
Run the kernels contained in the function.
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
void run() override final
Run the kernels contained in the function.
S32: signed 32-bit number
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
TensorShape compute_deconvolution_upsampled_shape(const ITensorInfo &input, const ITensorInfo &weights, unsigned int sx, unsigned int sy, std::pair< unsigned int, unsigned int > &out_dims, uint32_t &padx, uint32_t &pady)
Calculate the upsampled output shape used for deconvolution.
bool is_data_type_quantized(DataType dt)
Check if a given data type is of quantized type.
Store the tensor's metadata.
F32: 32-bit floating-point number
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
void run() override
Run the kernels contained in the function.
T y() const
Alias to access the size of the second dimension.
#define ARM_COMPUTE_LOG_PARAMS(...)
void run() override final
Run the kernels contained in the function.
unsigned int pad_top() const
Get the top padding.
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info=WeightsInfo(), const Size2D &dilation=Size2D(1U, 1U), const ActivationLayerInfo &act_info=ActivationLayerInfo(), bool enable_fast_math=false, unsigned int num_groups=1)
Static function to check if given info will lead to a valid configuration of NEConvolutionLayer.