sst-linux/fs/btrfs/block-rsv.h
Filipe Manana 563df8b411 btrfs: calculate the right space for delayed refs when updating global reserve
commit f8f210dc84709804c9f952297f2bfafa6ea6b4bd upstream.

When updating the global block reserve, we account for the 6 items needed
by an unlink operation and the 6 delayed references for each one of those
items. However the calculation for the delayed references is not correct
in case we have the free space tree enabled, as in that case we need to
touch the free space tree as well and therefore need twice the number of
bytes. So use the btrfs_calc_delayed_ref_bytes() helper to calculate the
number of bytes needed for the delayed references at
btrfs_update_global_block_rsv().
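
For reference, a sketch of what that helper does, modeled on commit
0e55a54502b97 mentioned below (per the backport note the const is dropped
from the fs_info parameter; btrfs_calc_insert_metadata_size() and
btrfs_test_opt() are existing btrfs helpers):

  static inline u64 btrfs_calc_delayed_ref_bytes(struct btrfs_fs_info *fs_info,
                                                 int num_delayed_refs)
  {
          u64 num_bytes;

          /* Base metadata size for num_delayed_refs items. */
          num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);

          /*
           * When the free space tree is enabled it must be updated as
           * well, doubling the required space.
           */
          if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
                  num_bytes *= 2;

          return num_bytes;
  }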

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
[Diogo: this patch has been cherry-picked from the original commit;
conflicts included lack of a define (picked from commit 5630e2bcfe223)
and lack of btrfs_calc_delayed_ref_bytes (picked from commit 0e55a54502b97)
- changed const struct -> struct for compatibility.]
Signed-off-by: Diogo Jahchan Koike <djahchankoike@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2024-09-30 16:23:55 +02:00

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_RSV_H
#define BTRFS_BLOCK_RSV_H

struct btrfs_trans_handle;
enum btrfs_reserve_flush_enum;

/*
 * Types of block reserves
 */
enum btrfs_rsv_type {
        BTRFS_BLOCK_RSV_GLOBAL,
        BTRFS_BLOCK_RSV_DELALLOC,
        BTRFS_BLOCK_RSV_TRANS,
        BTRFS_BLOCK_RSV_CHUNK,
        BTRFS_BLOCK_RSV_DELOPS,
        BTRFS_BLOCK_RSV_DELREFS,
        BTRFS_BLOCK_RSV_EMPTY,
        BTRFS_BLOCK_RSV_TEMP,
};

struct btrfs_block_rsv {
        u64 size;
        u64 reserved;
        struct btrfs_space_info *space_info;
        spinlock_t lock;
        bool full;
        bool failfast;
        /* Block reserve type, one of BTRFS_BLOCK_RSV_* */
        enum btrfs_rsv_type type:8;

        /*
         * Qgroup equivalent for @size and @reserved
         *
         * Unlike the normal @size/@reserved for an inode reserve, qgroups
         * don't care about things like csum size or how many tree blocks
         * the reservation will need.
         *
         * Qgroups care about the net change in extent usage.
         *
         * So for one newly inserted file extent, the worst case is a leaf
         * split plus a level increase, and reserving one nodesize per file
         * extent is already an over-estimate of that.
         *
         * In short, qgroup_rsv_size/qgroup_rsv_reserved is the upper limit
         * of the qgroup metadata reservation that may be needed.
         */
        u64 qgroup_rsv_size;
        u64 qgroup_rsv_reserved;
};

/*
 * Number of metadata items necessary for an unlink operation:
 *
 * 1 for the possible orphan item
 * 1 for the dir item
 * 1 for the dir index
 * 1 for the inode ref
 * 1 for the inode
 * 1 for the parent inode
 */
#define BTRFS_UNLINK_METADATA_UNITS 6
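
/*
 * Illustration (a sketch based on the commit message above, not the exact
 * code in block-rsv.c): for the unlink portion of the global reserve,
 * btrfs_update_global_block_rsv() must reserve space both for these items
 * and for their delayed references, roughly:
 *
 *   num_bytes = btrfs_calc_insert_metadata_size(fs_info, BTRFS_UNLINK_METADATA_UNITS) +
 *               btrfs_calc_delayed_ref_bytes(fs_info, BTRFS_UNLINK_METADATA_UNITS);
 */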

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
void btrfs_init_root_block_rsv(struct btrfs_root *root);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
                                              enum btrfs_rsv_type type);
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
                                   struct btrfs_block_rsv *rsv,
                                   enum btrfs_rsv_type type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
                          struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
                        struct btrfs_block_rsv *block_rsv, u64 num_bytes,
                        enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
                           struct btrfs_block_rsv *block_rsv, u64 min_reserved,
                           enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                            struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
                            bool update_size);
int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes);
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
                             struct btrfs_block_rsv *dest, u64 num_bytes,
                             int min_factor);
void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes, bool update_size);
u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
                            struct btrfs_block_rsv *block_rsv,
                            u64 num_bytes, u64 *qgroup_to_release);
void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info);
struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            u32 blocksize);

static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
                                         struct btrfs_block_rsv *block_rsv,
                                         u32 blocksize)
{
        btrfs_block_rsv_add_bytes(block_rsv, blocksize, false);
        btrfs_block_rsv_release(fs_info, block_rsv, 0, NULL);
}

/*
 * Fast path to check if the reserve is full, may be carefully used outside of
 * locks.
 */
static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
{
        return data_race(rsv->full);
}

/*
 * Get the reserved amount of a block reserve in a context where getting a
 * stale value is acceptable, instead of accessing it directly and triggering
 * a data race warning from KCSAN.
 */
static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
{
        u64 ret;

        spin_lock(&rsv->lock);
        ret = rsv->reserved;
        spin_unlock(&rsv->lock);

        return ret;
}

/*
 * Get the size of a block reserve in a context where getting a stale value is
 * acceptable, instead of accessing it directly and triggering a data race
 * warning from KCSAN.
 */
static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
{
        u64 ret;

        spin_lock(&rsv->lock);
        ret = rsv->size;
        spin_unlock(&rsv->lock);

        return ret;
}

#endif /* BTRFS_BLOCK_RSV_H */