
RT groups on a zoned file system need to be completely empty before their space can be reused. This means that partially empty groups need to be emptied entirely to free up space if no entirely free groups are available.

Add a garbage collection thread that moves all data out of the least-used zone when not enough free zones are available, and that resets all zones that have been emptied. To pick zones to empty, a simple set of 10 buckets based on the amount of space used in each zone is kept. To empty zones, the rmap is walked to find the owners, and the data is read and then written to its new place.

To automatically defragment files, the rmap records are sorted by inode and logical offset. This means defragmentation of parallel writes into a single zone happens automatically when performing garbage collection.

Because holding the iolock over the entire GC cycle would inject very noticeable latency for other accesses to the inodes, the iolock is not taken while performing I/O. Instead, the I/O completion handler checks that the mapping hasn't changed from the one recorded at the start of the GC cycle, and does not update the mapping if it has.

Co-developed-by: Hans Holmberg <hans.holmberg@wdc.com>
Signed-off-by: Hans Holmberg <hans.holmberg@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
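The two ideas in that last paragraph and the bucket scheme are easiest to see in miniature. The following standalone sketch uses made-up names (gc_zone_bucket, gc_mapping_still_valid, GC_NR_BUCKETS) and simplified types; it illustrates the technique described above, not the actual kernel implementation.

/*
 * Illustrative sketch only: hypothetical helpers mirroring the commit
 * message above. Names and types are simplified stand-ins.
 */
#include <stdint.h>
#include <stdbool.h>

#define GC_NR_BUCKETS	10	/* zones binned by fraction of space used */

/*
 * Bin a zone by how full it is.  The GC thread picks victims from the
 * lowest non-empty bucket, so the least-used zones are emptied first.
 */
static unsigned int gc_zone_bucket(uint64_t used_blocks, uint64_t zone_blocks)
{
	uint64_t bucket = (used_blocks * GC_NR_BUCKETS) / zone_blocks;

	return bucket < GC_NR_BUCKETS ? (unsigned int)bucket : GC_NR_BUCKETS - 1;
}

/*
 * At GC I/O completion, only install the new mapping if the extent still
 * points at the block recorded when the GC cycle started.  Because the
 * iolock is not held while the GC I/O is in flight, a racing write may
 * have changed the mapping, which invalidates the relocation.
 */
static bool gc_mapping_still_valid(uint64_t cur_startblock,
		uint64_t recorded_startblock)
{
	return cur_startblock == recorded_startblock;
}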
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _XFS_ZONE_ALLOC_H
#define _XFS_ZONE_ALLOC_H

struct iomap_ioend;
struct xfs_open_zone;

struct xfs_zone_alloc_ctx {
	struct xfs_open_zone	*open_zone;
	xfs_filblks_t		reserved_blocks;
};

/*
 * Grab any available space, even if it is less than what the caller asked for.
 */
#define XFS_ZR_GREEDY		(1U << 0)
/*
 * Only grab instantly available space, don't wait or GC.
 */
#define XFS_ZR_NOWAIT		(1U << 1)
/*
 * Dip into the reserved pool.
 */
#define XFS_ZR_RESERVED		(1U << 2)

int xfs_zoned_space_reserve(struct xfs_inode *ip, xfs_filblks_t count_fsb,
		unsigned int flags, struct xfs_zone_alloc_ctx *ac);
void xfs_zoned_space_unreserve(struct xfs_inode *ip,
		struct xfs_zone_alloc_ctx *ac);
void xfs_zoned_add_available(struct xfs_mount *mp, xfs_filblks_t count_fsb);

void xfs_zone_alloc_and_submit(struct iomap_ioend *ioend,
		struct xfs_open_zone **oz);
int xfs_zone_free_blocks(struct xfs_trans *tp, struct xfs_rtgroup *rtg,
		xfs_fsblock_t fsbno, xfs_filblks_t len);
int xfs_zoned_end_io(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t count,
		xfs_daddr_t daddr, struct xfs_open_zone *oz,
		xfs_fsblock_t old_startblock);
void xfs_open_zone_put(struct xfs_open_zone *oz);

void xfs_zoned_wake_all(struct xfs_mount *mp);
bool xfs_zone_rgbno_is_valid(struct xfs_rtgroup *rtg, xfs_rgnumber_t rgbno);
void xfs_mark_rtg_boundary(struct iomap_ioend *ioend);

uint64_t xfs_zoned_default_resblks(struct xfs_mount *mp,
		enum xfs_free_counter ctr);

#ifdef CONFIG_XFS_RT
int xfs_mount_zones(struct xfs_mount *mp);
void xfs_unmount_zones(struct xfs_mount *mp);
void xfs_zone_gc_start(struct xfs_mount *mp);
void xfs_zone_gc_stop(struct xfs_mount *mp);
#else
static inline int xfs_mount_zones(struct xfs_mount *mp)
{
	return -EIO;
}
static inline void xfs_unmount_zones(struct xfs_mount *mp)
{
}
static inline void xfs_zone_gc_start(struct xfs_mount *mp)
{
}
static inline void xfs_zone_gc_stop(struct xfs_mount *mp)
{
}
#endif /* CONFIG_XFS_RT */

#endif /* _XFS_ZONE_ALLOC_H */
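To show how the reservation flags above compose, here is a hedged caller sketch. example_write_prep is a hypothetical function, not part of the kernel; it assumes the usual XFS headers plus this header are included, and that a failed XFS_ZR_NOWAIT reservation returns -EAGAIN, which is an assumption rather than something stated in this header.

/*
 * Hypothetical caller sketch (not kernel code): reserve zoned space before
 * a write, trying a non-blocking grab first and then falling back to a
 * greedy reservation that may wait for garbage collection to free zones.
 * Assumes a failed XFS_ZR_NOWAIT attempt returns -EAGAIN.
 */
static int example_write_prep(struct xfs_inode *ip, xfs_filblks_t count_fsb,
		struct xfs_zone_alloc_ctx *ac)
{
	int error;

	/* Only grab instantly available space; don't wait or trigger GC. */
	error = xfs_zoned_space_reserve(ip, count_fsb, XFS_ZR_NOWAIT, ac);
	if (error != -EAGAIN)
		return error;

	/*
	 * Block until space is available, accepting less than asked for
	 * if that is all that can be had right now.
	 */
	return xfs_zoned_space_reserve(ip, count_fsb, XFS_ZR_GREEDY, ac);
}

Whatever the write does not end up consuming would then be released with xfs_zoned_space_unreserve(ip, ac).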