
On shutdown when quotas are enabled, the shutdown can deadlock trying to unpin the dquot buffer's buf_log_item like so:

[ 3319.483590] task:kworker/20:0H state:D stack:14360 pid:1962230 tgid:1962230 ppid:2 task_flags:0x4208060 flags:0x00004000
[ 3319.493966] Workqueue: xfs-log/dm-6 xlog_ioend_work
[ 3319.498458] Call Trace:
[ 3319.500800]  <TASK>
[ 3319.502809]  __schedule+0x699/0xb70
[ 3319.512672]  schedule+0x64/0xd0
[ 3319.515573]  schedule_timeout+0x30/0xf0
[ 3319.528125]  __down_common+0xc3/0x200
[ 3319.531488]  __down+0x1d/0x30
[ 3319.534186]  down+0x48/0x50
[ 3319.540501]  xfs_buf_lock+0x3d/0xe0
[ 3319.543609]  xfs_buf_item_unpin+0x85/0x1b0
[ 3319.547248]  xlog_cil_committed+0x289/0x570
[ 3319.571411]  xlog_cil_process_committed+0x6d/0x90
[ 3319.575590]  xlog_state_shutdown_callbacks+0x52/0x110
[ 3319.580017]  xlog_force_shutdown+0x169/0x1a0
[ 3319.583780]  xlog_ioend_work+0x7c/0xb0
[ 3319.587049]  process_scheduled_works+0x1d6/0x400
[ 3319.591127]  worker_thread+0x202/0x2e0
[ 3319.594452]  kthread+0x20c/0x240

The CIL push has seen the deadlock, so it has aborted the push and is running CIL checkpoint completion to abort all the items in the checkpoint. This calls ->iop_unpin(remove = true) to clean up the log items in the checkpoint.

When a buffer log item is unpinned like this, it needs to lock the buffer to run IO completion so that it can correctly fail the buffer and run all the required completions to fail the attached log items as well. In this case, the attempt to lock the buffer on unpin is hanging because the buffer is already locked.

I suspected a leaked XFS_BLI_HOLD state because of XFS_BLI_STALE handling changes I was testing, so I went looking for pin events on HOLD buffers and unpin events on locked buffers. That isolated this one buffer with these two events:

xfs_buf_item_pin: dev 251:6 daddr 0xa910 bbcount 0x2 hold 2 pincount 0 lock 0 flags DONE|KMEM recur 0 refcount 1 bliflags HOLD|DIRTY|LOGGED liflags DIRTY
....
xfs_buf_item_unpin: dev 251:6 daddr 0xa910 bbcount 0x2 hold 4 pincount 1 lock 0 flags DONE|KMEM recur 0 refcount 1 bliflags DIRTY liflags ABORTED

Firstly, bbcount = 0x2, which means it is not a single sector structure. That rules out every xfs_trans_bhold() case except one: dquot buffers.
Then hung task dumping gave this trace:

[ 3197.312078] task:fsync-tester state:D stack:12080 pid:2051125 tgid:2051125 ppid:1643233 task_flags:0x400000 flags:0x00004002
[ 3197.323007] Call Trace:
[ 3197.325581]  <TASK>
[ 3197.327727]  __schedule+0x699/0xb70
[ 3197.334582]  schedule+0x64/0xd0
[ 3197.337672]  schedule_timeout+0x30/0xf0
[ 3197.350139]  wait_for_completion+0xbd/0x180
[ 3197.354235]  __flush_workqueue+0xef/0x4e0
[ 3197.362229]  xlog_cil_force_seq+0xa0/0x300
[ 3197.374447]  xfs_log_force+0x77/0x230
[ 3197.378015]  xfs_qm_dqunpin_wait+0x49/0xf0
[ 3197.382010]  xfs_qm_dqflush+0x55/0x460
[ 3197.385663]  xfs_qm_dquot_isolate+0x29e/0x4d0
[ 3197.389977]  __list_lru_walk_one+0x141/0x220
[ 3197.398867]  list_lru_walk_one+0x10/0x20
[ 3197.402713]  xfs_qm_shrink_scan+0x6a/0x100
[ 3197.406699]  do_shrink_slab+0x18a/0x350
[ 3197.410512]  shrink_slab+0xf7/0x430
[ 3197.413967]  drop_slab+0x97/0xf0
[ 3197.417121]  drop_caches_sysctl_handler+0x59/0xc0
[ 3197.421654]  proc_sys_call_handler+0x18b/0x280
[ 3197.426050]  proc_sys_write+0x13/0x20
[ 3197.429750]  vfs_write+0x2b8/0x3e0
[ 3197.438532]  ksys_write+0x7e/0xf0
[ 3197.441742]  __x64_sys_write+0x1b/0x30
[ 3197.445363]  x64_sys_call+0x2c72/0x2f60
[ 3197.449044]  do_syscall_64+0x6c/0x140
[ 3197.456341]  entry_SYSCALL_64_after_hwframe+0x76/0x7e

Yup, another test run by check-parallel is running drop_caches concurrently, and the dquot shrinker for the hung filesystem is running. It is trying to flush a dirty dquot from reclaim context, and it is waiting on a log force to complete. xfs_qm_dqflush() is called with the dquot buffer held locked, and so we've called xfs_log_force() with that buffer locked. Now the log force is waiting for a workqueue flush to complete, and that workqueue flush is waiting on CIL checkpoint processing to finish. The CIL checkpoint processing is aborting all the log items it has, and that requires locking aborted buffers to cancel them.

Now, normally this isn't a problem if we are issuing a log force to unpin an object, because the ->iop_unpin() method wakes pin waiters first. That results in the pin waiter finishing off whatever it was doing, dropping the lock, and then xfs_buf_item_unpin() can lock the buffer and fail it. However, xfs_qm_dqflush() is waiting on the -dquot- unpin event, not the dquot buffer unpin event, so it never gets woken and never drops the buffer lock.

Inodes do not have this problem, as they can only be written from one spot (->iop_push), whilst dquots can be written from multiple places (memory reclaim, ->iop_push, xfs_qm_dqpurge(), and quotacheck).

The reason the dquot buffer has an attached buffer log item is that it has been recently allocated: initialisation of the dquot buffer logs the buffer directly, thereby pinning it in memory. We then modify the dquot in a separate operation, memory reclaim races with a shutdown, and we trigger this deadlock. check-parallel reproduces this reliably on 1kB FSB filesystems with quota enabled because it does all of these things concurrently without having to explicitly write tests to exercise these corner case conditions.

xfs_qm_dquot_logitem_push() doesn't have this deadlock because it checks whether the dquot is pinned before locking the dquot buffer, and skips it if it is pinned. This means the xfs_qm_dqunpin_wait() log force in xfs_qm_dqflush() never triggers, and we unlock the buffer safely, allowing a concurrent shutdown to fail the buffer appropriately.
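To make that safe ordering concrete, here is a minimal sketch of the check xfs_qm_dquot_logitem_push() relies on: test the dquot pin count before going anywhere near the backing buffer, and back off if it is pinned so that any log force runs without the buffer lock held. The field and return codes (q_pincount, XFS_ITEM_PINNED, XFS_ITEM_LOCKED, XFS_ITEM_SUCCESS) are the kernel's, but the helper itself is illustrative rather than the literal push function:

/*
 * Illustrative sketch, not the literal kernel code: the deadlock-safe
 * ordering is "check pinned, then lock", so a log force needed to unpin
 * the dquot never happens while the dquot buffer is held locked.
 */
static unsigned int
example_dquot_push_check(
	struct xfs_dquot	*dqp)
{
	if (atomic_read(&dqp->q_pincount) > 0)
		return XFS_ITEM_PINNED;	/* caller forces the log and retries later */

	if (!xfs_dqlock_nowait(dqp))
		return XFS_ITEM_LOCKED;	/* someone else owns the dquot */

	/*
	 * Only here, with the dquot unpinned and locked, would it be safe
	 * to go after the backing buffer and flush the dquot to it.
	 */
	xfs_dqunlock(dqp);
	return XFS_ITEM_SUCCESS;
}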
xfs_qm_dqpurge() could have this problem, as it is called from quotacheck and we might have allocated dquot buffers when recording the quota updates. This can be fixed by calling xfs_qm_dqunpin_wait() before we lock the dquot buffer. Because we hold the dquot locked, nothing will be able to add to the pin count between the unpin wait and the dqflush callout, so this makes xfs_qm_dqpurge() safe against this race (the ordering is sketched below).

xfs_qm_dquot_isolate() can also be fixed this same way but, quite frankly, we shouldn't be doing IO in memory reclaim context. If the dquot is pinned or dirty, simply rotate it and let memory reclaim come back to it later, the same as we do for inodes.

This then gets rid of the nasty issue in xfs_qm_flush_one() where quotacheck writeback races with memory reclaim flushing the dquots. We can lift xfs_qm_dqunpin_wait() up into this code, then get rid of the "can't get the dqflush lock" buffer write that was used to cycle the dqflush lock and enable the dquot to be flushed again, instead checking whether the dquot is pinned and returning -EAGAIN so that the dquot walk will revisit the dquot again later.

Finally, with xfs_qm_dqunpin_wait() lifted into all the callers, we can remove it from the xfs_qm_dqflush() code.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
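As a rough illustration of the reordering described above, assuming the current xfs_qm_dqflush(dqp, bp) calling convention and using a hypothetical get_dquot_buffer() helper as a stand-in for however the caller obtains the attached dquot buffer, the purge/flush path waits for the unpin while holding only the dquot lock and only then touches the buffer:

/*
 * Minimal sketch, assuming xfs_qm_dqflush() takes the locked dquot buffer
 * and that get_dquot_buffer() (hypothetical) returns it locked. The dquot
 * is already locked, so nothing can re-pin it between the unpin wait and
 * the flush; the buffer lock is only taken after the wait completes.
 */
static int
example_purge_flush(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;

	xfs_qm_dqunpin_wait(dqp);	/* may force the log; no buffer lock held */

	error = get_dquot_buffer(dqp, &bp);	/* hypothetical helper */
	if (error)
		return error;

	error = xfs_qm_dqflush(dqp, bp);	/* buffer now locked for the flush */
	if (!error)
		xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
	return error;
}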
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 * Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1u << 2) /* asynchronous read-ahead */
#define XBF_ASYNC	 (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1u << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1u << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */

/* flags used only internally */
#define _XBF_KMEM	 (1u << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1u << 22)/* buffer on a delwri queue */

/* flags used only as arguments to access routines */
/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBF_LIVESCAN	 (1u << 28)
#define XBF_INCORE	 (1u << 29)/* lookup only, return if found in cache */
#define XBF_TRYLOCK	 (1u << 30)/* lock requested, but do not wait */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_LIVESCAN,		"LIVESCAN" }, \
	{ XBF_INCORE,		"INCORE" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" }

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	(1 << 0)	/* buffer being discarded */

struct xfs_buf_cache {
	struct rhashtable	bc_hash;
};

int xfs_buf_cache_init(struct xfs_buf_cache *bch);
void xfs_buf_cache_destroy(struct xfs_buf_cache *bch);

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
struct xfs_buftarg {
	dev_t			bt_dev;
	struct file		*bt_bdev_file;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct file		*bt_file;
	u64			bt_dax_part_off;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		*bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_readahead_count;
	struct ratelimit_state	bt_ioerror_rl;

	/* Atomic write unit values, bytes */
	unsigned int		bt_bdev_awu_min;
	unsigned int		bt_bdev_awu_max;

	/* built-in cache, if we're not using the perag one */
	struct xfs_buf_cache	bt_cache[];
};

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
	unsigned int		bm_flags;
};

/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBM_LIVESCAN	(1U << 0)

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed.  The semaphore straddles the cacheline
	 * boundary, but the counter and lock sits on the first cacheline,
	 * which is the only bit that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	xfs_daddr_t		b_rhash_key;	/* buffer cache index */
	int			b_length;	/* size of buffer in BBs */
	unsigned int		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_lru_flags are protected by
	 * bt_lru_lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	int			b_error;	/* error code on I/O */
	void			(*b_iodone)(struct xfs_buf *bp);

	/*
	 * async write failure retry count. Initialised to zero on the first
	 * failure, then when it exceeds the maximum configured without a
	 * success the write is considered to be failed permanently and the
	 * iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffy of the first failure. This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors, not
	 * different errors. e.g. a block device might change ENOSPC to EIO when
	 * a failure timeout occurs, so we want to re-initialise the error
	 * retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
	struct rcu_head		b_rcu;
};

/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
		struct xfs_buf_map *map, int nmaps,
		const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
		struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
static inline void *xfs_buf_offset(struct xfs_buf *bp, size_t offset)
{
	return bp->b_addr + offset;
}

static inline void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize)
{
	memset(bp->b_addr + boff, 0, bsize);
}

extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
void xfs_buf_delwri_queue_here(struct xfs_buf *bp, struct list_head *bl);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);

static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
	return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

/*
 * Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
		struct file *bdev_file);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
int xfs_configure_buftarg(struct xfs_buftarg *btp, unsigned int sectorsize);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

/* for xfs_buf_mem.c only: */
int xfs_init_buftarg(struct xfs_buftarg *btp, size_t logical_sectorsize,
		const char *descr);
void xfs_destroy_buftarg(struct xfs_buftarg *btp);

#endif	/* __XFS_BUF_H__ */