io_uring/wq: avoid indirect do_work/free_work calls
struct io_wq stores do_work and free_work function pointers which are called
on each work item. But these function pointers are always set to
io_wq_submit_work and io_wq_free_work, respectively. So remove these function
pointers and just call the functions directly.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Link: https://lore.kernel.org/r/20250329161527.3281314-1-csander@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 9fe99eed91
parent 9d7a0577c9
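For context, a minimal userspace C sketch of the two dispatch shapes the diff below moves between: the old shape, where the work queue carries do_work/free_work function pointers and dispatches through them, and the new shape, which calls the fixed handlers directly. The struct and function names here (wq_indirect, submit_work, free_work, run_indirect, run_direct) are illustrative stand-ins, not the kernel's types.

#include <stdio.h>

struct work_item { int id; };

/* Stand-ins for what io-wq does via io_wq_submit_work()/io_wq_free_work();
 * the bodies are illustrative only. */
static void submit_work(struct work_item *work)
{
	printf("submitting work %d\n", work->id);
}

static struct work_item *free_work(struct work_item *work)
{
	printf("freeing work %d\n", work->id);
	return NULL;	/* no linked work in this sketch */
}

/* Old shape: the queue stores function pointers and dispatches indirectly. */
struct wq_indirect {
	void (*do_work)(struct work_item *);
	struct work_item *(*free_work)(struct work_item *);
};

static void run_indirect(struct wq_indirect *wq, struct work_item *work)
{
	do {
		wq->do_work(work);		/* indirect call */
		work = wq->free_work(work);
	} while (work);
}

/* New shape: only one pair of handlers is ever used, so call them directly. */
static void run_direct(struct work_item *work)
{
	do {
		submit_work(work);		/* direct call */
		work = free_work(work);
	} while (work);
}

int main(void)
{
	struct work_item w = { .id = 1 };
	struct wq_indirect wq = { .do_work = submit_work, .free_work = free_work };

	run_indirect(&wq, &w);
	run_direct(&w);
	return 0;
}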
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -114,9 +114,6 @@ enum {
 struct io_wq {
 	unsigned long state;
 
-	free_work_fn *free_work;
-	io_wq_work_fn *do_work;
-
 	struct io_wq_hash *hash;
 
 	atomic_t worker_refs;
@@ -612,10 +609,10 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
 			if (do_kill &&
 			    (work_flags & IO_WQ_WORK_UNBOUND))
 				atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
-			wq->do_work(work);
+			io_wq_submit_work(work);
 			io_assign_current_work(worker, NULL);
 
-			linked = wq->free_work(work);
+			linked = io_wq_free_work(work);
 			work = next_hashed;
 			if (!work && linked && !io_wq_is_hashed(linked)) {
 				work = linked;
@@ -934,8 +931,8 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
 {
 	do {
 		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
-		wq->do_work(work);
-		work = wq->free_work(work);
+		io_wq_submit_work(work);
+		work = io_wq_free_work(work);
 	} while (work);
 }
 
@@ -1195,8 +1192,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 	int ret, i;
 	struct io_wq *wq;
 
-	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
-		return ERR_PTR(-EINVAL);
 	if (WARN_ON_ONCE(!bounded))
 		return ERR_PTR(-EINVAL);
 
@@ -1206,8 +1201,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 
 	refcount_inc(&data->hash->refs);
 	wq->hash = data->hash;
-	wq->free_work = data->free_work;
-	wq->do_work = data->do_work;
 
 	ret = -ENOMEM;
 
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -21,9 +21,6 @@ enum io_wq_cancel {
 	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
 };
 
-typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
-typedef void (io_wq_work_fn)(struct io_wq_work *);
-
 struct io_wq_hash {
 	refcount_t refs;
 	unsigned long map;
@@ -39,8 +36,6 @@ static inline void io_wq_put_hash(struct io_wq_hash *hash)
 struct io_wq_data {
 	struct io_wq_hash *hash;
 	struct task_struct *task;
-	io_wq_work_fn *do_work;
-	free_work_fn *free_work;
 };
 
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1812,7 +1812,7 @@ void io_wq_submit_work(struct io_wq_work *work)
 	bool needs_poll = false;
 	int ret = 0, err = -ECANCELED;
 
-	/* one will be dropped by ->io_wq_free_work() after returning to io-wq */
+	/* one will be dropped by io_wq_free_work() after returning to io-wq */
 	if (!(req->flags & REQ_F_REFCOUNT))
 		__io_req_set_refcount(req, 2);
 	else
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -35,8 +35,6 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
 
 	data.hash = hash;
 	data.task = task;
-	data.free_work = io_wq_free_work;
-	data.do_work = io_wq_submit_work;
 
 	/* Do QD, or 4 * CPUS, whatever is smallest */
 	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());