crypto: cryptd - Use nested-BH locking for cryptd_cpu_queue

cryptd_queue::cryptd_cpu_queue is a per-CPU variable and relies on
disabled BH for its locking. On PREEMPT_RT, local_bh_disable() does
not provide this per-CPU locking, so the data structure requires
explicit locking.
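
For illustration, a minimal sketch of the pattern being fixed (the
identifiers are made up for this example and are not from cryptd):
per-CPU data whose only serialisation is the fact that bottom halves
are disabled on the local CPU.

	/* Hypothetical example: a per-CPU queue "locked" only by disabled BH. */
	static DEFINE_PER_CPU(struct crypto_queue, example_queue);

	static int example_enqueue(struct crypto_async_request *req)
	{
		int err;

		local_bh_disable();
		/*
		 * Safe on !PREEMPT_RT: no preemption and no softirq can run
		 * on this CPU while BH is disabled. On PREEMPT_RT this
		 * section is preemptible, so another task on the same CPU
		 * may enter it concurrently.
		 */
		err = crypto_enqueue_request(this_cpu_ptr(&example_queue), req);
		local_bh_enable();
		return err;
	}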

Add a local_lock_t to struct cryptd_cpu_queue and use
local_lock_nested_bh() for locking. On !PREEMPT_RT this change only
adds lockdep coverage and does not alter the functional behaviour.
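
The "lockdep coverage only" claim for !PREEMPT_RT follows from how
local_lock_t is defined. Paraphrasing include/linux/local_lock_internal.h
(the exact layout varies by kernel version):

	#ifndef CONFIG_PREEMPT_RT
	typedef struct {
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		struct lockdep_map	dep_map;
		struct task_struct	*owner;
	#endif
	} local_lock_t;		/* empty unless lockdep is enabled */
	#else
	typedef spinlock_t local_lock_t;	/* a real lock on PREEMPT_RT */
	#endif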

Cc: "David S. Miller" <davem@davemloft.net>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: linux-crypto@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 590f8a67ba (parent d956692c7d)
Author:    Sebastian Andrzej Siewior
Date:      2025-07-01 08:09:36 +02:00
Committer: Herbert Xu

--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -34,6 +34,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
 static struct workqueue_struct *cryptd_wq;
 
 struct cryptd_cpu_queue {
+	local_lock_t bh_lock;
 	struct crypto_queue queue;
 	struct work_struct work;
 };
@@ -110,6 +111,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+		local_lock_init(&cpu_queue->bh_lock);
 	}
 	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
 	return 0;
@@ -135,6 +137,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	refcount_t *refcnt;
 
 	local_bh_disable();
+	local_lock_nested_bh(&queue->cpu_queue->bh_lock);
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
@@ -151,6 +154,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	refcount_inc(refcnt);
 
 out:
+	local_unlock_nested_bh(&queue->cpu_queue->bh_lock);
 	local_bh_enable();
 	return err;
 }
@@ -169,8 +173,10 @@ static void cryptd_queue_worker(struct work_struct *work)
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
 	 */
 	local_bh_disable();
+	__local_lock_nested_bh(&cpu_queue->bh_lock);
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
+	__local_unlock_nested_bh(&cpu_queue->bh_lock);
 	local_bh_enable();
 
 	if (!req)
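
A note on the two lock flavours used above: local_lock_nested_bh()
takes the per-CPU variable itself and resolves the current CPU's
instance internally, which fits cryptd_enqueue_request() where
queue->cpu_queue is still the per-CPU pointer. In cryptd_queue_worker()
the pointer is already resolved (cpu_queue comes from container_of() on
the work item), which appears to be why the double-underscore variants,
operating on an already-resolved local_lock_t, are used there:

	/* per-CPU variable; this_cpu_ptr() is applied internally: */
	local_lock_nested_bh(&queue->cpu_queue->bh_lock);

	/* pointer already refers to this CPU's queue: */
	__local_lock_nested_bh(&cpu_queue->bh_lock);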