spi: tegra210-quad: Protect curr_xfer check in IRQ handler
Now that all other accesses to curr_xfer are done under the lock,
protect the curr_xfer NULL check in tegra_qspi_isr_thread() with the
spinlock. Without this protection, the following race can occur:
  CPU0 (ISR thread)                  CPU1 (timeout path)
  -----------------                  -------------------
  if (!tqspi->curr_xfer)
      // sees non-NULL
                                     spin_lock()
                                     tqspi->curr_xfer = NULL
                                     spin_unlock()
  handle_*_xfer()
      spin_lock()
      t = tqspi->curr_xfer           // NULL!
      ... t->len ...                 // NULL dereference!
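As an illustration, here is a minimal kernel-style C sketch of that race. It is not the driver's actual code: the struct layout, the simplified bodies, and the *_sketch names are assumptions that only echo the commit's identifiers.

    /* Shared state, simplified from the driver (assumed shape). */
    #include <linux/interrupt.h>
    #include <linux/spinlock.h>
    #include <linux/spi/spi.h>

    struct tegra_qspi_sketch {
            spinlock_t lock;
            struct spi_transfer *curr_xfer;
    };

    /* CPU1: the timeout path clears curr_xfer under the lock. */
    static void timeout_path_sketch(struct tegra_qspi_sketch *tqspi)
    {
            unsigned long flags;

            spin_lock_irqsave(&tqspi->lock, flags);
            tqspi->curr_xfer = NULL;
            spin_unlock_irqrestore(&tqspi->lock, flags);
    }

    /* CPU0: an unlocked check can observe a stale non-NULL pointer. */
    static void isr_thread_racy_sketch(struct tegra_qspi_sketch *tqspi)
    {
            if (!tqspi->curr_xfer)  /* checked without the lock */
                    return;
            /*
             * timeout_path_sketch() may run on another CPU right here,
             * so a later dereference such as curr_xfer->len hits NULL.
             */
    }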
With this patch, all curr_xfer accesses are now properly synchronized.
Although all accesses to curr_xfer are done under the lock,
tegra_qspi_isr_thread() checks it for NULL, releases the lock, and only
reacquires it later in handle_cpu_based_xfer()/handle_dma_based_xfer().
The timeout path can clear curr_xfer in that window, which could lead
to a NULL pointer dereference.
To handle this, add a NULL check inside the handlers after acquiring
the lock. This ensures that if the timeout path has already cleared
curr_xfer, the handler safely returns without dereferencing a NULL
pointer.
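A minimal sketch of that pattern, reusing the assumed struct from the sketch above (handler_sketch is a hypothetical name, not the driver's actual function):

    /* Re-read curr_xfer under the lock and bail out if it is gone. */
    static irqreturn_t handler_sketch(struct tegra_qspi_sketch *tqspi)
    {
            struct spi_transfer *t;
            unsigned long flags;

            spin_lock_irqsave(&tqspi->lock, flags);
            t = tqspi->curr_xfer;

            if (!t) {
                    /* Timeout path already cleared it; nothing to do. */
                    spin_unlock_irqrestore(&tqspi->lock, flags);
                    return IRQ_HANDLED;
            }

            /* Safe to dereference t (e.g. t->len) while the lock is held. */
            spin_unlock_irqrestore(&tqspi->lock, flags);
            return IRQ_HANDLED;
    }

Returning IRQ_HANDLED rather than IRQ_NONE in the !t case reflects the idea that the interrupt was real but was already consumed by the timeout path.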
Fixes: b4e002d8a7 ("spi: tegra210-quad: Fix timeout handling")
Signed-off-by: Breno Leitao <leitao@debian.org>
Tested-by: Jon Hunter <jonathanh@nvidia.com>
Acked-by: Jon Hunter <jonathanh@nvidia.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Link: https://patch.msgid.link/20260126-tegra_xfer-v2-6-6d2115e4f387@debian.org
Signed-off-by: Mark Brown <broonie@kernel.org>
@@ -1457,6 +1457,11 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
 	spin_lock_irqsave(&tqspi->lock, flags);
 	t = tqspi->curr_xfer;
+
+	if (!t) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		return IRQ_HANDLED;
+	}
 
 	if (tqspi->tx_status || tqspi->rx_status) {
 		tegra_qspi_handle_error(tqspi);
 		complete(&tqspi->xfer_completion);
@@ -1527,6 +1532,11 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 	spin_lock_irqsave(&tqspi->lock, flags);
 	t = tqspi->curr_xfer;
+
+	if (!t) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		return IRQ_HANDLED;
+	}
 
 	if (num_errors) {
 		tegra_qspi_dma_unmap_xfer(tqspi, t);
 		tegra_qspi_handle_error(tqspi);
@@ -1565,6 +1575,7 @@ exit:
 static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 {
 	struct tegra_qspi *tqspi = context_data;
+	unsigned long flags;
 	u32 status;
 
 	/*
@@ -1582,7 +1593,9 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 	 * If no transfer is in progress, check if this was a real interrupt
 	 * that the timeout handler already processed, or a spurious one.
 	 */
+	spin_lock_irqsave(&tqspi->lock, flags);
 	if (!tqspi->curr_xfer) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
 		/* Spurious interrupt - transfer not ready */
 		if (!(status & QSPI_RDY))
 			return IRQ_NONE;
@@ -1599,7 +1612,14 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 	tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
 
 	tegra_qspi_mask_clear_irq(tqspi);
+	spin_unlock_irqrestore(&tqspi->lock, flags);
 
+	/*
+	 * Lock is released here but handlers safely re-check curr_xfer under
+	 * lock before dereferencing.
+	 * DMA handler also needs to sleep in wait_for_completion_*(), which
+	 * cannot be done while holding spinlock.
+	 */
 	if (!tqspi->is_curr_dma_xfer)
 		return handle_cpu_based_xfer(tqspi);