Merge tag 'i3c/fixes-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux

Pull i3c fixes from Alexandre Belloni:
 "This introduces the I3C_OR_I2C symbol which is not a fix per se but is
  affecting multiple subsystems so it is included to ease
  synchronization.

  Apart from that, Adrian is mostly fixing the mipi-i3c-hci driver DMA
  handling, and I took the opportunity to add two fixes for the dw-i3c
  driver.

  Subsystem:
   - simplify combined i3c/i2c dependencies

  Drivers:
   - dw: handle I2C properly, fix possible race condition
   - mipi-i3c-hci: many DMA related fixes"

* tag 'i3c/fixes-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux:
  i3c: dw-i3c-master: Set SIR_REJECT in DAT on device attach and reattach
  i3c: master: dw-i3c: Fix missing of_node for virtual I2C adapter
  i3c: mipi-i3c-hci: Fallback to software reset when bus disable fails
  i3c: mipi-i3c-hci: Fix handling of shared IRQs during early initialization
  i3c: mipi-i3c-hci: Fix race in DMA error handling in interrupt context
  i3c: mipi-i3c-hci: Consolidate common xfer processing logic
  i3c: mipi-i3c-hci: Restart DMA ring correctly after dequeue abort
  i3c: mipi-i3c-hci: Add missing TID field to no-op command descriptor
  i3c: mipi-i3c-hci: Correct RING_CTRL_ABORT handling in DMA dequeue
  i3c: mipi-i3c-hci: Fix race between DMA ring dequeue and interrupt handler
  i3c: mipi-i3c-hci: Fix race in DMA ring dequeue
  i3c: mipi-i3c-hci: Fix race in DMA ring enqueue for parallel xfers
  i3c: mipi-i3c-hci: Consolidate spinlocks
  i3c: mipi-i3c-hci: Factor out DMA mapping from queuing path
  i3c: mipi-i3c-hci: Fix Hot-Join NACK
  i3c: mipi-i3c-hci: Use ETIMEDOUT instead of ETIME for timeout errors
  i3c: simplify combined i3c/i2c dependencies
This commit is contained in:
Linus Torvalds
2026-03-14 16:25:10 -07:00
12 changed files with 210 additions and 157 deletions

View File

@@ -1493,8 +1493,7 @@ config SENSORS_LM73
config SENSORS_LM75
tristate "National Semiconductor LM75 and compatibles"
depends on I2C
depends on I3C || !I3C
depends on I3C_OR_I2C
select REGMAP_I2C
select REGMAP_I3C if I3C
help
@@ -2382,8 +2381,7 @@ config SENSORS_TMP103
config SENSORS_TMP108
tristate "Texas Instruments TMP108"
depends on I2C
depends on I3C || !I3C
depends on I3C_OR_I2C
select REGMAP_I2C
select REGMAP_I3C if I3C
help

View File

@@ -22,3 +22,15 @@ menuconfig I3C
if I3C
source "drivers/i3c/master/Kconfig"
endif # I3C
config I3C_OR_I2C
tristate
default m if I3C=m
default I2C
help
Device drivers using module_i3c_i2c_driver() can use either
i2c or i3c hosts, but cannot be built-in for the kernel when
CONFIG_I3C=m.
Add 'depends on I3C_OR_I2C' in Kconfig for those drivers to
get the correct dependencies.

View File

@@ -1024,7 +1024,7 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
master->free_pos &= ~BIT(pos);
}
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr) | DEV_ADDR_TABLE_SIR_REJECT,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
@@ -1053,7 +1053,7 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
master->free_pos &= ~BIT(pos);
i3c_dev_set_master_data(dev, data);
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr),
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr) | DEV_ADDR_TABLE_SIR_REJECT,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
@@ -1659,6 +1659,8 @@ int dw_i3c_common_probe(struct dw_i3c_master *master,
pm_runtime_get_noresume(&pdev->dev);
INIT_WORK(&master->hj_work, dw_i3c_hj_work);
device_set_of_node_from_dev(&master->base.i2c.dev, &pdev->dev);
ret = i3c_master_register(&master->base, &pdev->dev,
&dw_mipi_i3c_ops, false);
if (ret)

View File

@@ -17,6 +17,7 @@
#define CMD_0_TOC W0_BIT_(31)
#define CMD_0_ROC W0_BIT_(30)
#define CMD_0_ATTR W0_MASK(2, 0)
#define CMD_0_TID W0_MASK(6, 3)
/*
* Response Descriptor Structure

View File

@@ -331,12 +331,10 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
CMD_A0_ROC | CMD_A0_TOC;
xfer->cmd_desc[1] = 0;
xfer->completion = &done;
hci->io->queue_xfer(hci, xfer, 1);
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, 1)) {
ret = -ETIME;
xfer->timeout = HZ;
ret = i3c_hci_process_xfer(hci, xfer, 1);
if (ret)
break;
}
if ((RESP_STATUS(xfer->response) == RESP_ERR_ADDR_HEADER ||
RESP_STATUS(xfer->response) == RESP_ERR_NACK) &&
RESP_DATA_LENGTH(xfer->response) == 1) {

View File

@@ -253,6 +253,7 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
xfer[0].rnw = true;
xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8);
xfer[1].completion = &done;
xfer[1].timeout = HZ;
for (;;) {
ret = i3c_master_get_free_addr(&hci->master, next_addr);
@@ -272,12 +273,9 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
CMD_A0_ASSIGN_ADDRESS(next_addr) |
CMD_A0_ROC |
CMD_A0_TOC;
hci->io->queue_xfer(hci, xfer, 2);
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, 2)) {
ret = -ETIME;
ret = i3c_hci_process_xfer(hci, xfer, 2);
if (ret)
break;
}
if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
ret = 0; /* no more devices to be assigned */
break;

View File

@@ -152,7 +152,11 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
amd_set_resp_buf_thld(hci);
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
scoped_guard(spinlock_irqsave, &hci->lock)
hci->irq_inactive = false;
/* Enable bus with Hot-Join disabled */
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL);
dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL));
return 0;
@@ -177,21 +181,51 @@ static int i3c_hci_bus_disable(struct i3c_hci *hci)
return ret;
}
static int i3c_hci_software_reset(struct i3c_hci *hci)
{
u32 regval;
int ret;
/*
* SOFT_RST must be clear before we write to it.
* Then we must wait until it clears again.
*/
ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
!(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
if (ret) {
dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__);
return ret;
}
reg_write(RESET_CONTROL, SOFT_RST);
ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
!(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
if (ret) {
dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__);
return ret;
}
return 0;
}
void i3c_hci_sync_irq_inactive(struct i3c_hci *hci)
{
struct platform_device *pdev = to_platform_device(hci->master.dev.parent);
int irq = platform_get_irq(pdev, 0);
reg_write(INTR_SIGNAL_ENABLE, 0x0);
hci->irq_inactive = true;
synchronize_irq(irq);
scoped_guard(spinlock_irqsave, &hci->lock)
hci->irq_inactive = true;
}
static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
i3c_hci_bus_disable(hci);
if (i3c_hci_bus_disable(hci))
i3c_hci_software_reset(hci);
hci->io->cleanup(hci);
}
@@ -212,6 +246,36 @@ void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}
int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{
struct completion *done = xfer[n - 1].completion;
unsigned long timeout = xfer[n - 1].timeout;
int ret;
ret = hci->io->queue_xfer(hci, xfer, n);
if (ret)
return ret;
if (!wait_for_completion_timeout(done, timeout)) {
if (hci->io->dequeue_xfer(hci, xfer, n)) {
dev_err(&hci->master.dev, "%s: timeout error\n", __func__);
return -ETIMEDOUT;
}
return 0;
}
if (hci->io->handle_error) {
bool error = false;
for (int i = 0; i < n && !error; i++)
error = RESP_STATUS(xfer[i].response);
if (error)
return hci->io->handle_error(hci, xfer, n);
}
return 0;
}
static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
struct i3c_ccc_cmd *ccc)
{
@@ -252,18 +316,14 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
xfer[last].completion = &done;
xfer[last].timeout = HZ;
if (prefixed)
xfer--;
ret = hci->io->queue_xfer(hci, xfer, nxfers);
ret = i3c_hci_process_xfer(hci, xfer, nxfers);
if (ret)
goto out;
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
ret = -ETIME;
goto out;
}
for (i = prefixed; i < nxfers; i++) {
if (ccc->rnw)
ccc->dests[i - prefixed].payload.len =
@@ -334,15 +394,11 @@ static int i3c_hci_i3c_xfers(struct i3c_dev_desc *dev,
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
xfer[last].completion = &done;
xfer[last].timeout = HZ;
ret = hci->io->queue_xfer(hci, xfer, nxfers);
ret = i3c_hci_process_xfer(hci, xfer, nxfers);
if (ret)
goto out;
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
ret = -ETIME;
goto out;
}
for (i = 0; i < nxfers; i++) {
if (i3c_xfers[i].rnw)
i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
@@ -382,15 +438,11 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
xfer[last].completion = &done;
xfer[last].timeout = m->i2c.timeout;
ret = hci->io->queue_xfer(hci, xfer, nxfers);
ret = i3c_hci_process_xfer(hci, xfer, nxfers);
if (ret)
goto out;
if (!wait_for_completion_timeout(&done, m->i2c.timeout) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
ret = -ETIME;
goto out;
}
for (i = 0; i < nxfers; i++) {
if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
ret = -EIO;
@@ -566,6 +618,8 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
irqreturn_t result = IRQ_NONE;
u32 val;
guard(spinlock)(&hci->lock);
/*
* The IRQ can be shared, so the handler may be called when the IRQ is
* due to a different device. That could happen when runtime suspended,
@@ -601,34 +655,6 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
return result;
}
static int i3c_hci_software_reset(struct i3c_hci *hci)
{
u32 regval;
int ret;
/*
* SOFT_RST must be clear before we write to it.
* Then we must wait until it clears again.
*/
ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
!(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
if (ret) {
dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__);
return ret;
}
reg_write(RESET_CONTROL, SOFT_RST);
ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
!(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
if (ret) {
dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__);
return ret;
}
return 0;
}
static inline bool is_version_1_1_or_newer(struct i3c_hci *hci)
{
return hci->version_major > 1 || (hci->version_major == 1 && hci->version_minor > 0);
@@ -739,8 +765,12 @@ static int i3c_hci_runtime_suspend(struct device *dev)
int ret;
ret = i3c_hci_bus_disable(hci);
if (ret)
if (ret) {
/* Fall back to software reset to disable the bus */
ret = i3c_hci_software_reset(hci);
i3c_hci_sync_irq_inactive(hci);
return ret;
}
hci->io->suspend(hci);
@@ -760,11 +790,13 @@ static int i3c_hci_runtime_resume(struct device *dev)
mipi_i3c_hci_dat_v1.restore(hci);
hci->irq_inactive = false;
hci->io->resume(hci);
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
scoped_guard(spinlock_irqsave, &hci->lock)
hci->irq_inactive = false;
/* Enable bus with Hot-Join disabled */
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL);
return 0;
}
@@ -924,6 +956,9 @@ static int i3c_hci_probe(struct platform_device *pdev)
if (!hci)
return -ENOMEM;
spin_lock_init(&hci->lock);
mutex_init(&hci->control_mutex);
/*
* Multi-bus instances share the same MMIO address range, but not
* necessarily in separate contiguous sub-ranges. To avoid overlapping
@@ -950,6 +985,8 @@ static int i3c_hci_probe(struct platform_device *pdev)
if (ret)
return ret;
hci->irq_inactive = true;
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
IRQF_SHARED, NULL, hci);

View File

@@ -129,9 +129,8 @@ struct hci_rh_data {
dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
unsigned int done_ptr, ibi_chunk_ptr;
unsigned int done_ptr, ibi_chunk_ptr, xfer_space;
struct hci_xfer **src_xfers;
spinlock_t lock;
struct completion op_done;
};
@@ -261,6 +260,7 @@ ring_ready:
rh->done_ptr = 0;
rh->ibi_chunk_ptr = 0;
rh->xfer_space = rh->xfer_entries;
}
static void hci_dma_init_rings(struct i3c_hci *hci)
@@ -344,7 +344,6 @@ static int hci_dma_init(struct i3c_hci *hci)
goto err_out;
rh = &rings->headers[i];
rh->regs = hci->base_regs + offset;
spin_lock_init(&rh->lock);
init_completion(&rh->op_done);
rh->xfer_entries = XFER_RING_ENTRIES;
@@ -439,26 +438,63 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
}
}
static struct i3c_dma *hci_dma_map_xfer(struct device *dev, struct hci_xfer *xfer)
{
enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
bool need_bounce = device_iommu_mapped(dev) && xfer->rnw && (xfer->data_len & 3);
return i3c_master_dma_map_single(dev, xfer->data, xfer->data_len, need_bounce, dir);
}
static int hci_dma_map_xfer_list(struct i3c_hci *hci, struct device *dev,
struct hci_xfer *xfer_list, int n)
{
for (int i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
if (!xfer->data)
continue;
xfer->dma = hci_dma_map_xfer(dev, xfer);
if (!xfer->dma) {
hci_dma_unmap_xfer(hci, xfer_list, i);
return -ENOMEM;
}
}
return 0;
}
static int hci_dma_queue_xfer(struct i3c_hci *hci,
struct hci_xfer *xfer_list, int n)
{
struct hci_rings_data *rings = hci->io_data;
struct hci_rh_data *rh;
unsigned int i, ring, enqueue_ptr;
u32 op1_val, op2_val;
u32 op1_val;
int ret;
ret = hci_dma_map_xfer_list(hci, rings->sysdev, xfer_list, n);
if (ret)
return ret;
/* For now we only use ring 0 */
ring = 0;
rh = &rings->headers[ring];
spin_lock_irq(&hci->lock);
if (n > rh->xfer_space) {
spin_unlock_irq(&hci->lock);
hci_dma_unmap_xfer(hci, xfer_list, n);
return -EBUSY;
}
op1_val = rh_reg_read(RING_OPERATION1);
enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE :
DMA_TO_DEVICE;
bool need_bounce;
/* store cmd descriptor */
*ring_data++ = xfer->cmd_desc[0];
@@ -477,18 +513,6 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
/* 2nd and 3rd words of Data Buffer Descriptor Structure */
if (xfer->data) {
need_bounce = device_iommu_mapped(rings->sysdev) &&
xfer->rnw &&
xfer->data_len != ALIGN(xfer->data_len, 4);
xfer->dma = i3c_master_dma_map_single(rings->sysdev,
xfer->data,
xfer->data_len,
need_bounce,
dir);
if (!xfer->dma) {
hci_dma_unmap_xfer(hci, xfer_list, i);
return -ENOMEM;
}
*ring_data++ = lower_32_bits(xfer->dma->addr);
*ring_data++ = upper_32_bits(xfer->dma->addr);
} else {
@@ -503,26 +527,14 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
xfer->ring_entry = enqueue_ptr;
enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;
/*
* We may update the hardware view of the enqueue pointer
* only if we didn't reach its dequeue pointer.
*/
op2_val = rh_reg_read(RING_OPERATION2);
if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
/* the ring is full */
hci_dma_unmap_xfer(hci, xfer_list, i + 1);
return -EBUSY;
}
}
/* take care to update the hardware enqueue pointer atomically */
spin_lock_irq(&rh->lock);
op1_val = rh_reg_read(RING_OPERATION1);
rh->xfer_space -= n;
op1_val &= ~RING_OP1_CR_ENQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
spin_unlock_irq(&rh->lock);
spin_unlock_irq(&hci->lock);
return 0;
}
@@ -534,18 +546,29 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
unsigned int i;
bool did_unqueue = false;
u32 ring_status;
/* stop the ring */
rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
/*
* We're deep in it if ever this condition is ever met.
* Hardware might still be writing to memory, etc.
*/
dev_crit(&hci->master.dev, "unable to abort the ring\n");
WARN_ON(1);
guard(mutex)(&hci->control_mutex);
ring_status = rh_reg_read(RING_STATUS);
if (ring_status & RING_STATUS_RUNNING) {
/* stop the ring */
reinit_completion(&rh->op_done);
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_ABORT);
wait_for_completion_timeout(&rh->op_done, HZ);
ring_status = rh_reg_read(RING_STATUS);
if (ring_status & RING_STATUS_RUNNING) {
/*
* We're deep in it if ever this condition is ever met.
* Hardware might still be writing to memory, etc.
*/
dev_crit(&hci->master.dev, "unable to abort the ring\n");
WARN_ON(1);
}
}
spin_lock_irq(&hci->lock);
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
int idx = xfer->ring_entry;
@@ -559,7 +582,7 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;
/* store no-op cmd descriptor */
*ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
*ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7) | FIELD_PREP(CMD_0_TID, xfer->cmd_tid);
*ring_data++ = 0;
if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
*ring_data++ = 0;
@@ -577,15 +600,25 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
}
/* restart the ring */
mipi_i3c_hci_resume(hci);
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP);
spin_unlock_irq(&hci->lock);
return did_unqueue;
}
static int hci_dma_handle_error(struct i3c_hci *hci, struct hci_xfer *xfer_list, int n)
{
return hci_dma_dequeue_xfer(hci, xfer_list, n) ? -EIO : 0;
}
static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
u32 op1_val, op2_val, resp, *ring_resp;
unsigned int tid, done_ptr = rh->done_ptr;
unsigned int done_cnt = 0;
struct hci_xfer *xfer;
for (;;) {
@@ -603,6 +636,7 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
dev_dbg(&hci->master.dev, "orphaned ring entry");
} else {
hci_dma_unmap_xfer(hci, xfer, 1);
rh->src_xfers[done_ptr] = NULL;
xfer->ring_entry = -1;
xfer->response = resp;
if (tid != xfer->cmd_tid) {
@@ -617,15 +651,14 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
done_ptr = (done_ptr + 1) % rh->xfer_entries;
rh->done_ptr = done_ptr;
done_cnt += 1;
}
/* take care to update the software dequeue pointer atomically */
spin_lock(&rh->lock);
rh->xfer_space += done_cnt;
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
spin_unlock(&rh->lock);
}
static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
@@ -805,13 +838,10 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
i3c_master_queue_ibi(dev, slot);
done:
/* take care to update the ibi dequeue pointer atomically */
spin_lock(&rh->lock);
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_IBI_DEQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
spin_unlock(&rh->lock);
/* update the chunk pointer */
rh->ibi_chunk_ptr += ibi_chunks;
@@ -845,29 +875,8 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
hci_dma_xfer_done(hci, rh);
if (status & INTR_RING_OP)
complete(&rh->op_done);
if (status & INTR_TRANSFER_ABORT) {
u32 ring_status;
dev_notice_ratelimited(&hci->master.dev,
"Ring %d: Transfer Aborted\n", i);
mipi_i3c_hci_resume(hci);
ring_status = rh_reg_read(RING_STATUS);
if (!(ring_status & RING_STATUS_RUNNING) &&
status & INTR_TRANSFER_COMPLETION &&
status & INTR_TRANSFER_ERR) {
/*
* Ring stop followed by run is an Intel
* specific required quirk after resuming the
* halted controller. Do it only when the ring
* is not in running state after a transfer
* error.
*/
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
RING_CTRL_RUN_STOP);
}
}
if (status & INTR_TRANSFER_ABORT)
dev_dbg(&hci->master.dev, "Ring %d: Transfer Aborted\n", i);
if (status & INTR_IBI_RING_FULL)
dev_err_ratelimited(&hci->master.dev,
"Ring %d: IBI Ring Full Condition\n", i);
@@ -883,6 +892,7 @@ const struct hci_io_ops mipi_i3c_hci_dma = {
.cleanup = hci_dma_cleanup,
.queue_xfer = hci_dma_queue_xfer,
.dequeue_xfer = hci_dma_dequeue_xfer,
.handle_error = hci_dma_handle_error,
.irq_handler = hci_dma_irq_handler,
.request_ibi = hci_dma_request_ibi,
.free_ibi = hci_dma_free_ibi,

View File

@@ -50,6 +50,8 @@ struct i3c_hci {
const struct hci_io_ops *io;
void *io_data;
const struct hci_cmd_ops *cmd;
spinlock_t lock;
struct mutex control_mutex;
atomic_t next_cmd_tid;
bool irq_inactive;
u32 caps;
@@ -87,6 +89,7 @@ struct hci_xfer {
unsigned int data_len;
unsigned int cmd_tid;
struct completion *completion;
unsigned long timeout;
union {
struct {
/* PIO specific */
@@ -120,6 +123,7 @@ struct hci_io_ops {
bool (*irq_handler)(struct i3c_hci *hci);
int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
int (*handle_error)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req);
void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
@@ -154,5 +158,6 @@ void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
void amd_set_od_pp_timing(struct i3c_hci *hci);
void amd_set_resp_buf_thld(struct i3c_hci *hci);
void i3c_hci_sync_irq_inactive(struct i3c_hci *hci);
int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
#endif

View File

@@ -123,7 +123,6 @@ struct hci_pio_ibi_data {
};
struct hci_pio_data {
spinlock_t lock;
struct hci_xfer *curr_xfer, *xfer_queue;
struct hci_xfer *curr_rx, *rx_queue;
struct hci_xfer *curr_tx, *tx_queue;
@@ -212,7 +211,6 @@ static int hci_pio_init(struct i3c_hci *hci)
return -ENOMEM;
hci->io_data = pio;
spin_lock_init(&pio->lock);
__hci_pio_init(hci, &size_val);
@@ -631,7 +629,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
xfer[i].data_left = xfer[i].data_len;
}
spin_lock_irq(&pio->lock);
spin_lock_irq(&hci->lock);
prev_queue_tail = pio->xfer_queue;
pio->xfer_queue = &xfer[n - 1];
if (pio->curr_xfer) {
@@ -645,7 +643,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
pio_reg_read(INTR_STATUS),
pio_reg_read(INTR_SIGNAL_ENABLE));
}
spin_unlock_irq(&pio->lock);
spin_unlock_irq(&hci->lock);
return 0;
}
@@ -716,14 +714,14 @@ static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int
struct hci_pio_data *pio = hci->io_data;
int ret;
spin_lock_irq(&pio->lock);
spin_lock_irq(&hci->lock);
dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n,
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
dev_dbg(&hci->master.dev, "main_status = %#x/%#x",
readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
spin_unlock_irq(&pio->lock);
spin_unlock_irq(&hci->lock);
return ret;
}
@@ -1016,15 +1014,12 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
struct hci_pio_data *pio = hci->io_data;
u32 status;
spin_lock(&pio->lock);
status = pio_reg_read(INTR_STATUS);
dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
status, pio->enabled_irqs);
status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
if (!status) {
spin_unlock(&pio->lock);
if (!status)
return false;
}
if (status & STAT_IBI_STATUS_THLD)
hci_pio_process_ibi(hci, pio);
@@ -1058,7 +1053,6 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
spin_unlock(&pio->lock);
return true;
}

View File

@@ -143,8 +143,7 @@ config MMC5633
tristate "MEMSIC MMC5633 3-axis magnetic sensor"
select REGMAP_I2C
select REGMAP_I3C if I3C
depends on I2C
depends on I3C || !I3C
depends on I3C_OR_I2C
help
Say yes here to build support for the MEMSIC MMC5633 3-axis
magnetic sensor.

View File

@@ -1,10 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config AMD_SBRMI_I2C
tristate "AMD side band RMI support"
depends on I2C
depends on I3C_OR_I2C
depends on ARM || ARM64 || COMPILE_TEST
select REGMAP_I2C
depends on I3C || !I3C
select REGMAP_I3C if I3C
help
Side band RMI over I2C/I3C support for AMD out of band management.