mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-04 20:19:47 +08:00)
net/mlx5: Expose SFs IRQs
Expose the sysfs files for the IRQs that the mlx5 PCI SFs are using.
These entries are similar to the ones PCI PFs and VFs expose in their
'msi_irqs' directory.

Reviewed-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
v8->v9:
- add Przemek's RB
v6->v7:
- remove unneeded changes to the mlx5 sfnum SF sysfs
v5->v6:
- fail IRQ creation in case auxiliary_device_sysfs_irq_add() fails
  (Parav and Przemek)
v2->v3:
- fix the mlx5 sfnum SF sysfs
commit 0477d5168b
parent a808878308
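With this patch applied, each mlx5 SF auxiliary device carries an
'msi_irqs' sysfs directory, mirroring the one PCI PFs and VFs already
expose. As a rough userspace illustration (not part of the patch; the
SF device name 'mlx5_core.sf.2' below is a made-up example), the
exposed IRQ numbers could be listed like this:

/* sketch: list the IRQs an SF exposes under sysfs; the device name
 * is hypothetical and will differ on a real system.
 */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/auxiliary/devices/mlx5_core.sf.2/msi_irqs";
	struct dirent *de;
	DIR *dir = opendir(path);

	if (!dir) {
		perror(path);
		return 1;
	}
	while ((de = readdir(dir)) != NULL) {
		if (de->d_name[0] != '.')	/* skip "." and ".." */
			printf("IRQ %s\n", de->d_name);
	}
	closedir(dir);
	return 0;
}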
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -714,7 +714,7 @@ err2:
 err1:
 	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
 	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
-	mlx5_ctrl_irq_release(table->ctrl_irq);
+	mlx5_ctrl_irq_release(dev, table->ctrl_irq);
 	return err;
 }
 
@@ -730,7 +730,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
 	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
 	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
 	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
-	mlx5_ctrl_irq_release(table->ctrl_irq);
+	mlx5_ctrl_irq_release(dev, table->ctrl_irq);
 }
 
 struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
@@ -918,7 +918,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
 	af_desc.is_managed = 1;
 	cpumask_copy(&af_desc.mask, cpu_online_mask);
 	cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
-	irq = mlx5_irq_affinity_request(pool, &af_desc);
+	irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
 	if (IS_ERR(irq))
 		return PTR_ERR(irq);
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -112,15 +112,18 @@ irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req
 
 /**
  * mlx5_irq_affinity_request - request an IRQ according to the given mask.
+ * @dev: mlx5 core device which is requesting the IRQ.
  * @pool: IRQ pool to request from.
  * @af_desc: affinity descriptor for this IRQ.
  *
  * This function returns a pointer to IRQ, or ERR_PTR in case of error.
  */
 struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+			  struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_irq *least_loaded_irq, *new_irq;
+	int ret;
 
 	mutex_lock(&pool->lock);
 	least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
@@ -153,6 +156,16 @@ out:
 		mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
 unlock:
 	mutex_unlock(&pool->lock);
+	if (mlx5_irq_pool_is_sf_pool(pool)) {
+		ret = auxiliary_device_sysfs_irq_add(mlx5_sf_coredev_to_adev(dev),
+						     mlx5_irq_get_irq(least_loaded_irq));
+		if (ret) {
+			mlx5_core_err(dev, "Failed to create sysfs entry for irq %d, ret = %d\n",
+				      mlx5_irq_get_irq(least_loaded_irq), ret);
+			mlx5_irq_put(least_loaded_irq);
+			least_loaded_irq = ERR_PTR(ret);
+		}
+	}
 	return least_loaded_irq;
 }
 
@@ -164,6 +177,9 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
 	cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
 	synchronize_irq(pci_irq_vector(pool->dev->pdev,
 				       mlx5_irq_get_index(irq)));
+	if (mlx5_irq_pool_is_sf_pool(pool))
+		auxiliary_device_sysfs_irq_remove(mlx5_sf_coredev_to_adev(dev),
+						  mlx5_irq_get_irq(irq));
 	if (mlx5_irq_put(irq))
 		if (pool->irqs_per_cpu)
 			cpu_put(pool, cpu);
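Note the error handling added above: if auxiliary_device_sysfs_irq_add()
fails, the just-taken IRQ reference is dropped and ERR_PTR(ret) is
returned, so callers such as comp_irq_request_sf() in eq.c keep their
plain IS_ERR()/PTR_ERR() checks. For readers unfamiliar with the idiom,
here is a simplified userspace sketch with stand-ins for the kernel
macros (not the kernel's exact definitions):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/* encode a negative errno value inside a pointer */
static void *ERR_PTR(long error) { return (void *)error; }
/* decode it back */
static long PTR_ERR(const void *ptr) { return (long)ptr; }
/* pointers in the top MAX_ERRNO bytes of the address space are errors */
static int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	void *irq = ERR_PTR(-ENOMEM);	/* what the failure path returns */

	if (IS_ERR(irq))
		printf("request failed: %ld\n", PTR_ERR(irq));
	return 0;
}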
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -320,6 +320,12 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
 	return dev->coredev_type == MLX5_COREDEV_SF;
 }
 
+static inline struct auxiliary_device *
+mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev)
+{
+	return container_of(mdev->device, struct auxiliary_device, dev);
+}
+
 int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
 void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
 int mlx5_init_one(struct mlx5_core_dev *dev);
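The new mlx5_sf_coredev_to_adev() helper works because, for an SF,
mdev->device points at the struct device embedded in the SF's
auxiliary_device, so container_of() can walk back to the containing
structure. A minimal standalone sketch of that pattern, using
simplified stand-in types rather than the kernel's definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };
struct auxiliary_device { struct device dev; };	/* dev is embedded */

int main(void)
{
	struct auxiliary_device adev = { .dev = { .id = 7 } };
	struct device *inner = &adev.dev;	/* what mdev->device points at */
	struct auxiliary_device *outer =
		container_of(inner, struct auxiliary_device, dev);

	printf("recovered id: %d\n", outer->dev.id);	/* prints 7 */
	return 0;
}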
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -25,7 +25,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
 int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
 
 struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq);
 struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 				  struct irq_affinity_desc *af_desc,
 				  struct cpu_rmap **rmap);
@@ -36,12 +36,14 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
 struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
 int mlx5_irq_get_index(struct mlx5_irq *irq);
+int mlx5_irq_get_irq(const struct mlx5_irq *irq);
 
 struct mlx5_irq_pool;
 #ifdef CONFIG_MLX5_SF
 struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
 						    struct cpumask *used_cpus, u16 vecidx);
-struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
-					   struct irq_affinity_desc *af_desc);
+struct mlx5_irq *
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+			  struct irq_affinity_desc *af_desc);
 void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq);
 #else
@@ -53,7 +55,8 @@ struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
 }
 
 static inline struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+			  struct irq_affinity_desc *af_desc)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
@@ -61,6 +64,7 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *
 static inline
 void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
 {
+	mlx5_irq_release_vector(irq);
 }
 #endif
 #endif /* __MLX5_IRQ_H__ */
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -367,6 +367,11 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
 	return irq->mask;
 }
 
+int mlx5_irq_get_irq(const struct mlx5_irq *irq)
+{
+	return irq->map.virq;
+}
+
 int mlx5_irq_get_index(struct mlx5_irq *irq)
 {
 	return irq->map.index;
@@ -440,11 +445,12 @@ static void _mlx5_irq_release(struct mlx5_irq *irq)
 
 /**
  * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
+ * @dev: mlx5 device that releasing the IRQ.
  * @ctrl_irq: ctrl IRQ to be released.
  */
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
 {
-	_mlx5_irq_release(ctrl_irq);
+	mlx5_irq_affinity_irq_release(dev, ctrl_irq);
 }
 
 /**
@@ -473,7 +479,7 @@ struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
 		/* Allocate the IRQ in index 0. The vector was already allocated */
 		irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
 	} else {
-		irq = mlx5_irq_affinity_request(pool, &af_desc);
+		irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
 	}
 
 	return irq;