Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue
Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2025-08-25 (ice, ixgbe)

For ice:

Emil adds a check to ensure the auxiliary device was created before tear
down, to prevent a NULL pointer dereference.

Jake reworks the flow for failed Tx scheduler configuration to allow for
proper recovery and operation. He also adjusts the ice_adapter index for
E825C devices, as use of the DSN is incompatible with this device.

Michal corrects tracking of buffer allocation failures in
ice_clean_rx_irq().

For ixgbe:

Jedrzej adds the __packed attribute to ixgbe_orom_civd_info for
compatibility with device OROM data.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ixgbe: fix ixgbe_orom_civd_info struct layout
  ice: fix incorrect counter for buffer allocation failures
  ice: use fixed adapter index for E825C embedded devices
  ice: don't leave device non-functional if Tx scheduler config fails
  ice: fix NULL pointer dereference in ice_unplug_aux_dev() on reset
====================

Link: https://patch.msgid.link/20250825215019.3442873-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit aa125f08cb
@@ -510,6 +510,7 @@ enum ice_pf_flags {
 	ICE_FLAG_LINK_LENIENT_MODE_ENA,
 	ICE_FLAG_PLUG_AUX_DEV,
 	ICE_FLAG_UNPLUG_AUX_DEV,
+	ICE_FLAG_AUX_DEV_CREATED,
 	ICE_FLAG_MTU_CHANGED,
 	ICE_FLAG_GNSS,		/* GNSS successfully initialized */
 	ICE_FLAG_DPLL,		/* SyncE/PTP dplls initialized */
@@ -13,16 +13,45 @@
 static DEFINE_XARRAY(ice_adapters);
 static DEFINE_MUTEX(ice_adapters_mutex);
 
-static unsigned long ice_adapter_index(u64 dsn)
+#define ICE_ADAPTER_FIXED_INDEX	BIT_ULL(63)
+
+#define ICE_ADAPTER_INDEX_E825C	\
+	(ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX)
+
+static u64 ice_adapter_index(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case ICE_DEV_ID_E825C_BACKPLANE:
+	case ICE_DEV_ID_E825C_QSFP:
+	case ICE_DEV_ID_E825C_SFP:
+	case ICE_DEV_ID_E825C_SGMII:
+		/* E825C devices have multiple NACs which are connected to the
+		 * same clock source, and which must share the same
+		 * ice_adapter structure. We can't use the serial number since
+		 * each NAC has its own NVM generated with its own unique
+		 * Device Serial Number. Instead, rely on the embedded nature
+		 * of the E825C devices, and use a fixed index. This relies on
+		 * the fact that all E825C physical functions in a given
+		 * system are part of the same overall device.
+		 */
+		return ICE_ADAPTER_INDEX_E825C;
+	default:
+		return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX;
+	}
+}
+
+static unsigned long ice_adapter_xa_index(struct pci_dev *pdev)
 {
+	u64 index = ice_adapter_index(pdev);
+
 #if BITS_PER_LONG == 64
-	return dsn;
+	return index;
 #else
-	return (u32)dsn ^ (u32)(dsn >> 32);
+	return (u32)index ^ (u32)(index >> 32);
 #endif
 }
 
-static struct ice_adapter *ice_adapter_new(u64 dsn)
+static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev)
 {
 	struct ice_adapter *adapter;
 
@@ -30,7 +59,7 @@ static struct ice_adapter *ice_adapter_new(u64 dsn)
 	if (!adapter)
 		return NULL;
 
-	adapter->device_serial_number = dsn;
+	adapter->index = ice_adapter_index(pdev);
 	spin_lock_init(&adapter->ptp_gltsyn_time_lock);
 	spin_lock_init(&adapter->txq_ctx_lock);
 	refcount_set(&adapter->refcount, 1);
@@ -64,24 +93,23 @@ static void ice_adapter_free(struct ice_adapter *adapter)
  */
 struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
 {
-	u64 dsn = pci_get_dsn(pdev);
 	struct ice_adapter *adapter;
 	unsigned long index;
 	int err;
 
-	index = ice_adapter_index(dsn);
+	index = ice_adapter_xa_index(pdev);
 	scoped_guard(mutex, &ice_adapters_mutex) {
 		err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
 		if (err == -EBUSY) {
 			adapter = xa_load(&ice_adapters, index);
 			refcount_inc(&adapter->refcount);
-			WARN_ON_ONCE(adapter->device_serial_number != dsn);
+			WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
 			return adapter;
 		}
 		if (err)
 			return ERR_PTR(err);
 
-		adapter = ice_adapter_new(dsn);
+		adapter = ice_adapter_new(pdev);
 		if (!adapter)
 			return ERR_PTR(-ENOMEM);
 		xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
@@ -100,11 +128,10 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
  */
 void ice_adapter_put(struct pci_dev *pdev)
 {
-	u64 dsn = pci_get_dsn(pdev);
 	struct ice_adapter *adapter;
 	unsigned long index;
 
-	index = ice_adapter_index(dsn);
+	index = ice_adapter_xa_index(pdev);
 	scoped_guard(mutex, &ice_adapters_mutex) {
 		adapter = xa_load(&ice_adapters, index);
 		if (WARN_ON(!adapter))
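The hunks above change how the shared ice_adapter object is looked up: all E825C parts map to one fixed index with bit 63 set, every other device keeps using the PCI DSN with bit 63 masked off, and on 32-bit builds the 64-bit index is XOR-folded down to an unsigned long for the xarray. Below is a standalone sketch of that derivation; the device IDs and DSN value are made-up placeholders, not the real PCI definitions.

/* Sketch only: placeholder IDs, not the driver's ICE_DEV_ID_* constants. */
#include <stdint.h>
#include <stdio.h>

#define FIXED_INDEX_BIT	(1ULL << 63)	/* mirrors ICE_ADAPTER_FIXED_INDEX */
#define DEV_ID_E825C_A	0x579C		/* placeholder device IDs */
#define DEV_ID_E825C_B	0x579D

static uint64_t adapter_index(uint16_t device_id, uint64_t dsn)
{
	switch (device_id) {
	case DEV_ID_E825C_A:
	case DEV_ID_E825C_B:
		/* all E825C functions share one adapter: fixed index */
		return DEV_ID_E825C_A | FIXED_INDEX_BIT;
	default:
		/* DSN-based index; bit 63 is reserved for the fixed range */
		return dsn & ~FIXED_INDEX_BIT;
	}
}

/* fold the 64-bit index into an unsigned long, as done for 32-bit builds */
static unsigned long xa_index(uint64_t index)
{
	if (sizeof(unsigned long) == 8)
		return (unsigned long)index;
	return (unsigned long)((uint32_t)index ^ (uint32_t)(index >> 32));
}

int main(void)
{
	uint64_t dsn = 0x0011223344556677ULL;	/* made-up serial number */

	printf("E825C -> %#lx\n", xa_index(adapter_index(DEV_ID_E825C_A, dsn)));
	printf("other -> %#lx\n", xa_index(adapter_index(0x1593, dsn)));
	return 0;
}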
@@ -33,7 +33,7 @@ struct ice_port_list {
  * @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register
  * @ctrl_pf: Control PF of the adapter
  * @ports: Ports list
- * @device_serial_number: DSN cached for collision detection on 32bit systems
+ * @index: 64-bit index cached for collision detection on 32bit systems
  */
 struct ice_adapter {
 	refcount_t refcount;
@@ -44,7 +44,7 @@ struct ice_adapter {
 
 	struct ice_pf *ctrl_pf;
 	struct ice_port_list ports;
-	u64 device_serial_number;
+	u64 index;
 };
 
 struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
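The struct above is the object those lookups hand out: one refcounted ice_adapter shared by every function that resolves to the same index. A rough userspace approximation of that get-or-create pattern is sketched below, with a small mutex-protected table standing in for the kernel xarray; names and sizes are illustrative only.

/* Sketch: get-or-create with a reference count, not the driver code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct adapter {
	unsigned long index;
	unsigned int refcount;
};

#define MAX_ADAPTERS 8
static struct adapter *table[MAX_ADAPTERS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct adapter *adapter_get(unsigned long index)
{
	struct adapter *a = NULL;
	int i, free_slot = -1;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < MAX_ADAPTERS; i++) {
		if (table[i] && table[i]->index == index) {
			a = table[i];
			a->refcount++;	/* existing owner of this index */
			goto out;
		}
		if (!table[i] && free_slot < 0)
			free_slot = i;
	}
	if (free_slot >= 0) {
		a = calloc(1, sizeof(*a));
		if (a) {
			a->index = index;
			a->refcount = 1;
			table[free_slot] = a;
		}
	}
out:
	pthread_mutex_unlock(&table_lock);
	return a;
}

static void adapter_put(unsigned long index)
{
	int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < MAX_ADAPTERS; i++) {
		if (table[i] && table[i]->index == index) {
			if (--table[i]->refcount == 0) {
				free(table[i]);	/* last user frees the shared object */
				table[i] = NULL;
			}
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	struct adapter *a = adapter_get(42), *b = adapter_get(42);

	printf("same object: %d, refcount: %u\n", a == b, a->refcount);
	adapter_put(42);
	adapter_put(42);
	return 0;
}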
@@ -2377,7 +2377,13 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
  * The function will apply the new Tx topology from the package buffer
  * if available.
  *
- * Return: zero when update was successful, negative values otherwise.
+ * Return:
+ * * 0 - Successfully applied topology configuration.
+ * * -EBUSY - Failed to acquire global configuration lock.
+ * * -EEXIST - Topology configuration has already been applied.
+ * * -EIO - Unable to apply topology configuration.
+ * * -ENODEV - Failed to re-initialize device after applying configuration.
+ * * Other negative error codes indicate unexpected failures.
  */
 int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
 {
@@ -2410,7 +2416,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
 
 	if (status) {
 		ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
-		return status;
+		return -EIO;
 	}
 
 	/* Is default topology already applied ? */
@@ -2497,31 +2503,45 @@ update_topo:
 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 	if (status) {
 		ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
-		return status;
+		return -EBUSY;
 	}
 
 	/* Check if reset was triggered already. */
 	reg = rd32(hw, GLGEN_RSTAT);
 	if (reg & GLGEN_RSTAT_DEVSTATE_M) {
-		/* Reset is in progress, re-init the HW again */
 		ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
 		ice_check_reset(hw);
-		return 0;
+		/* Reset is in progress, re-init the HW again */
+		goto reinit_hw;
 	}
 
 	/* Set new topology */
 	status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
 	if (status) {
-		ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
-		return status;
+		ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology, status %pe\n",
+			  ERR_PTR(status));
+		/* only report -EIO here as the caller checks the error value
+		 * and reports an informational error message informing that
+		 * the driver failed to program Tx topology.
+		 */
+		status = -EIO;
 	}
 
-	/* New topology is updated, delay 1 second before issuing the CORER */
+	/* Even if Tx topology config failed, we need to CORE reset here to
+	 * clear the global configuration lock. Delay 1 second to allow
+	 * hardware to settle then issue a CORER
+	 */
 	msleep(1000);
 	ice_reset(hw, ICE_RESET_CORER);
 	/* CORER will clear the global lock, so no explicit call
 	 * required for release.
	 */
 	ice_check_reset(hw);
 
-	return 0;
+reinit_hw:
+	/* Since we triggered a CORER, re-initialize hardware */
+	ice_deinit_hw(hw);
+	if (ice_init_hw(hw)) {
+		ice_debug(hw, ICE_DBG_INIT, "Failed to re-init hardware after setting Tx topology\n");
+		return -ENODEV;
+	}
+
+	return status;
 }
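The reworked ice_cfg_tx_topo() above maps each failure to a specific errno and, crucially, still issues the CORER and re-initializes the hardware even when programming the topology fails, so the global configuration lock is released and the device stays usable. Below is a minimal, self-contained sketch of that control flow; the helper functions are stand-ins invented for illustration, not driver APIs.

/* Sketch of the "always reset, then report" recovery flow. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool lock_held;

static int acquire_cfg_lock(void) { lock_held = true; return 0; }
static int apply_topology(void)   { return -5; /* pretend the write failed */ }
static void core_reset(void)      { lock_held = false; /* reset drops the lock */ }
static int reinit_device(void)    { return 0; }

static int cfg_tx_topo(void)
{
	int status;

	if (acquire_cfg_lock())
		return -EBUSY;

	status = apply_topology();
	if (status)
		status = -EIO;	/* collapse hardware errors into one code */

	/* Reset even on failure: it releases the lock and gives the
	 * device a clean state to re-initialize from.
	 */
	core_reset();

	if (reinit_device())
		return -ENODEV;	/* device unusable, caller must bail out */

	return status;		/* 0 or -EIO; -EIO is treated as non-fatal */
}

int main(void)
{
	printf("cfg_tx_topo() = %d, lock held: %d\n", cfg_tx_topo(), lock_held);
	return 0;
}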
@@ -336,6 +336,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
 	mutex_lock(&pf->adev_mutex);
 	cdev->adev = adev;
 	mutex_unlock(&pf->adev_mutex);
+	set_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags);
 
 	return 0;
 }
@@ -347,15 +348,16 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
 {
 	struct auxiliary_device *adev;
 
+	if (!test_and_clear_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags))
+		return;
+
 	mutex_lock(&pf->adev_mutex);
 	adev = pf->cdev_info->adev;
 	pf->cdev_info->adev = NULL;
 	mutex_unlock(&pf->adev_mutex);
 
-	if (adev) {
-		auxiliary_device_delete(adev);
-		auxiliary_device_uninit(adev);
-	}
+	auxiliary_device_delete(adev);
+	auxiliary_device_uninit(adev);
 }
 
 /**
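The unplug path above now bails out unless ICE_FLAG_AUX_DEV_CREATED was set by a completed plug, and test_and_clear_bit() also makes repeated teardown calls harmless, which is what prevents the NULL dereference on reset. A small sketch of the same guard, using C11 atomics in place of the kernel bit operations; the names are illustrative.

/* Sketch: tear down only what was actually set up, exactly once. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool aux_dev_created;
static void *aux_dev;	/* stands in for the auxiliary device pointer */

static void plug_aux_dev(void)
{
	static int dummy;

	aux_dev = &dummy;
	atomic_store(&aux_dev_created, true);	/* publish only after setup */
}

static void unplug_aux_dev(void)
{
	/* atomically claim the teardown; early or repeated callers bail out */
	if (!atomic_exchange(&aux_dev_created, false))
		return;

	printf("deleting aux device %p\n", aux_dev);
	aux_dev = NULL;
}

int main(void)
{
	unplug_aux_dev();	/* before plug: no-op instead of a NULL deref */
	plug_aux_dev();
	unplug_aux_dev();	/* real teardown */
	unplug_aux_dev();	/* repeated call: safely ignored */
	return 0;
}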
@@ -4536,17 +4536,23 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
 			dev_info(dev, "Tx scheduling layers switching feature disabled\n");
 		else
 			dev_info(dev, "Tx scheduling layers switching feature enabled\n");
-		/* if there was a change in topology ice_cfg_tx_topo triggered
-		 * a CORER and we need to re-init hw
+		return 0;
+	} else if (err == -ENODEV) {
+		/* If we failed to re-initialize the device, we can no longer
+		 * continue loading.
 		 */
-		ice_deinit_hw(hw);
-		err = ice_init_hw(hw);
-
+		dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n");
 		return err;
 	} else if (err == -EIO) {
 		dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
+		return 0;
+	} else if (err == -EEXIST) {
+		return 0;
 	}
 
+	/* Do not treat this as a fatal error. */
+	dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n",
+		 ERR_PTR(err));
 	return 0;
 }
@@ -1352,7 +1352,7 @@ construct_skb:
 		skb = ice_construct_skb(rx_ring, xdp);
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
-			rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
 			xdp_verdict = ICE_XDP_CONSUMED;
 		}
 		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
@@ -3125,7 +3125,7 @@ static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
 	if (err)
 		return err;
 
-	combo_ver = le32_to_cpu(civd.combo_ver);
+	combo_ver = get_unaligned_le32(&civd.combo_ver);
 
 	orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
 	orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
@@ -932,7 +932,7 @@ struct ixgbe_orom_civd_info {
 	__le32 combo_ver;	/* Combo Image Version number */
 	u8 combo_name_len;	/* Length of the unicode combo image version string, max of 32 */
 	__le16 combo_name[32];	/* Unicode string representing the Combo Image version */
-};
+} __packed;
 
 /* Function specific capabilities */
 struct ixgbe_hw_func_caps {
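The last two hunks belong together: packing ixgbe_orom_civd_info removes compiler padding so the struct matches the raw OROM bytes, and since packed members may then sit at unaligned addresses, the read switches from le32_to_cpu() on a struct member to get_unaligned_le32(). Below is a sketch of both effects; it assumes GCC/Clang attribute syntax and uses a stand-in struct rather than the real layout.

/* Sketch: padding vs. packed layout, plus an alignment-safe LE load. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct civd_padded {
	uint8_t  signature[4];
	uint8_t  checksum;
	uint32_t combo_ver;
	uint8_t  combo_name_len;
	uint16_t combo_name[32];
};

struct civd_packed {
	uint8_t  signature[4];
	uint8_t  checksum;
	uint32_t combo_ver;
	uint8_t  combo_name_len;
	uint16_t combo_name[32];
} __attribute__((packed));

/* memcpy-based little-endian load, safe at any alignment (roughly what
 * get_unaligned_le32() provides in the kernel)
 */
static uint32_t load_le32(const void *p)
{
	uint8_t b[4];

	memcpy(b, p, sizeof(b));
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	const uint8_t raw[] = { 0x04, 0x03, 0x02, 0x01 };	/* LE bytes of 0x01020304 */

	/* padding changes both the size and the field offsets, so the padded
	 * struct would parse the OROM bytes at the wrong positions
	 */
	printf("padded: size %zu, combo_ver at %zu\n",
	       sizeof(struct civd_padded), offsetof(struct civd_padded, combo_ver));
	printf("packed: size %zu, combo_ver at %zu\n",
	       sizeof(struct civd_packed), offsetof(struct civd_packed, combo_ver));
	printf("load_le32(raw) = %#x\n", load_le32(raw));
	return 0;
}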