
idpf: stop Tx if there are insufficient buffer resources

The Tx refillq logic causes packets to be silently dropped if there are
not enough buffer resources available to send a packet in flow
scheduling mode. Instead, determine how many buffers are needed along
with the number of descriptors. Make sure there are enough of both
resources to send the packet, and stop the queue if not.

Fixes: 7292af042b ("idpf: fix a race in txq wakeup")
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
Tested-by: Samuel Salin <Samuel.salin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Authored by Joshua Hay on 2025-07-25 11:42:22 -07:00; committed by Tony Nguyen
parent 5f417d5513
commit 0c3f135e84
3 changed files with 47 additions and 19 deletions
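
At a glance, the reworked splitq transmit flow looks as follows (a simplified
sketch assembled from the hunks below, not the driver source verbatim; error
paths and unrelated setup are elided):

	u32 count, buf_count = 1;	/* the skb head always takes one buffer */

	/* Count descriptors needed and, via the new out-parameter, buffers. */
	count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
	if (unlikely(!count))
		return idpf_tx_drop_skb(tx_q, skb);

	/* Account for splitq-specific context/TSO descriptors. */
	count += IDPF_TX_DESCS_PER_CACHE_LINE + tso;

	/* Stop the queue and report busy rather than silently dropping
	 * when descriptors or refillq buf_ids are insufficient.
	 */
	if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
		return NETDEV_TX_BUSY;
	}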

drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

@@ -415,11 +415,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 {
 	struct idpf_tx_offload_params offload = { };
 	struct idpf_tx_buf *first;
+	u32 count, buf_count = 1;
 	int csum, tso, needed;
-	unsigned int count;
 	__be16 protocol;
 
-	count = idpf_tx_desc_count_required(tx_q, skb);
+	count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
 	if (unlikely(!count))
 		return idpf_tx_drop_skb(tx_q, skb);
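
Note: the singleq path consumes only the descriptor count; buf_count is passed
to satisfy the shared helper's new signature, while the refillq-based buffer
check applies only to the splitq (flow scheduling) path changed below.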

drivers/net/ethernet/intel/idpf/idpf_txrx.c

@@ -2191,15 +2191,22 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
 }
 
-/* Global conditions to tell whether the txq (and related resources)
- * has room to allow the use of "size" descriptors.
+/**
+ * idpf_tx_splitq_has_room - check if enough Tx splitq resources are available
+ * @tx_q: the queue to be checked
+ * @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of Tx buffers required for this packet
+ *
+ * Return: 0 if no room available, 1 otherwise
  */
-static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
+			     u32 bufs_needed)
 {
-	if (IDPF_DESC_UNUSED(tx_q) < size ||
+	if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
 	    IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
 		IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
-	    IDPF_TX_BUF_RSV_LOW(tx_q))
+	    IDPF_TX_BUF_RSV_LOW(tx_q) ||
+	    idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
 		return 0;
 
 	return 1;
 }
@@ -2208,14 +2215,21 @@ static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
  * @tx_q: the queue to be checked
  * @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of buffers needed for this packet
  *
- * Returns 0 if stop is not needed
+ * Return: 0 if stop is not needed
  */
 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
-				     unsigned int descs_needed)
+				     u32 descs_needed,
+				     u32 bufs_needed)
 {
+	/* Since we have multiple resources to check for splitq, our
+	 * start,stop_thrs becomes a boolean check instead of a count
+	 * threshold.
+	 */
 	if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
-				      idpf_txq_has_room(tx_q, descs_needed),
+				      idpf_txq_has_room(tx_q, descs_needed,
+							bufs_needed),
 				      1, 1))
 		return 0;
@@ -2257,14 +2271,16 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
 }
 
 /**
- * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
+ * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
  * @txq: queue to send buffer on
  * @skb: send buffer
+ * @bufs_needed: (output) number of buffers needed for this skb.
  *
- * Returns number of data descriptors needed for this skb.
+ * Return: number of data descriptors and buffers needed for this skb.
  */
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
-					 struct sk_buff *skb)
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+					struct sk_buff *skb,
+					u32 *bufs_needed)
 {
 	const struct skb_shared_info *shinfo;
 	unsigned int count = 0, i;
@@ -2275,6 +2291,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
 		return count;
 
 	shinfo = skb_shinfo(skb);
+	*bufs_needed += shinfo->nr_frags;
 	for (i = 0; i < shinfo->nr_frags; i++) {
 		unsigned int size;
@@ -2892,11 +2909,11 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
 	};
 	union idpf_flex_tx_ctx_desc *ctx_desc;
 	struct idpf_tx_buf *first;
-	unsigned int count;
+	u32 count, buf_count = 1;
 	int tso, idx;
 	u32 buf_id;
 
-	count = idpf_tx_desc_count_required(tx_q, skb);
+	count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
 	if (unlikely(!count))
 		return idpf_tx_drop_skb(tx_q, skb);
@@ -2906,7 +2923,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
 	/* Check for splitq specific TX resources */
 	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
 
-	if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
+	if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
 		return NETDEV_TX_BUSY;
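
A note on the stop check: netif_subqueue_maybe_stop() is built around comparing
a free-resource count against stop/start thresholds. Since splitq now gates on
several resources at once, idpf_txq_has_room() collapses them into a 0/1 answer
and both thresholds are set to 1, turning the generic helper into a boolean
gate, as the new comment in idpf_tx_maybe_stop_splitq() spells out. A minimal
model of that comparison (illustrative only, not the kernel helper's
implementation):

	/* With stop_thrs == start_thrs == 1 and a 0/1 "room" value, the
	 * queue is stopped exactly when has_room == 0.
	 */
	static int queue_may_run(int has_room, int stop_thrs)
	{
		return has_room >= stop_thrs;
	}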

drivers/net/ethernet/intel/idpf/idpf_txrx.h

@@ -1026,6 +1026,17 @@ static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
 			reg->dyn_ctl);
 }
 
+/**
+ * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
+ * @refillq: pointer to refillq containing buf_ids
+ */
+static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
+{
+	return (refillq->next_to_use > refillq->next_to_clean ?
+		0 : refillq->desc_count) +
+	       refillq->next_to_use - refillq->next_to_clean - 1;
+}
+
 int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
 void idpf_vport_init_num_qs(struct idpf_vport *vport,
 			    struct virtchnl2_create_vport *vport_msg);
@@ -1053,8 +1064,8 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
 		       bool xmit_more);
 unsigned int idpf_size_to_txd_count(unsigned int size);
 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
-					 struct sk_buff *skb);
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+					struct sk_buff *skb, u32 *buf_count);
 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
 netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 				  struct idpf_tx_queue *tx_q);
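
For reference, idpf_tx_splitq_get_free_bufs() is standard ring accounting: when
next_to_use has wrapped behind next_to_clean, the ring size is added back in,
and the trailing -1 keeps one slot in reserve so a full ring is never confused
with an empty one. A self-contained sketch exercising the same formula
(struct sw_queue is a hypothetical stand-in for the three fields of
struct idpf_sw_queue that the helper reads):

	#include <stdio.h>

	struct sw_queue {
		unsigned int next_to_use;	/* ring index, wraps at desc_count */
		unsigned int next_to_clean;	/* ring index, wraps at desc_count */
		unsigned int desc_count;	/* total ring entries */
	};

	/* Same expression as idpf_tx_splitq_get_free_bufs() above. */
	static unsigned int get_free_bufs(const struct sw_queue *q)
	{
		return (q->next_to_use > q->next_to_clean ?
			0 : q->desc_count) +
		       q->next_to_use - q->next_to_clean - 1;
	}

	int main(void)
	{
		struct sw_queue a = { .next_to_use = 5, .next_to_clean = 3, .desc_count = 8 };
		struct sw_queue b = { .next_to_use = 3, .next_to_clean = 5, .desc_count = 8 };

		printf("a: %u free\n", get_free_bufs(&a));	/* 0 + 5 - 3 - 1 = 1 */
		printf("b: %u free\n", get_free_bufs(&b));	/* 8 + 3 - 5 - 1 = 5 */
		return 0;
	}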