net/mlx5: HWS, prevent rehash from filling up the queues

While moving the rules during rehash, the CQ is not drained. The flush
and drain happen only once all the rules of a certain queue have been
moved. This behaviour can lead to accumulating a large number of rules
that haven't yet received their completions, which can eventually fill
up the queue and cause the rehash to fail.

Fix this problem by forcing a drain once the number of outstanding
completions reaches the queue's burst threshold.

Fixes: ef94799a87 ("net/mlx5: HWS, rework rehash loop")
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Vlad Dogaru <vdogaru@nvidia.com>
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Link: https://patch.msgid.link/20250817202323.308604-5-mbloch@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 1a72298d27
parent 4a842b1bf1
Author:    Yevgeny Kliteynik <kliteyn@nvidia.com>
Date:      2025-08-17 23:23:20 +03:00
Committer: Jakub Kicinski <kuba@kernel.org>

@@ -84,6 +84,7 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
 	struct list_head *rules_list;
 	u32 pending_rules;
 	int i, ret = 0;
+	bool drain;
 
 	mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
@@ -111,10 +112,12 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
 			}
 
 			pending_rules++;
+			drain = pending_rules >=
+				hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
 			ret = mlx5hws_bwc_queue_poll(ctx,
 						     rule_attr.queue_id,
 						     &pending_rules,
-						     false);
+						     drain);
 			if (unlikely(ret)) {
 				if (ret == -ETIMEDOUT) {
 					mlx5hws_err(ctx,