Merge branch 'net-ethtool-introduce-ethnl-dump-helpers'

Maxime Chevallier says:

====================
net: ethtool: Introduce ethnl dump helpers

This is V8 for per-phy DUMP helpers, improving support for ->dumpit()
operations for PHY targeting commands.

This V8 fixes some issues spotted by Jakub (thanks!) on the multi-part
DUMP sequence. The netdev reftracking was reworked to make sure that
during a filtered DUMP, we only keep a ref on the netdev during
individual .dumpit() calls.

v1: https://lore.kernel.org/20250305141938.319282-1-maxime.chevallier@bootlin.com
v2: https://lore.kernel.org/20250308155440.267782-1-maxime.chevallier@bootlin.com
v3: https://lore.kernel.org/20250313182647.250007-1-maxime.chevallier@bootlin.com
v4: https://lore.kernel.org/20250324104012.367366-1-maxime.chevallier@bootlin.com
v5: https://lore.kernel.org/20250410123350.174105-1-maxime.chevallier@bootlin.com
v6: https://lore.kernel.org/20250415085155.132963-1-maxime.chevallier@bootlin.com
v7: https://lore.kernel.org/20250422161717.164440-1-maxime.chevallier@bootlin.com
====================

Link: https://patch.msgid.link/20250502085242.248645-1-maxime.chevallier@bootlin.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-05-05 17:17:43 -07:00
3 changed files with 305 additions and 272 deletions

View File

@@ -357,6 +357,18 @@ struct ethnl_dump_ctx {
unsigned long pos_ifindex;
};
/**
* struct ethnl_perphy_dump_ctx - context for dumpit() PHY-aware callbacks
* @ethnl_ctx: generic ethnl context
* @ifindex: For Filtered DUMP requests, the ifindex of the targeted netdev
* @pos_phyindex: iterator position for multi-msg DUMP
*/
struct ethnl_perphy_dump_ctx {
struct ethnl_dump_ctx ethnl_ctx;
unsigned int ifindex;
unsigned long pos_phyindex;
};
static const struct ethnl_request_ops *
ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_STRSET_GET] = &ethnl_strset_request_ops,
@@ -400,6 +412,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_MM_SET] = &ethnl_mm_request_ops,
[ETHTOOL_MSG_TSCONFIG_GET] = &ethnl_tsconfig_request_ops,
[ETHTOOL_MSG_TSCONFIG_SET] = &ethnl_tsconfig_request_ops,
[ETHTOOL_MSG_PHY_GET] = &ethnl_phy_request_ops,
};
static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
@@ -407,6 +420,12 @@ static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
return (struct ethnl_dump_ctx *)cb->ctx;
}
static struct ethnl_perphy_dump_ctx *
ethnl_perphy_dump_context(struct netlink_callback *cb)
{
return (struct ethnl_perphy_dump_ctx *)cb->ctx;
}
/**
* ethnl_default_parse() - Parse request message
* @req_info: pointer to structure to put data into
@@ -584,18 +603,19 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
{
struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb);
struct net *net = sock_net(skb->sk);
netdevice_tracker dev_tracker;
struct net_device *dev;
int ret = 0;
rcu_read_lock();
for_each_netdev_dump(net, dev, ctx->pos_ifindex) {
dev_hold(dev);
netdev_hold(dev, &dev_tracker, GFP_ATOMIC);
rcu_read_unlock();
ret = ethnl_default_dump_one(skb, dev, ctx, genl_info_dump(cb));
rcu_read_lock();
dev_put(dev);
netdev_put(dev, &dev_tracker);
if (ret < 0 && ret != -EOPNOTSUPP) {
if (likely(skb->len))
@@ -662,6 +682,173 @@ free_req_info:
return ret;
}
/* per-PHY ->start() handler for GET requests */
static int ethnl_perphy_start(struct netlink_callback *cb)
{
struct ethnl_perphy_dump_ctx *phy_ctx = ethnl_perphy_dump_context(cb);
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct ethnl_dump_ctx *ctx = &phy_ctx->ethnl_ctx;
struct ethnl_reply_data *reply_data;
const struct ethnl_request_ops *ops;
struct ethnl_req_info *req_info;
struct genlmsghdr *ghdr;
int ret;
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
ghdr = nlmsg_data(cb->nlh);
ops = ethnl_default_requests[ghdr->cmd];
if (WARN_ONCE(!ops, "cmd %u has no ethnl_request_ops\n", ghdr->cmd))
return -EOPNOTSUPP;
req_info = kzalloc(ops->req_info_size, GFP_KERNEL);
if (!req_info)
return -ENOMEM;
reply_data = kmalloc(ops->reply_data_size, GFP_KERNEL);
if (!reply_data) {
ret = -ENOMEM;
goto free_req_info;
}
/* Unlike per-dev dump, don't ignore dev. The dump handler
* will notice it and dump PHYs from given dev. We only keep track of
* the dev's ifindex, .dumpit() will grab and release the netdev itself.
*/
ret = ethnl_default_parse(req_info, &info->info, ops, false);
if (req_info->dev) {
phy_ctx->ifindex = req_info->dev->ifindex;
netdev_put(req_info->dev, &req_info->dev_tracker);
req_info->dev = NULL;
}
if (ret < 0)
goto free_reply_data;
ctx->ops = ops;
ctx->req_info = req_info;
ctx->reply_data = reply_data;
ctx->pos_ifindex = 0;
return 0;
free_reply_data:
kfree(reply_data);
free_req_info:
kfree(req_info);
return ret;
}
static int ethnl_perphy_dump_one_dev(struct sk_buff *skb,
struct ethnl_perphy_dump_ctx *ctx,
const struct genl_info *info)
{
struct ethnl_dump_ctx *ethnl_ctx = &ctx->ethnl_ctx;
struct net_device *dev = ethnl_ctx->req_info->dev;
struct phy_device_node *pdn;
int ret;
if (!dev->link_topo)
return 0;
xa_for_each_start(&dev->link_topo->phys, ctx->pos_phyindex, pdn,
ctx->pos_phyindex) {
ethnl_ctx->req_info->phy_index = ctx->pos_phyindex;
/* We can re-use the original dump_one as ->prepare_data in
* commands use ethnl_req_get_phydev(), which gets the PHY from
* the req_info->phy_index
*/
ret = ethnl_default_dump_one(skb, dev, ethnl_ctx, info);
if (ret)
return ret;
}
ctx->pos_phyindex = 0;
return 0;
}
static int ethnl_perphy_dump_all_dev(struct sk_buff *skb,
struct ethnl_perphy_dump_ctx *ctx,
const struct genl_info *info)
{
struct ethnl_dump_ctx *ethnl_ctx = &ctx->ethnl_ctx;
struct net *net = sock_net(skb->sk);
netdevice_tracker dev_tracker;
struct net_device *dev;
int ret = 0;
rcu_read_lock();
for_each_netdev_dump(net, dev, ethnl_ctx->pos_ifindex) {
netdev_hold(dev, &dev_tracker, GFP_ATOMIC);
rcu_read_unlock();
/* per-PHY commands use ethnl_req_get_phydev(), which needs the
* net_device in the req_info
*/
ethnl_ctx->req_info->dev = dev;
ret = ethnl_perphy_dump_one_dev(skb, ctx, info);
rcu_read_lock();
netdev_put(dev, &dev_tracker);
ethnl_ctx->req_info->dev = NULL;
if (ret < 0 && ret != -EOPNOTSUPP) {
if (likely(skb->len))
ret = skb->len;
break;
}
ret = 0;
}
rcu_read_unlock();
return ret;
}
/* per-PHY ->dumpit() handler for GET requests. */
static int ethnl_perphy_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct ethnl_perphy_dump_ctx *ctx = ethnl_perphy_dump_context(cb);
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct ethnl_dump_ctx *ethnl_ctx = &ctx->ethnl_ctx;
int ret = 0;
if (ctx->ifindex) {
netdevice_tracker dev_tracker;
struct net_device *dev;
dev = netdev_get_by_index(genl_info_net(&info->info),
ctx->ifindex, &dev_tracker,
GFP_KERNEL);
if (!dev)
return -ENODEV;
ethnl_ctx->req_info->dev = dev;
ret = ethnl_perphy_dump_one_dev(skb, ctx, genl_info_dump(cb));
if (ret < 0 && ret != -EOPNOTSUPP && likely(skb->len))
ret = skb->len;
netdev_put(dev, &dev_tracker);
} else {
ret = ethnl_perphy_dump_all_dev(skb, ctx, genl_info_dump(cb));
}
return ret;
}
/* per-PHY ->done() handler for GET requests */
static int ethnl_perphy_done(struct netlink_callback *cb)
{
struct ethnl_perphy_dump_ctx *ctx = ethnl_perphy_dump_context(cb);
struct ethnl_dump_ctx *ethnl_ctx = &ctx->ethnl_ctx;
kfree(ethnl_ctx->reply_data);
kfree(ethnl_ctx->req_info);
return 0;
}
/* default ->done() handler for GET requests */
static int ethnl_default_done(struct netlink_callback *cb)
{
@@ -1200,9 +1387,9 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_PSE_GET,
.doit = ethnl_default_doit,
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
.start = ethnl_perphy_start,
.dumpit = ethnl_perphy_dumpit,
.done = ethnl_perphy_done,
.policy = ethnl_pse_get_policy,
.maxattr = ARRAY_SIZE(ethnl_pse_get_policy) - 1,
},
@@ -1224,9 +1411,9 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_PLCA_GET_CFG,
.doit = ethnl_default_doit,
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
.start = ethnl_perphy_start,
.dumpit = ethnl_perphy_dumpit,
.done = ethnl_perphy_done,
.policy = ethnl_plca_get_cfg_policy,
.maxattr = ARRAY_SIZE(ethnl_plca_get_cfg_policy) - 1,
},
@@ -1240,9 +1427,9 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_PLCA_GET_STATUS,
.doit = ethnl_default_doit,
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
.start = ethnl_perphy_start,
.dumpit = ethnl_perphy_dumpit,
.done = ethnl_perphy_done,
.policy = ethnl_plca_get_status_policy,
.maxattr = ARRAY_SIZE(ethnl_plca_get_status_policy) - 1,
},
@@ -1271,10 +1458,10 @@ static const struct genl_ops ethtool_genl_ops[] = {
},
{
.cmd = ETHTOOL_MSG_PHY_GET,
.doit = ethnl_phy_doit,
.start = ethnl_phy_start,
.dumpit = ethnl_phy_dumpit,
.done = ethnl_phy_done,
.doit = ethnl_default_doit,
.start = ethnl_perphy_start,
.dumpit = ethnl_perphy_dumpit,
.done = ethnl_perphy_done,
.policy = ethnl_phy_get_policy,
.maxattr = ARRAY_SIZE(ethnl_phy_get_policy) - 1,
},

View File

@@ -499,10 +499,6 @@ int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info);
int ethnl_rss_dump_start(struct netlink_callback *cb);
int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_phy_start(struct netlink_callback *cb);
int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info);
int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_phy_done(struct netlink_callback *cb);
int ethnl_tsinfo_start(struct netlink_callback *cb);
int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_tsinfo_done(struct netlink_callback *cb);

View File

@@ -12,304 +12,154 @@
#include <net/netdev_lock.h>
struct phy_req_info {
struct ethnl_req_info base;
struct phy_device_node *pdn;
struct ethnl_req_info base;
};
#define PHY_REQINFO(__req_base) \
container_of(__req_base, struct phy_req_info, base)
struct phy_reply_data {
struct ethnl_reply_data base;
u32 phyindex;
char *drvname;
char *name;
unsigned int upstream_type;
char *upstream_sfp_name;
unsigned int upstream_index;
char *downstream_sfp_name;
};
#define PHY_REPDATA(__reply_base) \
container_of(__reply_base, struct phy_reply_data, base)
const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = {
[ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
};
/* Caller holds rtnl */
static ssize_t
ethnl_phy_reply_size(const struct ethnl_req_info *req_base,
struct netlink_ext_ack *extack)
static int phy_reply_size(const struct ethnl_req_info *req_info,
const struct ethnl_reply_data *reply_data)
{
struct phy_req_info *req_info = PHY_REQINFO(req_base);
struct phy_device_node *pdn = req_info->pdn;
struct phy_device *phydev = pdn->phy;
struct phy_reply_data *rep_data = PHY_REPDATA(reply_data);
size_t size = 0;
ASSERT_RTNL();
/* ETHTOOL_A_PHY_INDEX */
size += nla_total_size(sizeof(u32));
/* ETHTOOL_A_DRVNAME */
if (phydev->drv)
size += nla_total_size(strlen(phydev->drv->name) + 1);
if (rep_data->drvname)
size += nla_total_size(strlen(rep_data->drvname) + 1);
/* ETHTOOL_A_NAME */
size += nla_total_size(strlen(dev_name(&phydev->mdio.dev)) + 1);
size += nla_total_size(strlen(rep_data->name) + 1);
/* ETHTOOL_A_PHY_UPSTREAM_TYPE */
size += nla_total_size(sizeof(u32));
if (phy_on_sfp(phydev)) {
const char *upstream_sfp_name = sfp_get_name(pdn->parent_sfp_bus);
/* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */
if (rep_data->upstream_sfp_name)
size += nla_total_size(strlen(rep_data->upstream_sfp_name) + 1);
/* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */
if (upstream_sfp_name)
size += nla_total_size(strlen(upstream_sfp_name) + 1);
/* ETHTOOL_A_PHY_UPSTREAM_INDEX */
/* ETHTOOL_A_PHY_UPSTREAM_INDEX */
if (rep_data->upstream_index)
size += nla_total_size(sizeof(u32));
}
/* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */
if (phydev->sfp_bus) {
const char *sfp_name = sfp_get_name(phydev->sfp_bus);
if (sfp_name)
size += nla_total_size(strlen(sfp_name) + 1);
}
if (rep_data->downstream_sfp_name)
size += nla_total_size(strlen(rep_data->downstream_sfp_name) + 1);
return size;
}
static int
ethnl_phy_fill_reply(const struct ethnl_req_info *req_base, struct sk_buff *skb)
static int phy_prepare_data(const struct ethnl_req_info *req_info,
struct ethnl_reply_data *reply_data,
const struct genl_info *info)
{
struct phy_req_info *req_info = PHY_REQINFO(req_base);
struct phy_device_node *pdn = req_info->pdn;
struct phy_device *phydev = pdn->phy;
enum phy_upstream ptype;
ptype = pdn->upstream_type;
if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, phydev->phyindex) ||
nla_put_string(skb, ETHTOOL_A_PHY_NAME, dev_name(&phydev->mdio.dev)) ||
nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, ptype))
return -EMSGSIZE;
if (phydev->drv &&
nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, phydev->drv->name))
return -EMSGSIZE;
if (ptype == PHY_UPSTREAM_PHY) {
struct phy_device *upstream = pdn->upstream.phydev;
const char *sfp_upstream_name;
/* Parent index */
if (nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, upstream->phyindex))
return -EMSGSIZE;
if (pdn->parent_sfp_bus) {
sfp_upstream_name = sfp_get_name(pdn->parent_sfp_bus);
if (sfp_upstream_name &&
nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME,
sfp_upstream_name))
return -EMSGSIZE;
}
}
if (phydev->sfp_bus) {
const char *sfp_name = sfp_get_name(phydev->sfp_bus);
if (sfp_name &&
nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME,
sfp_name))
return -EMSGSIZE;
}
return 0;
}
static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
struct phy_link_topology *topo = req_base->dev->link_topo;
struct phy_req_info *req_info = PHY_REQINFO(req_base);
struct phy_link_topology *topo = reply_data->dev->link_topo;
struct phy_reply_data *rep_data = PHY_REPDATA(reply_data);
struct nlattr **tb = info->attrs;
struct phy_device_node *pdn;
struct phy_device *phydev;
phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PHY_HEADER,
extack);
if (!phydev)
return 0;
/* RTNL is held by the caller */
phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PHY_HEADER,
info->extack);
if (IS_ERR_OR_NULL(phydev))
return -EOPNOTSUPP;
if (IS_ERR(phydev))
return PTR_ERR(phydev);
pdn = xa_load(&topo->phys, phydev->phyindex);
if (!pdn)
return -EOPNOTSUPP;
if (!topo)
return 0;
rep_data->phyindex = phydev->phyindex;
rep_data->name = kstrdup(dev_name(&phydev->mdio.dev), GFP_KERNEL);
rep_data->drvname = kstrdup(phydev->drv->name, GFP_KERNEL);
rep_data->upstream_type = pdn->upstream_type;
req_info->pdn = xa_load(&topo->phys, phydev->phyindex);
if (pdn->upstream_type == PHY_UPSTREAM_PHY) {
struct phy_device *upstream = pdn->upstream.phydev;
rep_data->upstream_index = upstream->phyindex;
}
if (pdn->parent_sfp_bus)
rep_data->upstream_sfp_name = kstrdup(sfp_get_name(pdn->parent_sfp_bus),
GFP_KERNEL);
if (phydev->sfp_bus)
rep_data->downstream_sfp_name = kstrdup(sfp_get_name(phydev->sfp_bus),
GFP_KERNEL);
return 0;
}
int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
static int phy_fill_reply(struct sk_buff *skb,
const struct ethnl_req_info *req_info,
const struct ethnl_reply_data *reply_data)
{
struct phy_req_info req_info = {};
struct nlattr **tb = info->attrs;
struct sk_buff *rskb;
void *reply_payload;
int reply_len;
int ret;
struct phy_reply_data *rep_data = PHY_REPDATA(reply_data);
ret = ethnl_parse_header_dev_get(&req_info.base,
tb[ETHTOOL_A_PHY_HEADER],
genl_info_net(info), info->extack,
true);
if (ret < 0)
return ret;
if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, rep_data->phyindex) ||
nla_put_string(skb, ETHTOOL_A_PHY_NAME, rep_data->name) ||
nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, rep_data->upstream_type))
return -EMSGSIZE;
rtnl_lock();
netdev_lock_ops(req_info.base.dev);
if (rep_data->drvname &&
nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, rep_data->drvname))
return -EMSGSIZE;
ret = ethnl_phy_parse_request(&req_info.base, tb, info->extack);
if (ret < 0)
goto err_unlock;
if (rep_data->upstream_index &&
nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX,
rep_data->upstream_index))
return -EMSGSIZE;
/* No PHY, return early */
if (!req_info.pdn)
goto err_unlock;
if (rep_data->upstream_sfp_name &&
nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME,
rep_data->upstream_sfp_name))
return -EMSGSIZE;
ret = ethnl_phy_reply_size(&req_info.base, info->extack);
if (ret < 0)
goto err_unlock;
reply_len = ret + ethnl_reply_header_size();
if (rep_data->downstream_sfp_name &&
nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME,
rep_data->downstream_sfp_name))
return -EMSGSIZE;
rskb = ethnl_reply_init(reply_len, req_info.base.dev,
ETHTOOL_MSG_PHY_GET_REPLY,
ETHTOOL_A_PHY_HEADER,
info, &reply_payload);
if (!rskb) {
ret = -ENOMEM;
goto err_unlock;
}
ret = ethnl_phy_fill_reply(&req_info.base, rskb);
if (ret)
goto err_free_msg;
netdev_unlock_ops(req_info.base.dev);
rtnl_unlock();
ethnl_parse_header_dev_put(&req_info.base);
genlmsg_end(rskb, reply_payload);
return genlmsg_reply(rskb, info);
err_free_msg:
nlmsg_free(rskb);
err_unlock:
netdev_unlock_ops(req_info.base.dev);
rtnl_unlock();
ethnl_parse_header_dev_put(&req_info.base);
return ret;
return 0;
}
struct ethnl_phy_dump_ctx {
struct phy_req_info *phy_req_info;
unsigned long ifindex;
unsigned long phy_index;
static void phy_cleanup_data(struct ethnl_reply_data *reply_data)
{
struct phy_reply_data *rep_data = PHY_REPDATA(reply_data);
kfree(rep_data->drvname);
kfree(rep_data->name);
kfree(rep_data->upstream_sfp_name);
kfree(rep_data->downstream_sfp_name);
}
const struct ethnl_request_ops ethnl_phy_request_ops = {
.request_cmd = ETHTOOL_MSG_PHY_GET,
.reply_cmd = ETHTOOL_MSG_PHY_GET_REPLY,
.hdr_attr = ETHTOOL_A_PHY_HEADER,
.req_info_size = sizeof(struct phy_req_info),
.reply_data_size = sizeof(struct phy_reply_data),
.prepare_data = phy_prepare_data,
.reply_size = phy_reply_size,
.fill_reply = phy_fill_reply,
.cleanup_data = phy_cleanup_data,
};
int ethnl_phy_start(struct netlink_callback *cb)
{
const struct genl_info *info = genl_info_dump(cb);
struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
int ret;
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
ctx->phy_req_info = kzalloc(sizeof(*ctx->phy_req_info), GFP_KERNEL);
if (!ctx->phy_req_info)
return -ENOMEM;
ret = ethnl_parse_header_dev_get(&ctx->phy_req_info->base,
info->attrs[ETHTOOL_A_PHY_HEADER],
sock_net(cb->skb->sk), cb->extack,
false);
ctx->ifindex = 0;
ctx->phy_index = 0;
if (ret)
kfree(ctx->phy_req_info);
return ret;
}
int ethnl_phy_done(struct netlink_callback *cb)
{
struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
if (ctx->phy_req_info->base.dev)
ethnl_parse_header_dev_put(&ctx->phy_req_info->base);
kfree(ctx->phy_req_info);
return 0;
}
static int ethnl_phy_dump_one_dev(struct sk_buff *skb, struct net_device *dev,
struct netlink_callback *cb)
{
struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
struct phy_req_info *pri = ctx->phy_req_info;
struct phy_device_node *pdn;
int ret = 0;
void *ehdr;
if (!dev->link_topo)
return 0;
xa_for_each_start(&dev->link_topo->phys, ctx->phy_index, pdn, ctx->phy_index) {
ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_PHY_GET_REPLY);
if (!ehdr) {
ret = -EMSGSIZE;
break;
}
ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_PHY_HEADER);
if (ret < 0) {
genlmsg_cancel(skb, ehdr);
break;
}
pri->pdn = pdn;
ret = ethnl_phy_fill_reply(&pri->base, skb);
if (ret < 0) {
genlmsg_cancel(skb, ehdr);
break;
}
genlmsg_end(skb, ehdr);
}
return ret;
}
int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
struct net *net = sock_net(skb->sk);
struct net_device *dev;
int ret = 0;
rtnl_lock();
if (ctx->phy_req_info->base.dev) {
dev = ctx->phy_req_info->base.dev;
netdev_lock_ops(dev);
ret = ethnl_phy_dump_one_dev(skb, dev, cb);
netdev_unlock_ops(dev);
} else {
for_each_netdev_dump(net, dev, ctx->ifindex) {
netdev_lock_ops(dev);
ret = ethnl_phy_dump_one_dev(skb, dev, cb);
netdev_unlock_ops(dev);
if (ret)
break;
ctx->phy_index = 0;
}
}
rtnl_unlock();
return ret;
}