mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-04 20:19:47 +08:00
Merge tag 'net-6.16-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from Bluetooth.

  Current release - new code bugs:
   - eth:
      - txgbe: fix the issue of TX failure
      - ngbe: specify IRQ vector when the number of VFs is 7

  Previous releases - regressions:
   - sched: always pass notifications when child class becomes empty
   - ipv4: fix stat increase when udp early demux drops the packet
   - bluetooth: prevent unintended pause by checking if advertising is active
   - virtio: fix error reporting in virtqueue_resize
   - eth:
      - virtio-net:
         - ensure the received length does not exceed allocated size
         - fix the xsk frame's length check
      - lan78xx: fix WARN in __netif_napi_del_locked on disconnect

  Previous releases - always broken:
   - bluetooth: mesh: check instances prior disabling advertising
   - eth:
      - idpf: convert control queue mutex to a spinlock
      - dpaa2: fix xdp_rxq_info leak
      - amd-xgbe: align CL37 AN sequence as per databook"

* tag 'net-6.16-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (38 commits)
  vsock/vmci: Clear the vmci transport packet properly when initializing it
  dt-bindings: net: sophgo,sg2044-dwmac: Drop status from the example
  net: ngbe: specify IRQ vector when the number of VFs is 7
  net: wangxun: revert the adjustment of the IRQ vector sequence
  net: txgbe: request MISC IRQ in ndo_open
  virtio_net: Enforce minimum TX ring size for reliability
  virtio_net: Cleanup '2+MAX_SKB_FRAGS'
  virtio_ring: Fix error reporting in virtqueue_resize
  virtio-net: xsk: rx: fix the frame's length check
  virtio-net: use the check_mergeable_len helper
  virtio-net: remove redundant truesize check with PAGE_SIZE
  virtio-net: ensure the received length does not exceed allocated size
  net: ipv4: fix stat increase when udp early demux drops the packet
  net: libwx: fix the incorrect display of the queue number
  amd-xgbe: do not double read link status
  net/sched: Always pass notifications when child class becomes empty
  nui: Fix dma_mapping_error() check
  rose: fix dangling neighbour pointers in rose_rt_device_down()
  enic: fix incorrect MTU comparison in enic_change_mtu()
  amd-xgbe: align CL37 AN sequence as per databook
  ...
commit 17bbde2e17
@@ -80,6 +80,8 @@ examples:
 interrupt-parent = <&intc>;
 interrupts = <296 IRQ_TYPE_LEVEL_HIGH>;
 interrupt-names = "macirq";
+phy-handle = <&phy0>;
+phy-mode = "rgmii-id";
 resets = <&rst 30>;
 reset-names = "stmmaceth";
 snps,multicast-filter-bins = <0>;
@@ -91,7 +93,6 @@ examples:
 snps,mtl-rx-config = <&gmac0_mtl_rx_setup>;
 snps,mtl-tx-config = <&gmac0_mtl_tx_setup>;
 snps,axi-config = <&gmac0_stmmac_axi_setup>;
-status = "disabled";
 
 gmac0_mtl_rx_setup: rx-queues-config {
 snps,rx-queues-to-use = <8>;
@@ -16,11 +16,13 @@ User interface
 Creating a TLS connection
 -------------------------
 
-First create a new TCP socket and set the TLS ULP.
+First create a new TCP socket and once the connection is established set the
+TLS ULP.
 
 .. code-block:: c
 
   sock = socket(AF_INET, SOCK_STREAM, 0);
+  connect(sock, addr, addrlen);
   setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 
 Setting the TLS ULP allows us to set/get TLS socket options. Currently
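For context on the kTLS flow touched by this documentation fix: once the "tls" ULP is attached, the next step the TLS documentation goes on to describe is installing crypto state with setsockopt() on SOL_TLS. The snippet below is only an illustrative sketch, not part of this diff; it assumes AES-GCM-128 with TLS 1.2, uses a hypothetical helper name (tls_tx_enable), and fills in placeholder key material instead of a real handshake.

.. code-block:: c

  #include <linux/tls.h>
  #include <netinet/tcp.h>
  #include <sys/socket.h>
  #include <string.h>

  /* Placeholder key material; in real use these come from the TLS handshake. */
  static unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
  static unsigned char iv[TLS_CIPHER_AES_GCM_128_IV_SIZE];
  static unsigned char salt[TLS_CIPHER_AES_GCM_128_SALT_SIZE];
  static unsigned char rec_seq[TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE];

  /* Hypothetical helper: install TX crypto state on an established socket
   * that already has the "tls" ULP set; RX works the same way with TLS_RX.
   */
  static int tls_tx_enable(int sock)
  {
          struct tls12_crypto_info_aes_gcm_128 crypto_info;

          memset(&crypto_info, 0, sizeof(crypto_info));
          crypto_info.info.version = TLS_1_2_VERSION;
          crypto_info.info.cipher_type = TLS_CIPHER_AES_GCM_128;
          memcpy(crypto_info.iv, iv, sizeof(iv));
          memcpy(crypto_info.rec_seq, rec_seq, sizeof(rec_seq));
          memcpy(crypto_info.key, key, sizeof(key));
          memcpy(crypto_info.salt, salt, sizeof(salt));

          return setsockopt(sock, SOL_TLS, TLS_TX,
                            &crypto_info, sizeof(crypto_info));
  }

After this, a plain send() on the socket transmits TLS records encrypted by the kernel.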
@@ -312,7 +312,7 @@ Posting as one thread is discouraged because it confuses patchwork
 (as of patchwork 2.2.2).
 
 Co-posting selftests
---------------------
+~~~~~~~~~~~~~~~~~~~~
 
 Selftests should be part of the same series as the code changes.
 Specifically for fixes both code change and related test should go into

 MAINTAINERS | 10
@@ -15550,6 +15550,7 @@ F: drivers/net/ethernet/mellanox/mlx4/en_*
 MELLANOX ETHERNET DRIVER (mlx5e)
 M: Saeed Mahameed <saeedm@nvidia.com>
 M: Tariq Toukan <tariqt@nvidia.com>
+M: Mark Bloch <mbloch@nvidia.com>
 L: netdev@vger.kernel.org
 S: Maintained
 W: https://www.nvidia.com/networking/
@@ -15619,6 +15620,7 @@ MELLANOX MLX5 core VPI driver
 M: Saeed Mahameed <saeedm@nvidia.com>
 M: Leon Romanovsky <leonro@nvidia.com>
 M: Tariq Toukan <tariqt@nvidia.com>
+M: Mark Bloch <mbloch@nvidia.com>
 L: netdev@vger.kernel.org
 L: linux-rdma@vger.kernel.org
 S: Maintained
@@ -21198,7 +21200,7 @@ M: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
 L: netdev@vger.kernel.org
 L: linux-renesas-soc@vger.kernel.org
 S: Maintained
-F: Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml
+F: Documentation/devicetree/bindings/net/renesas,rzv2h-gbeth.yaml
 F: drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
 
 RENESAS RZ/V2H(P) USB2PHY PORT RESET DRIVER
@@ -22586,9 +22588,11 @@ S: Maintained
 F: drivers/misc/sgi-xp/
 
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
+M: D. Wythe <alibuda@linux.alibaba.com>
+M: Dust Li <dust.li@linux.alibaba.com>
+M: Sidraya Jayagond <sidraya@linux.ibm.com>
 M: Wenjia Zhang <wenjia@linux.ibm.com>
-M: Jan Karcher <jaka@linux.ibm.com>
-R: D. Wythe <alibuda@linux.alibaba.com>
+R: Mahanta Jambigi <mjambigi@linux.ibm.com>
 R: Tony Lu <tonylu@linux.alibaba.com>
 R: Wen Gu <guwen@linux.alibaba.com>
 L: linux-rdma@vger.kernel.org
@@ -1269,6 +1269,8 @@
 #define MDIO_VEND2_CTRL1_SS13 BIT(13)
 #endif
 
+#define XGBE_VEND2_MAC_AUTO_SW BIT(9)
+
 /* MDIO mask values */
 #define XGBE_AN_CL73_INT_CMPLT BIT(0)
 #define XGBE_AN_CL73_INC_LINK BIT(1)
@@ -266,6 +266,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
 reg |= MDIO_VEND2_CTRL1_AN_RESTART;
 
 XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+
+reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL);
+reg |= XGBE_VEND2_MAC_AUTO_SW;
+XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg);
 }
 
 static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
@@ -894,6 +898,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
 
 netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
 (pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
+
+reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+reg &= ~MDIO_AN_CTRL1_ENABLE;
+XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+
 }
 
 static void xgbe_an73_init(struct xgbe_prv_data *pdata)
@@ -1295,6 +1304,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
 
 pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
 &an_restart);
+/* bail out if the link status register read fails */
+if (pdata->phy.link < 0)
+return;
+
 if (an_restart) {
 xgbe_phy_config_aneg(pdata);
 goto adjust_link;
@@ -2746,8 +2746,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
 static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
 {
 struct xgbe_phy_data *phy_data = pdata->phy_data;
-unsigned int reg;
-int ret;
+int reg, ret;
 
 *an_restart = 0;
 
@@ -2781,11 +2780,20 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
 return 0;
 }
 
-/* Link status is latched low, so read once to clear
- * and then read again to get current state
+reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+if (reg < 0)
+return reg;
+
+/* Link status is latched low so that momentary link drops
+ * can be detected. If link was already down read again
+ * to get the latest state.
 */
-reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
-reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
+reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+if (reg < 0)
+return reg;
+}
 
 if (pdata->en_rx_adap) {
 /* if the link is available and adaptation is done,
@@ -2804,9 +2812,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
 xgbe_phy_set_mode(pdata, phy_data->cur_mode);
 }
 
-/* check again for the link and adaptation status */
-reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
-if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+if (pdata->rx_adapt_done)
 return 1;
 } else if (reg & MDIO_STAT1_LSTATUS)
 return 1;
@@ -183,12 +183,12 @@
 #define XGBE_LINK_TIMEOUT 5
 #define XGBE_KR_TRAINING_WAIT_ITER 50
 
-#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(1)
 #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
 #define XGBE_SGMII_AN_LINK_SPEED_10 0x00
 #define XGBE_SGMII_AN_LINK_SPEED_100 0x04
 #define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
-#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+#define XGBE_SGMII_AN_LINK_STATUS BIT(4)
 
 /* ECC correctable error notification window (seconds) */
 #define XGBE_ECC_LIMIT 60
@@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
 break;
 }
 
-buffer_info->alloced = 1;
-buffer_info->skb = skb;
-buffer_info->length = (u16) adapter->rx_buffer_len;
 page = virt_to_page(skb->data);
 offset = offset_in_page(skb->data);
 buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
 adapter->rx_buffer_len,
 DMA_FROM_DEVICE);
+if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+kfree_skb(skb);
+adapter->soft_stats.rx_dropped++;
+break;
+}
+
+buffer_info->alloced = 1;
+buffer_info->skb = skb;
+buffer_info->length = (u16)adapter->rx_buffer_len;
+
 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
 rfd_desc->coalese = 0;
@@ -2183,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
 return 0;
 }
 
-static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 struct tx_packet_desc *ptpd)
 {
 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
 struct atl1_buffer *buffer_info;
@@ -2194,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 unsigned int nr_frags;
 unsigned int f;
 int retval;
+u16 first_mapped;
 u16 next_to_use;
 u16 data_len;
 u8 hdr_len;
@@ -2201,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 buf_len -= skb->data_len;
 nr_frags = skb_shinfo(skb)->nr_frags;
 next_to_use = atomic_read(&tpd_ring->next_to_use);
+first_mapped = next_to_use;
 buffer_info = &tpd_ring->buffer_info[next_to_use];
 BUG_ON(buffer_info->skb);
 /* put skb in last TPD */
@@ -2216,6 +2225,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
 offset, hdr_len,
 DMA_TO_DEVICE);
+if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+goto dma_err;
 
 if (++next_to_use == tpd_ring->count)
 next_to_use = 0;
@@ -2242,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 page, offset,
 buffer_info->length,
 DMA_TO_DEVICE);
+if (dma_mapping_error(&adapter->pdev->dev,
+buffer_info->dma))
+goto dma_err;
 if (++next_to_use == tpd_ring->count)
 next_to_use = 0;
 }
@@ -2254,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
 offset, buf_len,
 DMA_TO_DEVICE);
+if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+goto dma_err;
 if (++next_to_use == tpd_ring->count)
 next_to_use = 0;
 }
@@ -2277,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
 frag, i * ATL1_MAX_TX_BUF_LEN,
 buffer_info->length, DMA_TO_DEVICE);
+if (dma_mapping_error(&adapter->pdev->dev,
+buffer_info->dma))
+goto dma_err;
 
 if (++next_to_use == tpd_ring->count)
 next_to_use = 0;
@@ -2285,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 
 /* last tpd's buffer-info */
 buffer_info->skb = skb;
+
+return true;
+
+dma_err:
+while (first_mapped != next_to_use) {
+buffer_info = &tpd_ring->buffer_info[first_mapped];
+dma_unmap_page(&adapter->pdev->dev,
+buffer_info->dma,
+buffer_info->length,
+DMA_TO_DEVICE);
+buffer_info->dma = 0;
+
+if (++first_mapped == tpd_ring->count)
+first_mapped = 0;
+}
+return false;
 }
 
 static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
@@ -2355,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 
 len = skb_headlen(skb);
 
-if (unlikely(skb->len <= 0)) {
-dev_kfree_skb_any(skb);
-return NETDEV_TX_OK;
-}
+if (unlikely(skb->len <= 0))
+goto drop_packet;
 
 nr_frags = skb_shinfo(skb)->nr_frags;
 for (f = 0; f < nr_frags; f++) {
@@ -2371,10 +2404,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 if (mss) {
 if (skb->protocol == htons(ETH_P_IP)) {
 proto_hdr_len = skb_tcp_all_headers(skb);
-if (unlikely(proto_hdr_len > len)) {
-dev_kfree_skb_any(skb);
-return NETDEV_TX_OK;
-}
+if (unlikely(proto_hdr_len > len))
+goto drop_packet;
+
 /* need additional TPD ? */
 if (proto_hdr_len != len)
 count += (len - proto_hdr_len +
@@ -2406,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 }
 
 tso = atl1_tso(adapter, skb, ptpd);
-if (tso < 0) {
-dev_kfree_skb_any(skb);
-return NETDEV_TX_OK;
-}
+if (tso < 0)
+goto drop_packet;
 
 if (!tso) {
 ret_val = atl1_tx_csum(adapter, skb, ptpd);
-if (ret_val < 0) {
-dev_kfree_skb_any(skb);
-return NETDEV_TX_OK;
-}
+if (ret_val < 0)
+goto drop_packet;
 }
 
-atl1_tx_map(adapter, skb, ptpd);
+if (!atl1_tx_map(adapter, skb, ptpd))
+goto drop_packet;
+
 atl1_tx_queue(adapter, count, ptpd);
 atl1_update_mailbox(adapter);
 return NETDEV_TX_OK;
+
+drop_packet:
+adapter->soft_stats.tx_errors++;
+dev_kfree_skb_any(skb);
+return NETDEV_TX_OK;
 }
 
 static int atl1_rings_clean(struct napi_struct *napi, int budget)
@@ -1864,10 +1864,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
 return -EOPNOTSUPP;
 
-if (netdev->mtu > enic->port_mtu)
+if (new_mtu > enic->port_mtu)
 netdev_warn(netdev,
 "interface MTU (%d) set higher than port MTU (%d)\n",
-netdev->mtu, enic->port_mtu);
+new_mtu, enic->port_mtu);
 
 return _enic_change_mtu(netdev, new_mtu);
 }
@@ -3939,6 +3939,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
 MEM_TYPE_PAGE_ORDER0, NULL);
 if (err) {
 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+xdp_rxq_info_unreg(&fq->channel->xdp_rxq);
 return err;
 }
 
@@ -4432,17 +4433,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
 return -EINVAL;
 }
 if (err)
-return err;
+goto out;
 }
 
 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
 DPNI_QUEUE_TX, &priv->tx_qdid);
 if (err) {
 dev_err(dev, "dpni_get_qdid() failed\n");
-return err;
+goto out;
 }
 
 return 0;
 
+out:
+while (i--) {
+if (priv->fq[i].type == DPAA2_RX_FQ &&
+xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+}
+return err;
 }
 
 /* Allocate rings for storing incoming frame descriptors */
@@ -4825,6 +4834,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
 }
 }
 
+static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv)
+{
+int i;
+
+for (i = 0; i < priv->num_fqs; i++) {
+if (priv->fq[i].type == DPAA2_RX_FQ &&
+xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+}
+}
+
 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 {
 struct device *dev;
@@ -5028,6 +5048,7 @@ err_alloc_percpu_extras:
 free_percpu(priv->percpu_stats);
 err_alloc_percpu_stats:
 dpaa2_eth_del_ch_napi(priv);
+dpaa2_eth_free_rx_xdp_rxq(priv);
 err_bind:
 dpaa2_eth_free_dpbps(priv);
 err_dpbp_setup:
@@ -5080,6 +5101,7 @@ static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
 free_percpu(priv->percpu_extras);
 
 dpaa2_eth_del_ch_napi(priv);
+dpaa2_eth_free_rx_xdp_rxq(priv);
 dpaa2_eth_free_dpbps(priv);
 dpaa2_eth_free_dpio(priv);
 dpaa2_eth_free_dpni(priv);
@@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
 */
 static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-mutex_lock(&cq->cq_lock);
+spin_lock(&cq->cq_lock);
 
 /* free ring buffers and the ring itself */
 idpf_ctlq_dealloc_ring_res(hw, cq);
@@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 /* Set ring_size to 0 to indicate uninitialized queue */
 cq->ring_size = 0;
 
-mutex_unlock(&cq->cq_lock);
-mutex_destroy(&cq->cq_lock);
+spin_unlock(&cq->cq_lock);
 }
 
 /**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 
 idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-mutex_init(&cq->cq_lock);
+spin_lock_init(&cq->cq_lock);
 
 list_add(&cq->cq_list, &hw->cq_list_head);
 
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 int err = 0;
 int i;
 
-mutex_lock(&cq->cq_lock);
+spin_lock(&cq->cq_lock);
 
 /* Ensure there are enough descriptors to send all messages */
 num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 wr32(hw, cq->reg.tail, cq->next_to_use);
 
 err_unlock:
-mutex_unlock(&cq->cq_lock);
+spin_unlock(&cq->cq_lock);
 
 return err;
 }
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 if (*clean_count > cq->ring_size)
 return -EBADR;
 
-mutex_lock(&cq->cq_lock);
+spin_lock(&cq->cq_lock);
 
 ntc = cq->next_to_clean;
 
@@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 
 cq->next_to_clean = ntc;
 
-mutex_unlock(&cq->cq_lock);
+spin_unlock(&cq->cq_lock);
 
 /* Return number of descriptors actually cleaned */
 *clean_count = i;
@@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 if (*buff_count > 0)
 buffs_avail = true;
 
-mutex_lock(&cq->cq_lock);
+spin_lock(&cq->cq_lock);
 
 if (tbp >= cq->ring_size)
 tbp = 0;
@@ -524,7 +523,7 @@ post_buffs_out:
 wr32(hw, cq->reg.tail, cq->next_to_post);
 }
 
-mutex_unlock(&cq->cq_lock);
+spin_unlock(&cq->cq_lock);
 
 /* return the number of buffers that were not posted */
 *buff_count = *buff_count - i;
@@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 u16 i;
 
 /* take the lock before we start messing with the ring */
-mutex_lock(&cq->cq_lock);
+spin_lock(&cq->cq_lock);
 
 ntc = cq->next_to_clean;
 
@@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 
 cq->next_to_clean = ntc;
 
-mutex_unlock(&cq->cq_lock);
+spin_unlock(&cq->cq_lock);
 
 *num_q_msg = i;
 if (*num_q_msg == 0)
@@ -99,7 +99,7 @@ struct idpf_ctlq_info {
 
 enum idpf_ctlq_type cq_type;
 int q_id;
-struct mutex cq_lock; /* control queue lock */
+spinlock_t cq_lock; /* control queue lock */
 /* used for interrupt processing */
 u16 next_to_use;
 u16 next_to_clean;
@@ -47,7 +47,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
 struct idpf_vport_user_config_data *user_config;
 
 if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
-return -EOPNOTSUPP;
+return 0;
 
 user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
 
@@ -66,7 +66,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
 struct idpf_vport_user_config_data *user_config;
 
 if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
-return -EOPNOTSUPP;
+return 0;
 
 user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
 
@@ -2314,8 +2314,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
 struct idpf_adapter *adapter = hw->back;
 size_t sz = ALIGN(size, 4096);
 
-mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
-&mem->pa, GFP_KERNEL);
+/* The control queue resources are freed under a spinlock, contiguous
+ * pages will avoid IOMMU remapping and the use vmap (and vunmap in
+ * dma_free_*() path.
+ */
+mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
+GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
 mem->size = sz;
 
 return mem->va;
@@ -2330,8 +2334,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
 {
 struct idpf_adapter *adapter = hw->back;
 
-dma_free_coherent(&adapter->pdev->dev, mem->size,
-mem->va, mem->pa);
+dma_free_attrs(&adapter->pdev->dev, mem->size,
+mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
 mem->size = 0;
 mem->va = NULL;
 mem->pa = 0;
@@ -7115,6 +7115,10 @@ static int igc_probe(struct pci_dev *pdev,
 adapter->port_num = hw->bus.func;
 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
+/* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+if (igc_is_device_id_i226(hw))
+pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+
 err = pci_save_state(pdev);
 if (err)
 goto err_ioremap;
@@ -7500,6 +7504,9 @@ static int __igc_resume(struct device *dev, bool rpm)
 pci_enable_wake(pdev, PCI_D3hot, 0);
 pci_enable_wake(pdev, PCI_D3cold, 0);
 
+if (igc_is_device_id_i226(hw))
+pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+
 if (igc_init_interrupt_scheme(adapter, true)) {
 netdev_err(netdev, "Unable to allocate memory for queues\n");
 return -ENOMEM;
@@ -7625,6 +7632,9 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
 pci_enable_wake(pdev, PCI_D3hot, 0);
 pci_enable_wake(pdev, PCI_D3cold, 0);
 
+if (igc_is_device_id_i226(hw))
+pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2);
+
 /* In case of PCI error, adapter loses its HW address
 * so we should re-assign it here.
 */
@@ -3336,7 +3336,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
 
 addr = np->ops->map_page(np->device, page, 0,
 PAGE_SIZE, DMA_FROM_DEVICE);
-if (!addr) {
+if (np->ops->mapping_error(np->device, addr)) {
 __free_page(page);
 return -ENOMEM;
 }
@@ -6676,6 +6676,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 len = skb_headlen(skb);
 mapping = np->ops->map_single(np->device, skb->data,
 len, DMA_TO_DEVICE);
+if (np->ops->mapping_error(np->device, mapping))
+goto out_drop;
 
 prod = rp->prod;
 
@@ -6717,6 +6719,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 mapping = np->ops->map_page(np->device, skb_frag_page(frag),
 skb_frag_off(frag), len,
 DMA_TO_DEVICE);
+if (np->ops->mapping_error(np->device, mapping))
+goto out_unmap;
 
 rp->tx_buffs[prod].skb = NULL;
 rp->tx_buffs[prod].mapping = mapping;
@@ -6741,6 +6745,19 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 out:
 return NETDEV_TX_OK;
 
+out_unmap:
+while (i--) {
+const skb_frag_t *frag;
+
+prod = PREVIOUS_TX(rp, prod);
+frag = &skb_shinfo(skb)->frags[i];
+np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping,
+skb_frag_size(frag), DMA_TO_DEVICE);
+}
+
+np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping,
+skb_headlen(skb), DMA_TO_DEVICE);
+
 out_drop:
 rp->tx_errors++;
 kfree_skb(skb);
@@ -9644,6 +9661,11 @@ static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
 dma_unmap_single(dev, dma_address, size, direction);
 }
 
+static int niu_pci_mapping_error(struct device *dev, u64 addr)
+{
+return dma_mapping_error(dev, addr);
+}
+
 static const struct niu_ops niu_pci_ops = {
 .alloc_coherent = niu_pci_alloc_coherent,
 .free_coherent = niu_pci_free_coherent,
@@ -9651,6 +9673,7 @@ static const struct niu_ops niu_pci_ops = {
 .unmap_page = niu_pci_unmap_page,
 .map_single = niu_pci_map_single,
 .unmap_single = niu_pci_unmap_single,
+.mapping_error = niu_pci_mapping_error,
 };
 
 static void niu_driver_version(void)
@@ -10019,6 +10042,11 @@ static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
 /* Nothing to do. */
 }
 
+static int niu_phys_mapping_error(struct device *dev, u64 dma_address)
+{
+return false;
+}
+
 static const struct niu_ops niu_phys_ops = {
 .alloc_coherent = niu_phys_alloc_coherent,
 .free_coherent = niu_phys_free_coherent,
@@ -10026,6 +10054,7 @@ static const struct niu_ops niu_phys_ops = {
 .unmap_page = niu_phys_unmap_page,
 .map_single = niu_phys_map_single,
 .unmap_single = niu_phys_unmap_single,
+.mapping_error = niu_phys_mapping_error,
 };
 
 static int niu_of_probe(struct platform_device *op)
@@ -2879,6 +2879,9 @@ struct tx_ring_info {
 #define NEXT_TX(tp, index) \
 (((index) + 1) < (tp)->pending ? ((index) + 1) : 0)
 
+#define PREVIOUS_TX(tp, index) \
+(((index) - 1) >= 0 ? ((index) - 1) : (((tp)->pending) - 1))
+
 static inline u32 niu_tx_avail(struct tx_ring_info *tp)
 {
 return (tp->pending -
@@ -3140,6 +3143,7 @@ struct niu_ops {
 enum dma_data_direction direction);
 void (*unmap_single)(struct device *dev, u64 dma_address,
 size_t size, enum dma_data_direction direction);
+int (*mapping_error)(struct device *dev, u64 dma_address);
 };
 
 struct niu_link_config {
@@ -1705,6 +1705,7 @@ static void wx_set_rss_queues(struct wx *wx)
 
 clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
 
+wx->ring_feature[RING_F_FDIR].indices = 1;
 /* Use Flow Director in addition to RSS to ensure the best
 * distribution of flows across cores, even when an FDIR flow
 * isn't matched.
@@ -1746,7 +1747,7 @@ static void wx_set_num_queues(struct wx *wx)
 */
 static int wx_acquire_msix_vectors(struct wx *wx)
 {
-struct irq_affinity affd = { .pre_vectors = 1 };
+struct irq_affinity affd = { .post_vectors = 1 };
 int nvecs, i;
 
 /* We start by asking for one vector per queue pair */
@@ -1783,16 +1784,24 @@ static int wx_acquire_msix_vectors(struct wx *wx)
 return nvecs;
 }
 
-wx->msix_entry->entry = 0;
-wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
 nvecs -= 1;
 for (i = 0; i < nvecs; i++) {
 wx->msix_q_entries[i].entry = i;
-wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1);
+wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i);
 }
 
 wx->num_q_vectors = nvecs;
 
+wx->msix_entry->entry = nvecs;
+wx->msix_entry->vector = pci_irq_vector(wx->pdev, nvecs);
+
+if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags)) {
+wx->msix_entry->entry = 0;
+wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
+wx->msix_q_entries[0].entry = 0;
+wx->msix_q_entries[0].vector = pci_irq_vector(wx->pdev, 1);
+}
+
 return 0;
 }
 
@@ -2291,6 +2300,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
 
 if (direction == -1) {
 /* other causes */
+if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
+msix_vector = 0;
 msix_vector |= WX_PX_IVAR_ALLOC_VAL;
 index = 0;
 ivar = rd32(wx, WX_PX_MISC_IVAR);
@@ -2299,8 +2310,6 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
 wr32(wx, WX_PX_MISC_IVAR, ivar);
 } else {
 /* tx or rx causes */
-if (!(wx->mac.type == wx_mac_em && wx->num_vfs == 7))
-msix_vector += 1; /* offset for queue vectors */
 msix_vector |= WX_PX_IVAR_ALLOC_VAL;
 index = ((16 * (queue & 1)) + (8 * direction));
 ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
@@ -2339,7 +2348,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
 
 itr_reg |= WX_PX_ITR_CNT_WDIS;
 
-wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg);
+wr32(wx, WX_PX_ITR(v_idx), itr_reg);
 }
 
 /**
@@ -2392,9 +2401,9 @@ void wx_configure_vectors(struct wx *wx)
 wx_write_eitr(q_vector);
 }
 
-wx_set_ivar(wx, -1, 0, 0);
+wx_set_ivar(wx, -1, 0, v_idx);
 if (pdev->msix_enabled)
-wr32(wx, WX_PX_ITR(0), 1950);
+wr32(wx, WX_PX_ITR(v_idx), 1950);
 }
 EXPORT_SYMBOL(wx_configure_vectors);
 
@@ -64,6 +64,7 @@ static void wx_sriov_clear_data(struct wx *wx)
 wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
 wx->ring_feature[RING_F_VMDQ].offset = 0;
 
+clear_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
 clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
 /* Disable VMDq flag so device will be set in NM mode */
 if (wx->ring_feature[RING_F_VMDQ].limit == 1)
@@ -78,6 +79,9 @@ static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
 set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
 dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);
 
+if (num_vfs == 7 && wx->mac.type == wx_mac_em)
+set_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
+
 /* Enable VMDq flag so device will be set in VM mode */
 set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
 if (!wx->ring_feature[RING_F_VMDQ].limit)
@@ -1191,6 +1191,7 @@ enum wx_pf_flags {
 WX_FLAG_VMDQ_ENABLED,
 WX_FLAG_VLAN_PROMISC,
 WX_FLAG_SRIOV_ENABLED,
+WX_FLAG_IRQ_VECTOR_SHARED,
 WX_FLAG_FDIR_CAPABLE,
 WX_FLAG_FDIR_HASH,
 WX_FLAG_FDIR_PERFECT,
@@ -1343,7 +1344,7 @@ struct wx {
 };
 
 #define WX_INTR_ALL (~0ULL)
-#define WX_INTR_Q(i) BIT((i) + 1)
+#define WX_INTR_Q(i) BIT((i))
 
 /* register operations */
 #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
@ -161,7 +161,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues)
|
|||||||
if (queues)
|
if (queues)
|
||||||
wx_intr_enable(wx, NGBE_INTR_ALL);
|
wx_intr_enable(wx, NGBE_INTR_ALL);
|
||||||
else
|
else
|
||||||
wx_intr_enable(wx, NGBE_INTR_MISC);
|
wx_intr_enable(wx, NGBE_INTR_MISC(wx));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -286,7 +286,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
|
|||||||
* for queue. But when num_vfs == 7, vector[1] is assigned to vf6.
|
* for queue. But when num_vfs == 7, vector[1] is assigned to vf6.
|
||||||
* Misc and queue should reuse interrupt vector[0].
|
* Misc and queue should reuse interrupt vector[0].
|
||||||
*/
|
*/
|
||||||
if (wx->num_vfs == 7)
|
if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
|
||||||
err = request_irq(wx->msix_entry->vector,
|
err = request_irq(wx->msix_entry->vector,
|
||||||
ngbe_misc_and_queue, 0, netdev->name, wx);
|
ngbe_misc_and_queue, 0, netdev->name, wx);
|
||||||
else
|
else
|
||||||
|
@ -87,7 +87,7 @@
|
|||||||
#define NGBE_PX_MISC_IC_TIMESYNC BIT(11) /* time sync */
|
#define NGBE_PX_MISC_IC_TIMESYNC BIT(11) /* time sync */
|
||||||
|
|
||||||
#define NGBE_INTR_ALL 0x1FF
|
#define NGBE_INTR_ALL 0x1FF
|
||||||
#define NGBE_INTR_MISC BIT(0)
|
#define NGBE_INTR_MISC(A) BIT((A)->msix_entry->entry)
|
||||||
|
|
||||||
#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
|
#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
|
||||||
#define NGBE_CFG_LAN_SPEED 0x14440
|
#define NGBE_CFG_LAN_SPEED 0x14440
|
||||||
|
@ -294,6 +294,7 @@ static void txgbe_mac_link_up_aml(struct phylink_config *config,
|
|||||||
wx_fc_enable(wx, tx_pause, rx_pause);
|
wx_fc_enable(wx, tx_pause, rx_pause);
|
||||||
|
|
||||||
txgbe_reconfig_mac(wx);
|
txgbe_reconfig_mac(wx);
|
||||||
|
txgbe_enable_sec_tx_path(wx);
|
||||||
|
|
||||||
txcfg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
|
txcfg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
|
||||||
txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
|
txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
|
||||||
|
@ -31,7 +31,7 @@ void txgbe_irq_enable(struct wx *wx, bool queues)
|
|||||||
wr32(wx, WX_PX_MISC_IEN, misc_ien);
|
wr32(wx, WX_PX_MISC_IEN, misc_ien);
|
||||||
|
|
||||||
/* unmask interrupt */
|
/* unmask interrupt */
|
||||||
wx_intr_enable(wx, TXGBE_INTR_MISC);
|
wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
|
||||||
if (queues)
|
if (queues)
|
||||||
wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
|
wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
|
||||||
}
|
}
|
||||||
@ -78,7 +78,6 @@ free_queue_irqs:
|
|||||||
free_irq(wx->msix_q_entries[vector].vector,
|
free_irq(wx->msix_q_entries[vector].vector,
|
||||||
wx->q_vector[vector]);
|
wx->q_vector[vector]);
|
||||||
}
|
}
|
||||||
wx_reset_interrupt_capability(wx);
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -132,7 +131,7 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
|
|||||||
txgbe->eicr = eicr;
|
txgbe->eicr = eicr;
|
||||||
if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) {
|
if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) {
|
||||||
wx_msg_task(txgbe->wx);
|
wx_msg_task(txgbe->wx);
|
||||||
wx_intr_enable(wx, TXGBE_INTR_MISC);
|
wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
|
||||||
}
|
}
|
||||||
return IRQ_WAKE_THREAD;
|
return IRQ_WAKE_THREAD;
|
||||||
}
|
}
|
||||||
@ -184,7 +183,7 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
|
|||||||
nhandled++;
|
nhandled++;
|
||||||
}
|
}
|
||||||
|
|
||||||
wx_intr_enable(wx, TXGBE_INTR_MISC);
|
wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
|
||||||
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
|
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -211,6 +210,7 @@ void txgbe_free_misc_irq(struct txgbe *txgbe)
|
|||||||
free_irq(txgbe->link_irq, txgbe);
|
free_irq(txgbe->link_irq, txgbe);
|
||||||
free_irq(txgbe->misc.irq, txgbe);
|
free_irq(txgbe->misc.irq, txgbe);
|
||||||
txgbe_del_irq_domain(txgbe);
|
txgbe_del_irq_domain(txgbe);
|
||||||
|
txgbe->wx->misc_irq_domain = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
int txgbe_setup_misc_irq(struct txgbe *txgbe)
|
int txgbe_setup_misc_irq(struct txgbe *txgbe)
|
||||||
|
@ -458,10 +458,14 @@ static int txgbe_open(struct net_device *netdev)
|
|||||||
|
|
||||||
wx_configure(wx);
|
wx_configure(wx);
|
||||||
|
|
||||||
err = txgbe_request_queue_irqs(wx);
|
err = txgbe_setup_misc_irq(wx->priv);
|
||||||
if (err)
|
if (err)
|
||||||
goto err_free_resources;
|
goto err_free_resources;
|
||||||
|
|
||||||
|
+	err = txgbe_request_queue_irqs(wx);
+	if (err)
+		goto err_free_misc_irq;
+
 	/* Notify the stack of the actual queue counts. */
 	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
 	if (err)
@@ -479,6 +483,9 @@ static int txgbe_open(struct net_device *netdev)
 
 err_free_irq:
 	wx_free_irq(wx);
+err_free_misc_irq:
+	txgbe_free_misc_irq(wx->priv);
+	wx_reset_interrupt_capability(wx);
 err_free_resources:
 	wx_free_resources(wx);
 err_reset:
@@ -519,6 +526,7 @@ static int txgbe_close(struct net_device *netdev)
 	wx_ptp_stop(wx);
 	txgbe_down(wx);
 	wx_free_irq(wx);
+	txgbe_free_misc_irq(wx->priv);
 	wx_free_resources(wx);
 	txgbe_fdir_filter_exit(wx);
 	wx_control_hw(wx, false);
@@ -564,7 +572,6 @@ static void txgbe_shutdown(struct pci_dev *pdev)
 int txgbe_setup_tc(struct net_device *dev, u8 tc)
 {
 	struct wx *wx = netdev_priv(dev);
-	struct txgbe *txgbe = wx->priv;
 
 	/* Hardware has to reinitialize queues and interrupts to
 	 * match packet buffer alignment. Unfortunately, the
@@ -575,7 +582,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
 	else
 		txgbe_reset(wx);
 
-	txgbe_free_misc_irq(txgbe);
 	wx_clear_interrupt_scheme(wx);
 
 	if (tc)
@@ -584,7 +590,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
 	netdev_reset_tc(dev);
 
 	wx_init_interrupt_scheme(wx);
-	txgbe_setup_misc_irq(txgbe);
 
 	if (netif_running(dev))
 		txgbe_open(dev);
@@ -882,13 +887,9 @@ static int txgbe_probe(struct pci_dev *pdev,
 
 	txgbe_init_fdir(txgbe);
 
-	err = txgbe_setup_misc_irq(txgbe);
-	if (err)
-		goto err_release_hw;
-
 	err = txgbe_init_phy(txgbe);
 	if (err)
-		goto err_free_misc_irq;
+		goto err_release_hw;
 
 	err = register_netdev(netdev);
 	if (err)
@@ -916,8 +917,6 @@ static int txgbe_probe(struct pci_dev *pdev,
 
 err_remove_phy:
 	txgbe_remove_phy(txgbe);
-err_free_misc_irq:
-	txgbe_free_misc_irq(txgbe);
 err_release_hw:
 	wx_clear_interrupt_scheme(wx);
 	wx_control_hw(wx, false);
@@ -957,7 +956,6 @@ static void txgbe_remove(struct pci_dev *pdev)
 	unregister_netdev(netdev);
 
 	txgbe_remove_phy(txgbe);
-	txgbe_free_misc_irq(txgbe);
 	wx_free_isb_resources(wx);
 
 	pci_release_selected_regions(pdev,
@@ -302,8 +302,8 @@ struct txgbe_fdir_filter {
 #define TXGBE_DEFAULT_RX_WORK 128
 #endif
 
-#define TXGBE_INTR_MISC BIT(0)
-#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1)
+#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
 
 #define TXGBE_MAX_EITR GENMASK(11, 3)
 
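With the old macros the misc interrupt was pinned to bit 0 and the queue vectors sat above it; after this change the queue vectors occupy bits 0..num_q_vectors-1 and the misc vector is placed directly above them, so the "all queues" mask is simply everything below the misc bit. A minimal user-space sketch of the resulting mask layout, with an illustrative struct and BIT() stand-in rather than the driver's real types:

#include <stdio.h>

/* Illustrative stand-in for the kernel's BIT() helper. */
#define BIT(n)          (1UL << (n))

struct wx_like {
        int num_q_vectors;      /* hypothetical field, mirrors wx->num_q_vectors */
};

/* New layout: queue vectors use bits 0..n-1, the misc vector uses bit n. */
#define INTR_MISC(a)    BIT((a)->num_q_vectors)
#define INTR_QALL(a)    (INTR_MISC(a) - 1)

int main(void)
{
        struct wx_like wx = { .num_q_vectors = 7 };

        printf("misc mask  = 0x%03lx\n", INTR_MISC(&wx));       /* 0x080 */
        printf("queue mask = 0x%03lx\n", INTR_QALL(&wx));       /* 0x07f */
        return 0;
}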
@@ -4567,8 +4567,6 @@ static void lan78xx_disconnect(struct usb_interface *intf)
 	if (!dev)
 		return;
 
-	netif_napi_del(&dev->napi);
-
 	udev = interface_to_usbdev(intf);
 	net = dev->net;
 
@@ -778,6 +778,26 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
 }
 
+static int check_mergeable_len(struct net_device *dev, void *mrg_ctx,
+			       unsigned int len)
+{
+	unsigned int headroom, tailroom, room, truesize;
+
+	truesize = mergeable_ctx_to_truesize(mrg_ctx);
+	headroom = mergeable_ctx_to_headroom(mrg_ctx);
+	tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+	room = SKB_DATA_ALIGN(headroom + tailroom);
+
+	if (len > truesize - room) {
+		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+			 dev->name, len, (unsigned long)(truesize - room));
+		DEV_STATS_INC(dev, rx_length_errors);
+		return -1;
+	}
+
+	return 0;
+}
+
 static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
 					 unsigned int headroom,
 					 unsigned int len)
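The helper centralizes a length budget that was previously recomputed in each receive path: out of the buffer's truesize, the headroom plus (when headroom is present) the shared-info tailroom is reserved, and the received length must fit in what is left. A rough user-space sketch of that arithmetic, with made-up constants standing in for SMP_CACHE_BYTES and sizeof(struct skb_shared_info):

#include <stdbool.h>
#include <stdio.h>

#define CACHE_ALIGN     64      /* illustrative stand-in for SMP_CACHE_BYTES */
#define DATA_ALIGN(x)   (((x) + CACHE_ALIGN - 1) & ~(CACHE_ALIGN - 1))
#define SHINFO_SIZE     320     /* illustrative sizeof(struct skb_shared_info) */

/* Returns true when a received length fits the buffer's usable region. */
static bool len_fits(unsigned int truesize, unsigned int headroom,
                     unsigned int len)
{
        unsigned int tailroom = headroom ? SHINFO_SIZE : 0;
        unsigned int room = DATA_ALIGN(headroom + tailroom);

        return len <= truesize - room;
}

int main(void)
{
        /* A 4 KiB buffer with 256 B headroom leaves 4096 - 576 usable bytes. */
        printf("%d\n", len_fits(4096, 256, 3000));      /* 1: fits */
        printf("%d\n", len_fits(4096, 256, 3600));      /* 0: rejected */
        return 0;
}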
@@ -1084,7 +1104,7 @@ static bool tx_may_stop(struct virtnet_info *vi,
 	 * Since most packets only take 1 or 2 ring slots, stopping the queue
 	 * early means 16 slots are typically wasted.
 	 */
-	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+	if (sq->vq->num_free < MAX_SKB_FRAGS + 2) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
 
 		netif_tx_stop_queue(txq);
@@ -1116,7 +1136,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
 	} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 		/* More just got used, free them then recheck. */
 		free_old_xmit(sq, txq, false);
-		if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+		if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
 			netif_start_subqueue(dev, qnum);
 			u64_stats_update_begin(&sq->stats.syncp);
 			u64_stats_inc(&sq->stats.wake);
@@ -1127,15 +1147,29 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
 	}
 }
 
+/* Note that @len is the length of received data without virtio header */
 static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
-				   struct receive_queue *rq, void *buf, u32 len)
+				   struct receive_queue *rq, void *buf,
+				   u32 len, bool first_buf)
 {
 	struct xdp_buff *xdp;
 	u32 bufsize;
 
 	xdp = (struct xdp_buff *)buf;
 
-	bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
+	/* In virtnet_add_recvbuf_xsk, we use part of XDP_PACKET_HEADROOM for
+	 * virtio header and ask the vhost to fill data from
+	 *         hard_start + XDP_PACKET_HEADROOM - vi->hdr_len
+	 * The first buffer has virtio header so the remaining region for frame
+	 * data is
+	 *         xsk_pool_get_rx_frame_size()
+	 * While other buffers than the first one do not have virtio header, so
+	 * the maximum frame data's length can be
+	 *         xsk_pool_get_rx_frame_size() + vi->hdr_len
+	 */
+	bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool);
+	if (!first_buf)
+		bufsize += vi->hdr_len;
+
 	if (unlikely(len > bufsize)) {
 		pr_debug("%s: rx error: len %u exceeds truesize %u\n",
@@ -1260,7 +1294,7 @@ static int xsk_append_merge_buffer(struct virtnet_info *vi,
 
 		u64_stats_add(&stats->bytes, len);
 
-		xdp = buf_to_xdp(vi, rq, buf, len);
+		xdp = buf_to_xdp(vi, rq, buf, len, false);
 		if (!xdp)
 			goto err;
 
@@ -1358,7 +1392,7 @@ static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queu
 
 	u64_stats_add(&stats->bytes, len);
 
-	xdp = buf_to_xdp(vi, rq, buf, len);
+	xdp = buf_to_xdp(vi, rq, buf, len, true);
 	if (!xdp)
 		return;
 
@@ -1797,7 +1831,8 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
  * across multiple buffers (num_buf > 1), and we make sure buffers
  * have enough headroom.
  */
-static struct page *xdp_linearize_page(struct receive_queue *rq,
+static struct page *xdp_linearize_page(struct net_device *dev,
+				       struct receive_queue *rq,
 				       int *num_buf,
 				       struct page *p,
 				       int offset,
@@ -1817,18 +1852,27 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
 	page_off += *len;
 
+	/* Only mergeable mode can go inside this while loop. In small mode,
+	 * *num_buf == 1, so it cannot go inside.
+	 */
 	while (--*num_buf) {
 		unsigned int buflen;
 		void *buf;
+		void *ctx;
 		int off;
 
-		buf = virtnet_rq_get_buf(rq, &buflen, NULL);
+		buf = virtnet_rq_get_buf(rq, &buflen, &ctx);
 		if (unlikely(!buf))
 			goto err_buf;
 
 		p = virt_to_head_page(buf);
 		off = buf - page_address(p);
 
+		if (check_mergeable_len(dev, ctx, buflen)) {
+			put_page(p);
+			goto err_buf;
+		}
+
 		/* guard against a misconfigured or uncooperative backend that
 		 * is sending packet larger than the MTU.
 		 */
@@ -1917,7 +1961,7 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
 	headroom = vi->hdr_len + header_offset;
 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
 		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	xdp_page = xdp_linearize_page(rq, &num_buf, page,
+	xdp_page = xdp_linearize_page(dev, rq, &num_buf, page,
 				      offset, header_offset,
 				      &tlen);
 	if (!xdp_page)
@@ -2126,10 +2170,9 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
 				       struct virtnet_rq_stats *stats)
 {
 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
-	unsigned int headroom, tailroom, room;
-	unsigned int truesize, cur_frag_size;
 	struct skb_shared_info *shinfo;
 	unsigned int xdp_frags_truesz = 0;
+	unsigned int truesize;
 	struct page *page;
 	skb_frag_t *frag;
 	int offset;
@@ -2172,21 +2215,14 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
 		page = virt_to_head_page(buf);
 		offset = buf - page_address(page);
 
-		truesize = mergeable_ctx_to_truesize(ctx);
-		headroom = mergeable_ctx_to_headroom(ctx);
-		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
-		room = SKB_DATA_ALIGN(headroom + tailroom);
-
-		cur_frag_size = truesize;
-		xdp_frags_truesz += cur_frag_size;
-		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
+		if (check_mergeable_len(dev, ctx, len)) {
 			put_page(page);
-			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
-				 dev->name, len, (unsigned long)(truesize - room));
-			DEV_STATS_INC(dev, rx_length_errors);
 			goto err;
 		}
 
+		truesize = mergeable_ctx_to_truesize(ctx);
+		xdp_frags_truesz += truesize;
+
 		frag = &shinfo->frags[shinfo->nr_frags++];
 		skb_frag_fill_page_desc(frag, page, offset, len);
 		if (page_is_pfmemalloc(page))
@@ -2252,7 +2288,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
 		 */
 		if (!xdp_prog->aux->xdp_has_frags) {
 			/* linearize data for XDP */
-			xdp_page = xdp_linearize_page(rq, num_buf,
+			xdp_page = xdp_linearize_page(vi->dev, rq, num_buf,
 						      *page, offset,
 						      XDP_PACKET_HEADROOM,
 						      len);
@@ -2400,18 +2436,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	struct sk_buff *head_skb, *curr_skb;
 	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
-	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
-	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
 
 	head_skb = NULL;
 	u64_stats_add(&stats->bytes, len - vi->hdr_len);
 
-	if (unlikely(len > truesize - room)) {
-		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
-			 dev->name, len, (unsigned long)(truesize - room));
-		DEV_STATS_INC(dev, rx_length_errors);
+	if (check_mergeable_len(dev, ctx, len))
 		goto err_skb;
-	}
 
 	if (unlikely(vi->xdp_enabled)) {
 		struct bpf_prog *xdp_prog;
@@ -2446,17 +2476,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		u64_stats_add(&stats->bytes, len);
 		page = virt_to_head_page(buf);
 
-		truesize = mergeable_ctx_to_truesize(ctx);
-		headroom = mergeable_ctx_to_headroom(ctx);
-		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
-		room = SKB_DATA_ALIGN(headroom + tailroom);
-		if (unlikely(len > truesize - room)) {
-			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
-				 dev->name, len, (unsigned long)(truesize - room));
-			DEV_STATS_INC(dev, rx_length_errors);
+		if (check_mergeable_len(dev, ctx, len))
 			goto err_skb;
-		}
 
+		truesize = mergeable_ctx_to_truesize(ctx);
 		curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
 						   buf, len, truesize);
 		if (!curr_skb)
@@ -2998,7 +3021,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
 			free_old_xmit(sq, txq, !!budget);
 		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
 
-		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+		if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
 			if (netif_tx_queue_stopped(txq)) {
 				u64_stats_update_begin(&sq->stats.syncp);
 				u64_stats_inc(&sq->stats.wake);
@@ -3195,7 +3218,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	else
 		free_old_xmit(sq, txq, !!budget);
 
-	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+	if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
 		if (netif_tx_queue_stopped(txq)) {
 			u64_stats_update_begin(&sq->stats.syncp);
 			u64_stats_inc(&sq->stats.wake);
@@ -3481,6 +3504,12 @@ static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
 {
 	int qindex, err;
 
+	if (ring_num <= MAX_SKB_FRAGS + 2) {
+		netdev_err(vi->dev, "tx size (%d) cannot be smaller than %d\n",
+			   ring_num, MAX_SKB_FRAGS + 2);
+		return -EINVAL;
+	}
+
 	qindex = sq - vi->sq;
 
 	virtnet_tx_pause(vi, sq);
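The same "MAX_SKB_FRAGS + 2" bound appears in all of these paths: a worst-case skb needs one descriptor per page fragment plus two more for the virtio-net header and the linear part, so the wake/stop thresholds and the new minimum TX ring size are all expressed against it. A small user-space sketch of the ring-size validation, with the constant chosen only for illustration:

#include <stdio.h>

#define MAX_SKB_FRAGS   17      /* illustrative: the usual value for 64 KiB / 4 KiB pages */

/* Mirrors the idea of the resize check: the ring must be able to hold at
 * least one worst-case packet, i.e. header + linear data + all fragments.
 */
static int tx_ring_size_ok(unsigned int ring_num)
{
        if (ring_num <= MAX_SKB_FRAGS + 2) {
                fprintf(stderr, "tx size (%u) cannot be smaller than %d\n",
                        ring_num, MAX_SKB_FRAGS + 2);
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", tx_ring_size_ok(16));    /* rejected */
        printf("%d\n", tx_ring_size_ok(256));   /* accepted */
        return 0;
}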
@@ -2797,7 +2797,7 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
 		     void (*recycle_done)(struct virtqueue *vq))
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
-	int err;
+	int err, err_reset;
 
 	if (num > vq->vq.num_max)
 		return -E2BIG;
@@ -2819,7 +2819,11 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
 	else
 		err = virtqueue_resize_split(_vq, num);
 
-	return virtqueue_enable_after_reset(_vq);
+	err_reset = virtqueue_enable_after_reset(_vq);
+	if (err_reset)
+		return err_reset;
+
+	return err;
 }
 EXPORT_SYMBOL_GPL(virtqueue_resize);
 
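Before this change the resize error was overwritten by the return value of re-enabling the queue, so a failed resize could report success. The pattern is the usual "do the risky step, always run the recovery step, then decide which error to surface". A generic user-space sketch of that flow, with made-up helper names standing in for the virtio internals:

#include <stdio.h>

/* Hypothetical stand-ins: the first call may fail but the queue must still
 * be re-enabled; the second call reports whether re-enabling worked.
 */
static int do_resize(int num)          { return num > 1024 ? -1 : 0; }
static int reenable_after_reset(void)  { return 0; }

static int resize(int num)
{
        int err, err_reenable;

        err = do_resize(num);

        /* Recovery must happen regardless of whether the resize failed. */
        err_reenable = reenable_after_reset();
        if (err_reenable)
                return err_reenable;    /* queue unusable: report that first */

        return err;                     /* otherwise surface the resize result */
}

int main(void)
{
        printf("%d\n", resize(512));    /* 0 */
        printf("%d\n", resize(4096));   /* -1: resize failure no longer hidden */
        return 0;
}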
@@ -899,8 +899,10 @@ static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
 	int err;
 
 	stats = objagg_hints_stats_get(objagg_hints);
-	if (IS_ERR(stats))
+	if (IS_ERR(stats)) {
+		*errmsg = "objagg_hints_stats_get() failed.";
 		return PTR_ERR(stats);
+	}
 	err = __check_expect_stats(stats, expect_stats, errmsg);
 	objagg_stats_put(stats);
 	return err;
@@ -2150,40 +2150,6 @@ static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
 	return rp->status;
 }
 
-static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
-				   struct sk_buff *skb)
-{
-	struct hci_rp_le_set_ext_adv_params *rp = data;
-	struct hci_cp_le_set_ext_adv_params *cp;
-	struct adv_info *adv_instance;
-
-	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
-
-	if (rp->status)
-		return rp->status;
-
-	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
-	if (!cp)
-		return rp->status;
-
-	hci_dev_lock(hdev);
-	hdev->adv_addr_type = cp->own_addr_type;
-	if (!cp->handle) {
-		/* Store in hdev for instance 0 */
-		hdev->adv_tx_power = rp->tx_power;
-	} else {
-		adv_instance = hci_find_adv_instance(hdev, cp->handle);
-		if (adv_instance)
-			adv_instance->tx_power = rp->tx_power;
-	}
-	/* Update adv data as tx power is known now */
-	hci_update_adv_data(hdev, cp->handle);
-
-	hci_dev_unlock(hdev);
-
-	return rp->status;
-}
-
 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
 			   struct sk_buff *skb)
 {
@@ -4164,8 +4130,6 @@ static const struct hci_cc {
 	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
 	       hci_cc_le_read_num_adv_sets,
 	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
-	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
-	       sizeof(struct hci_rp_le_set_ext_adv_params)),
 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
 		      hci_cc_le_set_ext_adv_enable),
 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
@@ -1205,9 +1205,126 @@ static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
 }
 
+static int
+hci_set_ext_adv_params_sync(struct hci_dev *hdev, struct adv_info *adv,
+			    const struct hci_cp_le_set_ext_adv_params *cp,
+			    struct hci_rp_le_set_ext_adv_params *rp)
+{
+	struct sk_buff *skb;
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(*cp),
+			     cp, HCI_CMD_TIMEOUT);
+
+	/* If command return a status event, skb will be set to -ENODATA */
+	if (skb == ERR_PTR(-ENODATA))
+		return 0;
+
+	if (IS_ERR(skb)) {
+		bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld",
+			   HCI_OP_LE_SET_EXT_ADV_PARAMS, PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+
+	if (skb->len != sizeof(*rp)) {
+		bt_dev_err(hdev, "Invalid response length for 0x%4.4x: %u",
+			   HCI_OP_LE_SET_EXT_ADV_PARAMS, skb->len);
+		kfree_skb(skb);
+		return -EIO;
+	}
+
+	memcpy(rp, skb->data, sizeof(*rp));
+	kfree_skb(skb);
+
+	if (!rp->status) {
+		hdev->adv_addr_type = cp->own_addr_type;
+		if (!cp->handle) {
+			/* Store in hdev for instance 0 */
+			hdev->adv_tx_power = rp->tx_power;
+		} else if (adv) {
+			adv->tx_power = rp->tx_power;
+		}
+	}
+
+	return rp->status;
+}
+
+static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
+		    HCI_MAX_EXT_AD_LENGTH);
+	u8 len;
+	struct adv_info *adv = NULL;
+	int err;
+
+	if (instance) {
+		adv = hci_find_adv_instance(hdev, instance);
+		if (!adv || !adv->adv_data_changed)
+			return 0;
+	}
+
+	len = eir_create_adv_data(hdev, instance, pdu->data,
+				  HCI_MAX_EXT_AD_LENGTH);
+
+	pdu->length = len;
+	pdu->handle = adv ? adv->handle : instance;
+	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
+	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+
+	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
+				    struct_size(pdu, data, len), pdu,
+				    HCI_CMD_TIMEOUT);
+	if (err)
+		return err;
+
+	/* Update data if the command succeed */
+	if (adv) {
+		adv->adv_data_changed = false;
+	} else {
+		memcpy(hdev->adv_data, pdu->data, len);
+		hdev->adv_data_len = len;
+	}
+
+	return 0;
+}
+
+static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+	struct hci_cp_le_set_adv_data cp;
+	u8 len;
+
+	memset(&cp, 0, sizeof(cp));
+
+	len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data));
+
+	/* There's nothing to do if the data hasn't changed */
+	if (hdev->adv_data_len == len &&
+	    memcmp(cp.data, hdev->adv_data, len) == 0)
+		return 0;
+
+	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+	hdev->adv_data_len = len;
+
+	cp.length = len;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+		return 0;
+
+	if (ext_adv_capable(hdev))
+		return hci_set_ext_adv_data_sync(hdev, instance);
+
+	return hci_set_adv_data_sync(hdev, instance);
+}
+
 int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
 {
 	struct hci_cp_le_set_ext_adv_params cp;
+	struct hci_rp_le_set_ext_adv_params rp;
 	bool connectable;
 	u32 flags;
 	bdaddr_t random_addr;
@@ -1316,8 +1433,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
 		cp.secondary_phy = HCI_ADV_PHY_1M;
 	}
 
-	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
-				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+	err = hci_set_ext_adv_params_sync(hdev, adv, &cp, &rp);
+	if (err)
+		return err;
+
+	/* Update adv data as tx power is known now */
+	err = hci_set_ext_adv_data_sync(hdev, cp.handle);
 	if (err)
 		return err;
 
@@ -1822,79 +1943,6 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
 }
 
-static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
-{
-	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
-		    HCI_MAX_EXT_AD_LENGTH);
-	u8 len;
-	struct adv_info *adv = NULL;
-	int err;
-
-	if (instance) {
-		adv = hci_find_adv_instance(hdev, instance);
-		if (!adv || !adv->adv_data_changed)
-			return 0;
-	}
-
-	len = eir_create_adv_data(hdev, instance, pdu->data,
-				  HCI_MAX_EXT_AD_LENGTH);
-
-	pdu->length = len;
-	pdu->handle = adv ? adv->handle : instance;
-	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
-	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
-
-	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
-				    struct_size(pdu, data, len), pdu,
-				    HCI_CMD_TIMEOUT);
-	if (err)
-		return err;
-
-	/* Update data if the command succeed */
-	if (adv) {
-		adv->adv_data_changed = false;
-	} else {
-		memcpy(hdev->adv_data, pdu->data, len);
-		hdev->adv_data_len = len;
-	}
-
-	return 0;
-}
-
-static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
-{
-	struct hci_cp_le_set_adv_data cp;
-	u8 len;
-
-	memset(&cp, 0, sizeof(cp));
-
-	len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data));
-
-	/* There's nothing to do if the data hasn't changed */
-	if (hdev->adv_data_len == len &&
-	    memcmp(cp.data, hdev->adv_data, len) == 0)
-		return 0;
-
-	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
-	hdev->adv_data_len = len;
-
-	cp.length = len;
-
-	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
-				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
-}
-
-int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
-{
-	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
-		return 0;
-
-	if (ext_adv_capable(hdev))
-		return hci_set_ext_adv_data_sync(hdev, instance);
-
-	return hci_set_adv_data_sync(hdev, instance);
-}
-
 int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
 				   bool force)
 {
@@ -1970,13 +2018,10 @@ static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
 static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
 {
 	struct adv_info *adv, *n;
-	int err = 0;
 
 	if (ext_adv_capable(hdev))
 		/* Remove all existing sets */
-		err = hci_clear_adv_sets_sync(hdev, sk);
-	if (ext_adv_capable(hdev))
-		return err;
+		return hci_clear_adv_sets_sync(hdev, sk);
 
 	/* This is safe as long as there is no command send while the lock is
 	 * held.
@@ -2004,13 +2049,11 @@ static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
 static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
 			       struct sock *sk)
 {
-	int err = 0;
+	int err;
 
 	/* If we use extended advertising, instance has to be removed first. */
 	if (ext_adv_capable(hdev))
-		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
-	if (ext_adv_capable(hdev))
-		return err;
+		return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
 
 	/* This is safe as long as there is no command send while the lock is
 	 * held.
@@ -2109,16 +2152,13 @@ int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
 int hci_disable_advertising_sync(struct hci_dev *hdev)
 {
 	u8 enable = 0x00;
-	int err = 0;
 
 	/* If controller is not advertising we are done. */
 	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
 		return 0;
 
 	if (ext_adv_capable(hdev))
-		err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
-	if (ext_adv_capable(hdev))
-		return err;
+		return hci_disable_ext_adv_instance_sync(hdev, 0x00);
 
 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
@@ -2481,6 +2521,10 @@ static int hci_pause_advertising_sync(struct hci_dev *hdev)
 	int err;
 	int old_state;
 
+	/* If controller is not advertising we are done. */
+	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
+		return 0;
+
 	/* If already been paused there is nothing to do. */
 	if (hdev->advertising_paused)
 		return 0;
@@ -6277,6 +6321,7 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
 						struct hci_conn *conn)
 {
 	struct hci_cp_le_set_ext_adv_params cp;
+	struct hci_rp_le_set_ext_adv_params rp;
 	int err;
 	bdaddr_t random_addr;
 	u8 own_addr_type;
@@ -6318,8 +6363,12 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
 	if (err)
 		return err;
 
-	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
-				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+	err = hci_set_ext_adv_params_sync(hdev, NULL, &cp, &rp);
+	if (err)
+		return err;
+
+	/* Update adv data as tx power is known now */
+	err = hci_set_ext_adv_data_sync(hdev, cp.handle);
 	if (err)
 		return err;
 
@@ -1080,7 +1080,8 @@ static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
 	struct mgmt_mesh_tx *mesh_tx;
 
 	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
-	hci_disable_advertising_sync(hdev);
+	if (list_empty(&hdev->adv_instances))
+		hci_disable_advertising_sync(hdev);
 	mesh_tx = mgmt_mesh_next(hdev, NULL);
 
 	if (mesh_tx)
@@ -2153,6 +2154,9 @@ static int set_mesh_sync(struct hci_dev *hdev, void *data)
 	else
 		hci_dev_clear_flag(hdev, HCI_MESH);
 
+	hdev->le_scan_interval = __le16_to_cpu(cp->period);
+	hdev->le_scan_window = __le16_to_cpu(cp->window);
+
 	len -= sizeof(*cp);
 
 	/* If filters don't fit, forward all adv pkts */
@@ -2167,6 +2171,7 @@ static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 {
 	struct mgmt_cp_set_mesh *cp = data;
 	struct mgmt_pending_cmd *cmd;
+	__u16 period, window;
 	int err = 0;
 
 	bt_dev_dbg(hdev, "sock %p", sk);
@@ -2180,6 +2185,23 @@ static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
 				       MGMT_STATUS_INVALID_PARAMS);
 
+	/* Keep allowed ranges in sync with set_scan_params() */
+	period = __le16_to_cpu(cp->period);
+
+	if (period < 0x0004 || period > 0x4000)
+		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+				       MGMT_STATUS_INVALID_PARAMS);
+
+	window = __le16_to_cpu(cp->window);
+
+	if (window < 0x0004 || window > 0x4000)
+		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+				       MGMT_STATUS_INVALID_PARAMS);
+
+	if (window > period)
+		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+				       MGMT_STATUS_INVALID_PARAMS);
+
 	hci_dev_lock(hdev);
 
 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
@@ -6432,6 +6454,7 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
 				       MGMT_STATUS_NOT_SUPPORTED);
 
+	/* Keep allowed ranges in sync with set_mesh() */
 	interval = __le16_to_cpu(cp->interval);
 
 	if (interval < 0x0004 || interval > 0x4000)
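The new checks mirror the bounds already enforced for Set Scan Parameters: both the mesh scan period and window must lie in the 0x0004..0x4000 range (values in the usual 0.625 ms LE scan units) and the window cannot exceed the period. A standalone sketch of the same validation logic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same bounds as the kernel check; values are in 0.625 ms units. */
static bool mesh_scan_params_valid(uint16_t period, uint16_t window)
{
        if (period < 0x0004 || period > 0x4000)
                return false;
        if (window < 0x0004 || window > 0x4000)
                return false;
        return window <= period;        /* window may not exceed the period */
}

int main(void)
{
        printf("%d\n", mesh_scan_params_valid(0x0100, 0x0010)); /* 1 */
        printf("%d\n", mesh_scan_params_valid(0x0010, 0x0100)); /* 0: window > period */
        printf("%d\n", mesh_scan_params_valid(0x5000, 0x0010)); /* 0: period out of range */
        return 0;
}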
@@ -319,8 +319,8 @@ static int ip_rcv_finish_core(struct net *net,
 			      const struct sk_buff *hint)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	int err, drop_reason;
 	struct rtable *rt;
+	int drop_reason;
 
 	if (ip_can_use_hint(skb, iph, hint)) {
 		drop_reason = ip_route_use_hint(skb, iph->daddr, iph->saddr,
@@ -345,9 +345,10 @@ static int ip_rcv_finish_core(struct net *net,
 			break;
 		case IPPROTO_UDP:
 			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
-				err = udp_v4_early_demux(skb);
-				if (unlikely(err))
+				drop_reason = udp_v4_early_demux(skb);
+				if (unlikely(drop_reason))
 					goto drop_error;
+				drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
 
 				/* must reload iph, skb->head might have changed */
 				iph = ip_hdr(skb);
@@ -497,22 +497,15 @@ void rose_rt_device_down(struct net_device *dev)
 		t         = rose_node;
 		rose_node = rose_node->next;
 
-		for (i = 0; i < t->count; i++) {
+		for (i = t->count - 1; i >= 0; i--) {
 			if (t->neighbour[i] != s)
 				continue;
 
 			t->count--;
 
-			switch (i) {
-			case 0:
-				t->neighbour[0] = t->neighbour[1];
-				fallthrough;
-			case 1:
-				t->neighbour[1] = t->neighbour[2];
-				break;
-			case 2:
-				break;
-			}
+			memmove(&t->neighbour[i], &t->neighbour[i + 1],
+				sizeof(t->neighbour[0]) *
+				(t->count - i));
 		}
 
 		if (t->count <= 0)
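The old switch only handled indices 0 to 2 and could leave a stale pointer behind when a middle entry was removed; the memmove() closes the gap so the remaining neighbours stay packed at the front, and walking the array downward keeps the loop index valid after each removal. A standalone sketch of that compaction pattern, with plain ints standing in for the real neighbour pointers:

#include <stdio.h>
#include <string.h>

#define MAX_NEIGH 3

struct node {
        int neighbour[MAX_NEIGH];       /* stand-in for the neighbour pointers */
        int count;
};

/* Remove every occurrence of 'victim', compacting the array as we go. */
static void remove_neighbour(struct node *t, int victim)
{
        int i;

        for (i = t->count - 1; i >= 0; i--) {
                if (t->neighbour[i] != victim)
                        continue;

                t->count--;
                memmove(&t->neighbour[i], &t->neighbour[i + 1],
                        sizeof(t->neighbour[0]) * (t->count - i));
        }
}

int main(void)
{
        struct node t = { .neighbour = { 10, 20, 30 }, .count = 3 };

        remove_neighbour(&t, 20);
        printf("count=%d first=%d second=%d\n", t.count,
               t.neighbour[0], t.neighbour[1]);  /* count=2 first=10 second=30 */
        return 0;
}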
@@ -780,15 +780,12 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
 
 void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
 {
-	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
 	const struct Qdisc_class_ops *cops;
 	unsigned long cl;
 	u32 parentid;
 	bool notify;
 	int drops;
 
-	if (n == 0 && len == 0)
-		return;
 	drops = max_t(int, n, 0);
 	rcu_read_lock();
 	while ((parentid = sch->parent)) {
@@ -797,17 +794,8 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
 
 		if (sch->flags & TCQ_F_NOPARENT)
 			break;
-		/* Notify parent qdisc only if child qdisc becomes empty.
-		 *
-		 * If child was empty even before update then backlog
-		 * counter is screwed and we skip notification because
-		 * parent class is already passive.
-		 *
-		 * If the original child was offloaded then it is allowed
-		 * to be seem as empty, so the parent is notified anyway.
-		 */
-		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
-						       !qdisc_is_offloaded);
+		/* Notify parent qdisc only if child qdisc becomes empty. */
+		notify = !sch->q.qlen;
 		/* TODO: perform the search on a per txq basis */
 		sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
 		if (sch == NULL) {
@@ -816,6 +804,9 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
 			}
 		}
 		cops = sch->ops->cl_ops;
 		if (notify && cops->qlen_notify) {
+			/* Note that qlen_notify must be idempotent as it may get called
+			 * multiple times.
+			 */
 			cl = cops->find(sch, parentid);
 			cops->qlen_notify(sch, cl);
@@ -119,6 +119,8 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt,
 			   u16 proto,
 			   struct vmci_handle handle)
 {
+	memset(pkt, 0, sizeof(*pkt));
+
 	/* We register the stream control handler as an any cid handle so we
 	 * must always send from a source address of VMADDR_CID_ANY
 	 */
@@ -131,8 +133,6 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt,
 	pkt->type = type;
 	pkt->src_port = src->svm_port;
 	pkt->dst_port = dst->svm_port;
-	memset(&pkt->proto, 0, sizeof(pkt->proto));
-	memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));
 
 	switch (pkt->type) {
 	case VMCI_TRANSPORT_PACKET_TYPE_INVALID: