
virtio,vhost: fixes

More small fixes. Most notably this fixes a messed up ioctl #,
 and a regression in shmem affecting drm users.
 
 Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQFDBAABCgAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmivE+4PHG1zdEByZWRo
 YXQuY29tAAoJECgfDbjSjVRp0zQIALgyJ2XpwY3IlrXWXZdKPqjL5i/pVwDB1bNt
 +9lnLav8UAaNCIfWZKBo6HYOOmjSLWX8zpVGO88n1Vz9aIm3L0NRkwS0nZoLpTCr
 eeZpZ5LwKGTn1iZIOjxX+itc++nffQWS4GKLdCJpasc6D5DHhGU0GLpX62g8NQQc
 /MevRNxuHgECj6KqtNUf8hhqbT5PrCdQmIKGagaX/jzVH2d+9ZTdo0BECStfzB/z
 B9HT6Xm0MsJjRpy7Hqmnp21tBC75O7o79VjnvibENdTOYWMDbgA3g7OL92aUNvGh
 zaAyG040nVYDD5jnC7UiGCZq+49NTw+x9LBUUCuGptU3a2FclHs=
 =fQg1
 -----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio/vhost fixes from Michael Tsirkin:
 "More small fixes. Most notably this fixes a messed up ioctl number,
  and a regression in shmem affecting drm users"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio_net: adjust the execution order of function `virtnet_close` during freeze
  virtio_input: Improve freeze handling
  vhost: Fix ioctl # for VHOST_[GS]ET_FORK_FROM_OWNER
  Revert "virtio: reject shm region if length is zero"
  vhost/net: Protect ubufs with rcu read lock in vhost_net_ubuf_put()
  virtio_pci: Fix misleading comment for queue vector
Linus Torvalds 2025-08-27 10:19:35 -07:00
commit 39f90c1967
7 changed files with 21 additions and 13 deletions


@@ -5758,14 +5758,15 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
 	disable_rx_mode_work(vi);
 	flush_work(&vi->rx_mode_work);
 
-	netif_tx_lock_bh(vi->dev);
-	netif_device_detach(vi->dev);
-	netif_tx_unlock_bh(vi->dev);
 	if (netif_running(vi->dev)) {
 		rtnl_lock();
 		virtnet_close(vi->dev);
 		rtnl_unlock();
 	}
+
+	netif_tx_lock_bh(vi->dev);
+	netif_device_detach(vi->dev);
+	netif_tx_unlock_bh(vi->dev);
 }
 
 static int init_vqs(struct virtnet_info *vi);


@@ -99,6 +99,7 @@ struct vhost_net_ubuf_ref {
 	atomic_t refcount;
 	wait_queue_head_t wait;
 	struct vhost_virtqueue *vq;
+	struct rcu_head rcu;
 };
 
 #define VHOST_NET_BATCH 64
@@ -250,9 +251,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
 
 static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
 {
-	int r = atomic_sub_return(1, &ubufs->refcount);
+	int r;
+
+	rcu_read_lock();
+	r = atomic_sub_return(1, &ubufs->refcount);
 	if (unlikely(!r))
 		wake_up(&ubufs->wait);
+	rcu_read_unlock();
 	return r;
 }
 
@@ -265,7 +270,7 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
 {
 	vhost_net_ubuf_put_and_wait(ubufs);
-	kfree(ubufs);
+	kfree_rcu(ubufs, rcu);
 }
 
 static void vhost_net_clear_ubuf_info(struct vhost_net *n)
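The vhost/net change above ("Protect ubufs with rcu read lock in vhost_net_ubuf_put()") covers the wake-up in the put path with an RCU read-side section and defers the final free with kfree_rcu(): once the refcount hits zero, the wait-and-free side may run immediately, so the waker must not touch the object after it has been freed. Below is a minimal, generic sketch of that pattern; the struct and function names are illustrative stand-ins, not the vhost code itself.

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/wait.h>

/* Illustrative stand-in for a refcounted object freed by a waiter. */
struct obj {
	atomic_t refcount;
	wait_queue_head_t wait;
	struct rcu_head rcu;
};

static struct obj *obj_alloc(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	atomic_set(&o->refcount, 1);
	init_waitqueue_head(&o->wait);
	return o;
}

static void obj_put(struct obj *o)
{
	/* Hold the RCU read lock across the wake-up: the waiter may observe
	 * refcount == 0 and free the object before wake_up() returns. */
	rcu_read_lock();
	if (atomic_dec_and_test(&o->refcount))
		wake_up(&o->wait);
	rcu_read_unlock();
}

static void obj_put_wait_and_free(struct obj *o)
{
	obj_put(o);
	wait_event(o->wait, !atomic_read(&o->refcount));
	/* kfree_rcu() defers the actual free past a grace period, so any
	 * obj_put() still inside its read-side critical section stays safe. */
	kfree_rcu(o, rcu);
}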


@@ -360,11 +360,15 @@ static int virtinput_freeze(struct virtio_device *vdev)
 {
 	struct virtio_input *vi = vdev->priv;
 	unsigned long flags;
+	void *buf;
 
 	spin_lock_irqsave(&vi->lock, flags);
 	vi->ready = false;
 	spin_unlock_irqrestore(&vi->lock, flags);
 
+	virtio_reset_device(vdev);
+	while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+		kfree(buf);
 	vdev->config->del_vqs(vdev);
 	return 0;
 }


@@ -140,9 +140,9 @@ EXPORT_SYMBOL_GPL(vp_legacy_set_status);
  * vp_legacy_queue_vector - set the MSIX vector for a specific virtqueue
  * @ldev: the legacy virtio-pci device
  * @index: queue index
- * @vector: the config vector
+ * @vector: the queue vector
  *
- * Returns the config vector read from the device
+ * Returns the queue vector read from the device
  */
 u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev,
 			   u16 index, u16 vector)


@@ -546,9 +546,9 @@ EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
  * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
  * @mdev: the modern virtio-pci device
  * @index: queue index
- * @vector: the config vector
+ * @vector: the queue vector
  *
- * Returns the config vector read from the device
+ * Returns the queue vector read from the device
  */
 u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
 			   u16 index, u16 vector)
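The two comment fixes above are documentation-only: vp_legacy_queue_vector() and vp_modern_queue_vector() program the MSI-X vector of one virtqueue and return the vector the device reads back, not the config vector. As a hedged usage sketch (the wrapper below is illustrative, not part of this patch), a caller typically treats a returned VIRTIO_MSI_NO_VECTOR as the device refusing the assignment:

#include <linux/errno.h>
#include <linux/virtio_pci.h>		/* VIRTIO_MSI_NO_VECTOR */
#include <linux/virtio_pci_modern.h>	/* vp_modern_queue_vector() */

/* Illustrative helper: request @msix_vec for queue @index and confirm the
 * device accepted it by checking the vector it reports back. */
static int assign_queue_vector(struct virtio_pci_modern_device *mdev,
			       u16 index, u16 msix_vec)
{
	if (vp_modern_queue_vector(mdev, index, msix_vec) == VIRTIO_MSI_NO_VECTOR)
		return -EBUSY;	/* e.g. fall back to a shared vector or INTx */
	return 0;
}

The legacy helper behaves the same way, taking a struct virtio_pci_legacy_device instead.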


@@ -328,8 +328,6 @@ static inline
 bool virtio_get_shm_region(struct virtio_device *vdev,
 			   struct virtio_shm_region *region, u8 id)
 {
-	if (!region->len)
-		return false;
 	if (!vdev->config->get_shm_region)
 		return false;
 	return vdev->config->get_shm_region(vdev, region, id);


@ -260,7 +260,7 @@
* When fork_owner is set to VHOST_FORK_OWNER_KTHREAD: * When fork_owner is set to VHOST_FORK_OWNER_KTHREAD:
* - Vhost will create vhost workers as kernel threads. * - Vhost will create vhost workers as kernel threads.
*/ */
#define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x83, __u8) #define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x84, __u8)
/** /**
* VHOST_GET_FORK_OWNER - Get the current fork_owner flag for the vhost device. * VHOST_GET_FORK_OWNER - Get the current fork_owner flag for the vhost device.
@ -268,6 +268,6 @@
* *
* @return: An 8-bit value indicating the current thread mode. * @return: An 8-bit value indicating the current thread mode.
*/ */
#define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x84, __u8) #define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x85, __u8)
#endif #endif
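The last two hunks shift both fork_owner ioctls up by one (SET: 0x83 to 0x84, GET: 0x84 to 0x85); this is the "messed up ioctl #" fix called out in the pull message. A hedged userspace sketch of the pair, assuming an already-open vhost fd (e.g. from /dev/vhost-net) and uapi headers recent enough to define these macros and VHOST_FORK_OWNER_KTHREAD:

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Illustrative only: ask for kernel-thread vhost workers and read the
 * setting back.  Returns 0 on success, -1 on error or mismatch. */
static int force_kthread_workers(int vhost_fd)
{
	__u8 mode = VHOST_FORK_OWNER_KTHREAD;
	__u8 cur = 0;

	if (ioctl(vhost_fd, VHOST_SET_FORK_FROM_OWNER, &mode) < 0)
		return -1;
	if (ioctl(vhost_fd, VHOST_GET_FORK_FROM_OWNER, &cur) < 0)
		return -1;
	return cur == mode ? 0 : -1;
}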