mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-04 20:19:47 +08:00
VFIO updates for v6.17-rc1 v2
- Fix imbalance where the no-iommu/cdev device path skips too much on open, failing to increment a reference, but still decrements the reference on close. Add bounds checking to prevent such underflows. (Jacob Pan) - Fill missing detach_ioas op for pds_vfio_pci, fixing probe failure when used with IOMMUFD. (Brett Creeley) - Split SR-IOV VFs to separate dev_set, avoiding unnecessary serialization between VFs that appear on the same bus. (Alex Williamson) - Fix a theoretical integer overflow is the mlx5-vfio-pci variant driver. (Artem Sadovnikov) - Implement missing VF token checking support via vfio cdev/IOMMUFD interface. (Jason Gunthorpe) - Update QAT vfio-pci variant driver to claim latest VF devices. (Małgorzata Mielnik) - Add a cond_resched() call to avoid holding the CPU too long during DMA mapping operations. (Keith Busch) -----BEGIN PGP SIGNATURE----- iQJPBAABCAA5FiEEQvbATlQL0amee4qQI5ubbjuwiyIFAmiTcwAbHGFsZXgud2ls bGlhbXNvbkByZWRoYXQuY29tAAoJECObm247sIsit4MP/3h6nezTT2sBN6//nbYM maKV//WBCcaaWfi5kT+I8u6lia0ahPRQsG1qrmvl/ZL5RGSU43N45+R/ahcyo1NP dd3pc8g7XjLYm8WSfziNzx67DBBiwcycV18vGIvGut4O9CIYzWvn/UTvcWdTd2gk AEoBOJsmTfdipQCO6/rXJ96HAApEKHfIPLg9GKXOwabBD045CP1/5d/N3DIlZ9B2 rYsb966UxUcS3QZDkSTSjommm5MSOS1MXlxRsn35Ug1OciyPb82qox2jVufVVSKu Zoan5tLLwN9YlJdiryuC9pvnl+g0T8Xum/ARcLDGLdWEtdfaAjvmdOoigefckDIb PEWSLttezPs2VeW7v3p3HqW5BmMEFWs2LMah3/drC3BGOpizKEahrDU+hLC0QcAm +hULbYvZ3IKNcrVvSubLriwt0zm5ihtZ5FUEr2/6JgnuPwMl3WOUaB5h3qBAjZVf 41wmo4ef2vbd9B5dXz8p7BxsePsas2Hnvu4s8sp+JKa0khoRZIgucA8ecSpN5LBv PxJ4dzcv9LeSPSfExwQSXoMb+Wj9xtRbA5O0U8WzjNmnvRibdZYsjEuA+pJLfAeq gBwuEzr48WGZEpYfCl2nhezaT6s/OqAhU/uVZJ+3LAV2SJ2nVMGkc5DCPoVB7KOy f5v+9X4IxZcjHRZgHuWEIWrV =c3K5 -----END PGP SIGNATURE----- Merge tag 'vfio-v6.17-rc1-v2' of https://github.com/awilliam/linux-vfio Pull VFIO updates from Alex Williamson: - Fix imbalance where the no-iommu/cdev device path skips too much on open, failing to increment a reference, but still decrements the reference on close. 
Add bounds checking to prevent such underflows (Jacob Pan) - Fill missing detach_ioas op for pds_vfio_pci, fixing probe failure when used with IOMMUFD (Brett Creeley) - Split SR-IOV VFs to separate dev_set, avoiding unnecessary serialization between VFs that appear on the same bus (Alex Williamson) - Fix a theoretical integer overflow in the mlx5-vfio-pci variant driver (Artem Sadovnikov) - Implement missing VF token checking support via vfio cdev/IOMMUFD interface (Jason Gunthorpe) - Update QAT vfio-pci variant driver to claim latest VF devices (Małgorzata Mielnik) - Add a cond_resched() call to avoid holding the CPU too long during DMA mapping operations (Keith Busch) * tag 'vfio-v6.17-rc1-v2' of https://github.com/awilliam/linux-vfio: vfio/type1: conditional rescheduling while pinning vfio/qat: add support for intel QAT 6xxx virtual functions vfio/qat: Remove myself from VFIO QAT PCI driver maintainers vfio/pci: Do vf_token checks for VFIO_DEVICE_BIND_IOMMUFD vfio/mlx5: fix possible overflow in tracking max message size vfio/pci: Separate SR-IOV VF dev_set vfio/pds: Fix missing detach_ioas op vfio: Prevent open_count decrement to negative vfio: Fix unbalanced vfio_df_close call in no-iommu mode
This commit is contained in:
commit
e8214ed59b
@ -26455,7 +26455,6 @@ S: Maintained
|
|||||||
F: drivers/vfio/platform/
|
F: drivers/vfio/platform/
|
||||||
|
|
||||||
VFIO QAT PCI DRIVER
|
VFIO QAT PCI DRIVER
|
||||||
M: Xin Zeng <xin.zeng@intel.com>
|
|
||||||
M: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
|
M: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
|
||||||
L: kvm@vger.kernel.org
|
L: kvm@vger.kernel.org
|
||||||
L: qat-linux@intel.com
|
L: qat-linux@intel.com
|
||||||
|
@ -60,22 +60,50 @@ static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
|
|||||||
spin_unlock(&df->kvm_ref_lock);
|
spin_unlock(&df->kvm_ref_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int vfio_df_check_token(struct vfio_device *device,
|
||||||
|
const struct vfio_device_bind_iommufd *bind)
|
||||||
|
{
|
||||||
|
uuid_t uuid;
|
||||||
|
|
||||||
|
if (!device->ops->match_token_uuid) {
|
||||||
|
if (bind->flags & VFIO_DEVICE_BIND_FLAG_TOKEN)
|
||||||
|
return -EINVAL;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!(bind->flags & VFIO_DEVICE_BIND_FLAG_TOKEN))
|
||||||
|
return device->ops->match_token_uuid(device, NULL);
|
||||||
|
|
||||||
|
if (copy_from_user(&uuid, u64_to_user_ptr(bind->token_uuid_ptr),
|
||||||
|
sizeof(uuid)))
|
||||||
|
return -EFAULT;
|
||||||
|
return device->ops->match_token_uuid(device, &uuid);
|
||||||
|
}
|
||||||
|
|
||||||
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
|
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
|
||||||
struct vfio_device_bind_iommufd __user *arg)
|
struct vfio_device_bind_iommufd __user *arg)
|
||||||
{
|
{
|
||||||
|
const u32 VALID_FLAGS = VFIO_DEVICE_BIND_FLAG_TOKEN;
|
||||||
struct vfio_device *device = df->device;
|
struct vfio_device *device = df->device;
|
||||||
struct vfio_device_bind_iommufd bind;
|
struct vfio_device_bind_iommufd bind;
|
||||||
unsigned long minsz;
|
unsigned long minsz;
|
||||||
|
u32 user_size;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
static_assert(__same_type(arg->out_devid, df->devid));
|
static_assert(__same_type(arg->out_devid, df->devid));
|
||||||
|
|
||||||
minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);
|
minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);
|
||||||
|
|
||||||
if (copy_from_user(&bind, arg, minsz))
|
ret = get_user(user_size, &arg->argsz);
|
||||||
return -EFAULT;
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
if (user_size < minsz)
|
||||||
|
return -EINVAL;
|
||||||
|
ret = copy_struct_from_user(&bind, minsz, arg, user_size);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
|
if (bind.iommufd < 0 || bind.flags & ~VALID_FLAGS)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
/* BIND_IOMMUFD only allowed for cdev fds */
|
/* BIND_IOMMUFD only allowed for cdev fds */
|
||||||
@ -93,6 +121,10 @@ long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
|
|||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ret = vfio_df_check_token(device, &bind);
|
||||||
|
if (ret)
|
||||||
|
goto out_unlock;
|
||||||
|
|
||||||
df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
|
df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
|
||||||
if (IS_ERR(df->iommufd)) {
|
if (IS_ERR(df->iommufd)) {
|
||||||
ret = PTR_ERR(df->iommufd);
|
ret = PTR_ERR(df->iommufd);
|
||||||
|
@ -192,12 +192,11 @@ static int vfio_df_group_open(struct vfio_device_file *df)
|
|||||||
* implies they expected translation to exist
|
* implies they expected translation to exist
|
||||||
*/
|
*/
|
||||||
if (!capable(CAP_SYS_RAWIO) ||
|
if (!capable(CAP_SYS_RAWIO) ||
|
||||||
vfio_iommufd_device_has_compat_ioas(device, df->iommufd))
|
vfio_iommufd_device_has_compat_ioas(device, df->iommufd)) {
|
||||||
ret = -EPERM;
|
ret = -EPERM;
|
||||||
else
|
|
||||||
ret = 0;
|
|
||||||
goto out_put_kvm;
|
goto out_put_kvm;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
ret = vfio_df_open(df);
|
ret = vfio_df_open(df);
|
||||||
if (ret)
|
if (ret)
|
||||||
|
@ -25,6 +25,10 @@ int vfio_df_iommufd_bind(struct vfio_device_file *df)
|
|||||||
|
|
||||||
lockdep_assert_held(&vdev->dev_set->lock);
|
lockdep_assert_held(&vdev->dev_set->lock);
|
||||||
|
|
||||||
|
/* Returns 0 to permit device opening under noiommu mode */
|
||||||
|
if (vfio_device_is_noiommu(vdev))
|
||||||
|
return 0;
|
||||||
|
|
||||||
return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
|
return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1583,6 +1583,7 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
|
|||||||
.mmap = vfio_pci_core_mmap,
|
.mmap = vfio_pci_core_mmap,
|
||||||
.request = vfio_pci_core_request,
|
.request = vfio_pci_core_request,
|
||||||
.match = vfio_pci_core_match,
|
.match = vfio_pci_core_match,
|
||||||
|
.match_token_uuid = vfio_pci_core_match_token_uuid,
|
||||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||||
|
@ -1523,8 +1523,8 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev,
|
|||||||
log_max_msg_size = MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_msg_size);
|
log_max_msg_size = MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_msg_size);
|
||||||
max_msg_size = (1ULL << log_max_msg_size);
|
max_msg_size = (1ULL << log_max_msg_size);
|
||||||
/* The RQ must hold at least 4 WQEs/messages for successful QP creation */
|
/* The RQ must hold at least 4 WQEs/messages for successful QP creation */
|
||||||
if (rq_size < 4 * max_msg_size)
|
if (rq_size < 4ULL * max_msg_size)
|
||||||
rq_size = 4 * max_msg_size;
|
rq_size = 4ULL * max_msg_size;
|
||||||
|
|
||||||
memset(tracker, 0, sizeof(*tracker));
|
memset(tracker, 0, sizeof(*tracker));
|
||||||
tracker->uar = mlx5_get_uars_page(mdev);
|
tracker->uar = mlx5_get_uars_page(mdev);
|
||||||
|
@ -1372,6 +1372,7 @@ static const struct vfio_device_ops mlx5vf_pci_ops = {
|
|||||||
.mmap = vfio_pci_core_mmap,
|
.mmap = vfio_pci_core_mmap,
|
||||||
.request = vfio_pci_core_request,
|
.request = vfio_pci_core_request,
|
||||||
.match = vfio_pci_core_match,
|
.match = vfio_pci_core_match,
|
||||||
|
.match_token_uuid = vfio_pci_core_match_token_uuid,
|
||||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||||
|
@ -696,6 +696,7 @@ static const struct vfio_device_ops nvgrace_gpu_pci_ops = {
|
|||||||
.mmap = nvgrace_gpu_mmap,
|
.mmap = nvgrace_gpu_mmap,
|
||||||
.request = vfio_pci_core_request,
|
.request = vfio_pci_core_request,
|
||||||
.match = vfio_pci_core_match,
|
.match = vfio_pci_core_match,
|
||||||
|
.match_token_uuid = vfio_pci_core_match_token_uuid,
|
||||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||||
@ -715,6 +716,7 @@ static const struct vfio_device_ops nvgrace_gpu_pci_core_ops = {
|
|||||||
.mmap = vfio_pci_core_mmap,
|
.mmap = vfio_pci_core_mmap,
|
||||||
.request = vfio_pci_core_request,
|
.request = vfio_pci_core_request,
|
||||||
.match = vfio_pci_core_match,
|
.match = vfio_pci_core_match,
|
||||||
|
.match_token_uuid = vfio_pci_core_match_token_uuid,
|
||||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||||
|
@ -201,9 +201,11 @@ static const struct vfio_device_ops pds_vfio_ops = {
|
|||||||
.mmap = vfio_pci_core_mmap,
|
.mmap = vfio_pci_core_mmap,
|
||||||
.request = vfio_pci_core_request,
|
.request = vfio_pci_core_request,
|
||||||
.match = vfio_pci_core_match,
|
.match = vfio_pci_core_match,
|
||||||
|
.match_token_uuid = vfio_pci_core_match_token_uuid,
|
||||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||||
|
.detach_ioas = vfio_iommufd_physical_detach_ioas,
|
||||||
};
|
};
|
||||||
|
|
||||||
const struct vfio_device_ops *pds_vfio_ops_info(void)
|
const struct vfio_device_ops *pds_vfio_ops_info(void)
|
||||||
|
@ -614,6 +614,7 @@ static const struct vfio_device_ops qat_vf_pci_ops = {
|
|||||||
.mmap = vfio_pci_core_mmap,
|
.mmap = vfio_pci_core_mmap,
|
||||||
.request = vfio_pci_core_request,
|
.request = vfio_pci_core_request,
|
||||||
.match = vfio_pci_core_match,
|
.match = vfio_pci_core_match,
|
||||||
|
.match_token_uuid = vfio_pci_core_match_token_uuid,
|
||||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||||
@ -675,6 +676,8 @@ static const struct pci_device_id qat_vf_vfio_pci_table[] = {
|
|||||||
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) },
|
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) },
|
||||||
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) },
|
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) },
|
||||||
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) },
|
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) },
|
||||||
|
/* Intel QAT GEN6 6xxx VF device */
|
||||||
|
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4949) },
|
||||||
{}
|
{}
|
||||||
};
|
};
|
||||||
MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table);
|
MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table);
|
||||||
@ -696,5 +699,5 @@ module_pci_driver(qat_vf_vfio_pci_driver);
|
|||||||
|
|
||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
MODULE_AUTHOR("Xin Zeng <xin.zeng@intel.com>");
|
MODULE_AUTHOR("Xin Zeng <xin.zeng@intel.com>");
|
||||||
MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT GEN4 device family");
|
MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT device family");
|
||||||
MODULE_IMPORT_NS("CRYPTO_QAT");
|
MODULE_IMPORT_NS("CRYPTO_QAT");
|
||||||
|
@ -138,6 +138,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
|
|||||||
.mmap = vfio_pci_core_mmap,
|
.mmap = vfio_pci_core_mmap,
|
||||||
.request = vfio_pci_core_request,
|
.request = vfio_pci_core_request,
|
||||||
.match = vfio_pci_core_match,
|
.match = vfio_pci_core_match,
|
||||||
|
.match_token_uuid = vfio_pci_core_match_token_uuid,
|
||||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||||
|
@ -1818,9 +1818,13 @@ void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(vfio_pci_core_request);
|
EXPORT_SYMBOL_GPL(vfio_pci_core_request);
|
||||||
|
|
||||||
static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
|
int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev,
|
||||||
bool vf_token, uuid_t *uuid)
|
const uuid_t *uuid)
|
||||||
|
|
||||||
{
|
{
|
||||||
|
struct vfio_pci_core_device *vdev =
|
||||||
|
container_of(core_vdev, struct vfio_pci_core_device, vdev);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* There's always some degree of trust or collaboration between SR-IOV
|
* There's always some degree of trust or collaboration between SR-IOV
|
||||||
* PF and VFs, even if just that the PF hosts the SR-IOV capability and
|
* PF and VFs, even if just that the PF hosts the SR-IOV capability and
|
||||||
@ -1851,7 +1855,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
|
|||||||
bool match;
|
bool match;
|
||||||
|
|
||||||
if (!pf_vdev) {
|
if (!pf_vdev) {
|
||||||
if (!vf_token)
|
if (!uuid)
|
||||||
return 0; /* PF is not vfio-pci, no VF token */
|
return 0; /* PF is not vfio-pci, no VF token */
|
||||||
|
|
||||||
pci_info_ratelimited(vdev->pdev,
|
pci_info_ratelimited(vdev->pdev,
|
||||||
@ -1859,7 +1863,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!vf_token) {
|
if (!uuid) {
|
||||||
pci_info_ratelimited(vdev->pdev,
|
pci_info_ratelimited(vdev->pdev,
|
||||||
"VF token required to access device\n");
|
"VF token required to access device\n");
|
||||||
return -EACCES;
|
return -EACCES;
|
||||||
@ -1877,7 +1881,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
|
|||||||
} else if (vdev->vf_token) {
|
} else if (vdev->vf_token) {
|
||||||
mutex_lock(&vdev->vf_token->lock);
|
mutex_lock(&vdev->vf_token->lock);
|
||||||
if (vdev->vf_token->users) {
|
if (vdev->vf_token->users) {
|
||||||
if (!vf_token) {
|
if (!uuid) {
|
||||||
mutex_unlock(&vdev->vf_token->lock);
|
mutex_unlock(&vdev->vf_token->lock);
|
||||||
pci_info_ratelimited(vdev->pdev,
|
pci_info_ratelimited(vdev->pdev,
|
||||||
"VF token required to access device\n");
|
"VF token required to access device\n");
|
||||||
@ -1890,12 +1894,12 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
|
|||||||
"Incorrect VF token provided for device\n");
|
"Incorrect VF token provided for device\n");
|
||||||
return -EACCES;
|
return -EACCES;
|
||||||
}
|
}
|
||||||
} else if (vf_token) {
|
} else if (uuid) {
|
||||||
uuid_copy(&vdev->vf_token->uuid, uuid);
|
uuid_copy(&vdev->vf_token->uuid, uuid);
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_unlock(&vdev->vf_token->lock);
|
mutex_unlock(&vdev->vf_token->lock);
|
||||||
} else if (vf_token) {
|
} else if (uuid) {
|
||||||
pci_info_ratelimited(vdev->pdev,
|
pci_info_ratelimited(vdev->pdev,
|
||||||
"VF token incorrectly provided, not a PF or VF\n");
|
"VF token incorrectly provided, not a PF or VF\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
@ -1903,6 +1907,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
|
|||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(vfio_pci_core_match_token_uuid);
|
||||||
|
|
||||||
#define VF_TOKEN_ARG "vf_token="
|
#define VF_TOKEN_ARG "vf_token="
|
||||||
|
|
||||||
@ -1949,7 +1954,8 @@ int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
|
ret = core_vdev->ops->match_token_uuid(core_vdev,
|
||||||
|
vf_token ? &uuid : NULL);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
@ -2146,7 +2152,7 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
|
|||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pci_is_root_bus(pdev->bus)) {
|
if (pci_is_root_bus(pdev->bus) || pdev->is_virtfn) {
|
||||||
ret = vfio_assign_device_set(&vdev->vdev, vdev);
|
ret = vfio_assign_device_set(&vdev->vdev, vdev);
|
||||||
} else if (!pci_probe_reset_slot(pdev->slot)) {
|
} else if (!pci_probe_reset_slot(pdev->slot)) {
|
||||||
ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
|
ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
|
||||||
|
@ -94,6 +94,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_lm_ops = {
|
|||||||
.mmap = vfio_pci_core_mmap,
|
.mmap = vfio_pci_core_mmap,
|
||||||
.request = vfio_pci_core_request,
|
.request = vfio_pci_core_request,
|
||||||
.match = vfio_pci_core_match,
|
.match = vfio_pci_core_match,
|
||||||
|
.match_token_uuid = vfio_pci_core_match_token_uuid,
|
||||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||||
@ -114,6 +115,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_tran_lm_ops = {
|
|||||||
.mmap = vfio_pci_core_mmap,
|
.mmap = vfio_pci_core_mmap,
|
||||||
.request = vfio_pci_core_request,
|
.request = vfio_pci_core_request,
|
||||||
.match = vfio_pci_core_match,
|
.match = vfio_pci_core_match,
|
||||||
|
.match_token_uuid = vfio_pci_core_match_token_uuid,
|
||||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||||
@ -134,6 +136,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_ops = {
|
|||||||
.mmap = vfio_pci_core_mmap,
|
.mmap = vfio_pci_core_mmap,
|
||||||
.request = vfio_pci_core_request,
|
.request = vfio_pci_core_request,
|
||||||
.match = vfio_pci_core_match,
|
.match = vfio_pci_core_match,
|
||||||
|
.match_token_uuid = vfio_pci_core_match_token_uuid,
|
||||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||||
|
@ -647,6 +647,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
|
|||||||
|
|
||||||
while (npage) {
|
while (npage) {
|
||||||
if (!batch->size) {
|
if (!batch->size) {
|
||||||
|
/*
|
||||||
|
* Large mappings may take a while to repeatedly refill
|
||||||
|
* the batch, so conditionally relinquish the CPU when
|
||||||
|
* needed to avoid stalls.
|
||||||
|
*/
|
||||||
|
cond_resched();
|
||||||
|
|
||||||
/* Empty batch, so refill it. */
|
/* Empty batch, so refill it. */
|
||||||
ret = vaddr_get_pfns(mm, vaddr, npage, dma->prot,
|
ret = vaddr_get_pfns(mm, vaddr, npage, dma->prot,
|
||||||
&pfn, batch);
|
&pfn, batch);
|
||||||
|
@ -583,7 +583,8 @@ void vfio_df_close(struct vfio_device_file *df)
|
|||||||
|
|
||||||
lockdep_assert_held(&device->dev_set->lock);
|
lockdep_assert_held(&device->dev_set->lock);
|
||||||
|
|
||||||
vfio_assert_device_open(device);
|
if (!vfio_assert_device_open(device))
|
||||||
|
return;
|
||||||
if (device->open_count == 1)
|
if (device->open_count == 1)
|
||||||
vfio_df_device_last_close(df);
|
vfio_df_device_last_close(df);
|
||||||
device->open_count--;
|
device->open_count--;
|
||||||
|
@ -105,6 +105,9 @@ struct vfio_device {
|
|||||||
* @match: Optional device name match callback (return: 0 for no-match, >0 for
|
* @match: Optional device name match callback (return: 0 for no-match, >0 for
|
||||||
* match, -errno for abort (ex. match with insufficient or incorrect
|
* match, -errno for abort (ex. match with insufficient or incorrect
|
||||||
* additional args)
|
* additional args)
|
||||||
|
* @match_token_uuid: Optional device token match/validation. Return 0
|
||||||
|
* if the uuid is valid for the device, -errno otherwise. uuid is NULL
|
||||||
|
* if none was provided.
|
||||||
* @dma_unmap: Called when userspace unmaps IOVA from the container
|
* @dma_unmap: Called when userspace unmaps IOVA from the container
|
||||||
* this device is attached to.
|
* this device is attached to.
|
||||||
* @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
|
* @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
|
||||||
@ -132,6 +135,7 @@ struct vfio_device_ops {
|
|||||||
int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
|
int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
|
||||||
void (*request)(struct vfio_device *vdev, unsigned int count);
|
void (*request)(struct vfio_device *vdev, unsigned int count);
|
||||||
int (*match)(struct vfio_device *vdev, char *buf);
|
int (*match)(struct vfio_device *vdev, char *buf);
|
||||||
|
int (*match_token_uuid)(struct vfio_device *vdev, const uuid_t *uuid);
|
||||||
void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
|
void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
|
||||||
int (*device_feature)(struct vfio_device *device, u32 flags,
|
int (*device_feature)(struct vfio_device *device, u32 flags,
|
||||||
void __user *arg, size_t argsz);
|
void __user *arg, size_t argsz);
|
||||||
|
@ -122,6 +122,8 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu
|
|||||||
int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
|
int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
|
||||||
void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
|
void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
|
||||||
int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
|
int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
|
||||||
|
int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev,
|
||||||
|
const uuid_t *uuid);
|
||||||
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
|
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
|
||||||
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
|
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
|
||||||
void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
|
void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
|
||||||
|
@ -905,10 +905,12 @@ struct vfio_device_feature {
|
|||||||
* VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18,
|
* VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18,
|
||||||
* struct vfio_device_bind_iommufd)
|
* struct vfio_device_bind_iommufd)
|
||||||
* @argsz: User filled size of this data.
|
* @argsz: User filled size of this data.
|
||||||
* @flags: Must be 0.
|
* @flags: Must be 0 or a combination of VFIO_DEVICE_BIND_* flags
|
||||||
* @iommufd: iommufd to bind.
|
* @iommufd: iommufd to bind.
|
||||||
* @out_devid: The device id generated by this bind. devid is a handle for
|
* @out_devid: The device id generated by this bind. devid is a handle for
|
||||||
* this device/iommufd bond and can be used in IOMMUFD commands.
|
* this device/iommufd bond and can be used in IOMMUFD commands.
|
||||||
|
* @token_uuid_ptr: Valid if VFIO_DEVICE_BIND_FLAG_TOKEN. Points to a 16 byte
|
||||||
|
* UUID in the same format as VFIO_DEVICE_FEATURE_PCI_VF_TOKEN.
|
||||||
*
|
*
|
||||||
* Bind a vfio_device to the specified iommufd.
|
* Bind a vfio_device to the specified iommufd.
|
||||||
*
|
*
|
||||||
@ -917,13 +919,21 @@ struct vfio_device_feature {
|
|||||||
*
|
*
|
||||||
* Unbind is automatically conducted when device fd is closed.
|
* Unbind is automatically conducted when device fd is closed.
|
||||||
*
|
*
|
||||||
|
* A token is sometimes required to open the device. When a token is not known to be
|
||||||
|
* needed, VFIO_DEVICE_BIND_FLAG_TOKEN should not be set and token_uuid_ptr is
|
||||||
|
* ignored. The only case today is a PF/VF relationship where the VF bind must
|
||||||
|
* be provided the same token as VFIO_DEVICE_FEATURE_PCI_VF_TOKEN provided to
|
||||||
|
* the PF.
|
||||||
|
*
|
||||||
* Return: 0 on success, -errno on failure.
|
* Return: 0 on success, -errno on failure.
|
||||||
*/
|
*/
|
||||||
struct vfio_device_bind_iommufd {
|
struct vfio_device_bind_iommufd {
|
||||||
__u32 argsz;
|
__u32 argsz;
|
||||||
__u32 flags;
|
__u32 flags;
|
||||||
|
#define VFIO_DEVICE_BIND_FLAG_TOKEN (1 << 0)
|
||||||
__s32 iommufd;
|
__s32 iommufd;
|
||||||
__u32 out_devid;
|
__u32 out_devid;
|
||||||
|
__aligned_u64 token_uuid_ptr;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define VFIO_DEVICE_BIND_IOMMUFD _IO(VFIO_TYPE, VFIO_BASE + 18)
|
#define VFIO_DEVICE_BIND_IOMMUFD _IO(VFIO_TYPE, VFIO_BASE + 18)
|
||||||
|
Loading…
Reference in New Issue
Block a user