
drm/msm: Add VM_BIND ioctl

Add a VM_BIND ioctl for binding/unbinding buffers into a VM.  This is
only supported if userspace has opted in to MSM_PARAM_EN_VM_BIND.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661524/
Authored by Rob Clark on 2025-06-29 13:13:18 -07:00; committed by Rob Clark.
commit 2e6a8a1fe2 (parent ecfd9fa83f)
7 changed files with 1208 additions and 36 deletions
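Usage sketch: before the new ioctl is usable, userspace must opt in via the existing SET_PARAM path. A minimal sketch using libdrm's drmIoctl(); treating MSM_PARAM_EN_VM_BIND as a boolean enable (value = 1) is an assumption for illustration, as is the requirement that it happen before the GPU VM is created:

#include <xf86drm.h>
#include <drm/msm_drm.h>

/* Opt this DRM fd in to the userspace-managed VM (VM_BIND) model.
 * Assumption: must be called before the first submit, since the VM
 * type cannot change once the VM exists.
 */
static int enable_vm_bind(int fd)
{
	struct drm_msm_param req = {
		.pipe  = MSM_PIPE_3D0,
		.param = MSM_PARAM_EN_VM_BIND,
		.value = 1,                     /* assumed: nonzero enables */
	};

	return drmIoctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req);
}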

drivers/gpu/drm/msm/msm_drv.c

@@ -802,6 +802,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_VM_BIND,           msm_ioctl_vm_bind,           DRM_RENDER_ALLOW),
 };

 static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)

drivers/gpu/drm/msm/msm_drv.h

@@ -255,7 +255,9 @@ struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev);
 bool msm_use_mmu(struct drm_device *dev);

 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
+int msm_ioctl_vm_bind(struct drm_device *dev, void *data,
+		struct drm_file *file);

 #ifdef CONFIG_DEBUG_FS
 unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);

drivers/gpu/drm/msm/msm_gem.c

@@ -251,8 +251,7 @@ static void put_pages(struct drm_gem_object *obj)
 	}
 }

-static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
-					      unsigned madv)
+struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -1052,18 +1051,37 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
 	/*
 	 * We need to lock any VMs the object is still attached to, but not
 	 * the object itself (see explanation in msm_gem_assert_locked()),
-	 * so just open-code this special case:
+	 * so just open-code this special case.
+	 *
+	 * Note that we skip the dance if we aren't attached to any VM. This
+	 * is load bearing. The driver needs to support two usage models:
+	 *
+	 * 1. Legacy kernel managed VM: Userspace expects the VMAs to be
+	 *    implicitly torn down when the object is freed; the VMAs do not
+	 *    hold a hard reference to the BO.
+	 *
+	 * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
+	 *    BO. This can be dropped when the VM is closed and its
+	 *    associated VMAs are torn down. (See msm_gem_vm_close()).
+	 *
+	 * In the latter case the last reference to a BO can be dropped while
+	 * we already have the VM locked. It would have already been removed
+	 * from the gpuva list, but lockdep doesn't know that, nor does it
+	 * understand the difference between the two usage models.
 	 */
-	drm_exec_init(&exec, 0, 0);
-	drm_exec_until_all_locked (&exec) {
-		struct drm_gpuvm_bo *vm_bo;
-		drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
-			drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(vm_bo->vm));
-			drm_exec_retry_on_contention(&exec);
+	if (!list_empty(&obj->gpuva.list)) {
+		drm_exec_init(&exec, 0, 0);
+		drm_exec_until_all_locked (&exec) {
+			struct drm_gpuvm_bo *vm_bo;
+			drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+				drm_exec_lock_obj(&exec,
+						  drm_gpuvm_resv_obj(vm_bo->vm));
+				drm_exec_retry_on_contention(&exec);
+			}
 		}
+
+		put_iova_spaces(obj, NULL, true);
+		drm_exec_fini(&exec); /* drop locks */
 	}
-	put_iova_spaces(obj, NULL, true);
-	drm_exec_fini(&exec); /* drop locks */

 	if (drm_gem_is_imported(obj)) {
 		GEM_WARN_ON(msm_obj->vaddr);

drivers/gpu/drm/msm/msm_gem.h

@@ -73,6 +73,9 @@ struct msm_gem_vm {
 	/** @mmu: The mmu object which manages the pgtables */
 	struct msm_mmu *mmu;

+	/** @mmu_lock: Protects access to the mmu */
+	struct mutex mmu_lock;
+
 	/**
 	 * @pid: For address spaces associated with a specific process, this
 	 * will be non-NULL:
@@ -205,6 +208,7 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
 		uint64_t *iova);
 void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm);
 void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
+struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv);
 struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj);
 void msm_gem_unpin_pages_locked(struct drm_gem_object *obj);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,

drivers/gpu/drm/msm/msm_gem_submit.c

@@ -194,6 +194,7 @@ out:
 static int submit_lookup_cmds(struct msm_gem_submit *submit,
 		struct drm_msm_gem_submit *args, struct drm_file *file)
 {
+	struct msm_context *ctx = file->driver_priv;
 	unsigned i;
 	size_t sz;
 	int ret = 0;
@@ -225,6 +226,20 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
 			goto out;
 		}

+		if (msm_context_is_vmbind(ctx)) {
+			if (submit_cmd.nr_relocs) {
+				ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero");
+				goto out;
+			}
+
+			if (submit_cmd.submit_idx || submit_cmd.submit_offset) {
+				ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero");
+				goto out;
+			}
+
+			submit->cmd[i].iova = submit_cmd.iova;
+		}
+
 		submit->cmd[i].type = submit_cmd.type;
 		submit->cmd[i].size = submit_cmd.size / 4;
 		submit->cmd[i].offset = submit_cmd.submit_offset / 4;
@@ -537,6 +552,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct msm_syncobj_post_dep *post_deps = NULL;
 	struct drm_syncobj **syncobjs_to_reset = NULL;
 	struct sync_file *sync_file = NULL;
+	unsigned cmds_to_parse;
 	int out_fence_fd = -1;
 	unsigned i;
 	int ret;
@@ -661,7 +677,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;

-	for (i = 0; i < args->nr_cmds; i++) {
+	cmds_to_parse = msm_context_is_vmbind(ctx) ? 0 : args->nr_cmds;
+
+	for (i = 0; i < cmds_to_parse; i++) {
 		struct drm_gem_object *obj;
 		uint64_t iova;
@@ -692,7 +710,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		goto out;
 	}

-	submit->nr_cmds = i;
+	submit->nr_cmds = args->nr_cmds;

 	idr_preload(GFP_KERNEL);

drivers/gpu/drm/msm/msm_gem_vma.c (diff suppressed because it is too large; this file carries the bulk of the new VM_BIND implementation)

include/uapi/drm/msm_drm.h

@@ -272,7 +272,10 @@ struct drm_msm_gem_submit_cmd {
 	__u32 size;           /* in, cmdstream size */
 	__u32 pad;
 	__u32 nr_relocs;      /* in, number of submit_reloc's */
-	__u64 relocs;         /* in, ptr to array of submit_reloc's */
+	union {
+		__u64 relocs; /* in, ptr to array of submit_reloc's */
+		__u64 iova;   /* cmdstream address (for VM_BIND contexts) */
+	};
 };

 /* Each buffer referenced elsewhere in the cmdstream submit (ie. the
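Usage sketch: on a VM_BIND context the kernel no longer parses or relocates the cmdstream (see the msm_gem_submit.c changes above), so userspace names each IB directly by GPU address through the new union member. A hedged sketch; ib_iova and ib_size are assumed to come from an earlier MSM_VM_BIND_OP_MAP:

/* Point a submit at an IB by GPU virtual address.  nr_relocs and
 * submit_idx/submit_offset must be zero on VM_BIND contexts, per
 * submit_lookup_cmds().
 */
struct drm_msm_gem_submit_cmd cmd = {
	.type = MSM_SUBMIT_CMD_BUF,
	.size = ib_size,        /* cmdstream size in bytes */
	.iova = ib_iova,        /* takes the slot formerly used by .relocs */
};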
@@ -339,7 +342,74 @@ struct drm_msm_gem_submit {
 	__u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */
 	__u32 syncobj_stride;  /* in, stride of syncobj arrays. */
 	__u32 pad;             /* in, reserved for future use, always 0. */
+};
+
+#define MSM_VM_BIND_OP_UNMAP	0
+#define MSM_VM_BIND_OP_MAP	1
+#define MSM_VM_BIND_OP_MAP_NULL	2
+
+#define MSM_VM_BIND_OP_DUMP	1
+#define MSM_VM_BIND_OP_FLAGS ( \
+		MSM_VM_BIND_OP_DUMP | \
+		0)
+
+/**
+ * struct drm_msm_vm_bind_op - bind/unbind op to run
+ */
+struct drm_msm_vm_bind_op {
+	/** @op: one of MSM_VM_BIND_OP_x */
+	__u32 op;
+	/** @handle: GEM object handle, MBZ for UNMAP or MAP_NULL */
+	__u32 handle;
+	/** @obj_offset: Offset into GEM object, MBZ for UNMAP or MAP_NULL */
+	__u64 obj_offset;
+	/** @iova: Address to operate on */
+	__u64 iova;
+	/** @range: Number of bytes to map/unmap */
+	__u64 range;
+	/** @flags: Bitmask of MSM_VM_BIND_OP_FLAG_x */
+	__u32 flags;
+	/** @pad: MBZ */
+	__u32 pad;
+};
+
+#define MSM_VM_BIND_FENCE_FD_IN		0x00000001
+#define MSM_VM_BIND_FENCE_FD_OUT	0x00000002
+#define MSM_VM_BIND_FLAGS ( \
+		MSM_VM_BIND_FENCE_FD_IN | \
+		MSM_VM_BIND_FENCE_FD_OUT | \
+		0)
+
+/**
+ * struct drm_msm_vm_bind - Input of &DRM_IOCTL_MSM_VM_BIND
+ */
+struct drm_msm_vm_bind {
+	/** @flags: in, bitmask of MSM_VM_BIND_x */
+	__u32 flags;
+	/** @nr_ops: the number of bind ops in this ioctl */
+	__u32 nr_ops;
+	/** @fence_fd: in/out fence fd (see MSM_VM_BIND_FENCE_FD_IN/OUT) */
+	__s32 fence_fd;
+	/** @queue_id: in, submitqueue id */
+	__u32 queue_id;
+	/** @in_syncobjs: in, ptr to array of drm_msm_gem_syncobj */
+	__u64 in_syncobjs;
+	/** @out_syncobjs: in, ptr to array of drm_msm_gem_syncobj */
+	__u64 out_syncobjs;
+	/** @nr_in_syncobjs: in, number of entries in in_syncobj */
+	__u32 nr_in_syncobjs;
+	/** @nr_out_syncobjs: in, number of entries in out_syncobj */
+	__u32 nr_out_syncobjs;
+	/** @syncobj_stride: in, stride of syncobj arrays */
+	__u32 syncobj_stride;
+	/** @op_stride: sizeof each struct drm_msm_vm_bind_op in @ops */
+	__u32 op_stride;
+	union {
+		/** @op: used if nr_ops == 1 */
+		struct drm_msm_vm_bind_op op;
+		/** @ops: userptr to array of drm_msm_vm_bind_op if nr_ops > 1 */
+		__u64 ops;
+	};
 };

 #define MSM_WAIT_FENCE_BOOST	0x00000001
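Usage sketch: for a single op, the struct can be passed inline through the union, with no separate ops array. A hedged sketch under assumed names (bo_handle, gpu_va, bo_size; queue_id from an earlier submitqueue creation):

/* Map one BO into the VM at a userspace-chosen address with a single
 * inline op (nr_ops == 1, so the union's .op member is used).
 */
struct drm_msm_vm_bind bind = {
	.nr_ops   = 1,
	.queue_id = queue_id,   /* from DRM_IOCTL_MSM_SUBMITQUEUE_NEW */
	.op = {
		.op     = MSM_VM_BIND_OP_MAP,
		.handle = bo_handle,
		.iova   = gpu_va,
		.range  = bo_size,
	},
};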
@@ -435,6 +505,7 @@ struct drm_msm_submitqueue_query {
 #define DRM_MSM_SUBMITQUEUE_NEW        0x0A
 #define DRM_MSM_SUBMITQUEUE_CLOSE      0x0B
 #define DRM_MSM_SUBMITQUEUE_QUERY      0x0C
+#define DRM_MSM_VM_BIND                0x0D

 #define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param)
@@ -448,6 +519,7 @@ struct drm_msm_submitqueue_query {
 #define DRM_IOCTL_MSM_SUBMITQUEUE_NEW    DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
 #define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE  DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
 #define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY  DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query)
+#define DRM_IOCTL_MSM_VM_BIND            DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_VM_BIND, struct drm_msm_vm_bind)
 #if defined(__cplusplus)
 }
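Usage sketch: batching several ops with an out-fence, again hedged (ops[] is assumed to be a populated array of struct drm_msm_vm_bind_op; the out-fence is returned through fence_fd when MSM_VM_BIND_FENCE_FD_OUT is set):

/* Submit an array of bind/unbind ops on the given submitqueue and get
 * back a sync_file fd that signals when the binds have executed.
 */
struct drm_msm_vm_bind bind = {
	.flags     = MSM_VM_BIND_FENCE_FD_OUT,
	.nr_ops    = nr_ops,                    /* > 1, so .ops is used */
	.queue_id  = queue_id,
	.op_stride = sizeof(struct drm_msm_vm_bind_op),
	.ops       = (__u64)(uintptr_t)ops,
};

int ret = drmIoctl(fd, DRM_IOCTL_MSM_VM_BIND, &bind);
if (!ret)
	fence_fd = bind.fence_fd;               /* out-fence for the binds */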