drm/msm: Add VM_BIND submitqueue
This submitqueue type isn't tied to a hw ringbuffer, but instead executes
on the CPU for performing async VM_BIND ops.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661517/
commit 92395af63a
parent cefb919cfa
@@ -53,6 +53,13 @@ struct msm_gem_vm {
 	/** @base: Inherit from drm_gpuvm. */
 	struct drm_gpuvm base;
 
+	/**
+	 * @sched: Scheduler used for asynchronous VM_BIND request.
+	 *
+	 * Unused for kernel managed VMs (where all operations are synchronous).
+	 */
+	struct drm_gpu_scheduler sched;
+
 	/**
 	 * @mm: Memory management for kernel managed VA allocations
 	 *
@@ -71,6 +78,9 @@ struct msm_gem_vm {
 	 */
 	struct pid *pid;
 
+	/** @last_fence: Fence for last pending work scheduled on the VM */
+	struct dma_fence *last_fence;
+
 	/** @faults: the number of GPU hangs associated with this address space */
 	int faults;
 
@@ -100,6 +110,8 @@ struct drm_gpuvm *
 msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
 		  u64 va_start, u64 va_size, bool managed);
 
+void msm_gem_vm_close(struct drm_gpuvm *gpuvm);
+
 struct msm_fence_context;
 
 #define MSM_VMA_DUMP (DRM_GPUVA_USERBITS << 0)
@@ -4,6 +4,7 @@
  * Author: Rob Clark <robdclark@gmail.com>
  */
 
+#include <linux/dma-fence-unwrap.h>
 #include <linux/file.h>
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
@@ -259,8 +260,29 @@ out:
 static int submit_lock_objects(struct msm_gem_submit *submit)
 {
 	unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+	struct drm_exec *exec = &submit->exec;
 	int ret;
 
+	if (msm_context_is_vmbind(submit->queue->ctx)) {
+		flags |= DRM_EXEC_IGNORE_DUPLICATES;
+
+		drm_exec_init(&submit->exec, flags, submit->nr_bos);
+
+		drm_exec_until_all_locked (&submit->exec) {
+			ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
+			drm_exec_retry_on_contention(exec);
+			if (ret)
+				return ret;
+
+			ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
+			drm_exec_retry_on_contention(exec);
+			if (ret)
+				return ret;
+		}
+
+		return 0;
+	}
+
 	drm_exec_init(&submit->exec, flags, submit->nr_bos);
 
 	drm_exec_until_all_locked (&submit->exec) {
@@ -268,20 +290,17 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
 					  drm_gpuvm_resv_obj(submit->vm));
 		drm_exec_retry_on_contention(&submit->exec);
 		if (ret)
-			goto error;
+			return ret;
 		for (unsigned i = 0; i < submit->nr_bos; i++) {
 			struct drm_gem_object *obj = submit->bos[i].obj;
 			ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
 			drm_exec_retry_on_contention(&submit->exec);
 			if (ret)
-				goto error;
+				return ret;
 		}
 	}
 
 	return 0;
-
-error:
-	return ret;
 }
 
 static int submit_fence_sync(struct msm_gem_submit *submit)
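
The VM_BIND path above follows the usual drm_exec transaction pattern: try to lock the VM's resv and every object mapped in it, and restart the block whenever a ww-mutex backoff occurs. A generic sketch of that pattern (illustrative only, not the driver's code; the helper name and the caller-side drm_exec_fini() are assumptions):

/*
 * Illustrative sketch of the drm_exec locking loop used above, not the
 * driver's actual code.  drm_exec_retry_on_contention() restarts the
 * drm_exec_until_all_locked() block on a ww-mutex backoff (-EDEADLK),
 * so when the loop exits normally everything is locked.  The caller is
 * expected to call drm_exec_fini() when done, including on error.
 */
#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>

static int lock_vm_and_objects(struct drm_gpuvm *vm, struct drm_exec *exec,
			       unsigned int num_fences)
{
	int ret;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
			    DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(exec) {
		/* Lock the VM's common dma_resv object: */
		ret = drm_gpuvm_prepare_vm(vm, exec, num_fences);
		drm_exec_retry_on_contention(exec);
		if (ret)
			return ret;

		/* Lock every external object mapped in the VM: */
		ret = drm_gpuvm_prepare_objects(vm, exec, num_fences);
		drm_exec_retry_on_contention(exec);
		if (ret)
			return ret;
	}

	return 0;
}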
@@ -367,9 +386,18 @@ static void submit_unpin_objects(struct msm_gem_submit *submit)
 
 static void submit_attach_object_fences(struct msm_gem_submit *submit)
 {
-	int i;
+	struct msm_gem_vm *vm = to_msm_vm(submit->vm);
+	struct dma_fence *last_fence;
 
-	for (i = 0; i < submit->nr_bos; i++) {
+	if (msm_context_is_vmbind(submit->queue->ctx)) {
+		drm_gpuvm_resv_add_fence(submit->vm, &submit->exec,
+					 submit->user_fence,
+					 DMA_RESV_USAGE_BOOKKEEP,
+					 DMA_RESV_USAGE_BOOKKEEP);
+		return;
+	}
+
+	for (unsigned i = 0; i < submit->nr_bos; i++) {
 		struct drm_gem_object *obj = submit->bos[i].obj;
 
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -379,6 +407,10 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
 			dma_resv_add_fence(obj->resv, submit->user_fence,
 					   DMA_RESV_USAGE_READ);
 	}
+
+	last_fence = vm->last_fence;
+	vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+	dma_fence_put(last_fence);
 }
 
 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
@@ -537,6 +569,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (!queue)
 		return -ENOENT;
 
+	if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) {
+		ret = UERR(EINVAL, dev, "Invalid queue type");
+		goto out_post_unlock;
+	}
+
 	ring = gpu->rb[queue->ring_nr];
 
 	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
@@ -727,6 +764,18 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
 	submit_attach_object_fences(submit);
 
+	if (msm_context_is_vmbind(ctx)) {
+		/*
+		 * If we are not using VM_BIND, submit_pin_vmas() will validate
+		 * just the BOs attached to the submit.  In that case we don't
+		 * need to validate the _entire_ vm, because userspace tracked
+		 * what BOs are associated with the submit.
+		 */
+		ret = drm_gpuvm_validate(submit->vm, &submit->exec);
+		if (ret)
+			goto out;
+	}
+
 	/* The scheduler owns a ref now: */
 	msm_gem_submit_get(submit);
 
@@ -16,6 +16,7 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm)
 	drm_mm_takedown(&vm->mm);
 	if (vm->mmu)
 		vm->mmu->funcs->destroy(vm->mmu);
+	dma_fence_put(vm->last_fence);
 	put_pid(vm->pid);
 	kfree(vm);
 }
@@ -154,6 +155,9 @@ static const struct drm_gpuvm_ops msm_gpuvm_ops = {
 	.vm_free = msm_gem_vm_free,
 };
 
+static const struct drm_sched_backend_ops msm_vm_bind_ops = {
+};
+
 /**
  * msm_gem_vm_create() - Create and initialize a &msm_gem_vm
  * @drm: the drm device
@@ -195,6 +199,21 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
 		goto err_free_vm;
 	}
 
+	if (!managed) {
+		struct drm_sched_init_args args = {
+			.ops = &msm_vm_bind_ops,
+			.num_rqs = 1,
+			.credit_limit = 1,
+			.timeout = MAX_SCHEDULE_TIMEOUT,
+			.name = "msm-vm-bind",
+			.dev = drm->dev,
+		};
+
+		ret = drm_sched_init(&vm->sched, &args);
+		if (ret)
+			goto err_free_dummy;
+	}
+
 	drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem,
 		       va_start, va_size, 0, 0, &msm_gpuvm_ops);
 	drm_gem_object_put(dummy_gem);
@@ -206,8 +225,60 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
 
 	return &vm->base;
 
+err_free_dummy:
+	drm_gem_object_put(dummy_gem);
+
 err_free_vm:
 	kfree(vm);
 	return ERR_PTR(ret);
 }
+
+/**
+ * msm_gem_vm_close() - Close a VM
+ * @gpuvm: The VM to close
+ *
+ * Called when the drm device file is closed, to tear down VM related resources
+ * (which will drop refcounts to GEM objects that were still mapped into the
+ * VM at the time).
+ */
+void
+msm_gem_vm_close(struct drm_gpuvm *gpuvm)
+{
+	struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+	struct drm_gpuva *vma, *tmp;
+
+	/*
+	 * For kernel managed VMs, the VMAs are torn down when the handle is
+	 * closed, so nothing more to do.
+	 */
+	if (vm->managed)
+		return;
+
+	if (vm->last_fence)
+		dma_fence_wait(vm->last_fence, false);
+
+	/* Kill the scheduler now, so we aren't racing with it for cleanup: */
+	drm_sched_stop(&vm->sched, NULL);
+	drm_sched_fini(&vm->sched);
+
+	/* Tear down any remaining mappings: */
+	dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL);
+	drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) {
+		struct drm_gem_object *obj = vma->gem.obj;
+
+		if (obj && obj->resv != drm_gpuvm_resv(gpuvm)) {
+			drm_gem_object_get(obj);
+			msm_gem_lock(obj);
+		}
+
+		msm_gem_vma_unmap(vma);
+		msm_gem_vma_close(vma);
+
+		if (obj && obj->resv != drm_gpuvm_resv(gpuvm)) {
+			msm_gem_unlock(obj);
+			drm_gem_object_put(obj);
+		}
+	}
+	dma_resv_unlock(drm_gpuvm_resv(gpuvm));
+}
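
msm_vm_bind_ops is left empty here, and nothing is queued on the new scheduler yet. For orientation only, a generic drm_sched backend usually provides run_job() returning a fence that signals when the work completes and free_job() to release it. The sketch below is not the msm implementation; "struct my_job" and its done_fence are assumptions:

/*
 * Generic illustration of struct drm_sched_backend_ops, not the msm
 * driver's VM_BIND backend (which this patch intentionally leaves empty).
 */
#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>

struct my_job {
	struct drm_sched_job base;
	struct dma_fence *done_fence;	/* signalled when the work completes */
};

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/*
	 * Kick off the actual work here (for VM_BIND that would be CPU-side
	 * pgtable updates) and hand the scheduler a fence that signals on
	 * completion.
	 */
	return dma_fence_get(job->done_fence);
}

static void my_free_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	dma_fence_put(job->done_fence);
	drm_sched_job_cleanup(sched_job);
	kfree(job);
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.run_job = my_run_job,
	.free_job = my_free_job,
};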
@@ -564,6 +564,9 @@ struct msm_gpu_submitqueue {
 	struct mutex lock;
 	struct kref ref;
 	struct drm_sched_entity *entity;
+
+	/** @_vm_bind_entity: used for @entity pointer for VM_BIND queues */
+	struct drm_sched_entity _vm_bind_entity[0];
 };
 
 struct msm_gpu_state_bo {
@@ -72,6 +72,9 @@ void msm_submitqueue_destroy(struct kref *kref)
 
 	idr_destroy(&queue->fence_idr);
 
+	if (queue->entity == &queue->_vm_bind_entity[0])
+		drm_sched_entity_destroy(queue->entity);
+
 	msm_context_put(queue->ctx);
 
 	kfree(queue);
@@ -102,7 +105,7 @@ struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
 
 void msm_submitqueue_close(struct msm_context *ctx)
 {
-	struct msm_gpu_submitqueue *entry, *tmp;
+	struct msm_gpu_submitqueue *queue, *tmp;
 
 	if (!ctx)
 		return;
@@ -111,10 +114,17 @@ void msm_submitqueue_close(struct msm_context *ctx)
 	 * No lock needed in close and there won't
 	 * be any more user ioctls coming our way
 	 */
-	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
-		list_del(&entry->node);
-		msm_submitqueue_put(entry);
+	list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) {
+		if (queue->entity == &queue->_vm_bind_entity[0])
+			drm_sched_entity_flush(queue->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+		list_del(&queue->node);
+		msm_submitqueue_put(queue);
 	}
+
+	if (!ctx->vm)
+		return;
+
+	msm_gem_vm_close(ctx->vm);
 }
 
 static struct drm_sched_entity *
@@ -160,8 +170,6 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx,
 	struct msm_drm_private *priv = drm->dev_private;
 	struct msm_gpu_submitqueue *queue;
 	enum drm_sched_priority sched_prio;
-	extern int enable_preemption;
-	bool preemption_supported;
 	unsigned ring_nr;
 	int ret;
 
@@ -171,26 +179,53 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx,
 	if (!priv->gpu)
 		return -ENODEV;
 
-	preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0;
+	if (flags & MSM_SUBMITQUEUE_VM_BIND) {
+		unsigned sz;
 
-	if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
-		return -EINVAL;
+		/* Not allowed for kernel managed VMs (ie. kernel allocs VA) */
+		if (!msm_context_is_vmbind(ctx))
+			return -EINVAL;
 
-	ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
-	if (ret)
-		return ret;
+		if (prio)
+			return -EINVAL;
 
-	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+		sz = struct_size(queue, _vm_bind_entity, 1);
+		queue = kzalloc(sz, GFP_KERNEL);
+	} else {
+		extern int enable_preemption;
+		bool preemption_supported =
+			priv->gpu->nr_rings == 1 && enable_preemption != 0;
+
+		if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
+			return -EINVAL;
+
+		ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
+		if (ret)
+			return ret;
+
+		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+	}
 
 	if (!queue)
 		return -ENOMEM;
 
 	kref_init(&queue->ref);
 	queue->flags = flags;
-	queue->ring_nr = ring_nr;
 
-	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
-					 ring_nr, sched_prio);
+	if (flags & MSM_SUBMITQUEUE_VM_BIND) {
+		struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched;
+
+		queue->entity = &queue->_vm_bind_entity[0];
+
+		drm_sched_entity_init(queue->entity, DRM_SCHED_PRIORITY_KERNEL,
+				      &sched, 1, NULL);
+	} else {
+		queue->ring_nr = ring_nr;
+
+		queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+						 ring_nr, sched_prio);
+	}
 
 	if (IS_ERR(queue->entity)) {
 		ret = PTR_ERR(queue->entity);
 		kfree(queue);
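
For VM_BIND queues, the scheduler entity is embedded in the submitqueue itself via the _vm_bind_entity[0] trailing array, and the allocation is sized with struct_size(). A stand-alone illustration of that allocation pattern ("struct demo_queue" and demo_queue_alloc() are hypothetical names, not from the patch):

/*
 * Stand-alone illustration of the struct_size() + trailing-array pattern
 * used for _vm_bind_entity above.  struct demo_queue is hypothetical.
 */
#include <drm/gpu_scheduler.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_queue {
	u32 flags;
	struct drm_sched_entity *entity;	/* points at _entity[0] when embedded */
	struct drm_sched_entity _entity[];	/* optional trailing storage */
};

static struct demo_queue *demo_queue_alloc(bool embed_entity)
{
	struct demo_queue *q;
	size_t sz;

	/*
	 * struct_size() only uses sizeof() on the pointed-to type, so it is
	 * safe to evaluate before q is assigned: sizeof(*q) plus one
	 * _entity[] element, with overflow checking.
	 */
	sz = embed_entity ? struct_size(q, _entity, 1) : sizeof(*q);
	q = kzalloc(sz, GFP_KERNEL);
	if (q && embed_entity)
		q->entity = &q->_entity[0];
	return q;
}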
@@ -385,12 +385,19 @@ struct drm_msm_gem_madvise {
 /*
  * Draw queues allow the user to set specific submission parameter. Command
  * submissions specify a specific submitqueue to use.  ID 0 is reserved for
- * backwards compatibility as a "default" submitqueue
+ * backwards compatibility as a "default" submitqueue.
+ *
+ * Because VM_BIND async updates happen on the CPU, they must run on a
+ * virtual queue created with the flag MSM_SUBMITQUEUE_VM_BIND.  If we had
+ * a way to do pgtable updates on the GPU, we could drop this restriction.
 */
 
 #define MSM_SUBMITQUEUE_ALLOW_PREEMPT	0x00000001
+#define MSM_SUBMITQUEUE_VM_BIND 0x00000002 /* virtual queue for VM_BIND ops */
 
 #define MSM_SUBMITQUEUE_FLAGS		    ( \
 		MSM_SUBMITQUEUE_ALLOW_PREEMPT | \
+		MSM_SUBMITQUEUE_VM_BIND | \
 		0)
 
 /*
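
From userspace, creating one of these queues is the existing SUBMITQUEUE_NEW ioctl with the new flag. A minimal sketch, assuming an msm DRM fd whose context already uses a user-managed (VM_BIND capable) VM, since the kernel otherwise rejects the flag; the VM_BIND submit path itself is not part of this diff, and the include paths are an assumption:

/*
 * Minimal userspace sketch (not from the patch): create a VM_BIND
 * submitqueue using the flag added above.  'fd' is an open msm DRM node;
 * msm_drm.h is the kernel uapi header shown in this diff.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "drm.h"	/* uapi drm header; exact include path depends on setup */
#include "msm_drm.h"

static int create_vm_bind_queue(int fd, uint32_t *queue_id)
{
	struct drm_msm_submitqueue req;

	memset(&req, 0, sizeof(req));
	req.flags = MSM_SUBMITQUEUE_VM_BIND;	/* new flag from this patch */
	req.prio = 0;				/* non-zero prio is rejected */

	if (ioctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req))
		return -errno;

	*queue_id = req.id;	/* use this id for later VM_BIND submissions */
	return 0;
}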