mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
Validate scatter-gather table size matches buffer object size before mapping. Break mapping early if the table exceeds buffer size to prevent overwriting existing mappings. Also validate the table is not smaller than buffer size to avoid unmapped regions that trigger MMU translation faults. Log error and fail mapping operation on size mismatch to prevent data corruption from mismatched host memory locations and NPU addresses. Unmap any partially mapped buffer on failure. Reviewed-by: Lizhi Hou <lizhi.hou@amd.com> Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com> Link: https://patch.msgid.link/20251215070933.520377-1-karol.wachowski@linux.intel.com
53 lines
1.8 KiB
C
53 lines
1.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Copyright (C) 2020-2023 Intel Corporation
|
|
*/
|
|
|
|
#ifndef __IVPU_MMU_CONTEXT_H__
|
|
#define __IVPU_MMU_CONTEXT_H__
|
|
|
|
#include <drm/drm_mm.h>
|
|
|
|
struct ivpu_device;
|
|
struct ivpu_file_priv;
|
|
struct ivpu_addr_range;
|
|
|
|
/* Entries per page-table level (512 * 8-byte descriptors = one 4 KiB table page) */
#define IVPU_MMU_PGTABLE_ENTRIES 512ull
|
|
|
|
/*
 * struct ivpu_mmu_pgtable - CPU-side bookkeeping for a 4-level NPU page table.
 *
 * The *_ptrs arrays hold kernel virtual addresses of the lower-level tables so
 * they can be walked/freed from the CPU; the device itself walks the tables
 * through their DMA addresses. NOTE(review): level naming (pgd/pud/pmd/pte)
 * presumably mirrors the generic Linux page-table terminology - confirm
 * against the walker in ivpu_mmu_context.c.
 */
struct ivpu_mmu_pgtable {
	/* [pgd][pud][pmd] -> kernel pointer to a PTE table page */
	u64 ***pte_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	/* [pgd][pud] -> kernel pointer to a PMD table page */
	u64 **pmd_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	/* [pgd] -> kernel pointer to a PUD table page */
	u64 *pud_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	/* Kernel virtual address of the top-level (PGD) table */
	u64 *pgd_dma_ptr;
	/* DMA (device-visible) address of the PGD, programmed into the MMU */
	dma_addr_t pgd_dma;
};
|
|
|
|
/*
 * struct ivpu_mmu_context - one NPU MMU address space.
 *
 * Pairs a drm_mm allocator (which hands out device virtual address ranges)
 * with the page table that backs those ranges, identified on the device by
 * @id. NOTE(review): @id presumably maps to an MMU context/stream ID used by
 * the hardware - confirm against ivpu_mmu.c.
 */
struct ivpu_mmu_context {
	struct mutex lock; /* Protects: mm, pgtable, is_cd_valid */
	/* Allocator for device virtual address ranges within this context */
	struct drm_mm mm;
	/* Page table translating this context's device virtual addresses */
	struct ivpu_mmu_pgtable pgtable;
	/* True when the context descriptor for this context is valid on the device */
	bool is_cd_valid;
	/* Context identifier */
	u32 id;
};
|
|
|
|
/* Per-context setup/teardown of an MMU address space. */
void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id);
void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);

/* Device-wide (global) context setup/teardown. */
void ivpu_mmu_global_context_init(struct ivpu_device *vdev);
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);

/* Reserved context setup/teardown; init may fail, returns 0 or negative errno. */
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);

/*
 * Allocate/release a device virtual address range from the context's drm_mm.
 * Returns 0 or a negative errno.
 */
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
				 u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);

/*
 * Map/unmap a scatter-gather table at @vpu_addr in the context's page table.
 * @bo_size is the buffer object size the sg_table must match (per the commit
 * that added it, mapping fails if the table size differs from @bo_size).
 * Returns 0 or a negative errno.
 */
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			 struct sg_table *sgt, size_t bo_size, bool llc_coherent, bool read_only);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				u64 vpu_addr, struct sg_table *sgt);

/* Mark an already-mapped range read-only. Returns 0 or a negative errno. */
int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				  u64 vpu_addr, size_t size);
|
|
|
|
#endif /* __IVPU_MMU_CONTEXT_H__ */
|