
Merge tag 'drm-intel-gt-next-2025-03-12' of https://gitlab.freedesktop.org/drm/i915/kernel into drm-next

UAPI Changes:

- Bump I915_PARAM_MMAP_GTT_VERSION to indicate support for partial mmaps (José Roberto de Souza)

Driver Changes:

Fixes/improvements/new stuff:

- Implement vmap/vunmap GEM object functions (Asbjørn Sloth Tønnesen)

Miscellaneous:

- Various register definition cleanups (Ville Syrjälä)
- Fix typo in a comment [gt/uc] (Yuichiro Tsuji)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Tvrtko Ursulin <tursulin@igalia.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Z9IXs5CzHHKScuQn@linux
Committed by Dave Airlie on 2025-03-25 08:21:06 +10:00 as commit cf05922d63.
9 changed files with 165 additions and 174 deletions


@@ -164,6 +164,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 4 - Support multiple fault handlers per object depending on object's
* backing storage (a.k.a. MMAP_OFFSET).
*
+ * 5 - Support multiple partial mmaps(mmap part of BO + unmap a offset, multiple
+ * times with different size and offset).
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -191,7 +194,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 4;
+ return 5;
}
static inline struct i915_gtt_view
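The bumped version is discoverable from userspace through GETPARAM. Below is a minimal, hypothetical userspace sketch (not part of this commit): it assumes an open DRM fd, an existing BO handle, and that a partial map is made by mmap()ing a sub-range at a page-aligned byte offset past the fake offset returned by DRM_IOCTL_I915_GEM_MMAP_GTT.

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Query I915_PARAM_MMAP_GTT_VERSION; >= 5 means partial mmaps are supported. */
static int gtt_mmap_version(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_VERSION,
		.value = &value,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;
	return value;
}

/* Map a single page in the middle of a BO through its GTT mmap offset.
 * 'handle' and 'page' are illustrative inputs. */
static void *map_partial(int fd, uint32_t handle, uint64_t page)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (gtt_mmap_version(fd) < 5)
		return MAP_FAILED;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset + page * 4096);
}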


@@ -873,6 +873,30 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
return lmem_placement;
}
+ static int i915_gem_vmap_object(struct drm_gem_object *gem_obj,
+ struct iosys_map *map)
+ {
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ void *vaddr;
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ iosys_map_set_vaddr(map, vaddr);
+ return 0;
+ }
+ static void i915_gem_vunmap_object(struct drm_gem_object *gem_obj,
+ struct iosys_map *map)
+ {
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+ }
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
@@ -896,6 +920,8 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
.free = i915_gem_free_object,
.close = i915_gem_close_object,
.export = i915_gem_prime_export,
+ .vmap = i915_gem_vmap_object,
+ .vunmap = i915_gem_vunmap_object,
};
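With these hooks wired up, generic DRM code can map the object through the usual GEM interface. A minimal sketch of a hypothetical in-kernel consumer (dma-resv locking elided; copy_into_bo is an illustrative name, not part of this commit):

#include <drm/drm_gem.h>
#include <linux/iosys-map.h>

/* Copy 'len' bytes into a GEM object via the new vmap/vunmap hooks. */
static int copy_into_bo(struct drm_gem_object *obj, const void *data, size_t len)
{
	struct iosys_map map;
	int ret;

	ret = drm_gem_vmap(obj, &map);	/* reaches i915_gem_vmap_object() */
	if (ret)
		return ret;

	iosys_map_memcpy_to(&map, 0, data, len);
	drm_gem_vunmap(obj, &map);	/* reaches i915_gem_vunmap_object() */
	return 0;
}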
/**


@@ -769,9 +769,8 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt)
if (MEDIA_VER_FULL(i915) < IP_VER(12, 55))
media_fuse = ~media_fuse;
- vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
+ vdbox_mask = REG_FIELD_GET(GEN11_GT_VDBOX_DISABLE_MASK, media_fuse);
+ vebox_mask = REG_FIELD_GET(GEN11_GT_VEBOX_DISABLE_MASK, media_fuse);
if (MEDIA_VER_FULL(i915) >= IP_VER(12, 55)) {
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
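REG_FIELD_GET(mask, value) extracts the field described by the mask and shifts it down to bit 0, so separate *_SHIFT defines become unnecessary. An illustrative worked example with a made-up fuse value:

/* GEN11_GT_VDBOX_DISABLE_MASK covers bits 7:0,
 * GEN11_GT_VEBOX_DISABLE_MASK covers bits 19:16. */
u32 media_fuse = 0x000a00f0;	/* made-up value */
u32 vdbox = REG_FIELD_GET(GEN11_GT_VDBOX_DISABLE_MASK, media_fuse);	/* 0xf0 */
u32 vebox = REG_FIELD_GET(GEN11_GT_VEBOX_DISABLE_MASK, media_fuse);	/* 0x0a */
/* Each call is equivalent to the old (value & MASK) >> SHIFT form. */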


@@ -302,25 +302,48 @@ static void gen6_check_faults(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned long fault;
for_each_engine(engine, gt, id) {
+ u32 fault;
fault = GEN6_RING_FAULT_REG_READ(engine);
if (fault & RING_FAULT_VALID) {
gt_dbg(gt, "Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
+ "\tAddr: 0x%08x\n"
"\tAddress space: %s\n"
- "\tSource ID: %ld\n"
- "\tType: %ld\n",
- fault & PAGE_MASK,
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & RING_FAULT_VADDR_MASK,
fault & RING_FAULT_GTTSEL_MASK ?
"GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ REG_FIELD_GET(RING_FAULT_SRCID_MASK, fault),
+ REG_FIELD_GET(RING_FAULT_FAULT_TYPE_MASK, fault));
}
}
}
+ static void gen8_report_fault(struct intel_gt *gt, u32 fault,
+ u32 fault_data0, u32 fault_data1)
+ {
+ u64 fault_addr;
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+ gt_dbg(gt, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ REG_FIELD_GET(RING_FAULT_ENGINE_ID_MASK, fault),
+ REG_FIELD_GET(RING_FAULT_SRCID_MASK, fault),
+ REG_FIELD_GET(RING_FAULT_FAULT_TYPE_MASK, fault));
+ }
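The new helper keeps the fault-address reconstruction in one place: FAULT_TLB_DATA0 carries virtual-address bits 43:12, and the low nibble of FAULT_TLB_DATA1 (FAULT_VA_HIGH_BITS) carries bits 47:44. A worked example with made-up register values:

u32 fault_data0 = 0x00012345, fault_data1 = 0x13;	/* made-up values */
u64 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
		 ((u64)fault_data0 << 12);
/* == 0x300012345000, printed as "Addr: 0x00003000_12345000";
 * bit 4 (FAULT_GTT_SEL) being set selects "GGTT". */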
static void xehp_check_faults(struct intel_gt *gt)
{
u32 fault;
@@ -333,28 +356,10 @@ static void xehp_check_faults(struct intel_gt *gt)
* toward the primary instance.
*/
fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
- if (fault & RING_FAULT_VALID) {
- u32 fault_data0, fault_data1;
- u64 fault_addr;
- fault_data0 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0);
- fault_data1 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1);
- fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
- ((u64)fault_data0 << 12);
- gt_dbg(gt, "Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr), lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
+ if (fault & RING_FAULT_VALID)
+ gen8_report_fault(gt, fault,
+ intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0),
+ intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1));
}
static void gen8_check_faults(struct intel_gt *gt)
@@ -374,28 +379,10 @@ static void gen8_check_faults(struct intel_gt *gt)
}
fault = intel_uncore_read(uncore, fault_reg);
- if (fault & RING_FAULT_VALID) {
- u32 fault_data0, fault_data1;
- u64 fault_addr;
- fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
- fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
- fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
- ((u64)fault_data0 << 12);
- gt_dbg(gt, "Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr), lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
+ if (fault & RING_FAULT_VALID)
+ gen8_report_fault(gt, fault,
+ intel_uncore_read(uncore, fault_data0_reg),
+ intel_uncore_read(uncore, fault_data1_reg));
}
void intel_gt_check_and_clear_faults(struct intel_gt *gt)


@@ -35,9 +35,7 @@ static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
u32 f24_mhz = 24000000;
u32 f25_mhz = 25000000;
u32 f38_4_mhz = 38400000;
- u32 crystal_clock =
- (rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+ u32 crystal_clock = rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
switch (crystal_clock) {
case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
@@ -80,8 +78,7 @@ static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
* register increments from this frequency (it might
* increment only every few clock cycle).
*/
- freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ freq >>= 3 - REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, c0);
}
return freq;
@@ -102,8 +99,7 @@ static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
* register increments from this frequency (it might
* increment only every few clock cycle).
*/
- freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
- CTC_SHIFT_PARAMETER_SHIFT);
+ freq >>= 3 - REG_FIELD_GET(CTC_SHIFT_PARAMETER_MASK, ctc_reg);
}
return freq;
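The CTC shift parameter encodes how much slower the timestamp counter ticks than the crystal clock, so the effective counter frequency is crystal >> (3 - shift). A worked example, assuming a 38.4 MHz crystal:

/* GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER field == 0:
 *	freq = 38400000 >> (3 - 0) = 4800000 Hz
 * field == 2:
 *	freq = 38400000 >> (3 - 2) = 19200000 Hz */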


@@ -121,9 +121,8 @@ void intel_gt_mcr_init(struct intel_gt *gt)
gt->info.mslice_mask =
intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
GEN_DSS_PER_MSLICE);
- gt->info.mslice_mask |=
- (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
- GEN12_MEML3_EN_MASK);
+ gt->info.mslice_mask |= REG_FIELD_GET(GEN12_MEML3_EN_MASK,
+ intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3));
if (!gt->info.mslice_mask) /* should be impossible! */
gt_warn(gt, "mslice mask all zero!\n");


@@ -30,18 +30,15 @@
/* RPM unit config (Gen8+) */
#define RPM_CONFIG0 _MMIO(0xd00)
- #define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
- #define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
- #define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
- #define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
- #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
- #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
- #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0
- #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1
- #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2
- #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3
- #define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
- #define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
+ #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK REG_GENMASK(5, 3)
+ #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ REG_FIELD_PREP(GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 0)
+ #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ REG_FIELD_PREP(GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 1)
+ #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ REG_FIELD_PREP(GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 2)
+ #define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ REG_FIELD_PREP(GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 3)
+ #define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK REG_BIT(3)
+ #define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ REG_FIELD_PREP(GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 0)
+ #define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ REG_FIELD_PREP(GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 1)
+ #define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1)
#define RPM_CONFIG1 _MMIO(0xd04)
#define GEN10_GT_NOA_ENABLE (1 << 9)
@@ -326,6 +323,12 @@
_RING_FAULT_REG_VCS, \
_RING_FAULT_REG_VECS, \
_RING_FAULT_REG_BCS))
+ #define RING_FAULT_VADDR_MASK REG_GENMASK(31, 12) /* pre-bdw */
+ #define RING_FAULT_ENGINE_ID_MASK REG_GENMASK(16, 12) /* bdw+ */
+ #define RING_FAULT_GTTSEL_MASK REG_BIT(11) /* pre-bdw */
+ #define RING_FAULT_SRCID_MASK REG_GENMASK(10, 3)
+ #define RING_FAULT_FAULT_TYPE_MASK REG_GENMASK(2, 1) /* ivb+ */
+ #define RING_FAULT_VALID REG_BIT(0)
#define ERROR_GEN6 _MMIO(0x40a0)
@@ -385,6 +388,8 @@
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
+ #define FAULT_GTT_SEL REG_BIT(4)
+ #define FAULT_VA_HIGH_BITS REG_GENMASK(3, 0)
#define GEN11_GACB_PERF_CTRL _MMIO(0x4b80)
#define GEN11_HASH_CTRL_MASK (0x3 << 12 | 0xf << 0)
@@ -507,11 +512,12 @@
#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
+ #define GEN9_PGCTL_SS_ACK(subslice) REG_BIT(2 + (subslice) * 2)
+ #define GEN9_PGCTL_SLICE_ACK REG_BIT(0)
#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
((slice) % 3) * 0x4)
- #define GEN9_PGCTL_SLICE_ACK (1 << 0)
- #define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2))
- #define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
+ #define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? REG_GENMASK(6, 0) : REG_GENMASK(4, 0))
#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
@@ -519,14 +525,14 @@
#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
- #define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
- #define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
- #define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
- #define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
- #define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
- #define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
- #define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
- #define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
+ #define GEN9_PGCTL_SSB_EU311_ACK REG_BIT(14)
+ #define GEN9_PGCTL_SSB_EU210_ACK REG_BIT(12)
+ #define GEN9_PGCTL_SSB_EU19_ACK REG_BIT(10)
+ #define GEN9_PGCTL_SSB_EU08_ACK REG_BIT(8)
+ #define GEN9_PGCTL_SSA_EU311_ACK REG_BIT(6)
+ #define GEN9_PGCTL_SSA_EU210_ACK REG_BIT(4)
+ #define GEN9_PGCTL_SSA_EU19_ACK REG_BIT(2)
+ #define GEN9_PGCTL_SSA_EU08_ACK REG_BIT(0)
#define VF_PREEMPTION _MMIO(0x83a4)
#define PREEMPTION_VERTEX_COUNT REG_GENMASK(15, 0)
@@ -583,7 +589,7 @@
#define GEN10_L3BANK_MASK 0x0F
/* on Xe_HP the same fuses indicates mslices instead of L3 banks */
#define GEN12_MAX_MSLICES 4
- #define GEN12_MEML3_EN_MASK 0x0F
+ #define GEN12_MEML3_EN_MASK REG_GENMASK(3, 0)
#define HSW_PAVP_FUSE1 _MMIO(0x911c)
#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24)
@@ -593,37 +599,30 @@
#define HSW_F1_EU_DIS_6EUS 2
#define GEN8_FUSE2 _MMIO(0x9120)
- #define GEN8_F2_SS_DIS_SHIFT 21
- #define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
- #define GEN8_F2_S_ENA_SHIFT 25
- #define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
- #define GEN9_F2_SS_DIS_SHIFT 20
- #define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
- #define GEN10_F2_S_ENA_SHIFT 22
- #define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT)
- #define GEN10_F2_SS_DIS_SHIFT 18
- #define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+ #define GEN10_F2_S_ENA_MASK REG_GENMASK(27, 22)
+ #define GEN10_F2_SS_DIS_MASK REG_GENMASK(21, 18)
+ #define GEN8_F2_S_ENA_MASK REG_GENMASK(27, 25)
+ #define GEN9_F2_SS_DIS_MASK REG_GENMASK(23, 20)
+ #define GEN8_F2_SS_DIS_MASK REG_GENMASK(23, 21)
#define GEN8_EU_DISABLE0 _MMIO(0x9134)
#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
#define GEN11_EU_DISABLE _MMIO(0x9134)
- #define GEN8_EU_DIS0_S0_MASK 0xffffff
- #define GEN8_EU_DIS0_S1_SHIFT 24
- #define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
- #define GEN11_EU_DIS_MASK 0xFF
+ #define GEN8_EU_DIS0_S1_MASK REG_GENMASK(31, 24)
+ #define GEN8_EU_DIS0_S0_MASK REG_GENMASK(23, 0)
+ #define GEN11_EU_DIS_MASK REG_GENMASK(7, 0)
#define XEHP_EU_ENABLE _MMIO(0x9134)
- #define XEHP_EU_ENA_MASK 0xFF
+ #define XEHP_EU_ENA_MASK REG_GENMASK(7, 0)
#define GEN8_EU_DISABLE1 _MMIO(0x9138)
- #define GEN8_EU_DIS1_S1_MASK 0xffff
- #define GEN8_EU_DIS1_S2_SHIFT 16
- #define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
+ #define GEN8_EU_DIS1_S2_MASK REG_GENMASK(31, 16)
+ #define GEN8_EU_DIS1_S1_MASK REG_GENMASK(15, 0)
#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138)
- #define GEN11_GT_S_ENA_MASK 0xFF
+ #define GEN11_GT_S_ENA_MASK REG_GENMASK(7, 0)
#define GEN8_EU_DISABLE2 _MMIO(0x913c)
- #define GEN8_EU_DIS2_S2_MASK 0xff
+ #define GEN8_EU_DIS2_S2_MASK REG_GENMASK(7, 0)
#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913c)
#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913c)
@@ -631,9 +630,8 @@
#define GEN10_EU_DISABLE3 _MMIO(0x9140)
#define GEN10_EU_DIS_SS_MASK 0xff
#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
- #define GEN11_GT_VDBOX_DISABLE_MASK 0xff
- #define GEN11_GT_VEBOX_DISABLE_SHIFT 16
- #define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
+ #define GEN11_GT_VEBOX_DISABLE_MASK REG_GENMASK(19, 16)
+ #define GEN11_GT_VDBOX_DISABLE_MASK REG_GENMASK(7, 0)
#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
#define XEHPC_GT_COMPUTE_DSS_ENABLE_EXT _MMIO(0x9148)
@@ -881,11 +879,10 @@
/* GPM unit config (Gen9+) */
#define CTC_MODE _MMIO(0xa26c)
- #define CTC_SOURCE_PARAMETER_MASK 1
- #define CTC_SOURCE_CRYSTAL_CLOCK 0
- #define CTC_SOURCE_DIVIDE_LOGIC 1
- #define CTC_SHIFT_PARAMETER_SHIFT 1
- #define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
+ #define CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1)
+ #define CTC_SOURCE_PARAMETER_MASK REG_BIT(0)
+ #define CTC_SOURCE_CRYSTAL_CLOCK REG_FIELD_PREP(CTC_SOURCE_PARAMETER_MASK, 0)
+ #define CTC_SOURCE_DIVIDE_LOGIC REG_FIELD_PREP(CTC_SOURCE_PARAMETER_MASK, 1)
/* GPM MSG_IDLE */
#define MSG_IDLE_CS _MMIO(0x8000)
@@ -929,12 +926,12 @@
#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
- #define CHV_SS_PG_ENABLE (1 << 1)
- #define CHV_EU08_PG_ENABLE (1 << 9)
- #define CHV_EU19_PG_ENABLE (1 << 17)
- #define CHV_EU210_PG_ENABLE (1 << 25)
+ #define CHV_EU210_PG_ENABLE REG_BIT(25)
+ #define CHV_EU19_PG_ENABLE REG_BIT(17)
+ #define CHV_EU08_PG_ENABLE REG_BIT(9)
+ #define CHV_SS_PG_ENABLE REG_BIT(1)
#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
- #define CHV_EU311_PG_ENABLE (1 << 1)
+ #define CHV_EU311_PG_ENABLE REG_BIT(1)
#define GEN7_SARCHKMD _MMIO(0xb000)
#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
@@ -1038,17 +1035,12 @@
#define XEHP_FAULT_TLB_DATA0 MCR_REG(0xceb8)
#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc)
#define XEHP_FAULT_TLB_DATA1 MCR_REG(0xcebc)
- #define FAULT_VA_HIGH_BITS (0xf << 0)
- #define FAULT_GTT_SEL (1 << 4)
+ /* see GEN8_FAULT_TLB_DATA0/1 */
#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
#define XEHP_RING_FAULT_REG MCR_REG(0xcec4)
#define XELPMP_RING_FAULT_REG _MMIO(0xcec4)
- #define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
- #define RING_FAULT_GTTSEL_MASK (1 << 11)
- #define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
- #define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
- #define RING_FAULT_VALID (1 << 0)
+ /* see GEN8_RING_FAULT_REG */
#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
#define XEHP_GFX_TLB_INV_CR MCR_REG(0xced8)
@@ -1437,16 +1429,12 @@
#define XEHP_CCS_MODE_CSLICE(cslice, ccs) (ccs << (cslice * XEHP_CCS_MODE_CSLICE_WIDTH))
#define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
- #define CHV_FGT_DISABLE_SS0 (1 << 10)
- #define CHV_FGT_DISABLE_SS1 (1 << 11)
- #define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
- #define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
- #define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
- #define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
- #define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
- #define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
- #define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
- #define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+ #define CHV_FGT_EU_DIS_SS1_R1_MASK REG_GENMASK(31, 28)
+ #define CHV_FGT_EU_DIS_SS1_R0_MASK REG_GENMASK(27, 24)
+ #define CHV_FGT_EU_DIS_SS0_R1_MASK REG_GENMASK(23, 20)
+ #define CHV_FGT_EU_DIS_SS0_R0_MASK REG_GENMASK(19, 16)
+ #define CHV_FGT_DISABLE_SS1 REG_BIT(11)
+ #define CHV_FGT_DISABLE_SS0 REG_BIT(10)
#define BCS_SWCTRL _MMIO(0x22200)
#define BCS_SRC_Y REG_BIT(0)


@@ -236,7 +236,8 @@ static void xehp_sseu_info_init(struct intel_gt *gt)
GEN12_GT_COMPUTE_DSS_ENABLE,
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT);
- eu_en_fuse = intel_uncore_read(uncore, XEHP_EU_ENABLE) & XEHP_EU_ENA_MASK;
+ eu_en_fuse = REG_FIELD_GET(XEHP_EU_ENA_MASK,
+ intel_uncore_read(uncore, XEHP_EU_ENABLE));
if (HAS_ONE_EU_PER_FUSE_BIT(gt->i915))
eu_en = eu_en_fuse;
@@ -269,15 +270,15 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
* Although gen12 architecture supported multiple slices, TGL, RKL,
* DG1, and ADL only had a single slice.
*/
- s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
- GEN11_GT_S_ENA_MASK;
+ s_en = REG_FIELD_GET(GEN11_GT_S_ENA_MASK,
+ intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE));
drm_WARN_ON(&gt->i915->drm, s_en != 0x1);
g_dss_en = intel_uncore_read(uncore, GEN12_GT_GEOMETRY_DSS_ENABLE);
/* one bit per pair of EUs */
- eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
- GEN11_EU_DIS_MASK);
+ eu_en_fuse = ~REG_FIELD_GET(GEN11_EU_DIS_MASK,
+ intel_uncore_read(uncore, GEN11_EU_DISABLE));
for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
if (eu_en_fuse & BIT(eu))
@@ -306,14 +307,14 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
* Although gen11 architecture supported multiple slices, ICL and
* EHL/JSL only had a single slice in practice.
*/
- s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
- GEN11_GT_S_ENA_MASK;
+ s_en = REG_FIELD_GET(GEN11_GT_S_ENA_MASK,
+ intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE));
drm_WARN_ON(&gt->i915->drm, s_en != 0x1);
ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);
- eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
- GEN11_EU_DIS_MASK);
+ eu_en = ~REG_FIELD_GET(GEN11_EU_DIS_MASK,
+ intel_uncore_read(uncore, GEN11_EU_DISABLE));
gen11_compute_sseu_info(sseu, ss_en, eu_en);
@@ -335,10 +336,8 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
u8 disabled_mask =
- ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
- CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
- (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
- CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
+ REG_FIELD_GET(CHV_FGT_EU_DIS_SS0_R0_MASK, fuse) |
+ REG_FIELD_GET(CHV_FGT_EU_DIS_SS0_R1_MASK, fuse) << hweight32(CHV_FGT_EU_DIS_SS0_R0_MASK);
sseu->subslice_mask.hsw[0] |= BIT(0);
sseu_set_eus(sseu, 0, 0, ~disabled_mask & 0xFF);
@@ -346,10 +345,8 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
if (!(fuse & CHV_FGT_DISABLE_SS1)) {
u8 disabled_mask =
- ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
- CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
- (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
- CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
+ REG_FIELD_GET(CHV_FGT_EU_DIS_SS1_R0_MASK, fuse) |
+ REG_FIELD_GET(CHV_FGT_EU_DIS_SS1_R1_MASK, fuse) << hweight32(CHV_FGT_EU_DIS_SS1_R0_MASK);
sseu->subslice_mask.hsw[0] |= BIT(1);
sseu_set_eus(sseu, 0, 1, ~disabled_mask & 0xFF);
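hweight32() returns the number of set bits, so for a contiguous field mask it equals the field width; the conversions above and below use it in place of the old hard-coded shifts when stitching adjacent fuse fields together. Illustrative values:

hweight32(REG_GENMASK(19, 16));	/* == 4, width of CHV_FGT_EU_DIS_SS0_R0_MASK */
hweight32(REG_GENMASK(31, 24));	/* == 8, width of GEN8_EU_DIS0_S1_MASK */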
@@ -385,7 +382,7 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
int s, ss;
fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
- sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ sseu->slice_mask = REG_FIELD_GET(GEN8_F2_S_ENA_MASK, fuse2);
/* BXT has a single slice and at most 3 subslices. */
intel_sseu_set_info(sseu, IS_GEN9_LP(i915) ? 1 : 3,
@@ -396,8 +393,7 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
* to each of the enabled slices.
*/
subslice_mask = (1 << sseu->max_subslices) - 1;
- subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
- GEN9_F2_SS_DIS_SHIFT);
+ subslice_mask &= ~REG_FIELD_GET(GEN9_F2_SS_DIS_MASK, fuse2);
/*
* Iterate through enabled slices and subslices to
@@ -490,7 +486,7 @@ static void bdw_sseu_info_init(struct intel_gt *gt)
u32 eu_disable0, eu_disable1, eu_disable2;
fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
- sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ sseu->slice_mask = REG_FIELD_GET(GEN8_F2_S_ENA_MASK, fuse2);
intel_sseu_set_info(sseu, 3, 3, 8);
/*
@@ -498,18 +494,18 @@ static void bdw_sseu_info_init(struct intel_gt *gt)
* to each of the enabled slices.
*/
subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
- subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
- GEN8_F2_SS_DIS_SHIFT);
+ subslice_mask &= ~REG_FIELD_GET(GEN8_F2_SS_DIS_MASK, fuse2);
eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0);
eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1);
eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2);
- eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK;
- eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) |
- ((eu_disable1 & GEN8_EU_DIS1_S1_MASK) <<
- (32 - GEN8_EU_DIS0_S1_SHIFT));
- eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) |
- ((eu_disable2 & GEN8_EU_DIS2_S2_MASK) <<
- (32 - GEN8_EU_DIS1_S2_SHIFT));
+ eu_disable[0] =
+ REG_FIELD_GET(GEN8_EU_DIS0_S0_MASK, eu_disable0);
+ eu_disable[1] =
+ REG_FIELD_GET(GEN8_EU_DIS0_S1_MASK, eu_disable0) |
+ REG_FIELD_GET(GEN8_EU_DIS1_S1_MASK, eu_disable1) << hweight32(GEN8_EU_DIS0_S1_MASK);
+ eu_disable[2] =
+ REG_FIELD_GET(GEN8_EU_DIS1_S2_MASK, eu_disable1) |
+ REG_FIELD_GET(GEN8_EU_DIS2_S2_MASK, eu_disable2) << hweight32(GEN8_EU_DIS1_S2_MASK);
/*
* Iterate through enabled slices and subslices to


@@ -1285,15 +1285,12 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
static u32 gpm_timestamp_shift(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
- u32 reg, shift;
+ u32 reg;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
- shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
- return 3 - shift;
+ return 3 - REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}
static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)