Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-03-22 07:27:12 +08:00)
Merge tag 'v6.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into gpio/for-next
Linux 6.14-rc5
3	.mailmap
@@ -522,6 +522,7 @@ Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
Natalie Vock <natalie.vock@gmx.de> <friedrich.vock@gmx.de>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.ibm.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.vnet.ibm.com>
@@ -613,6 +614,8 @@ Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
Rodrigo Siqueira <siqueira@igalia.com> <rodrigosiqueiramelo@gmail.com>
Rodrigo Siqueira <siqueira@igalia.com> <Rodrigo.Siqueira@amd.com>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
@@ -18,6 +18,7 @@ Introduction
both access system memory directly and with the same effective
addresses.

**This driver is deprecated and will be removed in a future release.**

Hardware overview
=================
@@ -453,7 +454,7 @@ Sysfs Class

A cxl sysfs class is added under /sys/class/cxl to facilitate
enumeration and tuning of the accelerators. Its layout is
described in Documentation/ABI/testing/sysfs-class-cxl
described in Documentation/ABI/obsolete/sysfs-class-cxl


Udev rules
@@ -8,7 +8,7 @@ Landlock: unprivileged access control
=====================================

:Author: Mickaël Salaün
:Date: October 2024
:Date: January 2025

The goal of Landlock is to enable restriction of ambient rights (e.g. global
filesystem or network access) for a set of processes. Because Landlock
@@ -329,11 +329,11 @@ non-sandboxed process, we can specify this restriction with
A sandboxed process can connect to a non-sandboxed process when its domain is
not scoped. If a process's domain is scoped, it can only connect to sockets
created by processes in the same scope.
Moreover, If a process is scoped to send signal to a non-scoped process, it can
Moreover, if a process is scoped to send signal to a non-scoped process, it can
only send signals to processes in the same scope.

A connected datagram socket behaves like a stream socket when its domain is
scoped, meaning if the domain is scoped after the socket is connected , it can
scoped, meaning if the domain is scoped after the socket is connected, it can
still :manpage:`send(2)` data just like a stream socket. However, in the same
scenario, a non-connected datagram socket cannot send data (with
:manpage:`sendto(2)`) outside its scope.
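
For context, a minimal user-space sketch of the socket/signal scoping behaviour described in the Landlock documentation above (an illustrative example, not part of this commit; it assumes the LANDLOCK_SCOPE_* flags and the landlock syscalls exposed by <linux/landlock.h> since Landlock ABI v6):

    #include <linux/landlock.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            /* Scope abstract UNIX socket connections and signals to this sandbox. */
            struct landlock_ruleset_attr attr = {
                    .scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
                              LANDLOCK_SCOPE_SIGNAL,
            };
            int fd = syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);

            if (fd < 0)
                    return 1;
            /* Landlock requires no_new_privs before restricting the task. */
            prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
            /* After this call, connect(2) to abstract UNIX sockets and kill(2)
             * only reach processes within the same Landlock scope. */
            return syscall(SYS_landlock_restrict_self, fd, 0);
    }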
26	MAINTAINERS
@@ -1046,14 +1046,14 @@ F: drivers/crypto/ccp/hsti.*
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>
M: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
R: Rodrigo Siqueira <siqueira@igalia.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/

AMD DISPLAY CORE - DML
M: Chaitanya Dhere <chaitanya.dhere@amd.com>
M: Austin Zheng <austin.zheng@amd.com>
M: Jun Lei <jun.lei@amd.com>
S: Supported
F: drivers/gpu/drm/amd/display/dc/dml/
@@ -2878,7 +2878,7 @@ F: drivers/pinctrl/nxp/

ARM/NXP S32G/S32R DWMAC ETHERNET DRIVER
M: Jan Petrous <jan.petrous@oss.nxp.com>
L: NXP S32 Linux Team <s32@nxp.com>
R: s32@nxp.com
S: Maintained
F: Documentation/devicetree/bindings/net/nxp,s32-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -5856,7 +5856,6 @@ F: Documentation/security/snp-tdx-threat-model.rst

CONFIGFS
M: Joel Becker <jlbec@evilplan.org>
M: Christoph Hellwig <hch@lst.de>
S: Supported
T: git git://git.infradead.org/users/hch/configfs.git
F: fs/configfs/
@@ -5927,6 +5926,17 @@ F: tools/testing/selftests/cgroup/test_cpuset.c
F: tools/testing/selftests/cgroup/test_cpuset_prs.sh
F: tools/testing/selftests/cgroup/test_cpuset_v1_base.sh

CONTROL GROUP - DEVICE MEMORY CONTROLLER (DMEM)
M: Maarten Lankhorst <dev@lankhorst.se>
M: Maxime Ripard <mripard@kernel.org>
M: Natalie Vock <natalie.vock@gmx.de>
L: cgroups@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: include/linux/cgroup_dmem.h
F: kernel/cgroup/dmem.c

CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
@@ -6879,7 +6889,6 @@ F: kernel/dma/map_benchmark.c
F: tools/testing/selftests/dma/

DMA MAPPING HELPERS
M: Christoph Hellwig <hch@lst.de>
M: Marek Szyprowski <m.szyprowski@samsung.com>
R: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
@@ -15682,7 +15691,7 @@ F: include/uapi/linux/cciss*.h

MICROSOFT MANA RDMA DRIVER
M: Long Li <longli@microsoft.com>
M: Ajay Sharma <sharmaajay@microsoft.com>
M: Konstantin Taranov <kotaranov@microsoft.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/mana/
@@ -19657,7 +19666,6 @@ F: drivers/net/wireless/quantenna
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
M: Xinhui Pan <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
B: https://gitlab.freedesktop.org/drm/amd/-/issues
@@ -20330,6 +20338,7 @@ RISC-V ARCHITECTURE
M: Paul Walmsley <paul.walmsley@sifive.com>
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Albert Ou <aou@eecs.berkeley.edu>
R: Alexandre Ghiti <alex@ghiti.fr>
L: linux-riscv@lists.infradead.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-riscv/list/
@@ -21923,10 +21932,13 @@ F: sound/soc/uniphier/

SOCKET TIMESTAMPING
M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
R: Jason Xing <kernelxing@tencent.com>
S: Maintained
F: Documentation/networking/timestamping.rst
F: include/linux/net_tstamp.h
F: include/uapi/linux/net_tstamp.h
F: tools/testing/selftests/bpf/*/net_timestamping*
F: tools/testing/selftests/net/*timestamp*
F: tools/testing/selftests/net/so_txtime.c

SOEKRIS NET48XX LED SUPPORT
2	Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
@@ -76,12 +76,22 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
{
unsigned long stride = huge_page_size(hstate_vma(vma));

if (stride == PMD_SIZE)
__flush_tlb_range(vma, start, end, stride, false, 2);
else if (stride == PUD_SIZE)
__flush_tlb_range(vma, start, end, stride, false, 1);
else
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
switch (stride) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
__flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
break;
#endif
case CONT_PMD_SIZE:
case PMD_SIZE:
__flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
break;
case CONT_PTE_SIZE:
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
break;
default:
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
}
}

#endif /* __ASM_HUGETLB_H */
@@ -119,7 +119,7 @@
#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
#define TCR_EL2_T0SZ_MASK 0x3f
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)

/* VTCR_EL2 Registers bits */
#define VTCR_EL2_DS TCR_EL2_DS

@@ -1259,7 +1259,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
@@ -559,6 +559,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
mmu = vcpu->arch.hw_mmu;
|
||||
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
|
||||
|
||||
/*
|
||||
* Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
|
||||
* which happens eagerly in VHE.
|
||||
*
|
||||
* Also, the VMID allocator only preserves VMIDs that are active at the
|
||||
* time of rollover, so KVM might need to grab a new VMID for the MMU if
|
||||
* this is called from kvm_sched_in().
|
||||
*/
|
||||
kvm_arm_vmid_update(&mmu->vmid);
|
||||
|
||||
/*
|
||||
* We guarantee that both TLBs and I-cache are private to each
|
||||
* vcpu. If detecting that a vcpu from the same VM has
|
||||
@@ -1138,18 +1148,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
preempt_disable();
|
||||
|
||||
/*
|
||||
* The VMID allocator only tracks active VMIDs per
|
||||
* physical CPU, and therefore the VMID allocated may not be
|
||||
* preserved on VMID roll-over if the task was preempted,
|
||||
* making a thread's VMID inactive. So we need to call
|
||||
* kvm_arm_vmid_update() in non-premptible context.
|
||||
*/
|
||||
if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
|
||||
has_vhe())
|
||||
__load_stage2(vcpu->arch.hw_mmu,
|
||||
vcpu->arch.hw_mmu->arch);
|
||||
|
||||
kvm_pmu_flush_hwstate(vcpu);
|
||||
|
||||
local_irq_disable();
|
||||
@@ -1980,7 +1978,7 @@ static int kvm_init_vector_slots(void)
|
||||
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
|
||||
{
|
||||
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
|
||||
unsigned long tcr, ips;
|
||||
unsigned long tcr;
|
||||
|
||||
/*
|
||||
* Calculate the raw per-cpu offset without a translation from the
|
||||
@@ -1994,19 +1992,18 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
|
||||
params->mair_el2 = read_sysreg(mair_el1);
|
||||
|
||||
tcr = read_sysreg(tcr_el1);
|
||||
ips = FIELD_GET(TCR_IPS_MASK, tcr);
|
||||
if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
|
||||
tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
|
||||
tcr |= TCR_EPD1_MASK;
|
||||
} else {
|
||||
unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
|
||||
|
||||
tcr &= TCR_EL2_MASK;
|
||||
tcr |= TCR_EL2_RES1;
|
||||
tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
|
||||
if (lpa2_is_enabled())
|
||||
tcr |= TCR_EL2_DS;
|
||||
}
|
||||
tcr &= ~TCR_T0SZ_MASK;
|
||||
tcr |= TCR_T0SZ(hyp_va_bits);
|
||||
tcr &= ~TCR_EL2_PS_MASK;
|
||||
tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
|
||||
if (lpa2_is_enabled())
|
||||
tcr |= TCR_EL2_DS;
|
||||
params->tcr_el2 = tcr;
|
||||
|
||||
params->pgd_pa = kvm_mmu_get_httbr();
|
||||
|
||||
@@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
|
||||
atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
|
||||
}
|
||||
|
||||
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
|
||||
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
|
||||
{
|
||||
unsigned long flags;
|
||||
u64 vmid, old_active_vmid;
|
||||
bool updated = false;
|
||||
|
||||
vmid = atomic64_read(&kvm_vmid->id);
|
||||
|
||||
@@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
|
||||
if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
|
||||
0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
|
||||
old_active_vmid, vmid))
|
||||
return false;
|
||||
return;
|
||||
|
||||
raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
|
||||
|
||||
/* Check that our VMID belongs to the current generation. */
|
||||
vmid = atomic64_read(&kvm_vmid->id);
|
||||
if (!vmid_gen_match(vmid)) {
|
||||
if (!vmid_gen_match(vmid))
|
||||
vmid = new_vmid(kvm_vmid);
|
||||
updated = true;
|
||||
}
|
||||
|
||||
atomic64_set(this_cpu_ptr(&active_vmids), vmid);
|
||||
raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
|
||||
|
||||
return updated;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
|
||||
|
||||
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
|
||||
{
|
||||
int contig_ptes = 0;
|
||||
int contig_ptes = 1;
|
||||
|
||||
*pgsize = size;
|
||||
|
||||
switch (size) {
|
||||
#ifndef __PAGETABLE_PMD_FOLDED
|
||||
case PUD_SIZE:
|
||||
if (pud_sect_supported())
|
||||
contig_ptes = 1;
|
||||
break;
|
||||
#endif
|
||||
case PMD_SIZE:
|
||||
contig_ptes = 1;
|
||||
break;
|
||||
case CONT_PMD_SIZE:
|
||||
*pgsize = PMD_SIZE;
|
||||
contig_ptes = CONT_PMDS;
|
||||
@@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
|
||||
*pgsize = PAGE_SIZE;
|
||||
contig_ptes = CONT_PTES;
|
||||
break;
|
||||
default:
|
||||
WARN_ON(!__hugetlb_valid_size(size));
|
||||
}
|
||||
|
||||
return contig_ptes;
|
||||
@@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
|
||||
unsigned long pgsize,
|
||||
unsigned long ncontig)
|
||||
{
|
||||
pte_t orig_pte = __ptep_get(ptep);
|
||||
unsigned long i;
|
||||
pte_t pte, tmp_pte;
|
||||
bool present;
|
||||
|
||||
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
|
||||
pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
|
||||
|
||||
/*
|
||||
* If HW_AFDBM is enabled, then the HW could turn on
|
||||
* the dirty or accessed bit for any page in the set,
|
||||
* so check them all.
|
||||
*/
|
||||
if (pte_dirty(pte))
|
||||
orig_pte = pte_mkdirty(orig_pte);
|
||||
|
||||
if (pte_young(pte))
|
||||
orig_pte = pte_mkyoung(orig_pte);
|
||||
pte = __ptep_get_and_clear(mm, addr, ptep);
|
||||
present = pte_present(pte);
|
||||
while (--ncontig) {
|
||||
ptep++;
|
||||
addr += pgsize;
|
||||
tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
|
||||
if (present) {
|
||||
if (pte_dirty(tmp_pte))
|
||||
pte = pte_mkdirty(pte);
|
||||
if (pte_young(tmp_pte))
|
||||
pte = pte_mkyoung(pte);
|
||||
}
|
||||
}
|
||||
return orig_pte;
|
||||
return pte;
|
||||
}
|
||||
|
||||
static pte_t get_clear_contig_flush(struct mm_struct *mm,
|
||||
@@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
|
||||
__pte_clear(mm, addr, ptep);
|
||||
}
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, unsigned long sz)
|
||||
{
|
||||
int ncontig;
|
||||
size_t pgsize;
|
||||
pte_t orig_pte = __ptep_get(ptep);
|
||||
|
||||
if (!pte_cont(orig_pte))
|
||||
return __ptep_get_and_clear(mm, addr, ptep);
|
||||
|
||||
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
|
||||
|
||||
ncontig = num_contig_ptes(sz, &pgsize);
|
||||
return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
|
||||
}
|
||||
|
||||
@@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
|
||||
|
||||
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
unsigned long psize = huge_page_size(hstate_vma(vma));
|
||||
|
||||
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
|
||||
/*
|
||||
* Break-before-make (BBM) is required for all user space mappings
|
||||
@@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
|
||||
if (pte_user_exec(__ptep_get(ptep)))
|
||||
return huge_ptep_clear_flush(vma, addr, ptep);
|
||||
}
|
||||
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
|
||||
}
|
||||
|
||||
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
|
||||
|
||||
@@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)
|
||||
|
||||
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
|
||||
extern u16 memstart_offset_seed;
|
||||
|
||||
/*
|
||||
* Use the sanitised version of id_aa64mmfr0_el1 so that linear
|
||||
* map randomization can be enabled by shrinking the IPA space.
|
||||
*/
|
||||
u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
|
||||
u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
|
||||
int parange = cpuid_feature_extract_unsigned_field(
|
||||
mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
|
||||
s64 range = linear_region_size -
|
||||
|
||||
@@ -36,7 +36,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
unsigned long addr, pte_t *ptep,
|
||||
unsigned long sz)
|
||||
{
|
||||
pte_t clear;
|
||||
pte_t pte = ptep_get(ptep);
|
||||
@@ -51,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t pte;
|
||||
unsigned long sz = huge_page_size(hstate_vma(vma));
|
||||
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
|
||||
flush_tlb_page(vma, addr);
|
||||
return pte;
|
||||
}
|
||||
|
||||
@@ -468,6 +468,8 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
|
||||
Elf_Sym *sym, const char *symname))
|
||||
{
|
||||
int i;
|
||||
struct section *extab_sec = sec_lookup("__ex_table");
|
||||
int extab_index = extab_sec ? extab_sec - secs : -1;
|
||||
|
||||
/* Walk through the relocations */
|
||||
for (i = 0; i < ehdr.e_shnum; i++) {
|
||||
@@ -480,6 +482,9 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
|
||||
if (sec->shdr.sh_type != SHT_REL_TYPE)
|
||||
continue;
|
||||
|
||||
if (sec->shdr.sh_info == extab_index)
|
||||
continue;
|
||||
|
||||
sec_symtab = sec->link;
|
||||
sec_applies = &secs[sec->shdr.sh_info];
|
||||
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
|
||||
|
||||
@@ -27,7 +27,8 @@ static inline int prepare_hugepage_range(struct file *file,
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
unsigned long addr, pte_t *ptep,
|
||||
unsigned long sz)
|
||||
{
|
||||
pte_t clear;
|
||||
pte_t pte = *ptep;
|
||||
@@ -42,13 +43,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t pte;
|
||||
unsigned long sz = huge_page_size(hstate_vma(vma));
|
||||
|
||||
/*
|
||||
* clear the huge pte entry firstly, so that the other smp threads will
|
||||
* not get old pte entry after finishing flush_tlb_page and before
|
||||
* setting new huge pte entry
|
||||
*/
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
|
||||
flush_tlb_page(vma, addr);
|
||||
return pte;
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep);
|
||||
pte_t *ptep, unsigned long sz);
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
|
||||
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
|
||||
@@ -126,7 +126,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep)
|
||||
pte_t *ptep, unsigned long sz)
|
||||
{
|
||||
pte_t entry;
|
||||
|
||||
|
||||
@@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
unsigned long addr, pte_t *ptep,
|
||||
unsigned long sz)
|
||||
{
|
||||
return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
|
||||
}
|
||||
@@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t pte;
|
||||
unsigned long sz = huge_page_size(hstate_vma(vma));
|
||||
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
|
||||
flush_hugetlb_page(vma, addr);
|
||||
return pte;
|
||||
}
|
||||
|
||||
@@ -231,7 +231,7 @@
|
||||
__arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \
|
||||
sc_prepend, sc_append, \
|
||||
cas_prepend, cas_append, \
|
||||
__ret, __ptr, (long), __old, __new); \
|
||||
__ret, __ptr, (long)(int)(long), __old, __new); \
|
||||
break; \
|
||||
case 8: \
|
||||
__arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \
|
||||
|
||||
@@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
||||
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \
|
||||
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \
|
||||
: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
|
||||
: [ov] "Jr" (oldval), [nv] "Jr" (newval)
|
||||
: [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
|
||||
: "memory");
|
||||
__disable_user_access();
|
||||
|
||||
|
||||
@@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm,
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep);
|
||||
unsigned long addr, pte_t *ptep,
|
||||
unsigned long sz);
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
|
||||
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
|
||||
@@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu)
|
||||
if (!np)
|
||||
return -ENOENT;
|
||||
|
||||
if (of_property_read_bool(np, "cache-size"))
|
||||
if (of_property_present(np, "cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
|
||||
if (of_property_read_bool(np, "i-cache-size"))
|
||||
if (of_property_present(np, "i-cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
|
||||
if (of_property_read_bool(np, "d-cache-size"))
|
||||
if (of_property_present(np, "d-cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
|
||||
|
||||
prev = np;
|
||||
@@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu)
|
||||
break;
|
||||
if (level <= levels)
|
||||
break;
|
||||
if (of_property_read_bool(np, "cache-size"))
|
||||
if (of_property_present(np, "cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
|
||||
if (of_property_read_bool(np, "i-cache-size"))
|
||||
if (of_property_present(np, "i-cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
|
||||
if (of_property_read_bool(np, "d-cache-size"))
|
||||
if (of_property_present(np, "d-cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
|
||||
levels = level;
|
||||
}
|
||||
|
||||
@@ -479,7 +479,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
|
||||
if (bit < RISCV_ISA_EXT_BASE)
|
||||
*this_hwcap |= isa2hwcap[bit];
|
||||
}
|
||||
} while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
|
||||
} while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX));
|
||||
}
|
||||
|
||||
static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap)
|
||||
|
||||
@@ -322,8 +322,8 @@ void __init setup_arch(char **cmdline_p)
|
||||
|
||||
riscv_init_cbo_blocksizes();
|
||||
riscv_fill_hwcap();
|
||||
init_rt_signal_env();
|
||||
apply_boot_alternatives();
|
||||
init_rt_signal_env();
|
||||
|
||||
if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
|
||||
riscv_isa_extension_available(NULL, ZICBOM))
|
||||
|
||||
@@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all)
|
||||
if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
|
||||
total_context_size += riscv_v_sc_size;
|
||||
}
|
||||
/*
|
||||
* Preserved a __riscv_ctx_hdr for END signal context header if an
|
||||
* extension uses __riscv_extra_ext_header
|
||||
*/
|
||||
if (total_context_size)
|
||||
total_context_size += sizeof(struct __riscv_ctx_hdr);
|
||||
|
||||
frame_size += total_context_size;
|
||||
|
||||
|
||||
@@ -974,7 +974,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
|
||||
|
||||
if (imsic->vsfile_cpu >= 0) {
|
||||
writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
|
||||
kvm_vcpu_kick(vcpu);
|
||||
} else {
|
||||
eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
|
||||
set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/wordpart.h>
|
||||
#include <asm/sbi.h>
|
||||
#include <asm/kvm_vcpu_sbi.h>
|
||||
|
||||
@@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
|
||||
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
|
||||
if (!target_vcpu)
|
||||
return SBI_ERR_INVALID_PARAM;
|
||||
if (!kvm_riscv_vcpu_stopped(target_vcpu))
|
||||
return SBI_HSM_STATE_STARTED;
|
||||
else if (vcpu->stat.generic.blocking)
|
||||
if (kvm_riscv_vcpu_stopped(target_vcpu))
|
||||
return SBI_HSM_STATE_STOPPED;
|
||||
else if (target_vcpu->stat.generic.blocking)
|
||||
return SBI_HSM_STATE_SUSPENDED;
|
||||
else
|
||||
return SBI_HSM_STATE_STOPPED;
|
||||
return SBI_HSM_STATE_STARTED;
|
||||
}
|
||||
|
||||
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
@@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
}
|
||||
return 0;
|
||||
case SBI_EXT_HSM_HART_SUSPEND:
|
||||
switch (cp->a0) {
|
||||
switch (lower_32_bits(cp->a0)) {
|
||||
case SBI_HSM_SUSPEND_RET_DEFAULT:
|
||||
kvm_riscv_vcpu_wfi(vcpu);
|
||||
break;
|
||||
|
||||
@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
u64 next_cycle;
|
||||
|
||||
if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
|
||||
retdata->err_val = SBI_ERR_INVALID_PARAM;
|
||||
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
|
||||
unsigned long hmask = cp->a0;
|
||||
unsigned long hbase = cp->a1;
|
||||
unsigned long hart_bit = 0, sentmask = 0;
|
||||
|
||||
if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
|
||||
retdata->err_val = SBI_ERR_INVALID_PARAM;
|
||||
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
if (hbase != -1UL) {
|
||||
if (tmp->vcpu_id < hbase)
|
||||
continue;
|
||||
if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
|
||||
hart_bit = tmp->vcpu_id - hbase;
|
||||
if (hart_bit >= __riscv_xlen)
|
||||
goto done;
|
||||
if (!(hmask & (1UL << hart_bit)))
|
||||
continue;
|
||||
}
|
||||
ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
|
||||
if (ret < 0)
|
||||
break;
|
||||
sentmask |= 1UL << hart_bit;
|
||||
kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
|
||||
}
|
||||
|
||||
done:
|
||||
if (hbase != -1UL && (hmask ^ sentmask))
|
||||
retdata->err_val = SBI_ERR_INVALID_PARAM;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/wordpart.h>
|
||||
|
||||
#include <asm/kvm_vcpu_sbi.h>
|
||||
#include <asm/sbi.h>
|
||||
@@ -19,7 +20,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
|
||||
switch (funcid) {
|
||||
case SBI_EXT_SUSP_SYSTEM_SUSPEND:
|
||||
if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
|
||||
if (lower_32_bits(cp->a0) != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
|
||||
retdata->err_val = SBI_ERR_INVALID_PARAM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr,
|
||||
pte_t *ptep)
|
||||
pte_t *ptep, unsigned long sz)
|
||||
{
|
||||
pte_t orig_pte = ptep_get(ptep);
|
||||
int pte_num;
|
||||
|
||||
@@ -25,8 +25,16 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET
|
||||
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
|
||||
|
||||
pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep);
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
|
||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep,
|
||||
unsigned long sz)
|
||||
{
|
||||
return __huge_ptep_get_and_clear(mm, addr, ptep);
|
||||
}
|
||||
|
||||
static inline void arch_clear_hugetlb_flags(struct folio *folio)
|
||||
{
|
||||
@@ -48,7 +56,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
|
||||
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
unsigned long address, pte_t *ptep)
|
||||
{
|
||||
return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
|
||||
return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
|
||||
@@ -59,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
||||
int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
|
||||
|
||||
if (changed) {
|
||||
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
__huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
|
||||
}
|
||||
return changed;
|
||||
@@ -69,7 +77,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
||||
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
|
||||
pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep);
|
||||
|
||||
__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
|
||||
}
|
||||
|
||||
@@ -188,8 +188,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
|
||||
return __rste_to_pte(pte_val(*ptep));
|
||||
}
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t pte = huge_ptep_get(mm, addr, ptep);
|
||||
pmd_t *pmdp = (pmd_t *) ptep;
|
||||
|
||||
@@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep);
|
||||
pte_t *ptep, unsigned long sz);
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
|
||||
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
|
||||
@@ -260,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
}
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep)
|
||||
pte_t *ptep, unsigned long sz)
|
||||
{
|
||||
unsigned int i, nptes, orig_shift, shift;
|
||||
unsigned long size;
|
||||
|
||||
@@ -190,6 +190,7 @@ static __always_inline bool int80_is_external(void)
|
||||
|
||||
/**
|
||||
* do_int80_emulation - 32-bit legacy syscall C entry from asm
|
||||
* @regs: syscall arguments in struct pt_args on the stack.
|
||||
*
|
||||
* This entry point can be used by 32-bit and 64-bit programs to perform
|
||||
* 32-bit system calls. Instances of INT $0x80 can be found inline in
|
||||
|
||||
@@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
|
||||
if (event->attr.type == event->pmu->type)
|
||||
event->hw.config |= x86_pmu_get_event_config(event);
|
||||
|
||||
if (event->attr.sample_period && x86_pmu.limit_period) {
|
||||
if (!event->attr.freq && x86_pmu.limit_period) {
|
||||
s64 left = event->attr.sample_period;
|
||||
x86_pmu.limit_period(event, &left);
|
||||
if (left > event->attr.sample_period)
|
||||
|
||||
@@ -3952,6 +3952,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
|
||||
return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
|
||||
}
|
||||
|
||||
static u64 intel_pmu_freq_start_period(struct perf_event *event)
|
||||
{
|
||||
int type = event->attr.type;
|
||||
u64 config, factor;
|
||||
s64 start;
|
||||
|
||||
/*
|
||||
* The 127 is the lowest possible recommended SAV (sample after value)
|
||||
* for a 4000 freq (default freq), according to the event list JSON file.
|
||||
* Also, assume the workload is idle 50% time.
|
||||
*/
|
||||
factor = 64 * 4000;
|
||||
if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
|
||||
goto end;
|
||||
|
||||
/*
|
||||
* The estimation of the start period in the freq mode is
|
||||
* based on the below assumption.
|
||||
*
|
||||
* For a cycles or an instructions event, 1GHZ of the
|
||||
* underlying platform, 1 IPC. The workload is idle 50% time.
|
||||
* The start period = 1,000,000,000 * 1 / freq / 2.
|
||||
* = 500,000,000 / freq
|
||||
*
|
||||
* Usually, the branch-related events occur less than the
|
||||
* instructions event. According to the Intel event list JSON
|
||||
* file, the SAV (sample after value) of a branch-related event
|
||||
* is usually 1/4 of an instruction event.
|
||||
* The start period of branch-related events = 125,000,000 / freq.
|
||||
*
|
||||
* The cache-related events occurs even less. The SAV is usually
|
||||
* 1/20 of an instruction event.
|
||||
* The start period of cache-related events = 25,000,000 / freq.
|
||||
*/
|
||||
config = event->attr.config & PERF_HW_EVENT_MASK;
|
||||
if (type == PERF_TYPE_HARDWARE) {
|
||||
switch (config) {
|
||||
case PERF_COUNT_HW_CPU_CYCLES:
|
||||
case PERF_COUNT_HW_INSTRUCTIONS:
|
||||
case PERF_COUNT_HW_BUS_CYCLES:
|
||||
case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
|
||||
case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
|
||||
case PERF_COUNT_HW_REF_CPU_CYCLES:
|
||||
factor = 500000000;
|
||||
break;
|
||||
case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
|
||||
case PERF_COUNT_HW_BRANCH_MISSES:
|
||||
factor = 125000000;
|
||||
break;
|
||||
case PERF_COUNT_HW_CACHE_REFERENCES:
|
||||
case PERF_COUNT_HW_CACHE_MISSES:
|
||||
factor = 25000000;
|
||||
break;
|
||||
default:
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
if (type == PERF_TYPE_HW_CACHE)
|
||||
factor = 25000000;
|
||||
end:
|
||||
/*
|
||||
* Usually, a prime or a number with less factors (close to prime)
|
||||
* is chosen as an SAV, which makes it less likely that the sampling
|
||||
* period synchronizes with some periodic event in the workload.
|
||||
* Minus 1 to make it at least avoiding values near power of twos
|
||||
* for the default freq.
|
||||
*/
|
||||
start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
|
||||
|
||||
if (start > x86_pmu.max_period)
|
||||
start = x86_pmu.max_period;
|
||||
|
||||
if (x86_pmu.limit_period)
|
||||
x86_pmu.limit_period(event, &start);
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
static int intel_pmu_hw_config(struct perf_event *event)
|
||||
{
|
||||
int ret = x86_pmu_hw_config(event);
|
||||
@@ -3963,6 +4042,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (event->attr.freq && event->attr.sample_freq) {
|
||||
event->hw.sample_period = intel_pmu_freq_start_period(event);
|
||||
event->hw.last_period = event->hw.sample_period;
|
||||
local64_set(&event->hw.period_left, event->hw.sample_period);
|
||||
}
|
||||
|
||||
if (event->attr.precise_ip) {
|
||||
if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
|
||||
return -EINVAL;
|
||||
|
||||
@@ -879,6 +879,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
|
||||
X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl),
|
||||
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl),
|
||||
X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl),
|
||||
X86_MATCH_VFM(INTEL_ARROWLAKE_U, &model_skl),
|
||||
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl),
|
||||
{},
|
||||
};
|
||||
|
||||
@@ -153,8 +153,8 @@ static void geode_configure(void)
|
||||
u8 ccr3;
|
||||
local_irq_save(flags);
|
||||
|
||||
/* Suspend on halt power saving and enable #SUSP pin */
|
||||
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
|
||||
/* Suspend on halt power saving */
|
||||
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08);
|
||||
|
||||
ccr3 = getCx86(CX86_CCR3);
|
||||
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
/*
|
||||
* Architecture specific OF callbacks.
|
||||
*/
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/interrupt.h>
|
||||
@@ -313,6 +314,6 @@ void __init x86_flattree_get_config(void)
|
||||
if (initial_dtb)
|
||||
early_memunmap(dt, map_len);
|
||||
#endif
|
||||
if (of_have_populated_dt())
|
||||
if (acpi_disabled && of_have_populated_dt())
|
||||
x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config;
|
||||
}
|
||||
|
||||
@@ -25,8 +25,10 @@
|
||||
#include <asm/posted_intr.h>
|
||||
#include <asm/irq_remapping.h>
|
||||
|
||||
#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR)
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <asm/trace/irq_vectors.h>
|
||||
#endif
|
||||
|
||||
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
|
||||
EXPORT_PER_CPU_SYMBOL(irq_stat);
|
||||
|
||||
@@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
|
||||
return true;
|
||||
}
|
||||
|
||||
static void kvm_mmu_start_lpage_recovery(struct once *once)
|
||||
static int kvm_mmu_start_lpage_recovery(struct once *once)
|
||||
{
|
||||
struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
|
||||
struct kvm *kvm = container_of(ka, struct kvm, arch);
|
||||
@@ -7471,13 +7471,14 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
|
||||
kvm_nx_huge_page_recovery_worker_kill,
|
||||
kvm, "kvm-nx-lpage-recovery");
|
||||
|
||||
if (!nx_thread)
|
||||
return;
|
||||
if (IS_ERR(nx_thread))
|
||||
return PTR_ERR(nx_thread);
|
||||
|
||||
vhost_task_start(nx_thread);
|
||||
|
||||
/* Make the task visible only once it is fully started. */
|
||||
WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_mmu_post_init_vm(struct kvm *kvm)
|
||||
@@ -7485,10 +7486,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
|
||||
if (nx_hugepage_mitigation_hard_disabled)
|
||||
return 0;
|
||||
|
||||
call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
|
||||
if (!kvm->arch.nx_huge_page_recovery_thread)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
|
||||
}
|
||||
|
||||
void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
|
||||
|
||||
@@ -5084,6 +5084,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
|
||||
|
||||
load_vmcs12_host_state(vcpu, vmcs12);
|
||||
|
||||
/*
|
||||
* Process events if an injectable IRQ or NMI is pending, even
|
||||
* if the event is blocked (RFLAGS.IF is cleared on VM-Exit).
|
||||
* If an event became pending while L2 was active, KVM needs to
|
||||
* either inject the event or request an IRQ/NMI window. SMIs
|
||||
* don't need to be processed as SMM is mutually exclusive with
|
||||
* non-root mode. INIT/SIPI don't need to be checked as INIT
|
||||
* is blocked post-VMXON, and SIPIs are ignored.
|
||||
*/
|
||||
if (kvm_cpu_has_injectable_intr(vcpu) || vcpu->arch.nmi_pending)
|
||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@@ -12877,11 +12877,11 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
|
||||
mutex_unlock(&kvm->slots_lock);
|
||||
}
|
||||
kvm_unload_vcpu_mmus(kvm);
|
||||
kvm_destroy_vcpus(kvm);
|
||||
kvm_x86_call(vm_destroy)(kvm);
|
||||
kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
|
||||
kvm_pic_destroy(kvm);
|
||||
kvm_ioapic_destroy(kvm);
|
||||
kvm_destroy_vcpus(kvm);
|
||||
kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
|
||||
kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
|
||||
kvm_mmu_uninit_vm(kvm);
|
||||
|
||||
@@ -77,7 +77,7 @@ struct bio_slab {
|
||||
struct kmem_cache *slab;
|
||||
unsigned int slab_ref;
|
||||
unsigned int slab_size;
|
||||
char name[8];
|
||||
char name[12];
|
||||
};
|
||||
static DEFINE_MUTEX(bio_slab_lock);
|
||||
static DEFINE_XARRAY(bio_slabs);
|
||||
|
||||
@@ -329,7 +329,7 @@ int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
|
||||
|
||||
if (nsegs < lim->max_segments &&
|
||||
bytes + bv.bv_len <= max_bytes &&
|
||||
bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
|
||||
bv.bv_offset + bv.bv_len <= lim->min_segment_size) {
|
||||
nsegs++;
|
||||
bytes += bv.bv_len;
|
||||
} else {
|
||||
|
||||
@@ -246,6 +246,7 @@ int blk_validate_limits(struct queue_limits *lim)
|
||||
{
|
||||
unsigned int max_hw_sectors;
|
||||
unsigned int logical_block_sectors;
|
||||
unsigned long seg_size;
|
||||
int err;
|
||||
|
||||
/*
|
||||
@@ -303,7 +304,7 @@ int blk_validate_limits(struct queue_limits *lim)
|
||||
max_hw_sectors = min_not_zero(lim->max_hw_sectors,
|
||||
lim->max_dev_sectors);
|
||||
if (lim->max_user_sectors) {
|
||||
if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
|
||||
if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
|
||||
return -EINVAL;
|
||||
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
|
||||
} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
|
||||
@@ -341,7 +342,7 @@ int blk_validate_limits(struct queue_limits *lim)
|
||||
*/
|
||||
if (!lim->seg_boundary_mask)
|
||||
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
|
||||
if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
|
||||
if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
@@ -362,10 +363,17 @@ int blk_validate_limits(struct queue_limits *lim)
|
||||
*/
|
||||
if (!lim->max_segment_size)
|
||||
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
|
||||
if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
|
||||
if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* setup min segment size for building new segment in fast path */
|
||||
if (lim->seg_boundary_mask > lim->max_segment_size - 1)
|
||||
seg_size = lim->max_segment_size;
|
||||
else
|
||||
seg_size = lim->seg_boundary_mask + 1;
|
||||
lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
|
||||
|
||||
/*
|
||||
* We require drivers to at least do logical block aligned I/O, but
|
||||
* historically could not check for that due to the separate calls
|
||||
|
||||
@@ -410,13 +410,14 @@ static bool disk_insert_zone_wplug(struct gendisk *disk,
|
||||
}
|
||||
}
|
||||
hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
|
||||
atomic_inc(&disk->nr_zone_wplugs);
|
||||
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
|
||||
sector_t sector)
|
||||
static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk,
|
||||
sector_t sector)
|
||||
{
|
||||
unsigned int zno = disk_zone_no(disk, sector);
|
||||
unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
|
||||
@@ -437,6 +438,15 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
|
||||
sector_t sector)
|
||||
{
|
||||
if (!atomic_read(&disk->nr_zone_wplugs))
|
||||
return NULL;
|
||||
|
||||
return disk_get_hashed_zone_wplug(disk, sector);
|
||||
}
|
||||
|
||||
static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
|
||||
{
|
||||
struct blk_zone_wplug *zwplug =
|
||||
@@ -503,6 +513,7 @@ static void disk_remove_zone_wplug(struct gendisk *disk,
|
||||
zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
|
||||
spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
|
||||
hlist_del_init_rcu(&zwplug->node);
|
||||
atomic_dec(&disk->nr_zone_wplugs);
|
||||
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
|
||||
disk_put_zone_wplug(zwplug);
|
||||
}
|
||||
@@ -593,6 +604,11 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
|
||||
{
|
||||
struct bio *bio;
|
||||
|
||||
if (bio_list_empty(&zwplug->bio_list))
|
||||
return;
|
||||
|
||||
pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n",
|
||||
zwplug->disk->disk_name, zwplug->zone_no);
|
||||
while ((bio = bio_list_pop(&zwplug->bio_list)))
|
||||
blk_zone_wplug_bio_io_error(zwplug, bio);
|
||||
}
|
||||
@@ -1040,6 +1056,47 @@ plug:
|
||||
return true;
|
||||
}
|
||||
|
||||
static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
|
||||
{
|
||||
struct gendisk *disk = bio->bi_bdev->bd_disk;
|
||||
struct blk_zone_wplug *zwplug;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* We have native support for zone append operations, so we are not
|
||||
* going to handle @bio through plugging. However, we may already have a
|
||||
* zone write plug for the target zone if that zone was previously
|
||||
* partially written using regular writes. In such case, we risk leaving
|
||||
* the plug in the disk hash table if the zone is fully written using
|
||||
* zone append operations. Avoid this by removing the zone write plug.
|
||||
*/
|
||||
zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
|
||||
if (likely(!zwplug))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&zwplug->lock, flags);
|
||||
|
||||
/*
|
||||
* We are about to remove the zone write plug. But if the user
|
||||
* (mistakenly) has issued regular writes together with native zone
|
||||
* append, we must aborts the writes as otherwise the plugged BIOs would
|
||||
* not be executed by the plug BIO work as disk_get_zone_wplug() will
|
||||
* return NULL after the plug is removed. Aborting the plugged write
|
||||
* BIOs is consistent with the fact that these writes will most likely
|
||||
* fail anyway as there is no ordering guarantees between zone append
|
||||
* operations and regular write operations.
|
||||
*/
|
||||
if (!bio_list_empty(&zwplug->bio_list)) {
|
||||
pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n",
|
||||
disk->disk_name, zwplug->zone_no);
|
||||
disk_zone_wplug_abort(zwplug);
|
||||
}
|
||||
disk_remove_zone_wplug(disk, zwplug);
|
||||
spin_unlock_irqrestore(&zwplug->lock, flags);
|
||||
|
||||
disk_put_zone_wplug(zwplug);
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
|
||||
* @bio: The BIO being submitted
|
||||
@@ -1096,8 +1153,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
|
||||
*/
|
||||
switch (bio_op(bio)) {
|
||||
case REQ_OP_ZONE_APPEND:
|
||||
if (!bdev_emulates_zone_append(bdev))
|
||||
if (!bdev_emulates_zone_append(bdev)) {
|
||||
blk_zone_wplug_handle_native_zone_append(bio);
|
||||
return false;
|
||||
}
|
||||
fallthrough;
|
||||
case REQ_OP_WRITE:
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
@@ -1284,6 +1343,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk,
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
atomic_set(&disk->nr_zone_wplugs, 0);
|
||||
disk->zone_wplugs_hash_bits =
|
||||
min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
|
||||
|
||||
@@ -1338,6 +1398,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
|
||||
}
|
||||
}
|
||||
|
||||
WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs));
|
||||
kfree(disk->zone_wplugs_hash);
|
||||
disk->zone_wplugs_hash = NULL;
|
||||
disk->zone_wplugs_hash_bits = 0;
|
||||
@@ -1550,11 +1611,12 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to track the write pointer of all zones that are not
|
||||
* empty nor full. So make sure we have a zone write plug for
|
||||
* such zone if the device has a zone write plug hash table.
|
||||
* If the device needs zone append emulation, we need to track the
|
||||
* write pointer of all zones that are not empty nor full. So make sure
|
||||
* we have a zone write plug for such zone if the device has a zone
|
||||
* write plug hash table.
|
||||
*/
|
||||
if (!disk->zone_wplugs_hash)
|
||||
if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
|
||||
return 0;
|
||||
|
||||
disk_zone_wplug_sync_wp_offset(disk, zone);
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
struct elevator_type;
|
||||
|
||||
#define BLK_DEV_MAX_SECTORS (LLONG_MAX >> 9)
|
||||
#define BLK_MIN_SEGMENT_SIZE 4096
|
||||
|
||||
/* Max future timer expiry for timeouts */
|
||||
#define BLK_MAX_TIMEOUT (5 * HZ)
|
||||
@@ -358,8 +359,12 @@ struct bio *bio_split_zone_append(struct bio *bio,
|
||||
static inline bool bio_may_need_split(struct bio *bio,
|
||||
const struct queue_limits *lim)
|
||||
{
|
||||
return lim->chunk_sectors || bio->bi_vcnt != 1 ||
|
||||
bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
|
||||
if (lim->chunk_sectors)
|
||||
return true;
|
||||
if (bio->bi_vcnt != 1)
|
||||
return true;
|
||||
return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
|
||||
lim->min_segment_size;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -386,8 +386,12 @@ struct ahci_host_priv {
|
||||
static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
|
||||
unsigned int portid)
|
||||
{
|
||||
return portid >= hpriv->nports ||
|
||||
!(hpriv->mask_port_map & (1 << portid));
|
||||
if (portid >= hpriv->nports)
|
||||
return true;
|
||||
/* mask_port_map not set means that all ports are available */
|
||||
if (!hpriv->mask_port_map)
|
||||
return false;
|
||||
return !(hpriv->mask_port_map & (1 << portid));
|
||||
}
|
||||
|
||||
extern int ahci_ignore_sss;
|
||||
|
||||
@@ -541,6 +541,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
|
||||
hpriv->saved_port_map = port_map;
|
||||
}
|
||||
|
||||
/* mask_port_map not set means that all ports are available */
|
||||
if (hpriv->mask_port_map) {
|
||||
dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
|
||||
port_map,
|
||||
|
||||
@@ -4143,10 +4143,6 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
|
||||
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
|
||||
ATA_QUIRK_ZERO_AFTER_TRIM |
|
||||
ATA_QUIRK_NO_NCQ_ON_ATI },
|
||||
{ "Samsung SSD 870 QVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
|
||||
ATA_QUIRK_ZERO_AFTER_TRIM |
|
||||
ATA_QUIRK_NO_NCQ_ON_ATI |
|
||||
ATA_QUIRK_NOLPM },
|
||||
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
|
||||
ATA_QUIRK_ZERO_AFTER_TRIM |
|
||||
ATA_QUIRK_NO_NCQ_ON_ATI },
|
||||
|
||||
@@ -2102,7 +2102,8 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
return submit_or_queue_tx_urb(hdev, urb);
|
||||
|
||||
case HCI_SCODATA_PKT:
|
||||
if (hci_conn_num(hdev, SCO_LINK) < 1)
|
||||
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
|
||||
hci_conn_num(hdev, SCO_LINK) < 1)
|
||||
return -ENODEV;
|
||||
|
||||
urb = alloc_isoc_urb(hdev, skb);
|
||||
@@ -2576,7 +2577,8 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
return submit_or_queue_tx_urb(hdev, urb);
|
||||
|
||||
case HCI_SCODATA_PKT:
|
||||
if (hci_conn_num(hdev, SCO_LINK) < 1)
|
||||
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
|
||||
hci_conn_num(hdev, SCO_LINK) < 1)
|
||||
return -ENODEV;
|
||||
|
||||
urb = alloc_isoc_urb(hdev, skb);
|
||||
|
||||
@@ -59,9 +59,6 @@ struct bam_desc_hw {
|
||||
#define DESC_FLAG_NWD BIT(12)
|
||||
#define DESC_FLAG_CMD BIT(11)
|
||||
|
||||
#define BAM_NDP_REVISION_START 0x20
|
||||
#define BAM_NDP_REVISION_END 0x27
|
||||
|
||||
struct bam_async_desc {
|
||||
struct virt_dma_desc vd;
|
||||
|
||||
@@ -401,7 +398,6 @@ struct bam_device {
|
||||
|
||||
/* dma start transaction tasklet */
|
||||
struct tasklet_struct task;
|
||||
u32 bam_revision;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -445,10 +441,8 @@ static void bam_reset(struct bam_device *bdev)
|
||||
writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
|
||||
|
||||
/* set descriptor threshold, start with 4 bytes */
|
||||
if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
|
||||
BAM_NDP_REVISION_END))
|
||||
writel_relaxed(DEFAULT_CNT_THRSHLD,
|
||||
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
|
||||
writel_relaxed(DEFAULT_CNT_THRSHLD,
|
||||
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
|
||||
|
||||
/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
|
||||
writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
|
||||
@@ -1006,10 +1000,9 @@ static void bam_apply_new_config(struct bam_chan *bchan,
|
||||
maxburst = bchan->slave.src_maxburst;
|
||||
else
|
||||
maxburst = bchan->slave.dst_maxburst;
|
||||
if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
|
||||
BAM_NDP_REVISION_END))
|
||||
writel_relaxed(maxburst,
|
||||
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
|
||||
|
||||
writel_relaxed(maxburst,
|
||||
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
|
||||
}
|
||||
|
||||
bchan->reconfigure = 0;
|
||||
@@ -1199,11 +1192,10 @@ static int bam_init(struct bam_device *bdev)
|
||||
u32 val;
|
||||
|
||||
/* read revision and configuration information */
|
||||
val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
|
||||
if (!bdev->num_ees)
|
||||
if (!bdev->num_ees) {
|
||||
val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
|
||||
bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
|
||||
|
||||
bdev->bam_revision = val & REVISION_MASK;
|
||||
}
|
||||
|
||||
/* check that configured EE is within range */
|
||||
if (bdev->ee >= bdev->num_ees)
|
||||
|
||||
@@ -83,7 +83,9 @@ struct tegra_adma;
* @nr_channels: Number of DMA channels available.
* @ch_fifo_size_mask: Mask for FIFO size field.
* @sreq_index_offset: Slave channel index offset.
* @max_page: Maximum ADMA Channel Page.
* @has_outstanding_reqs: If DMA channel can have outstanding requests.
* @set_global_pg_config: Global page programming.
*/
struct tegra_adma_chip_data {
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
@@ -99,6 +101,7 @@ struct tegra_adma_chip_data {
unsigned int nr_channels;
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
unsigned int max_page;
bool has_outstanding_reqs;
void (*set_global_pg_config)(struct tegra_adma *tdma);
};
@@ -854,6 +857,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.nr_channels = 22,
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
.max_page = 0,
.has_outstanding_reqs = false,
.set_global_pg_config = NULL,
};
@@ -871,6 +875,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.nr_channels = 32,
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
.max_page = 4,
.has_outstanding_reqs = true,
.set_global_pg_config = tegra186_adma_global_page_config,
};
@@ -1609,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
goto out_fw;
}

ret = regmap_raw_write_async(regmap, reg, buf->buf,
le32_to_cpu(region->len));
ret = regmap_raw_write(regmap, reg, buf->buf,
le32_to_cpu(region->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write %d bytes at %d in %s: %d\n",
@@ -1625,12 +1625,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
regions++;
}

ret = regmap_async_complete(regmap);
if (ret != 0) {
cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
goto out_fw;
}

if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, regions, pos - firmware->size);
@@ -1638,7 +1632,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
cs_dsp_debugfs_save_wmfwname(dsp, file);

out_fw:
regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);

if (ret == -EOVERFLOW)
@@ -2326,8 +2319,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
file, blocks, le32_to_cpu(blk->len),
reg);
ret = regmap_raw_write_async(regmap, reg, buf->buf,
le32_to_cpu(blk->len));
ret = regmap_raw_write(regmap, reg, buf->buf,
le32_to_cpu(blk->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write to %x in %s: %d\n",
@@ -2339,10 +2332,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
blocks++;
}

ret = regmap_async_complete(regmap);
if (ret != 0)
cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);

if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, blocks, pos - firmware->size);
@@ -2350,7 +2339,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_debugfs_save_binname(dsp, file);

out_fw:
regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);

if (ret == -EOVERFLOW)
@@ -2561,8 +2549,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
{
int ret;

ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
ADSP2_SYS_ENA, ADSP2_SYS_ENA);
ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
ADSP2_SYS_ENA, ADSP2_SYS_ENA);
if (ret != 0)
return ret;
@@ -311,7 +311,7 @@ void cper_print_proc_arm(const char *pfx,
ctx_info = (struct cper_arm_ctx_info *)err_info;
max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
for (i = 0; i < proc->context_info_num; i++) {
int size = sizeof(*ctx_info) + ctx_info->size;
int size = ALIGN(sizeof(*ctx_info) + ctx_info->size, 16);

printk("%sContext info structure %d:\n", pfx, i);
if (len < size) {

@@ -325,7 +325,7 @@ void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)

ctx_info = (struct cper_ia_proc_ctx *)err_info;
for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
int size = ALIGN(sizeof(*ctx_info) + ctx_info->reg_arr_size, 16);
int groupsize = 4;

printk("%sContext Information Structure %d:\n", pfx, i);
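Editor's note on the cper hunks above: CPER processor context structures start on 16-byte boundaries, so the walk must advance by the aligned size rather than the raw size or it drifts off the record boundaries. Sketch of the adjustment, reusing the names from the hunk for illustration:

	/* Sketch: step by the padded record size, not the raw payload size. */
	size_t raw_size  = sizeof(*ctx_info) + ctx_info->size;
	size_t step_size = ALIGN(raw_size, 16);	/* round up to the next 16-byte boundary */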
@@ -99,14 +99,13 @@ static struct kobject *mokvar_kobj;
*/
void __init efi_mokvar_table_init(void)
{
struct efi_mokvar_table_entry __aligned(1) *mokvar_entry, *next_entry;
efi_memory_desc_t md;
void *va = NULL;
unsigned long cur_offset = 0;
unsigned long offset_limit;
unsigned long map_size = 0;
unsigned long map_size_needed = 0;
unsigned long size;
struct efi_mokvar_table_entry *mokvar_entry;
int err;

if (!efi_enabled(EFI_MEMMAP))
@@ -134,48 +133,46 @@ void __init efi_mokvar_table_init(void)
*/
err = -EINVAL;
while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
mokvar_entry = va + cur_offset;
map_size_needed = cur_offset + sizeof(*mokvar_entry);
if (map_size_needed > map_size) {
if (va)
early_memunmap(va, map_size);
/*
* Map a little more than the fixed size entry
* header, anticipating some data. It's safe to
* do so as long as we stay within current memory
* descriptor.
*/
map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
offset_limit);
va = early_memremap(efi.mokvar_table, map_size);
if (!va) {
pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
efi.mokvar_table, map_size);
return;
}
mokvar_entry = va + cur_offset;
if (va)
early_memunmap(va, sizeof(*mokvar_entry));
va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
if (!va) {
pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n",
efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
return;
}

mokvar_entry = va;
next:
/* Check for last sentinel entry */
if (mokvar_entry->name[0] == '\0') {
if (mokvar_entry->data_size != 0)
break;
err = 0;
map_size_needed = cur_offset + sizeof(*mokvar_entry);
break;
}

/* Sanity check that the name is null terminated */
size = strnlen(mokvar_entry->name,
sizeof(mokvar_entry->name));
if (size >= sizeof(mokvar_entry->name))
break;
/* Enforce that the name is NUL terminated */
mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0';

/* Advance to the next entry */
cur_offset = map_size_needed + mokvar_entry->data_size;
size = sizeof(*mokvar_entry) + mokvar_entry->data_size;
cur_offset += size;

/*
* Don't bother remapping if the current entry header and the
* next one end on the same page.
*/
next_entry = (void *)((unsigned long)mokvar_entry + size);
if (((((unsigned long)(mokvar_entry + 1) - 1) ^
((unsigned long)(next_entry + 1) - 1)) & PAGE_MASK) == 0) {
mokvar_entry = next_entry;
goto next;
}
}

if (va)
early_memunmap(va, map_size);
early_memunmap(va, sizeof(*mokvar_entry));
if (err) {
pr_err("EFI MOKvar config table is not valid\n");
return;
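Editor's note on the efi_mokvar_table_init() rewrite above: instead of growing one large early mapping, the loop now maps a single entry header at a time and only remaps when the next header spills onto a different page. A rough sketch of that page test under the same assumptions:

	/* Sketch: two records end on the same page if their last bytes share a page number. */
	static bool ends_on_same_page(unsigned long a, size_t a_len,
				      unsigned long b, size_t b_len)
	{
		return (((a + a_len - 1) ^ (b + b_len - 1)) & PAGE_MASK) == 0;
	}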
@@ -2779,7 +2779,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_input);

int gpiod_direction_input_nonotify(struct gpio_desc *desc)
{
int ret = 0;
int ret = 0, dir;

CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -2807,11 +2807,11 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
ret = gpiochip_direction_input(guard.gc,
gpio_chip_hwgpio(desc));
} else if (guard.gc->get_direction) {
ret = gpiochip_get_direction(guard.gc, gpio_chip_hwgpio(desc));
if (ret < 0)
return ret;
dir = gpiochip_get_direction(guard.gc, gpio_chip_hwgpio(desc));
if (dir < 0)
return dir;

if (ret != GPIO_LINE_DIRECTION_IN) {
if (dir != GPIO_LINE_DIRECTION_IN) {
gpiod_warn(desc,
"%s: missing direction_input() operation and line is output\n",
__func__);
@@ -2851,7 +2851,7 @@ static int gpiochip_set(struct gpio_chip *gc, unsigned int offset, int value)

static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
int val = !!value, ret = 0;
int val = !!value, ret = 0, dir;

CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -2875,12 +2875,12 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
} else {
/* Check that we are in output mode if we can */
if (guard.gc->get_direction) {
ret = gpiochip_get_direction(guard.gc,
dir = gpiochip_get_direction(guard.gc,
gpio_chip_hwgpio(desc));
if (ret < 0)
return ret;
if (dir < 0)
return dir;

if (ret != GPIO_LINE_DIRECTION_OUT) {
if (dir != GPIO_LINE_DIRECTION_OUT) {
gpiod_warn(desc,
"%s: missing direction_output() operation\n",
__func__);
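Editor's note on the gpiolib hunks above: gpiochip_get_direction() returns either a negative errno or a GPIO_LINE_DIRECTION_* value, so storing it in ret let a successful lookup leak a non-zero direction as the function's return code. Keeping the direction in its own variable restores the 0-or-errno contract. Illustrative shape only:

	/* Sketch: keep errno and direction values in separate variables. */
	int dir = gpiochip_get_direction(gc, offset);
	if (dir < 0)
		return dir;			/* propagate the error */
	if (dir != GPIO_LINE_DIRECTION_IN)
		gpiod_warn(desc, "line is not in input mode\n");	/* ret stays 0/-errno */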
@@ -1638,6 +1638,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;

/* resizing on Dell G5 SE platforms causes problems with runtime pm */
if ((amdgpu_runtime_pm != 0) &&
adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
adev->pdev->device == 0x731f &&
adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
return 0;

/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
DRM_WARN("System can't access extended configuration space, please check!!\n");

@@ -1638,22 +1638,19 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
}

mutex_lock(&adev->enforce_isolation_mutex);

for (i = 0; i < num_partitions; i++) {
if (adev->enforce_isolation[i] && !partition_values[i]) {
if (adev->enforce_isolation[i] && !partition_values[i])
/* Going from enabled to disabled */
amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
amdgpu_mes_set_enforce_isolation(adev, i, false);
} else if (!adev->enforce_isolation[i] && partition_values[i]) {
else if (!adev->enforce_isolation[i] && partition_values[i])
/* Going from disabled to enabled */
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
amdgpu_mes_set_enforce_isolation(adev, i, true);
}
adev->enforce_isolation[i] = partition_values[i];
}

mutex_unlock(&adev->enforce_isolation_mutex);

amdgpu_mes_update_enforce_isolation(adev);

return count;
}

@@ -1681,7 +1681,8 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
}

/* Fix me -- node_id is used to identify the correct MES instances in the future */
int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
uint32_t node_id, bool enable)
{
struct mes_misc_op_input op_input = {0};
int r;
@@ -1703,6 +1704,23 @@ error:
return r;
}

int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
{
int i, r = 0;

if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
mutex_lock(&adev->enforce_isolation_mutex);
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
if (adev->enforce_isolation[i])
r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
else
r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
}
mutex_unlock(&adev->enforce_isolation_mutex);
}
return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)

@@ -534,6 +534,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)

bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);

int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);

#endif /* __AMDGPU_MES_H__ */
@@ -2281,7 +2281,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor cursor;
u64 addr;
int r;
int r = 0;

if (!adev->mman.buffer_funcs_enabled)
return -EINVAL;

@@ -1633,6 +1633,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}

r = amdgpu_mes_update_enforce_isolation(adev);
if (r)
goto failure;

out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.

@@ -1743,6 +1743,10 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}

r = amdgpu_mes_update_enforce_isolation(adev);
if (r)
goto failure;

out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
@@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,

m = get_mqd(mqd);

m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;

pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);

@@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,

m = get_mqd(mqd);

m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);

@@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,

m = get_mqd(mqd);

m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);

@@ -183,6 +183,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;

m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -245,7 +248,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,

m = get_mqd(mqd);

m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
@@ -1618,75 +1618,130 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}

static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
struct amdgpu_dm_quirks {
bool aux_hpd_discon;
bool support_edp0_on_dp1;
};

static struct amdgpu_dm_quirks quirk_entries = {
.aux_hpd_discon = false,
.support_edp0_on_dp1 = false
};

static int edp0_on_dp1_callback(const struct dmi_system_id *id)
{
quirk_entries.support_edp0_on_dp1 = true;
return 0;
}

static int aux_hpd_discon_callback(const struct dmi_system_id *id)
{
quirk_entries.aux_hpd_discon = true;
return 0;
}

static const struct dmi_system_id dmi_quirk_table[] = {
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
},
},
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};

static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
{
const struct dmi_system_id *dmi_id;
int dmi_id;
struct drm_device *dev = dm->ddev;

dm->aux_hpd_discon_quirk = false;
init_data->flags.support_edp0_on_dp1 = false;

dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
if (dmi_id) {
dmi_id = dmi_check_system(dmi_quirk_table);

if (!dmi_id)
return;

if (quirk_entries.aux_hpd_discon) {
dm->aux_hpd_discon_quirk = true;
DRM_INFO("aux_hpd_discon_quirk attached\n");
drm_info(dev, "aux_hpd_discon_quirk attached\n");
}
if (quirk_entries.support_edp0_on_dp1) {
init_data->flags.support_edp0_on_dp1 = true;
drm_info(dev, "aux_hpd_discon_quirk attached\n");
}
}

@@ -1994,7 +2049,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;

retrieve_dmi_info(&adev->dm);
retrieve_dmi_info(&adev->dm, &init_data);

if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -7240,8 +7295,14 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
struct i2c_adapter *ddc;

drm_edid = drm_edid_read(connector);
if (dc_link && dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
else
ddc = &aconnector->i2c->base;

drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
@@ -7286,14 +7347,21 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
const struct drm_edid *drm_edid;
const struct edid *edid;
struct i2c_adapter *ddc;

drm_edid = drm_edid_read(connector);
if (dc_link && dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
else
ddc = &aconnector->i2c->base;

drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
@@ -894,6 +894,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
int i;

drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -920,6 +921,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);

/* Update reference counts for HPDs */
for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
}
}

/**
@@ -935,6 +942,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
int i;

drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -960,4 +968,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);

/* Update reference counts for HPDs */
for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
}
}
@@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
return false;

return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
/* Temporarily disable PSR-SU to avoid glitches */
return false;
}

/*
@@ -3042,6 +3042,7 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;

mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
@@ -3049,6 +3050,8 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
mutex_unlock(&adev->pm.mutex);

return ret;
}

@@ -3066,32 +3069,42 @@ static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;

cancel_work_sync(&adev->pm.dpm.thermal.work);

if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm_enabled = false;
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
mutex_unlock(&adev->pm.mutex);
}
return 0;
}

static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
int ret = 0;
struct amdgpu_device *adev = ip_block->adev;

if (adev->pm.dpm_enabled) {
if (!amdgpu_dpm)
return 0;

if (!adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
/* asic init will reset to the boot state */
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
if (ret) {
adev->pm.dpm_enabled = false;
else
} else {
adev->pm.dpm_enabled = true;
if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
}
mutex_unlock(&adev->pm.mutex);
}
return 0;
return ret;
}

static bool kv_dpm_is_idle(void *handle)
@@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int temp, size = sizeof(temp);

if (!adev->pm.dpm_enabled)
return;
mutex_lock(&adev->pm.mutex);

if (!adev->pm.dpm_enabled) {
mutex_unlock(&adev->pm.mutex);
return;
}
if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp,
@@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
adev->pm.dpm.state = dpm_state;

amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
}
@@ -7786,6 +7786,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;

mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
@@ -7793,6 +7794,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
mutex_unlock(&adev->pm.mutex);
return ret;
}

@@ -7810,32 +7812,44 @@ static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;

cancel_work_sync(&adev->pm.dpm.thermal.work);

if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm_enabled = false;
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
mutex_unlock(&adev->pm.mutex);
}

return 0;
}

static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
int ret = 0;
struct amdgpu_device *adev = ip_block->adev;

if (adev->pm.dpm_enabled) {
if (!amdgpu_dpm)
return 0;

if (!adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
if (ret) {
adev->pm.dpm_enabled = false;
else
} else {
adev->pm.dpm_enabled = true;
if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
}
mutex_unlock(&adev->pm.mutex);
}
return 0;

return ret;
}

static bool si_dpm_is_idle(void *handle)
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT

#include <linux/fb.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -70,37 +71,102 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
.fb_destroy = drm_fbdev_dma_fb_destroy,
};

FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area);

static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
void *shadow = info->screen_buffer;

if (!dma->map_noncoherent)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
if (!fb_helper->dev)
return;

return fb_deferred_io_mmap(info, vma);
if (info->fbdefio)
fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
vfree(shadow);

drm_client_buffer_vunmap(fb_helper->buffer);
drm_client_framebuffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}

static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
DRM_FB_HELPER_DEFAULT_OPS,
__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
.fb_destroy = drm_fbdev_dma_fb_destroy,
.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};

/*
* struct drm_fb_helper
*/

static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip,
struct iosys_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
size_t offset = clip->y1 * fb->pitches[0];
size_t len = clip->x2 - clip->x1;
unsigned int y;
void *src;

switch (drm_format_info_bpp(fb->format, 0)) {
case 1:
offset += clip->x1 / 8;
len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
break;
case 2:
offset += clip->x1 / 4;
len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
break;
case 4:
offset += clip->x1 / 2;
len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
break;
default:
offset += clip->x1 * fb->format->cpp[0];
len *= fb->format->cpp[0];
break;
}

src = fb_helper->info->screen_buffer + offset;
iosys_map_incr(dst, offset); /* go to first pixel within clip rect */

for (y = clip->y1; y < clip->y2; y++) {
iosys_map_memcpy_to(dst, 0, src, len);
iosys_map_incr(dst, fb->pitches[0]);
src += fb->pitches[0];
}
}

static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip)
{
struct drm_client_buffer *buffer = fb_helper->buffer;
struct iosys_map dst;

/*
* For fbdev emulation, we only have to protect against fbdev modeset
* operations. Nothing else will involve the client buffer's BO. So it
* is sufficient to acquire struct drm_fb_helper.lock here.
*/
mutex_lock(&fb_helper->lock);

dst = buffer->map;
drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);

mutex_unlock(&fb_helper->lock);

return 0;
}
static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
@@ -112,6 +178,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
return 0;

if (helper->fb->funcs->dirty) {
ret = drm_fbdev_dma_damage_blit(helper, clip);
if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
return ret;

ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
@@ -128,14 +198,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
* struct drm_fb_helper
*/

static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer = fb_helper->buffer;
struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
struct drm_framebuffer *fb = fb_helper->fb;
struct fb_info *info = fb_helper->info;
struct iosys_map map = buffer->map;

info->fbops = &drm_fbdev_dma_fb_ops;

/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
if (dma_obj->map_noncoherent)
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
}
info->fix.smem_len = info->screen_size;

return 0;
}

static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_buffer *buffer = fb_helper->buffer;
struct fb_info *info = fb_helper->info;
size_t screen_size = buffer->gem->size;
void *screen_buffer;
int ret;

/*
* Deferred I/O requires struct page for framebuffer memory,
* which is not guaranteed for all DMA ranges. We thus create
* a shadow buffer in system memory.
*/
screen_buffer = vzalloc(screen_size);
if (!screen_buffer)
return -ENOMEM;

info->fbops = &drm_fbdev_dma_shadowed_fb_ops;

/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_buffer = screen_buffer;
info->fix.smem_len = screen_size;

fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_vfree;

return 0;

err_vfree:
vfree(screen_buffer);
return ret;
}

int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
bool use_deferred_io = false;
struct drm_client_buffer *buffer;
struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
struct fb_info *info;
u32 format;
@@ -152,19 +288,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
dma_obj = to_drm_gem_dma_obj(buffer->gem);

fb = buffer->fb;

/*
* Deferred I/O requires struct page for framebuffer memory,
* which is not guaranteed for all DMA ranges. We thus only
* install deferred I/O if we have a framebuffer that requires
* it.
*/
if (fb->funcs->dirty)
use_deferred_io = true;

ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
@@ -185,45 +311,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,

drm_fb_helper_fill_info(info, fb_helper, sizes);

if (use_deferred_io)
info->fbops = &drm_fbdev_dma_deferred_fb_ops;
if (fb->funcs->dirty)
ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
else
info->fbops = &drm_fbdev_dma_fb_ops;

/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
if (dma_obj->map_noncoherent)
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
}
info->fix.smem_len = info->screen_size;

/*
* Only set up deferred I/O if the screen buffer supports
* it. If this disagrees with the previous test for ->dirty,
* mmap on the /dev/fb file might not work correctly.
*/
if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;

if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
use_deferred_io = false;
}

/* deferred I/O */
if (use_deferred_io) {
fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_drm_fb_helper_release_info;
}
ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
if (ret)
goto err_drm_fb_helper_release_info;

return 0;
@@ -866,7 +866,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
encoder->base.base.id, encoder->base.name);

if (!mst_pipe_mask && dp128b132b_pipe_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

/*
* If we don't have 8b/10b MST, but have more than one
@@ -878,7 +878,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
* we don't expect MST to have been enabled at that point, and
* can assume it's SST.
*/
if (hweight8(dp128b132b_pipe_mask) > 1 || intel_dp->is_mst)
if (hweight8(dp128b132b_pipe_mask) > 1 ||
intel_dp_mst_encoder_active_links(dig_port))
mst_pipe_mask = dp128b132b_pipe_mask;
}

@@ -4151,13 +4152,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

/*
* If this is true, we know we're being called from mst stream
* encoder's ->get_config().
*/
if (intel_dp->is_mst)
if (intel_dp_mst_encoder_active_links(dig_port))
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only OR MIT
# Copyright (c) 2023 Imagination Technologies Ltd.

subdir-ccflags-y := -I$(src)

powervr-y := \
pvr_ccb.o \
pvr_cccb.o \

@@ -775,7 +775,6 @@ nouveau_connector_force(struct drm_connector *connector)
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
connector->name);
connector->status = connector_status_disconnected;
return;
}

@@ -67,7 +67,7 @@ static u16 lerp_u16(u16 a, u16 b, s64 t)

s64 delta = drm_fixp_mul(b_fp - a_fp, t);

return drm_fixp2int(a_fp + delta);
return drm_fixp2int_round(a_fp + delta);
}

static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
@@ -53,7 +53,6 @@

#define RING_CTL(base) XE_REG((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */

#define RING_START_UDW(base) XE_REG((base) + 0x48)

@@ -1248,6 +1248,8 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)

if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);

@@ -1689,7 +1689,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];

stream->sample = param->sample;
stream->periodic = param->period_exponent > 0;
stream->periodic = param->period_exponent >= 0;
stream->period_exponent = param->period_exponent;
stream->no_preempt = param->no_preempt;
stream->wait_num_reports = param->wait_num_reports;
@@ -1970,6 +1970,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
}

param.xef = xef;
param.period_exponent = -1;
ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
if (ret)
return ret;
@@ -2024,7 +2025,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
goto err_exec_q;
}

if (param.period_exponent > 0) {
if (param.period_exponent >= 0) {
u64 oa_period, oa_freq_hz;

/* Requesting samples from OAG buffer is a privileged operation */
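Editor's note on the xe_oa hunks above: period_exponent now uses -1 as the explicit "not requested" sentinel, set before user extensions are parsed, so the periodic checks move from "> 0" to ">= 0" and an exponent of 0 becomes a valid request. Minimal sketch of the sentinel pattern (helper names are illustrative, not the driver API):

	int period_exponent = -1;			/* sentinel: sampling not requested */

	parse_open_extensions(&period_exponent);	/* hypothetical parser */
	if (period_exponent >= 0)
		configure_periodic_sampling(period_exponent);	/* 0 is a legal exponent */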
@@ -666,20 +666,33 @@ int xe_vm_userptr_pin(struct xe_vm *vm)

/* Collect invalidated userptrs */
spin_lock(&vm->userptr.invalidated_lock);
xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
list_del_init(&uvma->userptr.invalidate_link);
list_move_tail(&uvma->userptr.repin_link,
&vm->userptr.repin_list);
list_add_tail(&uvma->userptr.repin_link,
&vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);

/* Pin and move to temporary list */
/* Pin and move to bind list */
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
if (err == -EFAULT) {
list_del_init(&uvma->userptr.repin_link);
/*
* We might have already done the pin once already, but
* then had to retry before the re-bind happened, due
* some other condition in the caller, but in the
* meantime the userptr got dinged by the notifier such
* that we need to revalidate here, but this time we hit
* the EFAULT. In such a case make sure we remove
* ourselves from the rebind list to avoid going down in
* flames.
*/
if (!list_empty(&uvma->vma.combined_links.rebind))
list_del_init(&uvma->vma.combined_links.rebind);

/* Wait for pending binds */
xe_vm_lock(vm, false);
@@ -690,10 +703,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
err = xe_vm_invalidate_vma(&uvma->vma);
xe_vm_unlock(vm);
if (err)
return err;
break;
} else {
if (err < 0)
return err;
if (err)
break;

list_del_init(&uvma->userptr.repin_link);
list_move_tail(&uvma->vma.combined_links.rebind,
@@ -701,7 +714,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
}
}

return 0;
if (err) {
down_write(&vm->userptr.notifier_lock);
spin_lock(&vm->userptr.invalidated_lock);
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
list_del_init(&uvma->userptr.repin_link);
list_move_tail(&uvma->userptr.invalidate_link,
&vm->userptr.invalidated);
}
spin_unlock(&vm->userptr.invalidated_lock);
up_write(&vm->userptr.notifier_lock);
}
return err;
}

/**
@@ -1066,6 +1091,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);

spin_lock(&vm->userptr.invalidated_lock);
xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
} else if (!xe_vma_is_null(vma)) {
@@ -293,6 +293,7 @@ static irqreturn_t amd_asf_irq_handler(int irq, void *ptr)
amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true);
}

iowrite32(irq, dev->eoi_base);
return IRQ_HANDLED;
}

@@ -10,6 +10,7 @@
* Rewritten for mainline by Binbin Zhou <zhoubinbin@loongson.cn>
*/

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
@@ -26,7 +27,8 @@
#include <linux/units.h>

/* I2C Registers */
#define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */
#define I2C_LS2X_PRER_LO 0x0 /* Freq Division Low Byte Register */
#define I2C_LS2X_PRER_HI 0x1 /* Freq Division High Byte Register */
#define I2C_LS2X_CTR 0x2 /* Control Register */
#define I2C_LS2X_TXR 0x3 /* Transport Data Register */
#define I2C_LS2X_RXR 0x3 /* Receive Data Register */
@@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id)
*/
static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
{
u16 val;
struct i2c_timings *t = &priv->i2c_t;
struct device *dev = priv->adapter.dev.parent;
u32 acpi_speed = i2c_acpi_find_bus_speed(dev);
@@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
else
t->bus_freq_hz = LS2X_I2C_FREQ_STD;

/* Calculate and set i2c frequency. */
writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1,
priv->base + I2C_LS2X_PRER);
/*
* According to the chip manual, we can only access the registers as bytes,
* otherwise the high bits will be truncated.
* So set the I2C frequency with a sequential writeb() instead of writew().
*/
val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1;
writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO);
writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI);
}

static void ls2x_i2c_init(struct ls2x_i2c_priv *priv)
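Editor's note on the ls2x hunk above: per the comment, the controller only tolerates byte-sized register accesses, so the 16-bit prescaler is now written as two writeb() calls instead of one writew(), which would truncate the high byte. Equivalent sketch with plain shifts in place of FIELD_GET():

	u16 val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1;

	writeb(val & 0xff, priv->base + I2C_LS2X_PRER_LO);	/* low byte */
	writeb(val >> 8, priv->base + I2C_LS2X_PRER_HI);	/* high byte */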
@@ -2554,6 +2554,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
if (irq < 0)
return irq;

/*
* Disable the interrupt to avoid the interrupt handler being triggered
* incorrectly by the asynchronous interrupt status since the machine
* might do a warm reset during the last smbus/i2c transfer session.
*/
npcm_i2c_int_enable(bus, false);

ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
dev_name(bus->dev), bus);
if (ret)
@@ -56,6 +56,7 @@
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/tsc.h>
#include <asm/fpu/api.h>

#define INTEL_IDLE_VERSION "0.5.1"
@@ -1799,6 +1800,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
if (intel_idle_state_needs_timer_stop(state))
state->flags |= CPUIDLE_FLAG_TIMER_STOP;

if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle");

state->enter = intel_idle;
state->enter_s2idle = intel_idle_s2idle;
}
@@ -187,7 +187,6 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
struct auxiliary_device *adev;
struct notifier_block nb;
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;

@@ -348,8 +348,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
bnxt_re_copy_err_stats(rdev, stats, err_s);
if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags) &&
!rdev->is_virtfn) {
if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
rdev->is_virtfn)) {
rc = bnxt_re_get_ext_stat(rdev, stats);
if (rc) {
clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,

@@ -1870,6 +1870,8 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
srq->srq_limit = srq_init_attr->attr.srq_limit;
srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
nq = &rdev->nqr->nq[0];

if (udata) {

@@ -396,11 +396,16 @@ free_dcb:

static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
{
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
struct bnxt_re_dcb_work *dcb_work;
struct bnxt_re_dev *rdev;
u32 data1, data2;
u16 event_id;

rdev = en_info->rdev;
if (!rdev)
return;

event_id = le16_to_cpu(cmpl->event_id);
data1 = le32_to_cpu(cmpl->event_data1);
data2 = le32_to_cpu(cmpl->event_data2);
@@ -433,6 +438,8 @@ static void bnxt_re_stop_irq(void *handle, bool reset)
int indx;

rdev = en_info->rdev;
if (!rdev)
return;
rcfw = &rdev->rcfw;

if (reset) {
@@ -461,6 +468,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
int indx, rc;

rdev = en_info->rdev;
if (!rdev)
return;
msix_ent = rdev->nqr->msix_entries;
rcfw = &rdev->rcfw;
if (!ent) {
@@ -1350,7 +1359,6 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev,
return NULL;
}
/* Default values */
rdev->nb.notifier_call = NULL;
rdev->netdev = en_dev->net;
rdev->en_dev = en_dev;
rdev->adev = adev;
@@ -2345,15 +2353,6 @@ exit:
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
struct auxiliary_device *aux_dev)
{
if (rdev->nb.notifier_call) {
unregister_netdevice_notifier(&rdev->nb);
rdev->nb.notifier_call = NULL;
} else {
/* If notifier is null, we should have already done a
* clean up before coming here.
*/
return;
}
bnxt_re_setup_cc(rdev, false);
ib_unregister_device(&rdev->ibdev);
bnxt_re_dev_uninit(rdev, op_type);
@@ -2433,6 +2432,7 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx",
__func__, en_dev->en_state);
bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
bnxt_re_update_en_info_rdev(NULL, en_info, adev);
mutex_unlock(&bnxt_re_mutex);

return 0;

@@ -547,6 +547,14 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}

static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
u16 flags, bool virtfn)
{
/* ext stats supported if cap flag is set AND is a PF OR a Thor2 VF */
return (_is_ext_stats_supported(flags) &&
((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn)));
}

static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
{
return dev_cap_flags &

@@ -1286,10 +1286,8 @@ static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
return tx_timeout;
}

static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
u32 timeout = 0;

do {
@@ -1299,8 +1297,9 @@ static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
} while (++timeout < tx_timeout);
}

static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc,
int num, u32 tx_timeout)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
@@ -1309,8 +1308,6 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
int ret;
int i;

spin_lock_bh(&csq->lock);

tail = csq->head;

for (i = 0; i < num; i++) {
@@ -1324,22 +1321,17 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,

atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);

hns_roce_wait_csq_done(hr_dev, le16_to_cpu(desc->opcode));
hns_roce_wait_csq_done(hr_dev, tx_timeout);
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
/* check the result of hardware write back */
desc[i] = csq->desc[tail++];
desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
tail = 0;

desc_ret = le16_to_cpu(desc[i].retval);
if (likely(desc_ret == CMD_EXEC_SUCCESS))
continue;

dev_err_ratelimited(hr_dev->dev,
"Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
desc->opcode, desc_ret);
ret = hns_roce_cmd_err_convert_errno(desc_ret);
}
} else {
@@ -1354,14 +1346,54 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
ret = -EAGAIN;
}

spin_unlock_bh(&csq->lock);

if (ret)
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);

return ret;
}

static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
u16 opcode = le16_to_cpu(desc->opcode);
u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT;
u32 rsv_tail;
int ret;
int i;

while (try_cnt) {
try_cnt--;

spin_lock_bh(&csq->lock);
rsv_tail = csq->head;
ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout);
if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME &&
try_cnt) {
spin_unlock_bh(&csq->lock);
mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC);
continue;
}

for (i = 0; i < num; i++) {
desc[i] = csq->desc[rsv_tail++];
if (rsv_tail == csq->desc_num)
rsv_tail = 0;
}
spin_unlock_bh(&csq->lock);
break;
}

if (ret)
dev_err_ratelimited(hr_dev->dev,
"Cmdq IO error, opcode = 0x%x, return = %d.\n",
opcode, ret);

return ret;
}

static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{

@@ -230,6 +230,8 @@ enum hns_roce_opcode_type {
};

#define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000
#define HNS_ROCE_OPC_POST_MB_TRY_CNT 8
#define HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC 5
struct hns_roce_cmdq_tx_timeout_map {
u16 opcode;
u32 tx_timeout;
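
Note: the hns_roce hunks above split the old __hns_roce_cmq_send() into a single-shot __hns_roce_cmq_send_one() and a wrapper that retries only the POST_MB opcode, only on -ETIME, for at most HNS_ROCE_OPC_POST_MB_TRY_CNT attempts with an HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC delay between attempts; the wrapper also snapshots the ring head so it can copy the hardware's write-back descriptors out after a successful attempt. A minimal userspace sketch of just the bounded-retry part (names and the fake submit function are made up for illustration):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    #define DEMO_TRY_CNT      8  /* mirrors HNS_ROCE_OPC_POST_MB_TRY_CNT */
    #define DEMO_RETRY_GAP_MS 5  /* mirrors HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC */

    /* Fake one-shot submit: times out twice, then succeeds. */
    static int demo_send_one(void)
    {
        static int calls;

        return (++calls < 3) ? -ETIME : 0;
    }

    static int demo_send_with_retry(void)
    {
        int try_cnt = DEMO_TRY_CNT;
        int ret;

        while (try_cnt) {
            try_cnt--;
            ret = demo_send_one();
            if (ret == -ETIME && try_cnt) {
                usleep(DEMO_RETRY_GAP_MS * 1000);
                continue; /* retry only on timeout and only while attempts remain */
            }
            break; /* success or a non-retryable error */
        }
        return ret;
    }

    int main(void)
    {
        printf("send returned %d\n", demo_send_with_retry()); /* 0 after two retries */
        return 0;
    }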

@@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,

req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
req.num_resources = 1;
req.alignment = 1;
req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;

/* Have GDMA start searching from 0 */
req.allocated_resources = 0;
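
Note: the mana hunk above requests an aligned doorbell index instead of alignment 1. Assuming MANA_PAGE_SIZE is the device's fixed 4 KiB doorbell-page unit (it is defined as 4 KiB in current kernels, but that is an assumption here, not shown in the diff), PAGE_SIZE / MANA_PAGE_SIZE is 1 on 4 KiB-page kernels (no behavioural change) and 16 on 64 KiB-page kernels, so the allocated doorbell index lands on a system-page boundary. The arithmetic, as a standalone sketch:

    #include <stdio.h>

    /* Assumed device doorbell-page unit (4 KiB). */
    #define DEMO_MANA_PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long page_sizes[] = { 4096UL, 16384UL, 65536UL };

        for (unsigned int i = 0; i < sizeof(page_sizes) / sizeof(page_sizes[0]); i++) {
            unsigned long align = page_sizes[i] / DEMO_MANA_PAGE_SIZE;

            /* req.alignment in doorbell-page units for this system page size */
            printf("PAGE_SIZE %6lu -> alignment %2lu doorbell pages\n",
                   page_sizes[i], align);
        }
        return 0;
    }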

@@ -67,7 +67,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.tclass = grh->traffic_class;
}

ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
ah->av.stat_rate_sl =
(mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);

if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (init_attr->xmit_slave)
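
Note: in the mlx5 AH hunk, the shift by 4 suggests av.stat_rate_sl carries the static rate in its upper nibble, with the lower nibble left for the SL set elsewhere (an assumed layout inferred from the shift, not stated in the diff); the change routes the raw rdma_ah_get_static_rate() value through mlx5r_ib_rate() so the device is given a rate encoding it supports rather than the unconverted IB enum. A tiny sketch of that assumed nibble packing:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: bits 7:4 = static rate, bits 3:0 = service level. */
    static uint8_t pack_stat_rate_sl(uint8_t rate, uint8_t sl)
    {
        return (uint8_t)((rate << 4) | (sl & 0xf));
    }

    int main(void)
    {
        /* e.g. a device-mapped rate of 0x8 combined with service level 3 */
        printf("stat_rate_sl = 0x%02x\n", pack_stat_rate_sl(0x8, 3)); /* 0x83 */
        return 0;
    }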

@@ -546,6 +546,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
bool new = false;
int err;

if (!counter->id) {
@@ -560,6 +561,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return err;
counter->id =
MLX5_GET(alloc_q_counter_out, out, counter_set_id);
new = true;
}

err = mlx5_ib_qp_set_counter(qp, counter);
@@ -569,8 +571,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return 0;

fail_set_counter:
mlx5_ib_counter_dealloc(counter);
counter->id = 0;
if (new) {
mlx5_ib_counter_dealloc(counter);
counter->id = 0;
}

return err;
}
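
Note: the counters hunk above fixes error-path ownership in mlx5_ib_counter_bind_qp(): on failure the q-counter is deallocated only when this call allocated it (tracked by the new "new" flag), while a counter the caller already owned is left intact. A standalone sketch of the same allocate-on-demand, free-only-what-you-allocated pattern (all names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_counter { unsigned int id; }; /* id == 0 means "not allocated" */

    static int demo_alloc_counter(struct demo_counter *c)    { c->id = 7; return 0; }
    static void demo_dealloc_counter(struct demo_counter *c) { c->id = 0; }
    static int demo_set_counter(void)                        { return -1; /* force failure */ }

    static int demo_bind(struct demo_counter *counter)
    {
        bool new = false;
        int err;

        if (!counter->id) { /* allocate on demand */
            err = demo_alloc_counter(counter);
            if (err)
                return err;
            new = true;
        }

        err = demo_set_counter();
        if (err)
            goto fail_set_counter;
        return 0;

    fail_set_counter:
        if (new) { /* only free what this call allocated */
            demo_dealloc_counter(counter);
            counter->id = 0;
        }
        return err;
    }

    int main(void)
    {
        struct demo_counter pre_existing = { .id = 3 };

        demo_bind(&pre_existing);
        printf("pre-existing counter id after failed bind: %u\n", pre_existing.id); /* 3 */
        return 0;
    }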

Some files were not shown because too many files have changed in this diff.