
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.16-rc6).

No conflicts.

Adjacent changes:

Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
  0a12c435a1 ("dt-bindings: net: sun8i-emac: Add A100 EMAC compatible")
  b3603c0466 ("dt-bindings: net: sun8i-emac: Rename A523 EMAC0 to GMAC0")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2025-07-10 10:08:47 -07:00
commit 3321e97eab
340 changed files with 3349 additions and 1829 deletions

View File

@@ -56,7 +56,7 @@ Date:		January 2009
 Contact:	Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 		The /sys/devices/.../async attribute allows the user space to
-		enable or diasble the device's suspend and resume callbacks to
+		enable or disable the device's suspend and resume callbacks to
 		be executed asynchronously (ie. in separate threads, in parallel
 		with the main suspend/resume thread) during system-wide power
 		transitions (eg. suspend to RAM, hibernation).

View File

@@ -584,6 +584,7 @@ What:		/sys/devices/system/cpu/vulnerabilities
 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
 		/sys/devices/system/cpu/vulnerabilities/srbds
+		/sys/devices/system/cpu/vulnerabilities/tsa
 		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>

View File

@@ -711,7 +711,7 @@ Description:	This file shows the thin provisioning type. This is one of
 		The file is read only.

-What:		/sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resourse_count
+What:		/sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resource_count
 Date:		February 2018
 Contact:	Stanislav Nijnikov <stanislav.nijnikov@wdc.com>
 Description:	This file shows the total physical memory resources. This is

View File

@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
 combination with a microcode update. The microcode clears the affected CPU
 buffers when the VERW instruction is executed.

-Kernel reuses the MDS function to invoke the buffer clearing:
-
-	mds_clear_cpu_buffers()
+Kernel does the buffer clearing with x86_clear_cpu_buffers().

 On MDS affected CPUs, the kernel already invokes CPU buffer clear on
 kernel/userspace, hypervisor/guest and C-state (idle) transitions. No

View File

@@ -7488,6 +7488,19 @@
 			having this key zero'ed is acceptable. E.g. in testing
 			scenarios.

+	tsa=		[X86] Control mitigation for Transient Scheduler
+			Attacks on AMD CPUs. Search the following in your
+			favourite search engine for more details:
+
+			"Technical guidance for mitigating transient scheduler
+			attacks".
+
+			off	- disable the mitigation
+			on	- enable the mitigation (default)
+			user	- mitigate only user/kernel transitions
+			vm	- mitigate only guest/host transitions
+
 	tsc=		Disable clocksource stability checks for TSC.
 			Format: <string>
 			[x86] reliable: mark tsc clocksource as reliable, this
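As a usage illustration (a hypothetical boot setup, not part of the commit): like other mitigation switches, the new parameter is appended to the kernel command line, e.g. via a GRUB configuration, then the GRUB config is regenerated and the machine rebooted:

    # /etc/default/grub (illustrative values)
    GRUB_CMDLINE_LINUX="quiet tsa=user"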

View File

@@ -93,7 +93,7 @@ enters a C-state.

 	The kernel provides a function to invoke the buffer clearing:

-	mds_clear_cpu_buffers()
+	x86_clear_cpu_buffers()

 Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
 Other than CFLAGS.ZF, this macro doesn't clobber any registers.

@@ -185,9 +185,9 @@ Mitigation points
    idle clearing would be a window dressing exercise and is therefore not
    activated.

-   The invocation is controlled by the static key mds_idle_clear which is
-   switched depending on the chosen mitigation mode and the SMT state of
-   the system.
+   The invocation is controlled by the static key cpu_buf_idle_clear which is
+   switched depending on the chosen mitigation mode and the SMT state of the
+   system.

    The buffer clear is only invoked before entering the C-State to prevent
    that stale data from the idling CPU from spilling to the Hyper-Thread

View File

@@ -26,7 +26,8 @@ properties:
       - const: realtek,rtl9301-i2c

   reg:
-    description: Register offset and size this I2C controller.
+    items:
+      - description: Register offset and size this I2C controller.

   "#address-cells":
     const: 1

View File

@@ -4,14 +4,14 @@
 $id: http://devicetree.org/schemas/input/elan,ekth6915.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#

-title: Elan eKTH6915 touchscreen controller
+title: Elan I2C-HID touchscreen controllers

 maintainers:
   - Douglas Anderson <dianders@chromium.org>

 description:
-  Supports the Elan eKTH6915 touchscreen controller.
-  This touchscreen controller uses the i2c-hid protocol with a reset GPIO.
+  Supports the Elan eKTH6915 and other I2C-HID touchscreen controllers.
+  These touchscreen controller use the i2c-hid protocol with a reset GPIO.

 allOf:
   - $ref: /schemas/input/touchscreen/touchscreen.yaml#

@@ -23,12 +23,14 @@ properties:
       - enum:
           - elan,ekth5015m
       - const: elan,ekth6915
+      - items:
+          - const: elan,ekth8d18
+          - const: elan,ekth6a12nay
       - enum:
           - elan,ekth6915
           - elan,ekth6a12nay

-  reg:
-    const: 0x10
+  reg: true

   interrupts:
     maxItems: 1

View File

@@ -24,7 +24,7 @@ properties:
           - allwinner,sun50i-a100-emac
           - allwinner,sun50i-h6-emac
           - allwinner,sun50i-h616-emac0
-          - allwinner,sun55i-a523-emac0
+          - allwinner,sun55i-a523-gmac0
       - const: allwinner,sun50i-a64-emac

   reg:

View File

@@ -7196,6 +7196,10 @@ The valid value for 'flags' is:
 				u64 leaf;
 				u64 r11, r12, r13, r14;
 			} get_tdvmcall_info;
+			struct {
+				u64 ret;
+				u64 vector;
+			} setup_event_notify;
 		};
 	} tdx;

@@ -7210,21 +7214,24 @@ number from register R11. The remaining field of the union provide the
 inputs and outputs of the TDVMCALL. Currently the following values of
 ``nr`` are defined:

 * ``TDVMCALL_GET_QUOTE``: the guest has requested to generate a TD-Quote
   signed by a service hosting TD-Quoting Enclave operating on the host.
   Parameters and return value are in the ``get_quote`` field of the union.
   The ``gpa`` field and ``size`` specify the guest physical address
   (without the shared bit set) and the size of a shared-memory buffer, in
   which the TDX guest passes a TD Report. The ``ret`` field represents
   the return value of the GetQuote request. When the request has been
   queued successfully, the TDX guest can poll the status field in the
   shared-memory area to check whether the Quote generation is completed or
   not. When completed, the generated Quote is returned via the same buffer.

 * ``TDVMCALL_GET_TD_VM_CALL_INFO``: the guest has requested the support
   status of TDVMCALLs. The output values for the given leaf should be
   placed in fields from ``r11`` to ``r14`` of the ``get_tdvmcall_info``
   field of the union.

+* ``TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT``: the guest has requested to
+  set up a notification interrupt for vector ``vector``.

 KVM may add support for more values in the future that may cause a userspace
 exit, even without calls to ``KVM_ENABLE_CAP`` or similar. In this case,
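A minimal sketch of how a VMM might service these exits, using only the ``nr`` selector and union fields quoted above (the dispatch skeleton, the saved vector variable, and the use of 0 as a success status are illustrative, not from the commit):

    /* Illustrative VMM-side handler for a TDX TDVMCALL userspace exit;
     * "run" is the vcpu's struct kvm_run. */
    static unsigned long vmm_notify_vector;	/* hypothetical storage */

    switch (run->tdx.nr) {
    case TDVMCALL_GET_TD_VM_CALL_INFO:
        /* Advertise no extra user-handled TDVMCALLs for this leaf. */
        run->tdx.get_tdvmcall_info.r11 = 0;
        run->tdx.get_tdvmcall_info.r12 = 0;
        run->tdx.get_tdvmcall_info.r13 = 0;
        run->tdx.get_tdvmcall_info.r14 = 0;
        break;
    case TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT:
        /* Remember the requested vector, report success to the guest. */
        vmm_notify_vector = run->tdx.setup_event_notify.vector;
        run->tdx.setup_event_notify.ret = 0;
        break;
    }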

View File

@@ -79,7 +79,20 @@ to be configured to the TDX guest.
   struct kvm_tdx_capabilities {
 	__u64 supported_attrs;
 	__u64 supported_xfam;
-	__u64 reserved[254];
+
+	/* TDG.VP.VMCALL hypercalls executed in kernel and forwarded to
+	 * userspace, respectively
+	 */
+	__u64 kernel_tdvmcallinfo_1_r11;
+	__u64 user_tdvmcallinfo_1_r11;
+
+	/* TDG.VP.VMCALL instruction executions subfunctions executed in kernel
+	 * and forwarded to userspace, respectively
+	 */
+	__u64 kernel_tdvmcallinfo_1_r12;
+	__u64 user_tdvmcallinfo_1_r12;
+
+	__u64 reserved[250];

 	/* Configurable CPUID bits for userspace */
 	struct kvm_cpuid2 cpuid;
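Note that the four new fields are carved out of the old reserved space (4 + 250 = 254 eight-byte slots), so the offset of ``cpuid`` and the overall struct size are unchanged. A compile-time check of that arithmetic (illustrative, not part of the commit):

    #include <stdint.h>

    /* four new 8-byte fields plus reserved[250] == the old reserved[254] */
    _Static_assert(4 * sizeof(uint64_t) + 250 * sizeof(uint64_t) ==
                   254 * sizeof(uint64_t), "kvm_tdx_capabilities layout preserved");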

View File

@@ -36,7 +36,7 @@ Offset	Size (in bytes)	Content

 The WMI object flags control whether the method or notification ID is used:

-- 0x1: Data block usage is expensive and must be explicitly enabled/disabled.
+- 0x1: Data block is expensive to collect.
 - 0x2: Data block contains WMI methods.
 - 0x4: Data block contains ASCIZ string.
 - 0x8: Data block describes a WMI event, use notification ID instead

@@ -83,14 +83,18 @@ event as hexadecimal value. Their first parameter is an integer with a value
 of 0 if the WMI event should be disabled, other values will enable
 the WMI event.

+Those ACPI methods are always called even for WMI events not registered as
+being expensive to collect to match the behavior of the Windows driver.

 WCxx ACPI methods
 -----------------
-Similar to the ``WExx`` ACPI methods, except that it controls data collection
-instead of events and thus the last two characters of the ACPI method name are
-the method ID of the data block to enable/disable.
+Similar to the ``WExx`` ACPI methods, except that instead of WMI events it controls
+data collection of data blocks registered as being expensive to collect. Thus the
+last two characters of the ACPI method name are the method ID of the data block
+to enable/disable.

 Those ACPI methods are also called before setting data blocks to match the
-behaviour of the Windows driver.
+behavior of the Windows driver.

 _WED ACPI method
 ----------------
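To make the WCxx naming rule above concrete, a small standalone sketch (the method ID "AB" is a made-up example):

    #include <stdio.h>

    int main(void)
    {
        char method[5];

        /* "WC" + the data block's two-character method ID, e.g. "AB" */
        snprintf(method, sizeof(method), "WC%.2s", "AB");
        puts(method); /* prints "WCAB" */
        return 0;
    }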

View File

@@ -4186,6 +4186,7 @@ F:	include/linux/cpumask_types.h
 F:	include/linux/find.h
 F:	include/linux/nodemask.h
 F:	include/linux/nodemask_types.h
+F:	include/uapi/linux/bits.h
 F:	include/vdso/bits.h
 F:	lib/bitmap-str.c
 F:	lib/bitmap.c

@@ -4198,6 +4199,7 @@ F:	tools/include/linux/bitfield.h
 F:	tools/include/linux/bitmap.h
 F:	tools/include/linux/bits.h
 F:	tools/include/linux/find.h
+F:	tools/include/uapi/linux/bits.h
 F:	tools/include/vdso/bits.h
 F:	tools/lib/bitmap.c
 F:	tools/lib/find_bit.c

@@ -16843,8 +16845,8 @@ F:	include/dt-bindings/clock/mobileye,eyeq5-clk.h
 MODULE SUPPORT
 M:	Luis Chamberlain <mcgrof@kernel.org>
 M:	Petr Pavlu <petr.pavlu@suse.com>
+M:	Daniel Gomez <da.gomez@kernel.org>
 R:	Sami Tolvanen <samitolvanen@google.com>
-R:	Daniel Gomez <da.gomez@samsung.com>
 L:	linux-modules@vger.kernel.org
 L:	linux-kernel@vger.kernel.org
 S:	Maintained

@@ -17243,10 +17245,10 @@ F:	drivers/rtc/rtc-ntxec.c
 F:	include/linux/mfd/ntxec.h

 NETRONOME ETHERNET DRIVERS
-M:	Louis Peens <louis.peens@corigine.com>
 R:	Jakub Kicinski <kuba@kernel.org>
+R:	Simon Horman <horms@kernel.org>
 L:	oss-drivers@corigine.com
-S:	Maintained
+S:	Odd Fixes
 F:	drivers/net/ethernet/netronome/

 NETWORK BLOCK DEVICE (NBD)

@@ -19622,8 +19624,7 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
 F:	drivers/pinctrl/intel/

 PIN CONTROLLER - KEEMBAY
-M:	Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>
-S:	Supported
+S:	Orphan
 F:	drivers/pinctrl/pinctrl-keembay*

 PIN CONTROLLER - MEDIATEK

@@ -20176,21 +20177,15 @@ S:	Supported
 F:	Documentation/devicetree/bindings/soc/qcom/qcom,apr*
 F:	Documentation/devicetree/bindings/sound/qcom,*
 F:	drivers/soc/qcom/apr.c
-F:	include/dt-bindings/sound/qcom,wcd9335.h
-F:	include/dt-bindings/sound/qcom,wcd934x.h
-F:	sound/soc/codecs/lpass-rx-macro.*
-F:	sound/soc/codecs/lpass-tx-macro.*
-F:	sound/soc/codecs/lpass-va-macro.c
-F:	sound/soc/codecs/lpass-wsa-macro.*
+F:	drivers/soundwire/qcom.c
+F:	include/dt-bindings/sound/qcom,wcd93*
+F:	sound/soc/codecs/lpass-*.*
 F:	sound/soc/codecs/msm8916-wcd-analog.c
 F:	sound/soc/codecs/msm8916-wcd-digital.c
 F:	sound/soc/codecs/wcd-clsh-v2.*
 F:	sound/soc/codecs/wcd-mbhc-v2.*
-F:	sound/soc/codecs/wcd9335.*
-F:	sound/soc/codecs/wcd934x.c
-F:	sound/soc/codecs/wsa881x.c
-F:	sound/soc/codecs/wsa883x.c
-F:	sound/soc/codecs/wsa884x.c
+F:	sound/soc/codecs/wcd93*.*
+F:	sound/soc/codecs/wsa88*.*
 F:	sound/soc/qcom/

 QCOM EMBEDDED USB DEBUGGER (EUD)

View File

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*

View File

@@ -256,6 +256,7 @@ config ARM64
 	select HOTPLUG_SMT if HOTPLUG_CPU
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
+	select JUMP_LABEL
 	select KASAN_VMALLOC if KASAN
 	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA

View File

@@ -20,8 +20,6 @@
 		compatible = "jedec,spi-nor";
 		reg = <0x0>;
 		spi-max-frequency = <25000000>;
-		#address-cells = <1>;
-		#size-cells = <1>;

 		partitions {
 			compatible = "fixed-partitions";

View File

@@ -100,6 +100,8 @@
 &displaydfr_mipi {
 	status = "okay";
+	#address-cells = <1>;
+	#size-cells = <0>;

 	dfr_panel: panel@0 {
 		compatible = "apple,j293-summit", "apple,summit";

View File

@@ -71,7 +71,7 @@
  */
 &port00 {
 	bus-range = <1 1>;
-	wifi0: network@0,0 {
+	wifi0: wifi@0,0 {
 		compatible = "pci14e4,4425";
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
 		/* To be filled by the loader */

View File

@@ -405,8 +405,6 @@
 			compatible = "apple,t8103-display-pipe-mipi", "apple,h7-display-pipe-mipi";
 			reg = <0x2 0x28600000 0x0 0x100000>;
 			power-domains = <&ps_mipi_dsi>;
-			#address-cells = <1>;
-			#size-cells = <0>;
 			status = "disabled";

 			ports {

View File

@@ -63,6 +63,8 @@
 &displaydfr_mipi {
 	status = "okay";
+	#address-cells = <1>;
+	#size-cells = <0>;

 	dfr_panel: panel@0 {
 		compatible = "apple,j493-summit", "apple,summit";

View File

@@ -420,8 +420,6 @@
 			compatible = "apple,t8112-display-pipe-mipi", "apple,h7-display-pipe-mipi";
 			reg = <0x2 0x28600000 0x0 0x100000>;
 			power-domains = <&ps_mipi_dsi>;
-			#address-cells = <1>;
-			#size-cells = <0>;
 			status = "disabled";

 			ports {

View File

@@ -1573,6 +1573,7 @@ CONFIG_RESET_QCOM_AOSS=y
 CONFIG_RESET_QCOM_PDC=m
 CONFIG_RESET_RZG2L_USBPHY_CTRL=y
 CONFIG_RESET_TI_SCI=y
+CONFIG_PHY_SNPS_EUSB2=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_CAN_TRANSCEIVER=m
 CONFIG_PHY_NXP_PTN3222=m

@@ -1597,7 +1598,6 @@ CONFIG_PHY_QCOM_EDP=m
 CONFIG_PHY_QCOM_PCIE2=m
 CONFIG_PHY_QCOM_QMP=m
 CONFIG_PHY_QCOM_QUSB2=m
-CONFIG_PHY_QCOM_SNPS_EUSB2=m
 CONFIG_PHY_QCOM_EUSB2_REPEATER=m
 CONFIG_PHY_QCOM_M31_USB=m
 CONFIG_PHY_QCOM_USB_HS=m

View File

@@ -287,17 +287,6 @@
 .Lskip_fgt2_\@:
 .endm

-.macro __init_el2_gcs
-	mrs_s	x1, SYS_ID_AA64PFR1_EL1
-	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
-	cbz	x1, .Lskip_gcs_\@
-
-	/* Ensure GCS is not enabled when we start trying to do BLs */
-	msr_s	SYS_GCSCR_EL1, xzr
-	msr_s	SYS_GCSCRE0_EL1, xzr
-.Lskip_gcs_\@:
-.endm
-
 /**
  * Initialize EL2 registers to sane values. This should be called early on all
  * cores that were booted in EL2. Note that everything gets initialised as

@@ -319,7 +308,6 @@
 	__init_el2_cptr
 	__init_el2_fgt
 	__init_el2_fgt2
-	__init_el2_gcs
 .endm

 #ifndef __KVM_NVHE_HYPERVISOR__

@@ -371,6 +359,13 @@
 	msr_s	SYS_MPAMHCR_EL2, xzr	// clear TRAP_MPAMIDR_EL1 -> EL2
 .Lskip_mpam_\@:

+	check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2
+.Linit_gcs_\@:
+	msr_s	SYS_GCSCR_EL1, xzr
+	msr_s	SYS_GCSCRE0_EL1, xzr
+.Lskip_gcs_\@:
+
 	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2
 .Linit_sve_\@:	/* SVE register access */

View File

@@ -1480,7 +1480,6 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
 				    struct reg_mask_range *range);

 /* Guest/host FPSIMD coordination helpers */
-int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);

View File

@@ -34,7 +34,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o	\
 			   cpufeature.o alternative.o cacheinfo.o	\
 			   smp.o smp_spin_table.o topology.o smccc-call.o \
 			   syscall.o proton-pack.o idle.o patching.o pi/ \
-			   rsi.o
+			   rsi.o jump_label.o

 obj-$(CONFIG_COMPAT)			+= sys32.o signal32.o	\
 					   sys_compat.o

@@ -47,7 +47,6 @@ obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
 obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF)	+= watchdog_hld.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_CPU_PM)			+= sleep.o suspend.o
-obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
 obj-$(CONFIG_KGDB)			+= kgdb.o
 obj-$(CONFIG_EFI)			+= efi.o efi-rt-wrapper.o
 obj-$(CONFIG_PCI)			+= pci.o

View File

@@ -3135,6 +3135,13 @@ static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope)
 }
 #endif

+#ifdef CONFIG_ARM64_SME
+static bool has_sme_feature(const struct arm64_cpu_capabilities *cap, int scope)
+{
+	return system_supports_sme() && has_user_cpuid_feature(cap, scope);
+}
+#endif
+
 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
 	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),

@@ -3223,31 +3230,31 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
 #ifdef CONFIG_ARM64_SME
 	HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4),
 #endif /* CONFIG_ARM64_SME */
 	HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT),
 	HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA),

View File

@@ -15,6 +15,7 @@

 #include <asm/efi.h>
 #include <asm/stacktrace.h>
+#include <asm/vmap_stack.h>

 static bool region_is_misaligned(const efi_memory_desc_t *md)
 {

@@ -214,9 +215,13 @@ static int __init arm64_efi_rt_init(void)
 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
 		return 0;

-	p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
-			   NUMA_NO_NODE, &&l);
-l:	if (!p) {
+	if (!IS_ENABLED(CONFIG_VMAP_STACK)) {
+		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+		return -ENOMEM;
+	}
+
+	p = arch_alloc_vmap_stack(THREAD_SIZE, NUMA_NO_NODE);
+	if (!p) {
 		pr_warn("Failed to allocate EFI runtime stack\n");
 		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 		return -ENOMEM;

View File

@@ -673,6 +673,11 @@ static void permission_overlay_switch(struct task_struct *next)
 	current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
 	if (current->thread.por_el0 != next->thread.por_el0) {
 		write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
+		/*
+		 * No ISB required as we can tolerate spurious Overlay faults -
+		 * the fault handler will check again based on the new value
+		 * of POR_EL0.
+		 */
 	}
 }

View File

@@ -1143,7 +1143,7 @@ static inline unsigned int num_other_online_cpus(void)
 void smp_send_stop(void)
 {
 	static unsigned long stop_in_progress;
-	cpumask_t mask;
+	static cpumask_t mask;
 	unsigned long timeout;

 	/*

View File

@@ -825,10 +825,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (!kvm_arm_vcpu_is_finalized(vcpu))
 		return -EPERM;

-	ret = kvm_arch_vcpu_run_map_fp(vcpu);
-	if (ret)
-		return ret;
-
 	if (likely(vcpu_has_run_once(vcpu)))
 		return 0;

@@ -2129,7 +2125,7 @@ static void cpu_hyp_init(void *discard)

 static void cpu_hyp_uninit(void *discard)
 {
-	if (__this_cpu_read(kvm_hyp_initialized)) {
+	if (!is_protected_kvm_enabled() && __this_cpu_read(kvm_hyp_initialized)) {
 		cpu_hyp_reset();
 		__this_cpu_write(kvm_hyp_initialized, 0);
 	}

@@ -2345,8 +2341,13 @@ static void __init teardown_hyp_mode(void)
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu) {
+		if (per_cpu(kvm_hyp_initialized, cpu))
+			continue;
+
 		free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
-		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
+
+		if (!kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu])
+			continue;

 		if (free_sve) {
 			struct cpu_sve_state *sve_state;

@@ -2354,6 +2355,9 @@ static void __init teardown_hyp_mode(void)
 			sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
 			free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
 		}
+
+		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
 	}
 }

View File

@@ -14,32 +14,6 @@
 #include <asm/kvm_mmu.h>
 #include <asm/sysreg.h>

-/*
- * Called on entry to KVM_RUN unless this vcpu previously ran at least
- * once and the most recent prior KVM_RUN for this vcpu was called from
- * the same task as current (highly likely).
- *
- * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
- * such that on entering hyp the relevant parts of current are already
- * mapped.
- */
-int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
-{
-	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
-	int ret;
-
-	/* pKVM has its own tracking of the host fpsimd state. */
-	if (is_protected_kvm_enabled())
-		return 0;
-
-	/* Make sure the host task fpsimd state is visible to hyp: */
-	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 /*
  * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
  * The actual loading is done by the FPSIMD access trap taken to hyp.

View File

@@ -479,6 +479,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 {
 	struct kvm_mem_range cur;
 	kvm_pte_t pte;
+	u64 granule;
 	s8 level;
 	int ret;

@@ -496,18 +497,21 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 		return -EPERM;
 	}

-	do {
-		u64 granule = kvm_granule_size(level);
+	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++) {
+		if (!kvm_level_supports_block_mapping(level))
+			continue;
+
+		granule = kvm_granule_size(level);
 		cur.start = ALIGN_DOWN(addr, granule);
 		cur.end = cur.start + granule;
-		level++;
-	} while ((level <= KVM_PGTABLE_LAST_LEVEL) &&
-		 !(kvm_level_supports_block_mapping(level) &&
-		   range_included(&cur, range)));
+		if (!range_included(&cur, range))
+			continue;
+		*range = cur;
+		return 0;
+	}

-	*range = cur;
-	return 0;
+	WARN_ON(1);
+	return -EINVAL;
 }

 int host_stage2_idmap_locked(phys_addr_t addr, u64 size,

View File

@@ -1402,6 +1402,21 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
 	}
 }

+#define has_tgran_2(__r, __sz)						\
+	({								\
+		u64 _s1, _s2, _mmfr0 = __r;				\
+									\
+		_s2 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,			\
+				    TGRAN##__sz##_2, _mmfr0);		\
+									\
+		_s1 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,			\
+				    TGRAN##__sz, _mmfr0);		\
+									\
+		((_s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_NI &&		\
+		  _s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz) || \
+		 (_s2 == ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz && \
+		  _s1 != ID_AA64MMFR0_EL1_TGRAN##__sz##_NI));		\
+	})
+
 /*
  * Our emulated CPU doesn't support all the possible features. For the
  * sake of simplicity (and probably mental sanity), wipe out a number

@@ -1411,6 +1426,8 @@
  */
 u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
 {
+	u64 orig_val = val;
+
 	switch (reg) {
 	case SYS_ID_AA64ISAR0_EL1:
 		/* Support everything but TME */

@@ -1480,13 +1497,16 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
 		 */
 		switch (PAGE_SIZE) {
 		case SZ_4K:
-			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
+			if (has_tgran_2(orig_val, 4))
+				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
 			fallthrough;
 		case SZ_16K:
-			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
+			if (has_tgran_2(orig_val, 16))
+				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
 			fallthrough;
 		case SZ_64K:
-			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
+			if (has_tgran_2(orig_val, 64))
+				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
 			break;
 		}

View File

@@ -401,9 +401,7 @@ void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu)
 {
 	bool level;

-	level = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En;
-	if (level)
-		level &= vgic_v3_get_misr(vcpu);
+	level = (__vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En) && vgic_v3_get_misr(vcpu);
 	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
 			    vcpu->kvm->arch.vgic.mi_intid, level, vcpu);
 }

View File

@@ -487,17 +487,29 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 	}
 }

-static bool fault_from_pkey(unsigned long esr, struct vm_area_struct *vma,
-			unsigned int mm_flags)
+static bool fault_from_pkey(struct vm_area_struct *vma, unsigned int mm_flags)
 {
-	unsigned long iss2 = ESR_ELx_ISS2(esr);
-
 	if (!system_supports_poe())
 		return false;

-	if (esr_fsc_is_permission_fault(esr) && (iss2 & ESR_ELx_Overlay))
-		return true;
-
+	/*
+	 * We do not check whether an Overlay fault has occurred because we
+	 * cannot make a decision based solely on its value:
+	 *
+	 * - If Overlay is set, a fault did occur due to POE, but it may be
+	 *   spurious in those cases where we update POR_EL0 without ISB (e.g.
+	 *   on context-switch). We would then need to manually check POR_EL0
+	 *   against vma_pkey(vma), which is exactly what
+	 *   arch_vma_access_permitted() does.
+	 *
+	 * - If Overlay is not set, we may still need to report a pkey fault.
+	 *   This is the case if an access was made within a mapping but with no
+	 *   page mapped, and POR_EL0 forbids the access (according to
+	 *   vma_pkey()). Such access will result in a SIGSEGV regardless
+	 *   because core code checks arch_vma_access_permitted(), but in order
+	 *   to report the correct error code - SEGV_PKUERR - we must handle
+	 *   that case here.
+	 */
 	return !arch_vma_access_permitted(vma,
 			mm_flags & FAULT_FLAG_WRITE,
 			mm_flags & FAULT_FLAG_INSTRUCTION,

@@ -635,7 +647,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 			goto bad_area;
 		}

-		if (fault_from_pkey(esr, vma, mm_flags)) {
+		if (fault_from_pkey(vma, mm_flags)) {
 			pkey = vma_pkey(vma);
 			vma_end_read(vma);
 			fault = 0;

@@ -679,7 +691,7 @@ retry:
 		goto bad_area;
 	}

-	if (fault_from_pkey(esr, vma, mm_flags)) {
+	if (fault_from_pkey(vma, mm_flags)) {
 		pkey = vma_pkey(vma);
 		mmap_read_unlock(mm);
 		fault = 0;

View File

@@ -518,7 +518,6 @@ alternative_else_nop_endif
 	msr	REG_PIR_EL1, x0

 	orr	tcr2, tcr2, TCR2_EL1_PIE
-	msr	REG_TCR2_EL1, x0

 .Lskip_indirection:

View File

@@ -63,7 +63,8 @@ config RISCV
 	select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
 	select ARCH_STACKWALK
 	select ARCH_SUPPORTS_ATOMIC_RMW
-	select ARCH_SUPPORTS_CFI_CLANG
+	# clang >= 17: https://github.com/llvm/llvm-project/commit/62fa708ceb027713b386c7e0efda994f8bdc27e2
+	select ARCH_SUPPORTS_CFI_CLANG if CLANG_VERSION >= 170000
 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
 	select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
 	select ARCH_SUPPORTS_HUGETLBFS if MMU

View File

@@ -18,10 +18,10 @@ const struct cpu_operations cpu_ops_sbi;

 /*
  * Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can
- * be invoked from multiple threads in parallel. Define a per cpu data
+ * be invoked from multiple threads in parallel. Define an array of boot data
  * to handle that.
  */
-static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
+static struct sbi_hart_boot_data boot_data[NR_CPUS];

 static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
 			      unsigned long priv)

@@ -67,7 +67,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
 	unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
 	unsigned long hartid = cpuid_to_hartid_map(cpuid);
 	unsigned long hsm_data;
-	struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid);
+	struct sbi_hart_boot_data *bdata = &boot_data[cpuid];

 	/* Make sure tidle is updated */
 	smp_mb();

View File

@@ -38,6 +38,7 @@ static int s390_sha1_init(struct shash_desc *desc)
 	sctx->state[4] = SHA1_H4;
 	sctx->count = 0;
 	sctx->func = CPACF_KIMD_SHA_1;
+	sctx->first_message_part = 0;

 	return 0;
 }

@@ -60,6 +61,7 @@ static int s390_sha1_import(struct shash_desc *desc, const void *in)
 	sctx->count = ictx->count;
 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
 	sctx->func = CPACF_KIMD_SHA_1;
+	sctx->first_message_part = 0;
 	return 0;
 }

View File

@@ -32,6 +32,7 @@ static int sha512_init(struct shash_desc *desc)
 	ctx->count = 0;
 	ctx->sha512.count_hi = 0;
 	ctx->func = CPACF_KIMD_SHA_512;
+	ctx->first_message_part = 0;

 	return 0;
 }

@@ -57,6 +58,7 @@ static int sha512_import(struct shash_desc *desc, const void *in)
 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
 	sctx->func = CPACF_KIMD_SHA_512;
+	sctx->first_message_part = 0;
 	return 0;
 }

@@ -97,6 +99,7 @@ static int sha384_init(struct shash_desc *desc)
 	ctx->count = 0;
 	ctx->sha512.count_hi = 0;
 	ctx->func = CPACF_KIMD_SHA_512;
+	ctx->first_message_part = 0;

 	return 0;
 }

View File

@@ -2695,6 +2695,15 @@ config MITIGATION_ITS
 	  disabled, mitigation cannot be enabled via cmdline.
 	  See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>

+config MITIGATION_TSA
+	bool "Mitigate Transient Scheduler Attacks"
+	depends on CPU_SUP_AMD
+	default y
+	help
+	  Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
+	  security vulnerability on AMD CPUs which can lead to forwarding of
+	  invalid info to subsequent instructions and thus can affect their
+	  timing and thereby cause a leakage.
 endif

 config ARCH_HAS_ADD_PAGES

View File

@@ -88,7 +88,7 @@ static const char * const sev_status_feat_names[] = {
  */
 static u64 snp_tsc_scale __ro_after_init;
 static u64 snp_tsc_offset __ro_after_init;
-static u64 snp_tsc_freq_khz __ro_after_init;
+static unsigned long snp_tsc_freq_khz __ro_after_init;

 DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
 DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);

@@ -2167,15 +2167,31 @@ static unsigned long securetsc_get_tsc_khz(void)

 void __init snp_secure_tsc_init(void)
 {
-	unsigned long long tsc_freq_mhz;
+	struct snp_secrets_page *secrets;
+	unsigned long tsc_freq_mhz;
+	void *mem;

 	if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC))
 		return;

+	mem = early_memremap_encrypted(sev_secrets_pa, PAGE_SIZE);
+	if (!mem) {
+		pr_err("Unable to get TSC_FACTOR: failed to map the SNP secrets page.\n");
+		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC);
+	}
+
+	secrets = (__force struct snp_secrets_page *)mem;
+
 	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
 	rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
-	snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000);
+
+	/* Extract the GUEST TSC MHZ from BIT[17:0], rest is reserved space */
+	tsc_freq_mhz &= GENMASK_ULL(17, 0);
+
+	snp_tsc_freq_khz = SNP_SCALE_TSC_FREQ(tsc_freq_mhz * 1000, secrets->tsc_factor);

 	x86_platform.calibrate_cpu = securetsc_get_tsc_khz;
 	x86_platform.calibrate_tsc = securetsc_get_tsc_khz;
+
+	early_memunmap(mem, PAGE_SIZE);
 }

View File

@@ -36,20 +36,20 @@ EXPORT_SYMBOL_GPL(write_ibpb);

 /*
  * Define the VERW operand that is disguised as entry code so that
- * it can be referenced with KPTI enabled. This ensure VERW can be
+ * it can be referenced with KPTI enabled. This ensures VERW can be
  * used late in exit-to-user path after page tables are switched.
  */
 .pushsection .entry.text, "ax"

 .align L1_CACHE_BYTES, 0xcc
-SYM_CODE_START_NOALIGN(mds_verw_sel)
+SYM_CODE_START_NOALIGN(x86_verw_sel)
 	UNWIND_HINT_UNDEFINED
 	ANNOTATE_NOENDBR
 	.word __KERNEL_DS
 .align L1_CACHE_BYTES, 0xcc
-SYM_CODE_END(mds_verw_sel);
+SYM_CODE_END(x86_verw_sel);
 /* For KVM */
-EXPORT_SYMBOL_GPL(mds_verw_sel);
+EXPORT_SYMBOL_GPL(x86_verw_sel);

 .popsection

View File

@@ -456,6 +456,7 @@
 #define X86_FEATURE_NO_NESTED_DATA_BP	(20*32+ 0) /* No Nested Data Breakpoints */
 #define X86_FEATURE_WRMSR_XX_BASE_NS	(20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
 #define X86_FEATURE_LFENCE_RDTSC	(20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
+#define X86_FEATURE_VERW_CLEAR		(20*32+ 5) /* The memory form of VERW mitigates TSA */
 #define X86_FEATURE_NULL_SEL_CLR_BASE	(20*32+ 6) /* Null Selector Clears Base */
 #define X86_FEATURE_AUTOIBRS		(20*32+ 8) /* Automatic IBRS */
 #define X86_FEATURE_NO_SMM_CTL_MSR	(20*32+ 9) /* SMM_CTL MSR is not present */

@@ -487,6 +488,9 @@
 #define X86_FEATURE_PREFER_YMM		(21*32+ 8) /* Avoid ZMM registers due to downclocking */
 #define X86_FEATURE_APX			(21*32+ 9) /* Advanced Performance Extensions */
 #define X86_FEATURE_INDIRECT_THUNK_ITS	(21*32+10) /* Use thunk for indirect branches in lower half of cacheline */
+#define X86_FEATURE_TSA_SQ_NO		(21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
+#define X86_FEATURE_TSA_L1_NO		(21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
+#define X86_FEATURE_CLEAR_CPU_BUF_VM	(21*32+13) /* Clear CPU buffers using VERW before VMRUN */

 /*
  * BUG word(s)

@@ -542,5 +546,5 @@
 #define X86_BUG_OLD_MICROCODE		X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */
 #define X86_BUG_ITS			X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
 #define X86_BUG_ITS_NATIVE_ONLY		X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
+#define X86_BUG_TSA			X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */

 #endif /* _ASM_X86_CPUFEATURES_H */

View File

@@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void)

 static __always_inline void native_safe_halt(void)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
 	asm volatile("sti; hlt": : :"memory");
 }

 static __always_inline void native_halt(void)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
 	asm volatile("hlt": : :"memory");
 }

View File

@@ -700,8 +700,13 @@ struct kvm_vcpu_hv {

 	struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS];

-	/* Preallocated buffer for handling hypercalls passing sparse vCPU set */
+	/*
+	 * Preallocated buffers for handling hypercalls that pass sparse vCPU
+	 * sets (for high vCPU counts, they're too large to comfortably fit on
+	 * the stack).
+	 */
 	u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];
+	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

 	struct hv_vp_assist_page vp_assist_page;

@@ -764,6 +769,7 @@ enum kvm_only_cpuid_leafs {
 	CPUID_8000_0022_EAX,
 	CPUID_7_2_EDX,
 	CPUID_24_0_EBX,
+	CPUID_8000_0021_ECX,
 	NR_KVM_CPU_CAPS,

 	NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,

View File

@@ -43,8 +43,6 @@ static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx)

 static __always_inline void __mwait(u32 eax, u32 ecx)
 {
-	mds_idle_clear_cpu_buffers();
-
 	/*
 	 * Use the instruction mnemonic with implicit operands, as the LLVM
 	 * assembler fails to assemble the mnemonic with explicit operands:

@@ -80,7 +78,7 @@ static __always_inline void __mwait(u32 eax, u32 ecx)
  */
 static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
 {
-	/* No MDS buffer clear as this is AMD/HYGON only */
+	/* No need for TSA buffer clearing on AMD */

 	/* "mwaitx %eax, %ebx, %ecx" */
 	asm volatile(".byte 0x0f, 0x01, 0xfb"

@@ -98,7 +96,6 @@ static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
  */
 static __always_inline void __sti_mwait(u32 eax, u32 ecx)
 {
-	mds_idle_clear_cpu_buffers();

 	asm volatile("sti; mwait" :: "a" (eax), "c" (ecx));
 }

@@ -115,21 +112,29 @@ static __always_inline void __sti_mwait(u32 eax, u32 ecx)
  */
 static __always_inline void mwait_idle_with_hints(u32 eax, u32 ecx)
 {
+	if (need_resched())
+		return;
+
+	x86_idle_clear_cpu_buffers();
+
 	if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
 		const void *addr = &current_thread_info()->flags;

 		alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
 		__monitor(addr, 0, 0);

-		if (!need_resched()) {
-			if (ecx & 1) {
-				__mwait(eax, ecx);
-			} else {
-				__sti_mwait(eax, ecx);
-				raw_local_irq_disable();
-			}
+		if (need_resched())
+			goto out;
+
+		if (ecx & 1) {
+			__mwait(eax, ecx);
+		} else {
+			__sti_mwait(eax, ecx);
+			raw_local_irq_disable();
 		}
 	}
+
+out:
 	current_clr_polling();
 }

View File

@@ -302,25 +302,31 @@
 .endm

 /*
- * Macro to execute VERW instruction that mitigate transient data sampling
- * attacks such as MDS. On affected systems a microcode update overloaded VERW
- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
+ * Macro to execute VERW insns that mitigate transient data sampling
+ * attacks such as MDS or TSA. On affected systems a microcode update
+ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
+ * CFLAGS.ZF.
  *
  * Note: Only the memory operand variant of VERW clears the CPU buffers.
  */
-.macro CLEAR_CPU_BUFFERS
+.macro __CLEAR_CPU_BUFFERS feature
 #ifdef CONFIG_X86_64
-	ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
+	ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
 #else
	/*
	 * In 32bit mode, the memory operand must be a %cs reference. The data
	 * segments may not be usable (vm86 mode), and the stack segment may not
	 * be flat (ESPFIX32).
	 */
-	ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
+	ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
 #endif
 .endm

+#define CLEAR_CPU_BUFFERS \
+	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
+
+#define VM_CLEAR_CPU_BUFFERS \
+	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
+
 #ifdef CONFIG_X86_64
 .macro CLEAR_BRANCH_HISTORY
	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP

@@ -567,24 +573,24 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb);

-DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);

 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

 DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);

-extern u16 mds_verw_sel;
+extern u16 x86_verw_sel;

 #include <asm/segment.h>

 /**
- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
+ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
  *
  * This uses the otherwise unused and obsolete VERW instruction in
  * combination with microcode which triggers a CPU buffer flush when the
  * instruction is executed.
  */
-static __always_inline void mds_clear_cpu_buffers(void)
+static __always_inline void x86_clear_cpu_buffers(void)
 {
	static const u16 ds = __KERNEL_DS;

@@ -601,14 +607,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
 }

 /**
- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
+ * and TSA vulnerabilities.
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */
-static __always_inline void mds_idle_clear_cpu_buffers(void)
+static __always_inline void x86_idle_clear_cpu_buffers(void)
 {
-	if (static_branch_likely(&mds_idle_clear))
-		mds_clear_cpu_buffers();
+	if (static_branch_likely(&cpu_buf_idle_clear))
+		x86_clear_cpu_buffers();
 }

 #endif /* __ASSEMBLER__ */


@ -223,6 +223,18 @@ struct snp_tsc_info_resp {
u8 rsvd2[100]; u8 rsvd2[100];
} __packed; } __packed;
/*
* Obtain the mean TSC frequency by reducing the nominal TSC frequency * Obtain the mean TSC frequency by reducing the nominal TSC frequency
* according to TSC_FACTOR, as documented in the SNP Firmware ABI specification: * according to TSC_FACTOR, as documented in the SNP Firmware ABI specification:
*
* GUEST_TSC_FREQ * (1 - (TSC_FACTOR * 0.00001))
*
* which is equivalent to:
*
* GUEST_TSC_FREQ -= (GUEST_TSC_FREQ * TSC_FACTOR) / 100000;
*/
#define SNP_SCALE_TSC_FREQ(freq, factor) ((freq) - (freq) * (factor) / 100000)
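A worked example with illustrative numbers (not from the spec): a nominal 2 GHz guest TSC with tsc_factor = 100 (i.e. a 0.1% decrease) scales down as

	/* Sketch: SNP_SCALE_TSC_FREQ(2000000000, 100)
	 *   = 2000000000 - (2000000000 * 100) / 100000
	 *   = 2000000000 - 2000000
	 *   = 1998000000
	 */

The subtraction form gives the same result as the spec's multiply-by-(1 - factor) formula, up to integer truncation, without floating point.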
struct snp_guest_req { struct snp_guest_req {
void *req_buf; void *req_buf;
size_t req_sz; size_t req_sz;
@ -282,8 +294,11 @@ struct snp_secrets_page {
u8 svsm_guest_vmpl; u8 svsm_guest_vmpl;
u8 rsvd3[3]; u8 rsvd3[3];
/* The percentage decrease from nominal to mean TSC frequency. */
u32 tsc_factor;
/* Remainder of page */ /* Remainder of page */
u8 rsvd4[3744]; u8 rsvd4[3740];
} __packed; } __packed;
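The new u32 tsc_factor is carved out of the trailing reserved space, which is why rsvd4 shrinks from 3744 to 3740 bytes: the overall layout and size of the secrets page are unchanged.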
struct snp_msg_desc { struct snp_msg_desc {


@ -72,6 +72,7 @@
#define TDVMCALL_MAP_GPA 0x10001 #define TDVMCALL_MAP_GPA 0x10001
#define TDVMCALL_GET_QUOTE 0x10002 #define TDVMCALL_GET_QUOTE 0x10002
#define TDVMCALL_REPORT_FATAL_ERROR 0x10003 #define TDVMCALL_REPORT_FATAL_ERROR 0x10003
#define TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT 0x10004ULL
/* /*
* TDG.VP.VMCALL Status Codes (returned in R10) * TDG.VP.VMCALL Status Codes (returned in R10)


@ -965,7 +965,13 @@ struct kvm_tdx_cmd {
struct kvm_tdx_capabilities { struct kvm_tdx_capabilities {
__u64 supported_attrs; __u64 supported_attrs;
__u64 supported_xfam; __u64 supported_xfam;
__u64 reserved[254];
__u64 kernel_tdvmcallinfo_1_r11;
__u64 user_tdvmcallinfo_1_r11;
__u64 kernel_tdvmcallinfo_1_r12;
__u64 user_tdvmcallinfo_1_r12;
__u64 reserved[250];
/* Configurable CPUID bits for userspace */ /* Configurable CPUID bits for userspace */
struct kvm_cpuid2 cpuid; struct kvm_cpuid2 cpuid;


@ -9,7 +9,7 @@
#include <linux/sched/clock.h> #include <linux/sched/clock.h>
#include <linux/random.h> #include <linux/random.h>
#include <linux/topology.h> #include <linux/topology.h>
#include <asm/amd/fch.h> #include <linux/platform_data/x86/amd-fch.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/cacheinfo.h> #include <asm/cacheinfo.h>
@ -377,6 +377,47 @@ static void bsp_determine_snp(struct cpuinfo_x86 *c)
#endif #endif
} }
#define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \
step, step, ucode)
static const struct x86_cpu_id amd_tsa_microcode[] = {
ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7),
ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b),
ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d),
ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c),
ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c),
ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109),
ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e),
ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211),
ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108),
ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012),
ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a),
ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108),
ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208),
ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008),
ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008),
ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216),
{},
};
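For illustration, the first entry expands via the helper macro to

	X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x19, 0x01), 0x1, 0x1, 0x0a0011d7)

i.e. family 0x19, model 0x01, stepping 1 needs microcode revision 0x0a0011d7 or later before X86_FEATURE_VERW_CLEAR can be set in tsa_init() below.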
static void tsa_init(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return;
if (cpu_has(c, X86_FEATURE_ZEN3) ||
cpu_has(c, X86_FEATURE_ZEN4)) {
if (x86_match_min_microcode_rev(amd_tsa_microcode))
setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
else
pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode);
} else {
setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
}
}
static void bsp_init_amd(struct cpuinfo_x86 *c) static void bsp_init_amd(struct cpuinfo_x86 *c)
{ {
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@ -489,6 +530,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
} }
bsp_determine_snp(c); bsp_determine_snp(c);
tsa_init(c);
return; return;
warn: warn:


@ -94,6 +94,8 @@ static void __init bhi_apply_mitigation(void);
static void __init its_select_mitigation(void); static void __init its_select_mitigation(void);
static void __init its_update_mitigation(void); static void __init its_update_mitigation(void);
static void __init its_apply_mitigation(void); static void __init its_apply_mitigation(void);
static void __init tsa_select_mitigation(void);
static void __init tsa_apply_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base; u64 x86_spec_ctrl_base;
@ -169,9 +171,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb); DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
EXPORT_SYMBOL_GPL(switch_vcpu_ibpb); EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
/* Control MDS CPU buffer clear before idling (halt, mwait) */ /* Control CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear); DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear); EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
/* /*
* Controls whether l1d flush based mitigations are enabled, * Controls whether l1d flush based mitigations are enabled,
@ -225,6 +227,7 @@ void __init cpu_select_mitigations(void)
gds_select_mitigation(); gds_select_mitigation();
its_select_mitigation(); its_select_mitigation();
bhi_select_mitigation(); bhi_select_mitigation();
tsa_select_mitigation();
/* /*
* After mitigations are selected, some may need to update their * After mitigations are selected, some may need to update their
@ -272,6 +275,7 @@ void __init cpu_select_mitigations(void)
gds_apply_mitigation(); gds_apply_mitigation();
its_apply_mitigation(); its_apply_mitigation();
bhi_apply_mitigation(); bhi_apply_mitigation();
tsa_apply_mitigation();
} }
/* /*
@ -637,7 +641,7 @@ static void __init mmio_apply_mitigation(void)
* is required irrespective of SMT state. * is required irrespective of SMT state.
*/ */
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear); static_branch_enable(&cpu_buf_idle_clear);
if (mmio_nosmt || cpu_mitigations_auto_nosmt()) if (mmio_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false); cpu_smt_disable(false);
@ -1487,6 +1491,94 @@ static void __init its_apply_mitigation(void)
set_return_thunk(its_return_thunk); set_return_thunk(its_return_thunk);
} }
#undef pr_fmt
#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
enum tsa_mitigations {
TSA_MITIGATION_NONE,
TSA_MITIGATION_AUTO,
TSA_MITIGATION_UCODE_NEEDED,
TSA_MITIGATION_USER_KERNEL,
TSA_MITIGATION_VM,
TSA_MITIGATION_FULL,
};
static const char * const tsa_strings[] = {
[TSA_MITIGATION_NONE] = "Vulnerable",
[TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
[TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
[TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
[TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
};
static enum tsa_mitigations tsa_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
static int __init tsa_parse_cmdline(char *str)
{
if (!str)
return -EINVAL;
if (!strcmp(str, "off"))
tsa_mitigation = TSA_MITIGATION_NONE;
else if (!strcmp(str, "on"))
tsa_mitigation = TSA_MITIGATION_FULL;
else if (!strcmp(str, "user"))
tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
else if (!strcmp(str, "vm"))
tsa_mitigation = TSA_MITIGATION_VM;
else
pr_err("Ignoring unknown tsa=%s option.\n", str);
return 0;
}
early_param("tsa", tsa_parse_cmdline);
static void __init tsa_select_mitigation(void)
{
if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
tsa_mitigation = TSA_MITIGATION_NONE;
return;
}
if (tsa_mitigation == TSA_MITIGATION_NONE)
return;
if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) {
tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
goto out;
}
if (tsa_mitigation == TSA_MITIGATION_AUTO)
tsa_mitigation = TSA_MITIGATION_FULL;
/*
* No need to set verw_clear_cpu_buf_mitigation_selected - it
* doesn't fit all cases here and it is not needed because this
* is the only VERW-based mitigation on AMD.
*/
out:
pr_info("%s\n", tsa_strings[tsa_mitigation]);
}
static void __init tsa_apply_mitigation(void)
{
switch (tsa_mitigation) {
case TSA_MITIGATION_USER_KERNEL:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
break;
case TSA_MITIGATION_VM:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
break;
case TSA_MITIGATION_FULL:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
break;
default:
break;
}
}
#undef pr_fmt #undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt #define pr_fmt(fmt) "Spectre V2 : " fmt
@ -2249,10 +2341,10 @@ static void update_mds_branch_idle(void)
return; return;
if (sched_smt_active()) { if (sched_smt_active()) {
static_branch_enable(&mds_idle_clear); static_branch_enable(&cpu_buf_idle_clear);
} else if (mmio_mitigation == MMIO_MITIGATION_OFF || } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
static_branch_disable(&mds_idle_clear); static_branch_disable(&cpu_buf_idle_clear);
} }
} }
@ -2316,6 +2408,25 @@ void cpu_bugs_smt_update(void)
break; break;
} }
switch (tsa_mitigation) {
case TSA_MITIGATION_USER_KERNEL:
case TSA_MITIGATION_VM:
case TSA_MITIGATION_AUTO:
case TSA_MITIGATION_FULL:
/*
* TSA-SQ can potentially lead to info leakage between
* SMT threads.
*/
if (sched_smt_active())
static_branch_enable(&cpu_buf_idle_clear);
else
static_branch_disable(&cpu_buf_idle_clear);
break;
case TSA_MITIGATION_NONE:
case TSA_MITIGATION_UCODE_NEEDED:
break;
}
mutex_unlock(&spec_ctrl_mutex); mutex_unlock(&spec_ctrl_mutex);
} }
@ -3265,6 +3376,11 @@ static ssize_t gds_show_state(char *buf)
return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
} }
static ssize_t tsa_show_state(char *buf)
{
return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug) char *buf, unsigned int bug)
{ {
@ -3328,6 +3444,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_ITS: case X86_BUG_ITS:
return its_show_state(buf); return its_show_state(buf);
case X86_BUG_TSA:
return tsa_show_state(buf);
default: default:
break; break;
} }
@ -3414,6 +3533,11 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att
{ {
return cpu_show_common(dev, attr, buf, X86_BUG_ITS); return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
} }
ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
}
#endif #endif
void __warn_thunk(void) void __warn_thunk(void)


@ -1233,6 +1233,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define ITS BIT(8) #define ITS BIT(8)
/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */ /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
#define ITS_NATIVE_ONLY BIT(9) #define ITS_NATIVE_ONLY BIT(9)
/* CPU is affected by Transient Scheduler Attacks */
#define TSA BIT(10)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS), VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS),
@ -1280,7 +1282,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_AMD(0x16, RETBLEED), VULNBL_AMD(0x16, RETBLEED),
VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO), VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
VULNBL_AMD(0x19, SRSO), VULNBL_AMD(0x19, SRSO | TSA),
VULNBL_AMD(0x1a, SRSO), VULNBL_AMD(0x1a, SRSO),
{} {}
}; };
@ -1530,6 +1532,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY); setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
} }
if (c->x86_vendor == X86_VENDOR_AMD) {
if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
!cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
if (cpu_matches(cpu_vuln_blacklist, TSA) ||
/* Enable bug on Zen guests to allow for live migration. */
(cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
setup_force_cpu_bug(X86_BUG_TSA);
}
}
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return; return;


@ -350,7 +350,6 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
struct thresh_restart { struct thresh_restart {
struct threshold_block *b; struct threshold_block *b;
int reset;
int set_lvt_off; int set_lvt_off;
int lvt_off; int lvt_off;
u16 old_limit; u16 old_limit;
@ -432,13 +431,13 @@ static void threshold_restart_bank(void *_tr)
rdmsr(tr->b->address, lo, hi); rdmsr(tr->b->address, lo, hi);
if (tr->b->threshold_limit < (hi & THRESHOLD_MAX)) /*
tr->reset = 1; /* limit cannot be lower than err count */ * Reset error count and overflow bit.
* This is done during init or after handling an interrupt.
if (tr->reset) { /* reset err count and overflow bit */ */
hi = if (hi & MASK_OVERFLOW_HI || tr->set_lvt_off) {
(hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | hi &= ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI);
(THRESHOLD_MAX - tr->b->threshold_limit); hi |= THRESHOLD_MAX - tr->b->threshold_limit;
} else if (tr->old_limit) { /* change limit w/o reset */ } else if (tr->old_limit) { /* change limit w/o reset */
int new_count = (hi & THRESHOLD_MAX) + int new_count = (hi & THRESHOLD_MAX) +
(tr->old_limit - tr->b->threshold_limit); (tr->old_limit - tr->b->threshold_limit);
@ -1113,13 +1112,20 @@ static const char *get_name(unsigned int cpu, unsigned int bank, struct threshol
} }
bank_type = smca_get_bank_type(cpu, bank); bank_type = smca_get_bank_type(cpu, bank);
if (bank_type >= N_SMCA_BANK_TYPES)
return NULL;
if (b && (bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2)) { if (b && (bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2)) {
if (b->block < ARRAY_SIZE(smca_umc_block_names)) if (b->block < ARRAY_SIZE(smca_umc_block_names))
return smca_umc_block_names[b->block]; return smca_umc_block_names[b->block];
return NULL; }
if (b && b->block) {
snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "th_block_%u", b->block);
return buf_mcatype;
}
if (bank_type >= N_SMCA_BANK_TYPES) {
snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "th_bank_%u", bank);
return buf_mcatype;
} }
if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1) if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1)


@ -1740,6 +1740,11 @@ static void mc_poll_banks_default(void)
void (*mc_poll_banks)(void) = mc_poll_banks_default; void (*mc_poll_banks)(void) = mc_poll_banks_default;
static bool should_enable_timer(unsigned long iv)
{
return !mca_cfg.ignore_ce && iv;
}
static void mce_timer_fn(struct timer_list *t) static void mce_timer_fn(struct timer_list *t)
{ {
struct timer_list *cpu_t = this_cpu_ptr(&mce_timer); struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
@ -1763,7 +1768,7 @@ static void mce_timer_fn(struct timer_list *t)
if (mce_get_storm_mode()) { if (mce_get_storm_mode()) {
__start_timer(t, HZ); __start_timer(t, HZ);
} else { } else if (should_enable_timer(iv)) {
__this_cpu_write(mce_next_interval, iv); __this_cpu_write(mce_next_interval, iv);
__start_timer(t, iv); __start_timer(t, iv);
} }
@ -2156,11 +2161,10 @@ static void mce_start_timer(struct timer_list *t)
{ {
unsigned long iv = check_interval * HZ; unsigned long iv = check_interval * HZ;
if (mca_cfg.ignore_ce || !iv) if (should_enable_timer(iv)) {
return; this_cpu_write(mce_next_interval, iv);
__start_timer(t, iv);
this_cpu_write(mce_next_interval, iv); }
__start_timer(t, iv);
} }
static void __mcheck_cpu_setup_timer(void) static void __mcheck_cpu_setup_timer(void)
@ -2801,15 +2805,9 @@ static int mce_cpu_dead(unsigned int cpu)
static int mce_cpu_online(unsigned int cpu) static int mce_cpu_online(unsigned int cpu)
{ {
struct timer_list *t = this_cpu_ptr(&mce_timer); struct timer_list *t = this_cpu_ptr(&mce_timer);
int ret;
mce_device_create(cpu); mce_device_create(cpu);
mce_threshold_create_device(cpu);
ret = mce_threshold_create_device(cpu);
if (ret) {
mce_device_remove(cpu);
return ret;
}
mce_reenable_cpu(); mce_reenable_cpu();
mce_start_timer(t); mce_start_timer(t);
return 0; return 0;
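Net effect of this hunk: thresholding sysfs setup becomes best-effort. If mce_threshold_create_device() fails, CPU onlining still succeeds and the main MCE device is kept, presumably on the grounds that missing thresholding files are preferable to losing MCE reporting for the CPU entirely.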


@ -478,6 +478,7 @@ void mce_intel_feature_init(struct cpuinfo_x86 *c)
void mce_intel_feature_clear(struct cpuinfo_x86 *c) void mce_intel_feature_clear(struct cpuinfo_x86 *c)
{ {
intel_clear_lmce(); intel_clear_lmce();
cmci_clear();
} }
bool intel_filter_mce(struct mce *m) bool intel_filter_mce(struct mce *m)


@ -231,6 +231,13 @@ static const struct patch_digest phashes[] = {
0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21, 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
} }
}, },
{ 0xa0011d7, {
0x35,0x07,0xcd,0x40,0x94,0xbc,0x81,0x6b,
0xfc,0x61,0x56,0x1a,0xe2,0xdb,0x96,0x12,
0x1c,0x1c,0x31,0xb1,0x02,0x6f,0xe5,0xd2,
0xfe,0x1b,0x04,0x03,0x2c,0x8f,0x4c,0x36,
}
},
{ 0xa001223, { { 0xa001223, {
0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8, 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4, 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
@ -294,6 +301,13 @@ static const struct patch_digest phashes[] = {
0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59, 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
} }
}, },
{ 0xa00123b, {
0xef,0xa1,0x1e,0x71,0xf1,0xc3,0x2c,0xe2,
0xc3,0xef,0x69,0x41,0x7a,0x54,0xca,0xc3,
0x8f,0x62,0x84,0xee,0xc2,0x39,0xd9,0x28,
0x95,0xa7,0x12,0x49,0x1e,0x30,0x71,0x72,
}
},
{ 0xa00820c, { { 0xa00820c, {
0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3, 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63, 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
@ -301,6 +315,13 @@ static const struct patch_digest phashes[] = {
0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2, 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
} }
}, },
{ 0xa00820d, {
0xf9,0x2a,0xc0,0xf4,0x9e,0xa4,0x87,0xa4,
0x7d,0x87,0x00,0xfd,0xab,0xda,0x19,0xca,
0x26,0x51,0x32,0xc1,0x57,0x91,0xdf,0xc1,
0x05,0xeb,0x01,0x7c,0x5a,0x95,0x21,0xb7,
}
},
{ 0xa10113e, { { 0xa10113e, {
0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10, 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0, 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
@ -322,6 +343,13 @@ static const struct patch_digest phashes[] = {
0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4, 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
} }
}, },
{ 0xa10114c, {
0x9e,0xb6,0xa2,0xd9,0x87,0x38,0xc5,0x64,
0xd8,0x88,0xfa,0x78,0x98,0xf9,0x6f,0x74,
0x39,0x90,0x1b,0xa5,0xcf,0x5e,0xb4,0x2a,
0x02,0xff,0xd4,0x8c,0x71,0x8b,0xe2,0xc0,
}
},
{ 0xa10123e, { { 0xa10123e, {
0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18, 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d, 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
@ -343,6 +371,13 @@ static const struct patch_digest phashes[] = {
0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75, 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
} }
}, },
{ 0xa10124c, {
0x29,0xea,0xf1,0x2c,0xb2,0xe4,0xef,0x90,
0xa4,0xcd,0x1d,0x86,0x97,0x17,0x61,0x46,
0xfc,0x22,0xcb,0x57,0x75,0x19,0xc8,0xcc,
0x0c,0xf5,0xbc,0xac,0x81,0x9d,0x9a,0xd2,
}
},
{ 0xa108108, { { 0xa108108, {
0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9, 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6, 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
@ -350,6 +385,13 @@ static const struct patch_digest phashes[] = {
0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16, 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
} }
}, },
{ 0xa108109, {
0x85,0xb4,0xbd,0x7c,0x49,0xa7,0xbd,0xfa,
0x49,0x36,0x80,0x81,0xc5,0xb7,0x39,0x1b,
0x9a,0xaa,0x50,0xde,0x9b,0xe9,0x32,0x35,
0x42,0x7e,0x51,0x4f,0x52,0x2c,0x28,0x59,
}
},
{ 0xa20102d, { { 0xa20102d, {
0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11, 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89, 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
@ -357,6 +399,13 @@ static const struct patch_digest phashes[] = {
0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4, 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
} }
}, },
{ 0xa20102e, {
0xbe,0x1f,0x32,0x04,0x0d,0x3c,0x9c,0xdd,
0xe1,0xa4,0xbf,0x76,0x3a,0xec,0xc2,0xf6,
0x11,0x00,0xa7,0xaf,0x0f,0xe5,0x02,0xc5,
0x54,0x3a,0x1f,0x8c,0x16,0xb5,0xff,0xbe,
}
},
{ 0xa201210, { { 0xa201210, {
0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe, 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9, 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
@ -364,6 +413,13 @@ static const struct patch_digest phashes[] = {
0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41, 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
} }
}, },
{ 0xa201211, {
0x69,0xa1,0x17,0xec,0xd0,0xf6,0x6c,0x95,
0xe2,0x1e,0xc5,0x59,0x1a,0x52,0x0a,0x27,
0xc4,0xed,0xd5,0x59,0x1f,0xbf,0x00,0xff,
0x08,0x88,0xb5,0xe1,0x12,0xb6,0xcc,0x27,
}
},
{ 0xa404107, { { 0xa404107, {
0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45, 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0, 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
@ -371,6 +427,13 @@ static const struct patch_digest phashes[] = {
0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99, 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
} }
}, },
{ 0xa404108, {
0x69,0x67,0x43,0x06,0xf8,0x0c,0x62,0xdc,
0xa4,0x21,0x30,0x4f,0x0f,0x21,0x2c,0xcb,
0xcc,0x37,0xf1,0x1c,0xc3,0xf8,0x2f,0x19,
0xdf,0x53,0x53,0x46,0xb1,0x15,0xea,0x00,
}
},
{ 0xa500011, { { 0xa500011, {
0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4, 0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1, 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
@ -378,6 +441,13 @@ static const struct patch_digest phashes[] = {
0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74, 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
} }
}, },
{ 0xa500012, {
0xeb,0x74,0x0d,0x47,0xa1,0x8e,0x09,0xe4,
0x93,0x4c,0xad,0x03,0x32,0x4c,0x38,0x16,
0x10,0x39,0xdd,0x06,0xaa,0xce,0xd6,0x0f,
0x62,0x83,0x9d,0x8e,0x64,0x55,0xbe,0x63,
}
},
{ 0xa601209, { { 0xa601209, {
0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32, 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30, 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
@ -385,6 +455,13 @@ static const struct patch_digest phashes[] = {
0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d, 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
} }
}, },
{ 0xa60120a, {
0x0c,0x8b,0x3d,0xfd,0x52,0x52,0x85,0x7d,
0x20,0x3a,0xe1,0x7e,0xa4,0x21,0x3b,0x7b,
0x17,0x86,0xae,0xac,0x13,0xb8,0x63,0x9d,
0x06,0x01,0xd0,0xa0,0x51,0x9a,0x91,0x2c,
}
},
{ 0xa704107, { { 0xa704107, {
0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6, 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93, 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
@ -392,6 +469,13 @@ static const struct patch_digest phashes[] = {
0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39, 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
} }
}, },
{ 0xa704108, {
0xd7,0x55,0x15,0x2b,0xfe,0xc4,0xbc,0x93,
0xec,0x91,0xa0,0xae,0x45,0xb7,0xc3,0x98,
0x4e,0xff,0x61,0x77,0x88,0xc2,0x70,0x49,
0xe0,0x3a,0x1d,0x84,0x38,0x52,0xbf,0x5a,
}
},
{ 0xa705206, { { 0xa705206, {
0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4, 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7, 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
@ -399,6 +483,13 @@ static const struct patch_digest phashes[] = {
0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc, 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
} }
}, },
{ 0xa705208, {
0x30,0x1d,0x55,0x24,0xbc,0x6b,0x5a,0x19,
0x0c,0x7d,0x1d,0x74,0xaa,0xd1,0xeb,0xd2,
0x16,0x62,0xf7,0x5b,0xe1,0x1f,0x18,0x11,
0x5c,0xf0,0x94,0x90,0x26,0xec,0x69,0xff,
}
},
{ 0xa708007, { { 0xa708007, {
0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3, 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2, 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
@ -406,6 +497,13 @@ static const struct patch_digest phashes[] = {
0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93, 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
} }
}, },
{ 0xa708008, {
0x08,0x6e,0xf0,0x22,0x4b,0x8e,0xc4,0x46,
0x58,0x34,0xe6,0x47,0xa2,0x28,0xfd,0xab,
0x22,0x3d,0xdd,0xd8,0x52,0x9e,0x1d,0x16,
0xfa,0x01,0x68,0x14,0x79,0x3e,0xe8,0x6b,
}
},
{ 0xa70c005, { { 0xa70c005, {
0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b, 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f, 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
@ -413,6 +511,13 @@ static const struct patch_digest phashes[] = {
0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13, 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
} }
}, },
{ 0xa70c008, {
0x0f,0xdb,0x37,0xa1,0x10,0xaf,0xd4,0x21,
0x94,0x0d,0xa4,0xa2,0xe9,0x86,0x6c,0x0e,
0x85,0x7c,0x36,0x30,0xa3,0x3a,0x78,0x66,
0x18,0x10,0x60,0x0d,0x78,0x3d,0x44,0xd0,
}
},
{ 0xaa00116, { { 0xaa00116, {
0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63, 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5, 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
@ -441,4 +546,11 @@ static const struct patch_digest phashes[] = {
0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef, 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
} }
}, },
{ 0xaa00216, {
0x79,0xfb,0x5b,0x9f,0xb6,0xe6,0xa8,0xf5,
0x4e,0x7c,0x4f,0x8e,0x1d,0xad,0xd0,0x08,
0xc2,0x43,0x7c,0x8b,0xe6,0xdb,0xd0,0xd2,
0xe8,0x39,0x26,0xc1,0xe5,0x5a,0x48,0xf1,
}
},
}; };
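Each entry pairs a patch revision with the SHA-256 of the patch blob; conceptually the loader's verification looks like (a simplified sketch, not the exact kernel code):

	u8 digest[SHA256_DIGEST_SIZE];

	sha256(patch, patch_size, digest);
	if (memcmp(digest, phashes[i].sha256, sizeof(digest)))
		return false;	/* refuse to apply an unrecognized patch */

The new entries are what let the TSA-era revisions listed in amd_tsa_microcode pass this check.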


@ -50,6 +50,8 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 }, { X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 }, { X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
{ X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },
{ X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 },
{ X86_FEATURE_AMD_WORKLOAD_CLASS, CPUID_EAX, 22, 0x80000021, 0 }, { X86_FEATURE_AMD_WORKLOAD_CLASS, CPUID_EAX, 22, 0x80000021, 0 },
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 }, { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 }, { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },


@ -907,16 +907,24 @@ static __init bool prefer_mwait_c1_over_halt(void)
*/ */
static __cpuidle void mwait_idle(void) static __cpuidle void mwait_idle(void)
{ {
if (need_resched())
return;
x86_idle_clear_cpu_buffers();
if (!current_set_polling_and_test()) { if (!current_set_polling_and_test()) {
const void *addr = &current_thread_info()->flags; const void *addr = &current_thread_info()->flags;
alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
__monitor(addr, 0, 0); __monitor(addr, 0, 0);
if (!need_resched()) { if (need_resched())
__sti_mwait(0, 0); goto out;
raw_local_irq_disable();
} __sti_mwait(0, 0);
raw_local_irq_disable();
} }
out:
__current_clr_polling(); __current_clr_polling();
} }


@ -1165,6 +1165,8 @@ void kvm_set_cpu_caps(void)
*/ */
SYNTHESIZED_F(LFENCE_RDTSC), SYNTHESIZED_F(LFENCE_RDTSC),
/* SmmPgCfgLock */ /* SmmPgCfgLock */
/* 4: Resv */
SYNTHESIZED_F(VERW_CLEAR),
F(NULL_SEL_CLR_BASE), F(NULL_SEL_CLR_BASE),
/* UpperAddressIgnore */ /* UpperAddressIgnore */
F(AUTOIBRS), F(AUTOIBRS),
@ -1179,6 +1181,11 @@ void kvm_set_cpu_caps(void)
F(SRSO_USER_KERNEL_NO), F(SRSO_USER_KERNEL_NO),
); );
kvm_cpu_cap_init(CPUID_8000_0021_ECX,
SYNTHESIZED_F(TSA_SQ_NO),
SYNTHESIZED_F(TSA_L1_NO),
);
kvm_cpu_cap_init(CPUID_8000_0022_EAX, kvm_cpu_cap_init(CPUID_8000_0022_EAX,
F(PERFMON_V2), F(PERFMON_V2),
); );
@ -1748,8 +1755,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->eax = entry->ebx = entry->ecx = entry->edx = 0; entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break; break;
case 0x80000021: case 0x80000021:
entry->ebx = entry->ecx = entry->edx = 0; entry->ebx = entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0021_EAX); cpuid_entry_override(entry, CPUID_8000_0021_EAX);
cpuid_entry_override(entry, CPUID_8000_0021_ECX);
break; break;
/* AMD Extended Performance Monitoring and Debug */ /* AMD Extended Performance Monitoring and Debug */
case 0x80000022: { case 0x80000022: {


@ -1979,6 +1979,9 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY) if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
goto out_flush_all; goto out_flush_all;
if (is_noncanonical_invlpg_address(entries[i], vcpu))
continue;
/* /*
* Lower 12 bits of 'address' encode the number of additional * Lower 12 bits of 'address' encode the number of additional
* pages to flush. * pages to flush.
@ -2001,11 +2004,11 @@ out_flush_all:
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{ {
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
unsigned long *vcpu_mask = hv_vcpu->vcpu_mask;
u64 *sparse_banks = hv_vcpu->sparse_banks; u64 *sparse_banks = hv_vcpu->sparse_banks;
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
struct hv_tlb_flush_ex flush_ex; struct hv_tlb_flush_ex flush_ex;
struct hv_tlb_flush flush; struct hv_tlb_flush flush;
DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo; struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
/* /*
* Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE' * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'


@ -52,6 +52,10 @@
/* CPUID level 0x80000022 (EAX) */ /* CPUID level 0x80000022 (EAX) */
#define KVM_X86_FEATURE_PERFMON_V2 KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0) #define KVM_X86_FEATURE_PERFMON_V2 KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0)
/* CPUID level 0x80000021 (ECX) */
#define KVM_X86_FEATURE_TSA_SQ_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1)
#define KVM_X86_FEATURE_TSA_L1_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2)
struct cpuid_reg { struct cpuid_reg {
u32 function; u32 function;
u32 index; u32 index;
@ -82,6 +86,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
[CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
[CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX}, [CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX},
[CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
}; };
/* /*
@ -121,6 +126,8 @@ static __always_inline u32 __feature_translate(int x86_feature)
KVM_X86_TRANSLATE_FEATURE(PERFMON_V2); KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL); KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
KVM_X86_TRANSLATE_FEATURE(BHI_CTRL); KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO);
KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO);
default: default:
return x86_feature; return x86_feature;
} }


@ -1971,6 +1971,10 @@ static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
struct kvm_vcpu *src_vcpu; struct kvm_vcpu *src_vcpu;
unsigned long i; unsigned long i;
if (src->created_vcpus != atomic_read(&src->online_vcpus) ||
dst->created_vcpus != atomic_read(&dst->online_vcpus))
return -EBUSY;
if (!sev_es_guest(src)) if (!sev_es_guest(src))
return 0; return 0;
@ -4445,8 +4449,12 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
* the VMSA will be NULL if this vCPU is the destination for intrahost * the VMSA will be NULL if this vCPU is the destination for intrahost
* migration, and will be copied later. * migration, and will be copied later.
*/ */
if (svm->sev_es.vmsa && !svm->sev_es.snp_has_guest_vmsa) if (!svm->sev_es.snp_has_guest_vmsa) {
svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); if (svm->sev_es.vmsa)
svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
else
svm->vmcb->control.vmsa_pa = INVALID_PAGE;
}
if (cpu_feature_enabled(X86_FEATURE_ALLOWED_SEV_FEATURES)) if (cpu_feature_enabled(X86_FEATURE_ALLOWED_SEV_FEATURES))
svm->vmcb->control.allowed_sev_features = sev->vmsa_features | svm->vmcb->control.allowed_sev_features = sev->vmsa_features |


@ -169,6 +169,9 @@ SYM_FUNC_START(__svm_vcpu_run)
#endif #endif
mov VCPU_RDI(%_ASM_DI), %_ASM_DI mov VCPU_RDI(%_ASM_DI), %_ASM_DI
/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS
/* Enter guest mode */ /* Enter guest mode */
3: vmrun %_ASM_AX 3: vmrun %_ASM_AX
4: 4:
@ -335,6 +338,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
mov SVM_current_vmcb(%rdi), %rax mov SVM_current_vmcb(%rdi), %rax
mov KVM_VMCB_pa(%rax), %rax mov KVM_VMCB_pa(%rax), %rax
/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS
/* Enter guest mode */ /* Enter guest mode */
1: vmrun %rax 1: vmrun %rax
2: 2:
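In both entry paths the clear is deliberately placed as late as possible, immediately before VMRUN, so that no further host memory accesses can refill the buffers. The ZF clobber called out in the comments is harmless here since nothing between the VERW and the VMRUN consumes flags.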


@ -173,6 +173,9 @@ static void td_init_cpuid_entry2(struct kvm_cpuid_entry2 *entry, unsigned char i
tdx_clear_unsupported_cpuid(entry); tdx_clear_unsupported_cpuid(entry);
} }
#define TDVMCALLINFO_GET_QUOTE BIT(0)
#define TDVMCALLINFO_SETUP_EVENT_NOTIFY_INTERRUPT BIT(1)
static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf, static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf,
struct kvm_tdx_capabilities *caps) struct kvm_tdx_capabilities *caps)
{ {
@ -188,6 +191,10 @@ static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf,
caps->cpuid.nent = td_conf->num_cpuid_config; caps->cpuid.nent = td_conf->num_cpuid_config;
caps->user_tdvmcallinfo_1_r11 =
TDVMCALLINFO_GET_QUOTE |
TDVMCALLINFO_SETUP_EVENT_NOTIFY_INTERRUPT;
for (i = 0; i < td_conf->num_cpuid_config; i++) for (i = 0; i < td_conf->num_cpuid_config; i++)
td_init_cpuid_entry2(&caps->cpuid.entries[i], i); td_init_cpuid_entry2(&caps->cpuid.entries[i], i);
@ -1530,6 +1537,27 @@ static int tdx_get_quote(struct kvm_vcpu *vcpu)
return 0; return 0;
} }
static int tdx_setup_event_notify_interrupt(struct kvm_vcpu *vcpu)
{
struct vcpu_tdx *tdx = to_tdx(vcpu);
u64 vector = tdx->vp_enter_args.r12;
if (vector < 32 || vector > 255) {
tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
return 1;
}
vcpu->run->exit_reason = KVM_EXIT_TDX;
vcpu->run->tdx.flags = 0;
vcpu->run->tdx.nr = TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT;
vcpu->run->tdx.setup_event_notify.ret = TDVMCALL_STATUS_SUBFUNC_UNSUPPORTED;
vcpu->run->tdx.setup_event_notify.vector = vector;
vcpu->arch.complete_userspace_io = tdx_complete_simple;
return 0;
}
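From the guest side, the new leaf would be invoked roughly like the other TDVMCALL wrappers; a hypothetical sketch (the call shape here is an assumption, not part of this commit):

	/* Ask the VMM to deliver event notifications through 'vector'. */
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT,
		.r12 = vector,	/* must be in [32, 255], see the check above */
	};

	ret = __tdx_hypercall(&args);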
static int handle_tdvmcall(struct kvm_vcpu *vcpu) static int handle_tdvmcall(struct kvm_vcpu *vcpu)
{ {
switch (tdvmcall_leaf(vcpu)) { switch (tdvmcall_leaf(vcpu)) {
@ -1541,6 +1569,8 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
return tdx_get_td_vm_call_info(vcpu); return tdx_get_td_vm_call_info(vcpu);
case TDVMCALL_GET_QUOTE: case TDVMCALL_GET_QUOTE:
return tdx_get_quote(vcpu); return tdx_get_quote(vcpu);
case TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT:
return tdx_setup_event_notify_interrupt(vcpu);
default: default:
break; break;
} }


@ -7291,7 +7291,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
vmx_l1d_flush(vcpu); vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&cpu_buf_vm_clear) && else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm)) kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers(); x86_clear_cpu_buffers();
vmx_disable_fb_clear(vmx); vmx_disable_fb_clear(vmx);


@ -3258,9 +3258,11 @@ int kvm_guest_time_update(struct kvm_vcpu *v)
/* With all the info we got, fill in the values */ /* With all the info we got, fill in the values */
if (kvm_caps.has_tsc_control) if (kvm_caps.has_tsc_control) {
tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz, tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz,
v->arch.l1_tsc_scaling_ratio); v->arch.l1_tsc_scaling_ratio);
tgt_tsc_khz = tgt_tsc_khz ? : 1;
}
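The '?: 1' fallback covers the case where scaling a small target frequency by a tiny l1_tsc_scaling_ratio truncates to zero; feeding a zero frequency into kvm_get_time_scale() below would otherwise risk a division by zero, so 1 kHz serves as a floor.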
if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,


@ -1971,8 +1971,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm)) /*
return -EINVAL; * Don't check for the port being within range of max_evtchn_port().
* Userspace can configure whatever targets it likes; events just won't
* be delivered if/while the target is invalid, just like userspace can
* configure MSIs which target non-existent APICs.
*
* This allows, on Live Migration and Live Update, the IRQ routing table
* to be restored *independently* of other things like creating vCPUs,
* without imposing an ordering dependency on userspace. In this
* particular case, the problematic ordering would be with setting the
* Xen 'long mode' flag, which changes max_evtchn_port() to allow 4096
* instead of 1024 event channels.
*/
/* We only support 2 level event channels for now */ /* We only support 2 level event channels for now */
if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)


@ -243,23 +243,10 @@ static int acpi_battery_get_property(struct power_supply *psy,
break; break;
case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW: case POWER_SUPPLY_PROP_POWER_NOW:
if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) { if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV; ret = -ENODEV;
break; else
} val->intval = battery->rate_now * 1000;
val->intval = battery->rate_now * 1000;
/*
* When discharging, the current should be reported as a
* negative number as per the power supply class interface
* definition.
*/
if (psp == POWER_SUPPLY_PROP_CURRENT_NOW &&
(battery->state & ACPI_BATTERY_STATE_DISCHARGING) &&
acpi_battery_handle_discharging(battery)
== POWER_SUPPLY_STATUS_DISCHARGING)
val->intval = -val->intval;
break; break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:


@ -602,6 +602,7 @@ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
CPU_SHOW_VULN_FALLBACK(ghostwrite); CPU_SHOW_VULN_FALLBACK(ghostwrite);
CPU_SHOW_VULN_FALLBACK(old_microcode); CPU_SHOW_VULN_FALLBACK(old_microcode);
CPU_SHOW_VULN_FALLBACK(indirect_target_selection); CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
CPU_SHOW_VULN_FALLBACK(tsa);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@ -620,6 +621,7 @@ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling
static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL); static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL); static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = { static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr, &dev_attr_meltdown.attr,
@ -639,6 +641,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_ghostwrite.attr, &dev_attr_ghostwrite.attr,
&dev_attr_old_microcode.attr, &dev_attr_old_microcode.attr,
&dev_attr_indirect_target_selection.attr, &dev_attr_indirect_target_selection.attr,
&dev_attr_tsa.attr,
NULL NULL
}; };


@ -1236,6 +1236,7 @@ void dpm_complete(pm_message_t state)
*/ */
void dpm_resume_end(pm_message_t state) void dpm_resume_end(pm_message_t state)
{ {
pm_restore_gfp_mask();
dpm_resume(state); dpm_resume(state);
dpm_complete(state); dpm_complete(state);
} }
@ -2176,8 +2177,10 @@ int dpm_suspend_start(pm_message_t state)
error = dpm_prepare(state); error = dpm_prepare(state);
if (error) if (error)
dpm_save_failed_step(SUSPEND_PREPARE); dpm_save_failed_step(SUSPEND_PREPARE);
else else {
pm_restrict_gfp_mask();
error = dpm_suspend(state); error = dpm_suspend(state);
}
dpm_show_time(starttime, state, error, "start"); dpm_show_time(starttime, state, error, "start");
return error; return error;


@ -64,13 +64,15 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
rcu_read_unlock(); rcu_read_unlock();
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM); page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
rcu_read_lock(); if (!page) {
if (!page) rcu_read_lock();
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
}
xa_lock(&brd->brd_pages); xa_lock(&brd->brd_pages);
ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL, ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
page, gfp); page, gfp);
rcu_read_lock();
if (ret) { if (ret) {
xa_unlock(&brd->brd_pages); xa_unlock(&brd->brd_pages);
__free_page(page); __free_page(page);
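The reshuffle is about sleeping context: alloc_page() (and the allocation __xa_cmpxchg() may perform) can sleep, which is not allowed inside the RCU read-side critical section the caller holds, so the read lock is re-taken only after the potentially sleeping calls.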


@ -1442,15 +1442,16 @@ static void ublk_queue_rqs(struct rq_list *rqlist)
struct ublk_queue *this_q = req->mq_hctx->driver_data; struct ublk_queue *this_q = req->mq_hctx->driver_data;
struct ublk_io *this_io = &this_q->ios[req->tag]; struct ublk_io *this_io = &this_q->ios[req->tag];
if (ublk_prep_req(this_q, req, true) != BLK_STS_OK) {
rq_list_add_tail(&requeue_list, req);
continue;
}
if (io && !ublk_belong_to_same_batch(io, this_io) && if (io && !ublk_belong_to_same_batch(io, this_io) &&
!rq_list_empty(&submit_list)) !rq_list_empty(&submit_list))
ublk_queue_cmd_list(io, &submit_list); ublk_queue_cmd_list(io, &submit_list);
io = this_io; io = this_io;
rq_list_add_tail(&submit_list, req);
if (ublk_prep_req(this_q, req, true) == BLK_STS_OK)
rq_list_add_tail(&submit_list, req);
else
rq_list_add_tail(&requeue_list, req);
} }
if (!rq_list_empty(&submit_list)) if (!rq_list_empty(&submit_list))


@ -685,11 +685,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
dma_resv_iter_begin(&cursor, obj, usage); dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) { dma_resv_for_each_fence_unlocked(&cursor, fence) {
ret = dma_fence_wait_timeout(fence, intr, ret); ret = dma_fence_wait_timeout(fence, intr, timeout);
if (ret <= 0) { if (ret <= 0)
dma_resv_iter_end(&cursor); break;
return ret;
} /* Even for zero timeout the return value is 1 */
if (timeout)
timeout = ret;
} }
dma_resv_iter_end(&cursor); dma_resv_iter_end(&cursor);
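After the fix, a zero (poll-only) timeout stays zero for every fence instead of being turned into a short real wait, while a non-zero budget is carried forward through the remaining time each wait returns. A minimal caller-side sketch (assuming an existing reservation object obj):

	long ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_READ,
					 true, msecs_to_jiffies(100));
	if (ret == 0)
		;	/* timed out */
	else if (ret < 0)
		;	/* interrupted or error */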


@ -170,8 +170,10 @@ static int ecs_create_desc(struct device *ecs_dev, const struct attribute_group
fru_ctx->dev_attr[ECS_RESET] = EDAC_ECS_ATTR_WO(reset, fru); fru_ctx->dev_attr[ECS_RESET] = EDAC_ECS_ATTR_WO(reset, fru);
fru_ctx->dev_attr[ECS_THRESHOLD] = EDAC_ECS_ATTR_RW(threshold, fru); fru_ctx->dev_attr[ECS_THRESHOLD] = EDAC_ECS_ATTR_RW(threshold, fru);
for (i = 0; i < ECS_MAX_ATTRS; i++) for (i = 0; i < ECS_MAX_ATTRS; i++) {
sysfs_attr_init(&fru_ctx->dev_attr[i].dev_attr.attr);
fru_ctx->ecs_attrs[i] = &fru_ctx->dev_attr[i].dev_attr.attr; fru_ctx->ecs_attrs[i] = &fru_ctx->dev_attr[i].dev_attr.attr;
}
sprintf(fru_ctx->name, "%s%d", EDAC_ECS_FRU_NAME, fru); sprintf(fru_ctx->name, "%s%d", EDAC_ECS_FRU_NAME, fru);
group->name = fru_ctx->name; group->name = fru_ctx->name;


@ -333,6 +333,7 @@ static int mem_repair_create_desc(struct device *dev,
for (i = 0; i < MR_MAX_ATTRS; i++) { for (i = 0; i < MR_MAX_ATTRS; i++) {
memcpy(&ctx->mem_repair_dev_attr[i], memcpy(&ctx->mem_repair_dev_attr[i],
&dev_attr[i], sizeof(dev_attr[i])); &dev_attr[i], sizeof(dev_attr[i]));
sysfs_attr_init(&ctx->mem_repair_dev_attr[i].dev_attr.attr);
ctx->mem_repair_attrs[i] = ctx->mem_repair_attrs[i] =
&ctx->mem_repair_dev_attr[i].dev_attr.attr; &ctx->mem_repair_dev_attr[i].dev_attr.attr;
} }


@ -176,6 +176,7 @@ static int scrub_create_desc(struct device *scrub_dev,
group = &scrub_ctx->group; group = &scrub_ctx->group;
for (i = 0; i < SCRUB_MAX_ATTRS; i++) { for (i = 0; i < SCRUB_MAX_ATTRS; i++) {
memcpy(&scrub_ctx->scrub_dev_attr[i], &dev_attr[i], sizeof(dev_attr[i])); memcpy(&scrub_ctx->scrub_dev_attr[i], &dev_attr[i], sizeof(dev_attr[i]));
sysfs_attr_init(&scrub_ctx->scrub_dev_attr[i].dev_attr.attr);
scrub_ctx->scrub_attrs[i] = &scrub_ctx->scrub_dev_attr[i].dev_attr.attr; scrub_ctx->scrub_attrs[i] = &scrub_ctx->scrub_dev_attr[i].dev_attr.attr;
} }
sprintf(scrub_ctx->name, "%s%d", "scrub", instance); sprintf(scrub_ctx->name, "%s%d", "scrub", instance);
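All three EDAC hunks fix the same problem: these attributes are allocated dynamically, so each needs sysfs_attr_init() to assign it a valid lockdep key; without it, registering the attribute group triggers a lockdep splat on CONFIG_DEBUG_LOCK_ALLOC kernels.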


@ -110,7 +110,7 @@ struct ffa_drv_info {
struct work_struct sched_recv_irq_work; struct work_struct sched_recv_irq_work;
struct xarray partition_info; struct xarray partition_info;
DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS)); DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
struct mutex notify_lock; /* lock to protect notifier hashtable */ rwlock_t notify_lock; /* lock to protect notifier hashtable */
}; };
static struct ffa_drv_info *drv_info; static struct ffa_drv_info *drv_info;
@ -1250,13 +1250,12 @@ notifier_hnode_get_by_type(u16 notify_id, enum notify_type type)
return NULL; return NULL;
} }
static int static int update_notifier_cb(struct ffa_device *dev, int notify_id,
update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb, struct notifier_cb_info *cb, bool is_framework)
void *cb_data, bool is_registration, bool is_framework)
{ {
struct notifier_cb_info *cb_info = NULL; struct notifier_cb_info *cb_info = NULL;
enum notify_type type = ffa_notify_type_get(dev->vm_id); enum notify_type type = ffa_notify_type_get(dev->vm_id);
bool cb_found; bool cb_found, is_registration = !!cb;
if (is_framework) if (is_framework)
cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id, cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id,
@ -1270,20 +1269,10 @@ update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb,
return -EINVAL; return -EINVAL;
if (is_registration) { if (is_registration) {
cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL); hash_add(drv_info->notifier_hash, &cb->hnode, notify_id);
if (!cb_info)
return -ENOMEM;
cb_info->dev = dev;
cb_info->cb_data = cb_data;
if (is_framework)
cb_info->fwk_cb = cb;
else
cb_info->cb = cb;
hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
} else { } else {
hash_del(&cb_info->hnode); hash_del(&cb_info->hnode);
kfree(cb_info);
} }
return 0; return 0;
@ -1300,20 +1289,19 @@ static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id,
if (notify_id >= FFA_MAX_NOTIFICATIONS) if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL; return -EINVAL;
mutex_lock(&drv_info->notify_lock); write_lock(&drv_info->notify_lock);
rc = update_notifier_cb(dev, notify_id, NULL, NULL, false, rc = update_notifier_cb(dev, notify_id, NULL, is_framework);
is_framework);
if (rc) { if (rc) {
pr_err("Could not unregister notification callback\n"); pr_err("Could not unregister notification callback\n");
mutex_unlock(&drv_info->notify_lock); write_unlock(&drv_info->notify_lock);
return rc; return rc;
} }
if (!is_framework) if (!is_framework)
rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id)); rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
mutex_unlock(&drv_info->notify_lock); write_unlock(&drv_info->notify_lock);
return rc; return rc;
} }
@ -1334,6 +1322,7 @@ static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
{ {
int rc; int rc;
u32 flags = 0; u32 flags = 0;
struct notifier_cb_info *cb_info = NULL;
if (ffa_notifications_disabled()) if (ffa_notifications_disabled())
return -EOPNOTSUPP; return -EOPNOTSUPP;
@ -1341,28 +1330,40 @@ static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
if (notify_id >= FFA_MAX_NOTIFICATIONS) if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL; return -EINVAL;
mutex_lock(&drv_info->notify_lock); cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
if (!cb_info)
return -ENOMEM;
cb_info->dev = dev;
cb_info->cb_data = cb_data;
if (is_framework)
cb_info->fwk_cb = cb;
else
cb_info->cb = cb;
write_lock(&drv_info->notify_lock);
if (!is_framework) { if (!is_framework) {
if (is_per_vcpu) if (is_per_vcpu)
flags = PER_VCPU_NOTIFICATION_FLAG; flags = PER_VCPU_NOTIFICATION_FLAG;
rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags); rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
if (rc) { if (rc)
mutex_unlock(&drv_info->notify_lock); goto out_unlock_free;
return rc;
}
} }
rc = update_notifier_cb(dev, notify_id, cb, cb_data, true, rc = update_notifier_cb(dev, notify_id, cb_info, is_framework);
is_framework);
if (rc) { if (rc) {
pr_err("Failed to register callback for %d - %d\n", pr_err("Failed to register callback for %d - %d\n",
notify_id, rc); notify_id, rc);
if (!is_framework) if (!is_framework)
ffa_notification_unbind(dev->vm_id, BIT(notify_id)); ffa_notification_unbind(dev->vm_id, BIT(notify_id));
} }
mutex_unlock(&drv_info->notify_lock);
out_unlock_free:
write_unlock(&drv_info->notify_lock);
if (rc)
kfree(cb_info);
return rc; return rc;
} }
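Two details make the mutex-to-rwlock conversion safe: the notification handlers only look up the hash table, so they can run concurrently under read_lock(), and the GFP_KERNEL allocation now happens before the lock is taken, since rwlock_t is a spinning lock and must not be held across a sleeping allocation.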
@ -1406,9 +1407,9 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
if (!(bitmap & 1)) if (!(bitmap & 1))
continue; continue;
mutex_lock(&drv_info->notify_lock); read_lock(&drv_info->notify_lock);
cb_info = notifier_hnode_get_by_type(notify_id, type); cb_info = notifier_hnode_get_by_type(notify_id, type);
mutex_unlock(&drv_info->notify_lock); read_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->cb) if (cb_info && cb_info->cb)
cb_info->cb(notify_id, cb_info->cb_data); cb_info->cb(notify_id, cb_info->cb_data);
@ -1446,9 +1447,9 @@ static void handle_fwk_notif_callbacks(u32 bitmap)
ffa_rx_release(); ffa_rx_release();
mutex_lock(&drv_info->notify_lock); read_lock(&drv_info->notify_lock);
cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid); cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid);
mutex_unlock(&drv_info->notify_lock); read_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->fwk_cb) if (cb_info && cb_info->fwk_cb)
cb_info->fwk_cb(notify_id, cb_info->cb_data, buf); cb_info->fwk_cb(notify_id, cb_info->cb_data, buf);
@ -1973,7 +1974,7 @@ static void ffa_notifications_setup(void)
goto cleanup; goto cleanup;
hash_init(drv_info->notifier_hash); hash_init(drv_info->notifier_hash);
mutex_init(&drv_info->notify_lock); rwlock_init(&drv_info->notify_lock);
drv_info->notif_enabled = true; drv_info->notif_enabled = true;
return; return;


@ -29,14 +29,12 @@ SECTIONS
. = _etext; . = _etext;
} }
#ifdef CONFIG_EFI_SBAT
.sbat : ALIGN(4096) { .sbat : ALIGN(4096) {
_sbat = .; _sbat = .;
*(.sbat) *(.sbat)
_esbat = ALIGN(4096); _esbat = ALIGN(4096);
. = _esbat; . = _esbat;
} }
#endif
.data : ALIGN(4096) { .data : ALIGN(4096) {
_data = .; _data = .;
@ -60,6 +58,6 @@ SECTIONS
PROVIDE(__efistub__gzdata_size = PROVIDE(__efistub__gzdata_size =
ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start)); ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start));
PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext)); PROVIDE(__data_rawsize = ABSOLUTE(_edata - _data));
PROVIDE(__data_size = ABSOLUTE(_end - _etext)); PROVIDE(__data_size = ABSOLUTE(_end - _data));
PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat)); PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat));


@ -430,6 +430,9 @@ int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
msg.chan_id = xfer->acpm_chan_id;
msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
scoped_guard(mutex, &achan->tx_lock) { scoped_guard(mutex, &achan->tx_lock) {
tx_front = readl(achan->tx.front); tx_front = readl(achan->tx.front);
idx = (tx_front + 1) % achan->qlen; idx = (tx_front + 1) % achan->qlen;
@ -446,25 +449,15 @@ int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
/* Advance TX front. */ /* Advance TX front. */
writel(idx, achan->tx.front); writel(idx, achan->tx.front);
ret = mbox_send_message(achan->chan, (void *)&msg);
if (ret < 0)
return ret;
mbox_client_txdone(achan->chan, 0);
} }
msg.chan_id = xfer->acpm_chan_id; return acpm_wait_for_message_response(achan, xfer);
msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
ret = mbox_send_message(achan->chan, (void *)&msg);
if (ret < 0)
return ret;
ret = acpm_wait_for_message_response(achan, xfer);
/*
* NOTE: we might prefer not to need the mailbox ticker to manage the
* transfer queueing since the protocol layer queues things by itself.
* Unfortunately, we have to kick the mailbox framework after we have
* received our message.
*/
mbox_client_txdone(achan->chan, ret);
return ret;
} }
/** /**


@ -561,6 +561,13 @@ static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
} }
static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
int engine, int queue)
{
return 0;
}
const struct kfd2kgd_calls gfx_v7_kfd2kgd = { const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings, .program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@ -578,4 +585,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.set_scratch_backing_va = set_scratch_backing_va, .set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base, .set_vm_context_page_table_base = set_vm_context_page_table_base,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
.hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
}; };


@ -582,6 +582,13 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev,
lower_32_bits(page_table_base)); lower_32_bits(page_table_base));
} }
static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
int engine, int queue)
{
return 0;
}
const struct kfd2kgd_calls gfx_v8_kfd2kgd = { const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings, .program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@ -599,4 +606,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
get_atc_vmid_pasid_mapping_info, get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va, .set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base, .set_vm_context_page_table_base = set_vm_context_page_table_base,
.hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
}; };


@ -944,6 +944,7 @@ static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
drm_sched_entity_fini(entity); drm_sched_entity_fini(entity);
} }
} }
kref_put(&ctx->refcount, amdgpu_ctx_fini);
} }
} }


@ -45,6 +45,7 @@
#include "amdgpu_ras.h" #include "amdgpu_ras.h"
MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin"); MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_4_4_4.bin");
MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin"); MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin");
static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = { static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {


@ -1543,8 +1543,13 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
u32 inst_id = ring->me; u32 inst_id = ring->me;
int r;
return amdgpu_sdma_reset_engine(adev, inst_id); amdgpu_amdkfd_suspend(adev, true);
r = amdgpu_sdma_reset_engine(adev, inst_id);
amdgpu_amdkfd_resume(adev, true);
return r;
} }
static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring) static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)


@@ -1456,8 +1456,13 @@ static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 {
 	struct amdgpu_device *adev = ring->adev;
 	u32 inst_id = ring->me;
+	int r;
 
-	return amdgpu_sdma_reset_engine(adev, inst_id);
+	amdgpu_amdkfd_suspend(adev, true);
+	r = amdgpu_sdma_reset_engine(adev, inst_id);
+	amdgpu_amdkfd_resume(adev, true);
+
+	return r;
 }
 
 static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
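Aside: both the sdma_v5_0 and sdma_v5_2 fixes above wrap the engine reset in an amdgpu_amdkfd_suspend()/amdgpu_amdkfd_resume() bracket so KFD user-mode queues are quiesced while the engine goes down. A minimal userspace sketch of that quiesce-reset-resume shape follows; every name in it is invented for illustration, it is not the kernel code.

#include <stdio.h>

/* Illustrative analogue of the bracket used in the reset fixes above:
 * quiesce dependent users, perform the destructive reset, then always
 * resume and forward the reset's status to the caller. */

static void users_suspend(void) { puts("users quiesced"); }
static void users_resume(void)  { puts("users resumed"); }

static int engine_reset(void)
{
	puts("engine reset");
	return 0; /* 0 on success, negative on failure */
}

static int reset_queue(void)
{
	int r;

	users_suspend();    /* stop queue users before touching hardware */
	r = engine_reset(); /* the destructive step */
	users_resume();     /* resume unconditionally, even on failure */
	return r;
}

int main(void)
{
	return reset_queue();
}

The important property, visible in the diff as well, is that the resume step runs even when the reset fails, so the system is never left quiesced.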

View File

@@ -1171,13 +1171,12 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start,
 }
 
 static void
-svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
-		    struct svm_range *pchild, enum svm_work_list_ops op)
+svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op)
 {
 	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
 		 pchild, pchild->start, pchild->last, prange, op);
 
-	pchild->work_item.mm = mm;
+	pchild->work_item.mm = NULL;
 	pchild->work_item.op = op;
 	list_add_tail(&pchild->child_list, &prange->child_list);
 }
@@ -1278,7 +1277,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
 			mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 		/* system memory accessed by the dGPU */
 		} else {
-			if (gc_ip_version < IP_VERSION(9, 5, 0))
+			if (gc_ip_version < IP_VERSION(9, 5, 0) || ext_coherent)
 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
 			else
 				mapping_flags |= AMDGPU_VM_MTYPE_NC;
@@ -2394,15 +2393,17 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
 		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
 			prange->work_item.op = op;
 	} else {
-		prange->work_item.op = op;
-
-		/* Pairs with mmput in deferred_list_work */
-		mmget(mm);
-		prange->work_item.mm = mm;
-		list_add_tail(&prange->deferred_list,
-			      &prange->svms->deferred_range_list);
-		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
-			 prange, prange->start, prange->last, op);
+		/* Pairs with mmput in deferred_list_work.
+		 * If process is exiting and mm is gone, don't update mmu notifier.
+		 */
+		if (mmget_not_zero(mm)) {
+			prange->work_item.mm = mm;
+			prange->work_item.op = op;
+			list_add_tail(&prange->deferred_list,
+				      &prange->svms->deferred_range_list);
+			pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
+				 prange, prange->start, prange->last, op);
+		}
 	}
 	spin_unlock(&svms->deferred_list_lock);
 }
@@ -2416,8 +2417,7 @@ void schedule_deferred_list_work(struct svm_range_list *svms)
 }
 
 static void
-svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
-		      struct svm_range *prange, unsigned long start,
+svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start,
 		      unsigned long last)
 {
 	struct svm_range *head;
@@ -2438,12 +2438,12 @@ svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
 		svm_range_split(tail, last + 1, tail->last, &head);
 
 	if (head != prange && tail != prange) {
-		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
-		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
+		svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
+		svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE);
 	} else if (tail != prange) {
-		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
+		svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE);
 	} else if (head != prange) {
-		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
+		svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
 	} else if (parent != prange) {
 		prange->work_item.op = SVM_OP_UNMAP_RANGE;
 	}
@@ -2520,14 +2520,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
 		l = min(last, pchild->last);
 		if (l >= s)
 			svm_range_unmap_from_gpus(pchild, s, l, trigger);
-		svm_range_unmap_split(mm, prange, pchild, start, last);
+		svm_range_unmap_split(prange, pchild, start, last);
 		mutex_unlock(&pchild->lock);
 	}
 	s = max(start, prange->start);
 	l = min(last, prange->last);
 	if (l >= s)
 		svm_range_unmap_from_gpus(prange, s, l, trigger);
-	svm_range_unmap_split(mm, prange, prange, start, last);
+	svm_range_unmap_split(prange, prange, start, last);
 
 	if (unmap_parent)
 		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
@@ -2570,8 +2570,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
 	if (range->event == MMU_NOTIFY_RELEASE)
 		return true;
-	if (!mmget_not_zero(mni->mm))
-		return true;
 
 	start = mni->interval_tree.start;
 	last = mni->interval_tree.last;
@@ -2598,7 +2596,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
 	}
 
 	svm_range_unlock(prange);
-	mmput(mni->mm);
 
 	return true;
 }
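Aside: the svm fix above replaces an unconditional mmget(mm) with mmget_not_zero(mm), a try-get that only takes a reference while the mm's user count is still above zero, so deferred work is never queued against an address space that is already tearing down. Below is a rough userspace analogue of such a try-get using C11 atomics; it is illustrative only, not the kernel's implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is still alive (count > 0). */
static bool ref_get_not_zero(atomic_int *count)
{
	int old = atomic_load(count);

	while (old > 0) {
		/* CAS succeeds only if nobody dropped the count meanwhile;
		 * on failure, 'old' is reloaded and the loop re-checks. */
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return true;
	}
	return false; /* already dying; caller must not use the object */
}

int main(void)
{
	atomic_int live = 1, dead = 0;

	printf("live: %d\n", ref_get_not_zero(&live)); /* 1: ref taken */
	printf("dead: %d\n", ref_get_not_zero(&dead)); /* 0: skipped */
	return 0;
}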

View File

@@ -3610,13 +3610,15 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 	luminance_range = &conn_base->display_info.luminance_range;
 
-	if (luminance_range->max_luminance) {
-		caps->aux_min_input_signal = luminance_range->min_luminance;
+	if (luminance_range->max_luminance)
 		caps->aux_max_input_signal = luminance_range->max_luminance;
-	} else {
-		caps->aux_min_input_signal = 0;
+	else
 		caps->aux_max_input_signal = 512;
-	}
+
+	if (luminance_range->min_luminance)
+		caps->aux_min_input_signal = luminance_range->min_luminance;
+	else
+		caps->aux_min_input_signal = 1;
 
 	min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
 	if (min_input_signal_override >= 0)

View File

@@ -974,6 +974,7 @@ struct dc_crtc_timing {
 	uint32_t pix_clk_100hz;
 
 	uint32_t min_refresh_in_uhz;
+	uint32_t max_refresh_in_uhz;
 
 	uint32_t vic;
 	uint32_t hdmi_vic;

View File

@@ -155,6 +155,14 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
 		v_total = div64_u64(div64_u64(((unsigned long long)(
 				frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
 				stream->timing.h_total), 1000000);
+	} else if (refresh_in_uhz >= stream->timing.max_refresh_in_uhz) {
+		/* When the target refresh rate is the maximum panel refresh rate
+		 * round up the vtotal value to prevent off-by-one error causing
+		 * v_total_min to be below the panel's lower bound
+		 */
+		v_total = div64_u64(div64_u64(((unsigned long long)(
+				frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
+				stream->timing.h_total) + (1000000 - 1), 1000000);
 	} else {
 		v_total = div64_u64(div64_u64(((unsigned long long)(
 				frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
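Aside: the new max-refresh branch above switches from floor to ceiling division by adding (1000000 - 1) before dividing, the standard (n + d - 1) / d idiom, so v_total is rounded up instead of truncated. A small standalone illustration of the two roundings; the numbers are made up, not real display timings.

#include <inttypes.h>
#include <stdio.h>

/* Floor vs. ceiling integer division: adding (d - 1) before
 * dividing rounds the quotient up instead of truncating it. */
static uint64_t div_floor(uint64_t n, uint64_t d) { return n / d; }
static uint64_t div_ceil(uint64_t n, uint64_t d)  { return (n + d - 1) / d; }

int main(void)
{
	uint64_t ticks = 1124999; /* invented value, scaled by 1e6 */
	uint64_t scale = 1000000;

	printf("floor: %" PRIu64 "\n", div_floor(ticks, scale)); /* 1 */
	printf("ceil : %" PRIu64 "\n", div_ceil(ticks, scale));  /* 2 */
	return 0;
}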

View File

@@ -64,10 +64,11 @@ struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, str
 	adev->id = ret;
 	adev->name = "dp_hpd_bridge";
 	adev->dev.parent = parent;
-	adev->dev.of_node = of_node_get(parent->of_node);
 	adev->dev.release = drm_aux_hpd_bridge_release;
 	adev->dev.platform_data = of_node_get(np);
 
+	device_set_of_node_from_dev(&adev->dev, parent);
+
 	ret = auxiliary_device_init(adev);
 	if (ret) {
 		of_node_put(adev->dev.platform_data);

View File

@@ -299,6 +299,7 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
 	panel_bridge->bridge.of_node = panel->dev->of_node;
 	panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
 	panel_bridge->bridge.type = connector_type;
+	panel_bridge->bridge.pre_enable_prev_first = panel->prepare_prev_first;
 
 	drm_bridge_add(&panel_bridge->bridge);
 
@@ -413,8 +414,6 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
 		return bridge;
 	}
 
-	bridge->pre_enable_prev_first = panel->prepare_prev_first;
-
 	*ptr = bridge;
 	devres_add(dev, ptr);
 
@@ -456,8 +455,6 @@ struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
 	if (ret)
 		return ERR_PTR(ret);
 
-	bridge->pre_enable_prev_first = panel->prepare_prev_first;
-
 	return bridge;
 }
 EXPORT_SYMBOL(drmm_panel_bridge_add);

View File

@@ -1095,7 +1095,7 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
 	bool first = !xfer->tx_done;
 	u32 reg;
 
-	dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
+	dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
 		xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
 
 	if (length > DSI_TX_FIFO_SIZE)
@@ -1293,7 +1293,7 @@ static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
 	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
 
 	dev_dbg(dsi->dev,
-		"> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+		"> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
 		xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
 		xfer->rx_done);

View File

@@ -212,6 +212,35 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_private_object_fini);
 
+static void drm_gem_object_handle_get(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));
+
+	if (obj->handle_count++ == 0)
+		drm_gem_object_get(obj);
+}
+
+/**
+ * drm_gem_object_handle_get_unlocked - acquire reference on user-space handles
+ * @obj: GEM object
+ *
+ * Acquires a reference on the GEM buffer object's handle. Required
+ * to keep the GEM object alive. Call drm_gem_object_handle_put_unlocked()
+ * to release the reference.
+ */
+void drm_gem_object_handle_get_unlocked(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	guard(mutex)(&dev->object_name_lock);
+
+	drm_WARN_ON(dev, !obj->handle_count); /* first ref taken in create-tail helper */
+	drm_gem_object_handle_get(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_handle_get_unlocked);
+
 /**
  * drm_gem_object_handle_free - release resources bound to userspace handles
  * @obj: GEM object to clean up.
@@ -242,8 +271,14 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
 	}
 }
 
-static void
-drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
+/**
+ * drm_gem_object_handle_put_unlocked - releases reference on user-space handles
+ * @obj: GEM object
+ *
+ * Releases a reference on the GEM buffer object's handle. Possibly releases
+ * the GEM buffer object and associated dma-buf objects.
+ */
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	bool final = false;
@@ -268,6 +303,7 @@ drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
 	if (final)
 		drm_gem_object_put(obj);
 }
+EXPORT_SYMBOL(drm_gem_object_handle_put_unlocked);
 
 /*
  * Called at device or object close to release the file's
@@ -389,8 +425,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
-	if (obj->handle_count++ == 0)
-		drm_gem_object_get(obj);
+
+	drm_gem_object_handle_get(obj);
 
 	/*
 	 * Get the user-visible handle using idr. Preload and perform
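Aside: the new drm_gem_object_handle_get_unlocked() above takes the name lock with guard(mutex)(), the kernel's scope-based lock guard from linux/cleanup.h, which releases the mutex automatically on every exit path. A rough userspace analogue built on the GCC/Clang cleanup attribute follows; the names are invented for the sketch and it is not the kernel's implementation.

#include <pthread.h>
#include <stdio.h>

/* Scope-based unlock: the cleanup handler runs when the guard
 * variable goes out of scope, mirroring guard(mutex)(). */
static pthread_mutex_t object_name_lock = PTHREAD_MUTEX_INITIALIZER;

static void mutex_unlocker(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

#define guard_mutex(m) \
	pthread_mutex_t *_guard __attribute__((cleanup(mutex_unlocker))) = \
		(pthread_mutex_lock(m), (m))

static int handle_count;

static void handle_get_unlocked(void)
{
	guard_mutex(&object_name_lock); /* unlocked at every return path */
	handle_count++;
	printf("handle_count = %d\n", handle_count);
}

int main(void)
{
	handle_get_unlocked();
	handle_get_unlocked();
	return 0;
}

The design benefit, in the kernel as in the sketch, is that no early return can leak the lock.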

View File

@@ -99,7 +99,7 @@ void drm_gem_fb_destroy(struct drm_framebuffer *fb)
 	unsigned int i;
 
 	for (i = 0; i < fb->format->num_planes; i++)
-		drm_gem_object_put(fb->obj[i]);
+		drm_gem_object_handle_put_unlocked(fb->obj[i]);
 
 	drm_framebuffer_cleanup(fb);
 	kfree(fb);
@@ -182,8 +182,10 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev,
 		if (!objs[i]) {
 			drm_dbg_kms(dev, "Failed to lookup GEM object\n");
 			ret = -ENOENT;
-			goto err_gem_object_put;
+			goto err_gem_object_handle_put_unlocked;
 		}
+		drm_gem_object_handle_get_unlocked(objs[i]);
+		drm_gem_object_put(objs[i]);
 
 		min_size = (height - 1) * mode_cmd->pitches[i]
 			 + drm_format_info_min_pitch(info, i, width)
@@ -193,22 +195,22 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev,
 			drm_dbg_kms(dev,
 				    "GEM object size (%zu) smaller than minimum size (%u) for plane %d\n",
 				    objs[i]->size, min_size, i);
-			drm_gem_object_put(objs[i]);
+			drm_gem_object_handle_put_unlocked(objs[i]);
 			ret = -EINVAL;
-			goto err_gem_object_put;
+			goto err_gem_object_handle_put_unlocked;
 		}
 	}
 
 	ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
 	if (ret)
-		goto err_gem_object_put;
+		goto err_gem_object_handle_put_unlocked;
 
 	return 0;
 
-err_gem_object_put:
+err_gem_object_handle_put_unlocked:
 	while (i > 0) {
 		--i;
-		drm_gem_object_put(objs[i]);
+		drm_gem_object_handle_put_unlocked(objs[i]);
 	}
 
 	return ret;
 }

View File

@@ -161,6 +161,8 @@ void drm_sysfs_lease_event(struct drm_device *dev);
 
 /* drm_gem.c */
 int drm_gem_init(struct drm_device *dev);
+void drm_gem_object_handle_get_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj);
 int drm_gem_handle_create_tail(struct drm_file *file_priv,
 			       struct drm_gem_object *obj,
 			       u32 *handlep);

View File

@@ -91,12 +91,13 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
 	.restore = pm_generic_restore,
 };
 
-static const struct bus_type mipi_dsi_bus_type = {
+const struct bus_type mipi_dsi_bus_type = {
 	.name = "mipi-dsi",
 	.match = mipi_dsi_device_match,
 	.uevent = mipi_dsi_uevent,
 	.pm = &mipi_dsi_device_pm_ops,
 };
+EXPORT_SYMBOL_GPL(mipi_dsi_bus_type);
 
 /**
  * of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a

View File

@@ -636,6 +636,10 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 	if (!ctx->drm_dev)
 		goto out;
 
+	/* check if crtc and vblank have been initialized properly */
+	if (!drm_dev_has_vblank(ctx->drm_dev))
+		goto out;
+
 	if (!ctx->i80_if) {
 		drm_crtc_handle_vblank(&ctx->crtc->base);

View File

@@ -187,6 +187,7 @@ struct fimd_context {
 	u32 i80ifcon;
 	bool i80_if;
 	bool suspended;
+	bool dp_clk_enabled;
 	wait_queue_head_t wait_vsync_queue;
 	atomic_t wait_vsync_event;
 	atomic_t win_updated;
@@ -1047,7 +1048,18 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
 	struct fimd_context *ctx = container_of(clk, struct fimd_context,
 						dp_clk);
 	u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
 
+	if (enable == ctx->dp_clk_enabled)
+		return;
+
+	if (enable)
+		pm_runtime_resume_and_get(ctx->dev);
+
+	ctx->dp_clk_enabled = enable;
 	writel(val, ctx->regs + DP_MIE_CLKCON);
+
+	if (!enable)
+		pm_runtime_put(ctx->dev);
 }
 
 static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
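Aside: the fimd fix above adds a dp_clk_enabled flag so that repeated enable or disable calls cannot unbalance the runtime-PM usage count taken by pm_runtime_resume_and_get() and dropped by pm_runtime_put(). A minimal sketch of that idempotent enable pattern follows; pm_get()/pm_put() and the counter are stand-ins invented for the example, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

static int pm_refs;
static bool clk_enabled;

static void pm_get(void) { printf("pm refs -> %d\n", ++pm_refs); }
static void pm_put(void) { printf("pm refs -> %d\n", --pm_refs); }

static void dp_clock_enable(bool enable)
{
	if (enable == clk_enabled)
		return; /* repeated calls must not unbalance the refcount */

	if (enable)
		pm_get(); /* power up before touching the hardware */

	clk_enabled = enable;
	/* the register write (writel to DP_MIE_CLKCON) would go here */

	if (!enable)
		pm_put(); /* power down only after the register write */
}

int main(void)
{
	dp_clock_enable(true);
	dp_clock_enable(true);  /* ignored: already enabled */
	dp_clock_enable(false);
	return 0;
}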

Some files were not shown because too many files have changed in this diff Show More