
Linux 6.16-rc7

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCgA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmh9azkeHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiG7GIH/0lpQtHRl6N+q2Qs
 v75iG2ZouWyw2JlhUOHAToKU58MZqqTXLZzc8ZdY6fAd7DpXWKRGSDsyWVyLbUkt
 UKGzXEIJsHXYvw2QIPbhkY9gQBWpdZTh4tHztFyKb0QLn81qkibVP6ChOwSzOGa/
 xUyQ5v6yH+JvQlnQaCgy6hi7cMrLNSNZmuIjy0yc5Y153YPEtX5OUPO2PstpUx5r
 AuiOhU4ewW9QCe07X/Pk7tdn0T2Jg8Kwk1FViaM0RBUf/0GXGfsovIxpUP/eCyMc
 MA+9SXXLlDa/4Z8w3EsQYx6m2MnNmm0HPeevCmWqq3+Ocooik4si1BpzHfUaE6n/
 /0D8zBg=
 =NzEi
 -----END PGP SIGNATURE-----

Merge tag 'v6.16-rc7' into usb-next

We need the USB/Thunderbolt fixes in here for other patches to be on top
of.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman, 2025-07-21 10:55:57 +02:00
commit a83c371c4b
692 changed files with 7139 additions and 3144 deletions

View File

@@ -416,6 +416,7 @@ Kenneth W Chen <kenneth.w.chen@intel.com>
 Kenneth Westfield <quic_kwestfie@quicinc.com> <kwestfie@codeaurora.org>
 Kiran Gunda <quic_kgunda@quicinc.com> <kgunda@codeaurora.org>
 Kirill Tkhai <tkhai@ya.ru> <ktkhai@virtuozzo.com>
+Kirill A. Shutemov <kas@kernel.org> <kirill.shutemov@linux.intel.com>
 Kishon Vijay Abraham I <kishon@kernel.org> <kishon@ti.com>
 Konrad Dybcio <konradybcio@kernel.org> <konrad.dybcio@linaro.org>
 Konrad Dybcio <konradybcio@kernel.org> <konrad.dybcio@somainline.org>

View File

@@ -56,7 +56,7 @@ Date: January 2009
 Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 		The /sys/devices/.../async attribute allows the user space to
-		enable or diasble the device's suspend and resume callbacks to
+		enable or disable the device's suspend and resume callbacks to
 		be executed asynchronously (ie. in separate threads, in parallel
 		with the main suspend/resume thread) during system-wide power
 		transitions (eg. suspend to RAM, hibernation).
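An illustrative aside, not part of the diff: user space flips this attribute by writing the strings "enabled" or "disabled" to it. A minimal C sketch, using a hypothetical device path::

    #include <stdio.h>

    /* Hypothetical device; any /sys/devices/.../power/async attribute works. */
    #define ASYNC_ATTR "/sys/devices/pci0000:00/0000:00:14.0/power/async"

    int main(void)
    {
            FILE *f = fopen(ASYNC_ATTR, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* "disabled" forces the callbacks to run synchronously again. */
            fputs("disabled", f);
            fclose(f);
            return 0;
    }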

View File

@@ -584,6 +584,7 @@ What: /sys/devices/system/cpu/vulnerabilities
 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
 		/sys/devices/system/cpu/vulnerabilities/srbds
+		/sys/devices/system/cpu/vulnerabilities/tsa
 		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
 Date: January 2018
 Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>

View File

@@ -1732,12 +1732,6 @@ The following nested keys are defined.
 	  numa_hint_faults (npn)
 		Number of NUMA hinting faults.
-	  numa_task_migrated (npn)
-		Number of task migration by NUMA balancing.
-	  numa_task_swapped (npn)
-		Number of task swap by NUMA balancing.
 	  pgdemote_kswapd
 		Number of pages demoted by kswapd.

View File

@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
 combination with a microcode update. The microcode clears the affected CPU
 buffers when the VERW instruction is executed.
-Kernel reuses the MDS function to invoke the buffer clearing:
-
-	mds_clear_cpu_buffers()
+Kernel does the buffer clearing with x86_clear_cpu_buffers().
 On MDS affected CPUs, the kernel already invokes CPU buffer clear on
 kernel/userspace, hypervisor/guest and C-state (idle) transitions. No

View File

@@ -7488,6 +7488,19 @@
			having this key zero'ed is acceptable. E.g. in testing
			scenarios.
+	tsa=		[X86] Control mitigation for Transient Scheduler
+			Attacks on AMD CPUs. Search the following in your
+			favourite search engine for more details:
+
+			"Technical guidance for mitigating transient scheduler
+			attacks".
+
+			off  - disable the mitigation
+			on   - enable the mitigation (default)
+			user - mitigate only user/kernel transitions
+			vm   - mitigate only guest/host transitions
+
	tsc=		Disable clocksource stability checks for TSC.
			Format: <string>
			[x86] reliable: mark tsc clocksource as reliable, this
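An illustrative aside, not from the patch: whichever ``tsa=`` mode ends up in effect is reported through the new sysfs file listed earlier; a minimal C sketch to read it::

    #include <stdio.h>

    int main(void)
    {
            char buf[128];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/tsa", "r");

            if (!f) {
                    perror("fopen"); /* kernels before 6.16-rc7 lack this file */
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("TSA: %s", buf);
            fclose(f);
            return 0;
    }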

View File

@@ -93,7 +93,7 @@ enters a C-state.
 The kernel provides a function to invoke the buffer clearing:
-	mds_clear_cpu_buffers()
+	x86_clear_cpu_buffers()
 Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
 Other than CFLAGS.ZF, this macro doesn't clobber any registers.
@@ -185,9 +185,9 @@ Mitigation points
   idle clearing would be a window dressing exercise and is therefore not
   activated.
-  The invocation is controlled by the static key mds_idle_clear which is
-  switched depending on the chosen mitigation mode and the SMT state of
-  the system.
+  The invocation is controlled by the static key cpu_buf_idle_clear which is
+  switched depending on the chosen mitigation mode and the SMT state of the
+  system.
   The buffer clear is only invoked before entering the C-State to prevent
   that stale data from the idling CPU from spilling to the Hyper-Thread

View File

@@ -52,6 +52,9 @@ properties:
   '#clock-cells':
     const: 1

+  '#reset-cells':
+    const: 1
+
 required:
   - compatible
   - reg

View File

@@ -26,7 +26,8 @@ properties:
       - const: realtek,rtl9301-i2c

   reg:
-    description: Register offset and size this I2C controller.
+    items:
+      - description: Register offset and size this I2C controller.

   "#address-cells":
     const: 1

View File

@@ -223,12 +223,6 @@ allOf:
       - required:
           - pwms
-      - oneOf:
-          - required:
-              - interrupts
-          - required:
-              - io-backends

   - if:
       properties:
         compatible:

View File

@@ -21,7 +21,7 @@ properties:
   vlogic-supply: true

   interrupts:
-    minItems: 1
+    maxItems: 1
     description:
       Interrupt mapping for the trigger interrupt from the internal oscillator.

View File

@@ -23,7 +23,7 @@ properties:
               - allwinner,sun20i-d1-emac
               - allwinner,sun50i-h6-emac
               - allwinner,sun50i-h616-emac0
-              - allwinner,sun55i-a523-emac0
+              - allwinner,sun55i-a523-gmac0
           - const: allwinner,sun50i-a64-emac

   reg:

View File

@@ -65,7 +65,7 @@ Additional sysfs entries for sq52206
 ------------------------------------

 ======================= =======================================================
-energy1_input		Energy measurement (mJ)
+energy1_input		Energy measurement (uJ)

 power1_input_highest	Peak Power (uW)
 ======================= =======================================================
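An illustrative aside: the unit fix above matches the hwmon ABI, which defines ``energy*_input`` in microjoules. A hedged C sketch that reads such a value (the hwmon index is a placeholder)::

    #include <stdio.h>

    int main(void)
    {
            unsigned long long uj;
            /* hwmon0 is a placeholder; the sq52206 instance may differ. */
            FILE *f = fopen("/sys/class/hwmon/hwmon0/energy1_input", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fscanf(f, "%llu", &uj) == 1)
                    printf("energy: %llu uJ (%.3f J)\n", uj, uj / 1e6);
            fclose(f);
            return 0;
    }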

View File

@@ -160,6 +160,66 @@ attribute-sets:
         name: link-tx-packets
         type: uint
         doc: Number of packets transmitted at the transport level
+  -
+    name: peer-new-input
+    subset-of: peer
+    attributes:
+      -
+        name: id
+      -
+        name: remote-ipv4
+      -
+        name: remote-ipv6
+      -
+        name: remote-ipv6-scope-id
+      -
+        name: remote-port
+      -
+        name: socket
+      -
+        name: vpn-ipv4
+      -
+        name: vpn-ipv6
+      -
+        name: local-ipv4
+      -
+        name: local-ipv6
+      -
+        name: keepalive-interval
+      -
+        name: keepalive-timeout
+  -
+    name: peer-set-input
+    subset-of: peer
+    attributes:
+      -
+        name: id
+      -
+        name: remote-ipv4
+      -
+        name: remote-ipv6
+      -
+        name: remote-ipv6-scope-id
+      -
+        name: remote-port
+      -
+        name: vpn-ipv4
+      -
+        name: vpn-ipv6
+      -
+        name: local-ipv4
+      -
+        name: local-ipv6
+      -
+        name: keepalive-interval
+      -
+        name: keepalive-timeout
+  -
+    name: peer-del-input
+    subset-of: peer
+    attributes:
+      -
+        name: id
   -
     name: keyconf
     attributes:
@@ -216,6 +276,33 @@ attribute-sets:
           obtain the actual cipher IV
         checks:
           exact-len: nonce-tail-size
+  -
+    name: keyconf-get
+    subset-of: keyconf
+    attributes:
+      -
+        name: peer-id
+      -
+        name: slot
+      -
+        name: key-id
+      -
+        name: cipher-alg
+  -
+    name: keyconf-swap-input
+    subset-of: keyconf
+    attributes:
+      -
+        name: peer-id
+  -
+    name: keyconf-del-input
+    subset-of: keyconf
+    attributes:
+      -
+        name: peer-id
+      -
+        name: slot
   -
     name: ovpn
     attributes:
@@ -235,12 +322,66 @@ attribute-sets:
         type: nest
         doc: Peer specific cipher configuration
         nested-attributes: keyconf
+  -
+    name: ovpn-peer-new-input
+    subset-of: ovpn
+    attributes:
+      -
+        name: ifindex
+      -
+        name: peer
+        nested-attributes: peer-new-input
+  -
+    name: ovpn-peer-set-input
+    subset-of: ovpn
+    attributes:
+      -
+        name: ifindex
+      -
+        name: peer
+        nested-attributes: peer-set-input
+  -
+    name: ovpn-peer-del-input
+    subset-of: ovpn
+    attributes:
+      -
+        name: ifindex
+      -
+        name: peer
+        nested-attributes: peer-del-input
+  -
+    name: ovpn-keyconf-get
+    subset-of: ovpn
+    attributes:
+      -
+        name: ifindex
+      -
+        name: keyconf
+        nested-attributes: keyconf-get
+  -
+    name: ovpn-keyconf-swap-input
+    subset-of: ovpn
+    attributes:
+      -
+        name: ifindex
+      -
+        name: keyconf
+        nested-attributes: keyconf-swap-input
+  -
+    name: ovpn-keyconf-del-input
+    subset-of: ovpn
+    attributes:
+      -
+        name: ifindex
+      -
+        name: keyconf
+        nested-attributes: keyconf-del-input

 operations:
   list:
     -
       name: peer-new
-      attribute-set: ovpn
+      attribute-set: ovpn-peer-new-input
       flags: [ admin-perm ]
       doc: Add a remote peer
       do:
@@ -252,7 +393,7 @@ operations:
             - peer
     -
       name: peer-set
-      attribute-set: ovpn
+      attribute-set: ovpn-peer-set-input
       flags: [ admin-perm ]
       doc: modify a remote peer
       do:
@@ -286,7 +427,7 @@ operations:
             - peer
     -
       name: peer-del
-      attribute-set: ovpn
+      attribute-set: ovpn-peer-del-input
       flags: [ admin-perm ]
       doc: Delete existing remote peer
       do:
@@ -316,7 +457,7 @@ operations:
             - keyconf
     -
       name: key-get
-      attribute-set: ovpn
+      attribute-set: ovpn-keyconf-get
       flags: [ admin-perm ]
       doc: Retrieve non-sensitive data about peer key and cipher
       do:
@@ -331,7 +472,7 @@ operations:
             - keyconf
     -
       name: key-swap
-      attribute-set: ovpn
+      attribute-set: ovpn-keyconf-swap-input
       flags: [ admin-perm ]
       doc: Swap primary and secondary session keys for a specific peer
       do:
@@ -350,7 +491,7 @@ operations:
         mcgrp: peers
     -
       name: key-del
-      attribute-set: ovpn
+      attribute-set: ovpn-keyconf-del-input
       flags: [ admin-perm ]
       doc: Delete cipher key for a specific peer
       do:

View File

@@ -2008,6 +2008,13 @@ If the KVM_CAP_VM_TSC_CONTROL capability is advertised, this can also
 be used as a vm ioctl to set the initial tsc frequency of subsequently
 created vCPUs.

+For TSC protected Confidential Computing (CoCo) VMs where TSC frequency
+is configured once at VM scope and remains unchanged during VM's
+lifetime, the vm ioctl should be used to configure the TSC frequency
+and the vcpu ioctl is not supported.
+
+Example of such CoCo VMs: TDX guests.
+
 4.56 KVM_GET_TSC_KHZ
 --------------------
@@ -7196,6 +7203,10 @@ The valid value for 'flags' is:
		u64 leaf;
		u64 r11, r12, r13, r14;
	} get_tdvmcall_info;
+	struct {
+		u64 ret;
+		u64 vector;
+	} setup_event_notify;
	};
 } tdx;
@@ -7210,21 +7221,24 @@ number from register R11. The remaining field of the union provide the
 inputs and outputs of the TDVMCALL. Currently the following values of
 ``nr`` are defined:

 * ``TDVMCALL_GET_QUOTE``: the guest has requested to generate a TD-Quote
   signed by a service hosting TD-Quoting Enclave operating on the host.
   Parameters and return value are in the ``get_quote`` field of the union.
   The ``gpa`` field and ``size`` specify the guest physical address
   (without the shared bit set) and the size of a shared-memory buffer, in
   which the TDX guest passes a TD Report. The ``ret`` field represents
   the return value of the GetQuote request. When the request has been
   queued successfully, the TDX guest can poll the status field in the
   shared-memory area to check whether the Quote generation is completed or
   not. When completed, the generated Quote is returned via the same buffer.

 * ``TDVMCALL_GET_TD_VM_CALL_INFO``: the guest has requested the support
   status of TDVMCALLs. The output values for the given leaf should be
   placed in fields from ``r11`` to ``r14`` of the ``get_tdvmcall_info``
   field of the union.

+* ``TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT``: the guest has requested to
+  set up a notification interrupt for vector ``vector``.
+
 KVM may add support for more values in the future that may cause a userspace
 exit, even without calls to ``KVM_ENABLE_CAP`` or similar. In this case,
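To make the VM-scope usage concrete, here is a hedged C sketch, assuming KVM_CAP_VM_TSC_CONTROL is advertised::

    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int vm;

            if (kvm < 0) {
                    perror("open /dev/kvm");
                    return 1;
            }
            vm = ioctl(kvm, KVM_CREATE_VM, 0);
            if (vm < 0) {
                    perror("KVM_CREATE_VM");
                    return 1;
            }
            /* Configure the TSC frequency once, VM-wide: 2.5 GHz here. */
            if (ioctl(vm, KVM_SET_TSC_KHZ, 2500000) < 0)
                    perror("KVM_SET_TSC_KHZ");
            return 0;
    }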

View File

@@ -7,7 +7,7 @@ Review checklist for kvm patches
 1. The patch must follow Documentation/process/coding-style.rst and
    Documentation/process/submitting-patches.rst.

-2. Patches should be against kvm.git master branch.
+2. Patches should be against kvm.git master or next branches.

 3. If the patch introduces or modifies a new userspace API:
    - the API must be documented in Documentation/virt/kvm/api.rst
@@ -18,10 +18,10 @@ Review checklist for kvm patches
 5. New features must default to off (userspace should explicitly request them).
    Performance improvements can and should default to on.

-6. New cpu features should be exposed via KVM_GET_SUPPORTED_CPUID2
+6. New cpu features should be exposed via KVM_GET_SUPPORTED_CPUID2,
+   or its equivalent for non-x86 architectures

-7. Emulator changes should be accompanied by unit tests for qemu-kvm.git
-   kvm/test directory.
+7. The feature should be testable (see below).

 8. Changes should be vendor neutral when possible. Changes to common code
    are better than duplicating changes to vendor code.
@@ -36,6 +36,87 @@ Review checklist for kvm patches
 11. New guest visible features must either be documented in a hardware manual
     or be accompanied by documentation.

-12. Features must be robust against reset and kexec - for example, shared
-    host/guest memory must be unshared to prevent the host from writing to
-    guest memory that the guest has not reserved for this purpose.
+Testing of KVM code
+-------------------
+
+All features contributed to KVM, and in many cases bugfixes too, should be
+accompanied by some kind of tests and/or enablement in open source guests
+and VMMs. KVM is covered by multiple test suites:
+
+*Selftests*
+  These are low level tests that allow granular testing of kernel APIs.
+  This includes API failure scenarios, invoking APIs after specific
+  guest instructions, and testing multiple calls to ``KVM_CREATE_VM``
+  within a single test. They are included in the kernel tree at
+  ``tools/testing/selftests/kvm``.
+
+``kvm-unit-tests``
+  A collection of small guests that test CPU and emulated device features
+  from a guest's perspective. They run under QEMU or ``kvmtool``, and
+  are generally not KVM-specific: they can be run with any accelerator
+  that QEMU support or even on bare metal, making it possible to compare
+  behavior across hypervisors and processor families.
+
+Functional test suites
+  Various sets of functional tests exist, such as QEMU's ``tests/functional``
+  suite and `avocado-vt <https://avocado-vt.readthedocs.io/en/latest/>`__.
+  These typically involve running a full operating system in a virtual
+  machine.
+
+The best testing approach depends on the feature's complexity and
+operation. Here are some examples and guidelines:
+
+New instructions (no new registers or APIs)
+  The corresponding CPU features (if applicable) should be made available
+  in QEMU. If the instructions require emulation support or other code in
+  KVM, it is worth adding coverage to ``kvm-unit-tests`` or selftests;
+  the latter can be a better choice if the instructions relate to an API
+  that already has good selftest coverage.
+
+New hardware features (new registers, no new APIs)
+  These should be tested via ``kvm-unit-tests``; this more or less implies
+  supporting them in QEMU and/or ``kvmtool``. In some cases selftests
+  can be used instead, similar to the previous case, or specifically to
+  test corner cases in guest state save/restore.
+
+Bug fixes and performance improvements
+  These usually do not introduce new APIs, but it's worth sharing
+  any benchmarks and tests that will validate your contribution,
+  ideally in the form of regression tests. Tests and benchmarks
+  can be included in either ``kvm-unit-tests`` or selftests, depending
+  on the specifics of your change. Selftests are especially useful for
+  regression tests because they are included directly in Linux's tree.
+
+Large scale internal changes
+  While it's difficult to provide a single policy, you should ensure that
+  the changed code is covered by either ``kvm-unit-tests`` or selftests.
+  In some cases the affected code is run for any guests and functional
+  tests suffice. Explain your testing process in the cover letter,
+  as that can help identify gaps in existing test suites.
+
+New APIs
+  It is important to demonstrate your use case. This can be as simple as
+  explaining that the feature is already in use on bare metal, or it can be
+  a proof-of-concept implementation in userspace. The latter need not be
+  open source, though that is of course preferrable for easier testing.
+  Selftests should test corner cases of the APIs, and should also cover
+  basic host and guest operation if no open source VMM uses the feature.
+
+Bigger features, usually spanning host and guest
+  These should be supported by Linux guests, with limited exceptions for
+  Hyper-V features that are testable on Windows guests. It is strongly
+  suggested that the feature be usable with an open source host VMM, such
+  as at least one of QEMU or crosvm, and guest firmware. Selftests should
+  test at least API error cases. Guest operation can be covered by
+  either selftests of ``kvm-unit-tests`` (this is especially important for
+  paravirtualized and Windows-only features). Strong selftest coverage
+  can also be a replacement for implementation in an open source VMM,
+  but this is generally not recommended.
+
+Following the above suggestions for testing in selftests and
+``kvm-unit-tests`` will make it easier for the maintainers to review
+and accept your code. In fact, even before you contribute your changes
+upstream it will make it easier for you to develop for KVM.
+
+Of course, the KVM maintainers reserve the right to require more tests,
+though they may also waive the requirement from time to time.
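As a concrete flavour of what the selftests bullet means, a minimal user-space probe of the KVM API, not an actual test from the tree, might look like::

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int vm;

            if (kvm < 0) {
                    perror("open /dev/kvm");
                    return 1;
            }
            /* Selftests start the same way: query the API, create a VM. */
            printf("KVM API version: %d\n", ioctl(kvm, KVM_GET_API_VERSION, 0));

            vm = ioctl(kvm, KVM_CREATE_VM, 0);
            if (vm < 0)
                    perror("KVM_CREATE_VM");
            else
                    printf("created VM fd %d\n", vm);
            close(kvm);
            return 0;
    }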

View File

@@ -79,7 +79,20 @@ to be configured to the TDX guest.
 struct kvm_tdx_capabilities {
	__u64 supported_attrs;
	__u64 supported_xfam;
-	__u64 reserved[254];
+
+	/* TDG.VP.VMCALL hypercalls executed in kernel and forwarded to
+	 * userspace, respectively
+	 */
+	__u64 kernel_tdvmcallinfo_1_r11;
+	__u64 user_tdvmcallinfo_1_r11;
+
+	/* TDG.VP.VMCALL instruction executions subfunctions executed in kernel
+	 * and forwarded to userspace, respectively
+	 */
+	__u64 kernel_tdvmcallinfo_1_r12;
+	__u64 user_tdvmcallinfo_1_r12;
+
+	__u64 reserved[250];

	/* Configurable CPUID bits for userspace */
	struct kvm_cpuid2 cpuid;

View File

@@ -36,7 +36,7 @@ Offset Size (in bytes) Content
 The WMI object flags control whether the method or notification ID is used:

-- 0x1: Data block usage is expensive and must be explicitly enabled/disabled.
+- 0x1: Data block is expensive to collect.
 - 0x2: Data block contains WMI methods.
 - 0x4: Data block contains ASCIZ string.
 - 0x8: Data block describes a WMI event, use notification ID instead
@@ -83,14 +83,18 @@ event as hexadecimal value. Their first parameter is an integer with a value
 of 0 if the WMI event should be disabled, other values will enable
 the WMI event.

+Those ACPI methods are always called even for WMI events not registered as
+being expensive to collect to match the behavior of the Windows driver.
+
 WCxx ACPI methods
 -----------------
-Similar to the ``WExx`` ACPI methods, except that it controls data collection
-instead of events and thus the last two characters of the ACPI method name are
-the method ID of the data block to enable/disable.
+Similar to the ``WExx`` ACPI methods, except that instead of WMI events it controls
+data collection of data blocks registered as being expensive to collect. Thus the
+last two characters of the ACPI method name are the method ID of the data block
+to enable/disable.

 Those ACPI methods are also called before setting data blocks to match the
-behaviour of the Windows driver.
+behavior of the Windows driver.

 _WED ACPI method
 ----------------
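Since these flags are independent bits, one data block can carry several at once; a small illustrative C sketch of decoding them (macro names invented for the example)::

    #include <stdio.h>

    #define WMI_FLAG_EXPENSIVE 0x1 /* expensive to collect */
    #define WMI_FLAG_METHOD    0x2 /* contains WMI methods */
    #define WMI_FLAG_STRING    0x4 /* contains an ASCIZ string */
    #define WMI_FLAG_EVENT     0x8 /* describes a WMI event */

    int main(void)
    {
            unsigned char flags = WMI_FLAG_EXPENSIVE | WMI_FLAG_EVENT;

            if (flags & WMI_FLAG_EVENT)
                    printf("event block: the notification ID is used\n");
            else if (flags & WMI_FLAG_METHOD)
                    printf("method block: the method ID is used\n");
            if (flags & WMI_FLAG_EXPENSIVE)
                    printf("collection gated by WCxx/WExx methods\n");
            return 0;
    }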

View File

@@ -4181,6 +4181,7 @@ F: include/linux/cpumask_types.h
 F: include/linux/find.h
 F: include/linux/nodemask.h
 F: include/linux/nodemask_types.h
+F: include/uapi/linux/bits.h
 F: include/vdso/bits.h
 F: lib/bitmap-str.c
 F: lib/bitmap.c
@@ -4193,6 +4194,7 @@ F: tools/include/linux/bitfield.h
 F: tools/include/linux/bitmap.h
 F: tools/include/linux/bits.h
 F: tools/include/linux/find.h
+F: tools/include/uapi/linux/bits.h
 F: tools/include/vdso/bits.h
 F: tools/lib/bitmap.c
 F: tools/lib/find_bit.c
@@ -5568,6 +5570,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F: drivers/char/
 F: drivers/misc/
 F: include/linux/miscdevice.h
+F: rust/kernel/miscdevice.rs
 F: samples/rust/rust_misc_device.rs
 X: drivers/char/agp/
 X: drivers/char/hw_random/
@@ -10504,7 +10507,7 @@ S: Maintained
 F: block/partitions/efi.*

 HABANALABS PCI DRIVER
-M: Ofir Bitton <obitton@habana.ai>
+M: Yaron Avizrat <yaron.avizrat@intel.com>
 L: dri-devel@lists.freedesktop.org
 S: Supported
 C: irc://irc.oftc.net/dri-devel
@@ -16822,8 +16825,8 @@ F: include/dt-bindings/clock/mobileye,eyeq5-clk.h
 MODULE SUPPORT
 M: Luis Chamberlain <mcgrof@kernel.org>
 M: Petr Pavlu <petr.pavlu@suse.com>
+M: Daniel Gomez <da.gomez@kernel.org>
 R: Sami Tolvanen <samitolvanen@google.com>
-R: Daniel Gomez <da.gomez@samsung.com>
 L: linux-modules@vger.kernel.org
 L: linux-kernel@vger.kernel.org
 S: Maintained
@@ -17222,10 +17225,10 @@ F: drivers/rtc/rtc-ntxec.c
 F: include/linux/mfd/ntxec.h

 NETRONOME ETHERNET DRIVERS
-M: Louis Peens <louis.peens@corigine.com>
 R: Jakub Kicinski <kuba@kernel.org>
+R: Simon Horman <horms@kernel.org>
 L: oss-drivers@corigine.com
-S: Maintained
+S: Odd Fixes
 F: drivers/net/ethernet/netronome/

 NETWORK BLOCK DEVICE (NBD)
@@ -19601,8 +19604,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
 F: drivers/pinctrl/intel/

 PIN CONTROLLER - KEEMBAY
-M: Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>
-S: Supported
+S: Orphan
 F: drivers/pinctrl/pinctrl-keembay*

 PIN CONTROLLER - MEDIATEK
@@ -20155,21 +20157,15 @@ S: Supported
 F: Documentation/devicetree/bindings/soc/qcom/qcom,apr*
 F: Documentation/devicetree/bindings/sound/qcom,*
 F: drivers/soc/qcom/apr.c
-F: include/dt-bindings/sound/qcom,wcd9335.h
-F: include/dt-bindings/sound/qcom,wcd934x.h
-F: sound/soc/codecs/lpass-rx-macro.*
-F: sound/soc/codecs/lpass-tx-macro.*
-F: sound/soc/codecs/lpass-va-macro.c
-F: sound/soc/codecs/lpass-wsa-macro.*
+F: drivers/soundwire/qcom.c
+F: include/dt-bindings/sound/qcom,wcd93*
+F: sound/soc/codecs/lpass-*.*
 F: sound/soc/codecs/msm8916-wcd-analog.c
 F: sound/soc/codecs/msm8916-wcd-digital.c
 F: sound/soc/codecs/wcd-clsh-v2.*
 F: sound/soc/codecs/wcd-mbhc-v2.*
-F: sound/soc/codecs/wcd9335.*
-F: sound/soc/codecs/wcd934x.c
-F: sound/soc/codecs/wsa881x.c
-F: sound/soc/codecs/wsa883x.c
-F: sound/soc/codecs/wsa884x.c
+F: sound/soc/codecs/wcd93*.*
+F: sound/soc/codecs/wsa88*.*
 F: sound/soc/qcom/

 QCOM EMBEDDED USB DEBUGGER (EUD)
@@ -25907,6 +25903,8 @@ F: fs/hostfs/
 USERSPACE COPYIN/COPYOUT (UIOVEC)
 M: Alexander Viro <viro@zeniv.linux.org.uk>
+L: linux-block@vger.kernel.org
+L: linux-fsdevel@vger.kernel.org
 S: Maintained
 F: include/linux/uio.h
 F: lib/iov_iter.c
@@ -26944,7 +26942,7 @@ F: arch/x86/kernel/stacktrace.c
 F: arch/x86/kernel/unwind_*.c

 X86 TRUST DOMAIN EXTENSIONS (TDX)
-M: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+M: Kirill A. Shutemov <kas@kernel.org>
 R: Dave Hansen <dave.hansen@linux.intel.com>
 L: x86@kernel.org
 L: linux-coco@lists.linux.dev
@@ -27313,13 +27311,6 @@ S: Supported
 W: http://www.marvell.com
 F: drivers/i2c/busses/i2c-xlp9xx.c

-XRA1403 GPIO EXPANDER
-M: Nandor Han <nandor.han@ge.com>
-L: linux-gpio@vger.kernel.org
-S: Maintained
-F: Documentation/devicetree/bindings/gpio/gpio-xra1403.txt
-F: drivers/gpio/gpio-xra1403.c
-
 XTENSA XTFPGA PLATFORM SUPPORT
 M: Max Filippov <jcmvbkbc@gmail.com>
 S: Maintained

View File

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*

View File

@@ -256,6 +256,7 @@ config ARM64
	select HOTPLUG_SMT if HOTPLUG_CPU
	select IRQ_DOMAIN
	select IRQ_FORCED_THREADING
+	select JUMP_LABEL
	select KASAN_VMALLOC if KASAN
	select LOCK_MM_AND_FIND_VMA
	select MODULES_USE_ELF_RELA

View File

@@ -20,8 +20,6 @@
		compatible = "jedec,spi-nor";
		reg = <0x0>;
		spi-max-frequency = <25000000>;
-		#address-cells = <1>;
-		#size-cells = <1>;

		partitions {
			compatible = "fixed-partitions";

View File

@@ -100,6 +100,8 @@
 &displaydfr_mipi {
	status = "okay";
+	#address-cells = <1>;
+	#size-cells = <0>;

	dfr_panel: panel@0 {
		compatible = "apple,j293-summit", "apple,summit";

View File

@@ -71,7 +71,7 @@
  */
 &port00 {
	bus-range = <1 1>;
-	wifi0: network@0,0 {
+	wifi0: wifi@0,0 {
		compatible = "pci14e4,4425";
		reg = <0x10000 0x0 0x0 0x0 0x0>;
		/* To be filled by the loader */

View File

@@ -405,8 +405,6 @@
			compatible = "apple,t8103-display-pipe-mipi", "apple,h7-display-pipe-mipi";
			reg = <0x2 0x28600000 0x0 0x100000>;
			power-domains = <&ps_mipi_dsi>;
-			#address-cells = <1>;
-			#size-cells = <0>;
			status = "disabled";

			ports {

View File

@@ -63,6 +63,8 @@
 &displaydfr_mipi {
	status = "okay";
+	#address-cells = <1>;
+	#size-cells = <0>;

	dfr_panel: panel@0 {
		compatible = "apple,j493-summit", "apple,summit";

View File

@@ -420,8 +420,6 @@
			compatible = "apple,t8112-display-pipe-mipi", "apple,h7-display-pipe-mipi";
			reg = <0x2 0x28600000 0x0 0x100000>;
			power-domains = <&ps_mipi_dsi>;
-			#address-cells = <1>;
-			#size-cells = <0>;
			status = "disabled";

			ports {

View File

@@ -687,11 +687,12 @@
		};

		wdog0: watchdog@2ad0000 {
-			compatible = "fsl,imx21-wdt";
+			compatible = "fsl,ls1046a-wdt", "fsl,imx21-wdt";
			reg = <0x0 0x2ad0000 0x0 0x10000>;
			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
			clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
					    QORIQ_CLK_PLL_DIV(2)>;
+			big-endian;
		};

		edma0: dma-controller@2c00000 {

View File

@@ -464,6 +464,7 @@
			};

			reg_nvcc_sd: LDO5 {
+				regulator-always-on;
				regulator-max-microvolt = <3300000>;
				regulator-min-microvolt = <1800000>;
				regulator-name = "On-module +V3.3_1.8_SD (LDO5)";

View File

@@ -70,7 +70,7 @@
	tpm@1 {
		compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
		reg = <0x1>;
-		spi-max-frequency = <36000000>;
+		spi-max-frequency = <25000000>;
	};
 };

View File

@@ -110,7 +110,7 @@
	tpm@1 {
		compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
		reg = <0x1>;
-		spi-max-frequency = <36000000>;
+		spi-max-frequency = <25000000>;
	};
 };

View File

@@ -122,7 +122,7 @@
	tpm@1 {
		compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
		reg = <0x1>;
-		spi-max-frequency = <36000000>;
+		spi-max-frequency = <25000000>;
	};
 };

View File

@@ -201,7 +201,7 @@
	tpm@0 {
		compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
		reg = <0x0>;
-		spi-max-frequency = <36000000>;
+		spi-max-frequency = <25000000>;
	};
 };

View File

@@ -574,17 +574,17 @@
 &scmi_iomuxc {
	pinctrl_emdio: emdiogrp {
		fsl,pins = <
-			IMX95_PAD_ENET2_MDC__NETCMIX_TOP_NETC_MDC		0x57e
-			IMX95_PAD_ENET2_MDIO__NETCMIX_TOP_NETC_MDIO		0x97e
+			IMX95_PAD_ENET2_MDC__NETCMIX_TOP_NETC_MDC		0x50e
+			IMX95_PAD_ENET2_MDIO__NETCMIX_TOP_NETC_MDIO		0x90e
		>;
	};

	pinctrl_enetc0: enetc0grp {
		fsl,pins = <
-			IMX95_PAD_ENET1_TD3__NETCMIX_TOP_ETH0_RGMII_TD3		0x57e
-			IMX95_PAD_ENET1_TD2__NETCMIX_TOP_ETH0_RGMII_TD2		0x57e
-			IMX95_PAD_ENET1_TD1__NETCMIX_TOP_ETH0_RGMII_TD1		0x57e
-			IMX95_PAD_ENET1_TD0__NETCMIX_TOP_ETH0_RGMII_TD0		0x57e
+			IMX95_PAD_ENET1_TD3__NETCMIX_TOP_ETH0_RGMII_TD3		0x50e
+			IMX95_PAD_ENET1_TD2__NETCMIX_TOP_ETH0_RGMII_TD2		0x50e
+			IMX95_PAD_ENET1_TD1__NETCMIX_TOP_ETH0_RGMII_TD1		0x50e
+			IMX95_PAD_ENET1_TD0__NETCMIX_TOP_ETH0_RGMII_TD0		0x50e
			IMX95_PAD_ENET1_TX_CTL__NETCMIX_TOP_ETH0_RGMII_TX_CTL	0x57e
			IMX95_PAD_ENET1_TXC__NETCMIX_TOP_ETH0_RGMII_TX_CLK	0x58e
			IMX95_PAD_ENET1_RX_CTL__NETCMIX_TOP_ETH0_RGMII_RX_CTL	0x57e
@@ -598,10 +598,10 @@
	pinctrl_enetc1: enetc1grp {
		fsl,pins = <
-			IMX95_PAD_ENET2_TD3__NETCMIX_TOP_ETH1_RGMII_TD3		0x57e
-			IMX95_PAD_ENET2_TD2__NETCMIX_TOP_ETH1_RGMII_TD2		0x57e
-			IMX95_PAD_ENET2_TD1__NETCMIX_TOP_ETH1_RGMII_TD1		0x57e
-			IMX95_PAD_ENET2_TD0__NETCMIX_TOP_ETH1_RGMII_TD0		0x57e
+			IMX95_PAD_ENET2_TD3__NETCMIX_TOP_ETH1_RGMII_TD3		0x50e
+			IMX95_PAD_ENET2_TD2__NETCMIX_TOP_ETH1_RGMII_TD2		0x50e
+			IMX95_PAD_ENET2_TD1__NETCMIX_TOP_ETH1_RGMII_TD1		0x50e
+			IMX95_PAD_ENET2_TD0__NETCMIX_TOP_ETH1_RGMII_TD0		0x50e
			IMX95_PAD_ENET2_TX_CTL__NETCMIX_TOP_ETH1_RGMII_TX_CTL	0x57e
			IMX95_PAD_ENET2_TXC__NETCMIX_TOP_ETH1_RGMII_TX_CLK	0x58e
			IMX95_PAD_ENET2_RX_CTL__NETCMIX_TOP_ETH1_RGMII_RX_CTL	0x57e

View File

@@ -566,17 +566,17 @@
 &scmi_iomuxc {
	pinctrl_emdio: emdiogrp{
		fsl,pins = <
-			IMX95_PAD_ENET1_MDC__NETCMIX_TOP_NETC_MDC		0x57e
-			IMX95_PAD_ENET1_MDIO__NETCMIX_TOP_NETC_MDIO		0x97e
+			IMX95_PAD_ENET1_MDC__NETCMIX_TOP_NETC_MDC		0x50e
+			IMX95_PAD_ENET1_MDIO__NETCMIX_TOP_NETC_MDIO		0x90e
		>;
	};

	pinctrl_enetc0: enetc0grp {
		fsl,pins = <
-			IMX95_PAD_ENET1_TD3__NETCMIX_TOP_ETH0_RGMII_TD3		0x57e
-			IMX95_PAD_ENET1_TD2__NETCMIX_TOP_ETH0_RGMII_TD2		0x57e
-			IMX95_PAD_ENET1_TD1__NETCMIX_TOP_ETH0_RGMII_TD1		0x57e
-			IMX95_PAD_ENET1_TD0__NETCMIX_TOP_ETH0_RGMII_TD0		0x57e
+			IMX95_PAD_ENET1_TD3__NETCMIX_TOP_ETH0_RGMII_TD3		0x50e
+			IMX95_PAD_ENET1_TD2__NETCMIX_TOP_ETH0_RGMII_TD2		0x50e
+			IMX95_PAD_ENET1_TD1__NETCMIX_TOP_ETH0_RGMII_TD1		0x50e
+			IMX95_PAD_ENET1_TD0__NETCMIX_TOP_ETH0_RGMII_TD0		0x50e
			IMX95_PAD_ENET1_TX_CTL__NETCMIX_TOP_ETH0_RGMII_TX_CTL	0x57e
			IMX95_PAD_ENET1_TXC__NETCMIX_TOP_ETH0_RGMII_TX_CLK	0x58e
			IMX95_PAD_ENET1_RX_CTL__NETCMIX_TOP_ETH0_RGMII_RX_CTL	0x57e

View File

@@ -1708,7 +1708,7 @@
			      <0x9 0 1 0>;
			reg-names = "dbi","atu", "dbi2", "app", "dma", "addr_space";
			num-lanes = <1>;
-			interrupts = <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-names = "dma";
			clocks = <&scmi_clk IMX95_CLK_HSIO>,
				 <&scmi_clk IMX95_CLK_HSIOPLL>,

View File

@@ -1090,6 +1090,8 @@
 };

 &pmk8280_rtc {
+	qcom,uefi-rtc-info;
+
	status = "okay";
 };

View File

@@ -224,6 +224,7 @@
			reg-names = "rtc", "alarm";
			interrupts = <0x0 0x62 0x1 IRQ_TYPE_EDGE_RISING>;
			qcom,no-alarm; /* alarm owned by ADSP */
+			qcom,uefi-rtc-info;
		};

		pmk8550_sdam_2: nvram@7100 {

View File

@@ -379,6 +379,18 @@
				<0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
		};
	};
+
+	spi1 {
+		spi1_csn0_gpio_pin: spi1-csn0-gpio-pin {
+			rockchip,pins =
+				<3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
+		};
+
+		spi1_csn1_gpio_pin: spi1-csn1-gpio-pin {
+			rockchip,pins =
+				<3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
+		};
+	};
 };

 &pmu_io_domains {
@@ -396,6 +408,17 @@
	vqmmc-supply = <&vccio_sd>;
 };

+&spi1 {
+	/*
+	 * Hardware CS has a very slow rise time of about 6us,
+	 * causing transmission errors.
+	 * With cs-gpios we have a rise time of about 20ns.
+	 */
+	cs-gpios = <&gpio3 RK_PB1 GPIO_ACTIVE_LOW>, <&gpio3 RK_PB2 GPIO_ACTIVE_LOW>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi1_clk &spi1_csn0_gpio_pin &spi1_csn1_gpio_pin &spi1_miso &spi1_mosi>;
+};
+
 &tsadc {
	status = "okay";
 };

View File

@@ -30,6 +30,7 @@
	fan: gpio_fan {
		compatible = "gpio-fan";
+		fan-supply = <&vcc12v_dcin>;
		gpios = <&gpio0 RK_PD5 GPIO_ACTIVE_HIGH>;
		gpio-fan,speed-map =
			< 0  0>,

View File

@@ -211,10 +211,38 @@
	status = "okay";
 };

+&cpu_b0 {
+	cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&cpu_b1 {
+	cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&cpu_b2 {
+	cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&cpu_b3 {
+	cpu-supply = <&vdd_cpu_big_s0>;
+};
+
 &cpu_l0 {
	cpu-supply = <&vdd_cpu_lit_s0>;
 };

+&cpu_l1 {
+	cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+	cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+	cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
 &gmac0 {
	phy-mode = "rgmii-id";
	clock_in_out = "output";

View File

@@ -615,7 +615,7 @@
				<0 0 0 2 &pcie1_intc 1>,
				<0 0 0 3 &pcie1_intc 2>,
				<0 0 0 4 &pcie1_intc 3>;
-		linux,pci-domain = <0>;
+		linux,pci-domain = <1>;
		max-link-speed = <2>;
		num-ib-windows = <8>;
		num-viewport = <8>;

View File

@@ -578,14 +578,14 @@
		hdmim0_tx0_scl: hdmim0-tx0-scl {
			rockchip,pins =
				/* hdmim0_tx0_scl */
-				<4 RK_PB7 5 &pcfg_pull_none>;
+				<4 RK_PB7 5 &pcfg_pull_none_drv_level_5_smt>;
		};

		/omit-if-no-ref/
		hdmim0_tx0_sda: hdmim0-tx0-sda {
			rockchip,pins =
				/* hdmim0_tx0_sda */
-				<4 RK_PC0 5 &pcfg_pull_none>;
+				<4 RK_PC0 5 &pcfg_pull_none_drv_level_1_smt>;
		};

		/omit-if-no-ref/
@@ -640,14 +640,14 @@
		hdmim1_tx0_scl: hdmim1-tx0-scl {
			rockchip,pins =
				/* hdmim1_tx0_scl */
-				<0 RK_PD5 11 &pcfg_pull_none>;
+				<0 RK_PD5 11 &pcfg_pull_none_drv_level_5_smt>;
		};

		/omit-if-no-ref/
		hdmim1_tx0_sda: hdmim1-tx0-sda {
			rockchip,pins =
				/* hdmim1_tx0_sda */
-				<0 RK_PD4 11 &pcfg_pull_none>;
+				<0 RK_PD4 11 &pcfg_pull_none_drv_level_1_smt>;
		};

		/omit-if-no-ref/
@@ -668,14 +668,14 @@
		hdmim1_tx1_scl: hdmim1-tx1-scl {
			rockchip,pins =
				/* hdmim1_tx1_scl */
-				<3 RK_PC6 5 &pcfg_pull_none>;
+				<3 RK_PC6 5 &pcfg_pull_none_drv_level_5_smt>;
		};

		/omit-if-no-ref/
		hdmim1_tx1_sda: hdmim1-tx1-sda {
			rockchip,pins =
				/* hdmim1_tx1_sda */
-				<3 RK_PC5 5 &pcfg_pull_none>;
+				<3 RK_PC5 5 &pcfg_pull_none_drv_level_1_smt>;
		};

		/omit-if-no-ref/
		hdmim2_rx_cec: hdmim2-rx-cec {
@@ -709,14 +709,14 @@
		hdmim2_tx0_scl: hdmim2-tx0-scl {
			rockchip,pins =
				/* hdmim2_tx0_scl */
-				<3 RK_PC7 5 &pcfg_pull_none>;
+				<3 RK_PC7 5 &pcfg_pull_none_drv_level_5_smt>;
		};

		/omit-if-no-ref/
		hdmim2_tx0_sda: hdmim2-tx0-sda {
			rockchip,pins =
				/* hdmim2_tx0_sda */
-				<3 RK_PD0 5 &pcfg_pull_none>;
+				<3 RK_PD0 5 &pcfg_pull_none_drv_level_1_smt>;
		};

		/omit-if-no-ref/
@@ -730,14 +730,14 @@
		hdmim2_tx1_scl: hdmim2-tx1-scl {
			rockchip,pins =
				/* hdmim2_tx1_scl */
-				<1 RK_PA4 5 &pcfg_pull_none>;
+				<1 RK_PA4 5 &pcfg_pull_none_drv_level_5_smt>;
		};

		/omit-if-no-ref/
		hdmim2_tx1_sda: hdmim2-tx1-sda {
			rockchip,pins =
				/* hdmim2_tx1_sda */
-				<1 RK_PA3 5 &pcfg_pull_none>;
+				<1 RK_PA3 5 &pcfg_pull_none_drv_level_1_smt>;
		};

		/omit-if-no-ref/

View File

@@ -321,6 +321,7 @@
	bus-width = <4>;
	cap-mmc-highspeed;
	cap-sd-highspeed;
+	cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
	disable-wp;
	max-frequency = <150000000>;
	no-sdio;

View File

@@ -160,14 +160,15 @@
		hdmim0_tx1_scl: hdmim0-tx1-scl {
			rockchip,pins =
				/* hdmim0_tx1_scl */
-				<2 RK_PB5 4 &pcfg_pull_none>;
+				<2 RK_PB5 4 &pcfg_pull_none_drv_level_3_smt>;
		};

		/omit-if-no-ref/
		hdmim0_tx1_sda: hdmim0-tx1-sda {
			rockchip,pins =
				/* hdmim0_tx1_sda */
-				<2 RK_PB4 4 &pcfg_pull_none>;
+				<2 RK_PB4 4 &pcfg_pull_none_drv_level_1_smt>;
		};
	};
 };

View File

@@ -474,6 +474,7 @@
	bus-width = <4>;
	cap-mmc-highspeed;
	cap-sd-highspeed;
+	cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
	disable-wp;
	max-frequency = <150000000>;
	no-sdio;

View File

@@ -332,6 +332,41 @@
			input-schmitt-enable;
		};

+		/omit-if-no-ref/
+		pcfg_pull_none_drv_level_1_smt: pcfg-pull-none-drv-level-1-smt {
+			bias-disable;
+			drive-strength = <1>;
+			input-schmitt-enable;
+		};
+
+		/omit-if-no-ref/
+		pcfg_pull_none_drv_level_2_smt: pcfg-pull-none-drv-level-2-smt {
+			bias-disable;
+			drive-strength = <2>;
+			input-schmitt-enable;
+		};
+
+		/omit-if-no-ref/
+		pcfg_pull_none_drv_level_3_smt: pcfg-pull-none-drv-level-3-smt {
+			bias-disable;
+			drive-strength = <3>;
+			input-schmitt-enable;
+		};
+
+		/omit-if-no-ref/
+		pcfg_pull_none_drv_level_4_smt: pcfg-pull-none-drv-level-4-smt {
+			bias-disable;
+			drive-strength = <4>;
+			input-schmitt-enable;
+		};
+
+		/omit-if-no-ref/
+		pcfg_pull_none_drv_level_5_smt: pcfg-pull-none-drv-level-5-smt {
+			bias-disable;
+			drive-strength = <5>;
+			input-schmitt-enable;
+		};
+
		/omit-if-no-ref/
		pcfg_output_high: pcfg-output-high {
			output-high;

View File

@@ -1444,6 +1444,7 @@ CONFIG_PLATFORM_MHU=y
 CONFIG_BCM2835_MBOX=y
 CONFIG_QCOM_APCS_IPC=y
 CONFIG_MTK_ADSP_MBOX=m
+CONFIG_QCOM_CPUCP_MBOX=m
 CONFIG_QCOM_IPCC=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_SMMU=y
@@ -1573,6 +1574,7 @@ CONFIG_RESET_QCOM_AOSS=y
 CONFIG_RESET_QCOM_PDC=m
 CONFIG_RESET_RZG2L_USBPHY_CTRL=y
 CONFIG_RESET_TI_SCI=y
+CONFIG_PHY_SNPS_EUSB2=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_CAN_TRANSCEIVER=m
 CONFIG_PHY_NXP_PTN3222=m
@@ -1597,7 +1599,6 @@ CONFIG_PHY_QCOM_EDP=m
 CONFIG_PHY_QCOM_PCIE2=m
 CONFIG_PHY_QCOM_QMP=m
 CONFIG_PHY_QCOM_QUSB2=m
-CONFIG_PHY_QCOM_SNPS_EUSB2=m
 CONFIG_PHY_QCOM_EUSB2_REPEATER=m
 CONFIG_PHY_QCOM_M31_USB=m
 CONFIG_PHY_QCOM_USB_HS=m

View File

@@ -287,17 +287,6 @@
 .Lskip_fgt2_\@:
 .endm

-.macro __init_el2_gcs
-	mrs_s	x1, SYS_ID_AA64PFR1_EL1
-	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
-	cbz	x1, .Lskip_gcs_\@
-
-	/* Ensure GCS is not enabled when we start trying to do BLs */
-	msr_s	SYS_GCSCR_EL1, xzr
-	msr_s	SYS_GCSCRE0_EL1, xzr
-.Lskip_gcs_\@:
-.endm
-
 /**
  * Initialize EL2 registers to sane values. This should be called early on all
  * cores that were booted in EL2. Note that everything gets initialised as
@@ -319,7 +308,6 @@
	__init_el2_cptr
	__init_el2_fgt
	__init_el2_fgt2
-	__init_el2_gcs
 .endm

 #ifndef __KVM_NVHE_HYPERVISOR__
@@ -371,6 +359,13 @@
	msr_s	SYS_MPAMHCR_EL2, xzr	// clear TRAP_MPAMIDR_EL1 -> EL2
 .Lskip_mpam_\@:

+	check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2
+
+.Linit_gcs_\@:
+	msr_s	SYS_GCSCR_EL1, xzr
+	msr_s	SYS_GCSCRE0_EL1, xzr
+
+.Lskip_gcs_\@:
+
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2

 .Linit_sve_\@:	/* SVE register access */

View File

@@ -1480,7 +1480,6 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
				      struct reg_mask_range *range);

 /* Guest/host FPSIMD coordination helpers */
-int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);

View File

@@ -34,7 +34,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
			   cpufeature.o alternative.o cacheinfo.o	\
			   smp.o smp_spin_table.o topology.o smccc-call.o \
			   syscall.o proton-pack.o idle.o patching.o pi/ \
-			   rsi.o
+			   rsi.o jump_label.o

 obj-$(CONFIG_COMPAT)			+= sys32.o signal32.o		\
					   sys_compat.o
@@ -47,7 +47,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
 obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF)	+= watchdog_hld.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_CPU_PM)			+= sleep.o suspend.o
-obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
 obj-$(CONFIG_KGDB)			+= kgdb.o
 obj-$(CONFIG_EFI)			+= efi.o efi-rt-wrapper.o
 obj-$(CONFIG_PCI)			+= pci.o

View File

@@ -3135,6 +3135,13 @@ static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope)
 }
 #endif

+#ifdef CONFIG_ARM64_SME
+static bool has_sme_feature(const struct arm64_cpu_capabilities *cap, int scope)
+{
+	return system_supports_sme() && has_user_cpuid_feature(cap, scope);
+}
+#endif
+
 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
@@ -3223,31 +3230,31 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
	HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
 #ifdef CONFIG_ARM64_SME
	HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4),
 #endif /* CONFIG_ARM64_SME */
	HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT),
	HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA),

View File

@@ -15,6 +15,7 @@
 #include <asm/efi.h>
 #include <asm/stacktrace.h>
+#include <asm/vmap_stack.h>

 static bool region_is_misaligned(const efi_memory_desc_t *md)
 {
@@ -214,9 +215,13 @@ static int __init arm64_efi_rt_init(void)
	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return 0;

-	p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
-			   NUMA_NO_NODE, &&l);
-l:	if (!p) {
+	if (!IS_ENABLED(CONFIG_VMAP_STACK)) {
+		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+		return -ENOMEM;
+	}
+
+	p = arch_alloc_vmap_stack(THREAD_SIZE, NUMA_NO_NODE);
+	if (!p) {
		pr_warn("Failed to allocate EFI runtime stack\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return -ENOMEM;


@ -673,6 +673,11 @@ static void permission_overlay_switch(struct task_struct *next)
current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0); current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
if (current->thread.por_el0 != next->thread.por_el0) { if (current->thread.por_el0 != next->thread.por_el0) {
write_sysreg_s(next->thread.por_el0, SYS_POR_EL0); write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
/*
* No ISB required as we can tolerate spurious Overlay faults -
* the fault handler will check again based on the new value
* of POR_EL0.
*/
} }
} }


@ -1143,7 +1143,7 @@ static inline unsigned int num_other_online_cpus(void)
void smp_send_stop(void) void smp_send_stop(void)
{ {
static unsigned long stop_in_progress; static unsigned long stop_in_progress;
cpumask_t mask; static cpumask_t mask;
unsigned long timeout; unsigned long timeout;
/* /*


@ -825,10 +825,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (!kvm_arm_vcpu_is_finalized(vcpu)) if (!kvm_arm_vcpu_is_finalized(vcpu))
return -EPERM; return -EPERM;
ret = kvm_arch_vcpu_run_map_fp(vcpu);
if (ret)
return ret;
if (likely(vcpu_has_run_once(vcpu))) if (likely(vcpu_has_run_once(vcpu)))
return 0; return 0;
@ -2129,7 +2125,7 @@ static void cpu_hyp_init(void *discard)
static void cpu_hyp_uninit(void *discard) static void cpu_hyp_uninit(void *discard)
{ {
if (__this_cpu_read(kvm_hyp_initialized)) { if (!is_protected_kvm_enabled() && __this_cpu_read(kvm_hyp_initialized)) {
cpu_hyp_reset(); cpu_hyp_reset();
__this_cpu_write(kvm_hyp_initialized, 0); __this_cpu_write(kvm_hyp_initialized, 0);
} }
@ -2345,8 +2341,13 @@ static void __init teardown_hyp_mode(void)
free_hyp_pgds(); free_hyp_pgds();
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
if (per_cpu(kvm_hyp_initialized, cpu))
continue;
free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT); free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
if (!kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu])
continue;
if (free_sve) { if (free_sve) {
struct cpu_sve_state *sve_state; struct cpu_sve_state *sve_state;
@ -2354,6 +2355,9 @@ static void __init teardown_hyp_mode(void)
sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
free_pages((unsigned long) sve_state, pkvm_host_sve_state_order()); free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
} }
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
} }
} }


@ -14,32 +14,6 @@
#include <asm/kvm_mmu.h> #include <asm/kvm_mmu.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
/*
* Called on entry to KVM_RUN unless this vcpu previously ran at least
* once and the most recent prior KVM_RUN for this vcpu was called from
* the same task as current (highly likely).
*
* This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
* such that on entering hyp the relevant parts of current are already
* mapped.
*/
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
int ret;
/* pKVM has its own tracking of the host fpsimd state. */
if (is_protected_kvm_enabled())
return 0;
/* Make sure the host task fpsimd state is visible to hyp: */
ret = kvm_share_hyp(fpsimd, fpsimd + 1);
if (ret)
return ret;
return 0;
}
/* /*
* Prepare vcpu for saving the host's FPSIMD state and loading the guest's. * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
* The actual loading is done by the FPSIMD access trap taken to hyp. * The actual loading is done by the FPSIMD access trap taken to hyp.


@ -479,6 +479,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{ {
struct kvm_mem_range cur; struct kvm_mem_range cur;
kvm_pte_t pte; kvm_pte_t pte;
u64 granule;
s8 level; s8 level;
int ret; int ret;
@ -496,18 +497,21 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
return -EPERM; return -EPERM;
} }
do { for (; level <= KVM_PGTABLE_LAST_LEVEL; level++) {
u64 granule = kvm_granule_size(level); if (!kvm_level_supports_block_mapping(level))
continue;
granule = kvm_granule_size(level);
cur.start = ALIGN_DOWN(addr, granule); cur.start = ALIGN_DOWN(addr, granule);
cur.end = cur.start + granule; cur.end = cur.start + granule;
level++; if (!range_included(&cur, range))
} while ((level <= KVM_PGTABLE_LAST_LEVEL) && continue;
!(kvm_level_supports_block_mapping(level) &&
range_included(&cur, range)));
*range = cur; *range = cur;
return 0; return 0;
}
WARN_ON(1);
return -EINVAL;
} }
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
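To see what the rewritten loop in host_stage2_adjust_range() computes, a worked example may help, assuming 4KiB pages (level-2 block = 2MiB, level-3 granule = 4KiB; illustrative addresses):

	/*
	 * addr = 0x40123000, range = [0x40000000, 0x40200000):
	 *   level 2: granule = 2MiB, cur = [0x40000000, 0x40200000), which is
	 *            fully contained in range, so a 2MiB block mapping is used;
	 *   otherwise the loop falls through to a 4KiB mapping at level 3, and
	 *   only if no level fits does it WARN and return -EINVAL.
	 */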


@ -1402,6 +1402,21 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
} }
} }
#define has_tgran_2(__r, __sz) \
({ \
u64 _s1, _s2, _mmfr0 = __r; \
\
_s2 = SYS_FIELD_GET(ID_AA64MMFR0_EL1, \
TGRAN##__sz##_2, _mmfr0); \
\
_s1 = SYS_FIELD_GET(ID_AA64MMFR0_EL1, \
TGRAN##__sz, _mmfr0); \
\
((_s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_NI && \
_s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz) || \
(_s2 == ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz && \
_s1 != ID_AA64MMFR0_EL1_TGRAN##__sz##_NI)); \
})
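Reading the has_tgran_2() macro above: _s2 is the stage-2 granule field (TGRANx_2) and _s1 the stage-1 field (TGRANx). Stage-2 support for a granule is deemed present either when _s2 explicitly reports support (anything other than NI or the "same as stage 1" sentinel), or when _s2 defers to stage 1 and stage 1 itself is not NI. With illustrative 4K values:

	/*
	 * _s2 == TGRAN4_2_NI                          -> false
	 * _s2 == TGRAN4_2_IMP                         -> true
	 * _s2 == TGRAN4_2_TGRAN4 && _s1 != TGRAN4_NI  -> true
	 * _s2 == TGRAN4_2_TGRAN4 && _s1 == TGRAN4_NI  -> false
	 */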
/* /*
* Our emulated CPU doesn't support all the possible features. For the * Our emulated CPU doesn't support all the possible features. For the
* sake of simplicity (and probably mental sanity), wipe out a number * sake of simplicity (and probably mental sanity), wipe out a number
@ -1411,6 +1426,8 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
*/ */
u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val) u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
{ {
u64 orig_val = val;
switch (reg) { switch (reg) {
case SYS_ID_AA64ISAR0_EL1: case SYS_ID_AA64ISAR0_EL1:
/* Support everything but TME */ /* Support everything but TME */
@ -1480,12 +1497,15 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
*/ */
switch (PAGE_SIZE) { switch (PAGE_SIZE) {
case SZ_4K: case SZ_4K:
if (has_tgran_2(orig_val, 4))
val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP); val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
fallthrough; fallthrough;
case SZ_16K: case SZ_16K:
if (has_tgran_2(orig_val, 16))
val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP); val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
fallthrough; fallthrough;
case SZ_64K: case SZ_64K:
if (has_tgran_2(orig_val, 64))
val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP); val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
break; break;
} }


@ -2624,7 +2624,7 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
*/ */
if (hpmn > vcpu->kvm->arch.nr_pmu_counters) { if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
hpmn = vcpu->kvm->arch.nr_pmu_counters; hpmn = vcpu->kvm->arch.nr_pmu_counters;
u64_replace_bits(val, hpmn, MDCR_EL2_HPMN); u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN);
} }
__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val); __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
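The small fix above matters because the two bitfield helpers differ in how they deliver their result. A short illustration based on the generic <linux/bitfield.h> helpers:

	u64 val = 0xff;

	u64_replace_bits(val, 0x2, GENMASK_ULL(7, 4));        /* return value
	                                                       * discarded; val
	                                                       * is still 0xff */
	val = u64_replace_bits(val, 0x2, GENMASK_ULL(7, 4));  /* val == 0x2f */
	u64p_replace_bits(&val, 0x3, GENMASK_ULL(7, 4));      /* val == 0x3f */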


@ -401,9 +401,7 @@ void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu)
{ {
bool level; bool level;
level = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En; level = (__vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En) && vgic_v3_get_misr(vcpu);
if (level)
level &= vgic_v3_get_misr(vcpu);
kvm_vgic_inject_irq(vcpu->kvm, vcpu, kvm_vgic_inject_irq(vcpu->kvm, vcpu,
vcpu->kvm->arch.vgic.mi_intid, level, vcpu); vcpu->kvm->arch.vgic.mi_intid, level, vcpu);
} }


@ -487,17 +487,29 @@ static void do_bad_area(unsigned long far, unsigned long esr,
} }
} }
static bool fault_from_pkey(unsigned long esr, struct vm_area_struct *vma, static bool fault_from_pkey(struct vm_area_struct *vma, unsigned int mm_flags)
unsigned int mm_flags)
{ {
unsigned long iss2 = ESR_ELx_ISS2(esr);
if (!system_supports_poe()) if (!system_supports_poe())
return false; return false;
if (esr_fsc_is_permission_fault(esr) && (iss2 & ESR_ELx_Overlay)) /*
return true; * We do not check whether an Overlay fault has occurred because we
* cannot make a decision based solely on its value:
*
* - If Overlay is set, a fault did occur due to POE, but it may be
* spurious in those cases where we update POR_EL0 without ISB (e.g.
* on context-switch). We would then need to manually check POR_EL0
* against vma_pkey(vma), which is exactly what
* arch_vma_access_permitted() does.
*
* - If Overlay is not set, we may still need to report a pkey fault.
* This is the case if an access was made within a mapping but with no
* page mapped, and POR_EL0 forbids the access (according to
* vma_pkey()). Such an access will result in a SIGSEGV regardless
* because core code checks arch_vma_access_permitted(), but in order
* to report the correct error code - SEGV_PKUERR - we must handle
* that case here.
*/
return !arch_vma_access_permitted(vma, return !arch_vma_access_permitted(vma,
mm_flags & FAULT_FLAG_WRITE, mm_flags & FAULT_FLAG_WRITE,
mm_flags & FAULT_FLAG_INSTRUCTION, mm_flags & FAULT_FLAG_INSTRUCTION,
@ -635,7 +647,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
goto bad_area; goto bad_area;
} }
if (fault_from_pkey(esr, vma, mm_flags)) { if (fault_from_pkey(vma, mm_flags)) {
pkey = vma_pkey(vma); pkey = vma_pkey(vma);
vma_end_read(vma); vma_end_read(vma);
fault = 0; fault = 0;
@ -679,7 +691,7 @@ retry:
goto bad_area; goto bad_area;
} }
if (fault_from_pkey(esr, vma, mm_flags)) { if (fault_from_pkey(vma, mm_flags)) {
pkey = vma_pkey(vma); pkey = vma_pkey(vma);
mmap_read_unlock(mm); mmap_read_unlock(mm);
fault = 0; fault = 0;


@ -518,7 +518,6 @@ alternative_else_nop_endif
msr REG_PIR_EL1, x0 msr REG_PIR_EL1, x0
orr tcr2, tcr2, TCR2_EL1_PIE orr tcr2, tcr2, TCR2_EL1_PIE
msr REG_TCR2_EL1, x0
.Lskip_indirection: .Lskip_indirection:


@ -63,7 +63,8 @@ config RISCV
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_STACKWALK select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_CFI_CLANG # clang >= 17: https://github.com/llvm/llvm-project/commit/62fa708ceb027713b386c7e0efda994f8bdc27e2
select ARCH_SUPPORTS_CFI_CLANG if CLANG_VERSION >= 170000
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_HUGETLBFS if MMU select ARCH_SUPPORTS_HUGETLBFS if MMU
@ -97,6 +98,7 @@ config RISCV
select CLONE_BACKWARDS select CLONE_BACKWARDS
select COMMON_CLK select COMMON_CLK
select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND
select DYNAMIC_FTRACE if FUNCTION_TRACER
select EDAC_SUPPORT select EDAC_SUPPORT
select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE) select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
@ -161,7 +163,7 @@ config RISCV
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if HAVE_DYNAMIC_FTRACE_WITH_ARGS select HAVE_FUNCTION_GRAPH_TRACER if HAVE_DYNAMIC_FTRACE_WITH_ARGS
select HAVE_FUNCTION_GRAPH_FREGS select HAVE_FUNCTION_GRAPH_FREGS
select HAVE_FUNCTION_TRACER if !XIP_KERNEL select HAVE_FUNCTION_TRACER if !XIP_KERNEL && HAVE_DYNAMIC_FTRACE
select HAVE_EBPF_JIT if MMU select HAVE_EBPF_JIT if MMU
select HAVE_GUP_FAST if MMU select HAVE_GUP_FAST if MMU
select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_ARG_ACCESS_API


@ -87,6 +87,9 @@ DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
extern struct kvm_device_ops kvm_riscv_aia_device_ops; extern struct kvm_device_ops kvm_riscv_aia_device_ops;
bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu); int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);
@ -161,7 +164,6 @@ void kvm_riscv_aia_destroy_vm(struct kvm *kvm);
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner, int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
void __iomem **hgei_va, phys_addr_t *hgei_pa); void __iomem **hgei_va, phys_addr_t *hgei_pa);
void kvm_riscv_aia_free_hgei(int cpu, int hgei); void kvm_riscv_aia_free_hgei(int cpu, int hgei);
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);
void kvm_riscv_aia_enable(void); void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void); void kvm_riscv_aia_disable(void);


@ -306,6 +306,9 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu; return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
} }
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12 #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,


@ -311,8 +311,8 @@ do { \
do { \ do { \
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \ !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
__inttype(x) val = (__inttype(x))x; \ __inttype(x) ___val = (__inttype(x))x; \
if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(val), sizeof(*__gu_ptr))) \ if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(___val), sizeof(*__gu_ptr))) \
goto label; \ goto label; \
break; \ break; \
} \ } \
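The extra underscores in ___val are macro hygiene: if the caller's expression x itself names a variable called val, the old expansion captures it. A hypothetical standalone illustration of the bug (not kernel code):

	#define STORE(p, x) do { long val = (long)(x); *(p) = val; } while (0)

	long val = 42, out;
	STORE(&out, val);	/* expands to: long val = (long)(val);
				 * the inner 'val' shadows the outer one and
				 * self-initializes to an indeterminate value */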


@ -18,10 +18,10 @@ const struct cpu_operations cpu_ops_sbi;
/* /*
* Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can * Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can
* be invoked from multiple threads in parallel. Define a per cpu data * be invoked from multiple threads in parallel. Define an array of boot data
* to handle that. * to handle that.
*/ */
static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data); static struct sbi_hart_boot_data boot_data[NR_CPUS];
static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr, static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
unsigned long priv) unsigned long priv)
@ -67,7 +67,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
unsigned long boot_addr = __pa_symbol(secondary_start_sbi); unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
unsigned long hartid = cpuid_to_hartid_map(cpuid); unsigned long hartid = cpuid_to_hartid_map(cpuid);
unsigned long hsm_data; unsigned long hsm_data;
struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid); struct sbi_hart_boot_data *bdata = &boot_data[cpuid];
/* Make sure tidle is updated */ /* Make sure tidle is updated */
smp_mb(); smp_mb();


@ -14,6 +14,18 @@
#include <asm/text-patching.h> #include <asm/text-patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_arch_code_modify_prepare(void)
__acquires(&text_mutex)
{
mutex_lock(&text_mutex);
}
void ftrace_arch_code_modify_post_process(void)
__releases(&text_mutex)
{
mutex_unlock(&text_mutex);
}
unsigned long ftrace_call_adjust(unsigned long addr) unsigned long ftrace_call_adjust(unsigned long addr)
{ {
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
@ -29,10 +41,8 @@ unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
void arch_ftrace_update_code(int command) void arch_ftrace_update_code(int command)
{ {
mutex_lock(&text_mutex);
command |= FTRACE_MAY_SLEEP; command |= FTRACE_MAY_SLEEP;
ftrace_modify_all_code(command); ftrace_modify_all_code(command);
mutex_unlock(&text_mutex);
flush_icache_all(); flush_icache_all();
} }
@ -149,6 +159,8 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
unsigned int nops[2], offset; unsigned int nops[2], offset;
int ret; int ret;
guard(mutex)(&text_mutex);
ret = ftrace_rec_set_nop_ops(rec); ret = ftrace_rec_set_nop_ops(rec);
if (ret) if (ret)
return ret; return ret;
@ -157,9 +169,7 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
nops[0] = to_auipc_t0(offset); nops[0] = to_auipc_t0(offset);
nops[1] = RISCV_INSN_NOP4; nops[1] = RISCV_INSN_NOP4;
mutex_lock(&text_mutex);
ret = patch_insn_write((void *)pc, nops, 2 * MCOUNT_INSN_SIZE); ret = patch_insn_write((void *)pc, nops, 2 * MCOUNT_INSN_SIZE);
mutex_unlock(&text_mutex);
return ret; return ret;
} }


@ -6,6 +6,7 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/randomize_kstack.h> #include <linux/randomize_kstack.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/sched/debug.h> #include <linux/sched/debug.h>
@ -151,7 +152,9 @@ asmlinkage __visible __trap_section void name(struct pt_regs *regs) \
{ \ { \
if (user_mode(regs)) { \ if (user_mode(regs)) { \
irqentry_enter_from_user_mode(regs); \ irqentry_enter_from_user_mode(regs); \
local_irq_enable(); \
do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \ do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
local_irq_disable(); \
irqentry_exit_to_user_mode(regs); \ irqentry_exit_to_user_mode(regs); \
} else { \ } else { \
irqentry_state_t state = irqentry_nmi_enter(regs); \ irqentry_state_t state = irqentry_nmi_enter(regs); \
@ -173,17 +176,14 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
if (user_mode(regs)) { if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs); irqentry_enter_from_user_mode(regs);
local_irq_enable(); local_irq_enable();
handled = riscv_v_first_use_handler(regs); handled = riscv_v_first_use_handler(regs);
local_irq_disable();
if (!handled) if (!handled)
do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc, do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
"Oops - illegal instruction"); "Oops - illegal instruction");
local_irq_disable();
irqentry_exit_to_user_mode(regs); irqentry_exit_to_user_mode(regs);
} else { } else {
irqentry_state_t state = irqentry_nmi_enter(regs); irqentry_state_t state = irqentry_nmi_enter(regs);
@ -308,9 +308,11 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{ {
if (user_mode(regs)) { if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs); irqentry_enter_from_user_mode(regs);
local_irq_enable();
handle_break(regs); handle_break(regs);
local_irq_disable();
irqentry_exit_to_user_mode(regs); irqentry_exit_to_user_mode(regs);
} else { } else {
irqentry_state_t state = irqentry_nmi_enter(regs); irqentry_state_t state = irqentry_nmi_enter(regs);


@ -461,7 +461,7 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
} }
if (!fp) if (!fp)
SET_RD(insn, regs, val.data_ulong << shift >> shift); SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift);
else if (len == 8) else if (len == 8)
set_f64_rd(insn, regs, val.data_u64); set_f64_rd(insn, regs, val.data_u64);
else else
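The added cast makes the right shift arithmetic rather than logical, which is what sign-extends the loaded value into the destination register. A small worked example, assuming a 64-bit long and a 2-byte signed load:

	unsigned long v = 0x8000;	/* raw 16-bit value, sign bit set */
	int shift = 64 - 16;		/* 48 */

	long sx = (long)(v << shift) >> shift;	  /* 0xffffffffffff8000 */
	unsigned long zx = (v << shift) >> shift; /* 0x0000000000008000:
						   * the old, zero-extending
						   * behaviour */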


@ -30,28 +30,6 @@ unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids; unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available); DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
static int aia_find_hgei(struct kvm_vcpu *owner)
{
int i, hgei;
unsigned long flags;
struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
raw_spin_lock_irqsave(&hgctrl->lock, flags);
hgei = -1;
for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
if (hgctrl->owners[i] == owner) {
hgei = i;
break;
}
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
put_cpu_ptr(&aia_hgei);
return hgei;
}
static inline unsigned long aia_hvictl_value(bool ext_irq_pending) static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
{ {
unsigned long hvictl; unsigned long hvictl;
@ -95,7 +73,6 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask) bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{ {
int hgei;
unsigned long seip; unsigned long seip;
if (!kvm_riscv_aia_available()) if (!kvm_riscv_aia_available())
@ -114,11 +91,7 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip) if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
return false; return false;
hgei = aia_find_hgei(vcpu); return kvm_riscv_vcpu_aia_imsic_has_interrupt(vcpu);
if (hgei > 0)
return !!(ncsr_read(CSR_HGEIP) & BIT(hgei));
return false;
} }
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu) void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
@ -164,6 +137,9 @@ void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
csr_write(CSR_HVIPRIO2H, csr->hviprio2h); csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif #endif
} }
if (kvm_riscv_aia_initialized(vcpu->kvm))
kvm_riscv_vcpu_aia_imsic_load(vcpu, cpu);
} }
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu) void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
@ -174,6 +150,9 @@ void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
if (!kvm_riscv_aia_available()) if (!kvm_riscv_aia_available())
return; return;
if (kvm_riscv_aia_initialized(vcpu->kvm))
kvm_riscv_vcpu_aia_imsic_put(vcpu);
if (kvm_riscv_nacl_available()) { if (kvm_riscv_nacl_available()) {
nsh = nacl_shmem(); nsh = nacl_shmem();
csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT); csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
@ -472,22 +451,6 @@ void kvm_riscv_aia_free_hgei(int cpu, int hgei)
raw_spin_unlock_irqrestore(&hgctrl->lock, flags); raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
} }
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
{
int hgei;
if (!kvm_riscv_aia_available())
return;
hgei = aia_find_hgei(owner);
if (hgei > 0) {
if (enable)
csr_set(CSR_HGEIE, BIT(hgei));
else
csr_clear(CSR_HGEIE, BIT(hgei));
}
}
static irqreturn_t hgei_interrupt(int irq, void *dev_id) static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{ {
int i; int i;


@ -676,6 +676,48 @@ static void imsic_swfile_update(struct kvm_vcpu *vcpu,
imsic_swfile_extirq_update(vcpu); imsic_swfile_extirq_update(vcpu);
} }
bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu)
{
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
unsigned long flags;
bool ret = false;
/*
* The IMSIC SW-file directly injects interrupts via hvip, so
* only check for an interrupt when the IMSIC VS-file is in use.
*/
read_lock_irqsave(&imsic->vsfile_lock, flags);
if (imsic->vsfile_cpu > -1)
ret = !!(csr_read(CSR_HGEIP) & BIT(imsic->vsfile_hgei));
read_unlock_irqrestore(&imsic->vsfile_lock, flags);
return ret;
}
void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu)
{
/*
* No need to explicitly clear HGEIE CSR bits because the
* hgei interrupt handler (aka hgei_interrupt()) will always
* clear them for us.
*/
}
void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu)
{
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
unsigned long flags;
if (!kvm_vcpu_is_blocking(vcpu))
return;
read_lock_irqsave(&imsic->vsfile_lock, flags);
if (imsic->vsfile_cpu > -1)
csr_set(CSR_HGEIE, BIT(imsic->vsfile_hgei));
read_unlock_irqrestore(&imsic->vsfile_lock, flags);
}
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu) void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
{ {
unsigned long flags; unsigned long flags;
@ -781,6 +823,9 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
* producers to the new IMSIC VS-file. * producers to the new IMSIC VS-file.
*/ */
/* Ensure HGEIE CSR bit is zero before using the new IMSIC VS-file */
csr_clear(CSR_HGEIE, BIT(new_vsfile_hgei));
/* Zero-out new IMSIC VS-file */ /* Zero-out new IMSIC VS-file */
imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix); imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);


@ -207,16 +207,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvm_riscv_vcpu_timer_pending(vcpu); return kvm_riscv_vcpu_timer_pending(vcpu);
} }
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
kvm_riscv_aia_wakeon_hgei(vcpu, true);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
kvm_riscv_aia_wakeon_hgei(vcpu, false);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{ {
return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) && return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&


@ -345,8 +345,24 @@ void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
/* /*
* The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync() * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
* upon every VM exit so no need to save here. * upon every VM exit so no need to save here.
*
* If the VS-timer expires while no VCPU is running on a host CPU,
* a WFI executed by that host CPU is effectively a NOP, resulting
* in no power savings. This is because, as per the RISC-V Privileged
* specification: "WFI is also required to resume execution for
* locally enabled interrupts pending at any privilege level,
* regardless of the global interrupt enable at each privilege
* level."
*
* To address this, the vstimecmp CSR must be set to -1UL here
* when the VCPU is scheduled out or exits to user space.
*/ */
csr_write(CSR_VSTIMECMP, -1UL);
#if defined(CONFIG_32BIT)
csr_write(CSR_VSTIMECMPH, -1UL);
#endif
/* timer should be enabled for the remaining operations */ /* timer should be enabled for the remaining operations */
if (unlikely(!t->init_done)) if (unlikely(!t->init_done))
return; return;


@ -14,7 +14,9 @@ bad_relocs=$(
${srctree}/scripts/relocs_check.sh "$@" | ${srctree}/scripts/relocs_check.sh "$@" |
# These relocations are okay # These relocations are okay
# R_RISCV_RELATIVE # R_RISCV_RELATIVE
grep -F -w -v 'R_RISCV_RELATIVE' # R_RISCV_NONE
grep -F -w -v 'R_RISCV_RELATIVE
R_RISCV_NONE'
) )
if [ -z "$bad_relocs" ]; then if [ -z "$bad_relocs" ]; then


@ -38,6 +38,7 @@ static int s390_sha1_init(struct shash_desc *desc)
sctx->state[4] = SHA1_H4; sctx->state[4] = SHA1_H4;
sctx->count = 0; sctx->count = 0;
sctx->func = CPACF_KIMD_SHA_1; sctx->func = CPACF_KIMD_SHA_1;
sctx->first_message_part = 0;
return 0; return 0;
} }
@ -60,6 +61,7 @@ static int s390_sha1_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->count; sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state)); memcpy(sctx->state, ictx->state, sizeof(ictx->state));
sctx->func = CPACF_KIMD_SHA_1; sctx->func = CPACF_KIMD_SHA_1;
sctx->first_message_part = 0;
return 0; return 0;
} }


@ -32,6 +32,7 @@ static int sha512_init(struct shash_desc *desc)
ctx->count = 0; ctx->count = 0;
ctx->sha512.count_hi = 0; ctx->sha512.count_hi = 0;
ctx->func = CPACF_KIMD_SHA_512; ctx->func = CPACF_KIMD_SHA_512;
ctx->first_message_part = 0;
return 0; return 0;
} }
@ -57,6 +58,7 @@ static int sha512_import(struct shash_desc *desc, const void *in)
memcpy(sctx->state, ictx->state, sizeof(ictx->state)); memcpy(sctx->state, ictx->state, sizeof(ictx->state));
sctx->func = CPACF_KIMD_SHA_512; sctx->func = CPACF_KIMD_SHA_512;
sctx->first_message_part = 0;
return 0; return 0;
} }
@ -97,6 +99,7 @@ static int sha384_init(struct shash_desc *desc)
ctx->count = 0; ctx->count = 0;
ctx->sha512.count_hi = 0; ctx->sha512.count_hi = 0;
ctx->func = CPACF_KIMD_SHA_512; ctx->func = CPACF_KIMD_SHA_512;
ctx->first_message_part = 0;
return 0; return 0;
} }


@ -566,7 +566,15 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
{ {
memcpy(plt, &bpf_plt, sizeof(*plt)); memcpy(plt, &bpf_plt, sizeof(*plt));
plt->ret = ret; plt->ret = ret;
plt->target = target; /*
* (target == NULL) implies that the branch to this PLT entry was
* patched and became a no-op. However, some CPU could have jumped
* to this PLT entry before patching and may still be executing it.
*
* Since the intention in this case is to make the PLT entry a no-op,
* make the target point to the return label instead of NULL.
*/
plt->target = target ?: ret;
} }
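target ?: ret relies on the GNU C conditional-with-omitted-operand extension: a ?: b yields a when a is non-NULL/non-zero and b otherwise, evaluating a only once. Spelled out:

	plt->target = target ? target : ret;	/* a NULL target redirects the
						 * branch to the return label,
						 * making the entry a no-op */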
/* /*


@ -147,7 +147,7 @@ config X86
select ARCH_WANTS_DYNAMIC_TASK_STRUCT select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANTS_NO_INSTR select ARCH_WANTS_NO_INSTR
select ARCH_WANT_GENERAL_HUGETLB select ARCH_WANT_GENERAL_HUGETLB
select ARCH_WANT_HUGE_PMD_SHARE select ARCH_WANT_HUGE_PMD_SHARE if X86_64
select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if X86_64 select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if X86_64
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64 select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64
@ -2695,6 +2695,15 @@ config MITIGATION_ITS
disabled, mitigation cannot be enabled via cmdline. disabled, mitigation cannot be enabled via cmdline.
See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst> See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
config MITIGATION_TSA
bool "Mitigate Transient Scheduler Attacks"
depends on CPU_SUP_AMD
default y
help
Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
security vulnerability on AMD CPUs which can lead to invalid data
being forwarded to subsequent instructions, affecting their timing
and thereby causing an information leak.
endif endif
config ARCH_HAS_ADD_PAGES config ARCH_HAS_ADD_PAGES


@ -5,5 +5,6 @@ obj-y += core.o sev-nmi.o vc-handle.o
# Clang 14 and older may fail to respect __no_sanitize_undefined when inlining # Clang 14 and older may fail to respect __no_sanitize_undefined when inlining
UBSAN_SANITIZE_sev-nmi.o := n UBSAN_SANITIZE_sev-nmi.o := n
# GCC may fail to respect __no_sanitize_address when inlining # GCC may fail to respect __no_sanitize_address or __no_kcsan when inlining
KASAN_SANITIZE_sev-nmi.o := n KASAN_SANITIZE_sev-nmi.o := n
KCSAN_SANITIZE_sev-nmi.o := n


@ -88,7 +88,7 @@ static const char * const sev_status_feat_names[] = {
*/ */
static u64 snp_tsc_scale __ro_after_init; static u64 snp_tsc_scale __ro_after_init;
static u64 snp_tsc_offset __ro_after_init; static u64 snp_tsc_offset __ro_after_init;
static u64 snp_tsc_freq_khz __ro_after_init; static unsigned long snp_tsc_freq_khz __ro_after_init;
DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data); DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa); DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
@ -2167,15 +2167,31 @@ static unsigned long securetsc_get_tsc_khz(void)
void __init snp_secure_tsc_init(void) void __init snp_secure_tsc_init(void)
{ {
unsigned long long tsc_freq_mhz; struct snp_secrets_page *secrets;
unsigned long tsc_freq_mhz;
void *mem;
if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC)) if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC))
return; return;
mem = early_memremap_encrypted(sev_secrets_pa, PAGE_SIZE);
if (!mem) {
pr_err("Unable to get TSC_FACTOR: failed to map the SNP secrets page.\n");
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC);
}
secrets = (__force struct snp_secrets_page *)mem;
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz); rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000);
/* Extract the GUEST TSC MHZ from BIT[17:0], rest is reserved space */
tsc_freq_mhz &= GENMASK_ULL(17, 0);
snp_tsc_freq_khz = SNP_SCALE_TSC_FREQ(tsc_freq_mhz * 1000, secrets->tsc_factor);
x86_platform.calibrate_cpu = securetsc_get_tsc_khz; x86_platform.calibrate_cpu = securetsc_get_tsc_khz;
x86_platform.calibrate_tsc = securetsc_get_tsc_khz; x86_platform.calibrate_tsc = securetsc_get_tsc_khz;
early_memunmap(mem, PAGE_SIZE);
} }


@ -36,20 +36,20 @@ EXPORT_SYMBOL_GPL(write_ibpb);
/* /*
* Define the VERW operand that is disguised as entry code so that * Define the VERW operand that is disguised as entry code so that
* it can be referenced with KPTI enabled. This ensure VERW can be * it can be referenced with KPTI enabled. This ensures VERW can be
* used late in exit-to-user path after page tables are switched. * used late in exit-to-user path after page tables are switched.
*/ */
.pushsection .entry.text, "ax" .pushsection .entry.text, "ax"
.align L1_CACHE_BYTES, 0xcc .align L1_CACHE_BYTES, 0xcc
SYM_CODE_START_NOALIGN(mds_verw_sel) SYM_CODE_START_NOALIGN(x86_verw_sel)
UNWIND_HINT_UNDEFINED UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
.word __KERNEL_DS .word __KERNEL_DS
.align L1_CACHE_BYTES, 0xcc .align L1_CACHE_BYTES, 0xcc
SYM_CODE_END(mds_verw_sel); SYM_CODE_END(x86_verw_sel);
/* For KVM */ /* For KVM */
EXPORT_SYMBOL_GPL(mds_verw_sel); EXPORT_SYMBOL_GPL(x86_verw_sel);
.popsection .popsection


@ -34,6 +34,7 @@
#include <linux/syscore_ops.h> #include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h> #include <clocksource/hyperv_timer.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/export.h>
void *hv_hypercall_pg; void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg); EXPORT_SYMBOL_GPL(hv_hypercall_pg);


@ -10,6 +10,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/export.h>
#include <asm/mshyperv.h> #include <asm/mshyperv.h>
static int hv_map_interrupt(union hv_device_id device_id, bool level, static int hv_map_interrupt(union hv_device_id device_id, bool level,
@ -46,7 +47,7 @@ static int hv_map_interrupt(union hv_device_id device_id, bool level,
if (nr_bank < 0) { if (nr_bank < 0) {
local_irq_restore(flags); local_irq_restore(flags);
pr_err("%s: unable to generate VP set\n", __func__); pr_err("%s: unable to generate VP set\n", __func__);
return EINVAL; return -EINVAL;
} }
intr_desc->target.flags = HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; intr_desc->target.flags = HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
@ -66,7 +67,7 @@ static int hv_map_interrupt(union hv_device_id device_id, bool level,
if (!hv_result_success(status)) if (!hv_result_success(status))
hv_status_err(status, "\n"); hv_status_err(status, "\n");
return hv_result(status); return hv_result_to_errno(status);
} }
static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry) static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry)
@ -88,7 +89,10 @@ static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry)
status = hv_do_hypercall(HVCALL_UNMAP_DEVICE_INTERRUPT, input, NULL); status = hv_do_hypercall(HVCALL_UNMAP_DEVICE_INTERRUPT, input, NULL);
local_irq_restore(flags); local_irq_restore(flags);
return hv_result(status); if (!hv_result_success(status))
hv_status_err(status, "\n");
return hv_result_to_errno(status);
} }
#ifdef CONFIG_PCI_MSI #ifdef CONFIG_PCI_MSI
@ -169,13 +173,34 @@ static union hv_device_id hv_build_pci_dev_id(struct pci_dev *dev)
return dev_id; return dev_id;
} }
static int hv_map_msi_interrupt(struct pci_dev *dev, int cpu, int vector, /**
struct hv_interrupt_entry *entry) * hv_map_msi_interrupt() - "Map" the MSI IRQ in the hypervisor.
* @data: Describes the IRQ
* @out_entry: Hypervisor (MSI) interrupt entry (can be NULL)
*
* Map the IRQ in the hypervisor by issuing a MAP_DEVICE_INTERRUPT hypercall.
*
* Return: 0 on success, -errno on failure
*/
int hv_map_msi_interrupt(struct irq_data *data,
struct hv_interrupt_entry *out_entry)
{ {
union hv_device_id device_id = hv_build_pci_dev_id(dev); struct irq_cfg *cfg = irqd_cfg(data);
struct hv_interrupt_entry dummy;
union hv_device_id device_id;
struct msi_desc *msidesc;
struct pci_dev *dev;
int cpu;
return hv_map_interrupt(device_id, false, cpu, vector, entry); msidesc = irq_data_get_msi_desc(data);
dev = msi_desc_to_pci_dev(msidesc);
device_id = hv_build_pci_dev_id(dev);
cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
return hv_map_interrupt(device_id, false, cpu, cfg->vector,
out_entry ? out_entry : &dummy);
} }
EXPORT_SYMBOL_GPL(hv_map_msi_interrupt);
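A minimal sketch of how a caller might use the newly exported helper (hypothetical usage; the irq is assumed to already carry a composed MSI descriptor and an effective affinity mask):

	struct hv_interrupt_entry entry;
	int ret;

	ret = hv_map_msi_interrupt(irq_get_irq_data(irq), &entry);
	if (ret)
		return ret;	/* 0 on success, -errno on failure */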
static inline void entry_to_msi_msg(struct hv_interrupt_entry *entry, struct msi_msg *msg) static inline void entry_to_msi_msg(struct hv_interrupt_entry *entry, struct msi_msg *msg)
{ {
@ -188,13 +213,11 @@ static inline void entry_to_msi_msg(struct hv_interrupt_entry *entry, struct msi
static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry *old_entry); static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry *old_entry);
static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{ {
struct hv_interrupt_entry *stored_entry;
struct irq_cfg *cfg = irqd_cfg(data);
struct msi_desc *msidesc; struct msi_desc *msidesc;
struct pci_dev *dev; struct pci_dev *dev;
struct hv_interrupt_entry out_entry, *stored_entry; int ret;
struct irq_cfg *cfg = irqd_cfg(data);
const cpumask_t *affinity;
int cpu;
u64 status;
msidesc = irq_data_get_msi_desc(data); msidesc = irq_data_get_msi_desc(data);
dev = msi_desc_to_pci_dev(msidesc); dev = msi_desc_to_pci_dev(msidesc);
@ -204,9 +227,6 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
return; return;
} }
affinity = irq_data_get_effective_affinity_mask(data);
cpu = cpumask_first_and(affinity, cpu_online_mask);
if (data->chip_data) { if (data->chip_data) {
/* /*
* This interrupt is already mapped. Let's unmap first. * This interrupt is already mapped. Let's unmap first.
@ -219,15 +239,13 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
stored_entry = data->chip_data; stored_entry = data->chip_data;
data->chip_data = NULL; data->chip_data = NULL;
status = hv_unmap_msi_interrupt(dev, stored_entry); ret = hv_unmap_msi_interrupt(dev, stored_entry);
kfree(stored_entry); kfree(stored_entry);
if (status != HV_STATUS_SUCCESS) { if (ret)
hv_status_debug(status, "failed to unmap\n");
return; return;
} }
}
stored_entry = kzalloc(sizeof(*stored_entry), GFP_ATOMIC); stored_entry = kzalloc(sizeof(*stored_entry), GFP_ATOMIC);
if (!stored_entry) { if (!stored_entry) {
@ -235,15 +253,14 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
return; return;
} }
status = hv_map_msi_interrupt(dev, cpu, cfg->vector, &out_entry); ret = hv_map_msi_interrupt(data, stored_entry);
if (status != HV_STATUS_SUCCESS) { if (ret) {
kfree(stored_entry); kfree(stored_entry);
return; return;
} }
*stored_entry = out_entry;
data->chip_data = stored_entry; data->chip_data = stored_entry;
entry_to_msi_msg(&out_entry, msg); entry_to_msi_msg(data->chip_data, msg);
return; return;
} }
@ -257,7 +274,6 @@ static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd)
{ {
struct hv_interrupt_entry old_entry; struct hv_interrupt_entry old_entry;
struct msi_msg msg; struct msi_msg msg;
u64 status;
if (!irqd->chip_data) { if (!irqd->chip_data) {
pr_debug("%s: no chip data\n!", __func__); pr_debug("%s: no chip data\n!", __func__);
@ -270,10 +286,7 @@ static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd)
kfree(irqd->chip_data); kfree(irqd->chip_data);
irqd->chip_data = NULL; irqd->chip_data = NULL;
status = hv_unmap_msi_interrupt(dev, &old_entry); (void)hv_unmap_msi_interrupt(dev, &old_entry);
if (status != HV_STATUS_SUCCESS)
hv_status_err(status, "\n");
} }
static void hv_msi_free_irq(struct irq_domain *domain, static void hv_msi_free_irq(struct irq_domain *domain,


@ -10,6 +10,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/export.h>
#include <asm/svm.h> #include <asm/svm.h>
#include <asm/sev.h> #include <asm/sev.h>
#include <asm/io.h> #include <asm/io.h>

View File

@ -11,6 +11,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/export.h>
#include <hyperv/hvhdk.h> #include <hyperv/hvhdk.h>
#include <asm/mshyperv.h> #include <asm/mshyperv.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>


@ -456,6 +456,7 @@
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */ #define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */ #define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
#define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* The memory form of VERW mitigates TSA */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */ #define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */ #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */ #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */
@ -487,6 +488,9 @@
#define X86_FEATURE_PREFER_YMM (21*32+ 8) /* Avoid ZMM registers due to downclocking */ #define X86_FEATURE_PREFER_YMM (21*32+ 8) /* Avoid ZMM registers due to downclocking */
#define X86_FEATURE_APX (21*32+ 9) /* Advanced Performance Extensions */ #define X86_FEATURE_APX (21*32+ 9) /* Advanced Performance Extensions */
#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */ #define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */
#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
#define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
/* /*
* BUG word(s) * BUG word(s)
@ -542,5 +546,5 @@
#define X86_BUG_OLD_MICROCODE X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */ #define X86_BUG_OLD_MICROCODE X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */
#define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */ #define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
#define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */ #define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
#define X86_BUG_TSA X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
#endif /* _ASM_X86_CPUFEATURES_H */ #endif /* _ASM_X86_CPUFEATURES_H */


@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void)
static __always_inline void native_safe_halt(void) static __always_inline void native_safe_halt(void)
{ {
mds_idle_clear_cpu_buffers(); x86_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory"); asm volatile("sti; hlt": : :"memory");
} }
static __always_inline void native_halt(void) static __always_inline void native_halt(void)
{ {
mds_idle_clear_cpu_buffers(); x86_idle_clear_cpu_buffers();
asm volatile("hlt": : :"memory"); asm volatile("hlt": : :"memory");
} }


@ -700,8 +700,13 @@ struct kvm_vcpu_hv {
struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS]; struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS];
/* Preallocated buffer for handling hypercalls passing sparse vCPU set */ /*
* Preallocated buffers for handling hypercalls that pass sparse vCPU
* sets (for high vCPU counts, they're too large to comfortably fit on
* the stack).
*/
u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS]; u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];
DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
struct hv_vp_assist_page vp_assist_page; struct hv_vp_assist_page vp_assist_page;
@ -764,6 +769,7 @@ enum kvm_only_cpuid_leafs {
CPUID_8000_0022_EAX, CPUID_8000_0022_EAX,
CPUID_7_2_EDX, CPUID_7_2_EDX,
CPUID_24_0_EBX, CPUID_24_0_EBX,
CPUID_8000_0021_ECX,
NR_KVM_CPU_CAPS, NR_KVM_CPU_CAPS,
NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,


@ -112,12 +112,6 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
return hv_status; return hv_status;
} }
/* Hypercall to the L0 hypervisor */
static inline u64 hv_do_nested_hypercall(u64 control, void *input, void *output)
{
return hv_do_hypercall(control | HV_HYPERCALL_NESTED, input, output);
}
/* Fast hypercall with 8 bytes of input and no output */ /* Fast hypercall with 8 bytes of input and no output */
static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1) static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
{ {
@ -165,13 +159,6 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
return _hv_do_fast_hypercall8(control, input1); return _hv_do_fast_hypercall8(control, input1);
} }
static inline u64 hv_do_fast_nested_hypercall8(u16 code, u64 input1)
{
u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;
return _hv_do_fast_hypercall8(control, input1);
}
/* Fast hypercall with 16 bytes of input */ /* Fast hypercall with 16 bytes of input */
static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2) static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
{ {
@ -223,13 +210,6 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
return _hv_do_fast_hypercall16(control, input1, input2); return _hv_do_fast_hypercall16(control, input1, input2);
} }
static inline u64 hv_do_fast_nested_hypercall16(u16 code, u64 input1, u64 input2)
{
u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;
return _hv_do_fast_hypercall16(control, input1, input2);
}
extern struct hv_vp_assist_page **hv_vp_assist_page; extern struct hv_vp_assist_page **hv_vp_assist_page;
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu) static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
@ -262,6 +242,8 @@ static inline void hv_apic_init(void) {}
struct irq_domain *hv_create_pci_msi_domain(void); struct irq_domain *hv_create_pci_msi_domain(void);
int hv_map_msi_interrupt(struct irq_data *data,
struct hv_interrupt_entry *out_entry);
int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector, int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
struct hv_interrupt_entry *entry); struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry); int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);


@ -628,6 +628,7 @@
#define MSR_AMD64_OSVW_STATUS 0xc0010141 #define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD_PPIN_CTL 0xc00102f0 #define MSR_AMD_PPIN_CTL 0xc00102f0
#define MSR_AMD_PPIN 0xc00102f1 #define MSR_AMD_PPIN 0xc00102f1
#define MSR_AMD64_CPUID_FN_7 0xc0011002
#define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022 #define MSR_AMD64_DC_CFG 0xc0011022


@ -43,8 +43,6 @@ static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx)
static __always_inline void __mwait(u32 eax, u32 ecx) static __always_inline void __mwait(u32 eax, u32 ecx)
{ {
mds_idle_clear_cpu_buffers();
/* /*
* Use the instruction mnemonic with implicit operands, as the LLVM * Use the instruction mnemonic with implicit operands, as the LLVM
* assembler fails to assemble the mnemonic with explicit operands: * assembler fails to assemble the mnemonic with explicit operands:
@ -80,7 +78,7 @@ static __always_inline void __mwait(u32 eax, u32 ecx)
*/ */
static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx) static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
{ {
/* No MDS buffer clear as this is AMD/HYGON only */ /* No need for TSA buffer clearing on AMD */
/* "mwaitx %eax, %ebx, %ecx" */ /* "mwaitx %eax, %ebx, %ecx" */
asm volatile(".byte 0x0f, 0x01, 0xfb" asm volatile(".byte 0x0f, 0x01, 0xfb"
@ -98,7 +96,6 @@ static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
*/ */
static __always_inline void __sti_mwait(u32 eax, u32 ecx) static __always_inline void __sti_mwait(u32 eax, u32 ecx)
{ {
mds_idle_clear_cpu_buffers();
asm volatile("sti; mwait" :: "a" (eax), "c" (ecx)); asm volatile("sti; mwait" :: "a" (eax), "c" (ecx));
} }
@ -115,13 +112,20 @@ static __always_inline void __sti_mwait(u32 eax, u32 ecx)
*/ */
static __always_inline void mwait_idle_with_hints(u32 eax, u32 ecx) static __always_inline void mwait_idle_with_hints(u32 eax, u32 ecx)
{ {
if (need_resched())
return;
x86_idle_clear_cpu_buffers();
if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) { if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
const void *addr = &current_thread_info()->flags; const void *addr = &current_thread_info()->flags;
alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
__monitor(addr, 0, 0); __monitor(addr, 0, 0);
if (!need_resched()) { if (need_resched())
goto out;
if (ecx & 1) { if (ecx & 1) {
__mwait(eax, ecx); __mwait(eax, ecx);
} else { } else {
@ -129,7 +133,8 @@ static __always_inline void mwait_idle_with_hints(u32 eax, u32 ecx)
raw_local_irq_disable(); raw_local_irq_disable();
} }
} }
}
out:
current_clr_polling(); current_clr_polling();
} }


@ -302,25 +302,31 @@
.endm .endm
/* /*
* Macro to execute VERW instruction that mitigate transient data sampling * Macro to execute VERW insns that mitigate transient data sampling
* attacks such as MDS. On affected systems a microcode update overloaded VERW * attacks such as MDS or TSA. On affected systems a microcode update
* instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF. * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
* * CFLAGS.ZF.
* Note: Only the memory operand variant of VERW clears the CPU buffers. * Note: Only the memory operand variant of VERW clears the CPU buffers.
*/ */
.macro CLEAR_CPU_BUFFERS .macro __CLEAR_CPU_BUFFERS feature
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
#else #else
/* /*
* In 32bit mode, the memory operand must be a %cs reference. The data * In 32bit mode, the memory operand must be a %cs reference. The data
* segments may not be usable (vm86 mode), and the stack segment may not * segments may not be usable (vm86 mode), and the stack segment may not
* be flat (ESPFIX32). * be flat (ESPFIX32).
*/ */
ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
#endif #endif
.endm .endm
#define CLEAR_CPU_BUFFERS \
__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
#define VM_CLEAR_CPU_BUFFERS \
__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
.macro CLEAR_BRANCH_HISTORY .macro CLEAR_BRANCH_HISTORY
ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
@ -567,24 +573,24 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb); DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear); DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear); DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
extern u16 mds_verw_sel; extern u16 x86_verw_sel;
#include <asm/segment.h> #include <asm/segment.h>
/** /**
* mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
* *
* This uses the otherwise unused and obsolete VERW instruction in * This uses the otherwise unused and obsolete VERW instruction in
* combination with microcode which triggers a CPU buffer flush when the * combination with microcode which triggers a CPU buffer flush when the
* instruction is executed. * instruction is executed.
*/ */
static __always_inline void mds_clear_cpu_buffers(void) static __always_inline void x86_clear_cpu_buffers(void)
{ {
static const u16 ds = __KERNEL_DS; static const u16 ds = __KERNEL_DS;
@ -601,14 +607,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
} }
/** /**
* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
* and TSA vulnerabilities.
* *
* Clear CPU buffers if the corresponding static key is enabled * Clear CPU buffers if the corresponding static key is enabled
*/ */
static __always_inline void mds_idle_clear_cpu_buffers(void) static __always_inline void x86_idle_clear_cpu_buffers(void)
{ {
if (static_branch_likely(&mds_idle_clear)) if (static_branch_likely(&cpu_buf_idle_clear))
mds_clear_cpu_buffers(); x86_clear_cpu_buffers();
} }
#endif /* __ASSEMBLER__ */ #endif /* __ASSEMBLER__ */


@ -223,6 +223,18 @@ struct snp_tsc_info_resp {
u8 rsvd2[100]; u8 rsvd2[100];
} __packed; } __packed;
/*
* Obtain the mean TSC frequency by decreasing the nominal TSC frequency by
* TSC_FACTOR, as documented in the SNP Firmware ABI specification:
*
* GUEST_TSC_FREQ * (1 - (TSC_FACTOR * 0.00001))
*
* which is equivalent to:
*
* GUEST_TSC_FREQ -= (GUEST_TSC_FREQ * TSC_FACTOR) / 100000;
*/
#define SNP_SCALE_TSC_FREQ(freq, factor) ((freq) - (freq) * (factor) / 100000)
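A quick sanity check of the arithmetic with illustrative numbers: TSC_FACTOR = 1000 means a 1% decrease (1000 * 0.00001 = 0.01), so for a nominal 2,000,000 kHz:

	/* 2000000 - (2000000 * 1000) / 100000 = 2000000 - 20000 */
	unsigned long khz = SNP_SCALE_TSC_FREQ(2000000UL, 1000);	/* 1980000 */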
struct snp_guest_req { struct snp_guest_req {
void *req_buf; void *req_buf;
size_t req_sz; size_t req_sz;
@ -282,8 +294,11 @@ struct snp_secrets_page {
u8 svsm_guest_vmpl; u8 svsm_guest_vmpl;
u8 rsvd3[3]; u8 rsvd3[3];
/* The percentage decrease from nominal to mean TSC frequency. */
u32 tsc_factor;
/* Remainder of page */ /* Remainder of page */
u8 rsvd4[3744]; u8 rsvd4[3740];
} __packed; } __packed;
struct snp_msg_desc { struct snp_msg_desc {


@ -72,6 +72,7 @@
#define TDVMCALL_MAP_GPA 0x10001 #define TDVMCALL_MAP_GPA 0x10001
#define TDVMCALL_GET_QUOTE 0x10002 #define TDVMCALL_GET_QUOTE 0x10002
#define TDVMCALL_REPORT_FATAL_ERROR 0x10003 #define TDVMCALL_REPORT_FATAL_ERROR 0x10003
#define TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT 0x10004ULL
/* /*
* TDG.VP.VMCALL Status Codes (returned in R10) * TDG.VP.VMCALL Status Codes (returned in R10)


@ -965,7 +965,13 @@ struct kvm_tdx_cmd {
struct kvm_tdx_capabilities { struct kvm_tdx_capabilities {
__u64 supported_attrs; __u64 supported_attrs;
__u64 supported_xfam; __u64 supported_xfam;
__u64 reserved[254];
__u64 kernel_tdvmcallinfo_1_r11;
__u64 user_tdvmcallinfo_1_r11;
__u64 kernel_tdvmcallinfo_1_r12;
__u64 user_tdvmcallinfo_1_r12;
__u64 reserved[250];
/* Configurable CPUID bits for userspace */ /* Configurable CPUID bits for userspace */
struct kvm_cpuid2 cpuid; struct kvm_cpuid2 cpuid;


@@ -9,7 +9,7 @@
 #include <linux/sched/clock.h>
 #include <linux/random.h>
 #include <linux/topology.h>
-#include <asm/amd/fch.h>
+#include <linux/platform_data/x86/amd-fch.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cacheinfo.h>
@@ -377,6 +377,47 @@ static void bsp_determine_snp(struct cpuinfo_x86 *c)
 #endif
 }
 
+#define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode)		\
+	X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \
+			    step, step, ucode)
+
+static const struct x86_cpu_id amd_tsa_microcode[] = {
+	ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008),
+	ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008),
+	ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216),
+	{},
+};
+
+static void tsa_init(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+		return;
+
+	if (cpu_has(c, X86_FEATURE_ZEN3) ||
+	    cpu_has(c, X86_FEATURE_ZEN4)) {
+		if (x86_match_min_microcode_rev(amd_tsa_microcode))
+			setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
+		else
+			pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode);
+	} else {
+		setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
+		setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
+	}
+}
+
 static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -489,6 +530,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 	}
 
 	bsp_determine_snp(c);
+
+	tsa_init(c);
+
 	return;
 
 warn:
@@ -930,6 +974,16 @@ static void init_amd_zen2(struct cpuinfo_x86 *c)
 	init_spectral_chicken(c);
 	fix_erratum_1386(c);
 	zen2_zenbleed_check(c);
+
+	/* Disable RDSEED on AMD Cyan Skillfish because of an error. */
+	if (c->x86_model == 0x47 && c->x86_stepping == 0x0) {
+		clear_cpu_cap(c, X86_FEATURE_RDSEED);
+		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
+		pr_emerg("RDSEED is not reliable on this platform; disabling.\n");
+	}
+
+	/* Correct misconfigured CPUID on some clients. */
+	clear_cpu_cap(c, X86_FEATURE_INVLPGB);
 }
 
 static void init_amd_zen3(struct cpuinfo_x86 *c)

View File

@@ -94,6 +94,8 @@ static void __init bhi_apply_mitigation(void);
 static void __init its_select_mitigation(void);
 static void __init its_update_mitigation(void);
 static void __init its_apply_mitigation(void);
+static void __init tsa_select_mitigation(void);
+static void __init tsa_apply_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -169,9 +171,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
 EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
 
-/* Control MDS CPU buffer clear before idling (halt, mwait) */
-DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
-EXPORT_SYMBOL_GPL(mds_idle_clear);
+/* Control CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
+EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
 
 /*
  * Controls whether l1d flush based mitigations are enabled,
@@ -225,6 +227,7 @@ void __init cpu_select_mitigations(void)
 	gds_select_mitigation();
 	its_select_mitigation();
 	bhi_select_mitigation();
+	tsa_select_mitigation();
 
 	/*
 	 * After mitigations are selected, some may need to update their
@@ -272,6 +275,7 @@ void __init cpu_select_mitigations(void)
 	gds_apply_mitigation();
 	its_apply_mitigation();
 	bhi_apply_mitigation();
+	tsa_apply_mitigation();
 }
 
 /*
@@ -637,7 +641,7 @@ static void __init mmio_apply_mitigation(void)
 	 * is required irrespective of SMT state.
	 */
 	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
-		static_branch_enable(&mds_idle_clear);
+		static_branch_enable(&cpu_buf_idle_clear);
 
 	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
 		cpu_smt_disable(false);
@@ -1487,6 +1491,94 @@ static void __init its_apply_mitigation(void)
 	set_return_thunk(its_return_thunk);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"Transient Scheduler Attacks: " fmt
+
+enum tsa_mitigations {
+	TSA_MITIGATION_NONE,
+	TSA_MITIGATION_AUTO,
+	TSA_MITIGATION_UCODE_NEEDED,
+	TSA_MITIGATION_USER_KERNEL,
+	TSA_MITIGATION_VM,
+	TSA_MITIGATION_FULL,
+};
+
+static const char * const tsa_strings[] = {
+	[TSA_MITIGATION_NONE]		= "Vulnerable",
+	[TSA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
+	[TSA_MITIGATION_USER_KERNEL]	= "Mitigation: Clear CPU buffers: user/kernel boundary",
+	[TSA_MITIGATION_VM]		= "Mitigation: Clear CPU buffers: VM",
+	[TSA_MITIGATION_FULL]		= "Mitigation: Clear CPU buffers",
+};
+
+static enum tsa_mitigations tsa_mitigation __ro_after_init =
+	IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
+
+static int __init tsa_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off"))
+		tsa_mitigation = TSA_MITIGATION_NONE;
+	else if (!strcmp(str, "on"))
+		tsa_mitigation = TSA_MITIGATION_FULL;
+	else if (!strcmp(str, "user"))
+		tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
+	else if (!strcmp(str, "vm"))
+		tsa_mitigation = TSA_MITIGATION_VM;
+	else
+		pr_err("Ignoring unknown tsa=%s option.\n", str);
+
+	return 0;
+}
+early_param("tsa", tsa_parse_cmdline);
+
+static void __init tsa_select_mitigation(void)
+{
+	if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
+		tsa_mitigation = TSA_MITIGATION_NONE;
+		return;
+	}
+
+	if (tsa_mitigation == TSA_MITIGATION_NONE)
+		return;
+
+	if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) {
+		tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
+		goto out;
+	}
+
+	if (tsa_mitigation == TSA_MITIGATION_AUTO)
+		tsa_mitigation = TSA_MITIGATION_FULL;
+
+	/*
+	 * No need to set verw_clear_cpu_buf_mitigation_selected - it
	 * doesn't fit all cases here and it is not needed because this
+	 * is the only VERW-based mitigation on AMD.
+	 */
+out:
+	pr_info("%s\n", tsa_strings[tsa_mitigation]);
+}
+
+static void __init tsa_apply_mitigation(void)
+{
+	switch (tsa_mitigation) {
+	case TSA_MITIGATION_USER_KERNEL:
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		break;
+	case TSA_MITIGATION_VM:
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+		break;
+	case TSA_MITIGATION_FULL:
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+		break;
+	default:
+		break;
+	}
+}
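For reference, a summary of the boot-time selection implemented by tsa_parse_cmdline() and tsa_apply_mitigation() above, written as a comment block:

/*
 * Kernel command line:
 *   tsa=off   - no mitigation
 *   tsa=on    - full: clear CPU buffers on exit to userspace and on VM entry
 *   tsa=user  - clear CPU buffers only at the user/kernel boundary
 *   tsa=vm    - clear CPU buffers only on VM entry
 *
 * The resulting state is reported as one of tsa_strings[] via
 * /sys/devices/system/cpu/vulnerabilities/tsa.
 */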
 #undef pr_fmt
 #define pr_fmt(fmt)	"Spectre V2 : " fmt
 
@@ -2249,10 +2341,10 @@ static void update_mds_branch_idle(void)
 		return;
 
 	if (sched_smt_active()) {
-		static_branch_enable(&mds_idle_clear);
+		static_branch_enable(&cpu_buf_idle_clear);
 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
 		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
-		static_branch_disable(&mds_idle_clear);
+		static_branch_disable(&cpu_buf_idle_clear);
 	}
 }
 
@@ -2316,6 +2408,25 @@ void cpu_bugs_smt_update(void)
 		break;
 	}
 
+	switch (tsa_mitigation) {
+	case TSA_MITIGATION_USER_KERNEL:
+	case TSA_MITIGATION_VM:
+	case TSA_MITIGATION_AUTO:
+	case TSA_MITIGATION_FULL:
+		/*
+		 * TSA-SQ can potentially lead to info leakage between
+		 * SMT threads.
+		 */
+		if (sched_smt_active())
+			static_branch_enable(&cpu_buf_idle_clear);
+		else
+			static_branch_disable(&cpu_buf_idle_clear);
+		break;
+	case TSA_MITIGATION_NONE:
+	case TSA_MITIGATION_UCODE_NEEDED:
+		break;
+	}
+
 	mutex_unlock(&spec_ctrl_mutex);
 }
@@ -3265,6 +3376,11 @@ static ssize_t gds_show_state(char *buf)
 	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
 }
 
+static ssize_t tsa_show_state(char *buf)
+{
+	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -3328,6 +3444,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 	case X86_BUG_ITS:
 		return its_show_state(buf);
 
+	case X86_BUG_TSA:
+		return tsa_show_state(buf);
+
 	default:
 		break;
 	}
@@ -3414,6 +3533,11 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
 }
+
+ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
+}
 #endif
 
 void __warn_thunk(void)

View File

@@ -1233,6 +1233,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define ITS		BIT(8)
 /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
 #define ITS_NATIVE_ONLY	BIT(9)
+/* CPU is affected by Transient Scheduler Attacks */
+#define TSA		BIT(10)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,	X86_STEP_MAX,	SRBDS),
@@ -1280,7 +1282,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_AMD(0x16, RETBLEED),
 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
 	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
-	VULNBL_AMD(0x19, SRSO),
+	VULNBL_AMD(0x19, SRSO | TSA),
 	VULNBL_AMD(0x1a, SRSO),
 	{}
 };
@@ -1530,6 +1532,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
 	}
 
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
+		    !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
+			if (cpu_matches(cpu_vuln_blacklist, TSA) ||
+			    /* Enable bug on Zen guests to allow for live migration. */
+			    (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
+				setup_force_cpu_bug(X86_BUG_TSA);
+		}
+	}
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;

Some files were not shown because too many files have changed in this diff.