Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Synced 2025-09-04 20:19:47 +08:00

ASoC: stm32: sai: fix kernel rate configuration

Merge series from Olivier Moysan <olivier.moysan@foss.st.com>: This patchset adds checks on the kernel clock minimum rate requirements. This avoids potential clock rate misconfiguration when setting the kernel frequency on STM32MP2 SoCs.

This commit is contained in: 844af9911a
@ -92,6 +92,7 @@ ForEachMacros:
|
||||
- '__rq_for_each_bio'
|
||||
- '__shost_for_each_device'
|
||||
- '__sym_for_each'
|
||||
- '_for_each_counter'
|
||||
- 'apei_estatus_for_each_section'
|
||||
- 'ata_for_each_dev'
|
||||
- 'ata_for_each_link'
|
||||
@ -141,11 +142,14 @@ ForEachMacros:
|
||||
- 'damon_for_each_target_safe'
|
||||
- 'damos_for_each_filter'
|
||||
- 'damos_for_each_filter_safe'
|
||||
- 'damos_for_each_ops_filter'
|
||||
- 'damos_for_each_ops_filter_safe'
|
||||
- 'damos_for_each_quota_goal'
|
||||
- 'damos_for_each_quota_goal_safe'
|
||||
- 'data__for_each_file'
|
||||
- 'data__for_each_file_new'
|
||||
- 'data__for_each_file_start'
|
||||
- 'def_for_each_cpu'
|
||||
- 'device_for_each_child_node'
|
||||
- 'device_for_each_child_node_scoped'
|
||||
- 'dma_fence_array_for_each'
|
||||
@ -176,6 +180,7 @@ ForEachMacros:
|
||||
- 'drm_for_each_privobj'
|
||||
- 'drm_gem_for_each_gpuvm_bo'
|
||||
- 'drm_gem_for_each_gpuvm_bo_safe'
|
||||
- 'drm_gpusvm_for_each_range'
|
||||
- 'drm_gpuva_for_each_op'
|
||||
- 'drm_gpuva_for_each_op_from_reverse'
|
||||
- 'drm_gpuva_for_each_op_reverse'
|
||||
@ -216,8 +221,10 @@ ForEachMacros:
|
||||
- 'for_each_active_dev_scope'
|
||||
- 'for_each_active_drhd_unit'
|
||||
- 'for_each_active_iommu'
|
||||
- 'for_each_active_irq'
|
||||
- 'for_each_active_route'
|
||||
- 'for_each_aggr_pgid'
|
||||
- 'for_each_alloc_capable_rdt_resource'
|
||||
- 'for_each_and_bit'
|
||||
- 'for_each_andnot_bit'
|
||||
- 'for_each_available_child_of_node'
|
||||
@ -228,6 +235,7 @@ ForEachMacros:
|
||||
- 'for_each_btf_ext_rec'
|
||||
- 'for_each_btf_ext_sec'
|
||||
- 'for_each_bvec'
|
||||
- 'for_each_capable_rdt_resource'
|
||||
- 'for_each_card_auxs'
|
||||
- 'for_each_card_auxs_safe'
|
||||
- 'for_each_card_components'
|
||||
@ -241,6 +249,7 @@ ForEachMacros:
|
||||
- 'for_each_cgroup_storage_type'
|
||||
- 'for_each_child_of_node'
|
||||
- 'for_each_child_of_node_scoped'
|
||||
- 'for_each_child_of_node_with_prefix'
|
||||
- 'for_each_clear_bit'
|
||||
- 'for_each_clear_bit_from'
|
||||
- 'for_each_clear_bitrange'
|
||||
@ -296,6 +305,7 @@ ForEachMacros:
|
||||
- 'for_each_group_member_head'
|
||||
- 'for_each_hstate'
|
||||
- 'for_each_hwgpio'
|
||||
- 'for_each_hwgpio_in_range'
|
||||
- 'for_each_if'
|
||||
- 'for_each_inject_fn'
|
||||
- 'for_each_insn'
|
||||
@ -304,6 +314,7 @@ ForEachMacros:
|
||||
- 'for_each_intid'
|
||||
- 'for_each_iommu'
|
||||
- 'for_each_ip_tunnel_rcu'
|
||||
- 'for_each_irq_desc'
|
||||
- 'for_each_irq_nr'
|
||||
- 'for_each_lang'
|
||||
- 'for_each_link_ch_maps'
|
||||
@ -324,6 +335,8 @@ ForEachMacros:
|
||||
- 'for_each_missing_reg'
|
||||
- 'for_each_mle_subelement'
|
||||
- 'for_each_mod_mem_type'
|
||||
- 'for_each_mon_capable_rdt_resource'
|
||||
- 'for_each_mp_bvec'
|
||||
- 'for_each_net'
|
||||
- 'for_each_net_continue_reverse'
|
||||
- 'for_each_net_rcu'
|
||||
@ -351,6 +364,7 @@ ForEachMacros:
|
||||
- 'for_each_node_by_name'
|
||||
- 'for_each_node_by_type'
|
||||
- 'for_each_node_mask'
|
||||
- 'for_each_node_numadist'
|
||||
- 'for_each_node_state'
|
||||
- 'for_each_node_with_cpus'
|
||||
- 'for_each_node_with_property'
|
||||
@ -359,6 +373,8 @@ ForEachMacros:
|
||||
- 'for_each_of_allnodes'
|
||||
- 'for_each_of_allnodes_from'
|
||||
- 'for_each_of_cpu_node'
|
||||
- 'for_each_of_graph_port'
|
||||
- 'for_each_of_graph_port_endpoint'
|
||||
- 'for_each_of_pci_range'
|
||||
- 'for_each_old_connector_in_state'
|
||||
- 'for_each_old_crtc_in_state'
|
||||
@ -372,9 +388,11 @@ ForEachMacros:
|
||||
- 'for_each_oldnew_plane_in_state_reverse'
|
||||
- 'for_each_oldnew_private_obj_in_state'
|
||||
- 'for_each_online_cpu'
|
||||
- 'for_each_online_cpu_wrap'
|
||||
- 'for_each_online_node'
|
||||
- 'for_each_online_pgdat'
|
||||
- 'for_each_or_bit'
|
||||
- 'for_each_page_ext'
|
||||
- 'for_each_path'
|
||||
- 'for_each_pci_bridge'
|
||||
- 'for_each_pci_dev'
|
||||
@ -382,8 +400,10 @@ ForEachMacros:
|
||||
- 'for_each_physmem_range'
|
||||
- 'for_each_populated_zone'
|
||||
- 'for_each_possible_cpu'
|
||||
- 'for_each_possible_cpu_wrap'
|
||||
- 'for_each_present_blessed_reg'
|
||||
- 'for_each_present_cpu'
|
||||
- 'for_each_present_section_nr'
|
||||
- 'for_each_prime_number'
|
||||
- 'for_each_prime_number_from'
|
||||
- 'for_each_probe_cache_entry'
|
||||
@ -396,6 +416,7 @@ ForEachMacros:
|
||||
- 'for_each_prop_dlc_cpus'
|
||||
- 'for_each_prop_dlc_platforms'
|
||||
- 'for_each_property_of_node'
|
||||
- 'for_each_rdt_resource'
|
||||
- 'for_each_reg'
|
||||
- 'for_each_reg_filtered'
|
||||
- 'for_each_reloc'
|
||||
@ -434,10 +455,10 @@ ForEachMacros:
|
||||
- 'for_each_subelement_id'
|
||||
- 'for_each_sublist'
|
||||
- 'for_each_subsystem'
|
||||
- 'for_each_suite'
|
||||
- 'for_each_supported_activate_fn'
|
||||
- 'for_each_supported_inject_fn'
|
||||
- 'for_each_sym'
|
||||
- 'for_each_test'
|
||||
- 'for_each_thread'
|
||||
- 'for_each_token'
|
||||
- 'for_each_unicast_dest_pgid'
|
||||
@ -499,8 +520,10 @@ ForEachMacros:
|
||||
- 'idr_for_each_entry_continue'
|
||||
- 'idr_for_each_entry_continue_ul'
|
||||
- 'idr_for_each_entry_ul'
|
||||
- 'iio_for_each_active_channel'
|
||||
- 'in_dev_for_each_ifa_rcu'
|
||||
- 'in_dev_for_each_ifa_rtnl'
|
||||
- 'in_dev_for_each_ifa_rtnl_net'
|
||||
- 'inet_bind_bucket_for_each'
|
||||
- 'interval_tree_for_each_span'
|
||||
- 'intlist__for_each_entry'
|
||||
@ -542,7 +565,6 @@ ForEachMacros:
|
||||
- 'list_for_each_prev'
|
||||
- 'list_for_each_prev_safe'
|
||||
- 'list_for_each_rcu'
|
||||
- 'list_for_each_reverse'
|
||||
- 'list_for_each_safe'
|
||||
- 'llist_for_each'
|
||||
- 'llist_for_each_entry'
|
||||
@ -552,6 +574,7 @@ ForEachMacros:
|
||||
- 'map__for_each_symbol'
|
||||
- 'map__for_each_symbol_by_name'
|
||||
- 'mas_for_each'
|
||||
- 'mas_for_each_rev'
|
||||
- 'mci_for_each_dimm'
|
||||
- 'media_device_for_each_entity'
|
||||
- 'media_device_for_each_intf'
|
||||
@ -561,10 +584,15 @@ ForEachMacros:
|
||||
- 'media_pipeline_for_each_entity'
|
||||
- 'media_pipeline_for_each_pad'
|
||||
- 'mlx5_lag_for_each_peer_mdev'
|
||||
- 'mptcp_for_each_subflow'
|
||||
- 'msi_domain_for_each_desc'
|
||||
- 'msi_for_each_desc'
|
||||
- 'mt_for_each'
|
||||
- 'nanddev_io_for_each_block'
|
||||
- 'nanddev_io_for_each_page'
|
||||
- 'neigh_for_each_in_bucket'
|
||||
- 'neigh_for_each_in_bucket_rcu'
|
||||
- 'neigh_for_each_in_bucket_safe'
|
||||
- 'netdev_for_each_lower_dev'
|
||||
- 'netdev_for_each_lower_private'
|
||||
- 'netdev_for_each_lower_private_rcu'
|
||||
@ -604,11 +632,11 @@ ForEachMacros:
|
||||
- 'perf_evlist__for_each_entry_safe'
|
||||
- 'perf_evlist__for_each_evsel'
|
||||
- 'perf_evlist__for_each_mmap'
|
||||
- 'perf_evsel_for_each_per_thread_period_safe'
|
||||
- 'perf_hpp_list__for_each_format'
|
||||
- 'perf_hpp_list__for_each_format_safe'
|
||||
- 'perf_hpp_list__for_each_sort_list'
|
||||
- 'perf_hpp_list__for_each_sort_list_safe'
|
||||
- 'perf_tool_event__for_each_event'
|
||||
- 'plist_for_each'
|
||||
- 'plist_for_each_continue'
|
||||
- 'plist_for_each_entry'
|
||||
@ -627,7 +655,6 @@ ForEachMacros:
|
||||
- 'rdma_for_each_block'
|
||||
- 'rdma_for_each_port'
|
||||
- 'rdma_umem_for_each_dma_block'
|
||||
- 'resort_rb__for_each_entry'
|
||||
- 'resource_list_for_each_entry'
|
||||
- 'resource_list_for_each_entry_safe'
|
||||
- 'rhl_for_each_entry_rcu'
|
||||
@ -658,6 +685,7 @@ ForEachMacros:
|
||||
- 'shost_for_each_device'
|
||||
- 'sk_for_each'
|
||||
- 'sk_for_each_bound'
|
||||
- 'sk_for_each_bound_safe'
|
||||
- 'sk_for_each_entry_offset_rcu'
|
||||
- 'sk_for_each_from'
|
||||
- 'sk_for_each_rcu'
|
||||
@ -680,7 +708,11 @@ ForEachMacros:
|
||||
- 'tb_property_for_each'
|
||||
- 'tcf_act_for_each_action'
|
||||
- 'tcf_exts_for_each_action'
|
||||
- 'test_suite__for_each_test_case'
|
||||
- 'tool_pmu__for_each_event'
|
||||
- 'ttm_bo_lru_for_each_reserved_guarded'
|
||||
- 'ttm_resource_manager_for_each_res'
|
||||
- 'udp_lrpa_for_each_entry_rcu'
|
||||
- 'udp_portaddr_for_each_entry'
|
||||
- 'udp_portaddr_for_each_entry_rcu'
|
||||
- 'usb_hub_for_each_child'
|
||||
@ -691,6 +723,7 @@ ForEachMacros:
|
||||
- 'v4l2_m2m_for_each_src_buf_safe'
|
||||
- 'virtio_device_for_each_vq'
|
||||
- 'while_for_each_ftrace_op'
|
||||
- 'workloads__for_each'
|
||||
- 'xa_for_each'
|
||||
- 'xa_for_each_marked'
|
||||
- 'xa_for_each_range'
|
||||
|
.mailmap: 3 lines changed
@ -322,6 +322,7 @@ Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
|
||||
Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
|
||||
Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
|
||||
<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
|
||||
Jean-Michel Hautbois <jeanmichel.hautbois@yoseli.org> <jeanmichel.hautbois@ideasonboard.com>
|
||||
Jean Tourrilhes <jt@hpl.hp.com>
|
||||
Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
|
||||
Jeff Garzik <jgarzik@pretzel.yyz.us>
|
||||
@ -438,6 +439,8 @@ Linus Lüssing <linus.luessing@c0d3.blue> <ll@simonwunderlich.de>
|
||||
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
|
||||
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
|
||||
Lior David <quic_liord@quicinc.com> <liord@codeaurora.org>
|
||||
Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@linaro.org>
|
||||
Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@intel.com>
|
||||
Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
|
||||
Lorenzo Stoakes <lorenzo.stoakes@oracle.com> <lstoakes@gmail.com>
|
||||
Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
|
||||
|
CREDITS: 4 lines changed
@ -2071,6 +2071,10 @@ S: 660 Harvard Ave. #7
|
||||
S: Santa Clara, CA 95051
|
||||
S: USA
|
||||
|
||||
N: Joonsoo Kim
|
||||
E: iamjoonsoo.kim@lge.com
|
||||
D: Slab allocators
|
||||
|
||||
N: Kukjin Kim
|
||||
E: kgene@kernel.org
|
||||
D: Samsung S3C, S5P and Exynos ARM architectures
|
||||
|
@ -77,7 +77,7 @@ Description:

What:		/sys/block/<disk>/diskseq
Date:		February 2021
Contact:	Matteo Croce <mcroce@microsoft.com>
Contact:	Matteo Croce <teknoraver@meta.com>
Description:
		The /sys/block/<disk>/diskseq file reports the disk
		sequence number, which is a monotonically increasing
@ -1604,3 +1604,35 @@ Description:
		prevent the UFS from frequently performing clock gating/ungating.

		The attribute is read/write.

What:		/sys/bus/platform/drivers/ufshcd/*/device_lvl_exception_count
What:		/sys/bus/platform/devices/*.ufs/device_lvl_exception_count
Date:		March 2025
Contact:	Bao D. Nguyen <quic_nguyenb@quicinc.com>
Description:
		This attribute is applicable to UFS devices compliant with the
		JEDEC specification version 4.1 or later. The
		device_lvl_exception_count is a counter indicating the number of
		times device level exceptions have occurred since the last
		time this variable was reset. Writing a 0 value to this
		attribute will reset the device_lvl_exception_count. If the
		device_lvl_exception_count reads a positive value, the user
		application should read the device_lvl_exception_id attribute to
		learn more about the exception.

		The attribute is read/write.

What:		/sys/bus/platform/drivers/ufshcd/*/device_lvl_exception_id
What:		/sys/bus/platform/devices/*.ufs/device_lvl_exception_id
Date:		March 2025
Contact:	Bao D. Nguyen <quic_nguyenb@quicinc.com>
Description:
		Reading the device_lvl_exception_id returns the
		qDeviceLevelExceptionID attribute defined in the UFS device JEDEC
		specification version 4.1. The definition of
		qDeviceLevelExceptionID is vendor specific to the UFS device
		implementation. Refer to the device manufacturer datasheet for
		more information on the meaning of the qDeviceLevelExceptionID
		attribute value.

		The attribute is read only.

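As a minimal user-space sketch of how these attributes might be consumed
(the platform device path is an assumption and differs per board, and error
handling is trimmed)::

    #include <stdio.h>

    /* Hypothetical UFS platform device path; adjust for the target board. */
    #define UFS_SYSFS "/sys/bus/platform/devices/1d84000.ufs"

    int main(void)
    {
            char id[64];
            unsigned long count = 0;
            FILE *f;

            /* Writing 0 resets device_lvl_exception_count. */
            f = fopen(UFS_SYSFS "/device_lvl_exception_count", "w");
            if (f) {
                    fputs("0\n", f);
                    fclose(f);
            }

            /* Later: a positive count means the device raised an exception. */
            f = fopen(UFS_SYSFS "/device_lvl_exception_count", "r");
            if (f && fscanf(f, "%lu", &count) == 1 && count > 0) {
                    fclose(f);
                    f = fopen(UFS_SYSFS "/device_lvl_exception_id", "r");
                    if (f && fgets(id, sizeof(id), f))
                            printf("device_lvl_exception_id: %s", id);
            }
            if (f)
                    fclose(f);
            return 0;
    }
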
@ -1,7 +1,7 @@
|
||||
What: /sys/kernel/reboot
|
||||
Date: November 2020
|
||||
KernelVersion: 5.11
|
||||
Contact: Matteo Croce <mcroce@microsoft.com>
|
||||
Contact: Matteo Croce <teknoraver@meta.com>
|
||||
Description: Interface to set the kernel reboot behavior, similarly to
|
||||
what can be done via the reboot= cmdline option.
|
||||
(see Documentation/admin-guide/kernel-parameters.txt)
|
||||
@ -9,25 +9,25 @@ Description: Interface to set the kernel reboot behavior, similarly to
|
||||
What: /sys/kernel/reboot/mode
|
||||
Date: November 2020
|
||||
KernelVersion: 5.11
|
||||
Contact: Matteo Croce <mcroce@microsoft.com>
|
||||
Contact: Matteo Croce <teknoraver@meta.com>
|
||||
Description: Reboot mode. Valid values are: cold warm hard soft gpio
|
||||
|
||||
What: /sys/kernel/reboot/type
|
||||
Date: November 2020
|
||||
KernelVersion: 5.11
|
||||
Contact: Matteo Croce <mcroce@microsoft.com>
|
||||
Contact: Matteo Croce <teknoraver@meta.com>
|
||||
Description: Reboot type. Valid values are: bios acpi kbd triple efi pci
|
||||
|
||||
What: /sys/kernel/reboot/cpu
|
||||
Date: November 2020
|
||||
KernelVersion: 5.11
|
||||
Contact: Matteo Croce <mcroce@microsoft.com>
|
||||
Contact: Matteo Croce <teknoraver@meta.com>
|
||||
Description: CPU number to use to reboot.
|
||||
|
||||
What: /sys/kernel/reboot/force
|
||||
Date: November 2020
|
||||
KernelVersion: 5.11
|
||||
Contact: Matteo Croce <mcroce@microsoft.com>
|
||||
Contact: Matteo Croce <teknoraver@meta.com>
|
||||
Description: Don't wait for any other CPUs on reboot and
|
||||
avoid anything that could hang.
|
||||
|
||||
|
@ -124,6 +124,14 @@ When mounting an XFS filesystem, the following options are accepted.
	controls the size of each buffer and so is also relevant to
	this case.

  lifetime (default) or nolifetime
	Enable data placement based on write life time hints provided
	by the user. This turns on co-allocation of data of similar
	life times when statistically favorable to reduce garbage
	collection cost.

	These options are only available for zoned rt file systems.

  logbsize=value
	Set the size of each in-memory log buffer. The size may be
	specified in bytes, or in kilobytes with a "k" suffix.
@ -143,6 +151,14 @@ When mounting an XFS filesystem, the following options are accepted.
	optional, and the log section can be separate from the data
	section or contained within it.

  max_open_zones=value
	Specify the maximum number of zones to keep open for writing on a
	zoned rt device. Many open zones aid file data separation but
	may impact performance on HDDs.

	If ``max_open_zones`` is not specified, the value is determined
	by the capabilities and the size of the zoned rt device.

  noalign
	Data allocations will not be aligned at stripe unit
	boundaries. This is only relevant to filesystems created
@ -542,3 +558,24 @@ The interesting knobs for XFS workqueues are as follows:
  nice		Relative priority of scheduling the threads. These are the
		same nice levels that can be applied to userspace processes.
  ============	===========

Zoned Filesystems
=================

For zoned file systems, the following attributes are exposed in:

  /sys/fs/xfs/<dev>/zoned/

  max_open_zones	(Min: 1  Default: Varies  Max: UINTMAX)
	This read-only attribute exposes the maximum number of open zones
	available for data placement. The value is determined at mount time and
	is limited by the capabilities of the backing zoned device, file system
	size and the max_open_zones mount option.

  zonegc_low_space	(Min: 0  Default: 0  Max: 100)
	Define the percentage of unused space that GC should keep
	available for writing. A high value will reclaim more of the space
	occupied by unused blocks, creating a larger buffer against write
	bursts at the cost of increased write amplification. Regardless
	of this value, garbage collection will always aim to free a minimum
	number of blocks to keep max_open_zones open for data placement purposes.

|
||||
|
||||
For information about OpenRISC processors and ongoing development:
|
||||
|
||||
======= =============================
|
||||
======= ==============================
|
||||
website https://openrisc.io
|
||||
email openrisc@lists.librecores.org
|
||||
======= =============================
|
||||
email linux-openrisc@vger.kernel.org
|
||||
======= ==============================
|
||||
|
||||
---------------------------------------------------------------------
|
||||
|
||||
@ -27,11 +27,11 @@ Toolchain binaries can be obtained from openrisc.io or our github releases page.
|
||||
Instructions for building the different toolchains can be found on openrisc.io
|
||||
or Stafford's toolchain build and release scripts.
|
||||
|
||||
========== =================================================
|
||||
binaries https://github.com/openrisc/or1k-gcc/releases
|
||||
========== ==========================================================
|
||||
binaries https://github.com/stffrdhrn/or1k-toolchain-build/releases
|
||||
toolchains https://openrisc.io/software
|
||||
building https://github.com/stffrdhrn/or1k-toolchain-build
|
||||
========== =================================================
|
||||
========== ==========================================================
|
||||
|
||||
2) Building
|
||||
|
||||
|
@ -51,7 +51,7 @@ The following keys are defined:
* :c:macro:`RISCV_HWPROBE_KEY_MARCHID`: Contains the value of ``marchid``, as
  defined by the RISC-V privileged architecture specification.

* :c:macro:`RISCV_HWPROBE_KEY_MIMPLID`: Contains the value of ``mimplid``, as
* :c:macro:`RISCV_HWPROBE_KEY_MIMPID`: Contains the value of ``mimpid``, as
  defined by the RISC-V privileged architecture specification.

* :c:macro:`RISCV_HWPROBE_KEY_BASE_BEHAVIOR`: A bitmask containing the base

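For illustration, a hedged user-space sketch of querying ``mimpid`` through
this interface; it assumes ``<asm/hwprobe.h>`` and ``__NR_riscv_hwprobe`` are
provided by the installed RISC-V kernel headers::

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/hwprobe.h>

    int main(void)
    {
            struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_MIMPID };

            /* cpusetsize == 0 with cpus == NULL asks about all online harts. */
            if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0)) {
                    perror("riscv_hwprobe");
                    return 1;
            }
            printf("mimpid: 0x%llx\n", (unsigned long long)pair.value);
            return 0;
    }
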
@ -382,6 +382,14 @@ In case of new BPF instructions, once the changes have been accepted
into the Linux kernel, please implement support into LLVM's BPF back
end. See LLVM_ section below for further information.

Q: What is the "BPF_INTERNAL" symbol namespace for?
----------------------------------------------------
A: Symbols exported in the BPF_INTERNAL namespace can only be used by BPF
infrastructure, such as the preload kernel modules with a light skeleton.
Most symbols outside of BPF_INTERNAL are not expected to be used by code
outside of BPF either. Symbols may lack the designation because they
predate the namespaces, or due to an oversight.

Stable submission
=================

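A minimal kernel-side sketch of the export/import pattern such symbols use;
the helper name below is hypothetical, only the
``EXPORT_SYMBOL_NS_GPL()``/``MODULE_IMPORT_NS()`` pairing is the point::

    /* Exporting side (core BPF code) -- hypothetical helper. */
    #include <linux/export.h>

    void bpf_internal_helper(void)
    {
    }
    EXPORT_SYMBOL_NS_GPL(bpf_internal_helper, "BPF_INTERNAL");

    /* Consuming side, e.g. the preload module carrying the light skeleton. */
    #include <linux/module.h>

    MODULE_IMPORT_NS("BPF_INTERNAL");   /* without this, modpost rejects the use */
    MODULE_LICENSE("GPL");
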
@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
title: Ceva AHCI SATA Controller
|
||||
|
||||
maintainers:
|
||||
- Mubin Sayyed <mubin.sayyed@amd.com>
|
||||
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
|
||||
|
||||
description: |
|
||||
|
@ -111,11 +111,27 @@ properties:
|
||||
unevaluatedProperties: false
|
||||
|
||||
port@1:
|
||||
$ref: /schemas/graph.yaml#/properties/port
|
||||
$ref: /schemas/graph.yaml#/$defs/port-base
|
||||
unevaluatedProperties: false
|
||||
description:
|
||||
DSI output port node to the panel or the next bridge
|
||||
in the chain
|
||||
|
||||
properties:
|
||||
endpoint:
|
||||
$ref: /schemas/media/video-interfaces.yaml#
|
||||
unevaluatedProperties: false
|
||||
|
||||
properties:
|
||||
data-lanes:
|
||||
description: array of physical DSI data lane indexes.
|
||||
minItems: 1
|
||||
items:
|
||||
- const: 1
|
||||
- const: 2
|
||||
- const: 3
|
||||
- const: 4
|
||||
|
||||
required:
|
||||
- port@0
|
||||
- port@1
|
||||
|
@ -12,7 +12,6 @@ description:
|
||||
PS_MODE). Every pin can be configured as input/output.
|
||||
|
||||
maintainers:
|
||||
- Mubin Sayyed <mubin.sayyed@amd.com>
|
||||
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
|
||||
|
||||
properties:
|
||||
|
@ -19,6 +19,7 @@ properties:
|
||||
- fsl,imx8mp-irqsteer
|
||||
- fsl,imx8qm-irqsteer
|
||||
- fsl,imx8qxp-irqsteer
|
||||
- fsl,imx94-irqsteer
|
||||
- const: fsl,imx-irqsteer
|
||||
|
||||
reg:
|
||||
|
@ -27,7 +27,7 @@ properties:
|
||||
$ref: /schemas/types.yaml#/definitions/uint32-array
|
||||
items:
|
||||
- minimum: 0
|
||||
maximum: 7
|
||||
maximum: 31
|
||||
description:
|
||||
Offset in bit within the address range specified by reg.
|
||||
- minimum: 1
|
||||
|
@ -19,6 +19,7 @@ properties:
|
||||
- enum:
|
||||
- qcom,apq8064-qfprom
|
||||
- qcom,apq8084-qfprom
|
||||
- qcom,ipq5018-qfprom
|
||||
- qcom,ipq5332-qfprom
|
||||
- qcom,ipq5424-qfprom
|
||||
- qcom,ipq6018-qfprom
|
||||
@ -28,6 +29,8 @@ properties:
|
||||
- qcom,msm8226-qfprom
|
||||
- qcom,msm8916-qfprom
|
||||
- qcom,msm8917-qfprom
|
||||
- qcom,msm8937-qfprom
|
||||
- qcom,msm8960-qfprom
|
||||
- qcom,msm8974-qfprom
|
||||
- qcom,msm8976-qfprom
|
||||
- qcom,msm8996-qfprom
|
||||
@ -51,6 +54,7 @@ properties:
|
||||
- qcom,sm8450-qfprom
|
||||
- qcom,sm8550-qfprom
|
||||
- qcom,sm8650-qfprom
|
||||
- qcom,x1e80100-qfprom
|
||||
- const: qcom,qfprom
|
||||
|
||||
reg:
|
||||
|
@ -14,6 +14,7 @@ properties:
|
||||
enum:
|
||||
- rockchip,px30-otp
|
||||
- rockchip,rk3308-otp
|
||||
- rockchip,rk3576-otp
|
||||
- rockchip,rk3588-otp
|
||||
|
||||
reg:
|
||||
@ -62,12 +63,34 @@ allOf:
|
||||
properties:
|
||||
clocks:
|
||||
maxItems: 3
|
||||
clock-names:
|
||||
maxItems: 3
|
||||
resets:
|
||||
maxItems: 1
|
||||
reset-names:
|
||||
items:
|
||||
- const: phy
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- rockchip,rk3576-otp
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
maxItems: 3
|
||||
clock-names:
|
||||
maxItems: 3
|
||||
resets:
|
||||
minItems: 2
|
||||
maxItems: 2
|
||||
reset-names:
|
||||
items:
|
||||
- const: otp
|
||||
- const: apb
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
@ -78,6 +101,8 @@ allOf:
|
||||
properties:
|
||||
clocks:
|
||||
minItems: 4
|
||||
clock-names:
|
||||
minItems: 4
|
||||
resets:
|
||||
minItems: 3
|
||||
reset-names:
|
||||
|
@ -9,15 +9,6 @@ title: Renesas R-Car Timer Pulse Unit PWM Controller
|
||||
maintainers:
|
||||
- Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
|
||||
|
||||
select:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: renesas,tpu
|
||||
required:
|
||||
- compatible
|
||||
- '#pwm-cells'
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
|
@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
title: Zynq UltraScale+ MPSoC and Versal reset
|
||||
|
||||
maintainers:
|
||||
- Mubin Sayyed <mubin.sayyed@amd.com>
|
||||
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
|
||||
|
||||
description: |
|
||||
|
@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
title: Freescale Layerscape Reset Registers Module
|
||||
|
||||
maintainers:
|
||||
- Frank Li
|
||||
- Frank Li <Frank.Li@nxp.com>
|
||||
|
||||
description:
|
||||
Reset Module includes chip reset, service processor control and Reset Control
|
||||
|
@ -18,9 +18,14 @@ description: |
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- nxp,imx95-sysctr-timer
|
||||
- nxp,sysctr-timer
|
||||
oneOf:
|
||||
- enum:
|
||||
- nxp,imx95-sysctr-timer
|
||||
- nxp,sysctr-timer
|
||||
- items:
|
||||
- enum:
|
||||
- nxp,imx94-sysctr-timer
|
||||
- const: nxp,imx95-sysctr-timer
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
@ -1,56 +0,0 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/timer/renesas,tpu.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Renesas H8/300 Timer Pulse Unit
|
||||
|
||||
maintainers:
|
||||
- Yoshinori Sato <ysato@users.sourceforge.jp>
|
||||
|
||||
description:
|
||||
The TPU is a 16bit timer/counter with configurable clock inputs and
|
||||
programmable compare match.
|
||||
This implementation supports only cascade mode.
|
||||
|
||||
select:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: renesas,tpu
|
||||
'#pwm-cells': false
|
||||
required:
|
||||
- compatible
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: renesas,tpu
|
||||
|
||||
reg:
|
||||
items:
|
||||
- description: First channel
|
||||
- description: Second channel
|
||||
|
||||
clocks:
|
||||
maxItems: 1
|
||||
|
||||
clock-names:
|
||||
const: fck
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- clocks
|
||||
- clock-names
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
tpu: tpu@ffffe0 {
|
||||
compatible = "renesas,tpu";
|
||||
reg = <0xffffe0 16>, <0xfffff0 12>;
|
||||
clocks = <&pclk>;
|
||||
clock-names = "fck";
|
||||
};
|
@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
title: Xilinx SuperSpeed DWC3 USB SoC controller
|
||||
|
||||
maintainers:
|
||||
- Mubin Sayyed <mubin.sayyed@amd.com>
|
||||
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
|
||||
|
||||
properties:
|
||||
|
@ -17,7 +17,6 @@ description:
|
||||
|
||||
maintainers:
|
||||
- Michal Simek <michal.simek@amd.com>
|
||||
- Mubin Sayyed <mubin.sayyed@amd.com>
|
||||
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
|
||||
|
||||
properties:
|
||||
|
@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
title: Xilinx udc controller
|
||||
|
||||
maintainers:
|
||||
- Mubin Sayyed <mubin.sayyed@amd.com>
|
||||
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
|
||||
|
||||
properties:
|
||||
|
@ -123,12 +123,12 @@ attribute-sets:
|
||||
|
||||
operations:
|
||||
name-prefix: ovs-vport-cmd-
|
||||
fixed-header: ovs-header
|
||||
list:
|
||||
-
|
||||
name: new
|
||||
doc: Create a new OVS vport
|
||||
attribute-set: vport
|
||||
fixed-header: ovs-header
|
||||
do:
|
||||
request:
|
||||
attributes:
|
||||
@ -141,7 +141,6 @@ operations:
|
||||
name: del
|
||||
doc: Delete existing OVS vport from a data path
|
||||
attribute-set: vport
|
||||
fixed-header: ovs-header
|
||||
do:
|
||||
request:
|
||||
attributes:
|
||||
@ -152,7 +151,6 @@ operations:
|
||||
name: get
|
||||
doc: Get / dump OVS vport configuration and state
|
||||
attribute-set: vport
|
||||
fixed-header: ovs-header
|
||||
do: &vport-get-op
|
||||
request:
|
||||
attributes:
|
||||
|
@ -1113,11 +1113,10 @@ attribute-sets:
|
||||
-
|
||||
name: prop-list
|
||||
type: nest
|
||||
nested-attributes: link-attrs
|
||||
nested-attributes: prop-list-link-attrs
|
||||
-
|
||||
name: alt-ifname
|
||||
type: string
|
||||
multi-attr: true
|
||||
-
|
||||
name: perm-address
|
||||
type: binary
|
||||
@ -1163,6 +1162,13 @@ attribute-sets:
|
||||
-
|
||||
name: netns-immutable
|
||||
type: u8
|
||||
-
|
||||
name: prop-list-link-attrs
|
||||
subset-of: link-attrs
|
||||
attributes:
|
||||
-
|
||||
name: alt-ifname
|
||||
multi-attr: true
|
||||
-
|
||||
name: af-spec-attrs
|
||||
attributes:
|
||||
@ -1585,7 +1591,7 @@ attribute-sets:
|
||||
name: nf-call-iptables
|
||||
type: u8
|
||||
-
|
||||
name: nf-call-ip6-tables
|
||||
name: nf-call-ip6tables
|
||||
type: u8
|
||||
-
|
||||
name: nf-call-arptables
|
||||
@ -2077,7 +2083,7 @@ attribute-sets:
|
||||
name: id
|
||||
type: u16
|
||||
-
|
||||
name: flag
|
||||
name: flags
|
||||
type: binary
|
||||
struct: ifla-vlan-flags
|
||||
-
|
||||
@ -2165,7 +2171,7 @@ attribute-sets:
|
||||
type: binary
|
||||
struct: ifla-cacheinfo
|
||||
-
|
||||
name: icmp6-stats
|
||||
name: icmp6stats
|
||||
type: binary
|
||||
struct: ifla-icmp6-stats
|
||||
-
|
||||
@ -2179,9 +2185,10 @@ attribute-sets:
|
||||
type: u32
|
||||
-
|
||||
name: mctp-attrs
|
||||
name-prefix: ifla-mctp-
|
||||
attributes:
|
||||
-
|
||||
name: mctp-net
|
||||
name: net
|
||||
type: u32
|
||||
-
|
||||
name: phys-binding
|
||||
@ -2453,7 +2460,6 @@ operations:
|
||||
- min-mtu
|
||||
- max-mtu
|
||||
- prop-list
|
||||
- alt-ifname
|
||||
- perm-address
|
||||
- proto-down-reason
|
||||
- parent-dev-name
|
||||
|
@ -13,25 +13,25 @@ definitions:
|
||||
type: struct
|
||||
members:
|
||||
-
|
||||
name: family
|
||||
name: ndm-family
|
||||
type: u8
|
||||
-
|
||||
name: pad
|
||||
name: ndm-pad
|
||||
type: pad
|
||||
len: 3
|
||||
-
|
||||
name: ifindex
|
||||
name: ndm-ifindex
|
||||
type: s32
|
||||
-
|
||||
name: state
|
||||
name: ndm-state
|
||||
type: u16
|
||||
enum: nud-state
|
||||
-
|
||||
name: flags
|
||||
name: ndm-flags
|
||||
type: u8
|
||||
enum: ntf-flags
|
||||
-
|
||||
name: type
|
||||
name: ndm-type
|
||||
type: u8
|
||||
enum: rtm-type
|
||||
-
|
||||
@ -189,7 +189,7 @@ attribute-sets:
|
||||
type: binary
|
||||
display-hint: ipv4
|
||||
-
|
||||
name: lladr
|
||||
name: lladdr
|
||||
type: binary
|
||||
display-hint: mac
|
||||
-
|
||||
|
@ -154,7 +154,7 @@ suspending the device are satisfied) and to queue up a suspend request for the
device in that case. If there is no idle callback, or if the callback returns
0, then the PM core will attempt to carry out a runtime suspend of the device,
also respecting devices configured for autosuspend. In essence this means a
call to __pm_runtime_autosuspend() (do note that drivers need to update the
call to pm_runtime_autosuspend() (do note that drivers need to update the
device last busy mark, pm_runtime_mark_last_busy(), to control the delay under
this circumstance). To prevent this (for example, if the callback routine has
started a delayed suspend), the routine must return a non-zero value. Negative

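A minimal sketch of a ->runtime_idle() callback illustrating this flow; the
foo_* driver and its callbacks are hypothetical placeholders::

    #include <linux/pm_runtime.h>

    /* Hypothetical placeholders standing in for real driver callbacks. */
    static int foo_runtime_suspend(struct device *dev) { return 0; }
    static int foo_runtime_resume(struct device *dev) { return 0; }
    static bool foo_still_busy(struct device *dev) { return false; }

    static int foo_runtime_idle(struct device *dev)
    {
            if (foo_still_busy(dev))
                    return -EBUSY;  /* non-zero return vetoes the suspend */

            /* Refresh the last-busy mark so the autosuspend delay restarts. */
            pm_runtime_mark_last_busy(dev);
            return 0;               /* PM core proceeds to pm_runtime_autosuspend() */
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
                               foo_runtime_idle)
    };
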
@ -17,10 +17,10 @@ OpenRISC 1000系列(或1k)。
|
||||
|
||||
关于OpenRISC处理器和正在进行中的开发的信息:
|
||||
|
||||
======= =============================
|
||||
======= ==============================
|
||||
网站 https://openrisc.io
|
||||
邮箱 openrisc@lists.librecores.org
|
||||
======= =============================
|
||||
邮箱 linux-openrisc@vger.kernel.org
|
||||
======= ==============================
|
||||
|
||||
---------------------------------------------------------------------
|
||||
|
||||
@ -36,11 +36,11 @@ OpenRISC工具链和Linux的构建指南
|
||||
工具链的构建指南可以在openrisc.io或Stafford的工具链构建和发布脚本
|
||||
中找到。
|
||||
|
||||
====== =================================================
|
||||
二进制 https://github.com/openrisc/or1k-gcc/releases
|
||||
====== ==========================================================
|
||||
二进制 https://github.com/stffrdhrn/or1k-toolchain-build/releases
|
||||
工具链 https://openrisc.io/software
|
||||
构建 https://github.com/stffrdhrn/or1k-toolchain-build
|
||||
====== =================================================
|
||||
====== ==========================================================
|
||||
|
||||
2) 构建
|
||||
|
||||
|
@ -17,10 +17,10 @@ OpenRISC 1000系列(或1k)。
|
||||
|
||||
關於OpenRISC處理器和正在進行中的開發的信息:
|
||||
|
||||
======= =============================
|
||||
======= ==============================
|
||||
網站 https://openrisc.io
|
||||
郵箱 openrisc@lists.librecores.org
|
||||
======= =============================
|
||||
郵箱 linux-openrisc@vger.kernel.org
|
||||
======= ==============================
|
||||
|
||||
---------------------------------------------------------------------
|
||||
|
||||
@ -36,11 +36,11 @@ OpenRISC工具鏈和Linux的構建指南
|
||||
工具鏈的構建指南可以在openrisc.io或Stafford的工具鏈構建和發佈腳本
|
||||
中找到。
|
||||
|
||||
====== =================================================
|
||||
二進制 https://github.com/openrisc/or1k-gcc/releases
|
||||
====== ==========================================================
|
||||
二進制 https://github.com/stffrdhrn/or1k-toolchain-build/releases
|
||||
工具鏈 https://openrisc.io/software
|
||||
構建 https://github.com/stffrdhrn/or1k-toolchain-build
|
||||
====== =================================================
|
||||
====== ==========================================================
|
||||
|
||||
2) 構建
|
||||
|
||||
|
@ -27,7 +27,7 @@ SYSCALL
=======
mseal syscall signature
-----------------------
``int mseal(void \* addr, size_t len, unsigned long flags)``
``int mseal(void *addr, size_t len, unsigned long flags)``

**addr**/**len**: virtual memory address range.
The address range set by **addr**/**len** must meet:

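For illustration, a user-space sketch of sealing an anonymous mapping; it
assumes ``__NR_mseal`` is provided by the installed kernel headers::

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 4096;
            void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (addr == MAP_FAILED)
                    return 1;

            /* flags is currently reserved and must be 0. */
            if (syscall(__NR_mseal, addr, len, 0)) {
                    perror("mseal");
                    return 1;
            }

            /* Changing protections of the sealed range should now fail (EPERM). */
            if (mprotect(addr, len, PROT_READ))
                    perror("mprotect on sealed range");
            return 0;
    }
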
@ -138,6 +138,10 @@ input data, the meaning of which depends on the subfeature being accessed.
The output buffer contains a single byte which signals success or failure (``0x00`` on failure)
and 31 bytes of output data, the meaning of which depends on the subfeature being accessed.

.. note::
   The ACPI control method responsible for handling the WMI method calls is not thread-safe.
   This is a firmware bug that needs to be handled inside the driver itself.

WMI method Get_EC()
-------------------

MAINTAINERS: 106 lines changed
@ -3191,6 +3191,12 @@ M: Dinh Nguyen <dinguyen@kernel.org>
|
||||
S: Maintained
|
||||
F: drivers/clk/socfpga/
|
||||
|
||||
ARM/SOCFPGA DWMAC GLUE LAYER
|
||||
M: Maxime Chevallier <maxime.chevallier@bootlin.com>
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/net/socfpga-dwmac.txt
|
||||
F: drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
|
||||
|
||||
ARM/SOCFPGA EDAC BINDINGS
|
||||
M: Matthew Gerlach <matthew.gerlach@altera.com>
|
||||
S: Maintained
|
||||
@ -3867,8 +3873,9 @@ AUXILIARY BUS DRIVER
|
||||
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
R: Dave Ertman <david.m.ertman@intel.com>
|
||||
R: Ira Weiny <ira.weiny@intel.com>
|
||||
R: Leon Romanovsky <leon@kernel.org>
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git
|
||||
F: Documentation/driver-api/auxiliary_bus.rst
|
||||
F: drivers/base/auxiliary.c
|
||||
F: include/linux/auxiliary_bus.h
|
||||
@ -6335,6 +6342,7 @@ F: Documentation/process/cve.rst
|
||||
|
||||
CW1200 WLAN driver
|
||||
S: Orphan
|
||||
L: linux-wireless@vger.kernel.org
|
||||
F: drivers/net/wireless/st/
|
||||
F: include/linux/platform_data/net-cw1200.h
|
||||
|
||||
@ -7020,6 +7028,7 @@ L: rust-for-linux@vger.kernel.org
|
||||
S: Supported
|
||||
W: https://rust-for-linux.com
|
||||
T: git https://github.com/Rust-for-Linux/linux.git alloc-next
|
||||
F: rust/helpers/dma.c
|
||||
F: rust/kernel/dma.rs
|
||||
F: samples/rust/rust_dma.rs
|
||||
|
||||
@ -7225,7 +7234,7 @@ M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
M: "Rafael J. Wysocki" <rafael@kernel.org>
|
||||
M: Danilo Krummrich <dakr@kernel.org>
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git
|
||||
F: Documentation/core-api/kobject.rst
|
||||
F: drivers/base/
|
||||
F: fs/debugfs/
|
||||
@ -10455,14 +10464,20 @@ S: Supported
|
||||
F: drivers/infiniband/hw/hfi1
|
||||
|
||||
HFS FILESYSTEM
|
||||
M: Viacheslav Dubeyko <slava@dubeyko.com>
|
||||
M: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
|
||||
M: Yangtao Li <frank.li@vivo.com>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
S: Orphan
|
||||
S: Maintained
|
||||
F: Documentation/filesystems/hfs.rst
|
||||
F: fs/hfs/
|
||||
|
||||
HFSPLUS FILESYSTEM
|
||||
M: Viacheslav Dubeyko <slava@dubeyko.com>
|
||||
M: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
|
||||
M: Yangtao Li <frank.li@vivo.com>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
S: Orphan
|
||||
S: Maintained
|
||||
F: Documentation/filesystems/hfsplus.rst
|
||||
F: fs/hfsplus/
|
||||
|
||||
@ -10956,6 +10971,7 @@ F: include/linux/platform_data/huawei-gaokun-ec.h
|
||||
|
||||
HUGETLB SUBSYSTEM
|
||||
M: Muchun Song <muchun.song@linux.dev>
|
||||
R: Oscar Salvador <osalvador@suse.de>
|
||||
L: linux-mm@kvack.org
|
||||
S: Maintained
|
||||
F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages
|
||||
@ -12812,6 +12828,7 @@ F: lib/Kconfig.kcsan
|
||||
F: scripts/Makefile.kcsan
|
||||
|
||||
KDUMP
|
||||
M: Andrew Morton <akpm@linux-foundation.org>
|
||||
M: Baoquan He <bhe@redhat.com>
|
||||
R: Vivek Goyal <vgoyal@redhat.com>
|
||||
R: Dave Young <dyoung@redhat.com>
|
||||
@ -13108,11 +13125,13 @@ KERNFS
|
||||
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
M: Tejun Heo <tj@kernel.org>
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git
|
||||
F: fs/kernfs/
|
||||
F: include/linux/kernfs.h
|
||||
|
||||
KEXEC
|
||||
M: Andrew Morton <akpm@linux-foundation.org>
|
||||
M: Baoquan He <bhe@redhat.com>
|
||||
L: kexec@lists.infradead.org
|
||||
W: http://kernel.org/pub/linux/utils/kernel/kexec/
|
||||
F: include/linux/kexec.h
|
||||
@ -14285,6 +14304,7 @@ S: Odd fixes
|
||||
F: drivers/net/ethernet/marvell/sk*
|
||||
|
||||
MARVELL LIBERTAS WIRELESS DRIVER
|
||||
L: linux-wireless@vger.kernel.org
|
||||
L: libertas-dev@lists.infradead.org
|
||||
S: Orphan
|
||||
F: drivers/net/wireless/marvell/libertas/
|
||||
@ -15510,6 +15530,21 @@ F: mm/numa.c
|
||||
F: mm/numa_emulation.c
|
||||
F: mm/numa_memblks.c
|
||||
|
||||
MEMORY MANAGEMENT - PAGE ALLOCATOR
|
||||
M: Andrew Morton <akpm@linux-foundation.org>
|
||||
R: Vlastimil Babka <vbabka@suse.cz>
|
||||
R: Suren Baghdasaryan <surenb@google.com>
|
||||
R: Michal Hocko <mhocko@suse.com>
|
||||
R: Brendan Jackman <jackmanb@google.com>
|
||||
R: Johannes Weiner <hannes@cmpxchg.org>
|
||||
R: Zi Yan <ziy@nvidia.com>
|
||||
L: linux-mm@kvack.org
|
||||
S: Maintained
|
||||
F: mm/compaction.c
|
||||
F: mm/page_alloc.c
|
||||
F: include/linux/gfp.h
|
||||
F: include/linux/compaction.h
|
||||
|
||||
MEMORY MANAGEMENT - SECRETMEM
|
||||
M: Andrew Morton <akpm@linux-foundation.org>
|
||||
M: Mike Rapoport <rppt@kernel.org>
|
||||
@ -15537,10 +15572,12 @@ M: Liam R. Howlett <Liam.Howlett@oracle.com>
|
||||
M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
|
||||
R: Vlastimil Babka <vbabka@suse.cz>
|
||||
R: Jann Horn <jannh@google.com>
|
||||
R: Pedro Falcato <pfalcato@suse.de>
|
||||
L: linux-mm@kvack.org
|
||||
S: Maintained
|
||||
W: http://www.linux-mm.org
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
|
||||
F: include/trace/events/mmap.h
|
||||
F: mm/mlock.c
|
||||
F: mm/mmap.c
|
||||
F: mm/mprotect.c
|
||||
@ -15551,6 +15588,36 @@ F: mm/vma.h
|
||||
F: mm/vma_internal.h
|
||||
F: tools/testing/vma/
|
||||
|
||||
MEMORY MAPPING - LOCKING
|
||||
M: Andrew Morton <akpm@linux-foundation.org>
|
||||
M: Suren Baghdasaryan <surenb@google.com>
|
||||
M: Liam R. Howlett <Liam.Howlett@oracle.com>
|
||||
M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
|
||||
R: Vlastimil Babka <vbabka@suse.cz>
|
||||
R: Shakeel Butt <shakeel.butt@linux.dev>
|
||||
L: linux-mm@kvack.org
|
||||
S: Maintained
|
||||
W: http://www.linux-mm.org
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
|
||||
F: Documentation/mm/process_addrs.rst
|
||||
F: include/linux/mmap_lock.h
|
||||
F: include/trace/events/mmap_lock.h
|
||||
F: mm/mmap_lock.c
|
||||
|
||||
MEMORY MAPPING - MADVISE (MEMORY ADVICE)
|
||||
M: Andrew Morton <akpm@linux-foundation.org>
|
||||
M: Liam R. Howlett <Liam.Howlett@oracle.com>
|
||||
M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
|
||||
M: David Hildenbrand <david@redhat.com>
|
||||
R: Vlastimil Babka <vbabka@suse.cz>
|
||||
R: Jann Horn <jannh@google.com>
|
||||
L: linux-mm@kvack.org
|
||||
S: Maintained
|
||||
W: http://www.linux-mm.org
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
|
||||
F: include/uapi/asm-generic/mman-common.h
|
||||
F: mm/madvise.c
|
||||
|
||||
MEMORY TECHNOLOGY DEVICES (MTD)
|
||||
M: Miquel Raynal <miquel.raynal@bootlin.com>
|
||||
M: Richard Weinberger <richard@nod.at>
|
||||
@ -16758,6 +16825,7 @@ F: Documentation/networking/net_cachelines/net_device.rst
|
||||
F: drivers/connector/
|
||||
F: drivers/net/
|
||||
F: drivers/ptp/
|
||||
F: drivers/s390/net/
|
||||
F: include/dt-bindings/net/
|
||||
F: include/linux/cn_proc.h
|
||||
F: include/linux/etherdevice.h
|
||||
@ -16767,6 +16835,7 @@ F: include/linux/fddidevice.h
|
||||
F: include/linux/hippidevice.h
|
||||
F: include/linux/if_*
|
||||
F: include/linux/inetdevice.h
|
||||
F: include/linux/ism.h
|
||||
F: include/linux/netdev*
|
||||
F: include/linux/platform_data/wiznet.h
|
||||
F: include/uapi/linux/cn_proc.h
|
||||
@ -18635,7 +18704,7 @@ F: drivers/pci/controller/pci-xgene-msi.c
|
||||
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
|
||||
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
|
||||
M: Krzysztof Wilczyński <kw@linux.com>
|
||||
R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
|
||||
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
|
||||
R: Rob Herring <robh@kernel.org>
|
||||
L: linux-pci@vger.kernel.org
|
||||
S: Supported
|
||||
@ -18688,6 +18757,16 @@ F: include/asm-generic/pci*
|
||||
F: include/linux/of_pci.h
|
||||
F: include/linux/pci*
|
||||
F: include/uapi/linux/pci*
|
||||
|
||||
PCI SUBSYSTEM [RUST]
|
||||
M: Danilo Krummrich <dakr@kernel.org>
|
||||
R: Bjorn Helgaas <bhelgaas@google.com>
|
||||
R: Krzysztof Wilczyński <kwilczynski@kernel.org>
|
||||
L: linux-pci@vger.kernel.org
|
||||
S: Maintained
|
||||
C: irc://irc.oftc.net/linux-pci
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
|
||||
F: rust/helpers/pci.c
|
||||
F: rust/kernel/pci.rs
|
||||
F: samples/rust/rust_driver_pci.rs
|
||||
|
||||
@ -19748,6 +19827,7 @@ F: drivers/media/tuners/qt1010*
|
||||
|
||||
QUALCOMM ATH12K WIRELESS DRIVER
|
||||
M: Jeff Johnson <jjohnson@kernel.org>
|
||||
L: linux-wireless@vger.kernel.org
|
||||
L: ath12k@lists.infradead.org
|
||||
S: Supported
|
||||
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath12k
|
||||
@ -19757,6 +19837,7 @@ N: ath12k
|
||||
|
||||
QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
|
||||
M: Jeff Johnson <jjohnson@kernel.org>
|
||||
L: linux-wireless@vger.kernel.org
|
||||
L: ath10k@lists.infradead.org
|
||||
S: Supported
|
||||
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
|
||||
@ -19766,6 +19847,7 @@ N: ath10k
|
||||
|
||||
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
|
||||
M: Jeff Johnson <jjohnson@kernel.org>
|
||||
L: linux-wireless@vger.kernel.org
|
||||
L: ath11k@lists.infradead.org
|
||||
S: Supported
|
||||
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath11k
|
||||
@ -21255,6 +21337,7 @@ L: linux-s390@vger.kernel.org
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
F: drivers/s390/net/
|
||||
F: include/linux/ism.h
|
||||
|
||||
S390 PCI SUBSYSTEM
|
||||
M: Niklas Schnelle <schnelle@linux.ibm.com>
|
||||
@ -22139,6 +22222,7 @@ F: drivers/platform/x86/touchscreen_dmi.c
|
||||
|
||||
SILICON LABS WIRELESS DRIVERS (for WFxxx series)
|
||||
M: Jérôme Pouiller <jerome.pouiller@silabs.com>
|
||||
L: linux-wireless@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
|
||||
F: drivers/net/wireless/silabs/
|
||||
@ -22239,9 +22323,7 @@ F: drivers/nvmem/layouts/sl28vpd.c
|
||||
|
||||
SLAB ALLOCATOR
|
||||
M: Christoph Lameter <cl@linux.com>
|
||||
M: Pekka Enberg <penberg@kernel.org>
|
||||
M: David Rientjes <rientjes@google.com>
|
||||
M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
|
||||
M: Andrew Morton <akpm@linux-foundation.org>
|
||||
M: Vlastimil Babka <vbabka@suse.cz>
|
||||
R: Roman Gushchin <roman.gushchin@linux.dev>
|
||||
@ -25134,9 +25216,13 @@ S: Maintained
|
||||
F: drivers/usb/typec/mux/pi3usb30532.c
|
||||
|
||||
USB TYPEC PORT CONTROLLER DRIVERS
|
||||
M: Badhri Jagan Sridharan <badhri@google.com>
|
||||
L: linux-usb@vger.kernel.org
|
||||
S: Orphan
|
||||
F: drivers/usb/typec/tcpm/
|
||||
S: Maintained
|
||||
F: drivers/usb/typec/tcpm/tcpci.c
|
||||
F: drivers/usb/typec/tcpm/tcpm.c
|
||||
F: include/linux/usb/tcpci.h
|
||||
F: include/linux/usb/tcpm.h
|
||||
|
||||
USB TYPEC TUSB1046 MUX DRIVER
|
||||
M: Romain Gantois <romain.gantois@bootlin.com>
|
||||
|
Makefile: 8 lines changed
@ -2,7 +2,7 @@
|
||||
VERSION = 6
|
||||
PATCHLEVEL = 15
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc2
|
||||
EXTRAVERSION = -rc4
|
||||
NAME = Baby Opossum Posse
|
||||
|
||||
# *DOCUMENTATION*
|
||||
@ -477,7 +477,6 @@ export rust_common_flags := --edition=2021 \
|
||||
-Wclippy::ignored_unit_patterns \
|
||||
-Wclippy::mut_mut \
|
||||
-Wclippy::needless_bitwise_bool \
|
||||
-Wclippy::needless_continue \
|
||||
-Aclippy::needless_lifetimes \
|
||||
-Wclippy::no_mangle_with_rust_abi \
|
||||
-Wclippy::undocumented_unsafe_blocks \
|
||||
@ -1054,9 +1053,12 @@ NOSTDINC_FLAGS += -nostdinc
|
||||
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
|
||||
|
||||
#Currently, disable -Wstringop-overflow for GCC 11, globally.
|
||||
KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
|
||||
KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow)
|
||||
KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
|
||||
|
||||
#Currently, disable -Wunterminated-string-initialization as broken
|
||||
KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization)
|
||||
|
||||
# disable invalid "can't wrap" optimizations for signed / pointers
|
||||
KBUILD_CFLAGS += -fno-strict-overflow
|
||||
|
||||
|
@ -1588,4 +1588,9 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
|
||||
#define kvm_has_s1poe(k) \
|
||||
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
|
||||
|
||||
static inline bool kvm_arch_has_irq_bypass(void)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* __ARM64_KVM_HOST_H__ */
|
||||
|
@ -94,17 +94,6 @@ static inline bool kaslr_requires_kpti(void)
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Systems affected by Cavium erratum 24756 are incompatible
|
||||
* with KPTI.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
|
||||
extern const struct midr_range cavium_erratum_27456_cpus[];
|
||||
|
||||
if (is_midr_in_range_list(cavium_erratum_27456_cpus))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -335,7 +335,7 @@ static const struct midr_range cavium_erratum_23154_cpus[] = {
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
|
||||
const struct midr_range cavium_erratum_27456_cpus[] = {
|
||||
static const struct midr_range cavium_erratum_27456_cpus[] = {
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
|
||||
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
|
||||
/* Cavium ThunderX, T81 pass 1.0 */
|
||||
|
@ -47,10 +47,6 @@ PROVIDE(__pi_id_aa64smfr0_override = id_aa64smfr0_override);
|
||||
PROVIDE(__pi_id_aa64zfr0_override = id_aa64zfr0_override);
|
||||
PROVIDE(__pi_arm64_sw_feature_override = arm64_sw_feature_override);
|
||||
PROVIDE(__pi_arm64_use_ng_mappings = arm64_use_ng_mappings);
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
|
||||
PROVIDE(__pi_cavium_erratum_27456_cpus = cavium_erratum_27456_cpus);
|
||||
PROVIDE(__pi_is_midr_in_range_list = is_midr_in_range_list);
|
||||
#endif
|
||||
PROVIDE(__pi__ctype = _ctype);
|
||||
PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);
|
||||
|
||||
|
@ -207,6 +207,29 @@ static void __init map_fdt(u64 fdt)
|
||||
dsb(ishst);
|
||||
}
|
||||
|
||||
/*
|
||||
* PI version of the Cavium Eratum 27456 detection, which makes it
|
||||
* impossible to use non-global mappings.
|
||||
*/
|
||||
static bool __init ng_mappings_allowed(void)
|
||||
{
|
||||
static const struct midr_range cavium_erratum_27456_cpus[] __initconst = {
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
|
||||
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
|
||||
/* Cavium ThunderX, T81 pass 1.0 */
|
||||
MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
|
||||
{},
|
||||
};
|
||||
|
||||
for (const struct midr_range *r = cavium_erratum_27456_cpus; r->model; r++) {
|
||||
if (midr_is_cpu_model_range(read_cpuid_id(), r->model,
|
||||
r->rv_min, r->rv_max))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
|
||||
{
|
||||
static char const chosen_str[] __initconst = "/chosen";
|
||||
@ -246,7 +269,7 @@ asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
|
||||
u64 kaslr_seed = kaslr_early_init(fdt, chosen);
|
||||
|
||||
if (kaslr_seed && kaslr_requires_kpti())
|
||||
arm64_use_ng_mappings = true;
|
||||
arm64_use_ng_mappings = ng_mappings_allowed();
|
||||
|
||||
kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1);
|
||||
}
|
||||
|
@ -2743,11 +2743,6 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
|
||||
return irqchip_in_kernel(kvm);
|
||||
}
|
||||
|
||||
bool kvm_arch_has_irq_bypass(void)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
|
||||
struct irq_bypass_producer *prod)
|
||||
{
|
||||
|
@ -73,6 +73,7 @@ config LOONGARCH
|
||||
select ARCH_SUPPORTS_RT
|
||||
select ARCH_USE_BUILTIN_BSWAP
|
||||
select ARCH_USE_CMPXCHG_LOCKREF
|
||||
select ARCH_USE_MEMTEST
|
||||
select ARCH_USE_QUEUED_RWLOCKS
|
||||
select ARCH_USE_QUEUED_SPINLOCKS
|
||||
select ARCH_WANT_DEFAULT_BPF_JIT
|
||||
|
@ -22,22 +22,29 @@
|
||||
struct sigcontext;
|
||||
|
||||
#define kernel_fpu_available() cpu_has_fpu
|
||||
extern void kernel_fpu_begin(void);
|
||||
extern void kernel_fpu_end(void);
|
||||
|
||||
extern void _init_fpu(unsigned int);
|
||||
extern void _save_fp(struct loongarch_fpu *);
|
||||
extern void _restore_fp(struct loongarch_fpu *);
|
||||
void kernel_fpu_begin(void);
|
||||
void kernel_fpu_end(void);
|
||||
|
||||
extern void _save_lsx(struct loongarch_fpu *fpu);
|
||||
extern void _restore_lsx(struct loongarch_fpu *fpu);
|
||||
extern void _init_lsx_upper(void);
|
||||
extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _init_fpu(unsigned int);
|
||||
asmlinkage void _save_fp(struct loongarch_fpu *);
|
||||
asmlinkage void _restore_fp(struct loongarch_fpu *);
|
||||
asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
|
||||
asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
|
||||
|
||||
extern void _save_lasx(struct loongarch_fpu *fpu);
|
||||
extern void _restore_lasx(struct loongarch_fpu *fpu);
|
||||
extern void _init_lasx_upper(void);
|
||||
extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _save_lsx(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _restore_lsx(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _init_lsx_upper(void);
|
||||
asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu);
|
||||
asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
|
||||
asmlinkage void _save_lasx(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _restore_lasx(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _init_lasx_upper(void);
|
||||
asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu);
|
||||
asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
|
||||
static inline void enable_lsx(void);
|
||||
static inline void disable_lsx(void);
|
||||
|
@ -12,9 +12,13 @@
|
||||
#include <asm/loongarch.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
extern void _init_lbt(void);
|
||||
extern void _save_lbt(struct loongarch_lbt *);
|
||||
extern void _restore_lbt(struct loongarch_lbt *);
|
||||
asmlinkage void _init_lbt(void);
|
||||
asmlinkage void _save_lbt(struct loongarch_lbt *);
|
||||
asmlinkage void _restore_lbt(struct loongarch_lbt *);
|
||||
asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
|
||||
asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
|
||||
asmlinkage int _save_ftop_context(void __user *ftop);
|
||||
asmlinkage int _restore_ftop_context(void __user *ftop);
|
||||
|
||||
static inline int is_lbt_enabled(void)
|
||||
{
|
||||
|
@ -33,9 +33,9 @@ struct pt_regs {
|
||||
unsigned long __last[];
|
||||
} __aligned(8);
|
||||
|
||||
static inline int regs_irqs_disabled(struct pt_regs *regs)
|
||||
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
|
||||
{
|
||||
return arch_irqs_disabled_flags(regs->csr_prmd);
|
||||
return !(regs->csr_prmd & CSR_PRMD_PIE);
|
||||
}
|
||||
|
||||
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
|
||||
|
@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
|
||||
|
||||
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
|
||||
|
||||
CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_module.o += $(call cc-disable-warning, override-init)
|
||||
CFLAGS_syscall.o += $(call cc-disable-warning, override-init)
|
||||
CFLAGS_traps.o += $(call cc-disable-warning, override-init)
|
||||
CFLAGS_perf_event.o += $(call cc-disable-warning, override-init)
|
||||
|
||||
ifdef CONFIG_FUNCTION_TRACER
|
||||
ifndef CONFIG_DYNAMIC_FTRACE
|
||||
|
@ -458,6 +458,7 @@ SYM_FUNC_START(_save_fp_context)
|
||||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_save_fp_context)
|
||||
EXPORT_SYMBOL_GPL(_save_fp_context)
|
||||
|
||||
/*
|
||||
* a0: fpregs
|
||||
@ -471,6 +472,7 @@ SYM_FUNC_START(_restore_fp_context)
|
||||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_restore_fp_context)
|
||||
EXPORT_SYMBOL_GPL(_restore_fp_context)
|
||||
|
||||
/*
|
||||
* a0: fpregs
|
||||
@ -484,6 +486,7 @@ SYM_FUNC_START(_save_lsx_context)
|
||||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_save_lsx_context)
|
||||
EXPORT_SYMBOL_GPL(_save_lsx_context)
|
||||
|
||||
/*
|
||||
* a0: fpregs
|
||||
@ -497,6 +500,7 @@ SYM_FUNC_START(_restore_lsx_context)
|
||||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_restore_lsx_context)
|
||||
EXPORT_SYMBOL_GPL(_restore_lsx_context)
|
||||
|
||||
/*
|
||||
* a0: fpregs
|
||||
@ -510,6 +514,7 @@ SYM_FUNC_START(_save_lasx_context)
|
||||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_save_lasx_context)
|
||||
EXPORT_SYMBOL_GPL(_save_lasx_context)
|
||||
|
||||
/*
|
||||
* a0: fpregs
|
||||
@ -523,6 +528,7 @@ SYM_FUNC_START(_restore_lasx_context)
|
||||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_restore_lasx_context)
|
||||
EXPORT_SYMBOL_GPL(_restore_lasx_context)
|
||||
|
||||
.L_fpu_fault:
|
||||
li.w a0, -EFAULT # failure
|
||||
|
@ -90,6 +90,7 @@ SYM_FUNC_START(_save_lbt_context)
|
||||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_save_lbt_context)
|
||||
EXPORT_SYMBOL_GPL(_save_lbt_context)
|
||||
|
||||
/*
|
||||
* a0: scr
|
||||
@ -110,6 +111,7 @@ SYM_FUNC_START(_restore_lbt_context)
|
||||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_restore_lbt_context)
|
||||
EXPORT_SYMBOL_GPL(_restore_lbt_context)
|
||||
|
||||
/*
|
||||
* a0: ftop
|
||||
@ -120,6 +122,7 @@ SYM_FUNC_START(_save_ftop_context)
|
||||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_save_ftop_context)
|
||||
EXPORT_SYMBOL_GPL(_save_ftop_context)
|
||||
|
||||
/*
|
||||
* a0: ftop
|
||||
@ -150,6 +153,7 @@ SYM_FUNC_START(_restore_ftop_context)
|
||||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_restore_ftop_context)
|
||||
EXPORT_SYMBOL_GPL(_restore_ftop_context)
|
||||
|
||||
.L_lbt_fault:
|
||||
li.w a0, -EFAULT # failure
|
||||
|
@ -51,27 +51,6 @@
|
||||
#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
|
||||
#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
|
||||
|
||||
/* Assembly functions to move context to/from the FPU */
|
||||
extern asmlinkage int
|
||||
_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
|
||||
extern asmlinkage int
|
||||
_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
|
||||
extern asmlinkage int
|
||||
_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
extern asmlinkage int
|
||||
_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
extern asmlinkage int
|
||||
_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
extern asmlinkage int
|
||||
_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_LBT
|
||||
extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
|
||||
extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
|
||||
extern asmlinkage int _save_ftop_context(void __user *ftop);
|
||||
extern asmlinkage int _restore_ftop_context(void __user *ftop);
|
||||
#endif
|
||||
|
||||
struct rt_sigframe {
|
||||
struct siginfo rs_info;
|
||||
struct ucontext rs_uctx;
|
||||
|
@ -553,9 +553,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
|
||||
die_if_kernel("Kernel ale access", regs);
|
||||
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
|
||||
#else
|
||||
bool pie = regs_irqs_disabled(regs);
|
||||
unsigned int *pc;
|
||||
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_enable();
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
|
||||
@ -582,7 +583,7 @@ sigbus:
|
||||
die_if_kernel("Kernel ale access", regs);
|
||||
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
|
||||
out:
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_disable();
|
||||
#endif
|
||||
irqentry_exit(regs, state);
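The "pie" rewrites in these traps.c hunks rely on regs_irqs_disabled() returning !(regs->csr_prmd & CSR_PRMD_PIE), as the fragment at the top of this section shows, so "if (!pie)" tests the same condition as the old "if (regs->csr_prmd & CSR_PRMD_PIE)". A standalone sketch (not kernel code, bit value illustrative) that checks the equivalence:

#include <assert.h>

#define CSR_PRMD_PIE 0x4UL	/* illustrative value only */

int main(void)
{
	for (unsigned long csr_prmd = 0; csr_prmd < 8; csr_prmd++) {
		int old_test = (csr_prmd & CSR_PRMD_PIE) != 0;
		int pie      = !(csr_prmd & CSR_PRMD_PIE);	/* what regs_irqs_disabled() computes */

		assert(old_test == !pie);	/* old condition == new condition */
	}
	return 0;
}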
|
||||
@ -621,12 +622,13 @@ static void bug_handler(struct pt_regs *regs)
|
||||
asmlinkage void noinstr do_bce(struct pt_regs *regs)
|
||||
{
|
||||
bool user = user_mode(regs);
|
||||
bool pie = regs_irqs_disabled(regs);
|
||||
unsigned long era = exception_era(regs);
|
||||
u64 badv = 0, lower = 0, upper = ULONG_MAX;
|
||||
union loongarch_instruction insn;
|
||||
irqentry_state_t state = irqentry_enter(regs);
|
||||
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_enable();
|
||||
|
||||
current->thread.trap_nr = read_csr_excode();
|
||||
@ -692,7 +694,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
|
||||
force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
|
||||
|
||||
out:
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_disable();
|
||||
|
||||
irqentry_exit(regs, state);
|
||||
@ -710,11 +712,12 @@ bad_era:
|
||||
asmlinkage void noinstr do_bp(struct pt_regs *regs)
|
||||
{
|
||||
bool user = user_mode(regs);
|
||||
bool pie = regs_irqs_disabled(regs);
|
||||
unsigned int opcode, bcode;
|
||||
unsigned long era = exception_era(regs);
|
||||
irqentry_state_t state = irqentry_enter(regs);
|
||||
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_enable();
|
||||
|
||||
if (__get_inst(&opcode, (u32 *)era, user))
|
||||
@ -780,7 +783,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
|
||||
}
|
||||
|
||||
out:
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_disable();
|
||||
|
||||
irqentry_exit(regs, state);
|
||||
@ -1015,6 +1018,7 @@ static void init_restore_lbt(void)
|
||||
|
||||
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
|
||||
{
|
||||
bool pie = regs_irqs_disabled(regs);
|
||||
irqentry_state_t state = irqentry_enter(regs);
|
||||
|
||||
/*
|
||||
@ -1024,7 +1028,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
|
||||
* (including the user using 'MOVGR2GCSR' to turn on TM, which
|
||||
* will not trigger the BTE), we need to check PRMD first.
|
||||
*/
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_enable();
|
||||
|
||||
if (!cpu_has_lbt) {
|
||||
@ -1038,7 +1042,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
|
||||
preempt_enable();
|
||||
|
||||
out:
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_disable();
|
||||
|
||||
irqentry_exit(regs, state);
|
||||
|
@ -21,4 +21,4 @@ kvm-y += intc/eiointc.o
kvm-y += intc/pch_pic.o
kvm-y += irqfd.o

CFLAGS_exit.o += $(call cc-option,-Wno-override-init,)
CFLAGS_exit.o += $(call cc-disable-warning, override-init)
|
||||
|
@ -111,7 +111,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
	ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	if (unlikely(ret)) {
		kvm_err("%s: : read date from addr %llx failed\n", __func__, addr);
		kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
		return ret;
	}
	/* Construct the mask by scanning the bit 27-30 */
@ -127,7 +127,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
	ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	if (unlikely(ret))
		kvm_err("%s: : write date to addr %llx failed\n", __func__, addr);
		kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);

	return ret;
}
|
||||
|
@ -296,10 +296,10 @@ int kvm_arch_enable_virtualization_cpu(void)
|
||||
/*
|
||||
* Enable virtualization features granting guest direct control of
|
||||
* certain features:
|
||||
* GCI=2: Trap on init or unimplement cache instruction.
|
||||
* GCI=2: Trap on init or unimplemented cache instruction.
|
||||
* TORU=0: Trap on Root Unimplement.
|
||||
* CACTRL=1: Root control cache.
|
||||
* TOP=0: Trap on Previlege.
|
||||
* TOP=0: Trap on Privilege.
|
||||
* TOE=0: Trap on Exception.
|
||||
* TIT=0: Trap on Timer.
|
||||
*/
|
||||
|
@ -294,6 +294,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
|
||||
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
|
||||
|
||||
if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
|
||||
kvm_lose_pmu(vcpu);
|
||||
/* make sure the vcpu mode has been written */
|
||||
smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
|
||||
local_irq_enable();
|
||||
@ -902,6 +903,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
|
||||
vcpu->arch.st.guest_addr = 0;
|
||||
memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
|
||||
memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
|
||||
|
||||
/*
|
||||
* When vCPU reset, clear the ESTAT and GINTC registers
|
||||
* Other CSR registers are cleared with function _kvm_setcsr().
|
||||
*/
|
||||
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
|
||||
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
|
@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
|
||||
pmd = pmd_offset(pud, addr);
|
||||
}
|
||||
}
|
||||
return (pte_t *) pmd;
|
||||
return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
|
||||
}
|
||||
|
||||
uint64_t pmd_to_entrylo(unsigned long pmd_val)
|
||||
|
@ -65,9 +65,6 @@ void __init paging_init(void)
|
||||
{
|
||||
unsigned long max_zone_pfns[MAX_NR_ZONES];
|
||||
|
||||
#ifdef CONFIG_ZONE_DMA
|
||||
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
|
||||
#endif
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
|
||||
#endif
|
||||
|
@ -23,6 +23,9 @@
|
||||
*/
|
||||
extern void local_dcache_page_flush(struct page *page);
|
||||
extern void local_icache_page_inv(struct page *page);
|
||||
extern void local_dcache_range_flush(unsigned long start, unsigned long end);
|
||||
extern void local_dcache_range_inv(unsigned long start, unsigned long end);
|
||||
extern void local_icache_range_inv(unsigned long start, unsigned long end);
|
||||
|
||||
/*
|
||||
* Data cache flushing always happen on the local cpu. Instruction cache
|
||||
@ -38,6 +41,20 @@ extern void local_icache_page_inv(struct page *page);
|
||||
extern void smp_icache_page_inv(struct page *page);
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/*
|
||||
* Even if the actual block size is larger than L1_CACHE_BYTES, paddr
|
||||
* can be incremented by L1_CACHE_BYTES. When paddr is written to the
|
||||
* invalidate register, the entire cache line encompassing this address
|
||||
* is invalidated. Each subsequent reference to the same cache line will
|
||||
* not affect the invalidation process.
|
||||
*/
|
||||
#define local_dcache_block_flush(addr) \
|
||||
local_dcache_range_flush(addr, addr + L1_CACHE_BYTES)
|
||||
#define local_dcache_block_inv(addr) \
|
||||
local_dcache_range_inv(addr, addr + L1_CACHE_BYTES)
|
||||
#define local_icache_block_inv(addr) \
|
||||
local_icache_range_inv(addr, addr + L1_CACHE_BYTES)
|
||||
|
||||
/*
|
||||
* Synchronizes caches. Whenever a cpu writes executable code to memory, this
|
||||
* should be called to make sure the processor sees the newly written code.
|
||||
|
@ -15,16 +15,21 @@
|
||||
#ifndef __ASM_OPENRISC_CPUINFO_H
|
||||
#define __ASM_OPENRISC_CPUINFO_H
|
||||
|
||||
#include <asm/spr.h>
|
||||
#include <asm/spr_defs.h>
|
||||
|
||||
struct cache_desc {
|
||||
u32 size;
|
||||
u32 sets;
|
||||
u32 block_size;
|
||||
u32 ways;
|
||||
};
|
||||
|
||||
struct cpuinfo_or1k {
|
||||
u32 clock_frequency;
|
||||
|
||||
u32 icache_size;
|
||||
u32 icache_block_size;
|
||||
u32 icache_ways;
|
||||
|
||||
u32 dcache_size;
|
||||
u32 dcache_block_size;
|
||||
u32 dcache_ways;
|
||||
struct cache_desc icache;
|
||||
struct cache_desc dcache;
|
||||
|
||||
u16 coreid;
|
||||
};
|
||||
@ -32,4 +37,9 @@ struct cpuinfo_or1k {
|
||||
extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
|
||||
extern void setup_cpuinfo(void);
|
||||
|
||||
/*
|
||||
* Check if the cache component exists.
|
||||
*/
|
||||
extern bool cpu_cache_is_present(const unsigned int cache_type);
|
||||
|
||||
#endif /* __ASM_OPENRISC_CPUINFO_H */
|
||||
|
@ -7,7 +7,7 @@ extra-y := vmlinux.lds
|
||||
|
||||
obj-y := head.o setup.o or32_ksyms.o process.o dma.o \
|
||||
traps.o time.o irq.o entry.o ptrace.o signal.o \
|
||||
sys_call_table.o unwinder.o
|
||||
sys_call_table.o unwinder.o cacheinfo.o
|
||||
|
||||
obj-$(CONFIG_SMP) += smp.o sync-timer.o
|
||||
obj-$(CONFIG_STACKTRACE) += stacktrace.o
|
||||
|
arch/openrisc/kernel/cacheinfo.c (new file, 104 lines)
@ -0,0 +1,104 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* OpenRISC cacheinfo support
|
||||
*
|
||||
* Based on work done for MIPS and LoongArch. All original copyrights
|
||||
* apply as per the original source declaration.
|
||||
*
|
||||
* OpenRISC implementation:
|
||||
* Copyright (C) 2025 Sahil Siddiq <sahilcdq@proton.me>
|
||||
*/
|
||||
|
||||
#include <linux/cacheinfo.h>
|
||||
#include <asm/cpuinfo.h>
|
||||
#include <asm/spr.h>
|
||||
#include <asm/spr_defs.h>
|
||||
|
||||
static inline void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type,
|
||||
unsigned int level, struct cache_desc *cache, int cpu)
|
||||
{
|
||||
this_leaf->type = type;
|
||||
this_leaf->level = level;
|
||||
this_leaf->coherency_line_size = cache->block_size;
|
||||
this_leaf->number_of_sets = cache->sets;
|
||||
this_leaf->ways_of_associativity = cache->ways;
|
||||
this_leaf->size = cache->size;
|
||||
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
|
||||
}
|
||||
|
||||
int init_cache_level(unsigned int cpu)
|
||||
{
|
||||
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
int leaves = 0, levels = 0;
|
||||
unsigned long upr = mfspr(SPR_UPR);
|
||||
unsigned long iccfgr, dccfgr;
|
||||
|
||||
if (!(upr & SPR_UPR_UP)) {
|
||||
printk(KERN_INFO
|
||||
"-- no UPR register... unable to detect configuration\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
if (cpu_cache_is_present(SPR_UPR_DCP)) {
|
||||
dccfgr = mfspr(SPR_DCCFGR);
|
||||
cpuinfo->dcache.ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
|
||||
cpuinfo->dcache.sets = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
|
||||
cpuinfo->dcache.block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
|
||||
cpuinfo->dcache.size =
|
||||
cpuinfo->dcache.sets * cpuinfo->dcache.ways * cpuinfo->dcache.block_size;
|
||||
leaves += 1;
|
||||
printk(KERN_INFO
|
||||
"-- dcache: %d bytes total, %d bytes/line, %d set(s), %d way(s)\n",
|
||||
cpuinfo->dcache.size, cpuinfo->dcache.block_size,
|
||||
cpuinfo->dcache.sets, cpuinfo->dcache.ways);
|
||||
} else
|
||||
printk(KERN_INFO "-- dcache disabled\n");
|
||||
|
||||
if (cpu_cache_is_present(SPR_UPR_ICP)) {
|
||||
iccfgr = mfspr(SPR_ICCFGR);
|
||||
cpuinfo->icache.ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
|
||||
cpuinfo->icache.sets = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
|
||||
cpuinfo->icache.block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
|
||||
cpuinfo->icache.size =
|
||||
cpuinfo->icache.sets * cpuinfo->icache.ways * cpuinfo->icache.block_size;
|
||||
leaves += 1;
|
||||
printk(KERN_INFO
|
||||
"-- icache: %d bytes total, %d bytes/line, %d set(s), %d way(s)\n",
|
||||
cpuinfo->icache.size, cpuinfo->icache.block_size,
|
||||
cpuinfo->icache.sets, cpuinfo->icache.ways);
|
||||
} else
|
||||
printk(KERN_INFO "-- icache disabled\n");
|
||||
|
||||
if (!leaves)
|
||||
return -ENOENT;
|
||||
|
||||
levels = 1;
|
||||
|
||||
this_cpu_ci->num_leaves = leaves;
|
||||
this_cpu_ci->num_levels = levels;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int populate_cache_leaves(unsigned int cpu)
|
||||
{
|
||||
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
|
||||
int level = 1;
|
||||
|
||||
if (cpu_cache_is_present(SPR_UPR_DCP)) {
|
||||
ci_leaf_init(this_leaf, CACHE_TYPE_DATA, level, &cpuinfo->dcache, cpu);
|
||||
this_leaf->attributes = ((mfspr(SPR_DCCFGR) & SPR_DCCFGR_CWS) >> 8) ?
|
||||
CACHE_WRITE_BACK : CACHE_WRITE_THROUGH;
|
||||
this_leaf++;
|
||||
}
|
||||
|
||||
if (cpu_cache_is_present(SPR_UPR_ICP))
|
||||
ci_leaf_init(this_leaf, CACHE_TYPE_INST, level, &cpuinfo->icache, cpu);
|
||||
|
||||
this_cpu_ci->cpu_map_populated = true;
|
||||
|
||||
return 0;
|
||||
}
|
@ -17,6 +17,7 @@
|
||||
#include <linux/pagewalk.h>
|
||||
|
||||
#include <asm/cpuinfo.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/spr_defs.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
@ -24,9 +25,6 @@ static int
|
||||
page_set_nocache(pte_t *pte, unsigned long addr,
|
||||
unsigned long next, struct mm_walk *walk)
|
||||
{
|
||||
unsigned long cl;
|
||||
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
|
||||
|
||||
pte_val(*pte) |= _PAGE_CI;
|
||||
|
||||
/*
|
||||
@ -36,8 +34,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
|
||||
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
|
||||
|
||||
/* Flush page out of dcache */
|
||||
for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
|
||||
mtspr(SPR_DCBFR, cl);
|
||||
local_dcache_range_flush(__pa(addr), __pa(next));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -98,21 +95,14 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size)
|
||||
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
unsigned long cl;
|
||||
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
|
||||
|
||||
switch (dir) {
|
||||
case DMA_TO_DEVICE:
|
||||
/* Flush the dcache for the requested range */
|
||||
for (cl = addr; cl < addr + size;
|
||||
cl += cpuinfo->dcache_block_size)
|
||||
mtspr(SPR_DCBFR, cl);
|
||||
local_dcache_range_flush(addr, addr + size);
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
/* Invalidate the dcache for the requested range */
|
||||
for (cl = addr; cl < addr + size;
|
||||
cl += cpuinfo->dcache_block_size)
|
||||
mtspr(SPR_DCBIR, cl);
|
||||
local_dcache_range_inv(addr, addr + size);
|
||||
break;
|
||||
default:
|
||||
/*
|
||||
|
@ -113,21 +113,6 @@ static void print_cpuinfo(void)
|
||||
return;
|
||||
}
|
||||
|
||||
if (upr & SPR_UPR_DCP)
|
||||
printk(KERN_INFO
|
||||
"-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n",
|
||||
cpuinfo->dcache_size, cpuinfo->dcache_block_size,
|
||||
cpuinfo->dcache_ways);
|
||||
else
|
||||
printk(KERN_INFO "-- dcache disabled\n");
|
||||
if (upr & SPR_UPR_ICP)
|
||||
printk(KERN_INFO
|
||||
"-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n",
|
||||
cpuinfo->icache_size, cpuinfo->icache_block_size,
|
||||
cpuinfo->icache_ways);
|
||||
else
|
||||
printk(KERN_INFO "-- icache disabled\n");
|
||||
|
||||
if (upr & SPR_UPR_DMP)
|
||||
printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n",
|
||||
1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
|
||||
@ -155,8 +140,6 @@ static void print_cpuinfo(void)
|
||||
void __init setup_cpuinfo(void)
|
||||
{
|
||||
struct device_node *cpu;
|
||||
unsigned long iccfgr, dccfgr;
|
||||
unsigned long cache_set_size;
|
||||
int cpu_id = smp_processor_id();
|
||||
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id];
|
||||
|
||||
@ -164,20 +147,6 @@ void __init setup_cpuinfo(void)
|
||||
if (!cpu)
|
||||
panic("Couldn't find CPU%d in device tree...\n", cpu_id);
|
||||
|
||||
iccfgr = mfspr(SPR_ICCFGR);
|
||||
cpuinfo->icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
|
||||
cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
|
||||
cpuinfo->icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
|
||||
cpuinfo->icache_size =
|
||||
cache_set_size * cpuinfo->icache_ways * cpuinfo->icache_block_size;
|
||||
|
||||
dccfgr = mfspr(SPR_DCCFGR);
|
||||
cpuinfo->dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
|
||||
cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
|
||||
cpuinfo->dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
|
||||
cpuinfo->dcache_size =
|
||||
cache_set_size * cpuinfo->dcache_ways * cpuinfo->dcache_block_size;
|
||||
|
||||
if (of_property_read_u32(cpu, "clock-frequency",
|
||||
&cpuinfo->clock_frequency)) {
|
||||
printk(KERN_WARNING
|
||||
@ -294,14 +263,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
unsigned int vr, cpucfgr;
|
||||
unsigned int avr;
|
||||
unsigned int version;
|
||||
#ifdef CONFIG_SMP
|
||||
struct cpuinfo_or1k *cpuinfo = v;
|
||||
seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid);
|
||||
#endif
|
||||
|
||||
vr = mfspr(SPR_VR);
|
||||
cpucfgr = mfspr(SPR_CPUCFGR);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid);
|
||||
#endif
|
||||
if (vr & SPR_VR_UVRP) {
|
||||
vr = mfspr(SPR_VR2);
|
||||
version = vr & SPR_VR2_VER;
|
||||
@ -320,14 +289,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
seq_printf(m, "revision\t\t: %d\n", vr & SPR_VR_REV);
|
||||
}
|
||||
seq_printf(m, "frequency\t\t: %ld\n", loops_per_jiffy * HZ);
|
||||
seq_printf(m, "dcache size\t\t: %d bytes\n", cpuinfo->dcache_size);
|
||||
seq_printf(m, "dcache block size\t: %d bytes\n",
|
||||
cpuinfo->dcache_block_size);
|
||||
seq_printf(m, "dcache ways\t\t: %d\n", cpuinfo->dcache_ways);
|
||||
seq_printf(m, "icache size\t\t: %d bytes\n", cpuinfo->icache_size);
|
||||
seq_printf(m, "icache block size\t: %d bytes\n",
|
||||
cpuinfo->icache_block_size);
|
||||
seq_printf(m, "icache ways\t\t: %d\n", cpuinfo->icache_ways);
|
||||
seq_printf(m, "immu\t\t\t: %d entries, %lu ways\n",
|
||||
1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
|
||||
1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
|
||||
|
@ -14,31 +14,70 @@
#include <asm/spr_defs.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>

static __always_inline void cache_loop(struct page *page, const unsigned int reg)
/*
 * Check if the cache component exists.
 */
bool cpu_cache_is_present(const unsigned int cache_type)
{
	unsigned long upr = mfspr(SPR_UPR);
	unsigned long mask = SPR_UPR_UP | cache_type;

	return !((upr & mask) ^ mask);
}
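The XOR test in cpu_cache_is_present() above is just "every bit of mask is set in UPR", i.e. the unit-present bit plus the cache-present bit passed in. A standalone sketch (not kernel code, bit values made up) exercising the equivalence:

#include <assert.h>

int main(void)
{
	const unsigned long SPR_UPR_UP = 0x1, SPR_UPR_DCP = 0x2;	/* illustrative values */
	const unsigned long mask = SPR_UPR_UP | SPR_UPR_DCP;

	for (unsigned long upr = 0; upr < 8; upr++) {
		int kernel_form = !((upr & mask) ^ mask);
		int plain_form  = (upr & mask) == mask;

		assert(kernel_form == plain_form);
	}
	return 0;
}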
|
||||
|
||||
static __always_inline void cache_loop(unsigned long paddr, unsigned long end,
|
||||
const unsigned short reg, const unsigned int cache_type)
|
||||
{
|
||||
if (!cpu_cache_is_present(cache_type))
|
||||
return;
|
||||
|
||||
while (paddr < end) {
|
||||
mtspr(reg, paddr);
|
||||
paddr += L1_CACHE_BYTES;
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline void cache_loop_page(struct page *page, const unsigned short reg,
|
||||
const unsigned int cache_type)
|
||||
{
|
||||
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
|
||||
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
|
||||
unsigned long end = paddr + PAGE_SIZE;
|
||||
|
||||
while (line < paddr + PAGE_SIZE) {
|
||||
mtspr(reg, line);
|
||||
line += L1_CACHE_BYTES;
|
||||
}
|
||||
paddr &= ~(L1_CACHE_BYTES - 1);
|
||||
|
||||
cache_loop(paddr, end, reg, cache_type);
|
||||
}
|
||||
|
||||
void local_dcache_page_flush(struct page *page)
|
||||
{
|
||||
cache_loop(page, SPR_DCBFR);
|
||||
cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP);
|
||||
}
|
||||
EXPORT_SYMBOL(local_dcache_page_flush);
|
||||
|
||||
void local_icache_page_inv(struct page *page)
|
||||
{
|
||||
cache_loop(page, SPR_ICBIR);
|
||||
cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP);
|
||||
}
|
||||
EXPORT_SYMBOL(local_icache_page_inv);
|
||||
|
||||
void local_dcache_range_flush(unsigned long start, unsigned long end)
|
||||
{
|
||||
cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP);
|
||||
}
|
||||
|
||||
void local_dcache_range_inv(unsigned long start, unsigned long end)
|
||||
{
|
||||
cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP);
|
||||
}
|
||||
|
||||
void local_icache_range_inv(unsigned long start, unsigned long end)
|
||||
{
|
||||
cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP);
|
||||
}
|
||||
|
||||
void update_cache(struct vm_area_struct *vma, unsigned long address,
|
||||
pte_t *pte)
|
||||
{
|
||||
@ -58,4 +97,3 @@ void update_cache(struct vm_area_struct *vma, unsigned long address,
|
||||
sync_icache_dcache(folio_page(folio, nr));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
int mem_init_done;
|
||||
|
||||
@ -176,8 +177,8 @@ void __init paging_init(void)
|
||||
barrier();
|
||||
|
||||
/* Invalidate instruction caches after code modification */
|
||||
mtspr(SPR_ICBIR, 0x900);
|
||||
mtspr(SPR_ICBIR, 0xa00);
|
||||
local_icache_block_inv(0x900);
|
||||
local_icache_block_inv(0xa00);
|
||||
|
||||
/* New TLB miss handlers and kernel page tables are in now place.
|
||||
* Make sure that page flags get updated for all pages in TLB by
|
||||
|
@ -115,24 +115,19 @@
|
||||
\old_c
|
||||
.endm
|
||||
|
||||
#define _ALTERNATIVE_CFG(old_c, ...) \
|
||||
ALTERNATIVE_CFG old_c
|
||||
|
||||
#define _ALTERNATIVE_CFG_2(old_c, ...) \
|
||||
ALTERNATIVE_CFG old_c
|
||||
#define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c
|
||||
#define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c
|
||||
|
||||
#else /* !__ASSEMBLY__ */
|
||||
|
||||
#define __ALTERNATIVE_CFG(old_c) \
|
||||
old_c "\n"
|
||||
|
||||
#define _ALTERNATIVE_CFG(old_c, ...) \
|
||||
__ALTERNATIVE_CFG(old_c)
|
||||
|
||||
#define _ALTERNATIVE_CFG_2(old_c, ...) \
|
||||
__ALTERNATIVE_CFG(old_c)
|
||||
#define __ALTERNATIVE_CFG(old_c, ...) old_c "\n"
|
||||
#define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n"
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c)
|
||||
#define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c)
|
||||
|
||||
#endif /* CONFIG_RISCV_ALTERNATIVE */
|
||||
|
||||
/*
|
||||
|
@ -34,11 +34,6 @@ static inline void flush_dcache_page(struct page *page)
|
||||
flush_dcache_folio(page_folio(page));
|
||||
}
|
||||
|
||||
/*
|
||||
* RISC-V doesn't have an instruction to flush parts of the instruction cache,
|
||||
* so instead we just flush the whole thing.
|
||||
*/
|
||||
#define flush_icache_range(start, end) flush_icache_all()
|
||||
#define flush_icache_user_page(vma, pg, addr, len) \
|
||||
do { \
|
||||
if (vma->vm_flags & VM_EXEC) \
|
||||
@ -78,6 +73,16 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/*
|
||||
* RISC-V doesn't have an instruction to flush parts of the instruction cache,
|
||||
* so instead we just flush the whole thing.
|
||||
*/
|
||||
#define flush_icache_range flush_icache_range
|
||||
static inline void flush_icache_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
flush_icache_all();
|
||||
}
|
||||
|
||||
extern unsigned int riscv_cbom_block_size;
|
||||
extern unsigned int riscv_cboz_block_size;
|
||||
void riscv_init_cbo_blocksizes(void);
|
||||
|
@ -19,16 +19,9 @@
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
void arch_kgdb_breakpoint(void);
|
||||
extern unsigned long kgdb_compiled_break;
|
||||
|
||||
static inline void arch_kgdb_breakpoint(void)
|
||||
{
|
||||
asm(".global kgdb_compiled_break\n"
|
||||
".option norvc\n"
|
||||
"kgdb_compiled_break: ebreak\n"
|
||||
".option rvc\n");
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#define DBG_REG_ZERO "zero"
|
||||
|
@ -62,8 +62,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
					 unsigned long *args)
{
	args[0] = regs->orig_a0;
	args++;
	memcpy(args, &regs->a1, 5 * sizeof(args[0]));
	args[1] = regs->a1;
	args[2] = regs->a2;
	args[3] = regs->a3;
	args[4] = regs->a4;
	args[5] = regs->a5;
}
|
||||
|
||||
static inline int syscall_get_arch(struct task_struct *task)
|
||||
|
@ -9,8 +9,8 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
CFLAGS_syscall_table.o += $(call cc-disable-warning, override-init)
CFLAGS_compat_syscall_table.o += $(call cc-disable-warning, override-init)

ifdef CONFIG_KEXEC_CORE
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
|
||||
|
@ -254,6 +254,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
|
||||
regs->epc = pc;
|
||||
}
|
||||
|
||||
noinline void arch_kgdb_breakpoint(void)
|
||||
{
|
||||
asm(".global kgdb_compiled_break\n"
|
||||
"kgdb_compiled_break: ebreak\n");
|
||||
}
|
||||
|
||||
void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
|
||||
char *remcom_out_buffer)
|
||||
{
|
||||
|
@ -73,16 +73,17 @@ static bool duplicate_rela(const Elf_Rela *rela, int idx)
|
||||
static void count_max_entries(Elf_Rela *relas, int num,
|
||||
unsigned int *plts, unsigned int *gots)
|
||||
{
|
||||
unsigned int type, i;
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
type = ELF_RISCV_R_TYPE(relas[i].r_info);
|
||||
if (type == R_RISCV_CALL_PLT) {
|
||||
for (int i = 0; i < num; i++) {
|
||||
switch (ELF_R_TYPE(relas[i].r_info)) {
|
||||
case R_RISCV_CALL_PLT:
|
||||
case R_RISCV_PLT32:
|
||||
if (!duplicate_rela(relas, i))
|
||||
(*plts)++;
|
||||
} else if (type == R_RISCV_GOT_HI20) {
|
||||
break;
|
||||
case R_RISCV_GOT_HI20:
|
||||
if (!duplicate_rela(relas, i))
|
||||
(*gots)++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -648,7 +648,7 @@ process_accumulated_relocations(struct module *me,
|
||||
kfree(bucket_iter);
|
||||
}
|
||||
|
||||
kfree(*relocation_hashtable);
|
||||
kvfree(*relocation_hashtable);
|
||||
}
|
||||
|
||||
static int add_relocation_to_accumulate(struct module *me, int type,
|
||||
@ -752,9 +752,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
|
||||
|
||||
hashtable_size <<= should_double_size;
|
||||
|
||||
*relocation_hashtable = kmalloc_array(hashtable_size,
|
||||
sizeof(**relocation_hashtable),
|
||||
GFP_KERNEL);
|
||||
/* Number of relocations may be large, so kvmalloc it */
|
||||
*relocation_hashtable = kvmalloc_array(hashtable_size,
|
||||
sizeof(**relocation_hashtable),
|
||||
GFP_KERNEL);
|
||||
if (!*relocation_hashtable)
|
||||
return 0;
|
||||
|
||||
@ -859,7 +860,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
|
||||
}
|
||||
|
||||
j++;
|
||||
if (j > sechdrs[relsec].sh_size / sizeof(*rel))
|
||||
if (j == num_relocations)
|
||||
j = 0;
|
||||
|
||||
} while (j_idx != j);
|
||||
|
@ -167,6 +167,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
|
||||
/* Initialize the slot */
|
||||
void *kaddr = kmap_atomic(page);
|
||||
void *dst = kaddr + (vaddr & ~PAGE_MASK);
|
||||
unsigned long start = (unsigned long)dst;
|
||||
|
||||
memcpy(dst, src, len);
|
||||
|
||||
@ -176,13 +177,6 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
|
||||
*(uprobe_opcode_t *)dst = __BUG_INSN_32;
|
||||
}
|
||||
|
||||
flush_icache_range(start, start + len);
|
||||
kunmap_atomic(kaddr);
|
||||
|
||||
/*
|
||||
* We probably need flush_icache_user_page() but it needs vma.
|
||||
* This should work on most of architectures by default. If
|
||||
* architecture needs to do something different it can define
|
||||
* its own version of the function.
|
||||
*/
|
||||
flush_dcache_page(page);
|
||||
}
|
||||
|
@ -66,6 +66,9 @@ static struct resource bss_res = { .name = "Kernel bss", };
|
||||
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
|
||||
#endif
|
||||
|
||||
static int num_standard_resources;
|
||||
static struct resource *standard_resources;
|
||||
|
||||
static int __init add_resource(struct resource *parent,
|
||||
struct resource *res)
|
||||
{
|
||||
@ -139,7 +142,7 @@ static void __init init_resources(void)
|
||||
struct resource *res = NULL;
|
||||
struct resource *mem_res = NULL;
|
||||
size_t mem_res_sz = 0;
|
||||
int num_resources = 0, res_idx = 0;
|
||||
int num_resources = 0, res_idx = 0, non_resv_res = 0;
|
||||
int ret = 0;
|
||||
|
||||
/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
|
||||
@ -193,6 +196,7 @@ static void __init init_resources(void)
|
||||
/* Add /memory regions to the resource tree */
|
||||
for_each_mem_region(region) {
|
||||
res = &mem_res[res_idx--];
|
||||
non_resv_res++;
|
||||
|
||||
if (unlikely(memblock_is_nomap(region))) {
|
||||
res->name = "Reserved";
|
||||
@ -210,6 +214,9 @@ static void __init init_resources(void)
|
||||
goto error;
|
||||
}
|
||||
|
||||
num_standard_resources = non_resv_res;
|
||||
standard_resources = &mem_res[res_idx + 1];
|
||||
|
||||
/* Clean-up any unused pre-allocated resources */
|
||||
if (res_idx >= 0)
|
||||
memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
|
||||
@ -221,6 +228,33 @@ static void __init init_resources(void)
|
||||
memblock_free(mem_res, mem_res_sz);
|
||||
}
|
||||
|
||||
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; i++) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "Reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);
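The clamp above first widens each reserved range to whole pages (PFN_DOWN for the start, PFN_UP for the end) before intersecting it with the parent resource. A standalone sketch (not kernel code) with made-up addresses and a 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)

int main(void)
{
	unsigned long r_start = 0x80200123UL, r_end = 0x80203001UL;	/* made-up reserved range */

	printf("start=%#lx end=%#lx\n",
	       PFN_PHYS(PFN_DOWN(r_start)),	/* 0x80200000 */
	       PFN_PHYS(PFN_UP(r_end)) - 1);	/* 0x80203fff */
	return 0;
}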
|
||||
|
||||
static void __init parse_dtb(void)
|
||||
{
|
||||
|
@ -439,29 +439,36 @@ static int __init check_unaligned_access_all_cpus(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
if (unaligned_scalar_speed_param == RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN &&
|
||||
!check_unaligned_access_emulated_all_cpus()) {
|
||||
check_unaligned_access_speed_all_cpus();
|
||||
} else {
|
||||
pr_info("scalar unaligned access speed set to '%s' by command line\n",
|
||||
speed_str[unaligned_scalar_speed_param]);
|
||||
if (unaligned_scalar_speed_param != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
|
||||
pr_info("scalar unaligned access speed set to '%s' (%lu) by command line\n",
|
||||
speed_str[unaligned_scalar_speed_param], unaligned_scalar_speed_param);
|
||||
for_each_online_cpu(cpu)
|
||||
per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
|
||||
} else if (!check_unaligned_access_emulated_all_cpus()) {
|
||||
check_unaligned_access_speed_all_cpus();
|
||||
}
|
||||
|
||||
if (unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
|
||||
if (!has_vector() &&
|
||||
unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED) {
|
||||
pr_warn("vector support is not available, ignoring unaligned_vector_speed=%s\n",
|
||||
speed_str[unaligned_vector_speed_param]);
|
||||
} else {
|
||||
pr_info("vector unaligned access speed set to '%s' (%lu) by command line\n",
|
||||
speed_str[unaligned_vector_speed_param], unaligned_vector_speed_param);
|
||||
}
|
||||
}
|
||||
|
||||
if (!has_vector())
|
||||
unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
|
||||
|
||||
if (unaligned_vector_speed_param == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN &&
|
||||
!check_vector_unaligned_access_emulated_all_cpus() &&
|
||||
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
|
||||
kthread_run(vec_check_unaligned_access_speed_all_cpus,
|
||||
NULL, "vec_check_unaligned_access_speed_all_cpus");
|
||||
} else {
|
||||
pr_info("vector unaligned access speed set to '%s' by command line\n",
|
||||
speed_str[unaligned_vector_speed_param]);
|
||||
if (unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
|
||||
for_each_online_cpu(cpu)
|
||||
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
|
||||
} else if (!check_vector_unaligned_access_emulated_all_cpus() &&
|
||||
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
|
||||
kthread_run(vec_check_unaligned_access_speed_all_cpus,
|
||||
NULL, "vec_check_unaligned_access_speed_all_cpus");
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -59,7 +59,7 @@ KBUILD_CFLAGS += $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
|
||||
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
|
||||
|
||||
quiet_cmd_image = BUILD $@
|
||||
cmd_image = cp $< $@; truncate -s %4K $@; cat $(obj)/vmlinux.bin >>$@
|
||||
cmd_image = (dd if=$< bs=4k conv=sync status=none; cat $(filter-out $<,$(real-prereqs))) >$@
|
||||
|
||||
$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin FORCE
|
||||
$(call if_changed,image)
|
||||
|
@ -34,11 +34,14 @@ static bool early_is_tdx_guest(void)
|
||||
|
||||
void arch_accept_memory(phys_addr_t start, phys_addr_t end)
|
||||
{
|
||||
static bool sevsnp;
|
||||
|
||||
/* Platform-specific memory-acceptance call goes here */
|
||||
if (early_is_tdx_guest()) {
|
||||
if (!tdx_accept_memory(start, end))
|
||||
panic("TDX: Failed to accept memory\n");
|
||||
} else if (sev_snp_enabled()) {
|
||||
} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
|
||||
sevsnp = true;
|
||||
snp_accept_memory(start, end);
|
||||
} else {
|
||||
error("Cannot accept memory: unknown platform\n");
|
||||
|
@ -164,10 +164,7 @@ bool sev_snp_enabled(void)
|
||||
|
||||
static void __page_state_change(unsigned long paddr, enum psc_op op)
|
||||
{
|
||||
u64 val;
|
||||
|
||||
if (!sev_snp_enabled())
|
||||
return;
|
||||
u64 val, msr;
|
||||
|
||||
/*
|
||||
* If private -> shared then invalidate the page before requesting the
|
||||
@ -176,6 +173,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
|
||||
if (op == SNP_PAGE_STATE_SHARED)
|
||||
pvalidate_4k_page(paddr, paddr, false);
|
||||
|
||||
/* Save the current GHCB MSR value */
|
||||
msr = sev_es_rd_ghcb_msr();
|
||||
|
||||
/* Issue VMGEXIT to change the page state in RMP table. */
|
||||
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
|
||||
VMGEXIT();
|
||||
@ -185,6 +185,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
|
||||
if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
|
||||
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
|
||||
|
||||
/* Restore the GHCB MSR value */
|
||||
sev_es_wr_ghcb_msr(msr);
|
||||
|
||||
/*
|
||||
* Now that page state is changed in the RMP table, validate it so that it is
|
||||
* consistent with the RMP entry.
|
||||
@ -195,11 +198,17 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
|
||||
|
||||
void snp_set_page_private(unsigned long paddr)
|
||||
{
|
||||
if (!sev_snp_enabled())
|
||||
return;
|
||||
|
||||
__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
|
||||
}
|
||||
|
||||
void snp_set_page_shared(unsigned long paddr)
|
||||
{
|
||||
if (!sev_snp_enabled())
|
||||
return;
|
||||
|
||||
__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
|
||||
}
|
||||
|
||||
@ -223,56 +232,10 @@ static bool early_setup_ghcb(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
|
||||
phys_addr_t pa, phys_addr_t pa_end)
|
||||
{
|
||||
struct psc_hdr *hdr;
|
||||
struct psc_entry *e;
|
||||
unsigned int i;
|
||||
|
||||
hdr = &desc->hdr;
|
||||
memset(hdr, 0, sizeof(*hdr));
|
||||
|
||||
e = desc->entries;
|
||||
|
||||
i = 0;
|
||||
while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
|
||||
hdr->end_entry = i;
|
||||
|
||||
e->gfn = pa >> PAGE_SHIFT;
|
||||
e->operation = SNP_PAGE_STATE_PRIVATE;
|
||||
if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
|
||||
e->pagesize = RMP_PG_SIZE_2M;
|
||||
pa += PMD_SIZE;
|
||||
} else {
|
||||
e->pagesize = RMP_PG_SIZE_4K;
|
||||
pa += PAGE_SIZE;
|
||||
}
|
||||
|
||||
e++;
|
||||
i++;
|
||||
}
|
||||
|
||||
if (vmgexit_psc(boot_ghcb, desc))
|
||||
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
|
||||
|
||||
pvalidate_pages(desc);
|
||||
|
||||
return pa;
|
||||
}
|
||||
|
||||
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
|
||||
{
|
||||
struct snp_psc_desc desc = {};
|
||||
unsigned int i;
|
||||
phys_addr_t pa;
|
||||
|
||||
if (!boot_ghcb && !early_setup_ghcb())
|
||||
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
|
||||
|
||||
pa = start;
|
||||
while (pa < end)
|
||||
pa = __snp_accept_memory(&desc, pa, end);
|
||||
for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
|
||||
__page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
|
||||
}
|
||||
|
||||
void sev_es_shutdown_ghcb(void)
|
||||
|
@ -12,11 +12,13 @@
|
||||
|
||||
bool sev_snp_enabled(void);
|
||||
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
|
||||
u64 sev_get_status(void);
|
||||
|
||||
#else
|
||||
|
||||
static inline bool sev_snp_enabled(void) { return false; }
|
||||
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
|
||||
static inline u64 sev_get_status(void) { return 0; }
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -629,7 +629,7 @@ int x86_pmu_hw_config(struct perf_event *event)
|
||||
if (event->attr.type == event->pmu->type)
|
||||
event->hw.config |= x86_pmu_get_event_config(event);
|
||||
|
||||
if (!event->attr.freq && x86_pmu.limit_period) {
|
||||
if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) {
|
||||
s64 left = event->attr.sample_period;
|
||||
x86_pmu.limit_period(event, &left);
|
||||
if (left > event->attr.sample_period)
|
||||
|
@ -3049,7 +3049,6 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
int bit;
|
||||
int handled = 0;
|
||||
u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
|
||||
|
||||
inc_irq_stat(apic_perf_irqs);
|
||||
|
||||
@ -3093,7 +3092,6 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
|
||||
handled++;
|
||||
x86_pmu_handle_guest_pebs(regs, &data);
|
||||
static_call(x86_pmu_drain_pebs)(regs, &data);
|
||||
status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
|
||||
|
||||
/*
|
||||
* PMI throttle may be triggered, which stops the PEBS event.
|
||||
@ -3104,6 +3102,15 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
|
||||
*/
|
||||
if (pebs_enabled != cpuc->pebs_enabled)
|
||||
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
|
||||
|
||||
/*
|
||||
* Above PEBS handler (PEBS counters snapshotting) has updated fixed
|
||||
* counter 3 and perf metrics counts if they are in counter group,
|
||||
* unnecessary to update again.
|
||||
*/
|
||||
if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
|
||||
is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))
|
||||
status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3123,6 +3130,8 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
|
||||
static_call(intel_pmu_update_topdown_event)(NULL, NULL);
|
||||
}
|
||||
|
||||
status &= hybrid(cpuc->pmu, intel_ctrl);
|
||||
|
||||
/*
|
||||
* Checkpointed counters can lead to 'spurious' PMIs because the
|
||||
* rollback caused by the PMI will have cleared the overflow status
|
||||
@ -7305,8 +7314,17 @@ __init int intel_pmu_init(void)
|
||||
name = "meteorlake_hybrid";
|
||||
break;
|
||||
|
||||
case INTEL_PANTHERLAKE_L:
|
||||
pr_cont("Pantherlake Hybrid events, ");
|
||||
name = "pantherlake_hybrid";
|
||||
goto lnl_common;
|
||||
|
||||
case INTEL_LUNARLAKE_M:
|
||||
case INTEL_ARROWLAKE:
|
||||
pr_cont("Lunarlake Hybrid events, ");
|
||||
name = "lunarlake_hybrid";
|
||||
|
||||
lnl_common:
|
||||
intel_pmu_init_hybrid(hybrid_big_small);
|
||||
|
||||
x86_pmu.pebs_latency_data = lnl_latency_data;
|
||||
@ -7328,8 +7346,6 @@ __init int intel_pmu_init(void)
|
||||
intel_pmu_init_skt(&pmu->pmu);
|
||||
|
||||
intel_pmu_pebs_data_source_lnl();
|
||||
pr_cont("Lunarlake Hybrid events, ");
|
||||
name = "lunarlake_hybrid";
|
||||
break;
|
||||
|
||||
case INTEL_ARROWLAKE_H:
|
||||
|
@ -1399,8 +1399,10 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
|
||||
* + precise_ip < 2 for the non event IP
|
||||
* + For RTM TSX weight we need GPRs for the abort code.
|
||||
*/
|
||||
gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
|
||||
(attr->sample_regs_intr & PEBS_GP_REGS);
|
||||
gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) &&
|
||||
(attr->sample_regs_intr & PEBS_GP_REGS)) ||
|
||||
((sample_type & PERF_SAMPLE_REGS_USER) &&
|
||||
(attr->sample_regs_user & PEBS_GP_REGS));
|
||||
|
||||
tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
|
||||
((attr->config & INTEL_ARCH_EVENT_MASK) ==
|
||||
@ -2123,7 +2125,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
|
||||
regs->flags &= ~PERF_EFLAGS_EXACT;
|
||||
}
|
||||
|
||||
if (sample_type & PERF_SAMPLE_REGS_INTR)
|
||||
if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
|
||||
adaptive_pebs_save_regs(regs, gprs);
|
||||
}
|
||||
|
||||
|
@ -4891,28 +4891,28 @@ static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
|
||||
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
|
||||
/* Free-Running IIO BANDWIDTH IN Counters */
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.0517578125e-5"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.0517578125e-5"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.0517578125e-5"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.0517578125e-5"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.0517578125e-5"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.0517578125e-5"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.0517578125e-5"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
|
||||
	INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.0517578125e-5"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
	{ /* end: all zeroes */ },
};
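The .scale strings in this table are MiB per counter increment. The old value 3.814697266e-6 is 4/2^20 and the new value 3.0517578125e-5 is 32/2^20, so the change appears to make the scale match counters that tick once per 32 bytes rather than per 4 bytes. A standalone sketch of that arithmetic (not kernel code):

#include <stdio.h>

int main(void)
{
	printf("old: %.12g MiB per tick (4-byte assumption)\n",  4.0 / (1 << 20));
	printf("new: %.12g MiB per tick (32-byte assumption)\n", 32.0 / (1 << 20));
	return 0;
}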
|
||||
@ -5485,37 +5485,6 @@ static struct freerunning_counters icx_iio_freerunning[] = {
|
||||
[ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
|
||||
};
|
||||
|
||||
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
|
||||
/* Free-Running IIO CLOCKS Counter */
|
||||
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
|
||||
/* Free-Running IIO BANDWIDTH IN Counters */
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
|
||||
{ /* end: all zeroes */ },
|
||||
};
|
||||
|
||||
static struct intel_uncore_type icx_uncore_iio_free_running = {
|
||||
.name = "iio_free_running",
|
||||
.num_counters = 9,
|
||||
@ -5523,7 +5492,7 @@ static struct intel_uncore_type icx_uncore_iio_free_running = {
|
||||
.num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
|
||||
.freerunning = icx_iio_freerunning,
|
||||
.ops = &skx_uncore_iio_freerunning_ops,
|
||||
.event_descs = icx_uncore_iio_freerunning_events,
|
||||
.event_descs = snr_uncore_iio_freerunning_events,
|
||||
.format_group = &skx_uncore_iio_freerunning_format_group,
|
||||
};
|
||||
|
||||
@ -6320,69 +6289,13 @@ static struct freerunning_counters spr_iio_freerunning[] = {
|
||||
[SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 },
|
||||
};
|
||||
|
||||
static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
|
||||
/* Free-Running IIO CLOCKS Counter */
|
||||
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
|
||||
/* Free-Running IIO BANDWIDTH IN Counters */
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
|
||||
/* Free-Running IIO BANDWIDTH OUT Counters */
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"),
|
||||
{ /* end: all zeroes */ },
|
||||
};
|
||||
|
||||
static struct intel_uncore_type spr_uncore_iio_free_running = {
|
||||
.name = "iio_free_running",
|
||||
.num_counters = 17,
|
||||
.num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX,
|
||||
.freerunning = spr_iio_freerunning,
|
||||
.ops = &skx_uncore_iio_freerunning_ops,
|
||||
.event_descs = spr_uncore_iio_freerunning_events,
|
||||
.event_descs = snr_uncore_iio_freerunning_events,
|
||||
.format_group = &skx_uncore_iio_freerunning_format_group,
|
||||
};
|
||||
|
||||
|
@ -126,6 +126,8 @@
|
||||
#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD) /* Redwood Cove */
|
||||
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
|
||||
|
||||
#define INTEL_BARTLETTLAKE IFM(6, 0xD7) /* Raptor Cove */
|
||||
|
||||
/* "Hybrid" Processors (P-Core/E-Core) */
|
||||
|
||||
#define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include <asm/mtrr.h>
|
||||
#include <asm/msr-index.h>
|
||||
#include <asm/asm.h>
|
||||
#include <asm/irq_remapping.h>
|
||||
#include <asm/kvm_page_track.h>
|
||||
#include <asm/kvm_vcpu_regs.h>
|
||||
#include <asm/reboot.h>
|
||||
@ -2423,4 +2424,9 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
|
||||
*/
|
||||
#define KVM_EXIT_HYPERCALL_MBZ GENMASK_ULL(31, 1)
|
||||
|
||||
static inline bool kvm_arch_has_irq_bypass(void)
|
||||
{
|
||||
return enable_apicv && irq_remapping_cap(IRQ_POSTING_CAP);
|
||||
}
|
||||
|
||||
#endif /* _ASM_X86_KVM_HOST_H */
|
||||
|
@ -6,6 +6,8 @@
|
||||
#include <linux/mm.h> /* for struct page */
|
||||
#include <linux/pagemap.h>
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
|
||||
#define __HAVE_ARCH_PTE_ALLOC_ONE
|
||||
#define __HAVE_ARCH_PGD_FREE
|
||||
#include <asm-generic/pgalloc.h>
|
||||
@ -29,16 +31,17 @@ static inline void paravirt_release_pud(unsigned long pfn) {}
|
||||
static inline void paravirt_release_p4d(unsigned long pfn) {}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
 * Instead of one PGD, we acquire two PGDs. Being order-1, it is
 * both 8k in size and 8k-aligned. That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 * In case of Page Table Isolation active, we acquire two PGDs instead of one.
 * Being order-1, it is both 8k in size and 8k-aligned. That lets us just
 * flip bit 12 in a pointer to swap between the two 4k halves.
 */
#define PGD_ALLOCATION_ORDER 1
#else
#define PGD_ALLOCATION_ORDER 0
#endif
static inline unsigned int pgd_allocation_order(void)
{
	if (cpu_feature_enabled(X86_FEATURE_PTI))
		return 1;
	return 0;
}
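A standalone sketch (not kernel code) of what the change above amounts to: the allocation order is now chosen at runtime from the PTI feature check instead of being fixed by the config option, giving an 8 KiB, 8 KiB-aligned PGD pair when PTI is active and a single 4 KiB PGD otherwise (4 KiB page assumed):

#include <stdio.h>
#include <stdbool.h>

static bool pti_active;	/* stand-in for cpu_feature_enabled(X86_FEATURE_PTI) */

static unsigned int pgd_allocation_order(void)
{
	return pti_active ? 1 : 0;
}

int main(void)
{
	for (int on = 0; on <= 1; on++) {
		pti_active = on;
		printf("PTI %s: order %u -> %u KiB PGD allocation\n",
		       on ? "on" : "off", pgd_allocation_order(),
		       4u << pgd_allocation_order());
	}
	return 0;
}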
|
||||
|
||||
/*
|
||||
* Allocate and free page tables.
|
||||
|
@ -869,6 +869,16 @@ static void init_amd_zen1(struct cpuinfo_x86 *c)
|
||||
|
||||
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
|
||||
setup_force_cpu_bug(X86_BUG_DIV0);
|
||||
|
||||
/*
|
||||
* Turn off the Instructions Retired free counter on machines that are
|
||||
* susceptible to erratum #1054 "Instructions Retired Performance
|
||||
* Counter May Be Inaccurate".
|
||||
*/
|
||||
if (c->x86_model < 0x30) {
|
||||
msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
|
||||
clear_cpu_cap(c, X86_FEATURE_IRPERF);
|
||||
}
|
||||
}
|
||||
|
||||
static bool cpu_has_zenbleed_microcode(void)
|
||||
@ -1052,13 +1062,8 @@ static void init_amd(struct cpuinfo_x86 *c)
|
||||
if (!cpu_feature_enabled(X86_FEATURE_XENPV))
|
||||
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
|
||||
|
||||
/*
|
||||
* Turn on the Instructions Retired free counter on machines not
|
||||
* susceptible to erratum #1054 "Instructions Retired Performance
|
||||
* Counter May Be Inaccurate".
|
||||
*/
|
||||
if (cpu_has(c, X86_FEATURE_IRPERF) &&
|
||||
(boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
|
||||
/* Enable the Instructions Retired free counter */
|
||||
if (cpu_has(c, X86_FEATURE_IRPERF))
|
||||
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
|
||||
|
||||
check_null_seg_clears_base(c);
|
||||
|
@@ -199,6 +199,12 @@ static bool need_sha_check(u32 cur_rev)
case 0xa70c0: return cur_rev <= 0xa70C009; break;
case 0xaa001: return cur_rev <= 0xaa00116; break;
case 0xaa002: return cur_rev <= 0xaa00218; break;
case 0xb0021: return cur_rev <= 0xb002146; break;
case 0xb1010: return cur_rev <= 0xb101046; break;
case 0xb2040: return cur_rev <= 0xb204031; break;
case 0xb4040: return cur_rev <= 0xb404031; break;
case 0xb6000: return cur_rev <= 0xb600031; break;
case 0xb7000: return cur_rev <= 0xb700031; break;
default: break;
}

@@ -214,8 +220,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
struct sha256_state s;
int i;

if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
x86_family(bsp_cpuid_1_eax) > 0x19)
if (x86_family(bsp_cpuid_1_eax) < 0x17)
return true;

if (!need_sha_check(cur_rev))
@@ -1299,6 +1299,14 @@ void __init e820__memblock_setup(void)
memblock_add(entry->addr, entry->size);
}

/*
* 32-bit systems are limited to 4BG of memory even with HIGHMEM and
* to even less without it.
* Discard memory after max_pfn - the actual limit detected at runtime.
*/
if (IS_ENABLED(CONFIG_X86_32))
memblock_remove(PFN_PHYS(max_pfn), -1);

/* Throw away partial pages: */
memblock_trim_memory(PAGE_SIZE);
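
For context, PFN_PHYS(pfn) is simply pfn << PAGE_SHIFT, so the added memblock_remove(PFN_PHYS(max_pfn), -1) asks memblock to drop everything from the first byte above max_pfn to the end of the physical address space (the oversized length is clamped internally). A small arithmetic sketch, illustrative only, assuming 4 KB pages and an invented max_pfn:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* 4k pages */
    #define PFN_PHYS(pfn) ((uint64_t)(pfn) << PAGE_SHIFT)

    int main(void)
    {
        /* e.g. a 32-bit kernel without HIGHMEM might cap max_pfn near 896 MiB */
        uint64_t max_pfn = 0x38000;            /* illustrative value */
        uint64_t limit = PFN_PHYS(max_pfn);

        /* memblock_remove(limit, -1) would discard [limit, end of address space) */
        printf("discarding RAM above %#llx (%llu MiB)\n",
               (unsigned long long)limit,
               (unsigned long long)(limit >> 20));
        return 0;
    }
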
@@ -46,7 +46,8 @@ bool __init pit_timer_init(void)
* VMMs otherwise steal CPU time just to pointlessly waggle
* the (masked) IRQ.
*/
clockevent_i8253_disable();
scoped_guard(irq)
clockevent_i8253_disable();
return false;
}
clockevent_i8253_init(true);
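
scoped_guard(irq) comes from the kernel's cleanup.h-based guard infrastructure: local interrupts are disabled for exactly the statement that follows and re-enabled when that scope is left, even on early exits. The user-space sketch below (illustrative only, not the kernel implementation) imitates the idea with GCC/Clang's cleanup attribute; the guard macro and printouts are invented:

    #include <stdio.h>

    /* Cleanup handler: runs automatically when the guard variable leaves scope. */
    static void irq_state_restore(int *dummy)
    {
        (void)dummy;
        printf("  <- interrupts re-enabled (scope left)\n");
    }

    #define scoped_irq_guard() \
        __attribute__((cleanup(irq_state_restore), unused)) int __guard = \
            (printf("  -> interrupts disabled\n"), 0)

    static int timer_init(int hypervisor)
    {
        if (hypervisor) {
            {
                scoped_irq_guard();
                printf("     disabling clockevent under the guard\n");
            }   /* cleanup runs here, before the return below */
            return 0;
        }
        printf("full PIT init\n");
        return 1;
    }

    int main(void)
    {
        timer_init(1);
        timer_init(0);
        return 0;
    }
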
@@ -42,7 +42,7 @@ static void load_segments(void)

static void machine_kexec_free_page_tables(struct kimage *image)
{
free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);
free_pages((unsigned long)image->arch.pgd, pgd_allocation_order());
image->arch.pgd = NULL;
#ifdef CONFIG_X86_PAE
free_page((unsigned long)image->arch.pmd0);
@@ -59,7 +59,7 @@ static void machine_kexec_free_page_tables(struct kimage *image)
static int machine_kexec_alloc_page_tables(struct kimage *image)
{
image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
PGD_ALLOCATION_ORDER);
pgd_allocation_order());
#ifdef CONFIG_X86_PAE
image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
@@ -796,12 +796,15 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
struct amd_svm_iommu_ir *ir;
u64 entry;

if (WARN_ON_ONCE(!pi->ir_data))
return -EINVAL;

/**
* In some cases, the existing irte is updated and re-set,
* so we need to check here if it's already been * added
* to the ir_list.
*/
if (pi->ir_data && (pi->prev_ga_tag != 0)) {
if (pi->prev_ga_tag) {
struct kvm *kvm = svm->vcpu.kvm;
u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
@@ -820,7 +823,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
* Allocating new amd_iommu_pi_data, which will get
* add to the per-vcpu ir_list.
*/
ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
if (!ir) {
ret = -ENOMEM;
goto out;
@@ -896,10 +899,10 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_irq_routing_table *irq_rt;
bool enable_remapped_mode = true;
int idx, ret = 0;

if (!kvm_arch_has_assigned_device(kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP))
if (!kvm_arch_has_assigned_device(kvm) || !kvm_arch_has_irq_bypass())
return 0;

pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
@@ -933,6 +936,8 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
kvm_vcpu_apicv_active(&svm->vcpu)) {
struct amd_iommu_pi_data pi;

enable_remapped_mode = false;

/* Try to enable guest_mode in IRTE */
pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
AVIC_HPA_MASK);
@@ -951,33 +956,6 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
*/
if (!ret && pi.is_guest_mode)
svm_ir_list_add(svm, &pi);
} else {
/* Use legacy mode in IRTE */
struct amd_iommu_pi_data pi;

/**
* Here, pi is used to:
* - Tell IOMMU to use legacy mode for this interrupt.
* - Retrieve ga_tag of prior interrupt remapping data.
*/
pi.prev_ga_tag = 0;
pi.is_guest_mode = false;
ret = irq_set_vcpu_affinity(host_irq, &pi);

/**
* Check if the posted interrupt was previously
* setup with the guest_mode by checking if the ga_tag
* was cached. If so, we need to clean up the per-vcpu
* ir_list.
*/
if (!ret && pi.prev_ga_tag) {
int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
struct kvm_vcpu *vcpu;

vcpu = kvm_get_vcpu_by_id(kvm, id);
if (vcpu)
svm_ir_list_del(to_svm(vcpu), &pi);
}
}

if (!ret && svm) {
@@ -993,6 +971,34 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
}

ret = 0;
if (enable_remapped_mode) {
/* Use legacy mode in IRTE */
struct amd_iommu_pi_data pi;

/**
* Here, pi is used to:
* - Tell IOMMU to use legacy mode for this interrupt.
* - Retrieve ga_tag of prior interrupt remapping data.
*/
pi.prev_ga_tag = 0;
pi.is_guest_mode = false;
ret = irq_set_vcpu_affinity(host_irq, &pi);

/**
* Check if the posted interrupt was previously
* setup with the guest_mode by checking if the ga_tag
* was cached. If so, we need to clean up the per-vcpu
* ir_list.
*/
if (!ret && pi.prev_ga_tag) {
int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
struct kvm_vcpu *vcpu;

vcpu = kvm_get_vcpu_by_id(kvm, id);
if (vcpu)
svm_ir_list_del(to_svm(vcpu), &pi);
}
}
out:
srcu_read_unlock(&kvm->irq_srcu, idx);
return ret;
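
Both the avic_pi_update_irte() hunks above and the analogous vmx_pi_update_irte() hunks below follow the same restructuring: rather than switching each routing entry back to legacy/remapped mode inside the loop, the loop only records whether posting was actually set up, and a single fallback runs once after it. A condensed, generic C sketch of that shape, illustrative only (the names and helper below are not the KVM code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for irq_set_vcpu_affinity(irq, cfg-or-NULL). */
    static int set_affinity(int irq, const char *mode)
    {
        printf("irq %d -> %s\n", irq, mode);
        return 0;
    }

    static int update_irte(int irq, const bool postable[], int n)
    {
        bool enable_remapped_mode = true;
        int ret = 0;

        for (int i = 0; i < n; i++) {
            if (!postable[i])
                continue;            /* no per-entry fallback inside the loop */

            enable_remapped_mode = false;
            ret = set_affinity(irq, "posted");
            if (ret < 0)
                return ret;
        }

        /* Single fallback path, executed at most once. */
        if (enable_remapped_mode)
            ret = set_affinity(irq, "remapped");

        return ret;
    }

    int main(void)
    {
        bool none_postable[2] = { false, false };
        bool some_postable[2] = { false, true };

        update_irte(10, none_postable, 2);
        update_irte(11, some_postable, 2);
        return 0;
    }
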
@@ -11,6 +11,13 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

#ifdef CREATE_TRACE_POINTS
#define tracing_kvm_rip_read(vcpu) ({ \
typeof(vcpu) __vcpu = vcpu; \
__vcpu->arch.guest_state_protected ? 0 : kvm_rip_read(__vcpu); \
})
#endif

/*
* Tracepoint for guest mode entry.
*/
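
tracing_kvm_rip_read() is a GNU C statement expression: it captures its argument once via typeof, so vcpu is not evaluated twice, and it yields 0 for protected guests instead of calling kvm_rip_read(). The same construction in a stand-alone form, illustrative only (the struct and helper below are invented):

    #include <stdio.h>

    struct vcpu {
        int guest_state_protected;
        unsigned long rip;
    };

    static unsigned long rip_read(struct vcpu *v)
    {
        return v->rip;
    }

    /* Statement expression + typeof: the argument is evaluated exactly once. */
    #define tracing_rip_read(vcpu) ({                          \
        typeof(vcpu) __vcpu = (vcpu);                          \
        __vcpu->guest_state_protected ? 0 : rip_read(__vcpu);  \
    })

    int main(void)
    {
        struct vcpu open = { .guest_state_protected = 0, .rip = 0x81000000UL };
        struct vcpu sev  = { .guest_state_protected = 1, .rip = 0xdeadbeefUL };

        printf("open guest rip: %#lx\n", tracing_rip_read(&open));
        printf("protected guest rip: %#lx\n", tracing_rip_read(&sev));
        return 0;
    }
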
@@ -28,7 +35,7 @@ TRACE_EVENT(kvm_entry,

TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
__entry->rip = kvm_rip_read(vcpu);
__entry->rip = tracing_kvm_rip_read(vcpu);
__entry->immediate_exit = force_immediate_exit;

kvm_x86_call(get_entry_info)(vcpu, &__entry->intr_info,
@@ -319,7 +326,7 @@ TRACE_EVENT(name, \
), \
\
TP_fast_assign( \
__entry->guest_rip = kvm_rip_read(vcpu); \
__entry->guest_rip = tracing_kvm_rip_read(vcpu); \
__entry->isa = isa; \
__entry->vcpu_id = vcpu->vcpu_id; \
__entry->requests = READ_ONCE(vcpu->requests); \
@@ -423,7 +430,7 @@ TRACE_EVENT(kvm_page_fault,

TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
__entry->guest_rip = kvm_rip_read(vcpu);
__entry->guest_rip = tracing_kvm_rip_read(vcpu);
__entry->fault_address = fault_address;
__entry->error_code = error_code;
),
@@ -297,6 +297,7 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_irq_routing_table *irq_rt;
bool enable_remapped_mode = true;
struct kvm_lapic_irq irq;
struct kvm_vcpu *vcpu;
struct vcpu_data vcpu_info;
@@ -335,21 +336,8 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,

kvm_set_msi_irq(kvm, e, &irq);
if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
!kvm_irq_is_postable(&irq)) {
/*
* Make sure the IRTE is in remapped mode if
* we don't handle it in posted mode.
*/
ret = irq_set_vcpu_affinity(host_irq, NULL);
if (ret < 0) {
printk(KERN_INFO
"failed to back to remapped mode, irq: %u\n",
host_irq);
goto out;
}

!kvm_irq_is_postable(&irq))
continue;
}

vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
vcpu_info.vector = irq.vector;
@@ -357,11 +345,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
vcpu_info.vector, vcpu_info.pi_desc_addr, set);

if (set)
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
else
ret = irq_set_vcpu_affinity(host_irq, NULL);
if (!set)
continue;

enable_remapped_mode = false;

ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
if (ret < 0) {
printk(KERN_INFO "%s: failed to update PI IRTE\n",
__func__);
@@ -369,6 +358,9 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
}
}

if (enable_remapped_mode)
ret = irq_set_vcpu_affinity(host_irq, NULL);

ret = 0;
out:
srcu_read_unlock(&kvm->irq_srcu, idx);
@@ -11098,7 +11098,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/*
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
if (unlikely(prof_on == KVM_PROFILING &&
!vcpu->arch.guest_state_protected)) {
unsigned long rip = kvm_rip_read(vcpu);
profile_hit(KVM_PROFILING, (void *)rip);
}
@@ -13556,25 +13557,27 @@ bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

bool kvm_arch_has_irq_bypass(void)
{
return enable_apicv && irq_remapping_cap(IRQ_POSTING_CAP);
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
struct irq_bypass_producer *prod)
{
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
struct kvm *kvm = irqfd->kvm;
int ret;

irqfd->producer = prod;
kvm_arch_start_assignment(irqfd->kvm);

spin_lock_irq(&kvm->irqfds.lock);
irqfd->producer = prod;

ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
prod->irq, irqfd->gsi, 1);
if (ret)
kvm_arch_end_assignment(irqfd->kvm);

spin_unlock_irq(&kvm->irqfds.lock);

return ret;
}
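
The kvm_arch_irq_bypass_add_producer() hunk above moves the irqfd->producer assignment inside kvm->irqfds.lock, so the pointer is published under the same lock that covers the IRTE update. A generic pthread sketch of that "publish the pointer and update state under one lock" ordering, illustrative only (types, names and the fake IRTE helper are invented):

    #include <pthread.h>
    #include <stdio.h>

    struct producer { int irq; };

    static pthread_mutex_t irqfds_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct producer *current_producer;  /* read by other paths under the lock */

    static int update_irte(int irq, int set)
    {
        printf("IRTE for irq %d -> %s\n", irq, set ? "posted" : "remapped");
        return 0;
    }

    static int add_producer(struct producer *prod)
    {
        int ret;

        pthread_mutex_lock(&irqfds_lock);
        current_producer = prod;               /* publish under the lock ...       */
        ret = update_irte(prod->irq, 1);       /* ... together with the state update */
        pthread_mutex_unlock(&irqfds_lock);

        return ret;
    }

    int main(void)
    {
        struct producer p = { .irq = 42 };
        return add_producer(&p);
    }
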
@@ -13584,9 +13587,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
int ret;
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
struct kvm *kvm = irqfd->kvm;

WARN_ON(irqfd->producer != prod);
irqfd->producer = NULL;

/*
* When producer of consumer is unregistered, we change back to
@@ -13594,12 +13597,18 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
* when the irq is masked/disabled or the consumer side (KVM
* int this case doesn't want to receive the interrupts.
*/
spin_lock_irq(&kvm->irqfds.lock);
irqfd->producer = NULL;

ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
prod->irq, irqfd->gsi, 0);
if (ret)
printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
" fails: %d\n", irqfd->consumer.token, ret);

spin_unlock_irq(&kvm->irqfds.lock);

kvm_arch_end_assignment(irqfd->kvm);
}
@@ -13612,7 +13621,8 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
struct kvm_kernel_irq_routing_entry *new)
{
if (new->type != KVM_IRQ_ROUTING_MSI)
if (old->type != KVM_IRQ_ROUTING_MSI ||
new->type != KVM_IRQ_ROUTING_MSI)
return true;

return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));
@@ -996,8 +996,8 @@ AVXcode: 4
83: Grp1 Ev,Ib (1A),(es)
# CTESTSCC instructions are: CTESTB, CTESTBE, CTESTF, CTESTL, CTESTLE, CTESTNB, CTESTNBE, CTESTNL,
# CTESTNLE, CTESTNO, CTESTNS, CTESTNZ, CTESTO, CTESTS, CTESTT, CTESTZ
84: CTESTSCC (ev)
85: CTESTSCC (es) | CTESTSCC (66),(es)
84: CTESTSCC Eb,Gb (ev)
85: CTESTSCC Ev,Gv (es) | CTESTSCC Ev,Gv (66),(es)
88: POPCNT Gv,Ev (es) | POPCNT Gv,Ev (66),(es)
8f: POP2 Bq,Rq (000),(11B),(ev)
a5: SHLD Ev,Gv,CL (es) | SHLD Ev,Gv,CL (66),(es)
@@ -360,7 +360,7 @@ static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
* We allocate one page for pgd.
*/
if (!SHARED_KERNEL_PMD)
return __pgd_alloc(mm, PGD_ALLOCATION_ORDER);
return __pgd_alloc(mm, pgd_allocation_order());

/*
* Now PAE kernel is not running as a Xen domain. We can allocate
@@ -380,7 +380,7 @@ static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)

static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
{
return __pgd_alloc(mm, PGD_ALLOCATION_ORDER);
return __pgd_alloc(mm, pgd_allocation_order());
}

static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -73,7 +73,7 @@ int __init efi_alloc_page_tables(void)
gfp_t gfp_mask;

gfp_mask = GFP_KERNEL | __GFP_ZERO;
efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, pgd_allocation_order());
if (!efi_pgd)
goto fail;

@@ -96,7 +96,7 @@ free_p4d:
if (pgtable_l5_enabled())
free_page((unsigned long)pgd_page_vaddr(*pgd));
free_pgd:
free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
free_pages((unsigned long)efi_pgd, pgd_allocation_order());
fail:
return -ENOMEM;
}
@@ -103,10 +103,6 @@ noinstr void *__xen_hypercall_setfunc(void)
void (*func)(void);

/*
* Xen is supported only on CPUs with CPUID, so testing for
* X86_FEATURE_CPUID is a test for early_cpu_init() having been
* run.
*
* Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
* dependency chain: it is being called via the xen_hypercall static
* call when running as a PVH or HVM guest. Hypercalls need to be
@@ -118,8 +114,7 @@ noinstr void *__xen_hypercall_setfunc(void)
*/
instrumentation_begin();

if (!boot_cpu_has(X86_FEATURE_CPUID))
xen_get_vendor();
xen_get_vendor();

if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
@@ -54,14 +54,20 @@ struct mc_debug_data {

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
static struct mc_debug_data mc_debug_data_early __initdata;
static DEFINE_PER_CPU(struct mc_debug_data *, mc_debug_data) =
&mc_debug_data_early;
static struct mc_debug_data __percpu *mc_debug_data_ptr;
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

static struct static_key mc_debug __ro_after_init;
static bool mc_debug_enabled __initdata;

static struct mc_debug_data * __ref get_mc_debug(void)
{
if (!mc_debug_data_ptr)
return &mc_debug_data_early;

return this_cpu_ptr(mc_debug_data_ptr);
}

static int __init xen_parse_mc_debug(char *arg)
{
mc_debug_enabled = true;
@@ -71,20 +77,16 @@ static int __init xen_parse_mc_debug(char *arg)
}
early_param("xen_mc_debug", xen_parse_mc_debug);

void mc_percpu_init(unsigned int cpu)
{
per_cpu(mc_debug_data, cpu) = per_cpu_ptr(mc_debug_data_ptr, cpu);
}

static int __init mc_debug_enable(void)
{
unsigned long flags;
struct mc_debug_data __percpu *mcdb;

if (!mc_debug_enabled)
return 0;

mc_debug_data_ptr = alloc_percpu(struct mc_debug_data);
if (!mc_debug_data_ptr) {
mcdb = alloc_percpu(struct mc_debug_data);
if (!mcdb) {
pr_err("xen_mc_debug inactive\n");
static_key_slow_dec(&mc_debug);
return -ENOMEM;
@@ -93,7 +95,7 @@ static int __init mc_debug_enable(void)
/* Be careful when switching to percpu debug data. */
local_irq_save(flags);
xen_mc_flush();
mc_percpu_init(0);
mc_debug_data_ptr = mcdb;
local_irq_restore(flags);

pr_info("xen_mc_debug active\n");
@@ -155,7 +157,7 @@ void xen_mc_flush(void)
trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);

if (static_key_false(&mc_debug)) {
mcdb = __this_cpu_read(mc_debug_data);
mcdb = get_mc_debug();
memcpy(mcdb->entries, b->entries,
b->mcidx * sizeof(struct multicall_entry));
}
@@ -235,7 +237,7 @@ struct multicall_space __xen_mc_entry(size_t args)

ret.mc = &b->entries[b->mcidx];
if (static_key_false(&mc_debug)) {
struct mc_debug_data *mcdb = __this_cpu_read(mc_debug_data);
struct mc_debug_data *mcdb = get_mc_debug();

mcdb->caller[b->mcidx] = __builtin_return_address(0);
mcdb->argsz[b->mcidx] = args;
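
The Xen multicall hunks above (xen_mc_flush() and __xen_mc_entry()) replace a per-CPU pointer that had to be initialized on every CPU with a single accessor: until alloc_percpu() has succeeded, get_mc_debug() hands back a static early buffer, afterwards it returns the real per-CPU slot. A stand-alone sketch of that "static fallback until the dynamic buffer exists" accessor, illustrative only (plain malloc stands in for alloc_percpu here):

    #include <stdio.h>
    #include <stdlib.h>

    struct mc_debug { int entries; };

    static struct mc_debug mc_debug_early;     /* always available at early boot */
    static struct mc_debug *mc_debug_ptr;      /* allocated later, may stay NULL */

    static struct mc_debug *get_mc_debug(void)
    {
        /* Callers never see NULL: fall back to the static buffer until the
         * dynamically allocated one has been installed. */
        if (!mc_debug_ptr)
            return &mc_debug_early;
        return mc_debug_ptr;
    }

    int main(void)
    {
        get_mc_debug()->entries = 1;           /* early use hits the static buffer */
        printf("early buffer entries: %d\n", mc_debug_early.entries);

        mc_debug_ptr = calloc(1, sizeof(*mc_debug_ptr));
        if (mc_debug_ptr) {
            get_mc_debug()->entries = 5;       /* later use hits the allocated one */
            printf("allocated buffer entries: %d\n", mc_debug_ptr->entries);
            free(mc_debug_ptr);
        }
        return 0;
    }
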