mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git synced 2025-09-04 20:19:47 +08:00

Compare commits


93 Commits

Author SHA1 Message Date
Linus Torvalds
a2e94e8079 block-6.17-20250822
-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmiobRkQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpnvDEAC6ybsqvNAOSV1Tdk1EQZ/mIrmIb7tVrp/P
 zRReWTK9jF7kzOLn2Mqgu0c4RFLCMABXmPb5F2aLx72uSxMSFq2sI9QZCgGzZQeZ
 yjOIxFBAPsdgr+gyIOdS3zH04+IKfJw20ojJb83irCgd5M1hpmVwzZ3iGMq8Gs9q
 VJQYvKny7tjjpuLpk3DWl7t1J0YV+0sGQhk3iZdWEHrui7mqmfh6DkeB5forTu6z
 Gn5e4DNbZvmcvkJQ+Rnkua1UmTZ4hr/+3YV9mqzsWYv+1hOTx/uomGbY7DjSdSyK
 vWWNwN97sgAjwhaFgWvB2iRk1pdAb4A3zP+NV1MXheOhHnAT3C6i43DaS1fivone
 YKLEqy4v3IzB5WcdlwclJW2qizoLtopu7A4pRURv9v+Q0wb4Q2YM0gRum59QgxZN
 +YUhglR5ucazYPmIAxOZMaU/WMIN6m4h3hRa1RkFRNXkBvPGxV2fQxi8exX0QWqf
 oxSSfImO0QVjYPlAL7oi0eWwHtqXtebXXdrUNozQdnrEQnimTrxPAuSnfRIv63un
 swlaCzfqXXhtl25t9p6Sx7xM7aKF2k7tYnZdSM7JjiOS7KXHFaZcYt3YcoFfdLc7
 X/vtT9OQWwnEtqzFKnK8EvcjSN+4KbXwI4neVLmsWK81dwqI2huScB+Xe5eBPidU
 6d6dZzUikA==
 =mbqK
 -----END PGP SIGNATURE-----

Merge tag 'block-6.17-20250822' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "A set of fixes for block that should go into this tree. A bit larger
  than what I usually have at this point in time, a lot of that is the
  continued fixing of the lockdep annotation for queue freezing that we
  recently added, which has highlighted a number of little issues here
  and there. This contains:

   - MD pull request via Yu:

       - Add a legacy_async_del_gendisk mode to prevent a regression in
         user tools. New user-tools releases will not use this mode; old
         releases running on a new kernel will warn about the deprecated
         behavior. The plan is to remove this legacy mode after about a
         year

       - A rename in the kernel caused a user-tools build failure;
         revert the rename in mdp_superblock_s

       - Fix a regression where an interrupted resync could be shown as
         a recovery in mdstat or sysfs

   - Improve file size detection for loop, particularly for networked
     file systems, by using getattr to get the size rather than the
     cached inode size.

   - Hotplug CPU lock vs queue freeze fix

   - Lockdep fix while updating the number of hardware queues

   - Fix stacking for PI devices

   - Silence bio_check_eod() for the known case of device removal where
     the size is truncated to 0 sectors"

* tag 'block-6.17-20250822' of git://git.kernel.dk/linux:
  block: avoid cpu_hotplug_lock dependency on freeze_lock
  block: decrement block_rq_qos static key in rq_qos_del()
  block: skip q->rq_qos check in rq_qos_done_bio()
  blk-mq: fix lockdep warning in __blk_mq_update_nr_hw_queues
  block: tone down bio_check_eod
  loop: use vfs_getattr_nosec for accurate file size
  loop: Consolidate size calculation logic into lo_calculate_size()
  block: remove newlines from the warnings in blk_validate_integrity_limits
  block: handle pi_tuple_size in queue_limits_stack_integrity
  selftests: ublk: Use ARRAY_SIZE() macro to improve code
  md: fix sync_action incorrect display during resync
  md: add helper rdev_needs_recovery()
  md: keep recovery_cp in mdp_superblock_s
  md: add legacy_async_del_gendisk mode
2025-08-22 09:29:51 -04:00
Linus Torvalds
d28de4fc0a io_uring-6.17-20250822
-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmiobS0QHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpl2rD/44niBCUuj5CTZKqCCkVMduiBIvRWmRl+Fz
 Xth2qCwn5RBCi9dZWe4D8xTr0d9d3CTdCUMLiINm6+lyLTcE3taz2AczBHcs/FWZ
 rL7anFYtVLcadoqabrlbkI02dEIY4ARkpAfenyaBO693GoI5YD2CBxTH0YkVF2Qw
 beasIRi0s+TbmGQbtqbAROCdIywX6LpyEiT4tuDfLixDHwfoV0teLb57H7TrfJ3j
 MwuFX83JN2ZpGvHSJHs7/T21OwDJ8h2hV4TgFt5hwKg7UKpVq0crh/+lLlZnZxUw
 D5GMW3EDYaOwUO1dxTpFWKuKUCpzvaOpBdqhK3UNsSJwEu6riRzkC/7IydbX1qcI
 dBAvJqg4TJYRpAFwMS0yNZ1gA+rC/kXFQrbJivaRm57ZLrxvhSI4RCPgv+Vd5ayP
 Bd25paIDEndk2sDf9iMESM4yPciTCmjWhjFc6TQwo7uidJagXbXJQD1fXDxDoT+m
 /gGv0UG4vZ20sUZbBOLHeVUFxlaw9MtgFwq1RSPMCUBhYbaxW57BVwjngAaTILNK
 NzBkpamtzJxk66GL1fLZ/fmMdNJenM+40GyHLAt/e46x95aooExaV4a/FtrYXQVa
 LAJTEFUTG4Ybu5zRxTpl1evEXfktpOHlubdQnsAaP1sj8ydRL+JJmRlcbReWQupy
 SeuJhF4rbQ==
 =jOfg
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-6.17-20250822' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:
 "Just two small fixes - one that fixes inconsistent ->async_data vs
  REQ_F_ASYNC_DATA handling in futex, and a followup that just ensures
  that if other opcode handlers mess this up, it won't cause any issues"

* tag 'io_uring-6.17-20250822' of git://git.kernel.dk/linux:
  io_uring: clear ->async_data as part of normal init
  io_uring/futex: ensure io_futex_wait() cleans up properly on failure
2025-08-22 09:25:59 -04:00
Linus Torvalds
edeee68c42 SCSI fixes on 20250822
All fixes in drivers.  The largest diffstat in ufs is caused by the
 doc update with the next being the qcom null pointer deref fix.
 
 Signed-off-by: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCaKhBSSYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pisheYBAP9IwgCR
 ANnFXfILBJ0vGkZiVsV7lZPtcUChbTT0stGCXAD/axlU11QwE673mlxkVK5JuFGc
 fnCdTAt0iM1AqiGiCeI=
 =+twj
 -----END PGP SIGNATURE-----

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "All fixes in drivers. The largest diffstat in ufs is caused by the doc
  update with the next being the qcom null pointer deref fix"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: ufs: ufs-qcom: Fix ESI null pointer dereference
  scsi: ufs: core: Rename ufshcd_wait_for_doorbell_clr()
  scsi: ufs: core: Fix the return value documentation
  scsi: ufs: core: Remove WARN_ON_ONCE() call from ufshcd_uic_cmd_compl()
  scsi: ufs: core: Fix IRQ lock inversion for the SCSI host lock
  scsi: qla4xxx: Prevent a potential error pointer dereference
  scsi: ufs: ufs-pci: Add support for Intel Wildcat Lake
  scsi: fnic: Remove a useless struct mempool forward declaration
2025-08-22 09:20:42 -04:00
Linus Torvalds
afd58777de MMC host:
- sdhci_am654: Disable HS400 for AM62P SR1.0 and SR1.1
  - sdhci-of-arasan: Ensure CD logic stabilization before power-up
  - sdhci-pci-gli: Mask the replay timer timeout of AER for GL9763e
 
 MEMSTICK:
  - Fix deadlock by moving removing flag earlier
 -----BEGIN PGP SIGNATURE-----
 
 iQJLBAABCgA1FiEEugLDXPmKSktSkQsV/iaEJXNYjCkFAmioRt4XHHVsZi5oYW5z
 c29uQGxpbmFyby5vcmcACgkQ/iaEJXNYjCkW/w/+NeQhlSoTGr/eBWL2BqF3+b1J
 yxyS/DIW0CukTrpQNMqsu9yeWqRqqugBGm613/ffJ2elCOV8sfhyQt8tgvIur9WB
 mWEXui8PwReNa4ZLmUVbFMLfLL0uJO0MIYRXFxJB8CGk0OXsN4jffaomurVdmXsS
 4WkvzHcm64MmGINPkGnkWICMIHiErjAaQOZJBmasG7VxkdHVvJjbi0QFeWjnVExd
 7HXN2qPU1y8Vv7jgArGaGnL7ZElmzEjcDn7+ZCStC4Q6QjTsKE3FFjsEkiF08b1M
 1BBwx6NfB/KOBxBNy6g7vLmeD52oLIET2AQ8E4aQ2MpQF24323eDmY/TLR7d8SIq
 jctdi21qcqQLeaj2oStcgS+FTH2vpkSQBK4kv/cRTcEVpvun+KYrCNpFI0JDovY0
 0BHOukSHMkB0i0X1uyBmMXlxFGne67sDM42tynfkHVMfb6MTtk50EqyWi5shoYcJ
 PUPv/E+AemohyKzqLpU177Lx84pX3km9oK21tVGJS62Qv8ydgJLV2YXWU7IKQ+Mv
 d+X7Ks6AlHIbhMdo00/L/4nLNjvUiAzJ480QnAE1qt+luFhnKievFaZ0hX2z7E14
 WGnpPlph2S2UUfJpeA4JuM3Pb2nwvjVm4pl5CZYRg2cRJz4DGRK20NbcZ/waA02I
 umWZrQ1PzgyYOqYRmkE=
 =FuWR
 -----END PGP SIGNATURE-----

Merge tag 'mmc-v6.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC fixes from Ulf Hansson:
 "MMC host:
   - sdhci_am654: Disable HS400 for AM62P SR1.0 and SR1.1
   - sdhci-of-arasan: Ensure CD logic stabilization before power-up
   - sdhci-pci-gli: Mask the replay timer timeout of AER for GL9763e

  MEMSTICK:
   - Fix deadlock by moving removing flag earlier"

* tag 'mmc-v6.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  mmc: sdhci_am654: Disable HS400 for AM62P SR1.0 and SR1.1
  memstick: Fix deadlock by moving removing flag earlier
  mmc: sdhci-of-arasan: Ensure CD logic stabilization before power-up
  mmc: sdhci-pci-gli: GL9763e: Mask the replay timer timeout of AER
  mmc: sdhci-pci-gli: GL9763e: Rename the gli_set_gl9763e() for consistency
  mmc: sdhci-pci-gli: Add a new function to simplify the code
2025-08-22 09:17:49 -04:00
Linus Torvalds
e2d324af56 RDMA v6.17 first rc pull request
- Syzkaller found WARN_ON in rxe due to poor lifecycle management of
   resources linked to skbs
 
 - Missing error path handling in erdma qp creation
 
 - Initialize the qp number for the GSI QP in erdma
 
 - Mismatching of DIP, SCC and QP numbers in hns
 
 - SRQ bug fixes in bnxt_re
 
 - Memory leak and possibly uninited memory in bnxt_re
 
 - Remove retired irdma maintainer
 
 - Fix kfree() for kvalloc() in ODP
 
 - Fix memory leak in hns
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRRRCHOFoQz/8F5bUaFwuHvBreFYQUCaKeiiAAKCRCFwuHvBreF
 YWZtAQCYRx4n6lVWJ8t954bVZGcxLvruY6SVhR5r7xGTEEW/cwD/VnRJb7fwfgTc
 bP7+DIG+qLK2JWVNT4GptgpnjOuDPwY=
 =KjJs
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:

 - syzkaller found a WARN_ON in rxe due to poor lifecycle management of
   resources linked to skbs

 - Missing error path handling in erdma qp creation

 - Initialize the qp number for the GSI QP in erdma

 - Mismatching of DIP, SCC and QP numbers in hns

 - SRQ bug fixes in bnxt_re

 - Memory leak and possibly uninited memory in bnxt_re

 - Remove retired irdma maintainer

 - Fix kfree() for kvalloc() in ODP

 - Fix memory leak in hns

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/hns: Fix dip entries leak on devices newer than hip09
  RDMA/core: Free pfn_list with appropriate kvfree call
  MAINTAINERS: Remove bouncing irdma maintainer
  RDMA/bnxt_re: Fix to initialize the PBL array
  RDMA/bnxt_re: Fix a possible memory leak in the driver
  RDMA/bnxt_re: Fix to remove workload check in SRQ limit path
  RDMA/bnxt_re: Fix to do SRQ armena by default
  RDMA/hns: Fix querying wrong SCC context for DIP algorithm
  RDMA/erdma: Fix unset QPN of GSI QP
  RDMA/erdma: Fix ignored return value of init_kernel_qp
  RDMA/rxe: Flush delayed SKBs while releasing RXE resources
2025-08-22 09:13:24 -04:00
Linus Torvalds
c37d2bc92b IOMMU Fixes for Linux v6.17-rc2:
Including:
 
 	- AMD-Vi: Fix potential stack buffer overflow via command line.
 
 	- NVidia-Tegra: Fix endianness sparse warning.
 
 	- ARM-SMMU: Fix ATS-masters reference count issue.
 
 	- Virtio-IOMMU: Fix race condition on instance lookup.
 
 	- RISC-V IOMMU: Fix potential NULL-ptr dereference in
 	  riscv_iommu_iova_to_phys().
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAmioMvgACgkQK/BELZcB
 GuMzlxAAh1sQ3jPkyEcQzCErCYvvICWMzEiV0Lweot5xBFcAZAXQEcYJy9u1PnA7
 NNoSfhaEyufkANUznvI5ikkJzv9wfIOK1mn3vjaeZe7ymXqh0m+JqFWS1hQynjp4
 6DEIYEJLB1pLDM41P+S+AnVWfS88XwMsUd13es773zJgPLHfK9U97oFWfUhkj6ei
 9ItvgSAiqAcIPYu1/hI5Te7vIC5RBRyOfvAhhzxx2255T3bdUqbVUz3ERLXZ808M
 MOmhznlVAChhoOKTdWRpOaVnFsaWCbUBhdnnSwwL7lsmixd4wDELZM/8jGA8hlIT
 7sbMIiiEdWqPfpMZxDuz7IfEilhFmZP8+e5c8VPL37PTouj2RdCGPbLtzGFzlMKU
 QF40K6FAJCbE9mmgmtdD33xhBkc3/7gar9Xvc3k9VEvVOfvZ/TxgPTW4ZilaO+OK
 C8S1/x3T6MFZmndKT/H1SAm+AM4pViON48N5t4L6rVA/na0xraOXNjFnsNeK6gLY
 lK5pz7St69Ud7KQUQXhUgKSktDDQOnZYcbn0I7z2KfSAIWFxqJgT9F5XtTde1kdm
 XZVe4qp59UzOgZQu1Q1K5F2peQGFSjoaK69r4I0Mh2VY7jokW/tZmqUgyTbW42hD
 qH/jhcPblpOTCFM9LPn94oShmTlMcNhKYbfO+VKIY9ThhxPVHJc=
 =BWSF
 -----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-v6.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux

Pull iommu fixes from Joerg Roedel:

 - AMD-Vi: Fix potential stack buffer overflow via command line

 - NVidia-Tegra: Fix endianness sparse warning

 - ARM-SMMU: Fix ATS-masters reference count issue

 - Virtio-IOMMU: Fix race condition on instance lookup

 - RISC-V IOMMU: Fix potential NULL-ptr dereference in
   riscv_iommu_iova_to_phys()

* tag 'iommu-fixes-v6.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux:
  iommu/riscv: prevent NULL deref in iova_to_phys
  iommu/virtio: Make instance lookup robust
  iommu/arm-smmu-v3: Fix smmu_domain->nr_ats_masters decrement
  iommu/tegra241-cmdqv: Fix missing cpu_to_le64 at lvcmdq_err_map
  iommu/amd: Avoid stack buffer overflow from kernel cmdline
2025-08-22 09:10:30 -04:00
Linus Torvalds
f28ad47b66 sound fixes for 6.17-rc3
Only small fixes.
 
 - ASoC Cirrus codec fixes
 - A regression fix for the recent TAS2781 codec refactoring
 - A fix for user-timer error handling
 - Fixes for USB-audio descriptor validators
 - Usual HD-audio and ASoC device-specific quirks
 -----BEGIN PGP SIGNATURE-----
 
 iQJCBAABCAAsFiEEIXTw5fNLNI7mMiVaLtJE4w1nLE8FAminN10OHHRpd2FpQHN1
 c2UuZGUACgkQLtJE4w1nLE/RFw//XtS8u0fRY+WRbFdV7zE+nUBszbqAfOBs7kzd
 jB3iWJogKyeUq/whgT1xDa3R7PPHK74XDrSEAEJTTmyiUtAPMNh895CGIEqD07nz
 sg1rHlDLNgB4Hdc4cfiBXfSMZnPH4CAnWUSnUF7QsSXmaPHyhJsc3zQDwrVEl0/j
 DLuaF5BjzOGVL8my61UWCUrAEI+lVBerlJ5A2RT2LnZxH6f/PgtGmXyFElcfnxy0
 TFZ/na7llmNn4O+yUPq/w/lvaEXE5ER/G4vfDbtQTlOeBXI4IHJ2xWtPGGA1Nkbm
 YagdLxoBAMq+mL4HOHhldF0nuPbia0LFdpc5NEmFT8D4SOuy6WS+0qlqAxpytbSC
 T9UhLDe1lDiMhVYhnvqPORNIC7lCXsUHIT/fccaK240qpQidwzmNDA8JsFNC6ZZj
 D/wJddMk8XgCs3kmIa8P7nfnF4QO+xIATaiWjpBgE+ahIpWZFMq1f7ZKY3pwjqu3
 kJg4GsMeopqyOphD2mX3EQMJ6x4aDpemRLxVj9esPqvpT+j5dViLQsZI/h2n8ZUU
 V253FW9qj7uFPTQ/hYDcDXneMGG/kS741ggTFmHNJSNOAk31qBfOGjS52LkkHvw1
 DT/UouG6GRh4Qgq23doRIC2zRU6ySh4KbktGvysVAvljMvRJLroQywJNyaTVH3HP
 9Z+1zxI=
 =myx0
 -----END PGP SIGNATURE-----

Merge tag 'sound-6.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound

Pull sound fixes from Takashi Iwai:
 "Only small fixes.

   - ASoC Cirrus codec fixes

   - A regression fix for the recent TAS2781 codec refactoring

   - A fix for user-timer error handling

   - Fixes for USB-audio descriptor validators

   - Usual HD-audio and ASoC device-specific quirks"

* tag 'sound-6.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound:
  ALSA: usb-audio: Use correct sub-type for UAC3 feature unit validation
  ALSA: timer: fix ida_free call while not allocated
  ASoC: cs35l56: Remove SoundWire Clock Divider workaround for CS35L63
  ASoC: cs35l56: Handle new algorithms IDs for CS35L63
  ASoC: cs35l56: Update Firmware Addresses for CS35L63 for production silicon
  ALSA: hda: tas2781: Fix wrong reference of tasdevice_priv
  ALSA: hda/realtek: Audio disappears on HP 15-fc000 after warm boot again
  ALSA: hda/realtek: Fix headset mic on ASUS Zenbook 14
  ASoC: codecs: ES9389: Modify the standby configuration
  ALSA: usb-audio: Fix size validation in convert_chmap_v3()
  ALSA: hda/tas2781: Add name prefix tas2781 for tas2781's dvc_tlv and amp_vol_tlv
  ALSA: hda/realtek: Add support for HP EliteBook x360 830 G6 and EliteBook 830 G6
2025-08-22 09:05:37 -04:00
Linus Torvalds
3cfcd57def fix for netfs smb3 oops
-----BEGIN PGP SIGNATURE-----
 
 iQGzBAABCgAdFiEE6fsu8pdIjtWE/DpLiiy9cAdyT1EFAmin3xQACgkQiiy9cAdy
 T1Fq0gwAlHUoGyM53OFYvCKmiAmkKYk1p/xBMl8lf59meF0HC6jxhDkgngZ24FhL
 V8z9h9viETDidZeq3SDTrFUQVcHJUlk3VMRFCFjObfQ8ngt7r55DFRBFv5hky936
 kTMoCRjHB6hHUL0tO5q1OPWrKNg1I1V8GDuH3jEima7MR5VwnHL1CMj+DBYRSlsB
 U2hPcgIVkDwNH+ZymI0FKXa1PbV+D1PlYy5Cr6X7EeSQC9FreU2HFrD0jbzUP1TW
 Y9GKnyikuD44GP69vBk7sXW8oe7YaXIyFiBW0lEIHlV1nVjNHBKs5+XyIjdH+0bF
 J8qeczKvsl6XkLpv32i/dqKVx/YdTsfY2iUo92JjjcbW1dbjqn7E9f+LufcCykFp
 zjZ77PWxtPkqZamaCmnf8av8GBf3HIdd5X5rpHkyYBphobkWe5NsAXoqFiAbGMcw
 Q3yrB8EA6a5pEejiWZA/N8NJwJFkvY/OARgRsGaOwopuMqOeoudzduRoaaDt4dPd
 4sEoSN6X
 =MKGv
 -----END PGP SIGNATURE-----

Merge tag '6.17-rc2-smb3-client-fix' of git://git.samba.org/sfrench/cifs-2.6

Pull smb client fix from Steve French:
 "Fix for netfs smb3 oops"

* tag '6.17-rc2-smb3-client-fix' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: Fix oops due to uninitialised variable
2025-08-22 09:02:32 -04:00
Linus Torvalds
e86ba12cf8 NFS client bugfix for Linux 6.17
Stable fixes:
 - NFS: Fix a data corrupting race when updating an existing write
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEESQctxSBg8JpV8KqEZwvnipYKAPIFAminvwMACgkQZwvnipYK
 APIFkxAAmmlm+6rVRmkb5W37hgcsntWS90CjIw6axH7WlFToypiDam83pCIYzAMc
 wzO5e03ZnKwp49zabTYSqoLD30/irZ3QwR6H1XIe6NToomYr43CZPjqctZLhVaeq
 HpLfBiJdxI2jJQMpHubQk66MyBhk0tXln+Pzzi59ZESOevbGWcT8bz6v849nIh0F
 emngSbMEKMXLrY+r/NsX31wF3Te4WYNyV/tumck3hOEZNouCKD/a2JKGEXdkWKw6
 1IFiTxo4IT8vsVrUTKsG3QUtkv/v8iZ0FSl1f9FD1C6eubX5Jo4JcEQmDc6NoJTF
 4viy0c19+8TUs//Kax4VlBE6opeb9jXba4iN0FOXeYsbqVXyv8fEuinLu174I+s4
 bQNpScZ8/o/dMio/Qa6RgTvxIvk9kICJEmfF1IKeIb7Kn+nczpsD8CqwT/EKiCV2
 ZotYZP5BP2BKYrtV5eUhWcl9mKpagz8ivHCevKvaG4JX+M2/XVNNspgkdr8hwu3j
 SX5lwOdLP//gHqt3x8PUCrD2G9Pn81qkshR5mfJAyWhPodwrP6vsDSvAnB93zeZB
 LVln0c1BHmvPpjAEnIzCh3mZXdzMHSVHxKldktYDfhfh/8tFJAkS7o3bXpgZ8dMF
 eHsYZHBJnByZBfi61B3RkWB9QU8KWi+winDQJpoU+MbZGxRdFmA=
 =Yh2D
 -----END PGP SIGNATURE-----

Merge tag 'nfs-for-6.17-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client fix from Trond Myklebust:

 - NFS: Fix a data corrupting race when updating an existing write

* tag 'nfs-for-6.17-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  NFS: Fix a race when updating an existing write
2025-08-22 08:58:58 -04:00
Linus Torvalds
6eba757ce9 20 hotfixes. 10 are cc:stable and the remainder address post-6.16 issues
or aren't considered necessary for -stable kernels.  17 of these fixes are
 for MM.
 
 As usual, singletons all over the place, apart from a three-patch series
 of KHO followup work from Pasha which is actually also a bunch of
 singletons.
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaKfFVwAKCRDdBJ7gKXxA
 jvZGAQCCRTRgwnYsH0op9Rlxs72zokENbErSzXweWLez31pNpAD/S7bVSjjk1mXr
 BQ24ZadKUUomWkghwCusb9VomMeneg0=
 =+uBT
 -----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2025-08-21-18-17' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "20 hotfixes. 10 are cc:stable and the remainder address post-6.16
  issues or aren't considered necessary for -stable kernels. 17 of these
  fixes are for MM.

  As usual, singletons all over the place, apart from a three-patch
  series of KHO followup work from Pasha which is actually also a bunch
  of singletons"

* tag 'mm-hotfixes-stable-2025-08-21-18-17' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/mremap: fix WARN with uffd that has remap events disabled
  mm/damon/sysfs-schemes: put damos dests dir after removing its files
  mm/migrate: fix NULL movable_ops if CONFIG_ZSMALLOC=m
  mm/damon/core: fix damos_commit_filter not changing allow
  mm/memory-failure: fix infinite UCE for VM_PFNMAP pfn
  MAINTAINERS: mark MGLRU as maintained
  mm: rust: add page.rs to MEMORY MANAGEMENT - RUST
  iov_iter: iterate_folioq: fix handling of offset >= folio size
  selftests/damon: fix selftests by installing drgn related script
  .mailmap: add entry for Easwar Hariharan
  selftests/mm: add test for invalid multi VMA operations
  mm/mremap: catch invalid multi VMA moves earlier
  mm/mremap: allow multi-VMA move when filesystem uses thp_get_unmapped_area
  mm/damon/core: fix commit_ops_filters by using correct nth function
  tools/testing: add linux/args.h header and fix radix, VMA tests
  mm/debug_vm_pgtable: clear page table entries at destroy_args()
  squashfs: fix memory leak in squashfs_fill_super
  kho: warn if KHO is disabled due to an error
  kho: mm: don't allow deferred struct page with KHO
  kho: init new_physxa->phys_bits to fix lockdep
2025-08-22 08:54:34 -04:00
XianLiang Huang
99d4d1a070 iommu/riscv: prevent NULL deref in iova_to_phys
The riscv_iommu_pte_fetch() function returns either NULL for an
unmapped/never-mapped iova, or a valid leaf pte pointer that
requires no further validation.

riscv_iommu_iova_to_phys() failed to handle NULL returns. Prevent
the null pointer dereference in riscv_iommu_iova_to_phys(), and
remove the now-redundant pte validation.
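
A minimal sketch of the shape of the fix, with signatures simplified
and helper names assumed rather than taken from the actual patch:

  /* Hedged sketch: bail out on NULL; a non-NULL return is already a
   * valid leaf PTE, so the old extra validation can go away. */
  static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *domain,
                                              dma_addr_t iova)
  {
          size_t pte_size;
          unsigned long *ptr;

          ptr = riscv_iommu_pte_fetch(iommu_domain_to_riscv(domain),
                                      iova, &pte_size);
          if (!ptr)
                  return 0;       /* unmapped or never-mapped iova */

          return PFN_PHYS(pte_pfn(__pte(*ptr))) | (iova & (pte_size - 1));
  }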

Fixes: 488ffbf181 ("iommu/riscv: Paging domain support")
Cc: Tomasz Jeznach <tjeznach@rivosinc.com>
Signed-off-by: XianLiang Huang <huangxianliang@lanxincomputing.com>
Link: https://lore.kernel.org/r/20250820072248.312-1-huangxianliang@lanxincomputing.com
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
2025-08-22 08:51:49 +02:00
Robin Murphy
72b6f7cd89 iommu/virtio: Make instance lookup robust
Much like arm-smmu in commit 7d835134d4 ("iommu/arm-smmu: Make
instance lookup robust"), virtio-iommu appears to have the same issue
where iommu_device_register() makes the IOMMU instance visible to other
API callers (including itself) straight away, but internally the
instance isn't ready to recognise itself for viommu_probe_device() to
work correctly until after viommu_probe() has returned. This matters a
lot more now that bus_iommu_probe() has the DT/VIOT knowledge to probe
client devices the way that was always intended. Tweak the lookup and
initialisation in much the same way as for arm-smmu, to ensure that what
we register is functional and ready to go.

Cc: stable@vger.kernel.org
Fixes: bcb81ac6ae ("iommu: Get DT/ACPI parsing into the proper probe path")
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Link: https://lore.kernel.org/r/308911aaa1f5be32a3a709996c7bd6cf71d30f33.1755190036.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
2025-08-22 08:43:23 +02:00
Nicolin Chen
685ca577b4 iommu/arm-smmu-v3: Fix smmu_domain->nr_ats_masters decrement
The arm_smmu_attach_commit() updates master->ats_enabled before calling
arm_smmu_remove_master_domain() that is supposed to clean up everything
in the old domain, including the old domain's nr_ats_masters. So, it is
supposed to use the old ats_enabled state of the device, not an updated
state.

This isn't a problem if switching between two domains where:
 - old ats_enabled = false; new ats_enabled = false
 - old ats_enabled = true;  new ats_enabled = true
but can fail in cases where:
 - old ats_enabled = false; new ats_enabled = true
   (old domain should keep the counter but incorrectly decreased it)
 - old ats_enabled = true;  new ats_enabled = false
   (old domain needed to decrease the counter but incorrectly missed it)

Update master->ats_enabled after arm_smmu_remove_master_domain() to fix
this.
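
In code form, the ordering fix amounts to something like this hedged
sketch (call arguments simplified; not the verbatim patch):

  /* The old domain's nr_ats_masters must be balanced using the OLD
   * ats_enabled state, so only flip the state afterwards. */
  arm_smmu_remove_master_domain(master, state->old_domain, ssid);
  master->ats_enabled = state->ats_enabled;  /* update only after removal */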

Fixes: 7497f4211f ("iommu/arm-smmu-v3: Make changing domains be hitless for ATS")
Cc: stable@vger.kernel.org
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Acked-by: Will Deacon <will@kernel.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Pranjal Shrivastava <praan@google.com>
Link: https://lore.kernel.org/r/20250801030127.2006979-1-nicolinc@nvidia.com
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
2025-08-22 08:41:20 +02:00
Jens Axboe
e4e6aaea46 io_uring: clear ->async_data as part of normal init
Opcode handlers like POLL_ADD will use ->async_data as the pointer for
double poll handling, which is a bit different than the usual case
where it's strictly gated by the REQ_F_ASYNC_DATA flag. Be a bit more
proactive in handling ->async_data, and clear it to NULL as part of
regular init. Init is touching that cacheline anyway, so might as well
clear it.
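
A hedged sketch of what clearing as part of regular init looks like
(function shape simplified, surrounding init code elided; not the
verbatim patch):

  static void io_init_req(struct io_kiocb *req)
  {
          /* init already touches this cacheline, so clearing is free */
          req->flags &= ~REQ_F_ASYNC_DATA;
          req->async_data = NULL;
  }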

Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-21 13:54:01 -06:00
Jens Axboe
508c1314b3 io_uring/futex: ensure io_futex_wait() cleans up properly on failure
The io_futex_data is allocated upfront and assigned to the io_kiocb
async_data field, but the request isn't marked with REQ_F_ASYNC_DATA
at that point. Those two should always go together, as the flag tells
io_uring whether the field is valid or not.

Additionally, on failure cleanup, the futex handler frees the data but
does not clear ->async_data. Clear the data and the flag in the error
path as well.

Thanks to Trend Micro Zero Day Initiative and particularly ReDress for
reporting this.
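
A hedged sketch of the invariant being restored (names per the message,
error handling condensed; not the verbatim patch):

  struct io_futex_data *ifd = kzalloc(sizeof(*ifd), GFP_KERNEL);

  if (!ifd)
          return -ENOMEM;
  req->async_data = ifd;
  req->flags |= REQ_F_ASYNC_DATA;    /* flag and data set together */

  /* ... on the wait-setup failure path ... */
  kfree(ifd);
  req->async_data = NULL;
  req->flags &= ~REQ_F_ASYNC_DATA;   /* flag and data cleared together */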

Cc: stable@vger.kernel.org
Fixes: 194bb58c60 ("io_uring: add support for futex wake and wait")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-21 13:53:33 -06:00
Takashi Iwai
8410fe8109 ALSA: usb-audio: Use correct sub-type for UAC3 feature unit validation
The entry of the validators table for UAC3 feature unit is defined
with a wrong sub-type UAC_FEATURE (= 0x06) while it should have been
UAC3_FEATURE (= 0x07).  This patch corrects the entry value.
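
For reference, the two sub-type values involved, as a hedged sketch of
the corrected table entry (constant and handler names assumed, not the
verbatim patch):

  #define UAC_FEATURE_UNIT   0x06   /* UAC1/UAC2 feature unit sub-type */
  #define UAC3_FEATURE_UNIT  0x07   /* UAC3 feature unit sub-type */

  /* before: the UAC3 entry matched sub-type 0x06 (UAC_FEATURE_UNIT) */
  { UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit },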

Fixes: 57f8770620 ("ALSA: usb-audio: More validations of descriptor units")
Link: https://patch.msgid.link/20250821150835.8894-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
2025-08-21 17:10:14 +02:00
Nilay Shroff
370ac285f2 block: avoid cpu_hotplug_lock dependency on freeze_lock
A recent lockdep[1] splat observed while running blktest block/005
reveals a potential deadlock caused by the cpu_hotplug_lock dependency
on ->freeze_lock. This dependency was introduced by commit 033b667a82
("block: blk-rq-qos: guard rq-qos helpers by static key").

That change added a static key to avoid fetching q->rq_qos when
neither blk-wbt nor blk-iolatency is configured. The static key
dynamically patches kernel text to a NOP when disabled, eliminating
overhead of fetching q->rq_qos in the I/O hot path. However, enabling
a static key at runtime requires acquiring both cpu_hotplug_lock and
jump_label_mutex. When this happens after the queue has already been
frozen (i.e., while holding ->freeze_lock), it creates a locking
dependency from cpu_hotplug_lock to ->freeze_lock, which leads to a
potential deadlock reported by lockdep [1].

To resolve this, replace the static key mechanism with q->queue_flags:
QUEUE_FLAG_QOS_ENABLED. This flag is evaluated in the fast path before
accessing q->rq_qos. If the flag is set, we proceed to fetch q->rq_qos;
otherwise, the access is skipped.

Since q->queue_flags is commonly accessed in the I/O hot path and
resides in the first cacheline of struct request_queue, checking it
imposes minimal overhead while eliminating the deadlock risk.

This change avoids the lockdep splat without introducing performance
regressions.

[1] https://lore.kernel.org/linux-block/4fdm37so3o4xricdgfosgmohn63aa7wj3ua4e5vpihoamwg3ui@fq42f5q5t5ic/
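
A hedged sketch of the fast-path gating described above (helper shape
assumed, not the verbatim patch):

  static inline bool rq_qos_enabled(struct request_queue *q)
  {
          /* plain flag test: no jump-label patching, and therefore no
           * cpu_hotplug_lock / jump_label_mutex while the queue is
           * frozen */
          return test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
                 q->rq_qos;
  }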

Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Closes: https://lore.kernel.org/linux-block/4fdm37so3o4xricdgfosgmohn63aa7wj3ua4e5vpihoamwg3ui@fq42f5q5t5ic/
Fixes: 033b667a82 ("block: blk-rq-qos: guard rq-qos helpers by static key")
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20250814082612.500845-4-nilay@linux.ibm.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-21 07:11:11 -06:00
Nilay Shroff
ade1beea1c block: decrement block_rq_qos static key in rq_qos_del()
rq_qos_add() increments the block_rq_qos static key when a QoS
policy is attached. When a QoS policy is removed via rq_qos_del(),
we must symmetrically decrement the static key. If this removal drops
the last QoS policy from the queue (q->rq_qos becomes NULL), the
static branch can be disabled and the jump label patched to a NOP,
avoiding overhead on the hot path.

This change ensures rq_qos_add()/rq_qos_del() keep the
block_rq_qos static key balanced and prevents leaving the branch
permanently enabled after the last policy is removed.
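
A hedged sketch of the symmetry (list unlinking condensed to a
comment; not the verbatim patch):

  void rq_qos_del(struct rq_qos *rqos)
  {
          /* ... unlink rqos from the queue's rq_qos chain ... */
          static_branch_dec(&block_rq_qos);  /* balances rq_qos_add() */
  }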

Fixes: 033b667a82 ("block: blk-rq-qos: guard rq-qos helpers by static key")
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20250814082612.500845-3-nilay@linux.ibm.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-21 07:11:11 -06:00
Nilay Shroff
275332877e block: skip q->rq_qos check in rq_qos_done_bio()
If a bio has BIO_QOS_THROTTLED or BIO_QOS_MERGED set,
it implicitly guarantees that q->rq_qos is present.
Avoid re-checking q->rq_qos in this case and call
__rq_qos_done_bio() directly as a minor optimization.
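
A hedged sketch of the resulting fast path (not the verbatim patch):

  static inline void rq_qos_done_bio(struct bio *bio)
  {
          if (bio_flagged(bio, BIO_QOS_THROTTLED) ||
              bio_flagged(bio, BIO_QOS_MERGED)) {
                  struct request_queue *q = bdev_get_queue(bio->bi_bdev);

                  /* the flags guarantee q->rq_qos is non-NULL */
                  __rq_qos_done_bio(q->rq_qos, bio);
          }
  }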

Suggested-by: Yu Kuai <yukuai1@huaweicloud.com>
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20250814082612.500845-2-nilay@linux.ibm.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-21 07:11:11 -06:00
Ming Lei
2d82f3bd89 blk-mq: fix lockdep warning in __blk_mq_update_nr_hw_queues
Commit 5989bfe6ac ("block: restore two stage elevator switch while
running nr_hw_queue update") reintroduced a lockdep warning by calling
blk_mq_freeze_queue_nomemsave() before switching the I/O scheduler.

The function blk_mq_elv_switch_none() calls elevator_change_done().
Running this while the queue is frozen causes a lockdep warning.

Fix this by reordering the operations: first switch the I/O scheduler
to 'none', then freeze the queue. This ensures elevator_change_done()
is not called on an already-frozen queue, and it is safe because
elevator_set_none() itself freezes the queue before switching to none.

We still have to rely on blk_mq_elv_switch_back() for switching back,
and it has to cover the unfrozen-queue case.
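
The before/after ordering, as a hedged sketch (call shapes
simplified; not the verbatim patch):

  /* before: freeze first, then switch, so elevator_change_done()
   * ran on a frozen queue */

  /* after: */
  blk_mq_elv_switch_none(q);           /* elevator_set_none() freezes
                                          internally, so this is safe */
  blk_mq_freeze_queue_nomemsave(q);    /* freeze for the update itself */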

Cc: Nilay Shroff <nilay@linux.ibm.com>
Cc: Yu Kuai <yukuai3@huawei.com>
Fixes: 5989bfe6ac ("block: restore two stage elevator switch while running nr_hw_queue update")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Link: https://lore.kernel.org/r/20250815131737.331692-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-21 05:34:19 -06:00
Judith Mendez
d2d7a96b29 mmc: sdhci_am654: Disable HS400 for AM62P SR1.0 and SR1.1
Add an SDHCI_AM654_QUIRK_DISABLE_HS400 quirk that disables HS400
support. AM62P SR1.0 and SR1.1 do not support HS400 due to errata
i2458 [0], so disable HS400 for these SoC revisions.

[0] https://www.ti.com/lit/er/sprz574a/sprz574a.pdf
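
A hedged sketch of how such a quirk is typically consumed (the cap
names are the standard MMC capability flags; exact placement assumed):

  if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_DISABLE_HS400)
          host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
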
Fixes: 37f2816551 ("arm64: dts: ti: k3-am62p: Add ITAP/OTAP values for MMC")
Cc: stable@vger.kernel.org
Signed-off-by: Judith Mendez <jm@ti.com>
Reviewed-by: Andrew Davis <afd@ti.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20250820193047.4064142-1-jm@ti.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2025-08-21 11:07:16 +02:00
Dewei Meng
5003a65790 ALSA: timer: fix ida_free call while not allocated
In snd_utimer_create(), if kasprintf() returns NULL,
snd_utimer_put_id() is called and ends up calling ida_free()
for id 0, which was never allocated.

syzkaller reported the following:
  ------------[ cut here ]------------
  ida_free called for id=0 which is not allocated.
  WARNING: CPU: 1 PID: 1286 at lib/idr.c:592 ida_free+0x1fd/0x2f0 lib/idr.c:592
  Modules linked in:
  CPU: 1 UID: 0 PID: 1286 Comm: syz-executor164 Not tainted 6.15.8 #3 PREEMPT(lazy)
  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-4.fc42 04/01/2014
  RIP: 0010:ida_free+0x1fd/0x2f0 lib/idr.c:592
  Code: f8 fc 41 83 fc 3e 76 69 e8 70 b2 f8 (...)
  RSP: 0018:ffffc900007f79c8 EFLAGS: 00010282
  RAX: 0000000000000000 RBX: 1ffff920000fef3b RCX: ffffffff872176a5
  RDX: ffff88800369d200 RSI: 0000000000000000 RDI: ffff88800369d200
  RBP: 0000000000000000 R08: ffffffff87ba60a5 R09: 0000000000000000
  R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000
  R13: 0000000000000002 R14: 0000000000000000 R15: 0000000000000000
  FS:  00007f6f1abc1740(0000) GS:ffff8880d76a0000(0000) knlGS:0000000000000000
  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
  CR2: 00007f6f1ad7a784 CR3: 000000007a6e2000 CR4: 00000000000006f0
  Call Trace:
   <TASK>
   snd_utimer_put_id sound/core/timer.c:2043 [inline] [snd_timer]
   snd_utimer_create+0x59b/0x6a0 sound/core/timer.c:2184 [snd_timer]
   snd_utimer_ioctl_create sound/core/timer.c:2202 [inline] [snd_timer]
   __snd_timer_user_ioctl.isra.0+0x724/0x1340 sound/core/timer.c:2287 [snd_timer]
   snd_timer_user_ioctl+0x75/0xc0 sound/core/timer.c:2298 [snd_timer]
   vfs_ioctl fs/ioctl.c:51 [inline]
   __do_sys_ioctl fs/ioctl.c:907 [inline]
   __se_sys_ioctl fs/ioctl.c:893 [inline]
   __x64_sys_ioctl+0x198/0x200 fs/ioctl.c:893
   do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
   do_syscall_64+0x7b/0x160 arch/x86/entry/syscall_64.c:94
   entry_SYSCALL_64_after_hwframe+0x76/0x7e
  [...]

Set utimer->id before calling kasprintf(), which ensures
snd_utimer_put_id() frees an id that was actually allocated.
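
A hedged sketch of the corrected ordering (ida and name details
assumed, not the verbatim patch):

  id = ida_alloc(&snd_utimer_ida, GFP_KERNEL);
  if (id < 0)
          return id;
  utimer->id = id;                     /* record the id first */

  utimer->name = kasprintf(GFP_KERNEL, "snd-utimer%d", utimer->id);
  if (!utimer->name) {
          snd_utimer_put_id(utimer);   /* now frees the id it owns */
          return -ENOMEM;
  }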

Fixes: 37745918e0 ("ALSA: timer: Introduce virtual userspace-driven timers")
Signed-off-by: Dewei Meng <mengdewei@cqsoftware.com.cn>
Link: https://patch.msgid.link/20250821014317.40786-1-mengdewei@cqsoftware.com.cn
Signed-off-by: Takashi Iwai <tiwai@suse.de>
2025-08-21 09:12:24 +02:00
Takashi Iwai
279eb50aa8 ASoC: Fixes for v6.17
A few fixes that came in during the past week: there are some updates
 for the CS35L56 which adjust the driver for production silicon, and a
 fix for buggy resume of the ES9389.
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEreZoqmdXGLWf4p/qJNaLcl1Uh9AFAmimWXAACgkQJNaLcl1U
 h9BGKAf+MczirBmF8BaLvCYqWRWmqhSMuNsKibzl8E3Acf2/NHhX1sdKsqOPss4L
 Lje68EfywFP8U8/iARv6Aiijm1oIpF6C6U5GAd5ArInfmkIgITGF+OYObAmZdVbm
 7NX8xSk4KgwCzZOv+3JG34wECmVtXrBnNpd7/Bo+RM+xxcxyUVUFcdMPuoUQvef9
 Jc2cRDn9N9Lo7qi9DgXaBBH/cW4cgic1+CWEmoxSMtQJ1hXaxgaRYskfb5j4Sb1f
 w1Lhw46eMMuTOkKQ1G8sJ0zqM/bS8ZkT0IFSSNDSRXXpLThPj/fNBjxYYBM01brG
 r0shD5ju/2OgS13wWMPkfijzaU5dWw==
 =nURg
 -----END PGP SIGNATURE-----

Merge tag 'asoc-fix-v6.17-rc2' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Fixes for v6.17

A few fixes that came in during the past week: there are some updates
for the CS35L56 which adjust the driver for production silicon, and a
fix for buggy resume of the ES9389.
2025-08-21 09:02:28 +02:00
Mark Brown
7c15e4cabf
ASoC: cs35l56: Fixes for CS35L63 for production
Merge series from Stefan Binding <sbinding@opensource.cirrus.com>:

Production silicon for CS35L63 has some small differences compared to
pre-production silicon, which require small fixes in the driver:
update the firmware addresses and tuning algorithm IDs, and remove
the SoundWire clock workaround, which is no longer necessary.

No product was ever released using pre-production silicon, so there
is no need to keep support for it.
2025-08-20 19:56:20 +01:00
Stefan Binding
8d13d1bdb5
ASoC: cs35l56: Remove SoundWire Clock Divider workaround for CS35L63
Production silicon for CS35L63 has some small differences compared to
pre-production silicon. Remove the SoundWire clock workaround, which
is no longer necessary; we don't want to do tricks with low-level
clocking controls if we don't need to.

Fixes: 978858791c ("ASoC: cs35l56: Add initial support for CS35L63 for I2C and SoundWire")

Signed-off-by: Stefan Binding <sbinding@opensource.cirrus.com>
Link: https://patch.msgid.link/20250820142209.127575-4-sbinding@opensource.cirrus.com
Signed-off-by: Mark Brown <broonie@kernel.org>
2025-08-20 15:27:02 +01:00
Richard Fitzgerald
8dadc11b67
ASoC: cs35l56: Handle new algorithms IDs for CS35L63
CS35L63 uses different algorithm IDs from CS35L56.
Add a new mechanism to handle different alg IDs between parts in the
CS35L56 driver.

Fixes: 978858791c ("ASoC: cs35l56: Add initial support for CS35L63 for I2C and SoundWire")

Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
Signed-off-by: Stefan Binding <sbinding@opensource.cirrus.com>
Link: https://patch.msgid.link/20250820142209.127575-3-sbinding@opensource.cirrus.com
Signed-off-by: Mark Brown <broonie@kernel.org>
2025-08-20 15:27:01 +01:00
Stefan Binding
f135fb24ef
ASoC: cs35l56: Update Firmware Addresses for CS35L63 for production silicon
Production silicon for CS35L63 has some small differences compared to
pre-production silicon. Update the firmware addresses, which differ.

No product was ever released with pre-production silicon so there is no
need for the driver to include support for it.

Fixes: 978858791c ("ASoC: cs35l56: Add initial support for CS35L63 for I2C and SoundWire")

Signed-off-by: Stefan Binding <sbinding@opensource.cirrus.com>
Link: https://patch.msgid.link/20250820142209.127575-2-sbinding@opensource.cirrus.com
Signed-off-by: Mark Brown <broonie@kernel.org>
2025-08-20 15:27:00 +01:00
Takashi Iwai
3f4422e7c9 ALSA: hda: tas2781: Fix wrong reference of tasdevice_priv
During the conversion to unify the calibration-data management, the
reference to tasdevice_priv was wrongly set to h->hda_priv instead of
h->priv, which eventually resulted in memory corruption and crashes.
Unfortunately it is a void pointer, so the compiler could not catch
the mistake.

Fixes: 4fe2385134 ("ALSA: hda/tas2781: Move and unified the calibrated-data getting function for SPI and I2C into the tas2781_hda lib")
Link: https://bugzilla.suse.com/show_bug.cgi?id=1248270
Cc: <stable@vger.kernel.org>
Link: https://patch.msgid.link/20250820051902.4523-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
2025-08-20 07:23:23 +02:00
David Hildenbrand
772e5b4a5e mm/mremap: fix WARN with uffd that has remap events disabled
Registering userfaultfd on a VMA that spans at least one PMD and then
mremap()'ing that VMA can trigger a WARN when recovering from a failed
page table move due to a page table allocation error.

The code ends up doing the right thing (recurse, avoiding moving actual
page tables), but triggering that WARN is unpleasant:

WARNING: CPU: 2 PID: 6133 at mm/mremap.c:357 move_normal_pmd mm/mremap.c:357 [inline]
WARNING: CPU: 2 PID: 6133 at mm/mremap.c:357 move_pgt_entry mm/mremap.c:595 [inline]
WARNING: CPU: 2 PID: 6133 at mm/mremap.c:357 move_page_tables+0x3832/0x44a0 mm/mremap.c:852
Modules linked in:
CPU: 2 UID: 0 PID: 6133 Comm: syz.0.19 Not tainted 6.17.0-rc1-syzkaller-00004-g53e760d89498 #0 PREEMPT(full)
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014
RIP: 0010:move_normal_pmd mm/mremap.c:357 [inline]
RIP: 0010:move_pgt_entry mm/mremap.c:595 [inline]
RIP: 0010:move_page_tables+0x3832/0x44a0 mm/mremap.c:852
Code: ...
RSP: 0018:ffffc900037a76d8 EFLAGS: 00010293
RAX: 0000000000000000 RBX: 0000000032930007 RCX: ffffffff820c6645
RDX: ffff88802e56a440 RSI: ffffffff820c7201 RDI: 0000000000000007
RBP: ffff888037728fc0 R08: 0000000000000007 R09: 0000000000000000
R10: 0000000032930007 R11: 0000000000000000 R12: 0000000000000000
R13: ffffc900037a79a8 R14: 0000000000000001 R15: dffffc0000000000
FS:  000055556316a500(0000) GS:ffff8880d68bc000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000001b30863fff CR3: 0000000050171000 CR4: 0000000000352ef0
Call Trace:
 <TASK>
 copy_vma_and_data+0x468/0x790 mm/mremap.c:1215
 move_vma+0x548/0x1780 mm/mremap.c:1282
 mremap_to+0x1b7/0x450 mm/mremap.c:1406
 do_mremap+0xfad/0x1f80 mm/mremap.c:1921
 __do_sys_mremap+0x119/0x170 mm/mremap.c:1977
 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
 do_syscall_64+0xcd/0x4c0 arch/x86/entry/syscall_64.c:94
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f00d0b8ebe9
Code: ...
RSP: 002b:00007ffe5ea5ee98 EFLAGS: 00000246 ORIG_RAX: 0000000000000019
RAX: ffffffffffffffda RBX: 00007f00d0db5fa0 RCX: 00007f00d0b8ebe9
RDX: 0000000000400000 RSI: 0000000000c00000 RDI: 0000200000000000
RBP: 00007ffe5ea5eef0 R08: 0000200000c00000 R09: 0000000000000000
R10: 0000000000000003 R11: 0000000000000246 R12: 0000000000000002
R13: 00007f00d0db5fa0 R14: 00007f00d0db5fa0 R15: 0000000000000005
 </TASK>

The underlying issue is that we recurse during the original page table
move, but not during the recovery move.

Fix it by checking for both VMAs and performing the check before the
pmd_none() sanity check.

Add a new helper where we perform+document that check for the PMD and PUD
level.
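
A hedged sketch of what such a helper could look like (name and exact
predicate hypothetical; the real patch documents the precise
conditions):

  /* A page-table-level move is only valid if BOTH the old and new VMA
   * fully cover the PMD/PUD-sized range; check this before the
   * pmd_none() sanity check so the recovery move recurses too. */
  static bool vmas_cover_pgtable_range(struct vm_area_struct *old_vma,
                                       unsigned long old_addr,
                                       struct vm_area_struct *new_vma,
                                       unsigned long new_addr,
                                       unsigned long size)
  {
          return old_addr >= old_vma->vm_start &&
                 old_addr + size <= old_vma->vm_end &&
                 new_addr >= new_vma->vm_start &&
                 new_addr + size <= new_vma->vm_end;
  }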

Thanks to Harry for bisecting.

Link: https://lkml.kernel.org/r/20250818175358.1184757-1-david@redhat.com
Fixes: 0cef0bb836 ("mm: clear uffd-wp PTE/PMD state on mremap()")
Signed-off-by: David Hildenbrand <david@redhat.com>
Reported-by: syzbot+4d9a13f0797c46a29e42@syzkaller.appspotmail.com
Closes: https://lkml.kernel.org/r/689bb893.050a0220.7f033.013a.GAE@google.com
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:57 -07:00
SeongJae Park
ba1dd7ac73 mm/damon/sysfs-schemes: put damos dests dir after removing its files
damon_sysfs_scheme_rm_dirs() puts dests directory kobject before removing
its internal files.  Sincee putting the kobject frees its container
struct, and the internal files removal accesses the container,
use-after-free happens.  Fix it by putting the reference _after_ removing
the files.
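
The fix, as a hedged sketch (helper name assumed, not the verbatim
patch):

  /* remove the sysfs files while their container is still alive ... */
  damon_sysfs_scheme_rm_dests_files(dests);
  /* ... and only then drop the reference that frees the container */
  kobject_put(&dests->kobj);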

Link: https://lkml.kernel.org/r/20250816165559.2601-1-sj@kernel.org
Fixes: 2cd0bf85a2 ("mm/damon/sysfs-schemes: implement DAMOS action destinations directory")
Signed-off-by: SeongJae Park <sj@kernel.org>
Reported-by: Alexandre Ghiti <alex@ghiti.fr>
Closes: https://lore.kernel.org/2d39a734-320d-4341-8f8a-4019eec2dbf2@ghiti.fr
Tested-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:57 -07:00
Huacai Chen
053c8ebe74 mm/migrate: fix NULL movable_ops if CONFIG_ZSMALLOC=m
After commit 84caf98838 ("mm: stop storing migration_ops in
page->mapping") we get such an error message if CONFIG_ZSMALLOC=m:

 WARNING: CPU: 3 PID: 42 at mm/migrate.c:142 isolate_movable_ops_page+0xa8/0x1c0
 CPU: 3 UID: 0 PID: 42 Comm: kcompactd0 Not tainted 6.16.0-rc5+ #2133 PREEMPT
 pc 9000000000540bd8 ra 9000000000540b84 tp 9000000100420000 sp 9000000100423a60
 a0 9000000100193a80 a1 000000000000000c a2 000000000000001b a3 ffffffffffffffff
 a4 ffffffffffffffff a5 0000000000000267 a6 0000000000000000 a7 9000000100423ae0
 t0 00000000000000f1 t1 00000000000000f6 t2 0000000000000000 t3 0000000000000001
 t4 ffffff00010eb834 t5 0000000000000040 t6 900000010c89d380 t7 90000000023fcc70
 t8 0000000000000018 u0 0000000000000000 s9 ffffff00010eb800 s0 ffffff00010eb800
 s1 000000000000000c s2 0000000000043ae0 s3 0000800000000000 s4 900000000219cc40
 s5 0000000000000000 s6 ffffff00010eb800 s7 0000000000000001 s8 90000000025b4000
    ra: 9000000000540b84 isolate_movable_ops_page+0x54/0x1c0
   ERA: 9000000000540bd8 isolate_movable_ops_page+0xa8/0x1c0
  CRMD: 000000b0 (PLV0 -IE -DA +PG DACF=CC DACM=CC -WE)
  PRMD: 00000004 (PPLV0 +PIE -PWE)
  EUEN: 00000000 (-FPE -SXE -ASXE -BTE)
  ECFG: 00071c1d (LIE=0,2-4,10-12 VS=7)
 ESTAT: 000c0000 [BRK] (IS= ECode=12 EsubCode=0)
  PRID: 0014c010 (Loongson-64bit, Loongson-3A5000)
 CPU: 3 UID: 0 PID: 42 Comm: kcompactd0 Not tainted 6.16.0-rc5+ #2133 PREEMPT
 Stack : 90000000021fd000 0000000000000000 9000000000247720 9000000100420000
         90000001004236a0 90000001004236a8 0000000000000000 90000001004237e8
         90000001004237e0 90000001004237e0 9000000100423550 0000000000000001
         0000000000000001 90000001004236a8 725a84864a19e2d9 90000000023fcc58
         9000000100420000 90000000024c6848 9000000002416848 0000000000000001
         0000000000000000 000000000000000a 0000000007fe0000 ffffff00010eb800
         0000000000000000 90000000021fd000 0000000000000000 900000000205cf30
         000000000000008e 0000000000000009 ffffff00010eb800 0000000000000001
         90000000025b4000 0000000000000000 900000000024773c 00007ffff103d748
         00000000000000b0 0000000000000004 0000000000000000 0000000000071c1d
         ...
 Call Trace:
 [<900000000024773c>] show_stack+0x5c/0x190
 [<90000000002415e0>] dump_stack_lvl+0x70/0x9c
 [<90000000004abe6c>] isolate_migratepages_block+0x3bc/0x16e0
 [<90000000004af408>] compact_zone+0x558/0x1000
 [<90000000004b0068>] compact_node+0xa8/0x1e0
 [<90000000004b0aa4>] kcompactd+0x394/0x410
 [<90000000002b3c98>] kthread+0x128/0x140
 [<9000000001779148>] ret_from_kernel_thread+0x28/0xc0
 [<9000000000245528>] ret_from_kernel_thread_asm+0x10/0x88

The reason is that defined(CONFIG_ZSMALLOC) evaluates to 1 only when
CONFIG_ZSMALLOC=y; we should use IS_ENABLED(CONFIG_ZSMALLOC) instead.
But with IS_ENABLED(CONFIG_ZSMALLOC), page_movable_ops() cannot access
zsmalloc_mops, because zsmalloc_mops is in a module.

To solve this problem, we define a set_movable_ops() interface to register
and unregister offline_movable_ops / zsmalloc_movable_ops in mm/migrate.c,
and call them at mm/balloon_compaction.c & mm/zsmalloc.c.  Since
offline_movable_ops / zsmalloc_movable_ops are always accessible, all
#ifdef / #endif are removed in page_movable_ops().
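
A hedged sketch of the registration interface described above
(signature and enum values assumed, not the verbatim patch):

  /* mm/migrate.c: a NULL ops pointer unregisters */
  int set_movable_ops(const struct movable_operations *ops,
                      enum pagetype type);

  /* mm/zsmalloc.c, module init and exit: */
  set_movable_ops(&zsmalloc_mops, PGTY_zsmalloc);
  set_movable_ops(NULL, PGTY_zsmalloc);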

Link: https://lkml.kernel.org/r/20250817151759.2525174-1-chenhuacai@loongson.cn
Fixes: 84caf98838 ("mm: stop storing migration_ops in page->mapping")
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Huacai Chen <chenhuacai@loongson.cn>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:57 -07:00
Sang-Heon Jeon
b3dee902b6 mm/damon/core: fix damos_commit_filter not changing allow
The current damos_commit_filter() does not persist the `allow' value
of the filter.  As a result, changing the `allow' value of a filter
and committing it has no effect.

Add the missing `allow' value update, so committing the filter
changes the `allow' value as expected.
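
The missing update, as a hedged sketch of damos_commit_filter() (the
other copied fields are condensed):

  static void damos_commit_filter(struct damos_filter *dst,
                                  struct damos_filter *src)
  {
          dst->type = src->type;
          dst->matching = src->matching;
          dst->allow = src->allow;    /* previously not copied */
  }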

Link: https://lkml.kernel.org/r/20250816015116.194589-1-ekffu200098@gmail.com
Fixes: fe6d7fdd62 ("mm/damon/core: add damos_filter->allow field")
Signed-off-by: Sang-Heon Jeon <ekffu200098@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: <stable@vger.kernel.org>	[6.14.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:57 -07:00
Jinjiang Tu
2e6053fea3 mm/memory-failure: fix infinite UCE for VM_PFNMAP pfn
When memory_failure() is called for an already-hwpoisoned pfn,
kill_accessing_process() will be called to kill the current task.
However, if the VMA of the accessing vaddr is VM_PFNMAP,
walk_page_range() will skip the VMA in walk_page_test() and return 0.

Before commit aaf99ac2ce ("mm/hwpoison: do not send SIGBUS to processes
with recovered clean pages"), kill_accessing_process() will return EFAULT.
For x86, the current task will be killed in kill_me_maybe().

However, after this commit, kill_accessing_process() simply returns 0,
which claims the UCE was handled properly when it actually wasn't.  In
such a case, the user task will trigger the UCE infinitely.

To fix it, add a .test_walk callback to hwpoison_walk_ops so that all
VMAs are scanned.
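
A hedged sketch of the callback (the pmd_entry handler shown is an
assumption about the surrounding code): supplying .test_walk overrides
walk_page_test()'s default skipping of VM_PFNMAP VMAs.

  static int hwpoison_test_walk(unsigned long start, unsigned long end,
                                struct mm_walk *walk)
  {
          return 0;       /* 0 = walk this VMA, never skip it */
  }

  static const struct mm_walk_ops hwpoison_walk_ops = {
          .pmd_entry = hwpoison_pte_range,
          .test_walk = hwpoison_test_walk,
  };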

Link: https://lkml.kernel.org/r/20250815073209.1984582-1-tujinjiang@huawei.com
Fixes: aaf99ac2ce ("mm/hwpoison: do not send SIGBUS to processes with recovered clean pages")
Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Shuai Xue <xueshuai@linux.alibaba.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:56 -07:00
Axel Rasmussen
44958000ba MAINTAINERS: mark MGLRU as maintained
The three folks being added here are actively working on MGLRU within
Google, so we can review patches for this feature and plan to contribute
some improvements / extensions to it on an ongoing basis.

With three of us we may have some hope filling Yu Zhao's shoes, since he
has moved on to other projects these days.

Link: https://lkml.kernel.org/r/20250815215914.3671925-1-axelrasmussen@google.com
Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Wei Xu <weixugc@google.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:56 -07:00
Alice Ryhl
c7b70f76db mm: rust: add page.rs to MEMORY MANAGEMENT - RUST
The page.rs file currently isn't included anywhere, and I think it's a
good fit for the MEMORY MANAGEMENT - RUST entry.  The file was originally
added for use by Rust Binder, but I believe there is also work to use it
in the upcoming scatterlist abstractions.

Link: https://lkml.kernel.org/r/20250814075454.1596482-1-aliceryhl@google.com
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Acked-by: Danilo Krummrich <dakr@kernel.org>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:56 -07:00
Dominique Martinet
808471ddb0 iov_iter: iterate_folioq: fix handling of offset >= folio size
It's apparently possible to get an iov advanced all the way up to the end
of the current page we're looking at, e.g.

(gdb) p *iter
$24 = {iter_type = 4 '\004', nofault = false, data_source = false, iov_offset = 4096, {__ubuf_iovec = {
      iov_base = 0xffff88800f5bc000, iov_len = 655}, {{__iov = 0xffff88800f5bc000, kvec = 0xffff88800f5bc000,
        bvec = 0xffff88800f5bc000, folioq = 0xffff88800f5bc000, xarray = 0xffff88800f5bc000,
        ubuf = 0xffff88800f5bc000}, count = 655}}, {nr_segs = 2, folioq_slot = 2 '\002', xarray_start = 2}}

where iov_offset is 4k with 4k-sized folios.

This should have been fine, because we're only in the 2nd slot and
there's another one after this.  But iterate_folioq should not try to
map a folio that the offset skips entirely, and more importantly
'part' here does not end up zero (because 'PAGE_SIZE - skip %
PAGE_SIZE' evaluates to PAGE_SIZE, not zero), so skip forward to the
"advance to next folio" code.
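
A hedged sketch of the corner case, simplified from the iterate_folioq
macro (the label name is hypothetical):

  size_t fsize = folioq_folio_size(folioq, slot);

  /* with skip == fsize, 'PAGE_SIZE - skip % PAGE_SIZE' evaluates to
   * PAGE_SIZE rather than 0, so 'part' would not be zero; advance to
   * the next folio instead of mapping this one */
  if (skip >= fsize)
          goto advance_to_next_folio;
  part = min_t(size_t, len, PAGE_SIZE - skip % PAGE_SIZE);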

Link: https://lkml.kernel.org/r/20250813-iot_iter_folio-v3-0-a0ffad2b665a@codewreck.org
Link: https://lkml.kernel.org/r/20250813-iot_iter_folio-v3-1-a0ffad2b665a@codewreck.org
Signed-off-by: Dominique Martinet <asmadeus@codewreck.org>
Fixes: db0aa2e956 ("mm: Define struct folio_queue and ITER_FOLIOQ to handle a sequence of folios")
Reported-by: Maximilian Bosch <maximilian@mbosch.me>
Reported-by: Ryan Lahfa <ryan@lahfa.xyz>
Reported-by: Christian Theune <ct@flyingcircus.io>
Reported-by: Arnout Engelen <arnout@bzzt.net>
Link: https://lkml.kernel.org/r/D4LHHUNLG79Y.12PI0X6BEHRHW@mbosch.me/
Acked-by: David Howells <dhowells@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: <stable@vger.kernel.org>	[6.12+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:56 -07:00
Sang-Heon Jeon
0cc2a4880c selftests/damon: fix selftests by installing drgn related script
drgn_dump_damon_status is not installed during kselftest setup, which
can break other tests that depend on it.  Install the
drgn_dump_damon_status files to fix the broken tests.

Link: https://lkml.kernel.org/r/20250812140046.660486-1-ekffu200098@gmail.com
Fixes: f3e8e1e513 ("selftests/damon: add drgn script for extracting damon status")
Signed-off-by: Sang-Heon Jeon <ekffu200098@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Honggyu Kim <honggyu.kim@sk.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:55 -07:00
Easwar Hariharan
8b26f0a8b4 .mailmap: add entry for Easwar Hariharan
Map my old, obsolete work email address to my current one.

Link: https://lkml.kernel.org/r/20250812180218.92755-1-easwar.hariharan@linux.microsoft.com
Signed-off-by: Easwar Hariharan <easwar.hariharan@linux.microsoft.com>
Cc: Carlos Bilbao <carlos.bilbao@kernel.org>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Shannon Nelson <sln@onemain.com>
Cc: Dmitry Baryshkov <lumag@kernel.org>
Cc: Hans Verkuil <hverkuil@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:55 -07:00
Lorenzo Stoakes
742d3663a5 selftests/mm: add test for invalid multi VMA operations
We can use UFFD to easily assert invalid multi VMA moves, so do so,
asserting expected behaviour when VMAs invalid for a multi VMA operation
are encountered.

We assert both that such operations are not permitted, and that we do not
even attempt to move the first VMA under these circumstances.

We also assert that we can still move a single VMA regardless.

We then assert that a partial failure can occur if the invalid VMA appears
later in the range of multiple VMAs, both at the very next VMA, and also at
the end of the range.

As part of this change, we are using the is_range_valid() helper more
aggressively. Therefore, fix a bug where stale buffered data would hang
around on success, causing subsequent calls to is_range_valid() to
potentially give invalid results.

We simply have to fflush() the stream on success to resolve this issue.

Link: https://lkml.kernel.org/r/c4fb86dd5ba37610583ad5fc0e0c2306ddf318b9.1754218667.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:55 -07:00
Lorenzo Stoakes
d5f416c7c3 mm/mremap: catch invalid multi VMA moves earlier
Previously, any attempt to solely move a VMA would require that the
span specified reside within the span of that single VMA, with no gaps
before or afterwards.

After commit d23cb648e3 ("mm/mremap: permit mremap() move of multiple
VMAs"), the multi VMA move permitted a gap to exist only after VMAs. 
This was done to provide maximum flexibility.

However, we have consequently permitted this behaviour for the move of
a single VMA, including VMAs not eligible for a multi-VMA move.

The change introduced here means that we no longer permit non-eligible
VMAs from being moved in this way.

This is consistent, as it means all eligible VMA moves are treated the
same, and all non-eligible moves are treated as they were before.

This change does not break previous behaviour, which equally would have
disallowed such a move (only in all cases).

[lorenzo.stoakes@oracle.com: do not incorrectly reference invalid VMA in VM_WARN_ON_ONCE()]
  Link: https://lkml.kernel.org/r/b6dbda20-667e-4053-abae-8ed4fa84bb6c@lucifer.local
Link: https://lkml.kernel.org/r/2b5aad5681573be85b5b8fac61399af6fb6b68b6.1754218667.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:55 -07:00
Lorenzo Stoakes
7c91e0b91a mm/mremap: allow multi-VMA move when filesystem uses thp_get_unmapped_area
The multi-VMA move functionality introduced in commit d23cb648e3
("mm/mremap: permit mremap() move of multiple VMA") doesn't allow moves of
file-backed mappings which specify a custom f_op->get_unmapped_area
handler excepting hugetlb and shmem.

We expand this to include thp_get_unmapped_area to support file-backed
mappings for filesystems which use large folios.

Additionally, when the first VMA in a range is not compatible with a
multi-VMA move, instead of moving the first VMA and returning an error,
this series results in us not moving anything and returning an error
immediately.

Examining this second change in detail:

The semantics of multi-VMA moves in mremap() very clearly indicate that a
failure can result in a partial move of VMAs.

This is in line with other aggregate operations within the kernel, which
share these semantics.

There are two classes of failures we're concerned with: eligibility
for multi-VMA move, and transient failures that would occur even if
the user individually moved each VMA.

The latter is due to out-of-memory conditions (which, given the
allocations involved are small, would likely be fatal in any case), or
hitting the mapping limit.

Regardless of the cause, transient issues would be fatal anyway, so it
isn't really material which VMAs succeeded at being moved or not.

However, when it comes to multi-VMA move eligibility, we face another
issue: we must allow a single-VMA move to succeed regardless of this
eligibility (as, of course, it is not a multi-VMA move), but we must
then fail multi-VMA operations.

The two means by which VMAs may fail the eligibility test are - the VMAs
being UFFD-armed, or the VMA being file-backed and providing its own
f_op->get_unmapped_area() helper (because this may result in MREMAP_FIXED
being disregarded), excepting those known to correctly handle
MREMAP_FIXED.

It is therefore conceivable that a user could erroneously try to use this
functionality in these instances, and would prefer to not perform any move
at all should that occur.

This series therefore avoids any move of subsequent VMAs should the first
be multi-VMA move ineligible and the input span exceed that of the first
VMA.

We also add detailed test logic to assert that multi-VMA move with
ineligible VMAs functions as expected.


This patch (of 3):

We currently restrict multi-VMA move to avoid filesystems or drivers which
provide a custom f_op->get_unmapped_area handler unless it is known to
correctly handle MREMAP_FIXED.

We do this so we do not get unexpected result when moving from one area to
another (for instance, if the handler would align things resulting in the
moved VMAs having different gaps than the original mapping).

More and more filesystems are moving to using large folios, and typically
do so (in part) by setting f_op->get_unmapped_area to
thp_get_unmapped_area.

When mremap() invokes the filesystem's get_unmapped_area handler for
MREMAP_FIXED, it does so via get_unmapped_area(), called in
vrm_set_new_addr().  In order to do so, it converts the MREMAP_FIXED flag
to a MAP_FIXED flag and passes this to the unmapped area handler.

The __get_unmapped_area() function (called by get_unmapped_area()) in turn
invokes the filesystem or driver's f_op->get_unmapped_area() handler.

Therefore this is a point at which thp_get_unmapped_area() may be called
(also, this is the case for anonymous mappings where the size is huge page
aligned).

thp_get_unmapped_area() calls thp_get_unmapped_area_vmflags() and
__thp_get_unmapped_area() in turn (falling back to
mm_get_unmapped_area_vm_flags() which is known to handle MAP_FIXED
correctly).

The __thp_get_unmapped_area() function in turn does nothing to change the
address hint, nor the MAP_FIXED flag, only adjusting alignment parameters.
It then calls mm_get_unmapped_area_vmflags(), and in turn arch-specific
unmapped area functions, all of which honour MAP_FIXED correctly.

Therefore, we can safely add thp_get_unmapped_area to the known-good
handlers.

Link: https://lkml.kernel.org/r/cover.1754218667.git.lorenzo.stoakes@oracle.com
Link: https://lkml.kernel.org/r/4f2542340c29c84d3d470b0c605e916b192f6c81.1754218667.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:54 -07:00
Sang-Heon Jeon
63f5dec167 mm/damon/core: fix commit_ops_filters by using correct nth function
damos_commit_ops_filters() incorrectly uses damos_nth_filter() which
iterates core_filters.  As a result, performing a commit unintentionally
corrupts ops_filters.

Add damos_nth_ops_filter(), which iterates ops_filters.  Use this function
to fix the issues caused by the wrong iteration.
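
A minimal userspace sketch of the bug pattern (names are illustrative,
not the DAMON code itself): an nth-element helper hardwired to one list
is used to index into another, so the caller silently mutates entries of
the wrong list.

    #include <stdio.h>

    struct filter { int val; struct filter *next; };
    struct scheme { struct filter *core_filters, *ops_filters; };

    /* Walks core_filters only -- analogous to damos_nth_filter(). */
    static struct filter *nth_core_filter(struct scheme *s, int n)
    {
            struct filter *f = s->core_filters;
            while (f && n--)
                    f = f->next;
            return f;
    }

    /* The fix adds a sibling that walks ops_filters instead --
     * analogous to damos_nth_ops_filter(). */
    static struct filter *nth_ops_filter(struct scheme *s, int n)
    {
            struct filter *f = s->ops_filters;
            while (f && n--)
                    f = f->next;
            return f;
    }

    int main(void)
    {
            struct filter core = { 1, 0 }, ops = { 2, 0 };
            struct scheme s = { &core, &ops };

            nth_core_filter(&s, 0)->val = 9; /* corrupts core, not ops */
            nth_ops_filter(&s, 0)->val = 9;  /* the intended target */
            printf("core=%d ops=%d\n", core.val, ops.val);
            return 0;
    }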

Link: https://lkml.kernel.org/r/20250810124201.15743-1-ekffu200098@gmail.com
Fixes: 3607cc590f ("mm/damon/core: support committing ops_filters") # 6.15.x
Signed-off-by: Sang-Heon Jeon <ekffu200098@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:54 -07:00
Lorenzo Stoakes
9a6a6a3191 tools/testing: add linux/args.h header and fix radix, VMA tests
Commit 857d18f23a ("cleanup: Introduce ACQUIRE() and ACQUIRE_ERR() for
conditional locks") accidentally broke the radix tree, VMA userland tests
by including linux/args.h which is not present in the tools/include
directory.

This patch copies the header over and adds an #ifdef block to avoid a
duplicate __CONCAT declaration conflicting with system headers when we
ultimately include it.

Link: https://lkml.kernel.org/r/20250811052654.33286-1-lorenzo.stoakes@oracle.com
Fixes: 857d18f23a ("cleanup: Introduce ACQUIRE() and ACQUIRE_ERR() for conditional locks") 
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Jann Horn <jannh@google.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:54 -07:00
Herton R. Krzesinski
dde30854bd mm/debug_vm_pgtable: clear page table entries at destroy_args()
The mm/debug_vm_pgtable test manually allocates page table entries for
the tests it runs, also using its own manually allocated mm_struct.  That
in itself is ok, but when it exits, at destroy_args(), it fails to clear
those entries with the *_clear functions.

The problem is that this leaves stale entries.  If another process
allocates an mm_struct with a pgd at the same address, it may end up
running into the stale entry.  This is happening in practice on a debug
kernel with
CONFIG_DEBUG_VM_PGTABLE=y, for example this is the output with some extra
debugging I added (it prints a warning trace if pgtables_bytes goes
negative, in addition to the warning at check_mm() function):

[    2.539353] debug_vm_pgtable: [get_random_vaddr         ]: random_vaddr is 0x7ea247140000
[    2.539366] kmem_cache info
[    2.539374] kmem_cachep 0x000000002ce82385 - freelist 0x0000000000000000 - offset 0x508
[    2.539447] debug_vm_pgtable: [init_args                ]: args->mm is 0x000000002267cc9e
(...)
[    2.552800] WARNING: CPU: 5 PID: 116 at include/linux/mm.h:2841 free_pud_range+0x8bc/0x8d0
[    2.552816] Modules linked in:
[    2.552843] CPU: 5 UID: 0 PID: 116 Comm: modprobe Not tainted 6.12.0-105.debug_vm2.el10.ppc64le+debug #1 VOLUNTARY
[    2.552859] Hardware name: IBM,9009-41A POWER9 (architected) 0x4e0202 0xf000005 of:IBM,FW910.00 (VL910_062) hv:phyp pSeries
[    2.552872] NIP:  c0000000007eef3c LR: c0000000007eef30 CTR: c0000000003d8c90
[    2.552885] REGS: c0000000622e73b0 TRAP: 0700   Not tainted  (6.12.0-105.debug_vm2.el10.ppc64le+debug)
[    2.552899] MSR:  800000000282b033 <SF,VEC,VSX,EE,FP,ME,IR,DR,RI,LE>  CR: 24002822  XER: 0000000a
[    2.552954] CFAR: c0000000008f03f0 IRQMASK: 0
[    2.552954] GPR00: c0000000007eef30 c0000000622e7650 c000000002b1ac00 0000000000000001
[    2.552954] GPR04: 0000000000000008 0000000000000000 c0000000007eef30 ffffffffffffffff
[    2.552954] GPR08: 00000000ffff00f5 0000000000000001 0000000000000048 0000000000004000
[    2.552954] GPR12: 00000003fa440000 c000000017ffa300 c0000000051d9f80 ffffffffffffffdb
[    2.552954] GPR16: 0000000000000000 0000000000000008 000000000000000a 60000000000000e0
[    2.552954] GPR20: 4080000000000000 c0000000113af038 00007fffcf130000 0000700000000000
[    2.552954] GPR24: c000000062a6a000 0000000000000001 8000000062a68000 0000000000000001
[    2.552954] GPR28: 000000000000000a c000000062ebc600 0000000000002000 c000000062ebc760
[    2.553170] NIP [c0000000007eef3c] free_pud_range+0x8bc/0x8d0
[    2.553185] LR [c0000000007eef30] free_pud_range+0x8b0/0x8d0
[    2.553199] Call Trace:
[    2.553207] [c0000000622e7650] [c0000000007eef30] free_pud_range+0x8b0/0x8d0 (unreliable)
[    2.553229] [c0000000622e7750] [c0000000007f40b4] free_pgd_range+0x284/0x3b0
[    2.553248] [c0000000622e7800] [c0000000007f4630] free_pgtables+0x450/0x570
[    2.553274] [c0000000622e78e0] [c0000000008161c0] exit_mmap+0x250/0x650
[    2.553292] [c0000000622e7a30] [c0000000001b95b8] __mmput+0x98/0x290
[    2.558344] [c0000000622e7a80] [c0000000001d1018] exit_mm+0x118/0x1b0
[    2.558361] [c0000000622e7ac0] [c0000000001d141c] do_exit+0x2ec/0x870
[    2.558376] [c0000000622e7b60] [c0000000001d1ca8] do_group_exit+0x88/0x150
[    2.558391] [c0000000622e7bb0] [c0000000001d1db8] sys_exit_group+0x48/0x50
[    2.558407] [c0000000622e7be0] [c00000000003d810] system_call_exception+0x1e0/0x4c0
[    2.558423] [c0000000622e7e50] [c00000000000d05c] system_call_vectored_common+0x15c/0x2ec
(...)
[    2.558892] ---[ end trace 0000000000000000 ]---
[    2.559022] BUG: Bad rss-counter state mm:000000002267cc9e type:MM_ANONPAGES val:1
[    2.559037] BUG: non-zero pgtables_bytes on freeing mm: -6144

Here the modprobe process ended up with an allocated mm_struct from the
mm_struct slab that was used before by the debug_vm_pgtable test.  That is
not a problem, since the mm_struct is initialized again etc., however, if
it ends up using the same pgd table, it bumps into the old stale entry
when clearing/freeing the page table entries, so it tries to free an entry
already gone (that one which was allocated by the debug_vm_pgtable test),
which also explains the negative pgtables_bytes since it's accounting for
not allocated entries in the current process.

As far as I looked, pgd_{alloc,free} etc.  do not clear entries, and
clearing of the entries is explicitly done in the free_pgtables->
free_pgd_range->free_p4d_range->free_pud_range->free_pmd_range->
free_pte_range path.  However, the debug_vm_pgtable test does not call
free_pgtables, since it allocates the mm_struct and entries manually for
its test and e.g. does not go through page faults.  So it should also
clear the entries manually before exiting, at destroy_args().

This problem was noticed in a test that reboots the host a number of
times, on a powerpc host with a debug kernel with CONFIG_DEBUG_VM_PGTABLE
enabled.  Depending on the system, in a loop of 100 reboots the problem
could manifest once or twice, if a process ends up getting the right
mm->pgd entry with the stale entries used by mm/debug_vm_pgtable.  After
using this patch, I couldn't reproduce/experience the problems anymore.
I was also able to reproduce the problem on the latest upstream kernel
(6.16).

I also modified destroy_args() to use mmput() instead of mmdrop(); there
is no reason to hold an mm_users reference and not release the mm_struct
entirely.  In the output above with my debugging prints, I had already
patched it to use mmput(); that did not fix the problem, but helped in
the debugging as well.
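
A kernel-style sketch of the idea (illustrative only; the field names do
not match the exact struct pgtable_debug_args layout):

    static void destroy_args(struct pgtable_debug_args *args)
    {
            /* Clear what the tests populated, so no stale entries
             * survive in a pgd page that another mm may reuse. */
            pte_clear(args->mm, args->vaddr, args->ptep);
            pmd_clear(args->pmdp);
            pud_clear(args->pudp);
            p4d_clear(args->p4dp);
            pgd_clear(args->pgdp);

            /* Drop the mm_users reference and release the mm_struct
             * entirely, rather than mmdrop() as before. */
            mmput(args->mm);
    }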

Link: https://lkml.kernel.org/r/20250731214051.4115182-1-herton@redhat.com
Fixes: 3c9b84f044 ("mm/debug_vm_pgtable: introduce struct pgtable_debug_args")
Signed-off-by: Herton R. Krzesinski <herton@redhat.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Gavin Shan <gshan@redhat.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:54 -07:00
Phillip Lougher
b64700d41b squashfs: fix memory leak in squashfs_fill_super
If sb_min_blocksize returns 0, squashfs_fill_super exits without freeing
allocated memory (sb->s_fs_info).

Fix this by moving the call to sb_min_blocksize to before memory is
allocated.
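
A sketch of the reordering (illustrative; the real function carries on
with much more setup and error handling):

    static int squashfs_fill_super(struct super_block *sb,
                                   struct fs_context *fc)
    {
            struct squashfs_sb_info *msblk;
            int devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);

            /* Fail before any allocation: nothing can leak yet. */
            if (!devblksize)
                    return -EINVAL;

            msblk = kzalloc(sizeof(*msblk), GFP_KERNEL);
            if (!msblk)
                    return -ENOMEM;
            sb->s_fs_info = msblk;
            msblk->devblksize = devblksize;
            /* ... remainder of fill_super ... */
            return 0;
    }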

Link: https://lkml.kernel.org/r/20250811223740.110392-1-phillip@squashfs.org.uk
Fixes: 734aa85390 ("Squashfs: check return result of sb_min_blocksize")
Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
Reported-by: Scott GUO <scottzhguo@tencent.com>
Closes: https://lore.kernel.org/all/20250811061921.3807353-1-scott_gzh@163.com
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:53 -07:00
Pasha Tatashin
44958f2025 kho: warn if KHO is disabled due to an error
During boot, the scratch area is allocated based on command line
parameters or auto-calculated.  However, the scratch area may fail to
allocate, and in that case KHO is disabled.  Currently, no warning is
printed that KHO is disabled, which makes it confusing for the end user
to figure out why KHO is not available.  Add the missing warning message.
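
The shape of the change is roughly (a sketch; variable names are
illustrative, not the exact KHO symbols):

    if (!scratch) {
            pr_warn("KHO is disabled, failed to allocate scratch area\n");
            kho_enable = false;
            return;
    }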

Link: https://lkml.kernel.org/r/20250808201804.772010-4-pasha.tatashin@soleen.com
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Pratyush Yadav <pratyush@kernel.org>
Cc: Alexander Graf <graf@amazon.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Baoquan He <bhe@redhat.com>
Cc: Changyuan Lyu <changyuanl@google.com>
Cc: Coiby Xu <coxu@redhat.com>
Cc: Dave Vasilevsky <dave@vasilevsky.ca>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Kees Cook <kees@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:53 -07:00
Pasha Tatashin
8b66ed2c3f kho: mm: don't allow deferred struct page with KHO
KHO uses struct pages for the preserved memory early in boot, however,
with deferred struct page initialization, only a small portion of memory
has properly initialized struct pages.

This problem was detected where the vmemmap is poisoned and illegal flag
combinations are reported.

Don't allow the two to be enabled together; later we will have to teach
KHO to work properly with the deferred struct page init kernel feature.

Link: https://lkml.kernel.org/r/20250808201804.772010-3-pasha.tatashin@soleen.com
Fixes: 4e1d010e3b ("kexec: add config option for KHO")
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Pratyush Yadav <pratyush@kernel.org>
Cc: Alexander Graf <graf@amazon.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Baoquan He <bhe@redhat.com>
Cc: Changyuan Lyu <changyuanl@google.com>
Cc: Coiby Xu <coxu@redhat.com>
Cc: Dave Vasilevsky <dave@vasilevsky.ca>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Kees Cook <kees@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:53 -07:00
Pasha Tatashin
63b17b653d kho: init new_physxa->phys_bits to fix lockdep
Patch series "Several KHO Hotfixes".

Three unrelated fixes for Kexec Handover.


This patch (of 3):

Lockdep shows the following warning:

INFO: trying to register non-static key.  The code is fine but needs
lockdep annotation, or maybe you didn't initialize this object before use?
turning off the locking correctness validator.

[<ffffffff810133a6>] dump_stack_lvl+0x66/0xa0
[<ffffffff8136012c>] assign_lock_key+0x10c/0x120
[<ffffffff81358bb4>] register_lock_class+0xf4/0x2f0
[<ffffffff813597ff>] __lock_acquire+0x7f/0x2c40
[<ffffffff81360cb0>] ? __pfx_hlock_conflict+0x10/0x10
[<ffffffff811707be>] ? native_flush_tlb_global+0x8e/0xa0
[<ffffffff8117096e>] ? __flush_tlb_all+0x4e/0xa0
[<ffffffff81172fc2>] ? __kernel_map_pages+0x112/0x140
[<ffffffff813ec327>] ? xa_load_or_alloc+0x67/0xe0
[<ffffffff81359556>] lock_acquire+0xe6/0x280
[<ffffffff813ec327>] ? xa_load_or_alloc+0x67/0xe0
[<ffffffff8100b9e0>] _raw_spin_lock+0x30/0x40
[<ffffffff813ec327>] ? xa_load_or_alloc+0x67/0xe0
[<ffffffff813ec327>] xa_load_or_alloc+0x67/0xe0
[<ffffffff813eb4c0>] kho_preserve_folio+0x90/0x100
[<ffffffff813ebb7f>] __kho_finalize+0xcf/0x400
[<ffffffff813ebef4>] kho_finalize+0x34/0x70

This is because the xarray has its own lock, which is not initialized in
xa_load_or_alloc().

Modify __kho_preserve_order() to properly call
xa_init(&new_physxa->phys_bits);
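
A minimal sketch of the fix (kernel-style; the surrounding allocation is
abbreviated): the embedded xarray, and with it its internal lock, must be
initialized before the xarray is first locked.

    new_physxa = kzalloc(sizeof(*new_physxa), GFP_KERNEL);
    if (!new_physxa)
            return -ENOMEM;

    /* Without this, lockdep sees a non-static, uninitialized lock key
     * the first time xa_load()/xa_store() takes the xarray lock. */
    xa_init(&new_physxa->phys_bits);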

Link: https://lkml.kernel.org/r/20250808201804.772010-2-pasha.tatashin@soleen.com
Fixes: fc33e4b44b ("kexec: enable KHO support for memory preservation")
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Alexander Graf <graf@amazon.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Baoquan He <bhe@redhat.com>
Cc: Changyuan Lyu <changyuanl@google.com>
Cc: Coiby Xu <coxu@redhat.com>
Cc: Dave Vasilevsky <dave@vasilevsky.ca>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Pratyush Yadav <pratyush@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-08-19 16:35:53 -07:00
Trond Myklebust
76d2e3890f NFS: Fix a race when updating an existing write
After nfs_lock_and_join_requests() tests for whether the request is
still attached to the mapping, nothing prevents a call to
nfs_inode_remove_request() from succeeding until we actually lock the
page group.
The reason is that whoever called nfs_inode_remove_request() doesn't
necessarily have a lock on the page group head.

So in order to avoid races, let's take the page group lock earlier in
nfs_lock_and_join_requests(), and hold it across the removal of the
request in nfs_inode_remove_request().

Reported-by: Jeff Layton <jlayton@kernel.org>
Tested-by: Joe Quanaim <jdq@meta.com>
Tested-by: Andrew Steffen <aksteffen@meta.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Fixes: bd37d6fce1 ("NFSv4: Convert nfs_lock_and_join_requests() to use nfs_page_find_head_request()")
Cc: stable@vger.kernel.org
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
2025-08-19 11:16:02 -07:00
David Howells
453a6d2a68 cifs: Fix oops due to uninitialised variable
Fix smb3_init_transform_rq() to initialise buffer to NULL before calling
netfs_alloc_folioq_buffer() as netfs assumes it can append to the buffer it
is given.  Setting it to NULL means it should start a fresh buffer, but the
value is currently undefined.
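
A sketch of the pattern (argument list abbreviated; not the exact cifs
diff):

    struct folio_queue *buffer = NULL; /* was uninitialised stack data */
    size_t buffer_size = 0;
    int rc;

    /* netfs appends to whatever buffer it is handed; NULL means
     * "start a fresh folio_queue", while a garbage pointer would be
     * dereferenced as an existing queue. */
    rc = netfs_alloc_folioq_buffer(NULL, &buffer, &buffer_size,
                                   len, GFP_NOFS);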

Fixes: a2906d3316 ("cifs: Switch crypto buffer to use a folio_queue rather than an xarray")
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Steve French <sfrench@samba.org>
cc: Paulo Alcantara <pc@manguebit.org>
cc: linux-cifs@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Steve French <stfrench@microsoft.com>
2025-08-19 11:16:14 -05:00
Kailang Yang
f4b3cef55f ALSA: hda/realtek: Audio disappears on HP 15-fc000 after warm boot again
There was a similar bug in the past (Bug 217440), which was fixed for
this laptop.
The same issue is occurring again as of kernel v6.12.2. The symptoms
are very similar - initially audio works but after a warm reboot, the
audio completely disappears until the computer is powered off (there
is no audio output at all).

The issue is related but caused by a different change now. By
bisecting different kernel versions, I found that reverting
cc3d0b5dd9 in patch_realtek.c [*] restores the sound and it works
fine after the reboot.

[*] https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/sound/pci/hda/patch_realtek.c?h=v6.12.2&id=4ed7f16070a8475c088ff423b2eb11ba15eb89b6

[ patch description reformatted by tiwai ]

Fixes: cc3d0b5dd9 ("ALSA: hda/realtek: Update ALC256 depop procedure")
Link: https://bugzilla.kernel.org/show_bug.cgi?id=220109
Signed-off-by: Kailang Yang <kailang@realtek.com>
Link: https://lore.kernel.org/5317ca723c82447a938414fcca85cbf5@realtek.com
Signed-off-by: Takashi Iwai <tiwai@suse.de>
2025-08-19 08:26:15 +02:00
Vasiliy Kovalev
018f659753 ALSA: hda/realtek: Fix headset mic on ASUS Zenbook 14
Add a PCI quirk to enable microphone input on the headphone jack on
the ASUS Zenbook 14 UM3406HA laptop.

This model uses an ALC294 codec with CS35L41 amplifiers over I2C,
and the existing fixup for it did not enable the headset microphone.
A new fix is introduced to get the mic working while keeping the
amplifier settings correct.

Fixes: 61cbc08fdb ("ALSA: hda/realtek: Add quirks for ASUS 2024 Zenbooks")
Signed-off-by: Vasiliy Kovalev <kovalev@altlinux.org>
Link: https://patch.msgid.link/20250818204243.247297-1-kovalev@altlinux.org
Signed-off-by: Takashi Iwai <tiwai@suse.de>
2025-08-19 08:20:26 +02:00
Nitin Rawat
6300d5c543 scsi: ufs: ufs-qcom: Fix ESI null pointer dereference
ESI/MSI is a performance optimization feature that provides dedicated
interrupts per MCQ hardware queue. This is an optional feature and UFS
MCQ should work both with and without ESI.

Commit e46a28cea2 ("scsi: ufs: qcom: Remove the MSI descriptor abuse")
brings a regression in ESI (Enhanced System Interrupt) configuration that
causes a null pointer dereference when Platform MSI allocation fails.

The issue occurs when platform_device_msi_init_and_alloc_irqs() in
ufs_qcom_config_esi() fails (returns -EINVAL) but the current code uses
the __free() macro for automatic cleanup, which frees MSI resources that
were never successfully allocated.

Unable to handle kernel NULL pointer dereference at virtual
address 0000000000000008

  Call trace:
  mutex_lock+0xc/0x54 (P)
  platform_device_msi_free_irqs_all+0x1c/0x40
  ufs_qcom_config_esi+0x1d0/0x220 [ufs_qcom]
  ufshcd_config_mcq+0x28/0x104
  ufshcd_init+0xa3c/0xf40
  ufshcd_pltfrm_init+0x504/0x7d4
  ufs_qcom_probe+0x20/0x58 [ufs_qcom]

Fix by restructuring the ESI configuration to try MSI allocation first,
before any other resource allocation, and use explicit cleanup instead of
the __free() macro to avoid cleanup of unallocated resources.

Tested on SM8750 platform with MCQ enabled, both with and without
Platform ESI support.
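
A sketch of the restructure (illustrative, not the actual diff;
"later_step_fails" stands in for the remaining setup steps):

    ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
                                                  ufs_qcom_write_msi_msg);
    if (ret) {
            dev_warn(hba->dev, "Failed to allocate MSIs, ESI disabled\n");
            return ret; /* nothing allocated, nothing to clean up */
    }

    /* ... configure ESI vectors ... */

    if (later_step_fails) {
            /* Explicit cleanup of what was actually allocated. */
            platform_device_msi_free_irqs_all(hba->dev);
            return ret;
    }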

Fixes: e46a28cea2 ("scsi: ufs: qcom: Remove the MSI descriptor abuse")
Cc: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Nitin Rawat <quic_nitirawa@quicinc.com>
Link: https://lore.kernel.org/r/20250811073330.20230-1-quic_nitirawa@quicinc.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2025-08-18 22:25:52 -04:00
Bart Van Assche
09d57d68ba scsi: ufs: core: Rename ufshcd_wait_for_doorbell_clr()
The name ufshcd_wait_for_doorbell_clr() refers to legacy mode. Commit
8d077ede48 ("scsi: ufs: Optimize the command queueing code") added
support for MCQ mode in this function. Since then, the name of this
function has been misleading. Hence change the name of this function into
something that is appropriate for both legacy and MCQ mode.

Reviewed-by: Peter Wang <peter.wang@mediatek.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20250815155842.472867-5-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2025-08-18 22:15:19 -04:00
Bart Van Assche
9ee35fd43f scsi: ufs: core: Fix the return value documentation
ufshcd_wait_for_dev_cmd() and all its callers can return an OCS error.
OCS errors are represented by positive integers. Remove the WARN_ONCE()
statements that complain about positive error codes and update the
documentation.

Keep the behavior of ufshcd_wait_for_dev_cmd() because this return value
may end up being passed as the second argument of bsg_job_done() and
bsg_job_done() handles positive and negative error codes differently.

Cc: Peter Wang <peter.wang@mediatek.com>
Fixes: cc59f3b685 ("scsi: ufs: core: Improve return value documentation")
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20250815155842.472867-4-bvanassche@acm.org
Reviewed-by: Peter Wang <peter.wang@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2025-08-18 22:15:19 -04:00
Bart Van Assche
e5203d89d5 scsi: ufs: core: Remove WARN_ON_ONCE() call from ufshcd_uic_cmd_compl()
The UIC completion interrupt may be disabled while a UIC command is
being processed. When the UIC completion interrupt is re-enabled, a UIC
interrupt is triggered and the WARN_ON_ONCE(!cmd) statement is hit.
Hence this patch, which removes this kernel warning.

Fixes: fcd8b0450a ("scsi: ufs: core: Make ufshcd_uic_cmd_compl() easier to analyze")
Reviewed-by: Peter Wang <peter.wang@mediatek.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20250815155842.472867-3-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2025-08-18 22:15:18 -04:00
Bart Van Assche
eabcac808c scsi: ufs: core: Fix IRQ lock inversion for the SCSI host lock
Commit 3c7ac40d73 ("scsi: ufs: core: Delegate the interrupt service
routine to a threaded IRQ handler") introduced an IRQ lock inversion
issue. Fix this lock inversion by changing the spin_lock_irq() calls into
spin_lock_irqsave() calls in code that can be called either from
interrupt context or from thread context. This patch fixes the following
lockdep complaint:

WARNING: possible irq lock inversion dependency detected
6.12.30-android16-5-maybe-dirty-4k #1 Tainted: G        W  OE
--------------------------------------------------------
kworker/u28:0/12 just changed the state of lock:
ffffff881e29dd60 (&hba->clk_gating.lock){-...}-{2:2}, at: ufshcd_release_scsi_cmd+0x60/0x110
but this lock took another, HARDIRQ-unsafe lock in the past:
 (shost->host_lock){+.+.}-{2:2}

and interrupts could create inverse lock ordering between them.

other info that might help us debug this:
 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(shost->host_lock);
                               local_irq_disable();
                               lock(&hba->clk_gating.lock);
                               lock(shost->host_lock);
  <Interrupt>
    lock(&hba->clk_gating.lock);

 *** DEADLOCK ***

4 locks held by kworker/u28:0/12:
 #0: ffffff8800ac6158 ((wq_completion)async){+.+.}-{0:0}, at: process_one_work+0x1bc/0x65c
 #1: ffffffc085c93d70 ((work_completion)(&entry->work)){+.+.}-{0:0}, at: process_one_work+0x1e4/0x65c
 #2: ffffff881e29c0e0 (&shost->scan_mutex){+.+.}-{3:3}, at: __scsi_add_device+0x74/0x120
 #3: ffffff881960ea00 (&hwq->cq_lock){-...}-{2:2}, at: ufshcd_mcq_poll_cqe_lock+0x28/0x104

the shortest dependencies between 2nd lock and 1st lock:
 -> (shost->host_lock){+.+.}-{2:2} {
    HARDIRQ-ON-W at:
                      lock_acquire+0x134/0x2b4
                      _raw_spin_lock+0x48/0x64
                      ufshcd_sl_intr+0x4c/0xa08
                      ufshcd_threaded_intr+0x70/0x12c
                      irq_thread_fn+0x48/0xa8
                      irq_thread+0x130/0x1ec
                      kthread+0x110/0x134
                      ret_from_fork+0x10/0x20
    SOFTIRQ-ON-W at:
                      lock_acquire+0x134/0x2b4
                      _raw_spin_lock+0x48/0x64
                      ufshcd_sl_intr+0x4c/0xa08
                      ufshcd_threaded_intr+0x70/0x12c
                      irq_thread_fn+0x48/0xa8
                      irq_thread+0x130/0x1ec
                      kthread+0x110/0x134
                      ret_from_fork+0x10/0x20
    INITIAL USE at:
                     lock_acquire+0x134/0x2b4
                     _raw_spin_lock+0x48/0x64
                     ufshcd_sl_intr+0x4c/0xa08
                     ufshcd_threaded_intr+0x70/0x12c
                     irq_thread_fn+0x48/0xa8
                     irq_thread+0x130/0x1ec
                     kthread+0x110/0x134
                     ret_from_fork+0x10/0x20
  }
  ... key      at: [<ffffffc085ba1a98>] scsi_host_alloc.__key+0x0/0x10
  ... acquired at:
   _raw_spin_lock_irqsave+0x5c/0x80
   __ufshcd_release+0x78/0x118
   ufshcd_send_uic_cmd+0xe4/0x118
   ufshcd_dme_set_attr+0x88/0x1c8
   ufs_google_phy_initialization+0x68/0x418 [ufs]
   ufs_google_link_startup_notify+0x78/0x27c [ufs]
   ufshcd_link_startup+0x84/0x720
   ufshcd_init+0xf3c/0x1330
   ufshcd_pltfrm_init+0x728/0x7d8
   ufs_google_probe+0x30/0x84 [ufs]
   platform_probe+0xa0/0xe0
   really_probe+0x114/0x454
   __driver_probe_device+0xa4/0x160
   driver_probe_device+0x44/0x23c
   __driver_attach_async_helper+0x60/0xd4
   async_run_entry_fn+0x4c/0x17c
   process_one_work+0x26c/0x65c
   worker_thread+0x33c/0x498
   kthread+0x110/0x134
   ret_from_fork+0x10/0x20

-> (&hba->clk_gating.lock){-...}-{2:2} {
   IN-HARDIRQ-W at:
                    lock_acquire+0x134/0x2b4
                    _raw_spin_lock_irqsave+0x5c/0x80
                    ufshcd_release_scsi_cmd+0x60/0x110
                    ufshcd_compl_one_cqe+0x2c0/0x3f4
                    ufshcd_mcq_poll_cqe_lock+0xb0/0x104
                    ufs_google_mcq_intr+0x80/0xa0 [ufs]
                    __handle_irq_event_percpu+0x104/0x32c
                    handle_irq_event+0x40/0x9c
                    handle_fasteoi_irq+0x170/0x2e8
                    generic_handle_domain_irq+0x58/0x80
                    gic_handle_irq+0x48/0x104
                    call_on_irq_stack+0x3c/0x50
                    do_interrupt_handler+0x7c/0xd8
                    el1_interrupt+0x34/0x58
                    el1h_64_irq_handler+0x18/0x24
                    el1h_64_irq+0x68/0x6c
                    _raw_spin_unlock_irqrestore+0x3c/0x6c
                    debug_object_assert_init+0x16c/0x21c
                    __mod_timer+0x4c/0x48c
                    schedule_timeout+0xd4/0x16c
                    io_schedule_timeout+0x48/0x70
                    do_wait_for_common+0x100/0x194
                    wait_for_completion_io_timeout+0x48/0x6c
                    blk_execute_rq+0x124/0x17c
                    scsi_execute_cmd+0x18c/0x3f8
                    scsi_probe_and_add_lun+0x204/0xd74
                    __scsi_add_device+0xbc/0x120
                    ufshcd_async_scan+0x80/0x3c0
                    async_run_entry_fn+0x4c/0x17c
                    process_one_work+0x26c/0x65c
                    worker_thread+0x33c/0x498
                    kthread+0x110/0x134
                    ret_from_fork+0x10/0x20
   INITIAL USE at:
                   lock_acquire+0x134/0x2b4
                   _raw_spin_lock_irqsave+0x5c/0x80
                   ufshcd_hold+0x34/0x14c
                   ufshcd_send_uic_cmd+0x28/0x118
                   ufshcd_dme_set_attr+0x88/0x1c8
                   ufs_google_phy_initialization+0x68/0x418 [ufs]
                   ufs_google_link_startup_notify+0x78/0x27c [ufs]
                   ufshcd_link_startup+0x84/0x720
                   ufshcd_init+0xf3c/0x1330
                   ufshcd_pltfrm_init+0x728/0x7d8
                   ufs_google_probe+0x30/0x84 [ufs]
                   platform_probe+0xa0/0xe0
                   really_probe+0x114/0x454
                   __driver_probe_device+0xa4/0x160
                   driver_probe_device+0x44/0x23c
                   __driver_attach_async_helper+0x60/0xd4
                   async_run_entry_fn+0x4c/0x17c
                   process_one_work+0x26c/0x65c
                   worker_thread+0x33c/0x498
                   kthread+0x110/0x134
                   ret_from_fork+0x10/0x20
 }
 ... key      at: [<ffffffc085ba6fe8>] ufshcd_init.__key+0x0/0x10
 ... acquired at:
   mark_lock+0x1c4/0x224
   __lock_acquire+0x438/0x2e1c
   lock_acquire+0x134/0x2b4
   _raw_spin_lock_irqsave+0x5c/0x80
   ufshcd_release_scsi_cmd+0x60/0x110
   ufshcd_compl_one_cqe+0x2c0/0x3f4
   ufshcd_mcq_poll_cqe_lock+0xb0/0x104
   ufs_google_mcq_intr+0x80/0xa0 [ufs]
   __handle_irq_event_percpu+0x104/0x32c
   handle_irq_event+0x40/0x9c
   handle_fasteoi_irq+0x170/0x2e8
   generic_handle_domain_irq+0x58/0x80
   gic_handle_irq+0x48/0x104
   call_on_irq_stack+0x3c/0x50
   do_interrupt_handler+0x7c/0xd8
   el1_interrupt+0x34/0x58
   el1h_64_irq_handler+0x18/0x24
   el1h_64_irq+0x68/0x6c
   _raw_spin_unlock_irqrestore+0x3c/0x6c
   debug_object_assert_init+0x16c/0x21c
   __mod_timer+0x4c/0x48c
   schedule_timeout+0xd4/0x16c
   io_schedule_timeout+0x48/0x70
   do_wait_for_common+0x100/0x194
   wait_for_completion_io_timeout+0x48/0x6c
   blk_execute_rq+0x124/0x17c
   scsi_execute_cmd+0x18c/0x3f8
   scsi_probe_and_add_lun+0x204/0xd74
   __scsi_add_device+0xbc/0x120
   ufshcd_async_scan+0x80/0x3c0
   async_run_entry_fn+0x4c/0x17c
   process_one_work+0x26c/0x65c
   worker_thread+0x33c/0x498
   kthread+0x110/0x134
   ret_from_fork+0x10/0x20

stack backtrace:
CPU: 6 UID: 0 PID: 12 Comm: kworker/u28:0 Tainted: G        W  OE      6.12.30-android16-5-maybe-dirty-4k #1 ccd4020fe444bdf629efc3b86df6be920b8df7d0
Tainted: [W]=WARN, [O]=OOT_MODULE, [E]=UNSIGNED_MODULE
Hardware name: Spacecraft board based on MALIBU (DT)
Workqueue: async async_run_entry_fn
Call trace:
 dump_backtrace+0xfc/0x17c
 show_stack+0x18/0x28
 dump_stack_lvl+0x40/0xa0
 dump_stack+0x18/0x24
 print_irq_inversion_bug+0x2fc/0x304
 mark_lock_irq+0x388/0x4fc
 mark_lock+0x1c4/0x224
 __lock_acquire+0x438/0x2e1c
 lock_acquire+0x134/0x2b4
 _raw_spin_lock_irqsave+0x5c/0x80
 ufshcd_release_scsi_cmd+0x60/0x110
 ufshcd_compl_one_cqe+0x2c0/0x3f4
 ufshcd_mcq_poll_cqe_lock+0xb0/0x104
 ufs_google_mcq_intr+0x80/0xa0 [ufs dd6f385554e109da094ab91d5f7be18625a2222a]
 __handle_irq_event_percpu+0x104/0x32c
 handle_irq_event+0x40/0x9c
 handle_fasteoi_irq+0x170/0x2e8
 generic_handle_domain_irq+0x58/0x80
 gic_handle_irq+0x48/0x104
 call_on_irq_stack+0x3c/0x50
 do_interrupt_handler+0x7c/0xd8
 el1_interrupt+0x34/0x58
 el1h_64_irq_handler+0x18/0x24
 el1h_64_irq+0x68/0x6c
 _raw_spin_unlock_irqrestore+0x3c/0x6c
 debug_object_assert_init+0x16c/0x21c
 __mod_timer+0x4c/0x48c
 schedule_timeout+0xd4/0x16c
 io_schedule_timeout+0x48/0x70
 do_wait_for_common+0x100/0x194
 wait_for_completion_io_timeout+0x48/0x6c
 blk_execute_rq+0x124/0x17c
 scsi_execute_cmd+0x18c/0x3f8
 scsi_probe_and_add_lun+0x204/0xd74
 __scsi_add_device+0xbc/0x120
 ufshcd_async_scan+0x80/0x3c0
 async_run_entry_fn+0x4c/0x17c
 process_one_work+0x26c/0x65c
 worker_thread+0x33c/0x498
 kthread+0x110/0x134
 ret_from_fork+0x10/0x20

Cc: Neil Armstrong <neil.armstrong@linaro.org>
Cc: André Draszik <andre.draszik@linaro.org>
Reviewed-by: Peter Wang <peter.wang@mediatek.com>
Fixes: 3c7ac40d73 ("scsi: ufs: core: Delegate the interrupt service routine to a threaded IRQ handler")
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20250815155842.472867-2-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2025-08-18 22:15:18 -04:00
Christoph Hellwig
d0a2b527d8 block: tone down bio_check_eod
bdev_nr_sectors() == 0 is a pattern used for block devices that have
been hot removed; don't spam the log about them.
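
A simplified sketch of the resulting check (the real function also
reports the device name and sector range):

    static int bio_check_eod(struct bio *bio)
    {
            sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
            unsigned int nr_sectors = bio_sectors(bio);

            if (nr_sectors &&
                (nr_sectors > maxsector ||
                 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
                    /* maxsector == 0 means hot-removed: still fail the
                     * I/O, just don't log about it. */
                    if (maxsector)
                            pr_info_ratelimited("attempt to access beyond end of device\n");
                    return -EIO;
            }
            return 0;
    }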

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250818101102.1604551-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-18 13:27:05 -06:00
Rajeev Mishra
47b71abd58 loop: use vfs_getattr_nosec for accurate file size
Use vfs_getattr_nosec() in lo_calculate_size() for getting the file
size, rather than just reading the cached inode size via i_size_read().
This provides better results than cached inode data, particularly for
network filesystems where metadata may be stale.
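
A sketch of the approach (illustrative; error and offset handling
abbreviated):

    static loff_t lo_calculate_size(struct loop_device *lo,
                                    struct file *file)
    {
            struct kstat stat;
            loff_t len;

            /* Ask the filesystem for a fresh size; fall back to the
             * cached inode size if getattr fails. */
            if (!vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0))
                    len = stat.size;
            else
                    len = i_size_read(file->f_mapping->host);
            return len; /* offset/sizelimit handling elided */
    }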

Signed-off-by: Rajeev Mishra <rajeevm@hpe.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20250818184821.115033-3-rajeevm@hpe.com
[axboe: massage commit message]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-18 13:10:35 -06:00
Rajeev Mishra
8aa5a3b68a loop: Consolidate size calculation logic into lo_calculate_size()
Rename get_size to lo_calculate_size and merge the logic from get_size
and get_loop_size into a single function. Update all callers to use
lo_calculate_size. This is done in preparation for improving the size
detection logic.

Signed-off-by: Rajeev Mishra <rajeevm@hpe.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20250818184821.115033-2-rajeevm@hpe.com
[axboe: massage commit message]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-18 13:10:25 -06:00
Jens Axboe
7242169037 Merge tag 'md-6.17-20250819' of gitolite.kernel.org:pub/scm/linux/kernel/git/mdraid/linux into block-6.17
Pull MD fixes from Yu:

"- Add a legacy_async_del_gendisk mode, to prevent a user tools
   regression. New user tools releases will not use such a mode, the old
   release with a new kernel now will have warning about deprecated
   behavior, and we prepare to remove this legacy mode after about a
   year later.
 - The rename in kernel causing user tools build failure, revert the
   rename in mdp_superblock_s.
 - Fix a regression that interrupted resync can be shown as recover from
   mdstat or sysfs."

* tag 'md-6.17-20250819' of gitolite.kernel.org:pub/scm/linux/kernel/git/mdraid/linux:
  md: fix sync_action incorrect display during resync
  md: add helper rdev_needs_recovery()
  md: keep recovery_cp in mdp_superblock_s
  md: add legacy_async_del_gendisk mode
2025-08-18 11:13:38 -06:00
Zhang Yi
af24c20c46 ASoC: codecs: ES9389: Modify the standby configuration
Modify the standby configuration

Signed-off-by: Zhang Yi <zhangyi@everest-semi.com>
Link: https://patch.msgid.link/20250815024729.3051-1-zhangyi@everest-semi.com
Signed-off-by: Mark Brown <broonie@kernel.org>
2025-08-18 18:12:46 +01:00
Christoph Hellwig
f4ae174403 block: remove newlines from the warnings in blk_validate_integrity_limits
Otherwise they are very hard to read in the kernel log.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20250818045456.1482889-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-18 10:17:49 -06:00
Christoph Hellwig
61ca3b891b block: handle pi_tuple_size in queue_limits_stack_integrity
queue_limits_stack_integrity needs to handle the new pi_tuple_size field,
otherwise stacking PI-capable devices will always fail.
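
The missing handling presumably mirrors how the neighbouring integrity
fields are stacked; a sketch (ti/bi being the top/bottom blk_integrity
limits):

    if (!ti->pi_tuple_size)
            ti->pi_tuple_size = bi->pi_tuple_size;
    else if (ti->pi_tuple_size != bi->pi_tuple_size)
            return false; /* mismatched PI tuple sizes cannot stack */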

Fixes: 76e45252a4 ("block: introduce pi_tuple_size field in blk_integrity")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20250818045456.1482889-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-18 10:17:49 -06:00
Akhilesh Patil
0227af355b selftests: ublk: Use ARRAY_SIZE() macro to improve code
Use the ARRAY_SIZE() macro while calculating the size of an array to
improve code readability and reduce potential sizing errors.
Implement this suggestion given by the spatch tool by running the
coccinelle script scripts/coccinelle/misc/array_size.cocci.
Follow the ARRAY_SIZE() macro usage pattern in ublk.c introduced by
commit ec12009318 ("selftests: ublk: fix ublk_find_tgt()")
wherever appropriate to maintain consistency.
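
A standalone illustration of the idiom (target names are made up):

    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    int main(void)
    {
            const char *tgts[] = { "null", "loop", "stripe" };
            size_t i;

            /* No hand-counted literal to drift out of sync with the
             * initializer list above. */
            for (i = 0; i < ARRAY_SIZE(tgts); i++)
                    printf("%s\n", tgts[i]);
            return 0;
    }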

Signed-off-by: Akhilesh Patil <akhilesh@ee.iitb.ac.in>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/aKGihYui6/Pcijbk@bhairav-test.ee.iitb.ac.in
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-08-18 05:36:29 -06:00
Dan Carpenter
89f0addeee ALSA: usb-audio: Fix size validation in convert_chmap_v3()
The "p" pointer is void so sizeof(*p) is 1.  The intent was to check
sizeof(*cs_desc), which is 3, instead.
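
A standalone illustration of the pitfall (the struct stands in for the
3-byte descriptor):

    #include <stdio.h>

    struct cs_desc { unsigned char a, b, c; }; /* 3 bytes */

    int main(void)
    {
            void *p = 0;
            struct cs_desc *cs_desc = 0;

            /* GCC defines sizeof on void as 1 (an extension), so a
             * length check against sizeof(*p) is far too lax. */
            printf("%zu %zu\n", sizeof(*p), sizeof(*cs_desc)); /* 1 3 */
            return 0;
    }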

Fixes: ecfd41166b ("ALSA: usb-audio: Validate UAC3 cluster segment descriptors")
Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
Link: https://patch.msgid.link/aKL5kftC1qGt6lpv@stanley.mountain
Signed-off-by: Takashi Iwai <tiwai@suse.de>
2025-08-18 12:36:33 +02:00
Jiayi Li
99d7ab8db9 memstick: Fix deadlock by moving removing flag earlier
The existing memstick core patch, commit 62c59a8786 ("memstick: Skip
allocating card when removing host"), sets host->removing in
memstick_remove_host(), but there still exists a critical time window
where memstick_check can run after host->eject is set but before
removing is set.

In the rtsx_usb_ms driver, the problematic sequence is:

rtsx_usb_ms_drv_remove:          memstick_check:
  host->eject = true
  cancel_work_sync(handle_req)     if(!host->removing)
  ...                              memstick_alloc_card()
                                     memstick_set_rw_addr()
                                       memstick_new_req()
                                         rtsx_usb_ms_request()
                                           if(!host->eject)
                                           skip schedule_work
                                       wait_for_completion()
  memstick_remove_host:                [blocks indefinitely]
    host->removing = true
    flush_workqueue()
    [block]

1. rtsx_usb_ms_drv_remove sets host->eject = true
2. cancel_work_sync(&host->handle_req) runs
3. memstick_check work may be executed here <-- danger window
4. memstick_remove_host sets removing = 1

During this window (step 3), memstick_check calls memstick_alloc_card,
which may wait indefinitely for the mrq_complete completion that will
never occur: rtsx_usb_ms_request sees eject=true and skips scheduling
work, so memstick_set_rw_addr waits forever for completion.

This causes a deadlock when memstick_remove_host tries to flush_workqueue,
waiting for memstick_check to complete, while memstick_check is blocked
waiting for mrq_complete completion.

Fix this by setting removing=true at the start of rtsx_usb_ms_drv_remove,
before any work cancellation. This ensures memstick_check will see the
removing flag immediately and exit early, avoiding the deadlock.
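
A sketch of the reordering (illustrative; struct and field names
abbreviated from the driver):

    static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
    {
            struct rtsx_usb_ms *host = platform_get_drvdata(pdev);
            struct memstick_host *msh = host->msh;

            /* Close the window: memstick_check() must never observe
             * eject=true while removing is still false. */
            msh->removing = true;
            host->eject = true;
            cancel_work_sync(&host->handle_req);
            /* ... */
            memstick_remove_host(msh);
    }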

Fixes: 62c59a8786 ("memstick: Skip allocating card when removing host")
Signed-off-by: Jiayi Li <lijiayi@kylinos.cn>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20250804013604.1311218-1-lijiayi@kylinos.cn
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2025-08-18 12:01:20 +02:00
Sai Krishna Potthuri
e251709aad mmc: sdhci-of-arasan: Ensure CD logic stabilization before power-up
During SD suspend/resume without a full card rescan (when using
non-removable SD cards for rootfs), the SD card initialization may fail
after resume. This occurs because, after a host controller reset, the
card detect logic may take time to stabilize due to debounce logic.
Without waiting for stabilization, the host may attempt to power up the
card prematurely, leading to command timeouts during the resume flow.
Add sdhci_arasan_set_power_and_bus_voltage() to wait for the card detect
stable bit before powering up the card. Since the stabilization time
is not fixed, a maximum timeout of one second is used to ensure
sufficient wait time for the card detect signal to stabilize.

Signed-off-by: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20250730060543.1735971-1-sai.krishna.potthuri@amd.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2025-08-18 11:43:27 +02:00
Victor Shih
340be332e4 mmc: sdhci-pci-gli: GL9763e: Mask the replay timer timeout of AER
Due to a flaw in the hardware design, the GL9763e replay timer frequently
times out when ASPM is enabled. As a result, warning messages will
often appear in the system log when the system accesses the GL9763e
PCI config. Therefore, the replay timer timeout must be masked.

Signed-off-by: Victor Shih <victor.shih@genesyslogic.com.tw>
Fixes: 1ae1d2d6e5 ("mmc: sdhci-pci-gli: Add Genesys Logic GL9763E support")
Cc: stable@vger.kernel.org
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20250731065752.450231-4-victorshihgli@gmail.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2025-08-18 11:38:24 +02:00
Victor Shih
293ed0f5f3 mmc: sdhci-pci-gli: GL9763e: Rename the gli_set_gl9763e() for consistency
In preparation to fix the replay timer timeout, rename
gli_set_gl9763e() to gl9763e_hw_setting() for consistency.

Signed-off-by: Victor Shih <victor.shih@genesyslogic.com.tw>
Fixes: 1ae1d2d6e5 ("mmc: sdhci-pci-gli: Add Genesys Logic GL9763E support")
Cc: stable@vger.kernel.org
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20250731065752.450231-3-victorshihgli@gmail.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2025-08-18 11:38:24 +02:00
Victor Shih
dec8b38be4 mmc: sdhci-pci-gli: Add a new function to simplify the code
In preparation to fix the replay timer timeout, add the
sdhci_gli_mask_replay_timer_timeout() function to simplify some of the
code, allowing it to be re-used.

Signed-off-by: Victor Shih <victor.shih@genesyslogic.com.tw>
Fixes: 1ae1d2d6e5 ("mmc: sdhci-pci-gli: Add Genesys Logic GL9763E support")
Cc: stable@vger.kernel.org
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20250731065752.450231-2-victorshihgli@gmail.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2025-08-18 11:38:24 +02:00
Shenghao Ding
c0ed3c2edc ALSA: hda/tas2781: Add name prefix tas2781 for tas2781's dvc_tlv and amp_vol_tlv
With some new devices being added to the driver, dvc_tlv and amp_vol_tlv
will cause confusion for customers about which devices they support.

Fixes: 5be27f1e3e ("ALSA: hda/tas2781: Add tas2781 HDA driver")
Signed-off-by: Shenghao Ding <shenghao-ding@ti.com>

Link: https://patch.msgid.link/20250816042741.1659-1-shenghao-ding@ti.com
Signed-off-by: Takashi Iwai <tiwai@suse.de>
2025-08-18 11:10:57 +02:00
Zheng Qixing
b7ee30f0ef md: fix sync_action incorrect display during resync
During raid resync, if a disk becomes faulty, the operation is
briefly interrupted. The MD_RECOVERY_RECOVER flag triggered by
the disk failure causes sync_action to incorrectly show "recover"
instead of "resync". The same issue affects reshape operations.

Reproduction steps:
  mdadm -Cv /dev/md1 -l1 -n4 -e1.2 /dev/sd{a..d} // -> resync happened
  mdadm -f /dev/md1 /dev/sda                     // -> resync interrupted
  cat sync_action
  -> recover

Add progress checks in md_sync_action() for resync/recover/reshape
to ensure the interface correctly reports the actual operation type.

Fixes: 4b10a3bc67 ("md: ensure resync is prioritized over recovery")
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
Link: https://lore.kernel.org/linux-raid/20250816002534.1754356-3-zhengqixing@huaweicloud.com
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
2025-08-16 08:52:33 +08:00
Zheng Qixing
cb0780ad43 md: add helper rdev_needs_recovery()
Add a helper for checking if an rdev needs recovery.

Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
Link: https://lore.kernel.org/linux-raid/20250816002534.1754356-2-zhengqixing@huaweicloud.com
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
2025-08-16 08:51:59 +08:00
Xiao Ni
c27973211f md: keep recovery_cp in mdp_superblock_s
commit 907a99c314 ("md: rename recovery_cp to resync_offset") replaces
recovery_cp with resync_offset in mdp_superblock_s, which is in md_p.h.
md_p.h is used in userspace too, so the mdadm build fails because of
this. This patch reverts that change.

Fixes: 907a99c314 ("md: rename recovery_cp to resync_offset")
Signed-off-by: Xiao Ni <xni@redhat.com>
Link: https://lore.kernel.org/linux-raid/20250815040028.18085-1-xni@redhat.com
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
2025-08-16 08:47:38 +08:00
Evgeniy Harchenko
eafae0fdd1 ALSA: hda/realtek: Add support for HP EliteBook x360 830 G6 and EliteBook 830 G6
The HP EliteBook x360 830 G6 and HP EliteBook 830 G6 have
Realtek HDA codec ALC215. It needs the ALC285_FIXUP_HP_GPIO_LED
quirk to enable the mute LED.

Cc: <stable@vger.kernel.org>
Signed-off-by: Evgeniy Harchenko <evgeniyharchenko.dev@gmail.com>
Link: https://patch.msgid.link/20250815095814.75845-1-evgeniyharchenko.dev@gmail.com
Signed-off-by: Takashi Iwai <tiwai@suse.de>
2025-08-15 17:55:57 +02:00
Nicolin Chen
41f0200c71 iommu/tegra241-cmdqv: Fix missing cpu_to_le64 at lvcmdq_err_map
Sparse reported a warning:
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c:305:47:
	sparse:     expected restricted __le64
	sparse:     got unsigned long long

Add cpu_to_le64() to fix that.

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202508142105.Jb5Smjsg-lkp@intel.com/
Suggested-by: Pranjal Shrivastava <praan@google.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Link: https://lore.kernel.org/r/20250814193039.2265813-1-nicolinc@nvidia.com
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
2025-08-15 12:02:24 +02:00
Kees Cook
8503d0fcb1 iommu/amd: Avoid stack buffer overflow from kernel cmdline
While the kernel command line is considered trusted in most environments,
avoid writing 1 byte past the end of "acpiid" if the "str" argument is
maximum length.

Reported-by: Simcha Kosman <simcha.kosman@cyberark.com>
Closes: https://lore.kernel.org/all/AS8P193MB2271C4B24BCEDA31830F37AE84A52@AS8P193MB2271.EURP193.PROD.OUTLOOK.COM
Fixes: b6b26d86c6 ("iommu/amd: Add a length limitation for the ivrs_acpihid command-line parameter")
Signed-off-by: Kees Cook <kees@kernel.org>
Reviewed-by: Ankit Soni <Ankit.Soni@amd.com>
Link: https://lore.kernel.org/r/20250804154023.work.970-kees@kernel.org
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
2025-08-15 11:50:47 +02:00
Dan Carpenter
9dcf111dd3 scsi: qla4xxx: Prevent a potential error pointer dereference
The qla4xxx_get_ep_fwdb() function is supposed to return NULL on error,
but qla4xxx_ep_connect() returns error pointers.  Propagating the error
pointers will lead to an Oops in the caller, so change the error pointers
to NULL.

Fixes: 13483730a1 ("[SCSI] qla4xxx: fix flash/ddb support")
Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
Link: https://lore.kernel.org/r/aJwnVKS9tHsw1tEu@stanley.mountain
Reviewed-by: Chris Leech <cleech@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2025-08-14 23:22:46 -04:00
Adrian Hunter
823f95575d scsi: ufs: ufs-pci: Add support for Intel Wildcat Lake
Add a PCI ID to support Intel Wildcat Lake, same as MTL.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20250812130259.109645-1-adrian.hunter@intel.com
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2025-08-14 23:20:02 -04:00
Christoph Hellwig
fad2cf04e9 scsi: fnic: Remove a useless struct mempool forward declaration
struct mempool doesn't currently exist, and thus also isn't used in
fnic.h; remove the forward declaration.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250812082808.371119-1-hch@lst.de
Reviewed-by: Karan Tilak Kumar <kartilak@cisco.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2025-08-14 22:23:32 -04:00
Xiao Ni
25db5f284f md: add legacy_async_del_gendisk mode
commit 9e59d60976 ("md: call del_gendisk in control path") changes the
way del_gendisk is called from async to sync. But it breaks the mdadm
--assemble command. The assemble command runs like this:
1. create the array
2. stop the array
3. access the sysfs files after stopping

The sync way calls del_gendisk in step 2, so all sysfs files are removed.
Now, to avoid breaking the mdadm assemble command, this patch adds the
parameter legacy_async_del_gendisk that can be used to choose which way.
The default is the async way. In the future, we plan to change the
default to the sync way in kernel 7.0. Then users need to upgrade to
mdadm 4.5+, which removes step 2.

Fixes: 9e59d60976 ("md: call del_gendisk in control path")
Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Closes: https://lore.kernel.org/linux-raid/CAMw=ZnQ=ET2St-+hnhsuq34rRPnebqcXqP1QqaHW5Bh4aaaZ4g@mail.gmail.com/T/#t
Suggested-and-reviewed-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Xiao Ni <xni@redhat.com>
Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
Link: https://lore.kernel.org/linux-raid/20250813032929.54978-1-xni@redhat.com
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
2025-08-13 19:44:17 +08:00
Junxian Huang
fa2e2d31ee RDMA/hns: Fix dip entries leak on devices newer than hip09
The DIP algorithm is also supported on devices newer than hip09, so free
the dip entries on those devices too.

Fixes: f91696f2f0 ("RDMA/hns: Support congestion control type selection according to the FW")
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://patch.msgid.link/20250812122602.3524602-1-huangjunxian6@hisilicon.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 07:22:18 -04:00
Akhilesh Patil
111aea0464 RDMA/core: Free pfn_list with appropriate kvfree call
Ensure that the pfn_list allocated by kvcalloc() is freed using the
corresponding kvfree() function. Match the memory allocation and free
routines: kvcalloc() -> kvfree().
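
A minimal sketch of the pairing:

    /* kvcalloc() may hand back kmalloc or vmalloc memory, so it must
     * be paired with kvfree(), never plain kfree(). */
    pfn_list = kvcalloc(npfns, sizeof(*pfn_list), GFP_KERNEL);
    if (!pfn_list)
            return -ENOMEM;
    /* ... use pfn_list ... */
    kvfree(pfn_list);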

Fixes: 259e9bd07c ("RDMA/core: Avoid hmm_dma_map_alloc() for virtual DMA devices")
Signed-off-by: Akhilesh Patil <akhilesh@ee.iitb.ac.in>
Link: https://patch.msgid.link/aJjcPjL1BVh8QrMN@bhairav-test.ee.iitb.ac.in
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 07:00:21 -04:00
Dave Hansen
2186e8c39e MAINTAINERS: Remove bouncing irdma maintainer
This maintainer's email no longer works. Remove it from MAINTAINERS.

This still leaves one maintainer for the driver.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Cc: linux-rdma@vger.kernel.org
Link: https://patch.msgid.link/20250808175601.EF0AF767@davehans-spike.ostc.intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 06:53:38 -04:00
Anantha Prabhu
806b9f494f RDMA/bnxt_re: Fix to initialize the PBL array
memset the PBL page pointer and page map arrays before
populating the SGL addresses of the HWQ.

Fixes: 0c4dcd6028 ("RDMA/bnxt_re: Refactor hardware queue memory allocation")
Signed-off-by: Anantha Prabhu <anantha.prabhu@broadcom.com>
Reviewed-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Link: https://patch.msgid.link/20250805101000.233310-5-kalesh-anakkur.purayil@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 06:35:40 -04:00
Kalesh AP
ba60a1e8cb RDMA/bnxt_re: Fix a possible memory leak in the driver
The GID context reuse logic requires that the context memory not be
freed if and when the DEL_GID firmware command fails. But if there's
no subsequent ADD_GID to reuse it, the context memory must be freed
when the driver is unloaded. Otherwise it leads to a memory leak.

Below is the kmemleak trace reported:

unreferenced object 0xffff88817a4f34d0 (size 8):
  comm "insmod", pid 1072504, jiffies 4402561550
  hex dump (first 8 bytes):
  01 00 00 00 00 00 00 00                          ........
  backtrace (crc ccaa009e):
  __kmalloc_cache_noprof+0x33e/0x400
  0xffffffffc2db9d48
  add_modify_gid+0x5e0/0xb60 [ib_core]
  __ib_cache_gid_add+0x213/0x350 [ib_core]
  update_gid+0xf2/0x180 [ib_core]
  enum_netdev_ipv4_ips+0x3f3/0x690 [ib_core]
  enum_all_gids_of_dev_cb+0x125/0x1b0 [ib_core]
  ib_enum_roce_netdev+0x14b/0x250 [ib_core]
  ib_cache_setup_one+0x2e5/0x540 [ib_core]
  ib_register_device+0x82c/0xf10 [ib_core]
  0xffffffffc2df5ad9
  0xffffffffc2da8b07
  0xffffffffc2db174d
  auxiliary_bus_probe+0xa5/0x120
  really_probe+0x1e4/0x850
  __driver_probe_device+0x18f/0x3d0

Fixes: 4a62c5e9e2 ("RDMA/bnxt_re: Do not free the ctx_tbl entry if delete GID fails")
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Link: https://patch.msgid.link/20250805101000.233310-4-kalesh-anakkur.purayil@broadcom.com
Reviewed-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 06:35:40 -04:00
Kashyap Desai
666bce0bd7 RDMA/bnxt_re: Fix to remove workload check in SRQ limit path
There should not be any checks of the current workload when setting the
srq_limit value in the SRQ hw context.

Remove all such workload checks and make a direct call to
set srq_limit via the SRQ_ARM doorbell.

Fixes: 37cb11acf1 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Link: https://patch.msgid.link/20250805101000.233310-3-kalesh-anakkur.purayil@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 06:35:40 -04:00
Kashyap Desai
6296f9a529 RDMA/bnxt_re: Fix to do SRQ armena by default
Whenever an SRQ is created, make sure SRQ arm enable is always set.
The driver is always ready to receive the SRQ ASYNC event.

Additional note -
There is no need to enable srq arm conditionally.
See bnxt_qplib_armen_db in bnxt_qplib_create_cq().

Fixes: 37cb11acf1 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
Link: https://patch.msgid.link/20250805101000.233310-2-kalesh-anakkur.purayil@broadcom.com
Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 06:35:40 -04:00
wenglianfa
085a1b42e5 RDMA/hns: Fix querying wrong SCC context for DIP algorithm
When using the DIP algorithm, all QPs establishing connections with
the same destination IP share the same SCC, which is indexed by
dip_idx, but dip_idx isn't necessarily equal to the qpn. Therefore,
dip_idx should be used to query the SCC context instead of the qpn.

Fixes: 124a9fbe43 ("RDMA/hns: Append SCC context to the raw dump of QPC")
Signed-off-by: wenglianfa <wenglianfa@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://patch.msgid.link/20250726075345.846957-1-huangjunxian6@hisilicon.com
Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 06:34:25 -04:00
Boshi Yu
d4ac86b475 RDMA/erdma: Fix unset QPN of GSI QP
The QPN of the GSI QP was not set, which may cause issues.
Set the QPN to 1 when creating the GSI QP.

Fixes: 999a0a2e9b ("RDMA/erdma: Support UD QPs and UD WRs")
Reviewed-by: Cheng Xu <chengyou@linux.alibaba.com>
Signed-off-by: Boshi Yu <boshiyu@linux.alibaba.com>
Link: https://patch.msgid.link/20250725055410.67520-4-boshiyu@linux.alibaba.com
Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 06:23:54 -04:00
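
Condensed from the erdma_verbs.c hunk later in this compare (a sketch of the merged result):

	old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
	if (xa_is_err(old_entry))
		ret = xa_err(old_entry);
	else
		qp->ibqp.qp_num = 1;	/* previously left unset for the GSI QP */
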
Boshi Yu
d5c74713f0 RDMA/erdma: Fix ignored return value of init_kernel_qp
The init_kernel_qp interface may fail. Check its return value and free
related resources properly when it does.

Fixes: 1550557717 ("RDMA/erdma: Add verbs implementation")
Reviewed-by: Cheng Xu <chengyou@linux.alibaba.com>
Signed-off-by: Boshi Yu <boshiyu@linux.alibaba.com>
Link: https://patch.msgid.link/20250725055410.67520-3-boshiyu@linux.alibaba.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 06:23:54 -04:00
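
Condensed from the erdma_verbs.c hunk later in this compare (a sketch of the merged result):

	ret = init_kernel_qp(dev, qp, attrs);
	if (ret)
		goto err_out_xa;	/* unwind the earlier xarray setup
					 * (label per the hunk below) */
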
Zhu Yanjun
3c3e9a9f29 RDMA/rxe: Flush delayed SKBs while releasing RXE resources
When skb packets are sent out, they still depend on rxe resources,
for example the QP and sk, until the packets are destroyed.

If these rxe resources have already been released by the time the
skb packets are destroyed, call traces will appear.

To avoid skb packets hanging too long in some network devices, a
timestamp is added when these skb packets are created. If an skb
packet stays too long in a network device, the device can free it,
releasing the rxe resources.

Reported-by: syzbot+8425ccfb599521edb153@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=8425ccfb599521edb153
Tested-by: syzbot+8425ccfb599521edb153@syzkaller.appspotmail.com
Fixes: 1a633bdc8f ("RDMA/rxe: Let destroy qp succeed with stuck packet")
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Link: https://patch.msgid.link/20250726013104.463570-1-yanjun.zhu@linux.dev
Signed-off-by: Leon Romanovsky <leon@kernel.org>
2025-08-13 06:20:00 -04:00
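
Condensed from the rxe_net.c and rxe_qp.c hunks later in this compare, the send path now pins the QP per in-flight skb and stamps each skb at creation (a sketch of the merged result):

	/* rxe_send()/rxe_loopback(): hold a QP reference per skb; the
	 * destructor finds the QP via sk_user_data and drops it.
	 */
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = rxe_skb_tx_dtor;
	rxe_get(pkt->qp);
	atomic_inc(&pkt->qp->skb_out);

	/* rxe_init_packet(): timestamp the skb so a network device can
	 * free packets that have been stuck too long.
	 */
	skb->tstamp = ktime_get();
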
76 changed files with 951 additions and 435 deletions

View File

@ -226,6 +226,8 @@ Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Drew Fustini <fustini@kernel.org> <drew@pdp7.com>
<duje@dujemihanovic.xyz> <duje.mihanovic@skole.hr>
Easwar Hariharan <easwar.hariharan@linux.microsoft.com> <easwar.hariharan@intel.com>
Easwar Hariharan <easwar.hariharan@linux.microsoft.com> <eahariha@linux.microsoft.com>
Ed L. Cashin <ecashin@coraid.com>
Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>

View File

@ -12281,7 +12281,6 @@ F: include/linux/avf/virtchnl.h
F: include/linux/net/intel/*/
INTEL ETHERNET PROTOCOL DRIVER FOR RDMA
M: Mustafa Ismail <mustafa.ismail@intel.com>
M: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
@ -16058,6 +16057,23 @@ F: mm/mempolicy.c
F: mm/migrate.c
F: mm/migrate_device.c
MEMORY MANAGEMENT - MGLRU (MULTI-GEN LRU)
M: Andrew Morton <akpm@linux-foundation.org>
M: Axel Rasmussen <axelrasmussen@google.com>
M: Yuanchu Xie <yuanchu@google.com>
R: Wei Xu <weixugc@google.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
F: Documentation/admin-guide/mm/multigen_lru.rst
F: Documentation/mm/multigen_lru.rst
F: include/linux/mm_inline.h
F: include/linux/mmzone.h
F: mm/swap.c
F: mm/vmscan.c
F: mm/workingset.c
MEMORY MANAGEMENT - MISC
M: Andrew Morton <akpm@linux-foundation.org>
M: David Hildenbrand <david@redhat.com>
@ -16248,8 +16264,10 @@ S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
F: rust/helpers/mm.c
F: rust/helpers/page.c
F: rust/kernel/mm.rs
F: rust/kernel/mm/
F: rust/kernel/page.rs
MEMORY MAPPING
M: Andrew Morton <akpm@linux-foundation.org>

View File

@ -557,7 +557,7 @@ static inline int bio_check_eod(struct bio *bio)
sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
unsigned int nr_sectors = bio_sectors(bio);
if (nr_sectors &&
if (nr_sectors && maxsector &&
(nr_sectors > maxsector ||
bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
pr_info_ratelimited("%s: attempt to access beyond end of device\n"
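
The hunk above shows the old and new conditions on adjacent lines; disentangled, the resulting check is:

	/* Skip the check entirely when the device reports zero capacity,
	 * so a removed device does not spam the ratelimited warning.
	 */
	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors))
		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
		...
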

View File

@ -95,6 +95,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(SQ_SCHED),
QUEUE_FLAG_NAME(DISABLE_WBT_DEF),
QUEUE_FLAG_NAME(NO_ELV_SWITCH),
QUEUE_FLAG_NAME(QOS_ENABLED),
};
#undef QUEUE_FLAG_NAME

View File

@ -5033,6 +5033,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
unsigned int memflags;
int i;
struct xarray elv_tbl, et_tbl;
bool queues_frozen = false;
lockdep_assert_held(&set->tag_list_lock);
@ -5056,9 +5057,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
blk_mq_sysfs_unregister_hctxs(q);
}
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_freeze_queue_nomemsave(q);
/*
* Switch IO scheduler to 'none', cleaning up the data associated
* with the previous scheduler. We will switch back once we are done
@ -5068,6 +5066,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
if (blk_mq_elv_switch_none(q, &elv_tbl))
goto switch_back;
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_freeze_queue_nomemsave(q);
queues_frozen = true;
if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
goto switch_back;
@ -5091,8 +5092,12 @@ fallback:
}
switch_back:
/* The blk_mq_elv_switch_back unfreezes queue for us. */
list_for_each_entry(q, &set->tag_list, tag_set_list)
list_for_each_entry(q, &set->tag_list, tag_set_list) {
/* switch_back expects queue to be frozen */
if (!queues_frozen)
blk_mq_freeze_queue_nomemsave(q);
blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl);
}
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_sysfs_register_hctxs(q);

View File

@ -2,8 +2,6 @@
#include "blk-rq-qos.h"
__read_mostly DEFINE_STATIC_KEY_FALSE(block_rq_qos);
/*
* Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
* false if 'v' + 1 would be bigger than 'below'.
@ -319,8 +317,8 @@ void rq_qos_exit(struct request_queue *q)
struct rq_qos *rqos = q->rq_qos;
q->rq_qos = rqos->next;
rqos->ops->exit(rqos);
static_branch_dec(&block_rq_qos);
}
blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
mutex_unlock(&q->rq_qos_mutex);
}
@ -346,7 +344,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
goto ebusy;
rqos->next = q->rq_qos;
q->rq_qos = rqos;
static_branch_inc(&block_rq_qos);
blk_queue_flag_set(QUEUE_FLAG_QOS_ENABLED, q);
blk_mq_unfreeze_queue(q, memflags);
@ -377,6 +375,8 @@ void rq_qos_del(struct rq_qos *rqos)
break;
}
}
if (!q->rq_qos)
blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
blk_mq_unfreeze_queue(q, memflags);
mutex_lock(&q->debugfs_mutex);

View File

@ -12,7 +12,6 @@
#include "blk-mq-debugfs.h"
struct blk_mq_debugfs_attr;
extern struct static_key_false block_rq_qos;
enum rq_qos_id {
RQ_QOS_WBT,
@ -113,43 +112,55 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
q->rq_qos)
__rq_qos_cleanup(q->rq_qos, bio);
}
static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos &&
!blk_rq_is_passthrough(rq))
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
q->rq_qos && !blk_rq_is_passthrough(rq))
__rq_qos_done(q->rq_qos, rq);
}
static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
q->rq_qos)
__rq_qos_issue(q->rq_qos, rq);
}
static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
q->rq_qos)
__rq_qos_requeue(q->rq_qos, rq);
}
static inline void rq_qos_done_bio(struct bio *bio)
{
if (static_branch_unlikely(&block_rq_qos) &&
bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
bio_flagged(bio, BIO_QOS_MERGED))) {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
if (q->rq_qos)
__rq_qos_done_bio(q->rq_qos, bio);
}
struct request_queue *q;
if (!bio->bi_bdev || (!bio_flagged(bio, BIO_QOS_THROTTLED) &&
!bio_flagged(bio, BIO_QOS_MERGED)))
return;
q = bdev_get_queue(bio->bi_bdev);
/*
* If a bio has BIO_QOS_xxx set, it implicitly implies that
* q->rq_qos is present. So, we skip re-checking q->rq_qos
* here as an extra optimization and directly call
* __rq_qos_done_bio().
*/
__rq_qos_done_bio(q->rq_qos, bio);
}
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
q->rq_qos) {
bio_set_flag(bio, BIO_QOS_THROTTLED);
__rq_qos_throttle(q->rq_qos, bio);
}
@ -158,14 +169,16 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
struct bio *bio)
{
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
q->rq_qos)
__rq_qos_track(q->rq_qos, rq, bio);
}
static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
q->rq_qos) {
bio_set_flag(bio, BIO_QOS_MERGED);
__rq_qos_merge(q->rq_qos, rq, bio);
}
@ -173,7 +186,8 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
q->rq_qos)
__rq_qos_queue_depth_changed(q->rq_qos);
}
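
All of the inline hooks above share one gating idiom after this change; disentangled from the flattened hunks, e.g. for rq_qos_cleanup():

	/* Per-queue flag test replaces the global block_rq_qos static key. */
	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
	    q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
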

View File

@ -157,16 +157,14 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
switch (bi->csum_type) {
case BLK_INTEGRITY_CSUM_NONE:
if (bi->pi_tuple_size) {
pr_warn("pi_tuple_size must be 0 when checksum type \
is none\n");
pr_warn("pi_tuple_size must be 0 when checksum type is none\n");
return -EINVAL;
}
break;
case BLK_INTEGRITY_CSUM_CRC:
case BLK_INTEGRITY_CSUM_IP:
if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
pr_warn("pi_tuple_size mismatch for T10 PI: expected \
%zu, got %u\n",
pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n",
sizeof(struct t10_pi_tuple),
bi->pi_tuple_size);
return -EINVAL;
@ -174,8 +172,7 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
break;
case BLK_INTEGRITY_CSUM_CRC64:
if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
pr_warn("pi_tuple_size mismatch for CRC64 PI: \
expected %zu, got %u\n",
pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n",
sizeof(struct crc64_pi_tuple),
bi->pi_tuple_size);
return -EINVAL;
@ -972,6 +969,8 @@ bool queue_limits_stack_integrity(struct queue_limits *t,
goto incompatible;
if (ti->csum_type != bi->csum_type)
goto incompatible;
if (ti->pi_tuple_size != bi->pi_tuple_size)
goto incompatible;
if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
(bi->flags & BLK_INTEGRITY_REF_TAG))
goto incompatible;
@ -980,6 +979,7 @@ bool queue_limits_stack_integrity(struct queue_limits *t,
ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
(bi->flags & BLK_INTEGRITY_REF_TAG);
ti->csum_type = bi->csum_type;
ti->pi_tuple_size = bi->pi_tuple_size;
ti->metadata_size = bi->metadata_size;
ti->pi_offset = bi->pi_offset;
ti->interval_exp = bi->interval_exp;

View File

@ -137,20 +137,29 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
static int max_part;
static int part_shift;
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
{
struct kstat stat;
loff_t loopsize;
int ret;
/* Compute loopsize in bytes */
loopsize = i_size_read(file->f_mapping->host);
if (offset > 0)
loopsize -= offset;
/*
* Get the accurate file size. This provides better results than
* cached inode data, particularly for network filesystems where
* metadata may be stale.
*/
ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
if (ret)
return 0;
loopsize = stat.size;
if (lo->lo_offset > 0)
loopsize -= lo->lo_offset;
/* offset is beyond i_size, weird but possible */
if (loopsize < 0)
return 0;
if (sizelimit > 0 && sizelimit < loopsize)
loopsize = sizelimit;
if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
loopsize = lo->lo_sizelimit;
/*
* Unfortunately, if we want to do I/O on the device,
* the number of 512-byte sectors has to fit into a sector_t.
@ -158,11 +167,6 @@ static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
return loopsize >> 9;
}
static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}
/*
* We support direct I/O only if lo_offset is aligned with the logical I/O size
* of backing device, and the logical block size of loop is bigger than that of
@ -569,7 +573,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
error = -EINVAL;
/* size of the new backing store needs to be the same */
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file))
goto out_err;
/*
@ -1063,7 +1067,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
loop_update_dio(lo);
loop_sysfs_init(lo);
size = get_loop_size(lo, file);
size = lo_calculate_size(lo, file);
loop_set_size(lo, size);
/* Order wrt reading lo_state in loop_validate_file(). */
@ -1255,8 +1259,7 @@ out_unfreeze:
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
if (!err && size_changed) {
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
lo->lo_backing_file);
loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file);
loop_set_size(lo, new_size);
}
out_unlock:
@ -1399,7 +1402,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
size = get_loop_size(lo, lo->lo_backing_file);
size = lo_calculate_size(lo, lo->lo_backing_file);
loop_set_size(lo, size);
return 0;

View File

@ -115,7 +115,7 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
out_free_map:
if (ib_uses_virt_dma(dev))
kfree(map->pfn_list);
kvfree(map->pfn_list);
else
hmm_dma_map_free(dev->dma_device, map);
return ret;
@ -287,7 +287,7 @@ static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
mutex_unlock(&umem_odp->umem_mutex);
mmu_interval_notifier_remove(&umem_odp->notifier);
if (ib_uses_virt_dma(dev))
kfree(umem_odp->map.pfn_list);
kvfree(umem_odp->map.pfn_list);
else
hmm_dma_map_free(dev->dma_device, &umem_odp->map);
}

View File

@ -1921,7 +1921,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
ib_srq);
struct bnxt_re_dev *rdev = srq->rdev;
int rc;
switch (srq_attr_mask) {
case IB_SRQ_MAX_WR:
@ -1933,11 +1932,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
return -EINVAL;
srq->qplib_srq.threshold = srq_attr->srq_limit;
rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
if (rc) {
ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
return rc;
}
bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
/* On success, update the shadow */
srq->srq_limit = srq_attr->srq_limit;
/* No need to Build and send response back to udata */

View File

@ -2017,6 +2017,28 @@ static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev)
rdev->nqr = NULL;
}
/* When DEL_GID fails, driver is not freeing GID ctx memory.
* To avoid the memory leak, free the memory during unload
*/
static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
int i;
if (!sgid_tbl->active)
return;
ctx_tbl = sgid_tbl->ctx;
for (i = 0; i < sgid_tbl->max; i++) {
if (sgid_tbl->hw_id[i] == 0xFFFF)
continue;
ctx = ctx_tbl[i];
kfree(ctx);
}
}
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
{
u8 type;
@ -2030,6 +2052,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
bnxt_re_free_gid_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
&rdev->flags))
bnxt_re_cleanup_res(rdev);

View File

@ -705,9 +705,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
srq->dbinfo.db = srq->dpi->dbr;
srq->dbinfo.max_slot = 1;
srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
if (srq->threshold)
bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
srq->arm_req = false;
bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
return 0;
fail:
@ -717,24 +715,6 @@ fail:
return rc;
}
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
u32 count;
count = __bnxt_qplib_get_avail(srq_hwq);
if (count > srq->threshold) {
srq->arm_req = false;
bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
} else {
/* Deferred arming */
srq->arm_req = true;
}
return 0;
}
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
@ -776,7 +756,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
struct rq_wqe *srqe;
struct sq_sge *hw_sge;
u32 count = 0;
int i, next;
spin_lock(&srq_hwq->lock);
@ -808,15 +787,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
spin_lock(&srq_hwq->lock);
count = __bnxt_qplib_get_avail(srq_hwq);
spin_unlock(&srq_hwq->lock);
/* Ring DB */
bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
if (srq->arm_req == true && count > srq->threshold) {
srq->arm_req = false;
bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
}
return 0;
}

View File

@ -546,8 +546,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
srqn_handler_t srq_handler);
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,

View File

@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
if (!pbl->pg_arr)
return -ENOMEM;
memset(pbl->pg_arr, 0, pages * sizeof(void *));
pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
if (!pbl->pg_map_arr) {
@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
pbl->pg_arr = NULL;
return -ENOMEM;
}
memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
pbl->pg_count = 0;
pbl->pg_size = sginfo->pgsize;

View File

@ -994,6 +994,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
if (xa_is_err(old_entry))
ret = xa_err(old_entry);
else
qp->ibqp.qp_num = 1;
} else {
ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
XA_LIMIT(1, dev->attrs.max_qp - 1),
@ -1031,7 +1033,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
if (ret)
goto err_out_cmd;
} else {
init_kernel_qp(dev, qp, attrs);
ret = init_kernel_qp(dev, qp, attrs);
if (ret)
goto err_out_xa;
}
qp->attrs.max_send_sge = attrs->cap.max_send_sge;

View File

@ -3043,7 +3043,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
if (!hr_dev->is_vf)
hns_roce_free_link_table(hr_dev);
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
free_dip_entry(hr_dev);
}
@ -5476,7 +5476,7 @@ out:
return ret;
}
static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 sccn,
void *buffer)
{
struct hns_roce_v2_scc_context *context;
@ -5488,7 +5488,7 @@ static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
qpn);
sccn);
if (ret)
goto out;

View File

@ -100,6 +100,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
struct hns_roce_v2_qp_context qpc;
struct hns_roce_v2_scc_context sccc;
} context = {};
u32 sccn = hr_qp->qpn;
int ret;
if (!hr_dev->hw->query_qpc)
@ -116,7 +117,13 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
!hr_dev->hw->query_sccc)
goto out;
ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
if (hr_qp->cong_type == CONG_TYPE_DIP) {
if (!hr_qp->dip)
goto out;
sccn = hr_qp->dip->dip_idx;
}
ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);
if (ret)
ibdev_warn_ratelimited(&hr_dev->ib_dev,
"failed to query SCCC, ret = %d.\n",

View File

@ -345,33 +345,15 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
struct net_device *ndev = skb->dev;
struct rxe_dev *rxe;
unsigned int qp_index;
struct rxe_qp *qp;
struct rxe_qp *qp = skb->sk->sk_user_data;
int skb_out;
rxe = rxe_get_dev_from_net(ndev);
if (!rxe && is_vlan_dev(ndev))
rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
if (WARN_ON(!rxe))
return;
qp_index = (int)(uintptr_t)skb->sk->sk_user_data;
if (!qp_index)
return;
qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
if (!qp)
goto put_dev;
skb_out = atomic_dec_return(&qp->skb_out);
if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)
if (unlikely(qp->need_req_skb &&
skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
rxe_sched_task(&qp->send_task);
rxe_put(qp);
put_dev:
ib_device_put(&rxe->ib_dev);
sock_put(skb->sk);
}
@ -383,6 +365,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
sock_hold(sk);
skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor;
rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP))
@ -405,6 +388,7 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
sock_hold(sk);
skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor;
rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP))
@ -497,6 +481,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
goto out;
}
/* Add time stamp to skb. */
skb->tstamp = ktime_get();
skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
/* FIXME: hold reference to this netdev until life of this skb. */

View File

@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0)
return err;
qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;
qp->sk->sk->sk_user_data = qp;
/* pick a source UDP port number for this QP based on
* the source QPN. this spreads traffic for different QPs

View File

@ -3638,7 +3638,7 @@ static int __init parse_ivrs_acpihid(char *str)
{
u32 seg = 0, bus, dev, fn;
char *hid, *uid, *p, *addr;
char acpiid[ACPIID_LEN] = {0};
char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
int i;
addr = strchr(str, '@');
@ -3664,7 +3664,7 @@ static int __init parse_ivrs_acpihid(char *str)
/* We have the '@', make it the terminator to get just the acpiid */
*addr++ = 0;
if (strlen(str) > ACPIID_LEN + 1)
if (strlen(str) > ACPIID_LEN)
goto not_found;
if (sscanf(str, "=%s", acpiid) != 1)

View File

@ -2997,9 +2997,9 @@ void arm_smmu_attach_commit(struct arm_smmu_attach_state *state)
/* ATS is being switched off, invalidate the entire ATC */
arm_smmu_atc_inv_master(master, IOMMU_NO_PASID);
}
master->ats_enabled = state->ats_enabled;
arm_smmu_remove_master_domain(master, state->old_domain, state->ssid);
master->ats_enabled = state->ats_enabled;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)

View File

@ -301,9 +301,11 @@ static void tegra241_vintf_user_handle_error(struct tegra241_vintf *vintf)
struct iommu_vevent_tegra241_cmdqv vevent_data;
int i;
for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++)
vevent_data.lvcmdq_err_map[i] =
readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
u64 err = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
vevent_data.lvcmdq_err_map[i] = cpu_to_le64(err);
}
iommufd_viommu_report_event(viommu, IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV,
&vevent_data, sizeof(vevent_data));

View File

@ -1283,7 +1283,7 @@ static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
unsigned long *ptr;
ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size);
if (_io_pte_none(*ptr) || !_io_pte_present(*ptr))
if (!ptr)
return 0;
return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1));

View File

@ -998,8 +998,7 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
iommu_dma_get_resv_regions(dev, head);
}
static const struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;
static const struct bus_type *virtio_bus_type;
static int viommu_match_node(struct device *dev, const void *data)
{
@ -1008,8 +1007,9 @@ static int viommu_match_node(struct device *dev, const void *data)
static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
fwnode, viommu_match_node);
struct device *dev = bus_find_device(virtio_bus_type, NULL, fwnode,
viommu_match_node);
put_device(dev);
return dev ? dev_to_virtio(dev)->priv : NULL;
@ -1160,6 +1160,9 @@ static int viommu_probe(struct virtio_device *vdev)
if (!viommu)
return -ENOMEM;
/* Borrow this for easy lookups later */
virtio_bus_type = dev->bus;
spin_lock_init(&viommu->request_lock);
ida_init(&viommu->domain_ids);
viommu->dev = dev;
@ -1229,10 +1232,10 @@ static int viommu_probe(struct virtio_device *vdev)
if (ret)
goto err_free_vqs;
iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
vdev->priv = viommu;
iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
dev_info(dev, "input address: %u bits\n",
order_base_2(viommu->geometry.aperture_end));
dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

View File

@ -339,6 +339,7 @@ static int start_readonly;
* so all the races disappear.
*/
static bool create_on_open = true;
static bool legacy_async_del_gendisk = true;
/*
* We have a system wide 'event count' that is incremented
@ -877,15 +878,18 @@ void mddev_unlock(struct mddev *mddev)
export_rdev(rdev, mddev);
}
/* Call del_gendisk after release reconfig_mutex to avoid
* deadlock (e.g. call del_gendisk under the lock and an
* access to sysfs files waits the lock)
* And MD_DELETED is only used for md raid which is set in
* do_md_stop. dm raid only uses md_stop to stop. So dm raid
* doesn't need to check MD_DELETED when getting reconfig lock
*/
if (test_bit(MD_DELETED, &mddev->flags))
del_gendisk(mddev->gendisk);
if (!legacy_async_del_gendisk) {
/*
* Call del_gendisk after release reconfig_mutex to avoid
* deadlock (e.g. call del_gendisk under the lock and an
* access to sysfs files waits the lock)
* And MD_DELETED is only used for md raid which is set in
* do_md_stop. dm raid only uses md_stop to stop. So dm raid
* doesn't need to check MD_DELETED when getting reconfig lock
*/
if (test_bit(MD_DELETED, &mddev->flags))
del_gendisk(mddev->gendisk);
}
}
EXPORT_SYMBOL_GPL(mddev_unlock);
@ -1419,7 +1423,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru
else {
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
mddev->resync_offset = sb->resync_offset;
mddev->resync_offset = sb->recovery_cp;
} else
mddev->resync_offset = 0;
}
@ -1547,13 +1551,13 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
mddev->minor_version = sb->minor_version;
if (mddev->in_sync)
{
sb->resync_offset = mddev->resync_offset;
sb->recovery_cp = mddev->resync_offset;
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
if (mddev->resync_offset == MaxSector)
sb->state = (1<< MD_SB_CLEAN);
} else
sb->resync_offset = 0;
sb->recovery_cp = 0;
sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_sectors << 9;
@ -4835,9 +4839,42 @@ out_unlock:
static struct md_sysfs_entry md_metadata =
__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors)
{
return rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < sectors;
}
static enum sync_action md_get_active_sync_action(struct mddev *mddev)
{
struct md_rdev *rdev;
bool is_recover = false;
if (mddev->resync_offset < MaxSector)
return ACTION_RESYNC;
if (mddev->reshape_position != MaxSector)
return ACTION_RESHAPE;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
if (rdev_needs_recovery(rdev, MaxSector)) {
is_recover = true;
break;
}
}
rcu_read_unlock();
return is_recover ? ACTION_RECOVER : ACTION_IDLE;
}
enum sync_action md_sync_action(struct mddev *mddev)
{
unsigned long recovery = mddev->recovery;
enum sync_action active_action;
/*
* frozen has the highest priority, means running sync_thread will be
@ -4861,8 +4898,17 @@ enum sync_action md_sync_action(struct mddev *mddev)
!test_bit(MD_RECOVERY_NEEDED, &recovery))
return ACTION_IDLE;
if (test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
mddev->reshape_position != MaxSector)
/*
* Check if any sync operation (resync/recover/reshape) is
* currently active. This ensures that only one sync operation
* can run at a time. Returns the type of active operation, or
* ACTION_IDLE if none are active.
*/
active_action = md_get_active_sync_action(mddev);
if (active_action != ACTION_IDLE)
return active_action;
if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return ACTION_RESHAPE;
if (test_bit(MD_RECOVERY_RECOVER, &recovery))
@ -5818,6 +5864,13 @@ static void md_kobj_release(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
if (legacy_async_del_gendisk) {
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
if (mddev->sysfs_level)
sysfs_put(mddev->sysfs_level);
del_gendisk(mddev->gendisk);
}
put_disk(mddev->gendisk);
}
@ -6021,6 +6074,9 @@ static int md_alloc_and_put(dev_t dev, char *name)
{
struct mddev *mddev = md_alloc(dev, name);
if (legacy_async_del_gendisk)
pr_warn("md: async del_gendisk mode will be removed in future, please upgrade to mdadm-4.5+\n");
if (IS_ERR(mddev))
return PTR_ERR(mddev);
mddev_put(mddev);
@ -6431,10 +6487,22 @@ static void md_clean(struct mddev *mddev)
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
/* if UNTIL_STOP is set, it's cleared here */
mddev->hold_active = 0;
/* Don't clear MD_CLOSING, or mddev can be opened again. */
mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
/*
* For legacy_async_del_gendisk mode, it can stop the array in the
* middle of assembling it, then it still can access the array. So
* it needs to clear MD_CLOSING. If not legacy_async_del_gendisk,
* it can't open the array again after stopping it. So it doesn't
* clear MD_CLOSING.
*/
if (legacy_async_del_gendisk && mddev->hold_active) {
clear_bit(MD_CLOSING, &mddev->flags);
} else {
/* if UNTIL_STOP is set, it's cleared here */
mddev->hold_active = 0;
/* Don't clear MD_CLOSING, or mddev can be opened again. */
mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
}
mddev->sb_flags = 0;
mddev->ro = MD_RDWR;
mddev->metadata_type[0] = 0;
@ -6658,7 +6726,8 @@ static int do_md_stop(struct mddev *mddev, int mode)
export_array(mddev);
md_clean(mddev);
set_bit(MD_DELETED, &mddev->flags);
if (!legacy_async_del_gendisk)
set_bit(MD_DELETED, &mddev->flags);
}
md_new_event();
sysfs_notify_dirent_safe(mddev->sysfs_state);
@ -8968,11 +9037,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
start = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < start)
if (rdev_needs_recovery(rdev, start))
start = rdev->recovery_offset;
rcu_read_unlock();
@ -9331,12 +9396,8 @@ void md_do_sync(struct md_thread *thread)
test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(Journal, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < mddev->curr_resync)
if (mddev->delta_disks >= 0 &&
rdev_needs_recovery(rdev, mddev->curr_resync))
rdev->recovery_offset = mddev->curr_resync;
rcu_read_unlock();
}
@ -10392,6 +10453,7 @@ module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
module_param(legacy_async_del_gendisk, bool, 0600);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
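
Because the new knob is a bool module_param with mode 0600, it should also be flippable at runtime through sysfs; a minimal userspace sketch (the parameter path is assumed from module_param() naming conventions and is not shown in this diff):

	/* Hypothetical sketch: opt out of the legacy behavior by writing
	 * "0" to the md_mod parameter. Path assumed from module_param()
	 * conventions; requires root because the mode is 0600.
	 */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/module/md_mod/parameters/legacy_async_del_gendisk", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("0\n", f);
		fclose(f);
		return 0;
	}
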

View File

@ -555,7 +555,6 @@ EXPORT_SYMBOL(memstick_add_host);
*/
void memstick_remove_host(struct memstick_host *host)
{
host->removing = 1;
flush_workqueue(workqueue);
mutex_lock(&host->lock);
if (host->card)

View File

@ -812,6 +812,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
int err;
host->eject = true;
msh->removing = true;
cancel_work_sync(&host->handle_req);
cancel_delayed_work_sync(&host->poll_card);

View File

@ -99,6 +99,9 @@
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
#define CD_STABLE_TIMEOUT_US 1000000
#define CD_STABLE_MAX_SLEEP_US 10
/**
* struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map
*
@ -206,12 +209,15 @@ struct sdhci_arasan_data {
* 19MHz instead
*/
#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
/* Enable CD stable check before power-up */
#define SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE BIT(3)
};
struct sdhci_arasan_of_data {
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
const struct sdhci_pltfm_data *pdata;
const struct sdhci_arasan_clk_ops *clk_ops;
u32 quirks;
};
static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
@ -514,6 +520,24 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
}
static void sdhci_arasan_set_power_and_bus_voltage(struct sdhci_host *host, unsigned char mode,
unsigned short vdd)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
u32 reg;
/*
* Ensure that the card detect logic has stabilized before powering up, this is
* necessary after a host controller reset.
*/
if (mode == MMC_POWER_UP && sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE)
read_poll_timeout(sdhci_readl, reg, reg & SDHCI_CD_STABLE, CD_STABLE_MAX_SLEEP_US,
CD_STABLE_TIMEOUT_US, false, host, SDHCI_PRESENT_STATE);
sdhci_set_power_and_bus_voltage(host, mode, vdd);
}
static const struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@ -521,7 +545,7 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.set_power = sdhci_set_power_and_bus_voltage,
.set_power = sdhci_arasan_set_power_and_bus_voltage,
.hw_reset = sdhci_arasan_hw_reset,
};
@ -570,7 +594,7 @@ static const struct sdhci_ops sdhci_arasan_cqe_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.set_power = sdhci_set_power_and_bus_voltage,
.set_power = sdhci_arasan_set_power_and_bus_voltage,
.irq = sdhci_arasan_cqhci_irq,
};
@ -1447,6 +1471,7 @@ static const struct sdhci_arasan_clk_ops zynqmp_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = {
.pdata = &sdhci_arasan_zynqmp_pdata,
.clk_ops = &zynqmp_clk_ops,
.quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static const struct sdhci_arasan_clk_ops versal_clk_ops = {
@ -1457,6 +1482,7 @@ static const struct sdhci_arasan_clk_ops versal_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_versal_data = {
.pdata = &sdhci_arasan_zynqmp_pdata,
.clk_ops = &versal_clk_ops,
.quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static const struct sdhci_arasan_clk_ops versal_net_clk_ops = {
@ -1467,6 +1493,7 @@ static const struct sdhci_arasan_clk_ops versal_net_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_versal_net_data = {
.pdata = &sdhci_arasan_versal_net_pdata,
.clk_ops = &versal_net_clk_ops,
.quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static struct sdhci_arasan_of_data intel_keembay_emmc_data = {
@ -1937,6 +1964,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "rockchip,rk3399-sdhci-5.1"))
sdhci_arasan_update_clockmultiplier(host, 0x0);
sdhci_arasan->quirks |= data->quirks;
if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) {

View File

@ -287,6 +287,20 @@
#define GLI_MAX_TUNING_LOOP 40
/* Genesys Logic chipset */
static void sdhci_gli_mask_replay_timer_timeout(struct pci_dev *pdev)
{
int aer;
u32 value;
/* mask the replay timer timeout of AER */
aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
if (aer) {
pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
value |= PCI_ERR_COR_REP_TIMER;
pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
}
}
static inline void gl9750_wt_on(struct sdhci_host *host)
{
u32 wt_value;
@ -607,7 +621,6 @@ static void gl9750_hw_setting(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct pci_dev *pdev;
int aer;
u32 value;
pdev = slot->chip->pdev;
@ -626,12 +639,7 @@ static void gl9750_hw_setting(struct sdhci_host *host)
pci_set_power_state(pdev, PCI_D0);
/* mask the replay timer timeout of AER */
aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
if (aer) {
pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
value |= PCI_ERR_COR_REP_TIMER;
pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
}
sdhci_gli_mask_replay_timer_timeout(pdev);
gl9750_wt_off(host);
}
@ -806,7 +814,6 @@ static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
int aer;
u32 value;
gl9755_wt_on(pdev);
@ -841,12 +848,7 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
pci_set_power_state(pdev, PCI_D0);
/* mask the replay timer timeout of AER */
aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
if (aer) {
pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
value |= PCI_ERR_COR_REP_TIMER;
pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
}
sdhci_gli_mask_replay_timer_timeout(pdev);
gl9755_wt_off(pdev);
}
@ -1751,7 +1753,7 @@ cleanup:
return ret;
}
static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
static void gl9763e_hw_setting(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
@ -1780,6 +1782,9 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5);
pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value);
/* mask the replay timer timeout of AER */
sdhci_gli_mask_replay_timer_timeout(pdev);
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
@ -1923,7 +1928,7 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
gli_pcie_enable_msi(slot);
host->mmc_host_ops.hs400_enhanced_strobe =
gl9763e_hs400_enhanced_strobe;
gli_set_gl9763e(slot);
gl9763e_hw_setting(slot);
sdhci_enable_v4_mode(host);
return 0;

View File

@ -156,6 +156,7 @@ struct sdhci_am654_data {
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
#define SDHCI_AM654_QUIRK_DISABLE_HS400 BIT(2)
};
struct window {
@ -765,6 +766,7 @@ static int sdhci_am654_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
struct device *dev = mmc_dev(host->mmc);
u32 ctl_cfg_2 = 0;
u32 mask;
u32 val;
@ -820,6 +822,12 @@ static int sdhci_am654_init(struct sdhci_host *host)
if (ret)
goto err_cleanup_host;
if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_DISABLE_HS400 &&
host->mmc->caps2 & (MMC_CAP2_HS400 | MMC_CAP2_HS400_ES)) {
dev_info(dev, "HS400 mode not supported on this silicon revision, disabling it\n");
host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
}
ret = __sdhci_add_host(host);
if (ret)
goto err_cleanup_host;
@ -883,6 +891,12 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
return 0;
}
static const struct soc_device_attribute sdhci_am654_descope_hs400[] = {
{ .family = "AM62PX", .revision = "SR1.0" },
{ .family = "AM62PX", .revision = "SR1.1" },
{ /* sentinel */ }
};
static const struct of_device_id sdhci_am654_of_match[] = {
{
.compatible = "ti,am654-sdhci-5.1",
@ -970,6 +984,10 @@ static int sdhci_am654_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "parsing dt failed\n");
soc = soc_device_match(sdhci_am654_descope_hs400);
if (soc)
sdhci_am654->quirks |= SDHCI_AM654_QUIRK_DISABLE_HS400;
host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;

View File

@ -323,8 +323,6 @@ enum fnic_state {
FNIC_IN_ETH_TRANS_FC_MODE,
};
struct mempool;
enum fnic_role_e {
FNIC_ROLE_FCP_INITIATOR = 0,
};

View File

@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
vfree(dst_addr);
if (IS_ERR(ep))
return NULL;
return ep;
}

View File

@ -1303,7 +1303,7 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
*
* Return: 0 upon success; -EBUSY upon timeout.
*/
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba,
u64 wait_timeout_us)
{
int ret = 0;
@ -1431,7 +1431,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
ufshcd_wait_for_pending_cmds(hba, timeout_us)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
@ -3199,7 +3199,8 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
}
/*
* Return: 0 upon success; < 0 upon failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int max_timeout)
@ -3275,7 +3276,6 @@ retry:
}
}
WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@ -3294,7 +3294,8 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
}
/*
* Return: 0 upon success; < 0 upon failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
const u32 tag, int timeout)
@ -3317,7 +3318,8 @@ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
* @cmd_type: specifies the type (NOP, Query...)
* @timeout: timeout in milliseconds
*
* Return: 0 upon success; < 0 upon failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
@ -3363,6 +3365,10 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
(*request)->upiu_req.selector = selector;
}
/*
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
@ -3383,7 +3389,6 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
__func__, opcode, idn, ret, retries);
WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
@ -3395,7 +3400,8 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @index: flag index to access
* @flag_res: the flag value after the query request completes
*
* Return: 0 for success; < 0 upon failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res)
@ -3451,7 +3457,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
ufshcd_dev_man_unlock(hba);
WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@ -3464,8 +3469,9 @@ out_unlock:
* @selector: selector field
* @attr_val: the attribute value after the query request completes
*
* Return: 0 upon success; < 0 upon failure.
*/
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
@ -3513,7 +3519,6 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
ufshcd_dev_man_unlock(hba);
WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@ -3528,8 +3533,9 @@ out_unlock:
* @attr_val: the attribute value after the query request
* completes
*
* Return: 0 for success; < 0 upon failure.
*/
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
u32 *attr_val)
@ -3551,12 +3557,12 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: query attribute, idn %d, failed with error %d after %d retries\n",
__func__, idn, ret, QUERY_REQ_RETRIES);
WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
/*
* Return: 0 if successful; < 0 upon failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
enum query_opcode opcode, enum desc_idn idn, u8 index,
@ -3615,7 +3621,6 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
ufshcd_dev_man_unlock(hba);
WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@ -3632,7 +3637,8 @@ out_unlock:
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
*
* Return: 0 for success; < 0 upon failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
@ -3650,7 +3656,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
break;
}
WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@ -3663,7 +3668,8 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
* Return: 0 in case of success; < 0 upon failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
@ -3730,7 +3736,6 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
out:
if (is_kmalloc)
kfree(desc_buf);
WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
@ -4781,7 +4786,8 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
*
* Set fDeviceInit flag and poll until device toggles it.
*
* Return: 0 upon success; < 0 upon failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
@ -5135,7 +5141,8 @@ out:
* not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
* and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
*
* Return: 0 upon success; < 0 upon failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
@ -5559,9 +5566,9 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
irqreturn_t retval = IRQ_NONE;
struct uic_command *cmd;
spin_lock(hba->host->host_lock);
guard(spinlock_irqsave)(hba->host->host_lock);
cmd = hba->active_uic_cmd;
if (WARN_ON_ONCE(!cmd))
if (!cmd)
goto unlock;
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
@ -5586,8 +5593,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
unlock:
spin_unlock(hba->host->host_lock);
return retval;
}
@ -5869,7 +5874,8 @@ static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
* as the device is allowed to manage its own way of handling background
* operations.
*
* Return: zero on success, non-zero on failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
@ -5908,7 +5914,8 @@ out:
* host is idle so that BKOPS are managed effectively without any negative
* impacts.
*
* Return: zero on success, non-zero on failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
@ -6058,6 +6065,10 @@ out:
__func__, err);
}
/*
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
{
struct utp_upiu_query_v4_0 *upiu_resp;
@ -6920,7 +6931,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
bool queue_eh_work = false;
irqreturn_t retval = IRQ_NONE;
spin_lock(hba->host->host_lock);
guard(spinlock_irqsave)(hba->host->host_lock);
hba->errors |= UFSHCD_ERROR_MASK & intr_status;
if (hba->errors & INT_FATAL_ERRORS) {
@ -6979,7 +6990,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
*/
hba->errors = 0;
hba->uic_error = 0;
spin_unlock(hba->host->host_lock);
return retval;
}
@ -7454,7 +7465,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
* @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
* @dir: DMA direction
*
* Return: zero on success, non-zero on failure.
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,

View File

@ -2070,17 +2070,6 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
return IRQ_HANDLED;
}
static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi)
{
for (struct ufs_qcom_irq *q = uqi; q->irq; q++)
devm_free_irq(q->hba->dev, q->irq, q->hba);
platform_device_msi_free_irqs_all(uqi->hba->dev);
devm_kfree(uqi->hba->dev, uqi);
}
DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T))
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@ -2095,18 +2084,18 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
struct ufs_qcom_irq *qi __free(ufs_qcom_irq) =
devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
if (!qi)
return -ENOMEM;
/* Preset so __free() has a pointer to hba in all error paths */
qi[0].hba = hba;
ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
return ret;
dev_warn(hba->dev, "Platform MSI not supported or failed, continuing without ESI\n");
return ret; /* Continue without ESI */
}
struct ufs_qcom_irq *qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
if (!qi) {
platform_device_msi_free_irqs_all(hba->dev);
return -ENOMEM;
}
for (int idx = 0; idx < nr_irqs; idx++) {
@ -2117,15 +2106,17 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
IRQF_SHARED, "qcom-mcq-esi", qi + idx);
if (ret) {
dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
dev_err(hba->dev, "%s: Failed to request IRQ for %d, err = %d\n",
__func__, qi[idx].irq, ret);
qi[idx].irq = 0;
/* Free previously allocated IRQs */
for (int j = 0; j < idx; j++)
devm_free_irq(hba->dev, qi[j].irq, qi + j);
platform_device_msi_free_irqs_all(hba->dev);
devm_kfree(hba->dev, qi);
return ret;
}
}
retain_and_null_ptr(qi);
if (host->hw_ver.major >= 6) {
ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
REG_UFS_CFG3);

View File

@ -630,6 +630,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4D47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ } /* terminate list */
};

View File

@ -253,13 +253,14 @@ nfs_page_group_unlock(struct nfs_page *req)
nfs_page_clear_headlock(req);
}
/*
* nfs_page_group_sync_on_bit_locked
/**
* nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set
* @req: request in page group
* @bit: PG_* bit that is used to sync page group
*
* must be called with page group lock held
*/
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
struct nfs_page *head = req->wb_head;
struct nfs_page *tmp;

View File

@ -153,20 +153,10 @@ nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
}
}
static int
nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
{
int ret;
if (!test_bit(PG_REMOVE, &req->wb_flags))
return 0;
ret = nfs_page_group_lock(req);
if (ret)
return ret;
if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
nfs_page_set_inode_ref(req, inode);
nfs_page_group_unlock(req);
return 0;
}
/**
@ -585,19 +575,18 @@ retry:
}
}
ret = nfs_page_group_lock(head);
if (ret < 0)
goto out_unlock;
/* Ensure that nobody removed the request before we locked it */
if (head != folio->private) {
nfs_page_group_unlock(head);
nfs_unlock_and_release_request(head);
goto retry;
}
ret = nfs_cancel_remove_inode(head, inode);
if (ret < 0)
goto out_unlock;
ret = nfs_page_group_lock(head);
if (ret < 0)
goto out_unlock;
nfs_cancel_remove_inode(head, inode);
/* lock each request in the page group */
for (subreq = head->wb_this_page;
@ -786,7 +775,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
{
struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
nfs_page_group_lock(req);
if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
struct folio *folio = nfs_page_to_folio(req->wb_head);
struct address_space *mapping = folio->mapping;
@ -798,6 +788,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
}
spin_unlock(&mapping->i_private_lock);
}
nfs_page_group_unlock(req);
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
atomic_long_dec(&nfsi->nrequests);

View File

@ -4496,7 +4496,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
for (int i = 1; i < num_rqst; i++) {
struct smb_rqst *old = &old_rq[i - 1];
struct smb_rqst *new = &new_rq[i];
struct folio_queue *buffer;
struct folio_queue *buffer = NULL;
size_t size = iov_iter_count(&old->rq_iter);
orig_len += smb_rqst_len(server, old);

View File

@@ -187,10 +187,15 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
unsigned short flags;
unsigned int fragments;
u64 lookup_table_start, xattr_id_table_start, next_table;
int err;
int err, devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
TRACE("Entered squashfs_fill_superblock\n");
if (!devblksize) {
errorf(fc, "squashfs: unable to set blocksize\n");
return -EINVAL;
}
sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
if (sb->s_fs_info == NULL) {
ERROR("Failed to allocate squashfs_sb_info\n");
@@ -201,12 +206,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
if (!msblk->devblksize) {
errorf(fc, "squashfs: unable to set blocksize\n");
return -EINVAL;
}
msblk->devblksize = devblksize;
msblk->devblksize_log2 = ffz(~msblk->devblksize);
mutex_init(&msblk->meta_index_mutex);

View File

@@ -656,6 +656,7 @@ enum {
QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */
QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
QUEUE_FLAG_QOS_ENABLED, /* qos is enabled */
QUEUE_FLAG_MAX
};

View File

@@ -160,7 +160,7 @@ size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2
do {
struct folio *folio = folioq_folio(folioq, slot);
size_t part, remain, consumed;
size_t part, remain = 0, consumed;
size_t fsize;
void *base;
@@ -168,14 +168,16 @@ size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2
break;
fsize = folioq_folio_size(folioq, slot);
base = kmap_local_folio(folio, skip);
part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
remain = step(base, progress, part, priv, priv2);
kunmap_local(base);
consumed = part - remain;
len -= consumed;
progress += consumed;
skip += consumed;
if (skip < fsize) {
base = kmap_local_folio(folio, skip);
part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
remain = step(base, progress, part, priv, priv2);
kunmap_local(base);
consumed = part - remain;
len -= consumed;
progress += consumed;
skip += consumed;
}
if (skip >= fsize) {
skip = 0;
slot++;

View File

@@ -79,6 +79,7 @@ void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
struct folio *newfolio, struct folio *folio, int extra_count);
int set_movable_ops(const struct movable_operations *ops, enum pagetype type);
#else
@@ -100,6 +101,10 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
{
return -ENOSYS;
}
static inline int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
{
return -ENOSYS;
}
#endif /* CONFIG_MIGRATION */

View File

@@ -160,6 +160,7 @@ extern void nfs_join_page_group(struct nfs_page *head,
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int);
extern int nfs_page_set_headlock(struct nfs_page *req);
extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);

View File

@@ -107,8 +107,8 @@
#define CS35L56_DSP1_PMEM_5114 0x3804FE8
#define CS35L63_DSP1_FW_VER CS35L56_DSP1_FW_VER
#define CS35L63_DSP1_HALO_STATE 0x280396C
#define CS35L63_DSP1_PM_CUR_STATE 0x28042C8
#define CS35L63_DSP1_HALO_STATE 0x2803C04
#define CS35L63_DSP1_PM_CUR_STATE 0x2804518
#define CS35L63_PROTECTION_STATUS 0x340009C
#define CS35L63_TRANSDUCER_ACTUAL_PS 0x34000F4
#define CS35L63_MAIN_RENDER_USER_MUTE 0x3400020
@@ -306,6 +306,7 @@ struct cs35l56_base {
struct gpio_desc *reset_gpio;
struct cs35l56_spi_payload *spi_payload_buf;
const struct cs35l56_fw_reg *fw_reg;
const struct cirrus_amp_cal_controls *calibration_controls;
};
static inline bool cs35l56_is_otp_register(unsigned int reg)

View File

@@ -2,7 +2,7 @@
//
// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
//
// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
// Copyright (C) 2022 - 2025 Texas Instruments Incorporated
// https://www.ti.com
//
// The TAS2781 driver implements a flexible and configurable
@@ -15,7 +15,7 @@
#ifndef __TAS2781_TLV_H__
#define __TAS2781_TLV_H__
static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 50, 0);
static const __maybe_unused DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2781_dvc_tlv, -10000, 50, 0);
static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2781_amp_tlv, 1100, 50, 0);
#endif

View File

@@ -173,7 +173,7 @@ typedef struct mdp_superblock_s {
#else
#error unspecified endianness
#endif
__u32 resync_offset; /* 11 resync checkpoint sector count */
__u32 recovery_cp; /* 11 resync checkpoint sector count */
/* There are only valid for minor_version > 90 */
__u64 reshape_position; /* 12,13 next address in array-space for reshape */
__u32 new_level; /* 14 new level we are reshaping to */

View File

@@ -288,6 +288,7 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
goto done_unlock;
}
req->flags |= REQ_F_ASYNC_DATA;
req->async_data = ifd;
ifd->q = futex_q_init;
ifd->q.bitset = iof->futex_mask;
@@ -309,6 +310,8 @@ done:
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
req->async_data = NULL;
req->flags &= ~REQ_F_ASYNC_DATA;
kfree(ifd);
return IOU_COMPLETE;
}

View File

@@ -2119,6 +2119,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->file = NULL;
req->tctx = current->io_uring;
req->cancel_seq_set = false;
req->async_data = NULL;
if (unlikely(opcode >= IORING_OP_LAST)) {
req->opcode = 0;

View File

@@ -97,6 +97,7 @@ config KEXEC_JUMP
config KEXEC_HANDOVER
bool "kexec handover"
depends on ARCH_SUPPORTS_KEXEC_HANDOVER && ARCH_SUPPORTS_KEXEC_FILE
depends on !DEFERRED_STRUCT_PAGE_INIT
select MEMBLOCK_KHO_SCRATCH
select KEXEC_FILE
select DEBUG_FS

View File

@@ -144,14 +144,34 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
unsigned int order)
{
struct kho_mem_phys_bits *bits;
struct kho_mem_phys *physxa;
struct kho_mem_phys *physxa, *new_physxa;
const unsigned long pfn_high = pfn >> order;
might_sleep();
physxa = xa_load_or_alloc(&track->orders, order, sizeof(*physxa));
if (IS_ERR(physxa))
return PTR_ERR(physxa);
physxa = xa_load(&track->orders, order);
if (!physxa) {
int err;
new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL);
if (!new_physxa)
return -ENOMEM;
xa_init(&new_physxa->phys_bits);
physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa,
GFP_KERNEL);
err = xa_err(physxa);
if (err || physxa) {
xa_destroy(&new_physxa->phys_bits);
kfree(new_physxa);
if (err)
return err;
} else {
physxa = new_physxa;
}
}
bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS,
sizeof(*bits));
@@ -544,6 +564,7 @@ err_free_scratch_areas:
err_free_scratch_desc:
memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
err_disable_kho:
pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
kho_enable = false;
}
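The __kho_preserve_order() hunk above open-codes the lookup so that the new entry's inner xarray can be initialized with xa_init() before it becomes visible to other walkers. As a rough sketch of the underlying allocate-or-race pattern (the struct and function names here are hypothetical, not taken from the patch):

	/* Look up the entry at @index, allocating and installing one if absent. */
	struct my_entry *get_or_alloc(struct xarray *xa, unsigned long index)
	{
		struct my_entry *e, *new;

		e = xa_load(xa, index);
		if (e)
			return e;

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return ERR_PTR(-ENOMEM);

		/* Install only if the slot is still empty; returns the old entry. */
		e = xa_cmpxchg(xa, index, NULL, new, GFP_KERNEL);
		if (xa_err(e)) {
			kfree(new);		/* the store itself failed */
			return ERR_PTR(xa_err(e));
		}
		if (e) {
			kfree(new);		/* lost the race; adopt the winner */
			return e;
		}
		return new;
	}

The loser of a concurrent insert frees its own allocation and adopts the entry that won, which is how the patch handles a racing update of track->orders.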

View File

@@ -254,4 +254,10 @@ const struct movable_operations balloon_mops = {
.putback_page = balloon_page_putback,
};
static int __init balloon_init(void)
{
return set_movable_ops(&balloon_mops, PGTY_offline);
}
core_initcall(balloon_init);
#endif /* CONFIG_BALLOON_COMPACTION */

View File

@@ -845,6 +845,18 @@ static struct damos_filter *damos_nth_filter(int n, struct damos *s)
return NULL;
}
static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
{
struct damos_filter *filter;
int i = 0;
damos_for_each_ops_filter(filter, s) {
if (i++ == n)
return filter;
}
return NULL;
}
static void damos_commit_filter_arg(
struct damos_filter *dst, struct damos_filter *src)
{
@@ -871,6 +883,7 @@ static void damos_commit_filter(
{
dst->type = src->type;
dst->matching = src->matching;
dst->allow = src->allow;
damos_commit_filter_arg(dst, src);
}
@@ -908,7 +921,7 @@ static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
int i = 0, j = 0;
damos_for_each_ops_filter_safe(dst_filter, next, dst) {
src_filter = damos_nth_filter(i++, src);
src_filter = damos_nth_ops_filter(i++, src);
if (src_filter)
damos_commit_filter(dst_filter, src_filter);
else

View File

@@ -2158,8 +2158,8 @@ static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
{
damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
kobject_put(&scheme->access_pattern->kobj);
kobject_put(&scheme->dests->kobj);
damos_sysfs_dests_rm_dirs(scheme->dests);
kobject_put(&scheme->dests->kobj);
damon_sysfs_quotas_rm_dirs(scheme->quotas);
kobject_put(&scheme->quotas->kobj);
kobject_put(&scheme->watermarks->kobj);

View File

@@ -990,29 +990,34 @@ static void __init destroy_args(struct pgtable_debug_args *args)
/* Free page table entries */
if (args->start_ptep) {
pmd_clear(args->pmdp);
pte_free(args->mm, args->start_ptep);
mm_dec_nr_ptes(args->mm);
}
if (args->start_pmdp) {
pud_clear(args->pudp);
pmd_free(args->mm, args->start_pmdp);
mm_dec_nr_pmds(args->mm);
}
if (args->start_pudp) {
p4d_clear(args->p4dp);
pud_free(args->mm, args->start_pudp);
mm_dec_nr_puds(args->mm);
}
if (args->start_p4dp)
if (args->start_p4dp) {
pgd_clear(args->pgdp);
p4d_free(args->mm, args->start_p4dp);
}
/* Free vma and mm struct */
if (args->vma)
vm_area_free(args->vma);
if (args->mm)
mmdrop(args->mm);
mmput(args->mm);
}
static struct page * __init

View File

@@ -853,9 +853,17 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
#define hwpoison_hugetlb_range NULL
#endif
static int hwpoison_test_walk(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
/* We also want to consider pages mapped into VM_PFNMAP. */
return 0;
}
static const struct mm_walk_ops hwpoison_walk_ops = {
.pmd_entry = hwpoison_pte_range,
.hugetlb_entry = hwpoison_hugetlb_range,
.test_walk = hwpoison_test_walk,
.walk_lock = PGWALK_RDLOCK,
};

View File

@@ -43,8 +43,6 @@
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/pagewalk.h>
#include <linux/balloon_compaction.h>
#include <linux/zsmalloc.h>
#include <asm/tlbflush.h>
@@ -53,6 +51,33 @@
#include "internal.h"
#include "swap.h"
static const struct movable_operations *offline_movable_ops;
static const struct movable_operations *zsmalloc_movable_ops;
int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
{
/*
* We only allow for selected types and don't handle concurrent
* registration attempts yet.
*/
switch (type) {
case PGTY_offline:
if (offline_movable_ops && ops)
return -EBUSY;
offline_movable_ops = ops;
break;
case PGTY_zsmalloc:
if (zsmalloc_movable_ops && ops)
return -EBUSY;
zsmalloc_movable_ops = ops;
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(set_movable_ops);
static const struct movable_operations *page_movable_ops(struct page *page)
{
VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
@@ -62,15 +87,12 @@ static const struct movable_operations *page_movable_ops(struct page *page)
* it as movable, the page type must be sticky until the page gets freed
* back to the buddy.
*/
#ifdef CONFIG_BALLOON_COMPACTION
if (PageOffline(page))
/* Only balloon compaction sets PageOffline pages movable. */
return &balloon_mops;
#endif /* CONFIG_BALLOON_COMPACTION */
#if defined(CONFIG_ZSMALLOC) && defined(CONFIG_COMPACTION)
return offline_movable_ops;
if (PageZsmalloc(page))
return &zsmalloc_mops;
#endif /* defined(CONFIG_ZSMALLOC) && defined(CONFIG_COMPACTION) */
return zsmalloc_movable_ops;
return NULL;
}

View File

@@ -323,6 +323,25 @@ static inline bool arch_supports_page_table_move(void)
}
#endif
static inline bool uffd_supports_page_table_move(struct pagetable_move_control *pmc)
{
/*
* If we are moving a VMA that has uffd-wp registered but with
* remap events disabled (new VMA will not be registered with uffd), we
* need to ensure that the uffd-wp state is cleared from all pgtables.
* This means recursing into lower page tables in move_page_tables().
*
* We might get called with VMAs reversed when recovering from a
* failed page table move. In that case, the
* "old"-but-actually-"originally new" VMA during recovery will not have
* a uffd context. Recursing into lower page tables during the original
* move but not during the recovery move will cause trouble, because we
* run into already-existing page tables. So check both VMAs.
*/
return !vma_has_uffd_without_event_remap(pmc->old) &&
!vma_has_uffd_without_event_remap(pmc->new);
}
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct pagetable_move_control *pmc,
pmd_t *old_pmd, pmd_t *new_pmd)
@@ -335,6 +354,8 @@ static bool move_normal_pmd(struct pagetable_move_control *pmc,
if (!arch_supports_page_table_move())
return false;
if (!uffd_supports_page_table_move(pmc))
return false;
/*
* The destination pmd shouldn't be established, free_pgtables()
* should have released it.
@@ -361,15 +382,6 @@ static bool move_normal_pmd(struct pagetable_move_control *pmc,
if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
return false;
/* If this pmd belongs to a uffd vma with remap events disabled, we need
* to ensure that the uffd-wp state is cleared from all pgtables. This
* means recursing into lower page tables in move_page_tables(), and we
* can reuse the existing code if we simply treat the entry as "not
* moved".
*/
if (vma_has_uffd_without_event_remap(vma))
return false;
/*
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
@@ -418,6 +430,8 @@ static bool move_normal_pud(struct pagetable_move_control *pmc,
if (!arch_supports_page_table_move())
return false;
if (!uffd_supports_page_table_move(pmc))
return false;
/*
* The destination pud shouldn't be established, free_pgtables()
* should have released it.
@@ -425,15 +439,6 @@ static bool move_normal_pud(struct pagetable_move_control *pmc,
if (WARN_ON_ONCE(!pud_none(*new_pud)))
return false;
/* If this pud belongs to a uffd vma with remap events disabled, we need
* to ensure that the uffd-wp state is cleared from all pgtables. This
* means recursing into lower page tables in move_page_tables(), and we
* can reuse the existing code if we simply treat the entry as "not
* moved".
*/
if (vma_has_uffd_without_event_remap(vma))
return false;
/*
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
@@ -1620,7 +1625,7 @@ static void notify_uffd(struct vma_remap_struct *vrm, bool failed)
static bool vma_multi_allowed(struct vm_area_struct *vma)
{
struct file *file;
struct file *file = vma->vm_file;
/*
* We can't support moving multiple uffd VMAs as notify requires
@@ -1633,15 +1638,17 @@ static bool vma_multi_allowed(struct vm_area_struct *vma)
* Custom get unmapped area might result in MREMAP_FIXED not
* being obeyed.
*/
file = vma->vm_file;
if (file && !vma_is_shmem(vma) && !is_vm_hugetlb_page(vma)) {
const struct file_operations *fop = file->f_op;
if (!file || !file->f_op->get_unmapped_area)
return true;
/* Known good. */
if (vma_is_shmem(vma))
return true;
if (is_vm_hugetlb_page(vma))
return true;
if (file->f_op->get_unmapped_area == thp_get_unmapped_area)
return true;
if (fop->get_unmapped_area)
return false;
}
return true;
return false;
}
static int check_prep_vma(struct vma_remap_struct *vrm)
@@ -1818,10 +1825,11 @@ static unsigned long remap_move(struct vma_remap_struct *vrm)
unsigned long start = vrm->addr;
unsigned long end = vrm->addr + vrm->old_len;
unsigned long new_addr = vrm->new_addr;
bool allowed = true, seen_vma = false;
unsigned long target_addr = new_addr;
unsigned long res = -EFAULT;
unsigned long last_end;
bool seen_vma = false;
VMA_ITERATOR(vmi, current->mm, start);
/*
@@ -1834,9 +1842,7 @@ static unsigned long remap_move(struct vma_remap_struct *vrm)
unsigned long addr = max(vma->vm_start, start);
unsigned long len = min(end, vma->vm_end) - addr;
unsigned long offset, res_vma;
if (!allowed)
return -EFAULT;
bool multi_allowed;
/* No gap permitted at the start of the range. */
if (!seen_vma && start < vma->vm_start)
@@ -1865,9 +1871,15 @@ static unsigned long remap_move(struct vma_remap_struct *vrm)
vrm->new_addr = target_addr + offset;
vrm->old_len = vrm->new_len = len;
allowed = vma_multi_allowed(vma);
if (seen_vma && !allowed)
return -EFAULT;
multi_allowed = vma_multi_allowed(vma);
if (!multi_allowed) {
/* This is not the first VMA, abort immediately. */
if (seen_vma)
return -EFAULT;
/* This is the first, but there are more, abort. */
if (vma->vm_end < end)
return -EFAULT;
}
res_vma = check_prep_vma(vrm);
if (!res_vma)
@@ -1876,7 +1888,7 @@ static unsigned long remap_move(struct vma_remap_struct *vrm)
return res_vma;
if (!seen_vma) {
VM_WARN_ON_ONCE(allowed && res_vma != new_addr);
VM_WARN_ON_ONCE(multi_allowed && res_vma != new_addr);
res = res_vma;
}

View File

@@ -2246,8 +2246,15 @@ EXPORT_SYMBOL_GPL(zs_destroy_pool);
static int __init zs_init(void)
{
int rc __maybe_unused;
#ifdef CONFIG_ZPOOL
zpool_register_driver(&zs_zpool_driver);
#endif
#ifdef CONFIG_COMPACTION
rc = set_movable_ops(&zsmalloc_mops, PGTY_zsmalloc);
if (rc)
return rc;
#endif
zs_stat_init();
return 0;
@@ -2257,6 +2264,9 @@ static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
zpool_unregister_driver(&zs_zpool_driver);
#endif
#ifdef CONFIG_COMPACTION
set_movable_ops(NULL, PGTY_zsmalloc);
#endif
zs_stat_exit();
}

View File

@@ -2139,14 +2139,14 @@ static int snd_utimer_create(struct snd_timer_uinfo *utimer_info,
goto err_take_id;
}
utimer->id = utimer_id;
utimer->name = kasprintf(GFP_KERNEL, "snd-utimer%d", utimer_id);
if (!utimer->name) {
err = -ENOMEM;
goto err_get_name;
}
utimer->id = utimer_id;
tid.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.card = -1;

View File

@@ -510,6 +510,15 @@ static void alc256_shutup(struct hda_codec *codec)
hp_pin = 0x21;
alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
/* 3k pull low control for Headset jack. */
/* NOTE: call this before clearing the pin, otherwise codec stalls */
/* If the 3k pulldown control is disabled for alc257, mic detection will not work
 * correctly when booting with a headset plugged in, so skip setting it for alc257
*/
if (spec->en_3kpull_low)
alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
if (hp_pin_sense) {
@@ -520,14 +529,6 @@ static void alc256_shutup(struct hda_codec *codec)
msleep(75);
/* 3k pull low control for Headset jack. */
/* NOTE: call this before clearing the pin, otherwise codec stalls */
/* If the 3k pulldown control is disabled for alc257, mic detection will not work
 * correctly when booting with a headset plugged in, so skip setting it for alc257
*/
if (spec->en_3kpull_low)
alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
if (!spec->no_shutup_pins)
snd_hda_codec_write(codec, hp_pin, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
@@ -3579,6 +3580,7 @@ enum {
ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
ALC294_FIXUP_ASUS_MIC,
ALC294_FIXUP_ASUS_HEADSET_MIC,
ALC294_FIXUP_ASUS_I2C_HEADSET_MIC,
ALC294_FIXUP_ASUS_SPK,
ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
@@ -4889,6 +4891,15 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MIC
},
[ALC294_FIXUP_ASUS_I2C_HEADSET_MIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x19, 0x03a19020 }, /* use as headset mic */
{ }
},
.chained = true,
.chain_id = ALC287_FIXUP_CS35L41_I2C_2
},
[ALC294_FIXUP_ASUS_SPK] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -6368,6 +6379,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8548, "HP EliteBook x360 830 G6", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x854a, "HP EliteBook 830 G6", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x85c6, "HP Pavilion x360 Convertible 14-dy1xxx", ALC295_FIXUP_HP_MUTE_LED_COEFBIT11),
SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360),
SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
@@ -6728,7 +6741,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1b13, "ASUS U41SV/GA403U", ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1b93, "ASUS G614JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1c03, "ASUS UM3406HA", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1c03, "ASUS UM3406HA", ALC294_FIXUP_ASUS_I2C_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x1c33, "ASUS UX5304MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2),

View File

@@ -267,7 +267,7 @@ static const struct snd_kcontrol_new tas2770_snd_controls[] = {
static const struct snd_kcontrol_new tas2781_snd_controls[] = {
ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Volume", TAS2781_AMP_LEVEL,
1, 0, 20, 0, tas2781_amp_getvol,
tas2781_amp_putvol, amp_vol_tlv),
tas2781_amp_putvol, tas2781_amp_tlv),
ACARD_SINGLE_BOOL_EXT("Speaker Force Firmware Load", 0,
tas2781_force_fwload_get, tas2781_force_fwload_put),
};
@@ -305,7 +305,7 @@ static int tas2563_save_calibration(struct tas2781_hda *h)
efi_char16_t efi_name[TAS2563_CAL_VAR_NAME_MAX];
unsigned long max_size = TAS2563_CAL_DATA_SIZE;
unsigned char var8[TAS2563_CAL_VAR_NAME_MAX];
struct tasdevice_priv *p = h->hda_priv;
struct tasdevice_priv *p = h->priv;
struct calidata *cd = &p->cali_data;
struct cali_reg *r = &cd->cali_reg_array;
unsigned int offset = 0;

View File

@@ -494,9 +494,11 @@ static int tas2781_force_fwload_put(struct snd_kcontrol *kcontrol,
static struct snd_kcontrol_new tas2781_snd_ctls[] = {
ACARD_SINGLE_RANGE_EXT_TLV(NULL, TAS2781_AMP_LEVEL, 1, 0, 20, 0,
tas2781_amp_getvol, tas2781_amp_putvol, amp_vol_tlv),
tas2781_amp_getvol, tas2781_amp_putvol,
tas2781_amp_tlv),
ACARD_SINGLE_RANGE_EXT_TLV(NULL, TAS2781_DVC_LVL, 0, 0, 200, 1,
tas2781_digital_getvol, tas2781_digital_putvol, dvc_tlv),
tas2781_digital_getvol, tas2781_digital_putvol,
tas2781_dvc_tlv),
ACARD_SINGLE_BOOL_EXT(NULL, 0, tas2781_force_fwload_get,
tas2781_force_fwload_put),
};

View File

@@ -393,74 +393,6 @@ static int cs35l56_sdw_update_status(struct sdw_slave *peripheral,
return 0;
}
static int cs35l63_sdw_kick_divider(struct cs35l56_private *cs35l56,
struct sdw_slave *peripheral)
{
unsigned int curr_scale_reg, next_scale_reg;
int curr_scale, next_scale, ret;
if (!cs35l56->base.init_done)
return 0;
if (peripheral->bus->params.curr_bank) {
curr_scale_reg = SDW_SCP_BUSCLOCK_SCALE_B1;
next_scale_reg = SDW_SCP_BUSCLOCK_SCALE_B0;
} else {
curr_scale_reg = SDW_SCP_BUSCLOCK_SCALE_B0;
next_scale_reg = SDW_SCP_BUSCLOCK_SCALE_B1;
}
/*
* Current clock scale value must be different to new value.
* Modify current to guarantee this. If next still has the dummy
* value we wrote when it was current, the core code has not set
* a new scale so restore its original good value
*/
curr_scale = sdw_read_no_pm(peripheral, curr_scale_reg);
if (curr_scale < 0) {
dev_err(cs35l56->base.dev, "Failed to read current clock scale: %d\n", curr_scale);
return curr_scale;
}
next_scale = sdw_read_no_pm(peripheral, next_scale_reg);
if (next_scale < 0) {
dev_err(cs35l56->base.dev, "Failed to read next clock scale: %d\n", next_scale);
return next_scale;
}
if (next_scale == CS35L56_SDW_INVALID_BUS_SCALE) {
next_scale = cs35l56->old_sdw_clock_scale;
ret = sdw_write_no_pm(peripheral, next_scale_reg, next_scale);
if (ret < 0) {
dev_err(cs35l56->base.dev, "Failed to modify current clock scale: %d\n",
ret);
return ret;
}
}
cs35l56->old_sdw_clock_scale = curr_scale;
ret = sdw_write_no_pm(peripheral, curr_scale_reg, CS35L56_SDW_INVALID_BUS_SCALE);
if (ret < 0) {
dev_err(cs35l56->base.dev, "Failed to modify current clock scale: %d\n", ret);
return ret;
}
dev_dbg(cs35l56->base.dev, "Next bus scale: %#x\n", next_scale);
return 0;
}
static int cs35l56_sdw_bus_config(struct sdw_slave *peripheral,
struct sdw_bus_params *params)
{
struct cs35l56_private *cs35l56 = dev_get_drvdata(&peripheral->dev);
if ((cs35l56->base.type == 0x63) && (cs35l56->base.rev < 0xa1))
return cs35l63_sdw_kick_divider(cs35l56, peripheral);
return 0;
}
static int __maybe_unused cs35l56_sdw_clk_stop(struct sdw_slave *peripheral,
enum sdw_clk_stop_mode mode,
enum sdw_clk_stop_type type)
@@ -476,7 +408,6 @@ static const struct sdw_slave_ops cs35l56_sdw_ops = {
.read_prop = cs35l56_sdw_read_prop,
.interrupt_callback = cs35l56_sdw_interrupt,
.update_status = cs35l56_sdw_update_status,
.bus_config = cs35l56_sdw_bus_config,
#ifdef DEBUG
.clk_stop = cs35l56_sdw_clk_stop,
#endif

View File

@@ -838,6 +838,15 @@ const struct cirrus_amp_cal_controls cs35l56_calibration_controls = {
};
EXPORT_SYMBOL_NS_GPL(cs35l56_calibration_controls, "SND_SOC_CS35L56_SHARED");
static const struct cirrus_amp_cal_controls cs35l63_calibration_controls = {
.alg_id = 0xbf210,
.mem_region = WMFW_ADSP2_YM,
.ambient = "CAL_AMBIENT",
.calr = "CAL_R",
.status = "CAL_STATUS",
.checksum = "CAL_CHECKSUM",
};
int cs35l56_get_calibration(struct cs35l56_base *cs35l56_base)
{
u64 silicon_uid = 0;
@@ -912,19 +921,31 @@ EXPORT_SYMBOL_NS_GPL(cs35l56_read_prot_status, "SND_SOC_CS35L56_SHARED");
void cs35l56_log_tuning(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp)
{
__be32 pid, sid, tid;
unsigned int alg_id;
int ret;
switch (cs35l56_base->type) {
case 0x54:
case 0x56:
case 0x57:
alg_id = 0x9f212;
break;
default:
alg_id = 0xbf212;
break;
}
scoped_guard(mutex, &cs_dsp->pwr_lock) {
ret = cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(cs_dsp, "AS_PRJCT_ID",
WMFW_ADSP2_XM, 0x9f212),
WMFW_ADSP2_XM, alg_id),
0, &pid, sizeof(pid));
if (!ret)
ret = cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(cs_dsp, "AS_CHNNL_ID",
WMFW_ADSP2_XM, 0x9f212),
WMFW_ADSP2_XM, alg_id),
0, &sid, sizeof(sid));
if (!ret)
ret = cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(cs_dsp, "AS_SNPSHT_ID",
WMFW_ADSP2_XM, 0x9f212),
WMFW_ADSP2_XM, alg_id),
0, &tid, sizeof(tid));
}
@@ -974,8 +995,10 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
case 0x35A54:
case 0x35A56:
case 0x35A57:
cs35l56_base->calibration_controls = &cs35l56_calibration_controls;
break;
case 0x35A630:
cs35l56_base->calibration_controls = &cs35l63_calibration_controls;
devid = devid >> 4;
break;
default:

View File

@@ -695,7 +695,7 @@ static int cs35l56_write_cal(struct cs35l56_private *cs35l56)
return ret;
ret = cs_amp_write_cal_coeffs(&cs35l56->dsp.cs_dsp,
&cs35l56_calibration_controls,
cs35l56->base.calibration_controls,
&cs35l56->base.cal_data);
wm_adsp_stop(&cs35l56->dsp);

View File

@@ -20,8 +20,6 @@
#define CS35L56_SDW_GEN_INT_MASK_1 0xc1
#define CS35L56_SDW_INT_MASK_CODEC_IRQ BIT(0)
#define CS35L56_SDW_INVALID_BUS_SCALE 0xf
#define CS35L56_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
#define CS35L56_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE \
| SNDRV_PCM_FMTBIT_S32_LE)
@@ -52,7 +50,6 @@ struct cs35l56_private {
u8 asp_slot_count;
bool tdm_mode;
bool sysclk_set;
u8 old_sdw_clock_scale;
u8 sdw_link_num;
u8 sdw_unique_id;
};

View File

@@ -636,7 +636,7 @@ static int es8389_set_bias_level(struct snd_soc_component *component,
regmap_write(es8389->regmap, ES8389_ANA_CTL1, 0x59);
regmap_write(es8389->regmap, ES8389_ADC_EN, 0x00);
regmap_write(es8389->regmap, ES8389_CLK_OFF1, 0x00);
regmap_write(es8389->regmap, ES8389_RESET, 0x7E);
regmap_write(es8389->regmap, ES8389_RESET, 0x3E);
regmap_update_bits(es8389->regmap, ES8389_DAC_INV, 0x80, 0x80);
usleep_range(8000, 8500);
regmap_update_bits(es8389->regmap, ES8389_DAC_INV, 0x80, 0x00);

View File

@@ -910,10 +910,10 @@ static const struct snd_kcontrol_new tasdevice_cali_controls[] = {
static const struct snd_kcontrol_new tas2781_snd_controls[] = {
SOC_SINGLE_RANGE_EXT_TLV("Speaker Analog Volume", TAS2781_AMP_LEVEL,
1, 0, 20, 0, tas2781_amp_getvol,
tas2781_amp_putvol, amp_vol_tlv),
tas2781_amp_putvol, tas2781_amp_tlv),
SOC_SINGLE_RANGE_EXT_TLV("Speaker Digital Volume", TAS2781_DVC_LVL,
0, 0, 200, 1, tas2781_digital_getvol,
tas2781_digital_putvol, dvc_tlv),
tas2781_digital_putvol, tas2781_dvc_tlv),
};
static const struct snd_kcontrol_new tas2781_cali_controls[] = {

View File

@@ -349,7 +349,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
u16 cs_len;
u8 cs_type;
if (len < sizeof(*p))
if (len < sizeof(*cs_desc))
break;
cs_len = le16_to_cpu(cs_desc->wLength);
if (len < cs_len)

View File

@@ -285,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = {
/* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */
FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit),
FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit),
FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit),
FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit),
/* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */
FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit),
FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit),

View File

@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ARGS_H
#define _LINUX_ARGS_H
/*
* How do these macros work?
*
 * In __COUNT_ARGS() _0 to _15 are just placeholders from the start
 * in order to make sure _n is positioned over the correct number
 * from 15 to 0 (depending on X, which is a variadic argument list).
* They serve no purpose other than occupying a position. Since each
* macro parameter must have a distinct identifier, those identifiers
* are as good as any.
*
* In COUNT_ARGS() we use actual integers, so __COUNT_ARGS() returns
* that as _n.
*/
/* This counts to 15. Any more and it will return the 16th argument. */
#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/* Concatenate two parameters, but allow them to be expanded beforehand. */
#define __CONCAT(a, b) a ## b
#define CONCATENATE(a, b) __CONCAT(a, b)
#endif /* _LINUX_ARGS_H */
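The counting trick in this new header is easiest to see with an expansion example. A minimal sketch of argument-count dispatch built on it (FOO, FOO_1, FOO_2, foo_one and foo_two are hypothetical names, not from the patch):

	#include <linux/args.h>

	#define FOO_1(a)	foo_one(a)
	#define FOO_2(a, b)	foo_two(a, b)

	/* Dispatch on the number of arguments actually passed. */
	#define FOO(...)	CONCATENATE(FOO_, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)

COUNT_ARGS(x) expands to 1 and COUNT_ARGS(x, y) to 2, so FOO(x) becomes foo_one(x) and FOO(x, y) becomes foo_two(x, y). The two-step CONCATENATE()/__CONCAT() indirection is what lets COUNT_ARGS() expand before the token paste happens.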

View File

@@ -4,6 +4,7 @@
TEST_GEN_FILES += access_memory access_memory_even
TEST_FILES = _damon_sysfs.py
TEST_FILES += drgn_dump_damon_status.py
# functionality tests
TEST_PROGS += sysfs.sh

View File

@@ -5,10 +5,14 @@
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <syscall.h>
#include <time.h>
#include <stdbool.h>
@@ -168,6 +172,7 @@ static bool is_range_mapped(FILE *maps_fp, unsigned long start,
if (first_val <= start && second_val >= end) {
success = true;
fflush(maps_fp);
break;
}
}
@@ -175,6 +180,15 @@
return success;
}
/* Check if [ptr, ptr + size) mapped in /proc/self/maps. */
static bool is_ptr_mapped(FILE *maps_fp, void *ptr, unsigned long size)
{
unsigned long start = (unsigned long)ptr;
unsigned long end = start + size;
return is_range_mapped(maps_fp, start, end);
}
/*
* Returns the start address of the mapping on success, else returns
* NULL on failure.
@@ -733,6 +747,249 @@ out:
dont_unmap ? " [dontunmap]" : "");
}
#ifdef __NR_userfaultfd
static void mremap_move_multi_invalid_vmas(FILE *maps_fp,
unsigned long page_size)
{
char *test_name = "mremap move multiple invalid vmas";
const size_t size = 10 * page_size;
bool success = true;
char *ptr, *tgt_ptr;
int uffd, err, i;
void *res;
struct uffdio_api api = {
.api = UFFD_API,
.features = UFFD_EVENT_PAGEFAULT,
};
uffd = syscall(__NR_userfaultfd, O_NONBLOCK);
if (uffd == -1) {
err = errno;
perror("userfaultfd");
if (err == EPERM) {
ksft_test_result_skip("%s - missing uffd", test_name);
return;
}
success = false;
goto out;
}
if (ioctl(uffd, UFFDIO_API, &api)) {
perror("ioctl UFFDIO_API");
success = false;
goto out_close_uffd;
}
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
if (ptr == MAP_FAILED) {
perror("mmap");
success = false;
goto out_close_uffd;
}
tgt_ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
if (tgt_ptr == MAP_FAILED) {
perror("mmap");
success = false;
goto out_close_uffd;
}
if (munmap(tgt_ptr, size)) {
perror("munmap");
success = false;
goto out_unmap;
}
/*
* Unmap so we end up with:
*
* 0 2 4 6 8 10 offset in buffer
* |*| |*| |*| |*| |*|
* |*| |*| |*| |*| |*|
*
* Additionally, register each with UFFD.
*/
for (i = 0; i < 10; i += 2) {
void *unmap_ptr = &ptr[(i + 1) * page_size];
unsigned long start = (unsigned long)&ptr[i * page_size];
struct uffdio_register reg = {
.range = {
.start = start,
.len = page_size,
},
.mode = UFFDIO_REGISTER_MODE_MISSING,
};
if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
perror("ioctl UFFDIO_REGISTER");
success = false;
goto out_unmap;
}
if (munmap(unmap_ptr, page_size)) {
perror("munmap");
success = false;
goto out_unmap;
}
}
/*
* Now try to move the entire range which is invalid for multi VMA move.
*
* This will fail, and no VMA should be moved, as we check this ahead of
* time.
*/
res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
err = errno;
if (res != MAP_FAILED) {
fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
success = false;
goto out_unmap;
}
if (err != EFAULT) {
errno = err;
perror("mrmeap() unexpected error");
success = false;
goto out_unmap;
}
if (is_ptr_mapped(maps_fp, tgt_ptr, page_size)) {
fprintf(stderr,
"Invalid uffd-armed VMA at start of multi range moved\n");
success = false;
goto out_unmap;
}
/*
 * Now try to move a single VMA; this should succeed since it is not a
 * multi VMA move.
*/
res = mremap(ptr, page_size, page_size,
MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
if (res == MAP_FAILED) {
perror("mremap single invalid-multi VMA");
success = false;
goto out_unmap;
}
/*
* Unmap the VMA, and remap a non-uffd registered (therefore, multi VMA
* move valid) VMA at the start of ptr range.
*/
if (munmap(tgt_ptr, page_size)) {
perror("munmap");
success = false;
goto out_unmap;
}
res = mmap(ptr, page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
if (res == MAP_FAILED) {
perror("mmap");
success = false;
goto out_unmap;
}
/*
* Now try to move the entire range, we should succeed in moving the
* first VMA, but no others, and report a failure.
*/
res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
err = errno;
if (res != MAP_FAILED) {
fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
success = false;
goto out_unmap;
}
if (err != EFAULT) {
errno = err;
perror("mrmeap() unexpected error");
success = false;
goto out_unmap;
}
if (!is_ptr_mapped(maps_fp, tgt_ptr, page_size)) {
fprintf(stderr, "Valid VMA not moved\n");
success = false;
goto out_unmap;
}
/*
* Unmap the VMA, and map valid VMA at start of ptr range, and replace
* all existing multi-move invalid VMAs, except the last, with valid
* multi-move VMAs.
*/
if (munmap(tgt_ptr, page_size)) {
perror("munmap");
success = false;
goto out_unmap;
}
if (munmap(ptr, size - 2 * page_size)) {
perror("munmap");
success = false;
goto out_unmap;
}
for (i = 0; i < 8; i += 2) {
res = mmap(&ptr[i * page_size], page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
if (res == MAP_FAILED) {
perror("mmap");
success = false;
goto out_unmap;
}
}
/*
* Now try to move the entire range, we should succeed in moving all but
* the last VMA, and report a failure.
*/
res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
err = errno;
if (res != MAP_FAILED) {
fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
success = false;
goto out_unmap;
}
if (err != EFAULT) {
errno = err;
perror("mrmeap() unexpected error");
success = false;
goto out_unmap;
}
for (i = 0; i < 10; i += 2) {
bool is_mapped = is_ptr_mapped(maps_fp,
&tgt_ptr[i * page_size], page_size);
if (i < 8 && !is_mapped) {
fprintf(stderr, "Valid VMA not moved at %d\n", i);
success = false;
goto out_unmap;
} else if (i == 8 && is_mapped) {
fprintf(stderr, "Invalid VMA moved at %d\n", i);
success = false;
goto out_unmap;
}
}
out_unmap:
if (munmap(tgt_ptr, size))
perror("munmap tgt");
if (munmap(ptr, size))
perror("munmap src");
out_close_uffd:
close(uffd);
out:
if (success)
ksft_test_result_pass("%s\n", test_name);
else
ksft_test_result_fail("%s\n", test_name);
}
#else
static void mremap_move_multi_invalid_vmas(FILE *maps_fp, unsigned long page_size)
{
char *test_name = "mremap move multiple invalid vmas";
ksft_test_result_skip("%s - missing uffd", test_name);
}
#endif /* __NR_userfaultfd */
/* Returns the time taken for the remap on success else returns -1. */
static long long remap_region(struct config c, unsigned int threshold_mb,
char *rand_addr)
@@ -1074,7 +1331,7 @@ int main(int argc, char **argv)
char *rand_addr;
size_t rand_size;
int num_expand_tests = 2;
int num_misc_tests = 8;
int num_misc_tests = 9;
struct test test_cases[MAX_TEST] = {};
struct test perf_test_cases[MAX_PERF_TEST];
int page_size;
@@ -1197,8 +1454,6 @@ int main(int argc, char **argv)
mremap_expand_merge(maps_fp, page_size);
mremap_expand_merge_offset(maps_fp, page_size);
fclose(maps_fp);
mremap_move_within_range(pattern_seed, rand_addr);
mremap_move_1mb_from_start(pattern_seed, rand_addr);
mremap_shrink_multiple_vmas(page_size, /* inplace= */true);
@ -1207,6 +1462,9 @@ int main(int argc, char **argv)
mremap_move_multiple_vmas(pattern_seed, page_size, /* dontunmap= */ true);
mremap_move_multiple_vmas_split(pattern_seed, page_size, /* dontunmap= */ false);
mremap_move_multiple_vmas_split(pattern_seed, page_size, /* dontunmap= */ true);
mremap_move_multi_invalid_vmas(maps_fp, page_size);
fclose(maps_fp);
if (run_perf_tests) {
ksft_print_msg("\n%s\n",

View File

@@ -1400,7 +1400,7 @@ static int cmd_dev_get_features(void)
if (!((1ULL << i) & features))
continue;
if (i < sizeof(feat_map) / sizeof(feat_map[0]))
if (i < ARRAY_SIZE(feat_map))
feat = feat_map[i];
else
feat = "unknown";
@@ -1477,7 +1477,7 @@ static void __cmd_create_help(char *exe, bool recovery)
printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n");
printf("\tdefault: nthreads=nr_queues");
for (i = 0; i < sizeof(tgt_ops_list) / sizeof(tgt_ops_list[0]); i++) {
for (i = 0; i < ARRAY_SIZE(tgt_ops_list); i++) {
const struct ublk_tgt_ops *ops = tgt_ops_list[i];
if (ops->usage)
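Both ublk hunks above replace an open-coded sizeof division with ARRAY_SIZE(). A minimal sketch of the idiom (the kernel's own macro additionally rejects pointers via __must_be_array(); this bare userspace version does not):

	#include <stdio.h>

	#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

	int main(void)
	{
		const char *feat_map[] = { "f0", "f1", "f2" };

		/* Prints 3, and stays correct as entries are added or removed. */
		printf("%zu\n", ARRAY_SIZE(feat_map));
		return 0;
	}

Naming the array only once also avoids the classic refactoring bug where the two halves of the division end up referring to different arrays.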

View File

@@ -1 +1,5 @@
/* Avoid duplicate definitions due to system headers. */
#ifdef __CONCAT
#undef __CONCAT
#endif
#include "../../../../include/linux/idr.h"