
slab updates for 6.16

-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEe7vIQRWZI0iWSE3xu+CwddJFiJoFAmhAB1wACgkQu+CwddJF
 iJrbwAf/Z5u9E7FO+dsNczPwI4/eZYdbfr4KYpLjnIBt0tbXp9HMjkkx8aJNiWoA
 4x4kcIxkATnYP669GTeELU9sskflrwjAmpAZbU4wWPZ89aqmPwQCk4ZbWX7Y+O+/
 OITeQiUVOcpWekZ8ke+JGWa9jjxWnWV5bn0ucBdxBmm2NvdVHEkwNUZmiFq85tEJ
 1M37TaoywC0d//ww+LGEBfpIYWoev7n3ITm8YeGXJT6+34lfBOBQ8F7C65olb5qq
 oGaGQH8VJ9XBRPOmQpUej/s76ovqc6wQfGs0sLcVMhslz8Vwcxtw62CuPtEw5JAK
 FdZueTH4kEhD6kspYNU0oq+UzUlc+g==
 =fGZh
 -----END PGP SIGNATURE-----

Merge tag 'slab-for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab updates from Vlastimil Babka:

 - Make kvmalloc() more suitable for callers that need it to succeed,
   but without the unnecessary overhead of reclaim and compaction to
   get a physically contiguous allocation.

   Instead, fall back to vmalloc() more easily by default, unless
   instructed by __GFP_RETRY_MAYFAIL to try harder for a kmalloc()
   allocation. This should allow the removal of an xfs-specific
   workaround (Michal Hocko; a caller-side sketch follows this list)

 - Remove potentially excessive warnings due to memory pressure when
   allocating structures for per-object allocation profiling metadata
   (Usama Arif)
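
As a caller-side illustration of the first item, a minimal sketch only:
alloc_buf()/alloc_buf_contig() and their size parameter are hypothetical
helpers, while kvmalloc() and kvfree() are the real APIs from
<linux/slab.h>.

#include <linux/slab.h>		/* kvmalloc(), kvfree() */
#include <linux/gfp.h>		/* GFP_KERNEL, __GFP_RETRY_MAYFAIL */

static void *alloc_buf(size_t size)
{
	/*
	 * Default behaviour after this series: for sizes above one page
	 * the kmalloc attempt no longer direct-reclaims, so kvmalloc()
	 * falls back to vmalloc() sooner under memory pressure.
	 */
	return kvmalloc(size, GFP_KERNEL);
}

static void *alloc_buf_contig(size_t size)
{
	/*
	 * A caller that genuinely prefers physically contiguous memory
	 * opts in to harder kmalloc() retries before the vmalloc
	 * fallback.
	 */
	return kvmalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}

Either result is released with kvfree(), which handles both the kmalloc
and the vmalloc case.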

* tag 'slab-for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm: slub: only warn once when allocating slab obj extensions fails
  mm: kvmalloc: make kmalloc fast path real fast path
commit 1af80d00e1
Author: Linus Torvalds
Date:   2025-06-04 08:59:59 -07:00

@@ -2084,10 +2084,11 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 
 	slab = virt_to_slab(p);
 	if (!slab_obj_exts(slab) &&
-	    WARN(alloc_slab_obj_exts(slab, s, flags, false),
-		 "%s, %s: Failed to create slab extension vector!\n",
-		 __func__, s->name))
+	    alloc_slab_obj_exts(slab, s, flags, false)) {
+		pr_warn_once("%s, %s: Failed to create slab extension vector!\n",
+			     __func__, s->name);
 		return NULL;
+	}
 
 	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
 }
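
The pr_warn_once() introduced above prints only on the first failure; the
kernel implements this with a static once-flag (DO_ONCE_LITE). A minimal
userspace C sketch of the same warn-once idiom, for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Emit the warning on the first call only, so repeated allocation
 * failures under memory pressure cannot flood the log. Each expansion
 * site gets its own static flag, as in the kernel macro. */
#define warn_once(...)						\
	do {							\
		static bool warned;				\
		if (!warned) {					\
			warned = true;				\
			fprintf(stderr, __VA_ARGS__);		\
		}						\
	} while (0)

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_once("failed to create slab extension vector\n");
	return 0;	/* the warning is printed exactly once */
}
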
@@ -4968,14 +4969,16 @@ static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
 	 * We want to attempt a large physically contiguous block first because
 	 * it is less likely to fragment multiple larger blocks and therefore
 	 * contribute to a long term fragmentation less than vmalloc fallback.
-	 * However make sure that larger requests are not too disruptive - no
-	 * OOM killer and no allocation failure warnings as we have a fallback.
+	 * However make sure that larger requests are not too disruptive - i.e.
+	 * do not direct reclaim unless physically continuous memory is preferred
+	 * (__GFP_RETRY_MAYFAIL mode). We still kick in kswapd/kcompactd to
+	 * start working in the background
 	 */
 	if (size > PAGE_SIZE) {
 		flags |= __GFP_NOWARN;
 
 		if (!(flags & __GFP_RETRY_MAYFAIL))
-			flags |= __GFP_NORETRY;
+			flags &= ~__GFP_DIRECT_RECLAIM;
 
 		/* nofail semantic is implemented by the vmalloc fallback */
 		flags &= ~__GFP_NOFAIL;
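
To see the effect of this hunk in isolation, a self-contained userspace
sketch of the post-patch flag adjustment; the SK_* flag values here are
made up, and the real gfp bits live in include/linux/gfp_types.h:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel gfp bits; values are invented. */
#define SK_GFP_NOWARN		0x1u
#define SK_GFP_RETRY_MAYFAIL	0x2u
#define SK_GFP_DIRECT_RECLAIM	0x4u
#define SK_GFP_NOFAIL		0x8u
#define SK_PAGE_SIZE		4096u

/* Mirrors the post-patch decision: large requests never warn, drop the
 * nofail bit (the vmalloc fallback provides that semantic), and only
 * direct-reclaim when the caller opted in via __GFP_RETRY_MAYFAIL. */
static unsigned int kmalloc_gfp_adjust_sketch(unsigned int flags, size_t size)
{
	if (size > SK_PAGE_SIZE) {
		flags |= SK_GFP_NOWARN;
		if (!(flags & SK_GFP_RETRY_MAYFAIL))
			flags &= ~SK_GFP_DIRECT_RECLAIM;
		flags &= ~SK_GFP_NOFAIL;
	}
	return flags;
}

int main(void)
{
	unsigned int in = SK_GFP_DIRECT_RECLAIM | SK_GFP_NOFAIL;

	/* Large request without opting in: direct reclaim is dropped. */
	printf("adjusted: %#x\n", kmalloc_gfp_adjust_sketch(in, 64 * 1024));
	/* Opting in keeps direct reclaim for the contiguous attempt. */
	printf("adjusted: %#x\n",
	       kmalloc_gfp_adjust_sketch(in | SK_GFP_RETRY_MAYFAIL, 64 * 1024));
	return 0;
}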