mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
kho: use unsigned long for nr_pages
Patch series "kho: clean up page initialization logic", v2. This series simplifies the page initialization logic in kho_restore_page(). It was originally only a single patch [0], but on Pasha's suggestion, I added another patch to use unsigned long for nr_pages. Technically speaking, the patches aren't related and can be applied independently, but bundling them together since patch 2 relies on 1 and it is easier to manage them this way. This patch (of 2): With 4k pages, a 32-bit nr_pages can span up to 16 TiB. While it is a lot, there exist systems with terabytes of RAM. gup is also moving to using long for nr_pages. Use unsigned long and make KHO future-proof. Link: https://lkml.kernel.org/r/20260116112217.915803-1-pratyush@kernel.org Link: https://lkml.kernel.org/r/20260116112217.915803-2-pratyush@kernel.org Signed-off-by: Pratyush Yadav <pratyush@kernel.org> Suggested-by: Pasha Tatashin <pasha.tatashin@soleen.com> Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com> Cc: Alexander Graf <graf@amazon.com> Cc: Suren Baghdasaryan <surenb@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by: Andrew Morton
parent: 931d5c36c7
commit: 840fe43d37
@@ -22,15 +22,15 @@ bool is_kho_boot(void);
 int kho_preserve_folio(struct folio *folio);
 void kho_unpreserve_folio(struct folio *folio);
-int kho_preserve_pages(struct page *page, unsigned int nr_pages);
-void kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
+int kho_preserve_pages(struct page *page, unsigned long nr_pages);
+void kho_unpreserve_pages(struct page *page, unsigned long nr_pages);
 int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
 void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation);
 void *kho_alloc_preserve(size_t size);
 void kho_unpreserve_free(void *mem);
 void kho_restore_free(void *mem);
 struct folio *kho_restore_folio(phys_addr_t phys);
-struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
+struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages);
 void *kho_restore_vmalloc(const struct kho_vmalloc *preservation);
 int kho_add_subtree(const char *name, void *fdt);
 void kho_remove_subtree(void *fdt);
@@ -219,7 +219,8 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
 static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
 {
 	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
-	unsigned int nr_pages, ref_cnt;
+	unsigned long nr_pages;
+	unsigned int ref_cnt;
 	union kho_page_info info;
 
 	if (!page)
@@ -246,7 +247,7 @@ static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
 	 * count of 1
 	 */
 	ref_cnt = is_folio ? 0 : 1;
-	for (unsigned int i = 1; i < nr_pages; i++)
+	for (unsigned long i = 1; i < nr_pages; i++)
 		set_page_count(page + i, ref_cnt);
 
 	if (is_folio && info.order)
@@ -288,7 +289,7 @@ EXPORT_SYMBOL_GPL(kho_restore_folio);
 *
 * Return: 0 on success, error code on failure
 */
-struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
+struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages)
 {
 	const unsigned long start_pfn = PHYS_PFN(phys);
 	const unsigned long end_pfn = start_pfn + nr_pages;
@@ -837,7 +838,7 @@ EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
 *
 * Return: 0 on success, error code on failure
 */
-int kho_preserve_pages(struct page *page, unsigned int nr_pages)
+int kho_preserve_pages(struct page *page, unsigned long nr_pages)
 {
 	struct kho_mem_track *track = &kho_out.track;
 	const unsigned long start_pfn = page_to_pfn(page);
@@ -881,7 +882,7 @@ EXPORT_SYMBOL_GPL(kho_preserve_pages);
 * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
 * preserved blocks is not supported.
 */
-void kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
+void kho_unpreserve_pages(struct page *page, unsigned long nr_pages)
 {
 	struct kho_mem_track *track = &kho_out.track;
 	const unsigned long start_pfn = page_to_pfn(page);
Reference in New Issue
Block a user