Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  |  95
1 file changed, 45 insertions(+), 50 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 77d2a3811bdd..4d986e2d70b9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -594,8 +594,6 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
static int page_is_consistent(struct zone *zone, struct page *page)
{
- if (!pfn_valid_within(page_to_pfn(page)))
- return 0;
if (zone != page_zone(page))
return 0;
@@ -840,21 +838,24 @@ void init_mem_debugging_and_hardening(void)
}
#endif
- if (_init_on_alloc_enabled_early) {
- if (page_poisoning_requested)
- pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
- "will take precedence over init_on_alloc\n");
- else
- static_branch_enable(&init_on_alloc);
- }
- if (_init_on_free_enabled_early) {
- if (page_poisoning_requested)
- pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
- "will take precedence over init_on_free\n");
- else
- static_branch_enable(&init_on_free);
+ if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
+ page_poisoning_requested) {
+ pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+ "will take precedence over init_on_alloc and init_on_free\n");
+ _init_on_alloc_enabled_early = false;
+ _init_on_free_enabled_early = false;
}
+ if (_init_on_alloc_enabled_early)
+ static_branch_enable(&init_on_alloc);
+ else
+ static_branch_disable(&init_on_alloc);
+
+ if (_init_on_free_enabled_early)
+ static_branch_enable(&init_on_free);
+ else
+ static_branch_disable(&init_on_free);
+
#ifdef CONFIG_DEBUG_PAGEALLOC
if (!debug_pagealloc_enabled())
return;
@@ -1022,16 +1023,12 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
if (order >= MAX_ORDER - 2)
return false;
- if (!pfn_valid_within(buddy_pfn))
- return false;
-
combined_pfn = buddy_pfn & pfn;
higher_page = page + (combined_pfn - pfn);
buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
higher_buddy = higher_page + (buddy_pfn - combined_pfn);
- return pfn_valid_within(buddy_pfn) &&
- page_is_buddy(higher_page, higher_buddy, order + 1);
+ return page_is_buddy(higher_page, higher_buddy, order + 1);
}
/*
@@ -1092,8 +1089,6 @@ continue_merging:
buddy_pfn = __find_buddy_pfn(pfn, order);
buddy = page + (buddy_pfn - pfn);
- if (!pfn_valid_within(buddy_pfn))
- goto done_merging;
if (!page_is_buddy(page, buddy, order))
goto done_merging;
/*
@@ -1751,9 +1746,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
/*
* Check that the whole (or subset of) a pageblock given by the interval of
* [start_pfn, end_pfn) is valid and within the same zone, before scanning it
- * with the migration of free compaction scanner. The scanners then need to
- * use only pfn_valid_within() check for arches that allow holes within
- * pageblocks.
+ * with the migration of free compaction scanner.
*
* Return struct page pointer of start_pfn, or NULL if checks were not passed.
*
@@ -1869,8 +1862,6 @@ static inline void __init pgdat_init_report_one_done(void)
*/
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
- if (!pfn_valid_within(pfn))
- return false;
if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
return false;
return true;
@@ -2517,11 +2508,6 @@ static int move_freepages(struct zone *zone,
int pages_moved = 0;
for (pfn = start_pfn; pfn <= end_pfn;) {
- if (!pfn_valid_within(pfn)) {
- pfn++;
- continue;
- }
-
page = pfn_to_page(pfn);
if (!PageBuddy(page)) {
/*
@@ -6165,7 +6151,7 @@ static int node_load[MAX_NUMNODES];
*
* Return: node id of the found node or %NUMA_NO_NODE if no node is found.
*/
-static int find_next_best_node(int node, nodemask_t *used_node_mask)
+int find_next_best_node(int node, nodemask_t *used_node_mask)
{
int n, val;
int min_val = INT_MAX;
@@ -6650,7 +6636,6 @@ static void __meminit zone_init_free_lists(struct zone *zone)
}
}
-#if !defined(CONFIG_FLATMEM)
/*
* Only struct pages that correspond to ranges defined by memblock.memory
* are zeroed and initialized by going through __init_single_page() during
@@ -6695,13 +6680,6 @@ static void __init init_unavailable_range(unsigned long spfn,
pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
node, zone_names[zone], pgcnt);
}
-#else
-static inline void init_unavailable_range(unsigned long spfn,
- unsigned long epfn,
- int zone, int node)
-{
-}
-#endif
static void __init memmap_init_zone_range(struct zone *zone,
unsigned long start_pfn,
@@ -6731,7 +6709,7 @@ static void __init memmap_init(void)
{
unsigned long start_pfn, end_pfn;
unsigned long hole_pfn = 0;
- int i, j, zone_id, nid;
+ int i, j, zone_id = 0, nid;
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
struct pglist_data *node = NODE_DATA(nid);
@@ -6764,6 +6742,26 @@ static void __init memmap_init(void)
init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}
+void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, int nid, bool exact_nid)
+{
+ void *ptr;
+
+ if (exact_nid)
+ ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ nid);
+ else
+ ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ nid);
+
+ if (ptr && size > 0)
+ page_init_poison(ptr, size);
+
+ return ptr;
+}
+
static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
@@ -7511,7 +7509,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
}
#ifdef CONFIG_FLATMEM
-static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
+static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
unsigned long __maybe_unused start = 0;
unsigned long __maybe_unused offset = 0;
@@ -7535,8 +7533,8 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
end = pgdat_end_pfn(pgdat);
end = ALIGN(end, MAX_ORDER_NR_PAGES);
size = (end - start) * sizeof(struct page);
- map = memblock_alloc_node(size, SMP_CACHE_BYTES,
- pgdat->node_id);
+ map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
+ pgdat->node_id, false);
if (!map)
panic("Failed to allocate %ld bytes for node %d memory map\n",
size, pgdat->node_id);
@@ -7557,7 +7555,7 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
#endif
}
#else
-static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
+static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
#endif /* CONFIG_FLATMEM */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -8824,9 +8822,6 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
}
for (; iter < pageblock_nr_pages - offset; iter++) {
- if (!pfn_valid_within(pfn + iter))
- continue;
-
page = pfn_to_page(pfn + iter);
/*
@@ -8986,7 +8981,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
cc->nr_migratepages -= nr_reclaimed;
ret = migrate_pages(&cc->migratepages, alloc_migration_target,
- NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
+ NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
/*
* On -ENOMEM, migrate_pages() bails out right away. It is pointless
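
Note: the init_mem_debugging_and_hardening() hunk above changes the precedence handling so that, when page poisoning is requested, both init_on_alloc and init_on_free are cleared and their static branches are then set unconditionally from the (possibly cleared) early flags. Below is a minimal userspace sketch of that decision logic only; it uses plain bools in place of the kernel's static branches, and all names are illustrative stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel's early parameters and static branches. */
static bool init_on_alloc_early = true;   /* e.g. init_on_alloc=1 on the command line */
static bool init_on_free_early  = false;
static bool page_poisoning_requested = true;

static bool init_on_alloc_enabled;
static bool init_on_free_enabled;

static void resolve_mem_auto_init(void)
{
	/* Poisoning wins over both auto-init options, as in the hunk above. */
	if ((init_on_alloc_early || init_on_free_early) &&
	    page_poisoning_requested) {
		printf("page poisoning takes precedence over init_on_alloc/init_on_free\n");
		init_on_alloc_early = false;
		init_on_free_early = false;
	}

	/* Unconditionally resolve both switches to their final state. */
	init_on_alloc_enabled = init_on_alloc_early;
	init_on_free_enabled = init_on_free_early;
}

int main(void)
{
	resolve_mem_auto_init();
	printf("init_on_alloc=%d init_on_free=%d\n",
	       init_on_alloc_enabled, init_on_free_enabled);
	return 0;
}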