Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig      |   4
-rw-r--r--   mm/kmemleak.c   |   5
-rw-r--r--   mm/ksm.c        |  10
-rw-r--r--   mm/memcontrol.c | 127
-rw-r--r--   mm/rmap.c       |   4
-rw-r--r--   mm/swapfile.c   |  12
-rw-r--r--   mm/vmalloc.c    |  50
7 files changed, 104 insertions, 108 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index edd300aca173..57963c6063d1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -224,7 +224,9 @@ config KSM
 	  the many instances by a single resident page with that content, so
 	  saving memory until one or another app needs to modify the content.
 	  Recommended for use with KVM, or with other duplicative applications.
-	  See Documentation/vm/ksm.txt for more information.
+	  See Documentation/vm/ksm.txt for more information: KSM is inactive
+	  until a program has madvised that an area is MADV_MERGEABLE, and
+	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
 
 config DEFAULT_MMAP_MIN_ADDR
 	int "Low address space to protect from user allocation"
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 4ea4510e2996..8bf765c4f58d 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -833,12 +833,15 @@ static void early_alloc(struct early_log *log)
 	 */
 	rcu_read_lock();
 	object = create_object((unsigned long)log->ptr, log->size,
-			       log->min_count, GFP_KERNEL);
+			       log->min_count, GFP_ATOMIC);
+	if (!object)
+		goto out;
 	spin_lock_irqsave(&object->lock, flags);
 	for (i = 0; i < log->trace_len; i++)
 		object->trace[i] = log->trace[i];
 	object->trace_len = log->trace_len;
 	spin_unlock_irqrestore(&object->lock, flags);
+out:
 	rcu_read_unlock();
 }
 
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -184,11 +184,6 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock);
 		sizeof(struct __struct), __alignof__(struct __struct),\
 		(__flags), NULL)
 
-static void __init ksm_init_max_kernel_pages(void)
-{
-	ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
-}
-
 static int __init ksm_slab_init(void)
 {
 	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1673,7 +1668,7 @@ static int __init ksm_init(void)
 	struct task_struct *ksm_thread;
 	int err;
 
-	ksm_init_max_kernel_pages();
+	ksm_max_kernel_pages = totalram_pages / 4;
 
 	err = ksm_slab_init();
 	if (err)
@@ -1697,6 +1692,9 @@ static int __init ksm_init(void)
 		kthread_stop(ksm_thread);
 		goto out_free2;
 	}
+#else
+	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
+
 #endif /* CONFIG_SYSFS */
 
 	return 0;
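The new Kconfig help text above doubles as a how-to: pages are only merged in regions a program has explicitly marked, and scanning only runs once root starts it. A minimal userspace sketch of that opt-in follows; it is illustrative only (file name, sizes and error handling are not part of this patch) and assumes a kernel built with CONFIG_KSM.

/* ksm-optin.c: mark an anonymous mapping as mergeable by KSM.
 * Illustrative sketch only; error handling kept minimal. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	void *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (area == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(area, 0x5a, len);	/* identical pages become merge candidates */

	/* Opt the region into KSM; the call fails on kernels without CONFIG_KSM. */
	if (madvise(area, len, MADV_MERGEABLE) != 0) {
		perror("madvise(MADV_MERGEABLE)");
		return 1;
	}
	return 0;
}

Merging still does not begin until root writes 1 to /sys/kernel/mm/ksm/run (with CONFIG_SYSFS); on sysfs-less kernels the #else branch added to ksm_init() above starts the scanner unconditionally.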
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e2b98a6875c0..f99f5991d6bb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *page)
 static void
 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
 				struct mem_cgroup_per_zone *mz,
-				struct mem_cgroup_tree_per_zone *mctz)
+				struct mem_cgroup_tree_per_zone *mctz,
+				unsigned long long new_usage_in_excess)
 {
 	struct rb_node **p = &mctz->rb_root.rb_node;
 	struct rb_node *parent = NULL;
@@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
 	if (mz->on_tree)
 		return;
 
-	mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res);
+	mz->usage_in_excess = new_usage_in_excess;
+	if (!mz->usage_in_excess)
+		return;
 	while (*p) {
 		parent = *p;
 		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
@@ -353,16 +356,6 @@ __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
 }
 
 static void
-mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
-				struct mem_cgroup_per_zone *mz,
-				struct mem_cgroup_tree_per_zone *mctz)
-{
-	spin_lock(&mctz->lock);
-	__mem_cgroup_insert_exceeded(mem, mz, mctz);
-	spin_unlock(&mctz->lock);
-}
-
-static void
 mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
 				struct mem_cgroup_per_zone *mz,
 				struct mem_cgroup_tree_per_zone *mctz)
@@ -392,34 +385,36 @@ static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
 {
-	unsigned long long prev_usage_in_excess, new_usage_in_excess;
-	bool updated_tree = false;
+	unsigned long long excess;
 	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup_tree_per_zone *mctz;
-
-	mz = mem_cgroup_zoneinfo(mem, page_to_nid(page), page_zonenum(page));
+	int nid = page_to_nid(page);
+	int zid = page_zonenum(page);
 	mctz = soft_limit_tree_from_page(page);
 
 	/*
-	 * We do updates in lazy mode, mem's are removed
-	 * lazily from the per-zone, per-node rb tree
+	 * Necessary to update all ancestors when hierarchy is used.
+	 * because their event counter is not touched.
 	 */
-	prev_usage_in_excess = mz->usage_in_excess;
-
-	new_usage_in_excess = res_counter_soft_limit_excess(&mem->res);
-	if (prev_usage_in_excess) {
-		mem_cgroup_remove_exceeded(mem, mz, mctz);
-		updated_tree = true;
-	}
-	if (!new_usage_in_excess)
-		goto done;
-	mem_cgroup_insert_exceeded(mem, mz, mctz);
-
-done:
-	if (updated_tree) {
-		spin_lock(&mctz->lock);
-		mz->usage_in_excess = new_usage_in_excess;
-		spin_unlock(&mctz->lock);
+	for (; mem; mem = parent_mem_cgroup(mem)) {
+		mz = mem_cgroup_zoneinfo(mem, nid, zid);
+		excess = res_counter_soft_limit_excess(&mem->res);
+		/*
+		 * We have to update the tree if mz is on RB-tree or
+		 * mem is over its softlimit.
+		 */
+		if (excess || mz->on_tree) {
+			spin_lock(&mctz->lock);
+			/* if on-tree, remove it */
+			if (mz->on_tree)
+				__mem_cgroup_remove_exceeded(mem, mz, mctz);
+			/*
+			 * Insert again. mz->usage_in_excess will be updated.
+			 * If excess is 0, no tree ops.
+			 */
+			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
+			spin_unlock(&mctz->lock);
+		}
 	}
 }
@@ -447,9 +442,10 @@ static struct mem_cgroup_per_zone *
 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 {
 	struct rb_node *rightmost = NULL;
-	struct mem_cgroup_per_zone *mz = NULL;
+	struct mem_cgroup_per_zone *mz;
 
 retry:
+	mz = NULL;
 	rightmost = rb_last(&mctz->rb_root);
 	if (!rightmost)
 		goto done;		/* Nothing to reclaim from */
@@ -1270,9 +1266,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 			gfp_t gfp_mask, struct mem_cgroup **memcg,
 			bool oom, struct page *page)
 {
-	struct mem_cgroup *mem, *mem_over_limit, *mem_over_soft_limit;
+	struct mem_cgroup *mem, *mem_over_limit;
 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
-	struct res_counter *fail_res, *soft_fail_res = NULL;
+	struct res_counter *fail_res;
 
 	if (unlikely(test_thread_flag(TIF_MEMDIE))) {
 		/* Don't account this! */
@@ -1304,17 +1300,16 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		if (mem_cgroup_is_root(mem))
 			goto done;
-		ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res,
-						&soft_fail_res);
+		ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
 		if (likely(!ret)) {
 			if (!do_swap_account)
 				break;
 			ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
-							&fail_res, NULL);
+							&fail_res);
 			if (likely(!ret))
 				break;
 			/* mem+swap counter fails */
-			res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+			res_counter_uncharge(&mem->res, PAGE_SIZE);
 			flags |= MEM_CGROUP_RECLAIM_NOSWAP;
 			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
 									memsw);
@@ -1353,16 +1348,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		}
 	}
 	/*
-	 * Insert just the ancestor, we should trickle down to the correct
-	 * cgroup for reclaim, since the other nodes will be below their
-	 * soft limit
+	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
+	 * if they exceeds softlimit.
 	 */
-	if (soft_fail_res) {
-		mem_over_soft_limit =
-			mem_cgroup_from_res_counter(soft_fail_res, res);
-		if (mem_cgroup_soft_limit_check(mem_over_soft_limit))
-			mem_cgroup_update_tree(mem_over_soft_limit, page);
-	}
+	if (mem_cgroup_soft_limit_check(mem))
+		mem_cgroup_update_tree(mem, page);
 done:
 	return 0;
 nomem:
@@ -1437,10 +1427,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
 		if (!mem_cgroup_is_root(mem)) {
-			res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+			res_counter_uncharge(&mem->res, PAGE_SIZE);
 			if (do_swap_account)
-				res_counter_uncharge(&mem->memsw, PAGE_SIZE,
-							NULL);
+				res_counter_uncharge(&mem->memsw, PAGE_SIZE);
 		}
 		css_put(&mem->css);
 		return;
@@ -1519,7 +1508,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
 		goto out;
 
 	if (!mem_cgroup_is_root(from))
-		res_counter_uncharge(&from->res, PAGE_SIZE, NULL);
+		res_counter_uncharge(&from->res, PAGE_SIZE);
 	mem_cgroup_charge_statistics(from, pc, false);
 
 	page = pc->page;
@@ -1539,7 +1528,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
 	}
 
 	if (do_swap_account && !mem_cgroup_is_root(from))
-		res_counter_uncharge(&from->memsw, PAGE_SIZE, NULL);
+		res_counter_uncharge(&from->memsw, PAGE_SIZE);
 	css_put(&from->css);
 
 	css_get(&to->css);
@@ -1610,9 +1599,9 @@ uncharge:
 	css_put(&parent->css);
 	/* uncharge if move fails */
 	if (!mem_cgroup_is_root(parent)) {
-		res_counter_uncharge(&parent->res, PAGE_SIZE, NULL);
+		res_counter_uncharge(&parent->res, PAGE_SIZE);
 		if (do_swap_account)
-			res_counter_uncharge(&parent->memsw, PAGE_SIZE, NULL);
+			res_counter_uncharge(&parent->memsw, PAGE_SIZE);
 	}
 	return ret;
 }
@@ -1803,8 +1792,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 			 * calling css_tryget
 			 */
 			if (!mem_cgroup_is_root(memcg))
-				res_counter_uncharge(&memcg->memsw, PAGE_SIZE,
-							NULL);
+				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
 			mem_cgroup_swap_statistics(memcg, false);
 			mem_cgroup_put(memcg);
 		}
@@ -1831,9 +1819,9 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 	if (!mem)
 		return;
 	if (!mem_cgroup_is_root(mem)) {
-		res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		if (do_swap_account)
-			res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
 	}
 	css_put(&mem->css);
 }
@@ -1848,7 +1836,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
 	struct mem_cgroup_per_zone *mz;
-	bool soft_limit_excess = false;
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -1888,10 +1875,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	}
 
 	if (!mem_cgroup_is_root(mem)) {
-		res_counter_uncharge(&mem->res, PAGE_SIZE, &soft_limit_excess);
+		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		if (do_swap_account &&
 				(ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
-			res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
 	}
 	if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
 		mem_cgroup_swap_statistics(mem, true);
@@ -1908,7 +1895,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	mz = page_cgroup_zoneinfo(pc);
 	unlock_page_cgroup(pc);
 
-	if (soft_limit_excess && mem_cgroup_soft_limit_check(mem))
+	if (mem_cgroup_soft_limit_check(mem))
 		mem_cgroup_update_tree(mem, page);
 	/* at swapout, this memcg will be accessed to record to swap */
 	if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
@@ -1986,7 +1973,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
 		 * This memcg can be obsolete one. We avoid calling css_tryget
 		 */
 		if (!mem_cgroup_is_root(memcg))
-			res_counter_uncharge(&memcg->memsw, PAGE_SIZE, NULL);
+			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
 		mem_cgroup_swap_statistics(memcg, false);
 		mem_cgroup_put(memcg);
 	}
@@ -2233,6 +2220,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 	unsigned long reclaimed;
 	int loop = 0;
 	struct mem_cgroup_tree_per_zone *mctz;
+	unsigned long long excess;
 
 	if (order > 0)
 		return 0;
@@ -2284,9 +2272,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 				break;
 		} while (1);
 	}
-	mz->usage_in_excess =
-		res_counter_soft_limit_excess(&mz->mem->res);
 	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+	excess = res_counter_soft_limit_excess(&mz->mem->res);
 	/*
 	 * One school of thought says that we should not add
 	 * back the node to the tree if reclaim returns 0.
@@ -2295,8 +2282,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 		 * memory to reclaim from. Consider this as a longer
 		 * term TODO.
 		 */
-		if (mz->usage_in_excess)
-			__mem_cgroup_insert_exceeded(mz->mem, mz, mctz);
+		/* If excess == 0, no tree ops */
+		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
 		spin_unlock(&mctz->lock);
 		css_put(&mz->mem->css);
 		loop++;
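For context on what this RB-tree is keyed on: memory.soft_limit_in_bytes is the per-cgroup knob whose excess the tree orders, and mem_cgroup_soft_limit_reclaim() picks the largest offender under global pressure. A hedged userspace sketch of setting that knob follows; the cgroup mount point and group name are assumptions for the example, not something taken from this patch.

/* set-softlimit.c: give one memory cgroup a 256 MiB soft limit.
 * Paths are illustrative; adjust to the local cgroup (v1) memory mount. */
#include <stdio.h>

int main(void)
{
	const char *knob = "/cgroup/memory/mygroup/memory.soft_limit_in_bytes";
	FILE *f = fopen(knob, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Once usage exceeds this value, the group becomes a preferred
	 * target for soft-limit reclaim under global memory pressure. */
	fprintf(f, "%llu\n", 256ULL << 20);
	return fclose(f) == 0 ? 0 : 1;
}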
diff --git a/mm/rmap.c b/mm/rmap.c
index 28aafe2b5306..dd43373a483f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -242,8 +242,8 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 }
 
 /*
- * At what user virtual address is page expected in vma? checking that the
- * page matches the vma: currently only used on anon pages, by unuse_vma;
+ * At what user virtual address is page expected in vma?
+ * checking that the page matches the vma.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4de7f02f820b..a1bc6b9af9a2 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1974,12 +1974,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		goto bad_swap;
 	}
 
-	if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
-		p->flags |= SWP_SOLIDSTATE;
-		p->cluster_next = 1 + (random32() % p->highest_bit);
+	if (p->bdev) {
+		if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+			p->flags |= SWP_SOLIDSTATE;
+			p->cluster_next = 1 + (random32() % p->highest_bit);
+		}
+		if (discard_swap(p) == 0)
+			p->flags |= SWP_DISCARDABLE;
 	}
-	if (discard_swap(p) == 0)
-		p->flags |= SWP_DISCARDABLE;
 
 	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 69511e663234..0f551a4a44cd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
@@ -25,10 +26,10 @@
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
-#include <linux/highmem.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
+#include <asm/shmparam.h>
 
 /*** Page table manipulation functions ***/
@@ -1156,12 +1157,11 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
-		unsigned long flags, unsigned long start, unsigned long end,
-		int node, gfp_t gfp_mask, void *caller)
+		unsigned long align, unsigned long flags, unsigned long start,
+		unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
 	static struct vmap_area *va;
 	struct vm_struct *area;
-	unsigned long align = 1;
 
 	BUG_ON(in_interrupt());
 	if (flags & VM_IOREMAP) {
@@ -1201,7 +1201,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
 						__builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -1210,7 +1210,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end,
 				void *caller)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
 				  caller);
 }
 
@@ -1225,22 +1225,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				-1, GFP_KERNEL, __builtin_return_address(0));
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				void *caller)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  -1, GFP_KERNEL, caller);
 }
 
 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
 					int node, gfp_t gfp_mask)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-					gfp_mask, __builtin_return_address(0));
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+				  node, gfp_mask, __builtin_return_address(0));
 }
 
 static struct vm_struct *find_vm_area(const void *addr)
@@ -1403,7 +1403,8 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node, void *caller)
@@ -1417,7 +1418,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+		pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
 				PAGE_KERNEL, node, caller);
 		area->flags |= VM_VPAGES;
 	} else {
@@ -1476,6 +1477,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 /**
  *	__vmalloc_node  -  allocate virtually contiguous memory
  *	@size:		allocation size
+ *	@align:		desired alignment
  *	@gfp_mask:	flags for the page level allocator
  *	@prot:		protection mask for the allocated pages
  *	@node:		node to use for allocation or -1
@@ -1485,8 +1487,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-						int node, void *caller)
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1496,8 +1499,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
-	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
-						node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
+				  VMALLOC_END, node, gfp_mask, caller);
 	if (!area)
 		return NULL;
 
@@ -1516,7 +1519,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_node(size, gfp_mask, prot, -1,
+	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -1532,7 +1535,7 @@ EXPORT_SYMBOL(__vmalloc);
 */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 				-1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
@@ -1549,7 +1552,8 @@ void *vmalloc_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+	ret = __vmalloc_node(size, SHMLBA,
+			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 			     PAGE_KERNEL, -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
@@ -1572,7 +1576,7 @@ EXPORT_SYMBOL(vmalloc_user);
 */
 void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 					node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
@@ -1595,7 +1599,7 @@ EXPORT_SYMBOL(vmalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
 			      -1, __builtin_return_address(0));
 }
 
@@ -1616,7 +1620,7 @@ void *vmalloc_exec(unsigned long size)
 */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
 			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
@@ -1633,7 +1637,7 @@ void *vmalloc_32_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
 			     -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
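The reason vmalloc_user() now passes SHMLBA as the alignment is that these areas are meant to be mapped into userspace, and on architectures with virtually indexed caches the kernel and user views must not alias. A hedged in-kernel sketch of that usage pattern follows; the drv_* names are invented for illustration and are not part of this patch.

/* Illustrative driver fragment: a buffer shared with userspace. */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/errno.h>

#define DRV_BUF_SIZE	(1UL << 20)	/* 1 MiB, arbitrary for the example */

static void *drv_buf;

static int drv_alloc_buf(void)
{
	/* Zeroed, flagged for user mapping and, after this patch,
	 * SHMLBA-aligned so user and kernel mappings do not alias. */
	drv_buf = vmalloc_user(DRV_BUF_SIZE);
	return drv_buf ? 0 : -ENOMEM;
}

static int drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* remap_vmalloc_range() only accepts areas from vmalloc_user(). */
	return remap_vmalloc_range(vma, drv_buf, 0);
}

All other callers keep align == 1, so their behaviour is unchanged by the new parameter.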