| // SPDX-License-Identifier: GPL-2.0-only | 
 | #include <linux/mm.h> | 
 | #include <linux/slab.h> | 
 | #include <linux/string.h> | 
 | #include <linux/compiler.h> | 
 | #include <linux/export.h> | 
 | #include <linux/err.h> | 
 | #include <linux/sched.h> | 
 | #include <linux/sched/mm.h> | 
 | #include <linux/sched/signal.h> | 
 | #include <linux/sched/task_stack.h> | 
 | #include <linux/security.h> | 
 | #include <linux/swap.h> | 
 | #include <linux/swapops.h> | 
 | #include <linux/mman.h> | 
 | #include <linux/hugetlb.h> | 
 | #include <linux/vmalloc.h> | 
 | #include <linux/userfaultfd_k.h> | 
 | #include <linux/elf.h> | 
 | #include <linux/elf-randomize.h> | 
 | #include <linux/personality.h> | 
 | #include <linux/random.h> | 
 | #include <linux/processor.h> | 
 | #include <linux/sizes.h> | 
 | #include <linux/compat.h> | 
 |  | 
 | #include <linux/uaccess.h> | 
 |  | 
 | #include "internal.h" | 
 | #ifndef __GENKSYMS__ | 
 | #include <trace/hooks/syscall_check.h> | 
 | #endif | 
 |  | 
 | /** | 
 |  * kfree_const - conditionally free memory | 
 |  * @x: pointer to the memory | 
 |  * | 
 * This function calls kfree() only if @x is not in the .rodata section.
 |  */ | 
 | void kfree_const(const void *x) | 
 | { | 
 | 	if (!is_kernel_rodata((unsigned long)x)) | 
 | 		kfree(x); | 
 | } | 
 | EXPORT_SYMBOL(kfree_const); | 
 |  | 
 | /** | 
 |  * kstrdup - allocate space for and copy an existing string | 
 |  * @s: the string to duplicate | 
 |  * @gfp: the GFP mask used in the kmalloc() call when allocating memory | 
 |  * | 
 |  * Return: newly allocated copy of @s or %NULL in case of error | 
 |  */ | 
 | char *kstrdup(const char *s, gfp_t gfp) | 
 | { | 
 | 	size_t len; | 
 | 	char *buf; | 
 |  | 
 | 	if (!s) | 
 | 		return NULL; | 
 |  | 
 | 	len = strlen(s) + 1; | 
 | 	buf = kmalloc_track_caller(len, gfp); | 
 | 	if (buf) | 
 | 		memcpy(buf, s, len); | 
 | 	return buf; | 
 | } | 
 | EXPORT_SYMBOL(kstrdup); | 
 |  | 
 | /** | 
 |  * kstrdup_const - conditionally duplicate an existing const string | 
 |  * @s: the string to duplicate | 
 |  * @gfp: the GFP mask used in the kmalloc() call when allocating memory | 
 |  * | 
 |  * Note: Strings allocated by kstrdup_const should be freed by kfree_const and | 
 |  * must not be passed to krealloc(). | 
 |  * | 
 |  * Return: source string if it is in .rodata section otherwise | 
 |  * fallback to kstrdup. | 
 |  */ | 
 | const char *kstrdup_const(const char *s, gfp_t gfp) | 
 | { | 
 | 	if (is_kernel_rodata((unsigned long)s)) | 
 | 		return s; | 
 |  | 
 | 	return kstrdup(s, gfp); | 
 | } | 
 | EXPORT_SYMBOL(kstrdup_const); | 
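
/*
 * Illustrative pairing (not part of this file; the struct and function names
 * below are made up): strings duplicated with kstrdup_const() must be
 * released with kfree_const(), as noted above, and never passed to
 * krealloc().
 *
 *	struct foo {
 *		const char *name;
 *	};
 *
 *	static int foo_set_name(struct foo *f, const char *name)
 *	{
 *		f->name = kstrdup_const(name, GFP_KERNEL);
 *		return f->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_release(struct foo *f)
 *	{
 *		kfree_const(f->name);
 *	}
 */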
 |  | 
 | /** | 
 |  * kstrndup - allocate space for and copy an existing string | 
 |  * @s: the string to duplicate | 
 |  * @max: read at most @max chars from @s | 
 |  * @gfp: the GFP mask used in the kmalloc() call when allocating memory | 
 |  * | 
 |  * Note: Use kmemdup_nul() instead if the size is known exactly. | 
 |  * | 
 |  * Return: newly allocated copy of @s or %NULL in case of error | 
 |  */ | 
 | char *kstrndup(const char *s, size_t max, gfp_t gfp) | 
 | { | 
 | 	size_t len; | 
 | 	char *buf; | 
 |  | 
 | 	if (!s) | 
 | 		return NULL; | 
 |  | 
 | 	len = strnlen(s, max); | 
	buf = kmalloc_track_caller(len + 1, gfp);
 | 	if (buf) { | 
 | 		memcpy(buf, s, len); | 
 | 		buf[len] = '\0'; | 
 | 	} | 
 | 	return buf; | 
 | } | 
 | EXPORT_SYMBOL(kstrndup); | 
 |  | 
 | /** | 
 |  * kmemdup - duplicate region of memory | 
 |  * | 
 |  * @src: memory region to duplicate | 
 |  * @len: memory region length | 
 |  * @gfp: GFP mask to use | 
 |  * | 
 |  * Return: newly allocated copy of @src or %NULL in case of error | 
 |  */ | 
 | void *kmemdup(const void *src, size_t len, gfp_t gfp) | 
 | { | 
 | 	void *p; | 
 |  | 
 | 	p = kmalloc_track_caller(len, gfp); | 
 | 	if (p) | 
 | 		memcpy(p, src, len); | 
 | 	return p; | 
 | } | 
 | EXPORT_SYMBOL(kmemdup); | 
 |  | 
 | /** | 
 |  * kmemdup_nul - Create a NUL-terminated string from unterminated data | 
 |  * @s: The data to stringify | 
 |  * @len: The size of the data | 
 |  * @gfp: the GFP mask used in the kmalloc() call when allocating memory | 
 |  * | 
 |  * Return: newly allocated copy of @s with NUL-termination or %NULL in | 
 |  * case of error | 
 |  */ | 
 | char *kmemdup_nul(const char *s, size_t len, gfp_t gfp) | 
 | { | 
 | 	char *buf; | 
 |  | 
 | 	if (!s) | 
 | 		return NULL; | 
 |  | 
 | 	buf = kmalloc_track_caller(len + 1, gfp); | 
 | 	if (buf) { | 
 | 		memcpy(buf, s, len); | 
 | 		buf[len] = '\0'; | 
 | 	} | 
 | 	return buf; | 
 | } | 
 | EXPORT_SYMBOL(kmemdup_nul); | 
 |  | 
 | /** | 
 |  * memdup_user - duplicate memory region from user space | 
 |  * | 
 |  * @src: source address in user space | 
 |  * @len: number of bytes to copy | 
 |  * | 
 |  * Return: an ERR_PTR() on failure.  Result is physically | 
 |  * contiguous, to be freed by kfree(). | 
 |  */ | 
 | void *memdup_user(const void __user *src, size_t len) | 
 | { | 
 | 	void *p; | 
 |  | 
 | 	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); | 
 | 	if (!p) | 
 | 		return ERR_PTR(-ENOMEM); | 
 |  | 
 | 	if (copy_from_user(p, src, len)) { | 
 | 		kfree(p); | 
 | 		return ERR_PTR(-EFAULT); | 
 | 	} | 
 |  | 
 | 	return p; | 
 | } | 
 | EXPORT_SYMBOL(memdup_user); | 
 |  | 
 | /** | 
 |  * vmemdup_user - duplicate memory region from user space | 
 |  * | 
 |  * @src: source address in user space | 
 |  * @len: number of bytes to copy | 
 |  * | 
 * Return: an ERR_PTR() on failure.  The result may not be
 * physically contiguous.  Use kvfree() to free.
 |  */ | 
 | void *vmemdup_user(const void __user *src, size_t len) | 
 | { | 
 | 	void *p; | 
 |  | 
 | 	p = kvmalloc(len, GFP_USER); | 
 | 	if (!p) | 
 | 		return ERR_PTR(-ENOMEM); | 
 |  | 
 | 	if (copy_from_user(p, src, len)) { | 
 | 		kvfree(p); | 
 | 		return ERR_PTR(-EFAULT); | 
 | 	} | 
 |  | 
 | 	return p; | 
 | } | 
 | EXPORT_SYMBOL(vmemdup_user); | 
 |  | 
 | /** | 
 |  * strndup_user - duplicate an existing string from user space | 
 |  * @s: The string to duplicate | 
 |  * @n: Maximum number of bytes to copy, including the trailing NUL. | 
 |  * | 
 |  * Return: newly allocated copy of @s or an ERR_PTR() in case of error | 
 |  */ | 
 | char *strndup_user(const char __user *s, long n) | 
 | { | 
 | 	char *p; | 
 | 	long length; | 
 |  | 
 | 	length = strnlen_user(s, n); | 
 |  | 
 | 	if (!length) | 
 | 		return ERR_PTR(-EFAULT); | 
 |  | 
 | 	if (length > n) | 
 | 		return ERR_PTR(-EINVAL); | 
 |  | 
 | 	p = memdup_user(s, length); | 
 |  | 
 | 	if (IS_ERR(p)) | 
 | 		return p; | 
 |  | 
 | 	p[length - 1] = '\0'; | 
 |  | 
 | 	return p; | 
 | } | 
 | EXPORT_SYMBOL(strndup_user); | 
 |  | 
 | /** | 
 |  * memdup_user_nul - duplicate memory region from user space and NUL-terminate | 
 |  * | 
 |  * @src: source address in user space | 
 |  * @len: number of bytes to copy | 
 |  * | 
 |  * Return: an ERR_PTR() on failure. | 
 |  */ | 
 | void *memdup_user_nul(const void __user *src, size_t len) | 
 | { | 
 | 	char *p; | 
 |  | 
 | 	/* | 
 | 	 * Always use GFP_KERNEL, since copy_from_user() can sleep and | 
	 * cause a page fault, which makes it pointless to use GFP_NOFS
 | 	 * or GFP_ATOMIC. | 
 | 	 */ | 
 | 	p = kmalloc_track_caller(len + 1, GFP_KERNEL); | 
 | 	if (!p) | 
 | 		return ERR_PTR(-ENOMEM); | 
 |  | 
 | 	if (copy_from_user(p, src, len)) { | 
 | 		kfree(p); | 
 | 		return ERR_PTR(-EFAULT); | 
 | 	} | 
 | 	p[len] = '\0'; | 
 |  | 
 | 	return p; | 
 | } | 
 | EXPORT_SYMBOL(memdup_user_nul); | 
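
/*
 * Illustrative usage (not part of this file; names are made up): a typical
 * write() handler that wants a NUL-terminated kernel copy of a user buffer.
 *
 *	static ssize_t foo_write(struct file *file, const char __user *ubuf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(ubuf, count);
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		... parse kbuf here ...
 *		kfree(kbuf);
 *		return count;
 *	}
 */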
 |  | 
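/*
 * Link @vma into @mm's VMA list right after @prev (or at the head of the
 * list when @prev is NULL), fixing up the vm_prev/vm_next pointers of the
 * neighbouring VMAs.  Callers are expected to hold the mmap lock for
 * writing.
 */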
 | void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, | 
 | 		struct vm_area_struct *prev) | 
 | { | 
 | 	struct vm_area_struct *next; | 
 |  | 
 | 	vma->vm_prev = prev; | 
 | 	if (prev) { | 
 | 		next = prev->vm_next; | 
 | 		prev->vm_next = vma; | 
 | 	} else { | 
 | 		next = mm->mmap; | 
 | 		mm->mmap = vma; | 
 | 	} | 
 | 	vma->vm_next = next; | 
 | 	if (next) | 
 | 		next->vm_prev = vma; | 
 | } | 
 |  | 
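/*
 * Unlink @vma from @mm's VMA list, reconnecting its former neighbours
 * (or mm->mmap when @vma was the first entry).
 */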
 | void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma) | 
 | { | 
 | 	struct vm_area_struct *prev, *next; | 
 |  | 
 | 	next = vma->vm_next; | 
 | 	prev = vma->vm_prev; | 
 | 	if (prev) | 
 | 		prev->vm_next = next; | 
 | 	else | 
 | 		mm->mmap = next; | 
 | 	if (next) | 
 | 		next->vm_prev = prev; | 
 | } | 
 |  | 
 | /* Check if the vma is being used as a stack by this task */ | 
 | int vma_is_stack_for_current(struct vm_area_struct *vma) | 
 | { | 
 | 	struct task_struct * __maybe_unused t = current; | 
 |  | 
 | 	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); | 
 | } | 
 |  | 
 | #ifndef STACK_RND_MASK | 
 | #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */ | 
 | #endif | 
 |  | 
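/*
 * Randomize the stack top for a new executable: when the task has
 * PF_RANDOMIZE set, shift the page-aligned @stack_top by a random number of
 * pages bounded by STACK_RND_MASK - upwards on CONFIG_STACK_GROWSUP
 * architectures, downwards otherwise.
 */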
 | unsigned long randomize_stack_top(unsigned long stack_top) | 
 | { | 
 | 	unsigned long random_variable = 0; | 
 |  | 
 | 	if (current->flags & PF_RANDOMIZE) { | 
 | 		random_variable = get_random_long(); | 
 | 		random_variable &= STACK_RND_MASK; | 
 | 		random_variable <<= PAGE_SHIFT; | 
 | 	} | 
 | #ifdef CONFIG_STACK_GROWSUP | 
 | 	return PAGE_ALIGN(stack_top) + random_variable; | 
 | #else | 
 | 	return PAGE_ALIGN(stack_top) - random_variable; | 
 | #endif | 
 | } | 
 |  | 
 | /** | 
 |  * randomize_page - Generate a random, page aligned address | 
 |  * @start:	The smallest acceptable address the caller will take. | 
 |  * @range:	The size of the area, starting at @start, within which the | 
 |  *		random address must fall. | 
 |  * | 
 |  * If @start + @range would overflow, @range is capped. | 
 |  * | 
 |  * NOTE: Historical use of randomize_range, which this replaces, presumed that | 
 |  * @start was already page aligned.  We now align it regardless. | 
 |  * | 
 |  * Return: A page aligned address within [start, start + range).  On error, | 
 |  * @start is returned. | 
 |  */ | 
 | unsigned long randomize_page(unsigned long start, unsigned long range) | 
 | { | 
 | 	if (!PAGE_ALIGNED(start)) { | 
 | 		range -= PAGE_ALIGN(start) - start; | 
 | 		start = PAGE_ALIGN(start); | 
 | 	} | 
 |  | 
 | 	if (start > ULONG_MAX - range) | 
 | 		range = ULONG_MAX - start; | 
 |  | 
 | 	range >>= PAGE_SHIFT; | 
 |  | 
 | 	if (range == 0) | 
 | 		return start; | 
 |  | 
 | 	return start + (get_random_long() % range << PAGE_SHIFT); | 
 | } | 
 |  | 
 | #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT | 
 | unsigned long arch_randomize_brk(struct mm_struct *mm) | 
 | { | 
	/* Is the current task 32-bit? */
 | 	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task()) | 
 | 		return randomize_page(mm->brk, SZ_32M); | 
 |  | 
 | 	return randomize_page(mm->brk, SZ_1G); | 
 | } | 
 |  | 
 | unsigned long arch_mmap_rnd(void) | 
 | { | 
 | 	unsigned long rnd; | 
 |  | 
 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS | 
 | 	if (is_compat_task()) | 
 | 		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); | 
 | 	else | 
 | #endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */ | 
 | 		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); | 
 |  | 
 | 	return rnd << PAGE_SHIFT; | 
 | } | 
 | EXPORT_SYMBOL_GPL(arch_mmap_rnd); | 
 |  | 
 | static int mmap_is_legacy(struct rlimit *rlim_stack) | 
 | { | 
 | 	if (current->personality & ADDR_COMPAT_LAYOUT) | 
 | 		return 1; | 
 |  | 
 | 	if (rlim_stack->rlim_cur == RLIM_INFINITY) | 
 | 		return 1; | 
 |  | 
 | 	return sysctl_legacy_va_layout; | 
 | } | 
 |  | 
 | /* | 
 |  * Leave enough space between the mmap area and the stack to honour ulimit in | 
 |  * the face of randomisation. | 
 |  */ | 
 | #define MIN_GAP		(SZ_128M) | 
 | #define MAX_GAP		(STACK_TOP / 6 * 5) | 
 |  | 
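/*
 * Compute the base address for the top-down mmap layout: keep a gap below
 * STACK_TOP large enough for the stack rlimit, the stack guard gap and
 * (when PF_RANDOMIZE is set) stack randomization, clamp it to
 * [MIN_GAP, MAX_GAP], then lower the base further by the mmap random
 * offset @rnd.
 */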
 | static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) | 
 | { | 
 | 	unsigned long gap = rlim_stack->rlim_cur; | 
 | 	unsigned long pad = stack_guard_gap; | 
 |  | 
 | 	/* Account for stack randomization if necessary */ | 
 | 	if (current->flags & PF_RANDOMIZE) | 
 | 		pad += (STACK_RND_MASK << PAGE_SHIFT); | 
 |  | 
 | 	/* Values close to RLIM_INFINITY can overflow. */ | 
 | 	if (gap + pad > gap) | 
 | 		gap += pad; | 
 |  | 
 | 	if (gap < MIN_GAP) | 
 | 		gap = MIN_GAP; | 
 | 	else if (gap > MAX_GAP) | 
 | 		gap = MAX_GAP; | 
 |  | 
 | 	return PAGE_ALIGN(STACK_TOP - gap - rnd); | 
 | } | 
 |  | 
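/*
 * Choose the legacy bottom-up layout (growing up from TASK_UNMAPPED_BASE) or
 * the default top-down layout (growing down from mmap_base()) for a new mm,
 * applying mmap address randomization when PF_RANDOMIZE is set.
 */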
 | void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) | 
 | { | 
 | 	unsigned long random_factor = 0UL; | 
 |  | 
 | 	if (current->flags & PF_RANDOMIZE) | 
 | 		random_factor = arch_mmap_rnd(); | 
 |  | 
 | 	if (mmap_is_legacy(rlim_stack)) { | 
 | 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | 
 | 		mm->get_unmapped_area = arch_get_unmapped_area; | 
 | 	} else { | 
 | 		mm->mmap_base = mmap_base(random_factor, rlim_stack); | 
 | 		mm->get_unmapped_area = arch_get_unmapped_area_topdown; | 
 | 	} | 
 | } | 
 | #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) | 
 | void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) | 
 | { | 
 | 	mm->mmap_base = TASK_UNMAPPED_BASE; | 
 | 	mm->get_unmapped_area = arch_get_unmapped_area; | 
 | } | 
 | #endif | 
 |  | 
 | /** | 
 |  * __account_locked_vm - account locked pages to an mm's locked_vm | 
 |  * @mm:          mm to account against | 
 |  * @pages:       number of pages to account | 
 |  * @inc:         %true if @pages should be considered positive, %false if not | 
 |  * @task:        task used to check RLIMIT_MEMLOCK | 
 |  * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped | 
 |  * | 
 |  * Assumes @task and @mm are valid (i.e. at least one reference on each), and | 
 |  * that mmap_lock is held as writer. | 
 |  * | 
 |  * Return: | 
 |  * * 0       on success | 
 |  * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. | 
 |  */ | 
 | int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, | 
 | 			struct task_struct *task, bool bypass_rlim) | 
 | { | 
 | 	unsigned long locked_vm, limit; | 
 | 	int ret = 0; | 
 |  | 
 | 	mmap_assert_write_locked(mm); | 
 |  | 
 | 	locked_vm = mm->locked_vm; | 
 | 	if (inc) { | 
 | 		if (!bypass_rlim) { | 
 | 			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; | 
 | 			if (locked_vm + pages > limit) | 
 | 				ret = -ENOMEM; | 
 | 		} | 
 | 		if (!ret) | 
 | 			mm->locked_vm = locked_vm + pages; | 
 | 	} else { | 
 | 		WARN_ON_ONCE(pages > locked_vm); | 
 | 		mm->locked_vm = locked_vm - pages; | 
 | 	} | 
 |  | 
 | 	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid, | 
 | 		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT, | 
 | 		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK), | 
 | 		 ret ? " - exceeded" : ""); | 
 |  | 
 | 	return ret; | 
 | } | 
 | EXPORT_SYMBOL_GPL(__account_locked_vm); | 
 |  | 
 | /** | 
 |  * account_locked_vm - account locked pages to an mm's locked_vm | 
 |  * @mm:          mm to account against, may be NULL | 
 |  * @pages:       number of pages to account | 
 |  * @inc:         %true if @pages should be considered positive, %false if not | 
 |  * | 
 |  * Assumes a non-NULL @mm is valid (i.e. at least one reference on it). | 
 |  * | 
 |  * Return: | 
 |  * * 0       on success, or if mm is NULL | 
 |  * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. | 
 |  */ | 
 | int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc) | 
 | { | 
 | 	int ret; | 
 |  | 
 | 	if (pages == 0 || !mm) | 
 | 		return 0; | 
 |  | 
 | 	mmap_write_lock(mm); | 
 | 	ret = __account_locked_vm(mm, pages, inc, current, | 
 | 				  capable(CAP_IPC_LOCK)); | 
 | 	mmap_write_unlock(mm); | 
 |  | 
 | 	return ret; | 
 | } | 
 | EXPORT_SYMBOL_GPL(account_locked_vm); | 
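
/*
 * Illustrative caller (not part of this file; names are made up): a driver
 * that pins user pages typically charges them against RLIMIT_MEMLOCK before
 * pinning (inc == true, failing with -ENOMEM when over the limit) and
 * uncharges them again on teardown (inc == false).
 *
 *	static int foo_charge(struct mm_struct *mm, unsigned long npages)
 *	{
 *		return account_locked_vm(mm, npages, true);
 *	}
 *
 *	static void foo_uncharge(struct mm_struct *mm, unsigned long npages)
 *	{
 *		account_locked_vm(mm, npages, false);
 *	}
 */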
 |  | 
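/*
 * Common helper behind vm_mmap() and the mmap() syscall path: run the
 * security_mmap_file() check, call do_mmap() under the mmap write lock,
 * complete any pending userfaultfd unmaps and populate the new range if
 * do_mmap() asked for it.
 */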
 | unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, | 
 | 	unsigned long len, unsigned long prot, | 
 | 	unsigned long flag, unsigned long pgoff) | 
 | { | 
 | 	unsigned long ret; | 
 | 	struct mm_struct *mm = current->mm; | 
 | 	unsigned long populate; | 
 | 	LIST_HEAD(uf); | 
 |  | 
 | 	ret = security_mmap_file(file, prot, flag); | 
 | 	if (!ret) { | 
 | 		if (mmap_write_lock_killable(mm)) | 
 | 			return -EINTR; | 
 | 		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate, | 
 | 			      &uf); | 
 | 		mmap_write_unlock(mm); | 
 | 		userfaultfd_unmap_complete(mm, &uf); | 
 | 		if (populate) | 
 | 			mm_populate(ret, populate); | 
 | 	} | 
 | 	trace_android_vh_check_mmap_file(file, prot, flag, ret); | 
 | 	return ret; | 
 | } | 
 |  | 
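/*
 * Kernel-internal mmap of @file at byte @offset: reject offsets that are not
 * page-aligned or that would overflow once the length is page-aligned, then
 * hand off to vm_mmap_pgoff().
 */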
 | unsigned long vm_mmap(struct file *file, unsigned long addr, | 
 | 	unsigned long len, unsigned long prot, | 
 | 	unsigned long flag, unsigned long offset) | 
 | { | 
 | 	if (unlikely(offset + PAGE_ALIGN(len) < offset)) | 
 | 		return -EINVAL; | 
 | 	if (unlikely(offset_in_page(offset))) | 
 | 		return -EINVAL; | 
 |  | 
 | 	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); | 
 | } | 
 | EXPORT_SYMBOL(vm_mmap); | 
 |  | 
 | /** | 
 |  * kvmalloc_node - attempt to allocate physically contiguous memory, but upon | 
 |  * failure, fall back to non-contiguous (vmalloc) allocation. | 
 |  * @size: size of the request. | 
 |  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL. | 
 |  * @node: numa node to allocate from | 
 |  * | 
 |  * Uses kmalloc to get the memory but if the allocation fails then falls back | 
 |  * to the vmalloc allocator. Use kvfree for freeing the memory. | 
 |  * | 
 |  * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. | 
 |  * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is | 
 |  * preferable to the vmalloc fallback, due to visible performance drawbacks. | 
 |  * | 
 * Please note that any use of gfp flags that are not fully compatible with
 * GFP_KERNEL will never fall back to vmalloc; only the kmalloc path is
 * attempted in that case.
 |  * | 
 * Return: pointer to the allocated memory or %NULL in case of failure
 |  */ | 
 | void *kvmalloc_node(size_t size, gfp_t flags, int node) | 
 | { | 
 | 	gfp_t kmalloc_flags = flags; | 
 | 	void *ret; | 
 |  | 
 | 	/* | 
 | 	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables) | 
 | 	 * so the given set of flags has to be compatible. | 
 | 	 */ | 
 | 	if ((flags & GFP_KERNEL) != GFP_KERNEL) | 
 | 		return kmalloc_node(size, flags, node); | 
 |  | 
 | 	/* | 
 | 	 * We want to attempt a large physically contiguous block first because | 
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long term fragmentation than the vmalloc
	 * fallback.  However, make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings, as
	 * we have a fallback.
 | 	 */ | 
 | 	if (size > PAGE_SIZE) { | 
 | 		kmalloc_flags |= __GFP_NOWARN; | 
 |  | 
 | 		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL)) | 
 | 			kmalloc_flags |= __GFP_NORETRY; | 
 | 	} | 
 |  | 
 | 	ret = kmalloc_node(size, kmalloc_flags, node); | 
 |  | 
 | 	/* | 
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests.
 | 	 */ | 
 | 	if (ret || size <= PAGE_SIZE) | 
 | 		return ret; | 
 |  | 
 | 	/* Don't even allow crazy sizes */ | 
 | 	if (unlikely(size > INT_MAX)) { | 
 | 		WARN_ON_ONCE(!(flags & __GFP_NOWARN)); | 
 | 		return NULL; | 
 | 	} | 
 |  | 
 | 	return __vmalloc_node(size, 1, flags, node, | 
 | 			__builtin_return_address(0)); | 
 | } | 
 | EXPORT_SYMBOL(kvmalloc_node); | 
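
/*
 * Illustrative usage (not part of this file; the struct and sizes are made
 * up): a caller that needs a possibly large table, does not care whether it
 * is physically contiguous, and frees it with kvfree().
 *
 *	struct foo_table {
 *		size_t nr;
 *		u64 entries[];
 *	};
 *
 *	static struct foo_table *foo_table_alloc(size_t nr)
 *	{
 *		struct foo_table *t;
 *
 *		t = kvmalloc(struct_size(t, entries, nr), GFP_KERNEL);
 *		if (!t)
 *			return NULL;
 *		t->nr = nr;
 *		return t;
 *	}
 */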
 |  | 
 | /** | 
 |  * kvfree() - Free memory. | 
 |  * @addr: Pointer to allocated memory. | 
 |  * | 
 |  * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc(). | 
 |  * It is slightly more efficient to use kfree() or vfree() if you are certain | 
 |  * that you know which one to use. | 
 |  * | 
 |  * Context: Either preemptible task context or not-NMI interrupt. | 
 |  */ | 
 | void kvfree(const void *addr) | 
 | { | 
 | 	if (is_vmalloc_addr(addr)) | 
 | 		vfree(addr); | 
 | 	else | 
 | 		kfree(addr); | 
 | } | 
 | EXPORT_SYMBOL(kvfree); | 
 |  | 
 | /** | 
 |  * kvfree_sensitive - Free a data object containing sensitive information. | 
 |  * @addr: address of the data object to be freed. | 
 |  * @len: length of the data object. | 
 |  * | 
 |  * Use the special memzero_explicit() function to clear the content of a | 
 |  * kvmalloc'ed object containing sensitive data to make sure that the | 
 |  * compiler won't optimize out the data clearing. | 
 |  */ | 
 | void kvfree_sensitive(const void *addr, size_t len) | 
 | { | 
 | 	if (likely(!ZERO_OR_NULL_PTR(addr))) { | 
 | 		memzero_explicit((void *)addr, len); | 
 | 		kvfree(addr); | 
 | 	} | 
 | } | 
 | EXPORT_SYMBOL(kvfree_sensitive); | 
 |  | 
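/*
 * Resize a kvmalloc'ed buffer: when @newsize fits in @oldsize the original
 * pointer is returned unchanged, otherwise a new kvmalloc buffer is
 * allocated, @oldsize bytes are copied over and the old buffer is freed
 * with kvfree().  Returns NULL (leaving @p intact) if the new allocation
 * fails.
 */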
 | void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) | 
 | { | 
 | 	void *newp; | 
 |  | 
 | 	if (oldsize >= newsize) | 
 | 		return (void *)p; | 
 | 	newp = kvmalloc(newsize, flags); | 
 | 	if (!newp) | 
 | 		return NULL; | 
 | 	memcpy(newp, p, oldsize); | 
 | 	kvfree(p); | 
 | 	return newp; | 
 | } | 
 | EXPORT_SYMBOL(kvrealloc); | 
 |  | 
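/* Strip the PAGE_MAPPING_FLAGS bits and return the raw mapping pointer. */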
 | static inline void *__page_rmapping(struct page *page) | 
 | { | 
 | 	unsigned long mapping; | 
 |  | 
 | 	mapping = (unsigned long)page->mapping; | 
 | 	mapping &= ~PAGE_MAPPING_FLAGS; | 
 |  | 
 | 	return (void *)mapping; | 
 | } | 
 |  | 
 | /* Neutral page->mapping pointer to address_space or anon_vma or other */ | 
 | void *page_rmapping(struct page *page) | 
 | { | 
 | 	page = compound_head(page); | 
 | 	return __page_rmapping(page); | 
 | } | 
 |  | 
 | /* | 
 |  * Return true if this page is mapped into pagetables. | 
 * For a compound page it returns true if any sub-page of it is mapped.
 |  */ | 
 | bool page_mapped(struct page *page) | 
 | { | 
 | 	int i; | 
 |  | 
 | 	if (likely(!PageCompound(page))) | 
 | 		return atomic_read(&page->_mapcount) >= 0; | 
 | 	page = compound_head(page); | 
 | 	if (atomic_read(compound_mapcount_ptr(page)) >= 0) | 
 | 		return true; | 
 | 	if (PageHuge(page)) | 
 | 		return false; | 
 | 	for (i = 0; i < compound_nr(page); i++) { | 
 | 		if (atomic_read(&page[i]._mapcount) >= 0) | 
 | 			return true; | 
 | 	} | 
 | 	return false; | 
 | } | 
 | EXPORT_SYMBOL(page_mapped); | 
 |  | 
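/*
 * Return the anon_vma an anonymous page maps to, or NULL when page->mapping
 * does not carry a plain anon_vma pointer (file-backed, KSM or movable
 * pages).
 */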
 | struct anon_vma *page_anon_vma(struct page *page) | 
 | { | 
 | 	unsigned long mapping; | 
 |  | 
 | 	page = compound_head(page); | 
 | 	mapping = (unsigned long)page->mapping; | 
 | 	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) | 
 | 		return NULL; | 
 | 	return __page_rmapping(page); | 
 | } | 
 |  | 
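/*
 * Return the address_space a page belongs to: the swap address space for
 * swap cache pages, NULL for slab and anonymous pages, and page->mapping
 * with the flag bits cleared otherwise.
 */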
 | struct address_space *page_mapping(struct page *page) | 
 | { | 
 | 	struct address_space *mapping; | 
 |  | 
 | 	page = compound_head(page); | 
 |  | 
	/* This happens if someone calls flush_dcache_page on a slab page */
 | 	if (unlikely(PageSlab(page))) | 
 | 		return NULL; | 
 |  | 
 | 	if (unlikely(PageSwapCache(page))) { | 
 | 		swp_entry_t entry; | 
 |  | 
 | 		entry.val = page_private(page); | 
 | 		return swap_address_space(entry); | 
 | 	} | 
 |  | 
 | 	mapping = page->mapping; | 
 | 	if ((unsigned long)mapping & PAGE_MAPPING_ANON) | 
 | 		return NULL; | 
 |  | 
 | 	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS); | 
 | } | 
 | EXPORT_SYMBOL(page_mapping); | 
 |  | 
 | /* | 
 |  * For file cache pages, return the address_space, otherwise return NULL | 
 |  */ | 
 | struct address_space *page_mapping_file(struct page *page) | 
 | { | 
 | 	if (unlikely(PageSwapCache(page))) | 
 | 		return NULL; | 
 | 	return page_mapping(page); | 
 | } | 
 |  | 
 | /* Slow path of page_mapcount() for compound pages */ | 
 | int __page_mapcount(struct page *page) | 
 | { | 
 | 	int ret; | 
 |  | 
 | 	ret = atomic_read(&page->_mapcount) + 1; | 
 | 	/* | 
	 * For file THP page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
 | 	 */ | 
 | 	if (!PageAnon(page) && !PageHuge(page)) | 
 | 		return ret; | 
 | 	page = compound_head(page); | 
 | 	ret += atomic_read(compound_mapcount_ptr(page)) + 1; | 
 | 	if (PageDoubleMap(page)) | 
 | 		ret--; | 
 | 	return ret; | 
 | } | 
 | EXPORT_SYMBOL_GPL(__page_mapcount); | 
 |  | 
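/*
 * Overcommit and mapping tunables exposed under /proc/sys/vm/, together
 * with their default values.
 */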
 | int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; | 
 | int sysctl_overcommit_ratio __read_mostly = 50; | 
 | unsigned long sysctl_overcommit_kbytes __read_mostly; | 
 | int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; | 
 | unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ | 
 | unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ | 
 |  | 
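/*
 * sysctl handler for vm.overcommit_ratio: a successful write clears
 * vm.overcommit_kbytes, since only one of the two limits can be in effect.
 */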
 | int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer, | 
 | 		size_t *lenp, loff_t *ppos) | 
 | { | 
 | 	int ret; | 
 |  | 
 | 	ret = proc_dointvec(table, write, buffer, lenp, ppos); | 
 | 	if (ret == 0 && write) | 
 | 		sysctl_overcommit_kbytes = 0; | 
 | 	return ret; | 
 | } | 
 |  | 
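/*
 * Work function that folds each CPU's pending deviation of vm_committed_as
 * into the global count.
 */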
 | static void sync_overcommit_as(struct work_struct *dummy) | 
 | { | 
 | 	percpu_counter_sync(&vm_committed_as); | 
 | } | 
 |  | 
 | int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer, | 
 | 		size_t *lenp, loff_t *ppos) | 
 | { | 
 | 	struct ctl_table t; | 
 | 	int new_policy = -1; | 
 | 	int ret; | 
 |  | 
 | 	/* | 
	 * The deviation of vm_committed_as could be big with a loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS.  When changing the policy
	 * to the strict OVERCOMMIT_NEVER, the deviation has to be reduced to
	 * comply with the strict "NEVER".  To do that, and to avoid a possible
	 * race condition (even though users usually won't switch to
	 * OVERCOMMIT_NEVER very often), the switch is done in the following
	 * order:
 | 	 *	1. changing the batch | 
 | 	 *	2. sync percpu count on each CPU | 
 | 	 *	3. switch the policy | 
 | 	 */ | 
 | 	if (write) { | 
 | 		t = *table; | 
 | 		t.data = &new_policy; | 
 | 		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); | 
 | 		if (ret || new_policy == -1) | 
 | 			return ret; | 
 |  | 
 | 		mm_compute_batch(new_policy); | 
 | 		if (new_policy == OVERCOMMIT_NEVER) | 
 | 			schedule_on_each_cpu(sync_overcommit_as); | 
 | 		sysctl_overcommit_memory = new_policy; | 
 | 	} else { | 
 | 		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 
 | 	} | 
 |  | 
 | 	return ret; | 
 | } | 
 |  | 
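/*
 * sysctl handler for vm.overcommit_kbytes: a successful write clears
 * vm.overcommit_ratio, mirroring overcommit_ratio_handler() above.
 */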
 | int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer, | 
 | 		size_t *lenp, loff_t *ppos) | 
 | { | 
 | 	int ret; | 
 |  | 
 | 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); | 
 | 	if (ret == 0 && write) | 
 | 		sysctl_overcommit_ratio = 0; | 
 | 	return ret; | 
 | } | 
 |  | 
 | /* | 
 |  * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used | 
 |  */ | 
 | unsigned long vm_commit_limit(void) | 
 | { | 
 | 	unsigned long allowed; | 
 |  | 
 | 	if (sysctl_overcommit_kbytes) | 
 | 		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); | 
 | 	else | 
 | 		allowed = ((totalram_pages() - hugetlb_total_pages()) | 
 | 			   * sysctl_overcommit_ratio / 100); | 
 | 	allowed += total_swap_pages; | 
 |  | 
 | 	return allowed; | 
 | } | 
 |  | 
 | /* | 
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables, as it can be updated frequently by several CPUs.
 |  */ | 
 | struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp; | 
 |  | 
 | /* | 
 |  * The global memory commitment made in the system can be a metric | 
 |  * that can be used to drive ballooning decisions when Linux is hosted | 
 |  * as a guest. On Hyper-V, the host implements a policy engine for dynamically | 
 |  * balancing memory across competing virtual machines that are hosted. | 
 |  * Several metrics drive this policy engine including the guest reported | 
 |  * memory commitment. | 
 |  * | 
 * The time cost of this is very low for small platforms.  For a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 |  */ | 
 | unsigned long vm_memory_committed(void) | 
 | { | 
 | 	return percpu_counter_sum_positive(&vm_committed_as); | 
 | } | 
 | EXPORT_SYMBOL_GPL(vm_memory_committed); | 
 |  | 
 | /* | 
 |  * Check that a process has enough memory to allocate a new virtual | 
 |  * mapping. 0 means there is enough memory for the allocation to | 
 |  * succeed and -ENOMEM implies there is not. | 
 |  * | 
 |  * We currently support three overcommit policies, which are set via the | 
 |  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst | 
 |  * | 
 |  * Strict overcommit modes added 2002 Feb 26 by Alan Cox. | 
 |  * Additional code 2002 Jul 20 by Robert Love. | 
 |  * | 
 |  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise. | 
 |  * | 
 |  * Note this is a helper function intended to be used by LSMs which | 
 |  * wish to use this logic. | 
 |  */ | 
 | int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) | 
 | { | 
 | 	long allowed; | 
 |  | 
 | 	vm_acct_memory(pages); | 
 |  | 
 | 	/* | 
 | 	 * Sometimes we want to use more memory than we have | 
 | 	 */ | 
 | 	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) | 
 | 		return 0; | 
 |  | 
 | 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { | 
 | 		if (pages > totalram_pages() + total_swap_pages) | 
 | 			goto error; | 
 | 		return 0; | 
 | 	} | 
 |  | 
 | 	allowed = vm_commit_limit(); | 
 | 	/* | 
 | 	 * Reserve some for root | 
 | 	 */ | 
 | 	if (!cap_sys_admin) | 
 | 		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); | 
 |  | 
 | 	/* | 
 | 	 * Don't let a single process grow so big a user can't recover | 
 | 	 */ | 
 | 	if (mm) { | 
 | 		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); | 
 |  | 
 | 		allowed -= min_t(long, mm->total_vm / 32, reserve); | 
 | 	} | 
 |  | 
 | 	if (percpu_counter_read_positive(&vm_committed_as) < allowed) | 
 | 		return 0; | 
 | error: | 
 | 	vm_unacct_memory(pages); | 
 |  | 
 | 	return -ENOMEM; | 
 | } | 
 |  | 
 | /** | 
 |  * get_cmdline() - copy the cmdline value to a buffer. | 
 |  * @task:     the task whose cmdline value to copy. | 
 |  * @buffer:   the buffer to copy to. | 
 |  * @buflen:   the length of the buffer. Larger cmdline values are truncated | 
 |  *            to this length. | 
 |  * | 
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a terminating NUL byte.
 |  */ | 
 | int get_cmdline(struct task_struct *task, char *buffer, int buflen) | 
 | { | 
 | 	int res = 0; | 
 | 	unsigned int len; | 
 | 	struct mm_struct *mm = get_task_mm(task); | 
 | 	unsigned long arg_start, arg_end, env_start, env_end; | 
 | 	if (!mm) | 
 | 		goto out; | 
 | 	if (!mm->arg_end) | 
 | 		goto out_mm;	/* Shh! No looking before we're done */ | 
 |  | 
 | 	spin_lock(&mm->arg_lock); | 
 | 	arg_start = mm->arg_start; | 
 | 	arg_end = mm->arg_end; | 
 | 	env_start = mm->env_start; | 
 | 	env_end = mm->env_end; | 
 | 	spin_unlock(&mm->arg_lock); | 
 |  | 
 | 	len = arg_end - arg_start; | 
 |  | 
 | 	if (len > buflen) | 
 | 		len = buflen; | 
 |  | 
 | 	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE); | 
 |  | 
 | 	/* | 
 | 	 * If the nul at the end of args has been overwritten, then | 
	 * assume the application is using setproctitle(3).
 | 	 */ | 
 | 	if (res > 0 && buffer[res-1] != '\0' && len < buflen) { | 
 | 		len = strnlen(buffer, res); | 
 | 		if (len < res) { | 
 | 			res = len; | 
 | 		} else { | 
 | 			len = env_end - env_start; | 
 | 			if (len > buflen - res) | 
 | 				len = buflen - res; | 
 | 			res += access_process_vm(task, env_start, | 
 | 						 buffer+res, len, | 
 | 						 FOLL_FORCE); | 
 | 			res = strnlen(buffer, res); | 
 | 		} | 
 | 	} | 
 | out_mm: | 
 | 	mmput(mm); | 
 | out: | 
 | 	return res; | 
 | } | 
 |  | 
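/*
 * Weak default used when the architecture does not provide its own page
 * comparison: compare the full PAGE_SIZE contents through temporary
 * kmap_atomic() mappings.
 */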
 | int __weak memcmp_pages(struct page *page1, struct page *page2) | 
 | { | 
 | 	char *addr1, *addr2; | 
 | 	int ret; | 
 |  | 
 | 	addr1 = kmap_atomic(page1); | 
 | 	addr2 = kmap_atomic(page2); | 
 | 	ret = memcmp(addr1, addr2, PAGE_SIZE); | 
 | 	kunmap_atomic(addr2); | 
 | 	kunmap_atomic(addr1); | 
 | 	return ret; | 
 | } |