diff options
Diffstat (limited to 'arch/mips/mm/mmap.c')
| -rw-r--r-- | arch/mips/mm/mmap.c | 122 | 
1 file changed, 122 insertions, 0 deletions
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>

/*
 * Mask used to colour shared mappings so they do not alias in the cache.
 * Defaults to PAGE_SIZE - 1 ("sane caches", i.e. no aliasing constraint
 * beyond page alignment); presumably overridden at boot on CPUs with
 * virtually-indexed caches larger than a page — TODO confirm where this
 * is written elsewhere in the MIPS code.
 */
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

/*
 * Round `addr` up to the next shm_align_mask boundary, then add the
 * colour implied by the file offset (`pgoff`), so that mappings of the
 * same page at the same offset land on the same cache colour.
 */
#define COLOUR_ALIGN(addr,pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

/*
 * Pick an unmapped user address range of `len` bytes for a new mapping.
 *
 * Behaviour visible in this function:
 *  - MAP_FIXED requests are only validated (range within TASK_SIZE, and
 *    shared mappings must respect the cache-colour constraint), never moved.
 *  - A caller-supplied hint `addr` is honoured if, after (colour-)alignment,
 *    the range is free.
 *  - Otherwise the VMA list is walked bottom-up from mm->mmap_base until a
 *    gap large enough is found, or -ENOMEM if we run past TASK_SIZE.
 *
 * Returns the chosen address, or -ENOMEM/-EINVAL (negative errno encoded
 * in the unsigned long, per the usual get_unmapped_area convention).
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct * vmm;
	int do_color_align;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE.  */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	/*
	 * Colour-align file-backed and shared mappings; purely anonymous
	 * private mappings only need page alignment.
	 */
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	if (addr) {
		/* Try the caller's hint first, suitably aligned. */
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vmm = find_vma(current->mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vmm || addr + len <= vmm->vm_start))
			return addr;
	}
	/* Hint unusable: restart the search from the mmap base. */
	addr = current->mm->mmap_base;
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	/*
	 * Linear first-fit walk of the VMA list.  find_vma() returns the
	 * first VMA ending above `addr`, so each iteration either finds a
	 * free gap before vmm->vm_start or hops past vmm.
	 */
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

/*
 * Choose the mmap layout for a new mm: bottom-up allocation starting at
 * TASK_UNMAPPED_BASE, optionally offset by a page-aligned random amount
 * when the task has PF_RANDOMIZE set (ASLR).
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		/* Shift first so the offset is page-aligned, then bound it:
		 * 0xffffff ~ 16MB of slack on 32-bit, 0xfffffff ~ 256MB on
		 * 64-bit — NOTE(review): presumably chosen to match the
		 * brk_rnd() ranges below; confirm intended entropy. */
		random_factor = random_factor << PAGE_SHIFT;
		if (TASK_IS_32BIT_ADDR)
			random_factor &= 0xfffffful;
		else
			random_factor &= 0xffffffful;
	}

	mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}

/*
 * Random page-aligned offset added to the heap start for brk ASLR.
 * Bounded to 8MB on 32-bit tasks and 256MB on 64-bit tasks.
 */
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_int();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}

/*
 * Return a randomized, page-aligned brk value at or above the current
 * mm->brk.  Falls back to the unrandomized brk if the addition wrapped
 * around (ret < mm->brk), so the heap never moves backwards.
 */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}