diff options
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmalloc.c | 50 +++++++++++++++++++++++++++++---------------------
1 files changed, 29 insertions, 21 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index f6754663632..284346ee0e9 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1530,25 +1530,12 @@ fail:  	return NULL;  } -void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) -{ -	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1, -					 __builtin_return_address(0)); - -	/* -	 * A ref_count = 3 is needed because the vm_struct and vmap_area -	 * structures allocated in the __get_vm_area_node() function contain -	 * references to the virtual address of the vmalloc'ed block. -	 */ -	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask); - -	return addr; -} -  /** - *	__vmalloc_node  -  allocate virtually contiguous memory + *	__vmalloc_node_range  -  allocate virtually contiguous memory   *	@size:		allocation size   *	@align:		desired alignment + *	@start:		vm area range start + *	@end:		vm area range end   *	@gfp_mask:	flags for the page level allocator   *	@prot:		protection mask for the allocated pages   *	@node:		node to use for allocation or -1 @@ -1558,9 +1545,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)   *	allocator with @gfp_mask flags.  Map them into contiguous   *	kernel virtual space, using a pagetable protection of @prot.   
*/ -static void *__vmalloc_node(unsigned long size, unsigned long align, -			    gfp_t gfp_mask, pgprot_t prot, -			    int node, void *caller) +void *__vmalloc_node_range(unsigned long size, unsigned long align, +			unsigned long start, unsigned long end, gfp_t gfp_mask, +			pgprot_t prot, int node, void *caller)  {  	struct vm_struct *area;  	void *addr; @@ -1570,8 +1557,8 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,  	if (!size || (size >> PAGE_SHIFT) > totalram_pages)  		return NULL; -	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START, -				  VMALLOC_END, node, gfp_mask, caller); +	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node, +				  gfp_mask, caller);  	if (!area)  		return NULL; @@ -1588,6 +1575,27 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,  	return addr;  } +/** + *	__vmalloc_node  -  allocate virtually contiguous memory + *	@size:		allocation size + *	@align:		desired alignment + *	@gfp_mask:	flags for the page level allocator + *	@prot:		protection mask for the allocated pages + *	@node:		node to use for allocation or -1 + *	@caller:	caller's return address + * + *	Allocate enough pages to cover @size from the page level + *	allocator with @gfp_mask flags.  Map them into contiguous + *	kernel virtual space, using a pagetable protection of @prot. + */ +static void *__vmalloc_node(unsigned long size, unsigned long align, +			    gfp_t gfp_mask, pgprot_t prot, +			    int node, void *caller) +{ +	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, +				gfp_mask, prot, node, caller); +} +  void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)  {  	return __vmalloc_node(size, 1, gfp_mask, prot, -1,  |