Diffstat (limited to 'include/linux/memblock.h')
-rw-r--r--   include/linux/memblock.h   166
1 file changed, 122 insertions(+), 44 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e6b843e16e8..a6bb1023514 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,8 +2,6 @@
 #define _LINUX_MEMBLOCK_H
 #ifdef __KERNEL__

-#define MEMBLOCK_ERROR	0
-
 #ifdef CONFIG_HAVE_MEMBLOCK
 /*
  * Logical memory blocks.
@@ -19,81 +17,161 @@
 #include <linux/init.h>
 #include <linux/mm.h>

-#include <asm/memblock.h>
-
 #define INIT_MEMBLOCK_REGIONS	128

 struct memblock_region {
 	phys_addr_t base;
 	phys_addr_t size;
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+	int nid;
+#endif
 };

 struct memblock_type {
 	unsigned long cnt;	/* number of regions */
 	unsigned long max;	/* size of the allocated array */
+	phys_addr_t total_size;	/* size of all regions */
 	struct memblock_region *regions;
 };

 struct memblock {
 	phys_addr_t current_limit;
-	phys_addr_t memory_size;	/* Updated by memblock_analyze() */
 	struct memblock_type memory;
 	struct memblock_type reserved;
 };

 extern struct memblock memblock;
 extern int memblock_debug;
-extern int memblock_can_resize;

 #define memblock_dbg(fmt, ...) \
 	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

-u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align);
+phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
+				phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+				   phys_addr_t size, phys_addr_t align);
 int memblock_free_reserved_regions(void);
 int memblock_reserve_reserved_regions(void);

-extern void memblock_init(void);
-extern void memblock_analyze(void);
-extern long memblock_add(phys_addr_t base, phys_addr_t size);
-extern long memblock_remove(phys_addr_t base, phys_addr_t size);
-extern long memblock_free(phys_addr_t base, phys_addr_t size);
-extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
+void memblock_allow_resize(void);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add(phys_addr_t base, phys_addr_t size);
+int memblock_remove(phys_addr_t base, phys_addr_t size);
+int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_reserve(phys_addr_t base, phys_addr_t size);
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
+			  unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges.  Available after early_node_map is
+ * populated.
+ */
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
+			   phys_addr_t *out_end, int *out_nid);
+
+/**
+ * for_each_free_mem_range - iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock.  Available as
+ * soon as memblock is initialized.
+ */
+#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = 0,							\
+	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid);	\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
+
+void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
+			       phys_addr_t *out_end, int *out_nid);

-/* The numa aware allocator is only available if
- * CONFIG_ARCH_POPULATES_NODE_MAP is set
+/**
+ * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in reverse
+ * order.  Available as soon as memblock is initialized.
  */
-extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
-					int nid);
-extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
-					    int nid);
+#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
+	for (i = (u64)ULLONG_MAX,					\
+	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid);	\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))

-extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid);
+
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+	r->nid = nid;
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+	return r->nid;
+}
+#else
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+	return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);

 /* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
 #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0

-extern phys_addr_t memblock_alloc_base(phys_addr_t size,
-					 phys_addr_t align,
-					 phys_addr_t max_addr);
-extern phys_addr_t __memblock_alloc_base(phys_addr_t size,
-					   phys_addr_t align,
-					   phys_addr_t max_addr);
-extern phys_addr_t memblock_phys_mem_size(void);
-extern phys_addr_t memblock_start_of_DRAM(void);
-extern phys_addr_t memblock_end_of_DRAM(void);
-extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
-extern int memblock_is_memory(phys_addr_t addr);
-extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
-extern int memblock_is_reserved(phys_addr_t addr);
-extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+				phys_addr_t max_addr);
+phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+				  phys_addr_t max_addr);
+phys_addr_t memblock_phys_mem_size(void);
+phys_addr_t memblock_start_of_DRAM(void);
+phys_addr_t memblock_end_of_DRAM(void);
+void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+int memblock_is_memory(phys_addr_t addr);
+int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+int memblock_is_reserved(phys_addr_t addr);
+int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

-extern void memblock_dump_all(void);
+extern void __memblock_dump_all(void);

-/* Provided by the architecture */
-extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
-extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
-				   phys_addr_t addr2, phys_addr_t size2);
+static inline void memblock_dump_all(void)
+{
+	if (memblock_debug)
+		__memblock_dump_all();
+}

 /**
  * memblock_set_current_limit - Set the current allocation limit to allow
@@ -101,7 +179,7 @@ extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
  *                         accessible during boot
  * @limit: New limit value (physical address)
  */
-extern void memblock_set_current_limit(phys_addr_t limit);
+void memblock_set_current_limit(phys_addr_t limit);


 /*
@@ -154,9 +232,9 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 	     region++)


-#ifdef ARCH_DISCARD_MEMBLOCK
-#define __init_memblock __init
-#define __initdata_memblock __initdata
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#define __init_memblock __meminit
+#define __initdata_memblock __meminitdata
 #else
 #define __init_memblock
 #define __initdata_memblock
@@ -165,7 +243,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
-	return MEMBLOCK_ERROR;
+	return 0;
 }
 #endif /* CONFIG_HAVE_MEMBLOCK */
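
The iterators introduced above are the generic replacement for the dropped architecture hooks (memblock_nid_range(), memblock_memory_can_coalesce()). A minimal sketch of how a hypothetical early-boot caller could use for_each_free_mem_range(); the helper name count_free_bytes() is made up for illustration and assumes CONFIG_HAVE_MEMBLOCK:

#include <linux/memblock.h>

/* Sum the free (memory && !reserved) bytes memblock knows about on @nid;
 * pass MAX_NUMNODES to walk every node. */
static phys_addr_t __init count_free_bytes(int nid)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	for_each_free_mem_range(i, nid, &start, &end, NULL)
		total += end - start;

	return total;
}

for_each_free_mem_range_reverse() walks the same areas top-down; callers that want to allocate from high addresses first would use it the same way.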
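
for_each_mem_pfn_range() is the pfn-based counterpart and is only compiled in under CONFIG_HAVE_MEMBLOCK_NODE_MAP. A sketch in the same spirit; pages_present_on_node() is likewise a hypothetical helper:

/* Count the pfns registered for @nid once early_node_map data is populated. */
static unsigned long __init pages_present_on_node(int nid)
{
	unsigned long start_pfn, end_pfn, nr_pages = 0;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
		nr_pages += end_pfn - start_pfn;

	return nr_pages;
}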
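
On the registration side, memblock_add() gains a node-aware variant memblock_add_node(), array resizing is now opted into with memblock_allow_resize() rather than driven by the removed memblock_init()/memblock_analyze() pair, and boot-time allocations respect memblock_set_current_limit(). A sketch of how an early platform setup path might string these together; every address and size below is a made-up placeholder, and the failure check only relies on the 0 return used by the !CONFIG_HAVE_MEMBLOCK stub:

#include <linux/memblock.h>

static void __init example_early_mem_setup(void)
{
	phys_addr_t scratch;

	/* Describe RAM; the node id is recorded only when
	 * CONFIG_HAVE_MEMBLOCK_NODE_MAP is set, otherwise it is ignored. */
	memblock_add_node(0x00000000, 0x20000000, 0);	/* 512MB on node 0 */
	memblock_add_node(0x20000000, 0x20000000, 1);	/* 512MB on node 1 */

	/* Keep a firmware blob out of the free pool. */
	memblock_reserve(0x01000000, 0x00100000);

	/* Allow the region arrays to be reallocated if they fill up. */
	memblock_allow_resize();

	/* Limit boot-time allocations to the first 256MB. */
	memblock_set_current_limit(0x10000000);

	/* Carve out a 64KB page-aligned scratch buffer. */
	scratch = memblock_alloc(0x10000, PAGE_SIZE);
	if (!scratch)
		panic("early scratch allocation failed");
}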