diff options
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/Kconfig | 8 | ||||
| -rw-r--r-- | mm/fremap.c | 5 | ||||
| -rw-r--r-- | mm/hugetlb.c | 8 | ||||
| -rw-r--r-- | mm/ksm.c | 2 | ||||
| -rw-r--r-- | mm/memcontrol.c | 8 | ||||
| -rw-r--r-- | mm/memory_hotplug.c | 8 | ||||
| -rw-r--r-- | mm/mempolicy.c | 4 | ||||
| -rw-r--r-- | mm/process_vm_access.c | 8 | 
8 files changed, 30 insertions(+), 21 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index ae55c1e04d1..3bea74f1ccf 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -286,8 +286,12 @@ config NR_QUICK
 	default "1"
 
 config VIRT_TO_BUS
-	def_bool y
-	depends on HAVE_VIRT_TO_BUS
+	bool
+	help
+	  An architecture should select this if it implements the
+	  deprecated interface virt_to_bus().  All new architectures
+	  should probably not select this.
+
 
 config MMU_NOTIFIER
 	bool
diff --git a/mm/fremap.c b/mm/fremap.c
index 0cd4c11488e..4723ac8d2fc 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -129,7 +129,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	struct vm_area_struct *vma;
 	int err = -EINVAL;
 	int has_write_lock = 0;
-	vm_flags_t vm_flags;
+	vm_flags_t vm_flags = 0;
 
 	if (prot)
 		return err;
@@ -254,7 +254,8 @@ get_write_lock:
 	 */
 
 out:
-	vm_flags = vma->vm_flags;
+	if (vma)
+		vm_flags = vma->vm_flags;
 	if (likely(!has_write_lock))
 		up_read(&mm->mmap_sem);
 	else
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a0be33bb19..ca9a7c6d7e9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
 unsigned long hugetlb_total_pages(void)
 {
-	struct hstate *h = &default_hstate;
-	return h->nr_huge_pages * pages_per_huge_page(h);
+	struct hstate *h;
+	unsigned long nr_total_pages = 0;
+
+	for_each_hstate(h)
+		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+	return nr_total_pages;
 }
 
 static int hugetlb_acct_memory(struct hstate *h, long delta)
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -489,7 +489,7 @@ out:		page = NULL;
  */
 static inline int get_kpfn_nid(unsigned long kpfn)
 {
-	return ksm_merge_across_nodes ? 0 : pfn_to_nid(kpfn);
+	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
 }
 
 static void remove_node_from_stable_tree(struct stable_node *stable_node)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53b8201b31e..2b552224f5c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3012,6 +3012,8 @@ void memcg_update_array_size(int num)
 		memcg_limited_groups_array_size = memcg_caches_array_size(num);
 }
 
+static void kmem_cache_destroy_work_func(struct work_struct *w);
+
 int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 {
 	struct memcg_cache_params *cur_params = s->memcg_params;
@@ -3031,6 +3033,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 			return -ENOMEM;
 		}
 
+		INIT_WORK(&s->memcg_params->destroy,
+				kmem_cache_destroy_work_func);
 		s->memcg_params->is_root_cache = true;
 
 		/*
@@ -3078,6 +3082,8 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
 	if (!s->memcg_params)
 		return -ENOMEM;
 
+	INIT_WORK(&s->memcg_params->destroy,
+			kmem_cache_destroy_work_func);
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
@@ -3358,8 +3364,6 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
 	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
 		cachep = memcg_params_to_cache(params);
 		cachep->memcg_params->dead = true;
-		INIT_WORK(&cachep->memcg_params->destroy,
-				  kmem_cache_destroy_work_func);
 		schedule_work(&cachep->memcg_params->destroy);
 	}
 	mutex_unlock(&memcg->slab_caches_mutex);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b81a367b9f3..ee376576081 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
-		if (zone->wait_table)
+		/*
+		 * wait_table may be allocated from boot memory,
+		 * here only free if it's allocated by vmalloc.
+		 */
+		if (is_vmalloc_addr(zone->wait_table))
 			vfree(zone->wait_table);
 	}
 
@@ -1801,7 +1805,7 @@ int __ref remove_memory(int nid, u64 start, u64 size)
 	int retry = 1;
 
 	start_pfn = PFN_DOWN(start);
-	end_pfn = start_pfn + PFN_DOWN(size);
+	end_pfn = PFN_UP(start + size - 1);
 
 	/*
	 * When CONFIG_MEMCG is on, one memory block may be used by other
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 31d26637b65..74310017296 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2390,9 +2390,9 @@ restart:
 				*mpol_new = *n->policy;
 				atomic_set(&mpol_new->refcnt, 1);
-				sp_node_init(n_new, n->end, end, mpol_new);
-				sp_insert(sp, n_new);
+				sp_node_init(n_new, end, n->end, mpol_new);
 				n->end = start;
+				sp_insert(sp, n_new);
 				n_new = NULL;
 				mpol_new = NULL;
 				break;
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 926b4664974..fd26d043350 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -429,12 +429,6 @@ compat_process_vm_rw(compat_pid_t pid,
 	if (flags != 0)
 		return -EINVAL;
 
-	if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec)))
-		goto out;
-
-	if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec)))
-		goto out;
-
 	if (vm_write)
 		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
 						  UIO_FASTIOV, iovstack_l,
@@ -459,8 +453,6 @@ free_iovecs:
 		kfree(iov_r);
 	if (iov_l != iovstack_l)
 		kfree(iov_l);
-
-out:
 	return rc;
 }