Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	103
1 file changed, 98 insertions(+), 5 deletions(-)
```diff
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6aff93c98ac..94da8ee9e2c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -50,6 +50,8 @@
 #include <linux/cpu.h>
 #include <linux/oom.h>
 #include "internal.h"
+#include <net/sock.h>
+#include <net/tcp_memcontrol.h>
 
 #include <asm/uaccess.h>
 
@@ -286,6 +288,10 @@ struct mem_cgroup {
 	 */
 	struct mem_cgroup_stat_cpu nocpu_base;
 	spinlock_t pcp_counter_lock;
+
+#ifdef CONFIG_INET
+	struct tcp_memcontrol tcp_mem;
+#endif
 };
 
 /* Stuffs for move charges at task migration. */
@@ -365,7 +371,58 @@ enum charge_type {
 
 static void mem_cgroup_get(struct mem_cgroup *memcg);
 static void mem_cgroup_put(struct mem_cgroup *memcg);
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+
+/* Writing them here to avoid exposing memcg's inner layout */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_INET
+#include <net/sock.h>
+#include <net/ip.h>
+
+static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
+void sock_update_memcg(struct sock *sk)
+{
+	/* A socket spends its whole life in the same cgroup */
+	if (sk->sk_cgrp) {
+		WARN_ON(1);
+		return;
+	}
+	if (static_branch(&memcg_socket_limit_enabled)) {
+		struct mem_cgroup *memcg;
+
+		BUG_ON(!sk->sk_prot->proto_cgroup);
+
+		rcu_read_lock();
+		memcg = mem_cgroup_from_task(current);
+		if (!mem_cgroup_is_root(memcg)) {
+			mem_cgroup_get(memcg);
+			sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+		}
+		rcu_read_unlock();
+	}
+}
+EXPORT_SYMBOL(sock_update_memcg);
+
+void sock_release_memcg(struct sock *sk)
+{
+	if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
+		struct mem_cgroup *memcg;
+		WARN_ON(!sk->sk_cgrp->memcg);
+		memcg = sk->sk_cgrp->memcg;
+		mem_cgroup_put(memcg);
+	}
+}
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
+{
+	if (!memcg || mem_cgroup_is_root(memcg))
+		return NULL;
+
+	return &memcg->tcp_mem.cg_proto;
+}
+EXPORT_SYMBOL(tcp_proto_cgroup);
+#endif /* CONFIG_INET */
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
@@ -745,7 +802,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 	preempt_enable();
 }
 
-static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
+struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 {
 	return container_of(cgroup_subsys_state(cont,
 				mem_cgroup_subsys_id), struct mem_cgroup,
@@ -4612,6 +4669,36 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
 }
 #endif /* CONFIG_NUMA */
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+	/*
+	 * Part of this would be better living in a separate allocation
+	 * function, leaving us with just the cgroup tree population work.
+	 * We, however, depend on state such as network's proto_list that
+	 * is only initialized after cgroup creation. I found the less
+	 * cumbersome way to deal with it to defer it all to populate time
+	 */
+	return mem_cgroup_sockets_init(cont, ss);
+};
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+				struct cgroup *cont)
+{
+	mem_cgroup_sockets_destroy(cont, ss);
+}
+#else
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+	return 0;
+}
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+				struct cgroup *cont)
+{
+}
+#endif
+
 static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "usage_in_bytes",
@@ -4843,12 +4930,13 @@ static void mem_cgroup_put(struct mem_cgroup *memcg)
 /*
  * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
  */
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 {
 	if (!memcg->res.parent)
 		return NULL;
 	return mem_cgroup_from_res_counter(memcg->res.parent, res);
 }
+EXPORT_SYMBOL(parent_mem_cgroup);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 static void __init enable_swap_cgroup(void)
@@ -4907,9 +4995,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 		int cpu;
 		enable_swap_cgroup();
 		parent = NULL;
-		root_mem_cgroup = memcg;
 		if (mem_cgroup_soft_limit_tree_init())
 			goto free_out;
+		root_mem_cgroup = memcg;
 		for_each_possible_cpu(cpu) {
 			struct memcg_stock_pcp *stock =
 						&per_cpu(memcg_stock, cpu);
@@ -4948,7 +5036,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	return &memcg->css;
 free_out:
 	__mem_cgroup_free(memcg);
-	root_mem_cgroup = NULL;
 	return ERR_PTR(error);
 }
 
@@ -4965,6 +5052,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
+	kmem_cgroup_destroy(ss, cont);
+
 	mem_cgroup_put(memcg);
 }
 
@@ -4978,6 +5067,10 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
 
 	if (!ret)
 		ret = register_memsw_files(cont, ss);
+
+	if (!ret)
+		ret = register_kmem_files(cont, ss);
+
 	return ret;
 }
```
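
For context, below is a minimal sketch of how the two hooks added above (`sock_update_memcg()` and `sock_release_memcg()`) are intended to be used from the socket lifecycle. The wrapper functions shown here (`example_socket_create()` and `example_socket_free()`) are illustrative assumptions, not call sites introduced by this diff; the patch itself only defines the hooks and the memcg-side plumbing.

```c
/*
 * Hypothetical call sites for the memcg socket hooks -- a sketch only.
 * sock_update_memcg()/sock_release_memcg() come from the diff above
 * (declared via <net/sock.h> in this patch series); the surrounding
 * functions are examples, not kernel APIs.
 */
#include <net/sock.h>

static int example_socket_create(struct sock *sk)
{
	/*
	 * Charge the new socket to the creating task's memcg.  While
	 * memcg_socket_limit_enabled stays disabled (no TCP limit ever
	 * configured), the static branch makes this effectively a no-op.
	 */
	sock_update_memcg(sk);
	return 0;
}

static void example_socket_free(struct sock *sk)
{
	/* Drop the memcg reference taken when the socket was created. */
	sock_release_memcg(sk);
}
```

The design choice visible in the diff is that a socket is bound to its cgroup once, at creation time (hence the `WARN_ON` if `sk->sk_cgrp` is already set), and the static branch keeps the accounting path free for systems that never set a per-cgroup TCP limit.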