Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c              3
-rw-r--r--   net/core/filter.c           6
-rw-r--r--   net/core/netprio_cgroup.c  30
-rw-r--r--   net/core/pktgen.c           4
-rw-r--r--   net/core/skbuff.c         168
-rw-r--r--   net/core/sock.c            10
6 files changed, 152 insertions, 69 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 33684b6e95e..cd0981977f5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3602,7 +3602,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
}
EXPORT_SYMBOL(napi_frags_finish);
-struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
struct sk_buff *skb = napi->skb;
struct ethhdr *eth;
@@ -3637,7 +3637,6 @@ struct sk_buff *napi_frags_skb(struct napi_struct *napi)
out:
return skb;
}
-EXPORT_SYMBOL(napi_frags_skb);
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
diff --git a/net/core/filter.c b/net/core/filter.c
index 47a5f055e7f..a3eddb515d1 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -38,6 +38,7 @@
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
+#include <linux/seccomp.h>
/* No hurry in this branch
*
@@ -355,6 +356,11 @@ load_b:
A = 0;
continue;
}
+#ifdef CONFIG_SECCOMP_FILTER
+ case BPF_S_ANC_SECCOMP_LD_W:
+ A = seccomp_bpf_load(fentry->k);
+ continue;
+#endif
default:
WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
fentry->code, fentry->jt,
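
For context (not part of the commit itself): the new BPF_S_ANC_SECCOMP_LD_W case is the hook used by seccomp filter mode — the seccomp core validates a user-supplied classic BPF program and rewrites its absolute loads into this ancillary instruction, so sk_run_filter() fetches fields of struct seccomp_data through seccomp_bpf_load() instead of reading from an skb. A minimal userspace sketch of such a filter follows; it is illustrative only, uses the uapi macros from <linux/filter.h> and <linux/seccomp.h>, and a production filter should also check seccomp_data.arch.

/* Deny getppid(2) with EPERM, allow everything else. */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
	struct sock_filter insns[] = {
		/* A <- seccomp_data.nr (the syscall number) */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* if (A == __NR_getppid) return ERRNO(EPERM); else ALLOW */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len    = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
		perror("seccomp");
		return 1;
	}
	/* This call now goes through the interpreter path patched above. */
	printf("getppid() -> %ld\n", (long)getppid());
	return 0;
}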
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 09eda68b676..5b8aa2fae48 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -25,21 +25,6 @@
#include <net/sock.h>
#include <net/netprio_cgroup.h>
-static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp);
-static void cgrp_destroy(struct cgroup *cgrp);
-static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
-
-struct cgroup_subsys net_prio_subsys = {
- .name = "net_prio",
- .create = cgrp_create,
- .destroy = cgrp_destroy,
- .populate = cgrp_populate,
-#ifdef CONFIG_NETPRIO_CGROUP
- .subsys_id = net_prio_subsys_id,
-#endif
- .module = THIS_MODULE
-};
-
#define PRIOIDX_SZ 128
static unsigned long prioidx_map[PRIOIDX_SZ];
@@ -259,12 +244,19 @@ static struct cftype ss_files[] = {
.read_map = read_priomap,
.write_string = write_priomap,
},
+ { } /* terminate */
};
-static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
-{
- return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
-}
+struct cgroup_subsys net_prio_subsys = {
+ .name = "net_prio",
+ .create = cgrp_create,
+ .destroy = cgrp_destroy,
+#ifdef CONFIG_NETPRIO_CGROUP
+ .subsys_id = net_prio_subsys_id,
+#endif
+ .base_cftypes = ss_files,
+ .module = THIS_MODULE
+};
static int netprio_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 30eb768cd67..cce9e53528b 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3749,13 +3749,13 @@ static void __exit pg_cleanup(void)
{
struct pktgen_thread *t;
struct list_head *q, *n;
- struct list_head list;
+ LIST_HEAD(list);
/* Stop all interfaces & threads */
pktgen_exiting = true;
mutex_lock(&pktgen_thread_lock);
- list_splice(&list, &pktgen_threads);
+ list_splice_init(&pktgen_threads, &list);
mutex_unlock(&pktgen_thread_lock);
list_for_each_safe(q, n, &list) {
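
For context (not part of the commit itself): the old pg_cleanup() declared an uninitialised struct list_head on the stack and passed the arguments to list_splice() in the wrong order, so the pktgen_threads list was never drained and the subsequent walk traversed garbage pointers. The fix initialises the on-stack head with LIST_HEAD() and uses list_splice_init(), which moves every entry onto the private list and leaves pktgen_threads empty. A kernel-style sketch of that idiom, with a hypothetical helper name used only to illustrate the pattern:

#include <linux/list.h>
#include <linux/mutex.h>

/* Move all entries off a shared list while holding its lock, then walk
 * them privately without the lock.
 */
static void drain_shared_list(struct list_head *shared, struct mutex *lock)
{
	LIST_HEAD(local);			/* on-stack head, properly initialised */
	struct list_head *pos, *n;

	mutex_lock(lock);
	list_splice_init(shared, &local);	/* 'shared' is left empty */
	mutex_unlock(lock);

	list_for_each_safe(pos, n, &local) {
		list_del(pos);
		/* ... free or tear down the containing object ... */
	}
}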
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7645df1bada..016694d6248 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -300,6 +300,40 @@ struct netdev_alloc_cache {
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+void *netdev_alloc_frag(unsigned int fragsz)
+{
+ struct netdev_alloc_cache *nc;
+ void *data = NULL;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ nc = &__get_cpu_var(netdev_alloc_cache);
+ if (unlikely(!nc->page)) {
+refill:
+ nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ nc->offset = 0;
+ }
+ if (likely(nc->page)) {
+ if (nc->offset + fragsz > PAGE_SIZE) {
+ put_page(nc->page);
+ goto refill;
+ }
+ data = page_address(nc->page) + nc->offset;
+ nc->offset += fragsz;
+ get_page(nc->page);
+ }
+ local_irq_restore(flags);
+ return data;
+}
+EXPORT_SYMBOL(netdev_alloc_frag);
+
+/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @length: length to allocate
@@ -313,32 +347,20 @@ static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
* %NULL is returned if there is no free memory.
*/
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
- unsigned int length, gfp_t gfp_mask)
+ unsigned int length, gfp_t gfp_mask)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
- struct netdev_alloc_cache *nc;
- void *data = NULL;
+ void *data = netdev_alloc_frag(fragsz);
- nc = &get_cpu_var(netdev_alloc_cache);
- if (!nc->page) {
-refill: nc->page = alloc_page(gfp_mask);
- nc->offset = 0;
+ if (likely(data)) {
+ skb = build_skb(data, fragsz);
+ if (unlikely(!skb))
+ put_page(virt_to_head_page(data));
}
- if (likely(nc->page)) {
- if (nc->offset + fragsz > PAGE_SIZE) {
- put_page(nc->page);
- goto refill;
- }
- data = page_address(nc->page) + nc->offset;
- nc->offset += fragsz;
- get_page(nc->page);
- }
- put_cpu_var(netdev_alloc_cache);
- skb = data ? build_skb(data, fragsz) : NULL;
} else {
skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
}
@@ -360,28 +382,6 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
}
EXPORT_SYMBOL(skb_add_rx_frag);
-/**
- * dev_alloc_skb - allocate an skbuff for receiving
- * @length: length to allocate
- *
- * Allocate a new &sk_buff and assign it a usage count of one. The
- * buffer has unspecified headroom built in. Users should allocate
- * the headroom they think they need without accounting for the
- * built in space. The built in space is used for optimisations.
- *
- * %NULL is returned if there is no free memory. Although this function
- * allocates memory it can be called from an interrupt.
- */
-struct sk_buff *dev_alloc_skb(unsigned int length)
-{
- /*
- * There is more code here than it seems:
- * __dev_alloc_skb is an inline
- */
- return __dev_alloc_skb(length, GFP_ATOMIC);
-}
-EXPORT_SYMBOL(dev_alloc_skb);
-
static void skb_drop_list(struct sk_buff **listp)
{
struct sk_buff *list = *listp;
@@ -3346,3 +3346,89 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
+{
+ if (head_stolen)
+ kmem_cache_free(skbuff_head_cache, skb);
+ else
+ __kfree_skb(skb);
+}
+EXPORT_SYMBOL(kfree_skb_partial);
+
+/**
+ * skb_try_coalesce - try to merge skb to prior one
+ * @to: prior buffer
+ * @from: buffer to add
+ * @fragstolen: pointer to boolean
+ * @delta_truesize: how much more was allocated than was requested
+ */
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ bool *fragstolen, int *delta_truesize)
+{
+ int i, delta, len = from->len;
+
+ *fragstolen = false;
+
+ if (skb_cloned(to))
+ return false;
+
+ if (len <= skb_tailroom(to)) {
+ BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
+ *delta_truesize = 0;
+ return true;
+ }
+
+ if (skb_has_frag_list(to) || skb_has_frag_list(from))
+ return false;
+
+ if (skb_headlen(from) != 0) {
+ struct page *page;
+ unsigned int offset;
+
+ if (skb_shinfo(to)->nr_frags +
+ skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+ return false;
+
+ if (skb_head_is_locked(from))
+ return false;
+
+ delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+ page = virt_to_head_page(from->head);
+ offset = from->data - (unsigned char *)page_address(page);
+
+ skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
+ page, offset, skb_headlen(from));
+ *fragstolen = true;
+ } else {
+ if (skb_shinfo(to)->nr_frags +
+ skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
+ return false;
+
+ delta = from->truesize -
+ SKB_TRUESIZE(skb_end_pointer(from) - from->head);
+ }
+
+ WARN_ON_ONCE(delta < len);
+
+ memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
+ skb_shinfo(from)->frags,
+ skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
+ skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
+
+ if (!skb_cloned(from))
+ skb_shinfo(from)->nr_frags = 0;
+
+ /* if the skb is not cloned this does nothing since we set nr_frags to 0 */
+ for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
+ skb_frag_ref(from, i);
+
+ to->truesize += delta;
+ to->len += len;
+ to->data_len += len;
+
+ *delta_truesize = delta;
+ return true;
+}
+EXPORT_SYMBOL(skb_try_coalesce);
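
For context (not part of the commit itself): skb_try_coalesce() lets a receive path append a newly arrived skb onto the previous buffer instead of queueing it separately, either by copying into tailroom or by stealing the source's head and frags. On success the caller is expected to account the returned truesize delta and release the source with kfree_skb_partial(), which frees only the sk_buff head when the data was stolen. A hedged sketch of that calling pattern — the helper name and the exact accounting are illustrative, modelled on the kind of socket receive-queue consumer this pair was added for:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Try to glue 'from' onto the queue's tail skb; on success 'from' is
 * consumed here and must not be touched by the caller afterwards.
 */
static bool rcvq_try_coalesce(struct sock *sk, struct sk_buff *tail,
			      struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(tail, from, &fragstolen, &delta))
		return false;

	/* 'tail' now owns the payload: charge the extra memory to the socket */
	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);

	/* free only the sk_buff head if its data was stolen into a frag */
	kfree_skb_partial(from, fragstolen);
	return true;
}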
diff --git a/net/core/sock.c b/net/core/sock.c
index 5efcd6307fa..f372d9bf497 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -143,7 +143,7 @@ static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
-int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
struct proto *proto;
int ret = 0;
@@ -151,7 +151,7 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
mutex_lock(&proto_list_mutex);
list_for_each_entry(proto, &proto_list, node) {
if (proto->init_cgroup) {
- ret = proto->init_cgroup(cgrp, ss);
+ ret = proto->init_cgroup(memcg, ss);
if (ret)
goto out;
}
@@ -162,19 +162,19 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
out:
list_for_each_entry_continue_reverse(proto, &proto_list, node)
if (proto->destroy_cgroup)
- proto->destroy_cgroup(cgrp);
+ proto->destroy_cgroup(memcg);
mutex_unlock(&proto_list_mutex);
return ret;
}
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
+void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
struct proto *proto;
mutex_lock(&proto_list_mutex);
list_for_each_entry_reverse(proto, &proto_list, node)
if (proto->destroy_cgroup)
- proto->destroy_cgroup(cgrp);
+ proto->destroy_cgroup(memcg);
mutex_unlock(&proto_list_mutex);
}
#endif