author    Andy Fleming <afleming@freescale.com>  2011-10-13 04:33:54 +0000
committer David S. Miller <davem@davemloft.net>  2011-10-19 15:59:45 -0400
commit    3d153a7c8b23031df35e61377c0600a6c96a8b0b (patch)
tree      f5c7f622331728ba12f40c2ebb66666ce3d924fd
parent    1e5c22cde3b85737921d3ec6ecf2c356e5b64ea7 (diff)
net: Allow skb_recycle_check to be done in stages
skb_recycle_check() resets the skb if it is eligible for recycling. However, a driver may want to manipulate the skb data after it has determined eligibility but before the skb is reset. Support this by splitting the eligibility check from the reset: skb_is_recycleable(), a new inline helper, performs the check, and skb_recycle(), a new exported function, performs the reset.

Signed-off-by: Andy Fleming <afleming@freescale.com>
Acked-by: David Daney <david.daney@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/skbuff.h  21
-rw-r--r--  net/core/skbuff.c       51
2 files changed, 47 insertions(+), 25 deletions(-)
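
For illustration, the staged pattern this patch enables might look like the following driver sketch (my_reuse_or_alloc() and MY_RX_BUF_SIZE are hypothetical names, not part of the patch):

#include <linux/skbuff.h>

#define MY_RX_BUF_SIZE 1536	/* assumed rx ring buffer size */

/* Reuse an eligible skb for the rx ring, or allocate a fresh one. */
static struct sk_buff *my_reuse_or_alloc(struct sk_buff *skb)
{
	if (skb && skb_is_recycleable(skb, MY_RX_BUF_SIZE)) {
		/*
		 * Eligibility is established but the skb is not yet
		 * wiped, so the driver may still inspect skb->data
		 * here, e.g. to copy-break a small received frame.
		 */
		skb_recycle(skb);	/* reset as if from __alloc_skb() */
		return skb;
	}
	return dev_alloc_skb(MY_RX_BUF_SIZE);
}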
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6fcbbbd12ceb..77ddf2de712f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -550,6 +550,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}
+extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
@@ -2484,5 +2485,25 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb)
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
+static inline bool skb_is_recycleable(struct sk_buff *skb, int skb_size)
+{
+ if (irqs_disabled())
+ return false;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+ return false;
+
+ if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ return false;
+
+ skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+ if (skb_end_pointer(skb) - skb->head < skb_size)
+ return false;
+
+ if (skb_shared(skb) || skb_cloned(skb))
+ return false;
+
+ return true;
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ce357d986251..e27104039a39 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -485,6 +485,30 @@ void consume_skb(struct sk_buff *skb)
EXPORT_SYMBOL(consume_skb);
/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo;
+
+ skb_release_head_state(skb);
+
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->data = skb->head + NET_SKB_PAD;
+ skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
* skb_recycle_check - check if skb can be reused for receive
* @skb: buffer
* @skb_size: minimum receive buffer size
@@ -498,33 +522,10 @@ EXPORT_SYMBOL(consume_skb);
*/
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
- struct skb_shared_info *shinfo;
-
- if (irqs_disabled())
- return false;
-
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
- return false;
-
- if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
- return false;
-
- skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
- if (skb_end_pointer(skb) - skb->head < skb_size)
- return false;
-
- if (skb_shared(skb) || skb_cloned(skb))
+ if (!skb_is_recycleable(skb, skb_size))
return false;
- skb_release_head_state(skb);
-
- shinfo = skb_shinfo(skb);
- memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
- atomic_set(&shinfo->dataref, 1);
-
- memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->data = skb->head + NET_SKB_PAD;
- skb_reset_tail_pointer(skb);
+ skb_recycle(skb);
return true;
}
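
Existing one-shot callers are unaffected, since skb_recycle_check() is now simply the composition of the two stages. A hypothetical tx-completion path might use it as follows (my_tx_complete() and the recycle queue are illustrative, not from the patch):

#include <linux/skbuff.h>

/* Keep a transmitted buffer for rx reuse when possible, else free it. */
static void my_tx_complete(struct sk_buff_head *recycle_q,
			   struct sk_buff *skb, int rx_buf_size)
{
	if (skb_recycle_check(skb, rx_buf_size))
		skb_queue_head(recycle_q, skb);
	else
		dev_kfree_skb_any(skb);
}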