Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--	include/linux/skbuff.h	175
1 file changed, 173 insertions(+), 2 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 909923717830..2725f4e5a9bf 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -146,8 +146,14 @@ struct skb_shared_info {
unsigned short gso_segs;
unsigned short gso_type;
__be32 ip6_frag_id;
+#ifdef CONFIG_HAS_DMA
+ unsigned int num_dma_maps;
+#endif
struct sk_buff *frag_list;
skb_frag_t frags[MAX_SKB_FRAGS];
+#ifdef CONFIG_HAS_DMA
+ dma_addr_t dma_maps[MAX_SKB_FRAGS + 1];
+#endif
};
/* We divide dataref into two halves. The higher 16 bits hold references
@@ -353,6 +359,14 @@ struct sk_buff {
#include <asm/system.h>
+#ifdef CONFIG_HAS_DMA
+#include <linux/dma-mapping.h>
+extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
+ enum dma_data_direction dir);
+extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
+ enum dma_data_direction dir);
+#endif
+
extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
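The skb_dma_map()/skb_dma_unmap() pair declared above maps an entire skb (linear area plus page fragments) in one call and records the addresses in the new skb_shinfo(skb)->dma_maps[] array. A minimal TX-path sketch, assuming the usual zero-on-success return convention and that dma_maps[0] holds the linear mapping while dma_maps[1..nr_frags] hold the fragments (which the MAX_SKB_FRAGS + 1 sizing suggests); all names starting with example_ are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical TX-path sketch: map head + frags, feed them to descriptors,
 * then unmap from the completion handler.
 */
static int example_xmit_map(struct device *dev, struct sk_buff *skb)
{
	unsigned int i;

	if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
		return -ENOMEM;		/* mapping failed: drop or requeue */

	for (i = 0; i <= skb_shinfo(skb)->nr_frags; i++) {
		dma_addr_t addr = skb_shinfo(skb)->dma_maps[i];

		/* program addr into the i-th hardware descriptor here */
		(void)addr;
	}

	/* ... later, from the TX-completion path: */
	skb_dma_unmap(dev, skb, DMA_TO_DEVICE);
	return 0;
}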
@@ -369,6 +383,8 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, 1, -1);
}
+extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
+
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
gfp_t priority);
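skb_recycle_check() lets a driver test whether an skb it has finished transmitting is clean enough to be reused as an RX buffer of a given size instead of being freed. A hypothetical TX-cleanup sketch, assuming the function returns nonzero when the skb may be recycled (struct my_priv and its fields are invented):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {			/* hypothetical driver private state */
	struct sk_buff_head rx_recycle;
	unsigned int rx_buf_size;
};

/* Sketch: keep recyclable skbs on a private list for later RX refill. */
static void example_tx_cleanup(struct my_priv *priv, struct sk_buff *skb)
{
	if (skb_recycle_check(skb, priv->rx_buf_size))
		__skb_queue_head(&priv->rx_recycle, skb);
	else
		dev_kfree_skb_any(skb);
}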
@@ -459,6 +475,37 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
}
/**
+ * skb_queue_is_last - check if skb is the last entry in the queue
+ * @list: queue head
+ * @skb: buffer
+ *
+ * Returns true if @skb is the last buffer on the list.
+ */
+static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ return (skb->next == (struct sk_buff *) list);
+}
+
+/**
+ * skb_queue_next - return the next packet in the queue
+ * @list: queue head
+ * @skb: current buffer
+ *
+ * Return the next packet in @list after @skb. It is only valid to
+ * call this if skb_queue_is_last() evaluates to false.
+ */
+static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ /* This BUG_ON may seem severe, but if we just return then we
+ * are going to dereference garbage.
+ */
+ BUG_ON(skb_queue_is_last(list, skb));
+ return skb->next;
+}
+
+/**
* skb_get - reference buffer
* @skb: buffer to reference
*
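skb_queue_is_last() and skb_queue_next() together allow manual iteration without open-coding comparisons against the list-head sentinel. A small sketch (the helper name is made up; the caller is assumed to hold whatever lock protects the queue):

#include <linux/skbuff.h>

/* Count the buffers from @from to the end of @queue. */
static unsigned int example_count_from(const struct sk_buff_head *queue,
				       const struct sk_buff *from)
{
	unsigned int n = 1;

	while (!skb_queue_is_last(queue, from)) {
		from = skb_queue_next(queue, from);
		n++;
	}
	return n;
}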
@@ -646,6 +693,22 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
return list_->qlen;
}
+/**
+ * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
+ * @list: queue to initialize
+ *
+ * This initializes only the list and queue length aspects of
+ * an sk_buff_head object.  This allows the list aspects of an
+ * sk_buff_head to be initialized without reinitializing things like
+ * the spinlock.  It can also be used for on-stack sk_buff_head
+ * objects where the spinlock is known not to be used.
+ */
+static inline void __skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
/*
* This function creates a split out lock class for each invocation;
* this is needed for now since a whole lot of users of the skb-queue
@@ -657,8 +720,7 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
- list->prev = list->next = (struct sk_buff *)list;
- list->qlen = 0;
+ __skb_queue_head_init(list);
}
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
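As the comment above notes, the main beneficiaries of __skb_queue_head_init() are on-stack queues whose spinlock is never taken. A minimal sketch (the function and its arguments are hypothetical):

#include <linux/skbuff.h>

/* Sketch: batch a couple of skbs on a stack-local list without its lock. */
static void example_collect_and_free(struct sk_buff *a, struct sk_buff *b)
{
	struct sk_buff_head tmp;

	__skb_queue_head_init(&tmp);	/* no spin_lock_init() needed */
	__skb_queue_tail(&tmp, a);
	__skb_queue_tail(&tmp, b);

	while (!skb_queue_empty(&tmp))
		kfree_skb(__skb_dequeue(&tmp));
}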
@@ -685,6 +747,83 @@ static inline void __skb_insert(struct sk_buff *newsk,
list->qlen++;
}
+static inline void __skb_queue_splice(const struct sk_buff_head *list,
+ struct sk_buff *prev,
+ struct sk_buff *next)
+{
+ struct sk_buff *first = list->next;
+ struct sk_buff *last = list->prev;
+
+ first->prev = prev;
+ prev->next = first;
+
+ last->next = next;
+ next->prev = last;
+}
+
+/**
+ * skb_queue_splice - join two skb lists; this is designed for stacks
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ */
+static inline void skb_queue_splice(const struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, (struct sk_buff *) head, head->next);
+ head->qlen += list->qlen;
+ }
+}
+
+/**
+ * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ *
+ * The list at @list is reinitialised.
+ */
+static inline void skb_queue_splice_init(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, (struct sk_buff *) head, head->next);
+ head->qlen += list->qlen;
+ __skb_queue_head_init(list);
+ }
+}
+
+/**
+ * skb_queue_splice_tail - join two skb lists, each list being a queue
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ */
+static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+ head->qlen += list->qlen;
+ }
+}
+
+/**
+ * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ *
+ * Each of the lists is a queue.
+ * The list at @list is reinitialised.
+ */
+static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+ head->qlen += list->qlen;
+ __skb_queue_head_init(list);
+ }
+}
+
/**
* __skb_queue_after - queue a buffer at the list head
* @list: list to use
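A typical use of these splice helpers is draining a lock-protected queue onto a private on-stack list and then processing it with no lock held. A sketch, assuming the sk_buff_head's own lock protects the shared queue (the function name is made up):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Sketch: move everything off @shared under its lock, free it afterwards. */
static void example_drain(struct sk_buff_head *shared)
{
	struct sk_buff_head process;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&process);

	spin_lock_irqsave(&shared->lock, flags);
	skb_queue_splice_tail_init(shared, &process);	/* @shared is now empty */
	spin_unlock_irqrestore(&shared->lock, flags);

	while ((skb = __skb_dequeue(&process)) != NULL)
		kfree_skb(skb);
}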
@@ -829,6 +968,9 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
skb_shinfo(skb)->nr_frags = i + 1;
}
+extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+ int off, int size);
+
#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
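skb_add_rx_frag() is the paged-receive counterpart of skb_fill_page_desc(): it attaches @page at fragment slot @i and, unlike the inline helper, is also expected to update the skb's length accounting (that behaviour lives in the C file, not this header, so treat it as an assumption). A hypothetical packet-split RX sketch:

#include <linux/skbuff.h>

/* Sketch: build an RX skb whose payload lives in a received page. */
static struct sk_buff *example_build_rx_skb(struct net_device *dev,
					    struct page *page,
					    unsigned int hdr_len,
					    unsigned int data_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, hdr_len);

	if (!skb) {
		netdev_free_page(dev, page);
		return NULL;
	}

	/* The protocol headers are assumed to be copied into the linear
	 * area elsewhere; hang the remaining payload off the page.
	 */
	skb_add_rx_frag(skb, 0, page, 0, data_len);
	return skb;
}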
@@ -1243,6 +1385,26 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
+extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
+
+/**
+ * netdev_alloc_page - allocate a page for packet-split RX (ps-rx) on a specific device
+ * @dev: network device to receive on
+ *
+ * Allocate a new page, node-local to the specified device.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+static inline struct page *netdev_alloc_page(struct net_device *dev)
+{
+ return __netdev_alloc_page(dev, GFP_ATOMIC);
+}
+
+static inline void netdev_free_page(struct net_device *dev, struct page *page)
+{
+ __free_page(page);
+}
+
/**
* skb_clone_writable - is the header of a clone writable
* @skb: buffer to check
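netdev_alloc_page() gives packet-split drivers a GFP_ATOMIC page allocation to post as an RX buffer, with netdev_free_page() (currently a plain __free_page()) as the release path. A hypothetical ring-refill sketch; struct my_rx_ring and its fields are invented:

#include <linux/skbuff.h>

struct my_rx_ring {			/* hypothetical ring state */
	struct net_device *dev;
	struct page *pages[256];
	unsigned int to_fill;
};

/* Sketch: replenish empty RX slots with fresh pages until allocation fails. */
static void example_refill(struct my_rx_ring *ring)
{
	while (ring->to_fill) {
		unsigned int i = ring->to_fill - 1;
		struct page *page = netdev_alloc_page(ring->dev);

		if (!page)
			break;			/* retry on a later pass */

		ring->pages[i] = page;
		ring->to_fill = i;
		/* dma_map_page() and descriptor setup would go here */
	}
}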
@@ -1434,6 +1596,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
skb != (struct sk_buff *)(queue); \
skb = tmp, tmp = skb->next)
+#define skb_queue_walk_from(queue, skb) \
+ for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
+ skb = skb->next)
+
+#define skb_queue_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->next; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->next)
+
#define skb_queue_reverse_walk(queue, skb) \
for (skb = (queue)->prev; \
prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
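Unlike skb_queue_walk(), the new _from walkers continue from the caller's current position instead of restarting at the head, and the _safe variant caches the next pointer so the current entry may be unlinked. A sketch that trims a queue from a given buffer onwards (the helper name is invented; the queue lock is assumed to be held):

#include <linux/skbuff.h>

/* Sketch: unlink and free @skb and every buffer after it on @queue. */
static void example_trim_from(struct sk_buff_head *queue, struct sk_buff *skb)
{
	struct sk_buff *tmp;

	skb_queue_walk_from_safe(queue, skb, tmp) {
		__skb_unlink(skb, queue);
		kfree_skb(skb);
	}
}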