程式人生 > skb管理函數之alloc_skb、dev_alloc_skb、kfree_skb、dev_kfree_skb、consume_skb

skb管理函數之alloc_skb、dev_alloc_skb、kfree_skb、dev_kfree_skb、consume_skb

標籤:save、ren、rect、str、make、define、sed、his、offsetof(原文標籤殘留)

alloc_skb--分配skb

dev_alloc_skb--分配skb,通常被設備驅動用在中斷上下文中,它是alloc_skb的封裝函數,因為在中斷處理函數中被調用,因此要求原子操作(GFP_ATOMIC)

kfree_skb--減少skb引用,為0則釋放,用於出錯丟包時釋放skb使用;

dev_kfree_skb==consume_skb--減少skb引用,為0則釋放,成功狀態下釋放skb使用;

1 static inline struct sk_buff *alloc_skb(unsigned int size,
2                     gfp_t priority)
3 { 4 return __alloc_skb(size, priority, 0, NUMA_NO_NODE); 5 }

  1 /**
  2  *    __alloc_skb    -    allocate a network buffer
  3  *    @size: size to allocate
  4  *    @gfp_mask: allocation mask
  5  *    @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
  6  *        instead of head cache and allocate a cloned (child) skb.
7 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for 8 * allocations in case the data is required for writeback 9 * @node: numa node to allocate memory on 10 * 11 * Allocate a new &sk_buff. The returned buffer has no headroom and a 12 * tail room of at least size bytes. The object has a reference count
13 * of one. The return is the buffer. On a failure the return is %NULL. 14 * 15 * Buffers may only be allocated from interrupts using a @gfp_mask of 16 * %GFP_ATOMIC. 17 */ 18 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 19 int flags, int node) 20 { 21 struct kmem_cache *cache; 22 struct skb_shared_info *shinfo; 23 struct sk_buff *skb; 24 u8 *data; 25 bool pfmemalloc; 26 27 /* 得到分配使用的高速緩存 */ 28 cache = (flags & SKB_ALLOC_FCLONE) 29 ? skbuff_fclone_cache : skbuff_head_cache; 30 31 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX)) 32 gfp_mask |= __GFP_MEMALLOC; 33 34 /* Get the HEAD */ 35 /* 分配skb */ 36 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); 37 if (!skb) 38 goto out; 39 prefetchw(skb); 40 41 /* We do our best to align skb_shared_info on a separate cache 42 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives 43 * aligned memory blocks, unless SLUB/SLAB debug is enabled. 44 * Both skb->head and skb_shared_info are cache line aligned. 45 */ 46 /* 數據對齊 */ 47 size = SKB_DATA_ALIGN(size); 48 /* 對齊後的數據加上skb_shared_info對齊後的大小 */ 49 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 50 51 //分配數據區 52 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc); 53 if (!data) 54 goto nodata; 55 /* kmalloc(size) might give us more room than requested. 56 * Put skb_shared_info exactly at the end of allocated zone, 57 * to allow max possible filling before reallocation. 58 */ 59 /* 除了skb_shared_info以外的數據大小 */ 60 size = SKB_WITH_OVERHEAD(ksize(data)); 61 prefetchw(data + size); 62 63 /* 64 * Only clear those fields we need to clear, not those that we will 65 * actually initialise below. Hence, don‘t put any more fields after 66 * the tail pointer in struct sk_buff! 
67 */ 68 memset(skb, 0, offsetof(struct sk_buff, tail)); 69 /* Account for allocated memory : skb + skb->head */ 70 /* 總長度= skb大小+ 數據大小+ skb_shared_info大小 */ 71 skb->truesize = SKB_TRUESIZE(size); 72 /* PFMEMALLOC分配標記 */ 73 skb->pfmemalloc = pfmemalloc; 74 /* 設置引用計數為1 */ 75 atomic_set(&skb->users, 1); 76 /*head data tail均指向數據區頭部*/ 77 skb->head = data; 78 skb->data = data; 79 skb_reset_tail_pointer(skb); 80 81 /* end指向數據區尾部 */ 82 skb->end = skb->tail + size; 83 /* 初始化默認各層header偏移值 */ 84 skb->mac_header = (typeof(skb->mac_header))~0U; 85 skb->transport_header = (typeof(skb->transport_header))~0U; 86 87 /* make sure we initialize shinfo sequentially */ 88 /* 從end開始的區域為skb_shared_info */ 89 shinfo = skb_shinfo(skb); 90 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 91 /* 設置引用計數為1 */ 92 atomic_set(&shinfo->dataref, 1); 93 kmemcheck_annotate_variable(shinfo->destructor_arg); 94 95 /* 如果有克隆標記 */ 96 if (flags & SKB_ALLOC_FCLONE) { 97 struct sk_buff_fclones *fclones; 98 99 /* 得到clone結構 */ 100 fclones = container_of(skb, struct sk_buff_fclones, skb1); 101 102 kmemcheck_annotate_bitfield(&fclones->skb2, flags1); 103 104 /* 設置克隆標記 */ 105 skb->fclone = SKB_FCLONE_ORIG; 106 107 /* 設置引用為1 */ 108 atomic_set(&fclones->fclone_ref, 1); 109 110 /* 設置skb2的克隆標記 */ 111 fclones->skb2.fclone = SKB_FCLONE_CLONE; 112 } 113 out: 114 return skb; 115 nodata: 116 kmem_cache_free(cache, skb); 117 skb = NULL; 118 goto out; 119 }

1 /* legacy helper around netdev_alloc_skb() */
2 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
3 {
4     return netdev_alloc_skb(NULL, length);
5 }

 1 /**
 2  *    netdev_alloc_skb - allocate an skbuff for rx on a specific device
 3  *    @dev: network device to receive on
 4  *    @length: length to allocate
 5  *
 6  *    Allocate a new &sk_buff and assign it a usage count of one. The
 7  *    buffer has unspecified headroom built in. Users should allocate
 8  *    the headroom they think they need without accounting for the
 9  *    built in space. The built in space is used for optimisations.
10  *
11  *    %NULL is returned if there is no free memory. Although this function
12  *    allocates memory it can be called from an interrupt.
13  */
14 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
15                            unsigned int length)
16 {
17     return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
18 }

 1 /**
 2  *    __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 3  *    @dev: network device to receive on
 4  *    @len: length to allocate
 5  *    @gfp_mask: get_free_pages mask, passed to alloc_skb
 6  *
 7  *    Allocate a new &sk_buff and assign it a usage count of one. The
 8  *    buffer has NET_SKB_PAD headroom built in. Users should allocate
 9  *    the headroom they think they need without accounting for the
10  *    built in space. The built in space is used for optimisations.
11  *
12  *    %NULL is returned if there is no free memory.
13  */
14 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
15                    gfp_t gfp_mask)
16 {
17     struct page_frag_cache *nc;
18     unsigned long flags;
19     struct sk_buff *skb;
20     bool pfmemalloc;
21     void *data;
22 
23     len += NET_SKB_PAD;
24 
25     /* 
26         分配長度+ skb_shared_info長度> 一頁
27         有__GFP_DIRECT_RECLAIM | GFP_DMA 標記
28     */
29     if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
30         (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
31         /* 通過__alloc_skb分配內存*/
32         skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
33         if (!skb)
34             goto skb_fail;
35 
36         /* 分配成功 */
37         goto skb_success;
38     }
39 
40     /* 分配長度+ skb_shared_info長度*/
41     len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
42     /* 對整個長度進行對齊 */
43     len = SKB_DATA_ALIGN(len);
44 
45     if (sk_memalloc_socks())
46         gfp_mask |= __GFP_MEMALLOC;
47 
48     /* 保存中斷 */
49     local_irq_save(flags);
50 
51     nc = this_cpu_ptr(&netdev_alloc_cache);
52     /* 分配空間 */
53     data = page_frag_alloc(nc, len, gfp_mask);
54     pfmemalloc = nc->pfmemalloc;
55 
56     /* 恢復中斷 */
57     local_irq_restore(flags);
58 
59     if (unlikely(!data))
60         return NULL;
61 
62     /* 構建skb */
63     skb = __build_skb(data, len);
64     if (unlikely(!skb)) {
65         skb_free_frag(data);
66         return NULL;
67     }
68 
69     /* use OR instead of assignment to avoid clearing of bits in mask */
70     /* 設置PFMEMALLOC標記 */
71     if (pfmemalloc)
72         skb->pfmemalloc = 1;
73 
74     //打內存分配標記
75     skb->head_frag = 1;
76 
77 skb_success:
78     /* 保留空間 */
79     skb_reserve(skb, NET_SKB_PAD);
80     /* 設置輸入設備 */
81     skb->dev = dev;
82 
83 skb_fail:
84     return skb;
85 }

---free系列---

 1 /**
 2  *    kfree_skb - free an sk_buff
 3  *    @skb: buffer to free
 4  *
 5  *    Drop a reference to the buffer and free it if the usage count has
 6  *    hit zero.
 7  */
 8 /*
 9     釋放skb
10 */
11 void kfree_skb(struct sk_buff *skb)
12 {
13     if (unlikely(!skb))
14         return;
15     /* 引用為1,可直接釋放 */
16     if (likely(atomic_read(&skb->users) == 1))
17         smp_rmb();
18     /*
19         對引用減1,並且判斷,如果結果不為0
20         說明還有引用,返回
21     */
22     else if (likely(!atomic_dec_and_test(&skb->users)))
23         return;
24     trace_kfree_skb(skb, __builtin_return_address(0));
25 
26     //真正的skb釋放
27     __kfree_skb(skb);
28 }

 1 /**
 2  *    __kfree_skb - private function
 3  *    @skb: buffer
 4  *
 5  *    Free an sk_buff. Release anything attached to the buffer.
 6  *    Clean the state. This is an internal helper function. Users should
 7  *    always call kfree_skb
 8  */
 9 /* 釋放skb */
10 void __kfree_skb(struct sk_buff *skb)
11 {
12     /* 釋放skb附帶的所有數據 */
13     skb_release_all(skb);
14     /* 釋放skb */
15     kfree_skbmem(skb);
16 }

/* dev_kfree_skb() is simply consume_skb(): a "successful" release */
#define dev_kfree_skb(a)	consume_skb(a)

 1 /**
 2  *    consume_skb - free an skbuff
 3  *    @skb: buffer to free
 4  *
 5  *    Drop a ref to the buffer and free it if the usage count has hit zero
 6  *    Functions identically to kfree_skb, but kfree_skb assumes that the frame
 7  *    is being dropped after a failure and notes that
 8  */
 9 /* 釋放skb,與kfree_skb區別是,kfree_skb用於失敗時丟包釋放 */
10 void consume_skb(struct sk_buff *skb)
11 {
12     if (unlikely(!skb))
13         return;
14     if (likely(atomic_read(&skb->users) == 1))
15         smp_rmb();
16     else if (likely(!atomic_dec_and_test(&skb->users)))
17         return;
18     trace_consume_skb(skb);
19     __kfree_skb(skb);
20 }

skb管理函數之alloc_skb、dev_alloc_skb、kfree_skb、dev_kfree_skb、consume_skb