FD.io VPP  v21.06-1-gbb7418cf9
Vector Packet Processing
output.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vppinfra/ring.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp_packet.h>

#include <vnet/devices/devices.h>

#include <avf/avf.h>

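/* Extract the descriptor type (DTYP) field from the low 4 bits of the
 * descriptor's second quadword; the TX function below treats 0x0F as a
 * descriptor that the hardware has finished processing. */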
static_always_inline u8
avf_tx_desc_get_dtyp (avf_tx_desc_t * d)
{
  return d->qword[1] & 0x0f;
}

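/* TCP/UDP pseudo-header layouts, used below to seed the packet's L4
 * checksum field for checksum and segmentation offload. */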
struct avf_ip4_psh
{
  u32 src;
  u32 dst;
  u8 zero;
  u8 proto;
  u16 l4len;
};

struct avf_ip6_psh
{
  ip6_address_t src;
  ip6_address_t dst;
  u32 l4len;
  u32 proto;
};

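/* Compute the checksum/TSO descriptor flags for buffer b and pre-populate
 * the packet's L4 checksum with the pseudo-header checksum. For TSO,
 * length fields that the hardware rewrites per segment are zeroed.
 * Returns 0 when no supported offload is requested. */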
static_always_inline u64
avf_tx_prepare_cksum (vlib_buffer_t * b, u8 is_tso)
{
  u64 flags = 0;
  if (!is_tso && !(b->flags & VNET_BUFFER_F_OFFLOAD))
    return 0;

  vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
  u32 is_tcp = is_tso || oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
  u32 is_udp = !is_tso && oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;

  if (!is_tcp && !is_udp)
    return 0;

  u32 is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  u32 is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;

  ASSERT (!(is_tcp && is_udp));
  ASSERT (is_ip4 || is_ip6);
  i16 l2_hdr_offset = b->current_data;
  i16 l3_hdr_offset = vnet_buffer (b)->l3_hdr_offset;
  i16 l4_hdr_offset = vnet_buffer (b)->l4_hdr_offset;
  u16 l2_len = l3_hdr_offset - l2_hdr_offset;
  u16 l3_len = l4_hdr_offset - l3_hdr_offset;
  ip4_header_t *ip4 = (void *) (b->data + l3_hdr_offset);
  ip6_header_t *ip6 = (void *) (b->data + l3_hdr_offset);
  tcp_header_t *tcp = (void *) (b->data + l4_hdr_offset);
  udp_header_t *udp = (void *) (b->data + l4_hdr_offset);
  u16 l4_len = is_tcp ? tcp_header_bytes (tcp) : sizeof (udp_header_t);
  u16 sum = 0;

  flags |= AVF_TXD_OFFSET_MACLEN (l2_len) |
    AVF_TXD_OFFSET_IPLEN (l3_len) | AVF_TXD_OFFSET_L4LEN (l4_len);
  flags |= is_ip4 ? AVF_TXD_CMD_IIPT_IPV4 : AVF_TXD_CMD_IIPT_IPV6;
  flags |= is_tcp ? AVF_TXD_CMD_L4T_TCP : AVF_TXD_CMD_L4T_UDP;

  if (is_ip4)
    ip4->checksum = 0;

  if (is_tso)
    {
      if (is_ip4)
        ip4->length = 0;
      else
        ip6->payload_length = 0;
    }

  if (is_ip4)
    {
      struct avf_ip4_psh psh = { 0 };
      psh.src = ip4->src_address.as_u32;
      psh.dst = ip4->dst_address.as_u32;
      psh.proto = ip4->protocol;
      psh.l4len =
        is_tso ? 0 :
        clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
                              (l4_hdr_offset - l3_hdr_offset));
      sum = ~ip_csum (&psh, sizeof (psh));
    }
  else
    {
      struct avf_ip6_psh psh = { 0 };
      psh.src = ip6->src_address;
      psh.dst = ip6->dst_address;
      psh.proto = clib_host_to_net_u32 ((u32) ip6->protocol);
      psh.l4len = is_tso ? 0 : ip6->payload_length;
      sum = ~ip_csum (&psh, sizeof (psh));
    }

  /* ip_csum does a byte swap for some reason... */
  sum = clib_net_to_host_u16 (sum);
  if (is_tcp)
    tcp->checksum = sum;
  else
    udp->checksum = sum;
  return flags;
}

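/* Fill a TSO context descriptor. Context descriptors carry no packet data,
 * so a reference is taken on a preallocated placeholder buffer instead; its
 * index is returned so it can be released when the slot completes. */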
static_always_inline u32
avf_tx_fill_ctx_desc (vlib_main_t * vm, avf_txq_t * txq, avf_tx_desc_t * d,
                      vlib_buffer_t * b)
{
  vlib_buffer_t *ctx_ph;
  u32 *bi = txq->ph_bufs;

next:
  ctx_ph = vlib_get_buffer (vm, bi[0]);
  if (PREDICT_FALSE (ctx_ph->ref_count == 255))
    {
      bi++;
      goto next;
    }

  /* Acquire a reference on the placeholder buffer */
  ctx_ph->ref_count++;

  u16 l234hdr_sz = vnet_buffer (b)->l4_hdr_offset - b->current_data +
    vnet_buffer2 (b)->gso_l4_hdr_sz;
  u16 tlen = vlib_buffer_length_in_chain (vm, b) - l234hdr_sz;
  d[0].qword[0] = 0;
  d[0].qword[1] = AVF_TXD_DTYP_CTX | AVF_TXD_CTX_CMD_TSO
    | AVF_TXD_CTX_SEG_MSS (vnet_buffer2 (b)->gso_size) |
    AVF_TXD_CTX_SEG_TLEN (tlen);
  return bi[0];
}

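/* Copy staged descriptors into the ring using the widest unaligned vector
 * loads/stores available (512/256/128-bit), with a scalar tail loop. */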
static_always_inline void
avf_tx_copy_desc (avf_tx_desc_t * d, avf_tx_desc_t * s, u32 n_descs)
{
#if defined CLIB_HAVE_VEC512
  while (n_descs >= 8)
    {
      u64x8u *dv = (u64x8u *) d;
      u64x8u *sv = (u64x8u *) s;

      dv[0] = sv[0];
      dv[1] = sv[1];

      /* next */
      d += 8;
      s += 8;
      n_descs -= 8;
    }
#elif defined CLIB_HAVE_VEC256
  while (n_descs >= 4)
    {
      u64x4u *dv = (u64x4u *) d;
      u64x4u *sv = (u64x4u *) s;

      dv[0] = sv[0];
      dv[1] = sv[1];

      /* next */
      d += 4;
      s += 4;
      n_descs -= 4;
    }
#elif defined CLIB_HAVE_VEC128
  while (n_descs >= 2)
    {
      u64x2u *dv = (u64x2u *) d;
      u64x2u *sv = (u64x2u *) s;

      dv[0] = sv[0];
      dv[1] = sv[1];

      /* next */
      d += 2;
      s += 2;
      n_descs -= 2;
    }
#endif
  while (n_descs)
    {
      d[0].qword[0] = s[0].qword[0];
      d[0].qword[1] = s[0].qword[1];
      d++;
      s++;
      n_descs--;
    }
}

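/* Fill one data descriptor: qword[0] carries the buffer's DMA address
 * (virtual or physical depending on VA DMA support); qword[1] packs the
 * buffer length together with the command flags. */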
static_always_inline void
avf_tx_fill_data_desc (vlib_main_t * vm, avf_tx_desc_t * d,
                       vlib_buffer_t * b, u64 cmd, int use_va_dma)
{
  if (use_va_dma)
    d->qword[0] = vlib_buffer_get_current_va (b);
  else
    d->qword[0] = vlib_buffer_get_current_pa (vm, b);
  d->qword[1] = (((u64) b->current_length) << 34 | cmd | AVF_TXD_CMD_RSV);
}
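
/* Stage packets into txq->tmp_descs / txq->tmp_bufs. Simple single-buffer
 * packets are handled four at a time; chained, checksum-offloaded or GSO
 * packets take the one-by-one path. Returns the number of packets consumed
 * and stores the number of staged descriptors in n_enq_descs. */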
static_always_inline u16
avf_tx_prepare (vlib_main_t * vm, vlib_node_runtime_t * node,
                avf_txq_t * txq, u32 * buffers, u32 n_packets,
                u16 * n_enq_descs, int use_va_dma)
{
  const u64 cmd_eop = AVF_TXD_CMD_EOP;
  u16 n_free_desc, n_desc_left, n_packets_left = n_packets;
  vlib_buffer_t *b[4];
  avf_tx_desc_t *d = txq->tmp_descs;
  u32 *tb = txq->tmp_bufs;

  n_free_desc = n_desc_left = txq->size - txq->n_enqueued - 8;

  if (n_desc_left == 0)
    return 0;

  while (n_packets_left && n_desc_left)
    {
      u32 flags, or_flags;

      if (n_packets_left < 8 || n_desc_left < 4)
        goto one_by_one;

      vlib_prefetch_buffer_with_index (vm, buffers[4], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[5], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[6], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[7], LOAD);

      b[0] = vlib_get_buffer (vm, buffers[0]);
      b[1] = vlib_get_buffer (vm, buffers[1]);
      b[2] = vlib_get_buffer (vm, buffers[2]);
      b[3] = vlib_get_buffer (vm, buffers[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;

      if (PREDICT_FALSE (or_flags &
                         (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD |
                          VNET_BUFFER_F_GSO)))
        goto one_by_one;

      vlib_buffer_copy_indices (tb, buffers, 4);

      avf_tx_fill_data_desc (vm, d + 0, b[0], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 1, b[1], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 2, b[2], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 3, b[3], cmd_eop, use_va_dma);

      buffers += 4;
      n_packets_left -= 4;
      n_desc_left -= 4;
      d += 4;
      tb += 4;
      continue;

    one_by_one:
      tb[0] = buffers[0];
      b[0] = vlib_get_buffer (vm, buffers[0]);
      flags = b[0]->flags;

      /* No chained buffers or TSO case */
      if (PREDICT_TRUE (
            (flags & (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_GSO)) == 0))
        {
          u64 cmd = cmd_eop;

          if (PREDICT_FALSE (flags & VNET_BUFFER_F_OFFLOAD))
            cmd |= avf_tx_prepare_cksum (b[0], 0 /* is_tso */);

          avf_tx_fill_data_desc (vm, d, b[0], cmd, use_va_dma);
        }
      else
        {
          u16 n_desc_needed = 1;
          u64 cmd = 0;

          if (flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              vlib_buffer_t *next = vlib_get_buffer (vm, b[0]->next_buffer);
              n_desc_needed = 2;
              while (next->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  next = vlib_get_buffer (vm, next->next_buffer);
                  n_desc_needed++;
                }
            }

          if (flags & VNET_BUFFER_F_GSO)
            {
              n_desc_needed++;
            }
          else if (PREDICT_FALSE (n_desc_needed > 8))
            {
              vlib_buffer_free_one (vm, buffers[0]);
              vlib_error_count (vm, node->node_index,
                                AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
              n_packets_left -= 1;
              buffers += 1;
              continue;
            }

          if (PREDICT_FALSE (n_desc_left < n_desc_needed))
            break;

          if (flags & VNET_BUFFER_F_GSO)
            {
              /* Enqueue a context descriptor */
              tb[1] = tb[0];
              tb[0] = avf_tx_fill_ctx_desc (vm, txq, d, b[0]);
              n_desc_left -= 1;
              d += 1;
              tb += 1;
              cmd = avf_tx_prepare_cksum (b[0], 1 /* is_tso */);
            }
          else if (flags & VNET_BUFFER_F_OFFLOAD)
            {
              cmd = avf_tx_prepare_cksum (b[0], 0 /* is_tso */);
            }

          /* Deal with chain buffer if present */
          while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              avf_tx_fill_data_desc (vm, d, b[0], cmd, use_va_dma);

              n_desc_left -= 1;
              d += 1;
              tb += 1;

              tb[0] = b[0]->next_buffer;
              b[0] = vlib_get_buffer (vm, b[0]->next_buffer);
            }

          avf_tx_fill_data_desc (vm, d, b[0], cmd_eop | cmd, use_va_dma);
        }

      buffers += 1;
      n_packets_left -= 1;
      n_desc_left -= 1;
      d += 1;
      tb += 1;
    }

  *n_enq_descs = n_free_desc - n_desc_left;
  return n_packets - n_packets_left;
}

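/* Device-class TX function: release buffers for descriptors the hardware
 * has completed (tracked through the RS slot ring), stage new descriptors
 * with avf_tx_prepare, copy them into the descriptor ring (handling wrap),
 * request a report-status writeback on the last descriptor and bump the
 * queue tail register. */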
VNET_DEVICE_CLASS_TX_FN (avf_device_class) (vlib_main_t * vm,
                                            vlib_node_runtime_t * node,
                                            vlib_frame_t * frame)
{
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  avf_device_t *ad = avf_get_device (rd->dev_instance);
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
  u8 qid = tf->queue_id;
  avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid);
  u16 next;
  u16 mask = txq->size - 1;
  u32 *buffers = vlib_frame_vector_args (frame);
  u16 n_enq, n_left, n_desc, *slot;
  u16 n_retry = 2;

  if (tf->shared_queue)
    clib_spinlock_lock (&txq->lock);

  n_left = frame->n_vectors;

retry:
  next = txq->next;
  /* release consumed bufs */
  if (txq->n_enqueued)
    {
      i32 complete_slot = -1;
      while (1)
        {
          u16 *slot = clib_ring_get_first (txq->rs_slots);

          if (slot == 0)
            break;

          if (avf_tx_desc_get_dtyp (txq->descs + slot[0]) != 0x0F)
            break;

          complete_slot = slot[0];

          clib_ring_deq (txq->rs_slots);
        }

      if (complete_slot >= 0)
        {
          u16 first, mask, n_free;
          mask = txq->size - 1;
          first = (txq->next - txq->n_enqueued) & mask;
          n_free = (complete_slot + 1 - first) & mask;

          txq->n_enqueued -= n_free;
          vlib_buffer_free_from_ring_no_next (vm, txq->bufs, first,
                                              txq->size, n_free);
        }
    }

  n_desc = 0;
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    n_enq = avf_tx_prepare (vm, node, txq, buffers, n_left, &n_desc, 1);
  else
    n_enq = avf_tx_prepare (vm, node, txq, buffers, n_left, &n_desc, 0);

  if (n_desc)
    {
      if (PREDICT_TRUE (next + n_desc <= txq->size))
        {
          /* no wrap */
          avf_tx_copy_desc (txq->descs + next, txq->tmp_descs, n_desc);
          vlib_buffer_copy_indices (txq->bufs + next, txq->tmp_bufs, n_desc);
        }
      else
        {
          /* wrap */
          u32 n_not_wrap = txq->size - next;
          avf_tx_copy_desc (txq->descs + next, txq->tmp_descs, n_not_wrap);
          avf_tx_copy_desc (txq->descs, txq->tmp_descs + n_not_wrap,
                            n_desc - n_not_wrap);
          vlib_buffer_copy_indices (txq->bufs + next, txq->tmp_bufs,
                                    n_not_wrap);
          vlib_buffer_copy_indices (txq->bufs, txq->tmp_bufs + n_not_wrap,
                                    n_desc - n_not_wrap);
        }

      next += n_desc;
      if ((slot = clib_ring_enq (txq->rs_slots)))
        {
          u16 rs_slot = slot[0] = (next - 1) & mask;
          txq->descs[rs_slot].qword[1] |= AVF_TXD_CMD_RS;
        }

      txq->next = next & mask;
      avf_tail_write (txq->qtx_tail, txq->next);
      txq->n_enqueued += n_desc;
      n_left -= n_enq;
    }

  if (n_left)
    {
      buffers += n_enq;

      if (n_retry--)
        goto retry;

      vlib_buffer_free (vm, buffers, n_left);
      vlib_error_count (vm, node->node_index,
                        AVF_TX_ERROR_NO_FREE_SLOTS, n_left);
    }

  if (tf->shared_queue)
    clib_spinlock_unlock (&txq->lock);

  return frame->n_vectors - n_left;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */