FD.io VPP  v21.06-1-gbb7418cf9
Vector Packet Processing
device.c
Go to the documentation of this file.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2016 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <sys/types.h>
19 #include <sys/stat.h>
20 #include <fcntl.h>
21 
22 #include <vlib/vlib.h>
23 #include <vlib/unix/unix.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ethernet/ethernet.h>
26 #include <vnet/gso/gro_func.h>
28 #include <vnet/ip/ip4_packet.h>
29 #include <vnet/ip/ip6_packet.h>
30 #include <vnet/tcp/tcp_packet.h>
31 #include <vnet/udp/udp_packet.h>
33 
34 #define VIRTIO_TX_MAX_CHAIN_LEN 127
35 
/* Error counter table: each _(NAME, "description") pair expands into a
 * VIRTIO_TX_ERROR_<NAME> enum entry and its human-readable counter string
 * (used below and by the device-class registration). */
36 #define foreach_virtio_tx_func_error \
37 _(NO_FREE_SLOTS, "no free tx slots") \
38 _(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
39 _(PENDING_MSGS, "pending msgs in tx ring") \
40 _(INDIRECT_DESC_ALLOC_FAILED, "indirect descriptor allocation failed - packet drop") \
41 _(OUT_OF_ORDER, "out-of-order buffers in used ring") \
42 _(GSO_PACKET_DROP, "gso disabled on itf -- gso packet drop") \
43 _(CSUM_OFFLOAD_PACKET_DROP, "checksum offload disabled on itf -- csum offload packet drop")
44 
/* Enumerates the tx error codes generated from the table above.
 * NOTE(review): this capture is truncated — the enum terminator
 * (VIRTIO_TX_N_ERROR and closing brace) is not visible here. */
45 typedef enum
46 {
47 #define _(f,s) VIRTIO_TX_ERROR_##f,
49 #undef _
52 
/* Counter strings matching the error enum, in table order.
 * NOTE(review): the foreach_virtio_tx_func_error expansion line is
 * missing from this capture. */
53 static char *virtio_tx_func_error_strings[] = {
54 #define _(n,s) s,
56 #undef _
57 };
58 
59 static u8 *
60 format_virtio_device (u8 * s, va_list * args)
61 {
62  u32 dev_instance = va_arg (*args, u32);
63  int verbose = va_arg (*args, int);
64  u32 indent = format_get_indent (s);
65 
66  s = format (s, "VIRTIO interface");
67  if (verbose)
68  {
69  s = format (s, "\n%U instance %u", format_white_space, indent + 2,
70  dev_instance);
71  }
72  return s;
73 }
74 
/* Per-packet tx trace record. NOTE(review): the member list is missing
 * from this capture — uses below show sw_if_index, buffer_index, gho
 * (generic header offsets) and a vlib_buffer_t copy named 'buffer'. */
75 typedef struct
76 {
82 
/* Format callback for tx trace entries: prints the buffer index, the
 * parsed header info and bytes from the traced buffer's pre_data. */
83 static u8 *
84 format_virtio_tx_trace (u8 * s, va_list * va)
85 {
86  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
87  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
88  virtio_tx_trace_t *t = va_arg (*va, virtio_tx_trace_t *);
89  u32 indent = format_get_indent (s);
90 
/* NOTE(review): several %U argument lines feeding these format() calls
 * are missing from this capture. */
91  s = format (s, "%Ubuffer 0x%x: %U\n",
92  format_white_space, indent,
94  s =
95  format (s, "%U%U\n", format_white_space, indent,
97  s =
98  format (s, "%U%U", format_white_space, indent,
100  sizeof (t->buffer.pre_data));
101  return s;
102 }
103 
/* Record a tx trace entry for buffer b0. For TUN interfaces there is no
 * L2 header, so the IP version is sniffed from the first nibble of the
 * payload (0x4x -> IPv4, 0x6x -> IPv6) before parsing header offsets. */
104 static void
106  u32 bi, int is_tun)
107 {
/* NOTE(review): the trace-pointer declaration line (105/108) is missing
 * from this capture. */
109  t = vlib_add_trace (vm, node, b0, sizeof (t[0]));
110  t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
111  t->buffer_index = bi;
112  if (is_tun)
113  {
114  int is_ip4 = 0, is_ip6 = 0;
115 
/* high nibble of the first payload byte is the IP version field */
116  switch (((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0)
117  {
118  case 0x40:
119  is_ip4 = 1;
120  break;
121  case 0x60:
122  is_ip6 = 1;
123  break;
124  default:
125  break;
126  }
127  vnet_generic_header_offset_parser (b0, &t->gho, 0, is_ip4, is_ip6);
128  }
129  else
/* L2 interfaces: IP version taken from buffer flags (line 130 missing
 * from this capture). */
131  b0->flags &
132  VNET_BUFFER_F_IS_IP4,
133  b0->flags & VNET_BUFFER_F_IS_IP6);
134 
/* copy buffer metadata (and, per line 136 missing here, leading data
 * bytes) into the trace record */
135  clib_memcpy_fast (&t->buffer, b0, sizeof (*b0) - sizeof (b0->pre_data));
137  sizeof (t->buffer.pre_data));
138 }
139 
/* Drop n buffers: bump the node error counter by n, update the
 * interface drop counter (counter line missing from this capture) and
 * free the buffer chains. */
140 static void
142  uword node_index, u32 *buffers, u16 n,
144 {
145  vlib_error_count (vm, node_index, error, n);
148  vm->thread_index, vif->sw_if_index, n);
149  vlib_buffer_free (vm, buffers, n);
150 }
151 
152 static void
153 virtio_memset_ring_u32 (u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
154 {
155  ASSERT (n_buffers <= ring_size);
156 
157  if (PREDICT_TRUE (start + n_buffers <= ring_size))
158  {
159  clib_memset_u32 (ring + start, ~0, n_buffers);
160  }
161  else
162  {
163  clib_memset_u32 (ring + start, ~0, ring_size - start);
164  clib_memset_u32 (ring, ~0, n_buffers - (ring_size - start));
165  }
166 }
167 
/* Reclaim tx buffers the device has completed on a split virtqueue.
 * Walks the used ring from last_used_idx, freeing contiguous in-order
 * runs in bulk; ids that break the sequence are freed one at a time and
 * counted as out-of-order (which hurts performance, see comment below). */
168 static void
171 {
172  u16 used = vring->desc_in_use;
173  u16 sz = vring->size;
174  u16 mask = sz - 1;
175  u16 last = vring->last_used_idx;
176  u16 n_left = vring->used->idx - last;
177  u16 out_of_order_count = 0;
178 
179  if (n_left == 0)
180  return;
181 
182  while (n_left)
183  {
184  vring_used_elem_t *e = &vring->used->ring[last & mask];
185  u16 slot, n_buffers;
186  slot = n_buffers = e->id;
187 
/* accumulate a run of consecutive ids, following NEXT-chained descs */
188  while (e->id == (n_buffers & mask))
189  {
190  n_left--;
191  last++;
192  n_buffers++;
193  vring_desc_t *d = &vring->desc[e->id];
194  u16 next;
195  while (d->flags & VRING_DESC_F_NEXT)
196  {
197  n_buffers++;
198  next = d->next;
199  d = &vring->desc[next];
200  }
201  if (n_left == 0)
202  break;
203  e = &vring->used->ring[last & mask];
204  }
/* bulk-free the contiguous run and mark its ring slots empty (~0) */
205  vlib_buffer_free_from_ring (vm, vring->buffers, slot,
206  sz, (n_buffers - slot));
207  virtio_memset_ring_u32 (vring->buffers, slot, sz, (n_buffers - slot));
208  used -= (n_buffers - slot);
209 
210  if (n_left > 0)
211  {
/* id broke the sequence: free it individually, flag out-of-order */
212  vlib_buffer_free (vm, &vring->buffers[e->id], 1);
213  vring->buffers[e->id] = ~0;
214  used--;
215  last++;
216  n_left--;
217  out_of_order_count++;
218  vring->flags |= VRING_TX_OUT_OF_ORDER;
219  }
220  }
221 
222  /*
223  * Some vhost-backends give buffers back in out-of-order fashion in used ring.
224  * It impacts the overall virtio-performance.
225  */
226  if (out_of_order_count)
227  vlib_error_count (vm, node_index, VIRTIO_TX_ERROR_OUT_OF_ORDER,
228  out_of_order_count);
229 
230  vring->desc_in_use = used;
231  vring->last_used_idx = last;
232 }
233 
/* Reclaim completed tx buffers on a packed virtqueue: walk descriptors
 * from last_used_idx while their AVAIL/USED flag bits match the current
 * used_wrap_counter, then free the whole run in one shot. */
234 static void
237 {
238  vring_packed_desc_t *d;
239  u16 sz = vring->size;
240  u16 last = vring->last_used_idx;
241  u16 n_buffers = 0, start;
242  u16 flags;
243 
244  if (vring->desc_in_use == 0)
245  return;
246 
247  d = &vring->packed_desc[last];
248  flags = d->flags;
249  start = d->id;
250 
/* AVAIL is bit 7, USED is bit 15; a descriptor is "used" when both
 * equal the current wrap counter */
251  while ((flags & VRING_DESC_F_AVAIL) == (vring->used_wrap_counter << 7) &&
252  (flags & VRING_DESC_F_USED) == (vring->used_wrap_counter << 15))
253  {
254  last++;
255  n_buffers++;
256 
/* wrap around the ring and toggle the wrap counter */
257  if (last >= sz)
258  {
259  last = 0;
260  vring->used_wrap_counter ^= 1;
261  }
262  d = &vring->packed_desc[last];
263  flags = d->flags;
264  }
265 
266  if (n_buffers)
267  {
268  vlib_buffer_free_from_ring (vm, vring->buffers, start, sz, n_buffers);
269  virtio_memset_ring_u32 (vring->buffers, start, sz, n_buffers);
270  vring->desc_in_use -= n_buffers;
271  vring->last_used_idx = last;
272  }
273 }
274 
/* Dispatch used-descriptor reclamation to the packed or split variant
 * depending on the negotiated ring layout. */
275 static void
277  uword node_index, int packed)
278 {
279  if (packed)
280  virtio_free_used_device_desc_packed (vm, vring, node_index);
281  else
282  virtio_free_used_device_desc_split (vm, vring, node_index);
283 
284 }
285 
/* Fill the virtio net header for checksum offload: csum_start points at
 * the L4 header, csum_offset at the TCP/UDP checksum field within it.
 * The IPv4 header checksum is computed here in software (virtio devices
 * do not offload it). is_l2 selects L2 (tap) vs L3 (tun) parsing. */
286 static void
287 set_checksum_offsets (vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr,
288  const int is_l2)
289 {
290  vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
291 
292  if (b->flags & VNET_BUFFER_F_IS_IP4)
293  {
294  ip4_header_t *ip4;
295  generic_header_offset_t gho = { 0 };
296  vnet_generic_header_offset_parser (b, &gho, is_l2, 1 /* ip4 */ ,
297  0 /* ip6 */ );
298  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
299  hdr->csum_start = gho.l4_hdr_offset; // 0x22;
300  if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
301  {
302  hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
303  }
304  else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
305  {
306  hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
307  }
308 
309  /*
310  * virtio devices do not support IP4 checksum offload. So driver takes care
311  * of it while doing tx.
312  */
/* NOTE(review): the initializer line for ip4 (line 314) is missing from
 * this capture. */
313  ip4 =
315  if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
316  ip4->checksum = ip4_header_checksum (ip4);
317  }
318  else if (b->flags & VNET_BUFFER_F_IS_IP6)
319  {
320  generic_header_offset_t gho = { 0 };
321  vnet_generic_header_offset_parser (b, &gho, is_l2, 0 /* ip4 */ ,
322  1 /* ip6 */ );
323  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
324  hdr->csum_start = gho.l4_hdr_offset; // 0x36;
325  if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
326  {
327  hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
328  }
329  else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
330  {
331  hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
332  }
333  }
334 }
335 
/* Fill the virtio net header for a GSO (TSO) packet: gso_type/gso_size/
 * hdr_len plus the checksum fields (TCP only — gso_type is TCPV4/TCPV6).
 * As with plain csum offload, the IPv4 header checksum is done here. */
336 static void
337 set_gso_offsets (vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr, const int is_l2)
338 {
339  vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
340 
341  if (b->flags & VNET_BUFFER_F_IS_IP4)
342  {
343  ip4_header_t *ip4;
344  generic_header_offset_t gho = { 0 };
345  vnet_generic_header_offset_parser (b, &gho, is_l2, 1 /* ip4 */ ,
346  0 /* ip6 */ );
347  hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
348  hdr->gso_size = vnet_buffer2 (b)->gso_size;
349  hdr->hdr_len = gho.hdr_sz;
350  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
351  hdr->csum_start = gho.l4_hdr_offset; // 0x22;
352  hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
/* NOTE(review): the initializer line for ip4 (line 354) is missing from
 * this capture. */
353  ip4 =
355  /*
356  * virtio devices do not support IP4 checksum offload. So driver takes care
357  * of it while doing tx.
358  */
359  if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
360  ip4->checksum = ip4_header_checksum (ip4);
361  }
362  else if (b->flags & VNET_BUFFER_F_IS_IP6)
363  {
364  generic_header_offset_t gho = { 0 };
365  vnet_generic_header_offset_parser (b, &gho, is_l2, 0 /* ip4 */ ,
366  1 /* ip6 */ );
367  hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
368  hdr->gso_size = vnet_buffer2 (b)->gso_size;
369  hdr->hdr_len = gho.hdr_sz;
370  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
371  hdr->csum_start = gho.l4_hdr_offset; // 0x36;
372  hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
373  }
374 }
375 
/* Enqueue one buffer (possibly a chain) onto a split virtqueue slot.
 * Prepends a virtio net header (zeroed, then filled for GSO/csum
 * offload as requested), then builds descriptors in one of three ways:
 * single descriptor for unchained buffers; an indirect descriptor table
 * built inside a freshly allocated vlib buffer; or (PCI only) a chain
 * of ring descriptors after verifying enough free slots exist.
 * Returns the number of descriptors consumed (0 if the packet was
 * dropped, or the required chain length if retry is needed). */
376 static u16
378  virtio_if_t *vif, virtio_vring_t *vring, u32 bi,
379  u16 free_desc_count, u16 avail, u16 next, u16 mask,
380  int hdr_sz, int do_gso, int csum_offload, int is_pci,
381  int is_tun, int is_indirect, int is_any_layout)
382 {
383  u16 n_added = 0;
384  vring_desc_t *d;
385  int is_l2 = !is_tun;
386  d = &vring->desc[next];
387  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
/* virtio net header lives immediately before the packet data */
388  virtio_net_hdr_v1_t *hdr = vlib_buffer_get_current (b) - hdr_sz;
389  u32 drop_inline = ~0;
390 
391  clib_memset_u8 (hdr, 0, hdr_sz);
392 
/* drop GSO / csum-offload packets if the feature is disabled on itf */
393  if (b->flags & VNET_BUFFER_F_GSO)
394  {
395  if (do_gso)
396  set_gso_offsets (b, hdr, is_l2);
397  else
398  {
399  drop_inline = VIRTIO_TX_ERROR_GSO_PACKET_DROP;
400  goto done;
401  }
402  }
403  else if (b->flags & VNET_BUFFER_F_OFFLOAD)
404  {
405  if (csum_offload)
406  set_checksum_offsets (b, hdr, is_l2);
407  else
408  {
409  drop_inline = VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP;
410  goto done;
411  }
412  }
413 
414  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
415  {
416  virtio_tx_trace (vm, node, b, bi, is_tun);
417  }
418 
/* common case: unchained buffer -> one descriptor covering hdr + data.
 * NOTE(review): the non-PCI (virtual address) operand of this ternary
 * (line 422) is missing from this capture. */
419  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
420  {
421  d->addr = ((is_pci) ? vlib_buffer_get_current_pa (vm, b) :
423  d->len = b->current_length + hdr_sz;
424  d->flags = 0;
425  }
426  else if (is_indirect)
427  {
428  /*
429  * We are using single vlib_buffer_t for indirect descriptor(s)
430  * chain. Single descriptor is 16 bytes and vlib_buffer_t
431  * has 2048 bytes space. So maximum long chain can have 128
432  * (=2048/16) indirect descriptors.
433  * It can easily support 65535 bytes of Jumbo frames with
434  * each data buffer size of 512 bytes minimum.
435  */
436  u32 indirect_buffer = 0;
437  if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
438  {
439  drop_inline = VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED;
440  goto done;
441  }
442 
/* chain the indirect-table buffer in front of the packet so both are
 * freed together when the device completes the descriptor */
443  vlib_buffer_t *indirect_desc = vlib_get_buffer (vm, indirect_buffer);
444  indirect_desc->current_data = 0;
445  indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
446  indirect_desc->next_buffer = bi;
447  bi = indirect_buffer;
448 
449  vring_desc_t *id =
450  (vring_desc_t *) vlib_buffer_get_current (indirect_desc);
451  u32 count = 1;
452  if (is_pci)
453  {
454  d->addr = vlib_physmem_get_pa (vm, id);
455  id->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;
456 
457  /*
458  * If VIRTIO_F_ANY_LAYOUT is not negotiated, then virtio_net_hdr
459  * should be presented in separate descriptor and data will start
460  * from next descriptor.
461  */
462  if (is_any_layout)
463  id->len = b->current_length + hdr_sz;
464  else
465  {
466  id->len = hdr_sz;
467  id->flags = VRING_DESC_F_NEXT;
468  id->next = count;
469  count++;
470  id++;
471  id->addr = vlib_buffer_get_current_pa (vm, b);
472  id->len = b->current_length;
473  }
474  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
475  {
476  id->flags = VRING_DESC_F_NEXT;
477  id->next = count;
478  count++;
479  id++;
480  b = vlib_get_buffer (vm, b->next_buffer);
481  id->addr = vlib_buffer_get_current_pa (vm, b);
482  id->len = b->current_length;
/* cap the chain; anything beyond is truncated and counted */
483  if (PREDICT_FALSE (count == VIRTIO_TX_MAX_CHAIN_LEN))
484  {
485  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
486  vlib_error_count (vm, node->node_index,
487  VIRTIO_TX_ERROR_TRUNC_PACKET, 1);
488  break;
489  }
490  }
491  }
492  else /* VIRTIO_IF_TYPE_[TAP | TUN] */
493  {
494  d->addr = pointer_to_uword (id);
495  /* first buffer in chain */
496  id->addr = pointer_to_uword (vlib_buffer_get_current (b)) - hdr_sz;
497  id->len = b->current_length + hdr_sz;
498 
499  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
500  {
501  id->flags = VRING_DESC_F_NEXT;
502  id->next = count;
503  count++;
504  id++;
505  b = vlib_get_buffer (vm, b->next_buffer);
506  id->addr = pointer_to_uword (vlib_buffer_get_current (b));
507  id->len = b->current_length;
508  if (PREDICT_FALSE (count == VIRTIO_TX_MAX_CHAIN_LEN))
509  {
510  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
511  vlib_error_count (vm, node->node_index,
512  VIRTIO_TX_ERROR_TRUNC_PACKET, 1);
513  break;
514  }
515  }
516  }
517  id->flags = 0;
518  id->next = 0;
519  d->len = count * sizeof (vring_desc_t);
/* NOTE(review): the line setting the main descriptor's INDIRECT flag
 * (line 520) is missing from this capture. */
521  }
522  else if (is_pci)
523  {
524  u16 count = next;
525  vlib_buffer_t *b_temp = b;
526  u16 n_buffers_in_chain = 1;
527 
528  /*
529  * Check the length of the chain for the required number of
530  * descriptors. Return from here, retry to get more descriptors,
531  * if chain length is greater than available descriptors.
532  */
533  while (b_temp->flags & VLIB_BUFFER_NEXT_PRESENT)
534  {
535  n_buffers_in_chain++;
536  b_temp = vlib_get_buffer (vm, b_temp->next_buffer);
537  }
538 
539  if (n_buffers_in_chain > free_desc_count)
540  return n_buffers_in_chain;
541 
542  d->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;
543  d->len = b->current_length + hdr_sz;
544 
/* unchain as we go: each segment gets its own ring descriptor.
 * NOTE(review): line 547 (presumably setting VRING_DESC_F_NEXT on d)
 * is missing from this capture. */
545  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
546  {
548  vring->buffers[count] = bi;
549  b->flags &=
550  ~(VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID);
551  bi = b->next_buffer;
552  b->next_buffer = 0;
553  n_added++;
554  count = (count + 1) & mask;
555  d->next = count;
556  d = &vring->desc[count];
557  b = vlib_get_buffer (vm, bi);
558  d->addr = vlib_buffer_get_current_pa (vm, b);
559  d->len = b->current_length;
560  }
561  d->flags = 0;
562  vring->buffers[count] = bi;
563  vring->avail->ring[avail & mask] = next;
564  n_added++;
565  return n_added;
566  }
567  else
568  {
569  ASSERT (0);
570  }
571  vring->buffers[next] = bi;
572  vring->avail->ring[avail & mask] = next;
573  n_added++;
574 
575 done:
576  if (drop_inline != ~0)
577  virtio_interface_drop_inline (vm, vif, node->node_index, &bi, 1,
578  drop_inline);
579 
580  return n_added;
581 }
582 
/* Packed-virtqueue counterpart of add_buffer_to_slot: enqueue one
 * buffer (or chain via an indirect descriptor table) at slot 'next'.
 * Fills the virtio net header for GSO/csum offload, sets descriptor
 * id/flags according to avail_wrap_counter and returns descriptors
 * consumed (0 on drop). */
583 static u16
585  virtio_if_t *vif, virtio_vring_t *vring, u32 bi,
586  u16 next, int hdr_sz, int do_gso, int csum_offload,
587  int is_pci, int is_tun, int is_indirect,
588  int is_any_layout)
589 {
590  u16 n_added = 0, flags = 0;
591  int is_l2 = !is_tun;
592  vring_packed_desc_t *d = &vring->packed_desc[next];
593  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
/* virtio net header lives immediately before the packet data */
594  virtio_net_hdr_v1_t *hdr = vlib_buffer_get_current (b) - hdr_sz;
595  u32 drop_inline = ~0;
596 
597  clib_memset (hdr, 0, hdr_sz);
598 
/* drop GSO / csum-offload packets if the feature is disabled on itf */
599  if (b->flags & VNET_BUFFER_F_GSO)
600  {
601  if (do_gso)
602  set_gso_offsets (b, hdr, is_l2);
603  else
604  {
605  drop_inline = VIRTIO_TX_ERROR_GSO_PACKET_DROP;
606  goto done;
607  }
608  }
609  else if (b->flags & VNET_BUFFER_F_OFFLOAD)
610  {
611  if (csum_offload)
612  set_checksum_offsets (b, hdr, is_l2);
613  else
614  {
615  drop_inline = VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP;
616  goto done;
617  }
618  }
619  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
620  {
621  virtio_tx_trace (vm, node, b, bi, is_tun);
622  }
623 
/* common case: unchained buffer -> one descriptor covering hdr + data.
 * NOTE(review): the non-PCI operand of this ternary (line 629) is
 * missing from this capture. */
624  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
625  {
626  d->addr =
627  ((is_pci) ? vlib_buffer_get_current_pa (vm,
628  b) :
630  d->len = b->current_length + hdr_sz;
631  }
632  else if (is_indirect)
633  {
634  /*
635  * We are using single vlib_buffer_t for indirect descriptor(s)
636  * chain. Single descriptor is 16 bytes and vlib_buffer_t
637  * has 2048 bytes space. So maximum long chain can have 128
638  * (=2048/16) indirect descriptors.
639  * It can easily support 65535 bytes of Jumbo frames with
640  * each data buffer size of 512 bytes minimum.
641  */
642  u32 indirect_buffer = 0;
643  if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
644  {
645  drop_inline = VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED;
646  goto done;
647  }
648 
/* chain the indirect-table buffer in front of the packet so both are
 * freed together on completion */
649  vlib_buffer_t *indirect_desc = vlib_get_buffer (vm, indirect_buffer);
650  indirect_desc->current_data = 0;
651  indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
652  indirect_desc->next_buffer = bi;
653  bi = indirect_buffer;
654 
655  vring_packed_desc_t *id =
656  (vring_packed_desc_t *) vlib_buffer_get_current (indirect_desc);
657  u32 count = 1;
658  if (is_pci)
659  {
660  d->addr = vlib_physmem_get_pa (vm, id);
661  id->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;
662 
663  /*
664  * If VIRTIO_F_ANY_LAYOUT is not negotiated, then virtio_net_hdr
665  * should be presented in separate descriptor and data will start
666  * from next descriptor.
667  */
668  if (is_any_layout)
669  id->len = b->current_length + hdr_sz;
670  else
671  {
672  id->len = hdr_sz;
673  id->flags = 0;
674  id->id = 0;
675  count++;
676  id++;
677  id->addr = vlib_buffer_get_current_pa (vm, b);
678  id->len = b->current_length;
679  }
680  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
681  {
682  id->flags = 0;
683  id->id = 0;
684  count++;
685  id++;
686  b = vlib_get_buffer (vm, b->next_buffer);
687  id->addr = vlib_buffer_get_current_pa (vm, b);
688  id->len = b->current_length;
/* cap the chain; anything beyond is truncated and counted */
689  if (PREDICT_FALSE (count == VIRTIO_TX_MAX_CHAIN_LEN))
690  {
691  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
692  vlib_error_count (vm, node->node_index,
693  VIRTIO_TX_ERROR_TRUNC_PACKET, 1);
694  break;
695  }
696  }
697  }
698  id->flags = 0;
699  id->id = 0;
700  d->len = count * sizeof (vring_packed_desc_t);
/* NOTE(review): the line setting VRING_DESC_F_INDIRECT on 'flags'
 * (line 701) is missing from this capture. */
702  }
703  else
704  {
705  ASSERT (0);
706  }
/* NOTE(review): the bodies setting AVAIL/USED bits in 'flags' per the
 * avail_wrap_counter (lines 709-710 and 714-715) are missing from this
 * capture. */
707  if (vring->avail_wrap_counter)
708  {
711  }
712  else
713  {
716  }
717 
718  d->id = next;
719  d->flags = flags;
720  vring->buffers[next] = bi;
721  n_added++;
722 
723 done:
724  if (drop_inline != ~0)
725  virtio_interface_drop_inline (vm, vif, node->node_index, &bi, 1,
726  drop_inline);
727 
728  return n_added;
729 }
730 
/* Transmit loop for packed virtqueues. First drains any previously
 * buffered packets (when packet_buffering is enabled), then the current
 * frame, advancing desc_next and toggling avail_wrap_counter on wrap.
 * Kicks the device if anything was enqueued and the device has not
 * disabled notifications. Returns the number of un-queued buffers. */
731 static uword
735  virtio_vring_t *vring, u32 *buffers,
736  u16 n_left, const int do_gso,
737  const int csum_offload)
738 {
739  int is_pci = (type == VIRTIO_IF_TYPE_PCI);
740  int is_tun = (type == VIRTIO_IF_TYPE_TUN);
741  int is_indirect =
742  ((vif->features & VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC)) != 0);
743  int is_any_layout =
744  ((vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)) != 0);
745  const int hdr_sz = vif->virtio_net_hdr_sz;
746  u16 sz = vring->size;
747  u16 used, next, n_buffers = 0, n_buffers_left = 0;
748  u16 n_vectors = n_left;
749 
750 
751  used = vring->desc_in_use;
752  next = vring->desc_next;
753 
/* drain previously buffered packets first to preserve ordering.
 * NOTE(review): the line reading 'bi' from the buffering FIFO (line
 * 762) is missing from this capture. */
754  if (vif->packet_buffering)
755  {
756  n_buffers = n_buffers_left = virtio_vring_n_buffers (vring->buffering);
757 
758  while (n_buffers_left && used < sz)
759  {
760  u16 n_added = 0;
761 
763  if (bi == ~0)
764  break;
765  n_added = add_buffer_to_slot_packed (
766  vm, node, vif, vring, bi, next, hdr_sz, do_gso, csum_offload,
767  is_pci, is_tun, is_indirect, is_any_layout);
768  n_buffers_left--;
769  if (PREDICT_FALSE (n_added == 0))
770  continue;
771 
772  used++;
773  next++;
774  if (next >= sz)
775  {
776  next = 0;
777  vring->avail_wrap_counter ^= 1;
778  }
779  }
780  }
781 
/* now the current frame */
782  while (n_left && used < sz)
783  {
784  u16 n_added = 0;
785 
786  n_added = add_buffer_to_slot_packed (
787  vm, node, vif, vring, buffers[0], next, hdr_sz, do_gso, csum_offload,
788  is_pci, is_tun, is_indirect, is_any_layout);
789  buffers++;
790  n_left--;
791  if (PREDICT_FALSE (n_added == 0))
792  continue;
793 
794  used++;
795  next++;
796  if (next >= sz)
797  {
798  next = 0;
799  vring->avail_wrap_counter ^= 1;
800  }
801  }
802 
/* publish ring state and notify device if anything was enqueued.
 * NOTE(review): lines 805/808 (likely a store barrier) are missing
 * from this capture. */
803  if (n_left != n_vectors || n_buffers != n_buffers_left)
804  {
806  vring->desc_next = next;
807  vring->desc_in_use = used;
809  if (vring->device_event->flags != VRING_EVENT_F_DISABLE)
810  virtio_kick (vm, vring, vif);
811  }
812 
813  return n_left;
814 }
815 
/* Scan the ring for a contiguous run of free slots (marked ~0),
 * starting the search at hint 'next'. Reports the index of the first
 * free slot and the number of consecutive free descriptors following
 * it, stopping once 'req' descriptors are found or the run breaks. */
816 static void
818  u16 next, u32 *first_free_desc_index,
819  u16 *free_desc_count)
820 {
821  u16 start = 0;
822  /* next is used as hint: from where to start looking */
823  for (u16 i = 0; i < size; i++, next++)
824  {
825  if (vring->buffers[next & mask] == ~0)
826  {
827  if (*first_free_desc_index == ~0)
828  {
829  *first_free_desc_index = (next & mask);
830  start = i;
831  (*free_desc_count)++;
832  req--;
833  if (req == 0)
834  break;
835  }
836  else
837  {
/* only count slots contiguous with the first one found */
838  if (start + *free_desc_count == i)
839  {
840  (*free_desc_count)++;
841  req--;
842  if (req == 0)
843  break;
844  }
845  else
846  break;
847  }
848  }
849  }
850 }
851 
/* Transmit loop for split virtqueues. When the ring is flagged
 * out-of-order (condition line missing from this capture), free
 * descriptors are located via virtio_find_free_desc; otherwise sz-used
 * are assumed free from desc_next. Drains buffered packets first, then
 * the frame; publishes avail->idx with a seq-cst store and kicks the
 * device unless it set NO_NOTIFY. Returns un-queued buffer count. */
852 static u16
856  virtio_vring_t *vring, u32 *buffers,
857  u16 n_left, int do_gso, int csum_offload)
858 {
859  u16 used, next, avail, n_buffers = 0, n_buffers_left = 0;
860  int is_pci = (type == VIRTIO_IF_TYPE_PCI);
861  int is_tun = (type == VIRTIO_IF_TYPE_TUN);
862  int is_indirect =
863  ((vif->features & VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC)) != 0);
864  int is_any_layout =
865  ((vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)) != 0);
866  u16 sz = vring->size;
867  int hdr_sz = vif->virtio_net_hdr_sz;
868  u16 mask = sz - 1;
869  u16 n_vectors = n_left;
870 
871  used = vring->desc_in_use;
872  next = vring->desc_next;
873  avail = vring->avail->idx;
874 
875  u16 free_desc_count = 0;
876 
/* NOTE(review): the condition line (877, presumably checking
 * VRING_TX_OUT_OF_ORDER) is missing from this capture. */
878  {
879  u32 first_free_desc_index = ~0;
880 
881  virtio_find_free_desc (vring, sz, mask, n_left, next,
882  &first_free_desc_index, &free_desc_count);
883 
884  if (free_desc_count)
885  next = first_free_desc_index;
886  }
887  else
888  free_desc_count = sz - used;
889 
/* drain previously buffered packets first to preserve ordering.
 * NOTE(review): the line reading 'bi' from the buffering FIFO (898)
 * is missing from this capture. */
890  if (vif->packet_buffering)
891  {
892  n_buffers = n_buffers_left = virtio_vring_n_buffers (vring->buffering);
893 
894  while (n_buffers_left && free_desc_count)
895  {
896  u16 n_added = 0;
897 
899  if (bi == ~0)
900  break;
901 
902  n_added = add_buffer_to_slot (vm, node, vif, vring, bi,
903  free_desc_count, avail, next, mask,
904  hdr_sz, do_gso, csum_offload, is_pci,
905  is_tun, is_indirect, is_any_layout);
906  if (PREDICT_FALSE (n_added == 0))
907  {
908  n_buffers_left--;
909  continue;
910  }
/* n_added > free_desc_count means "need more descriptors, retry" */
911  else if (PREDICT_FALSE (n_added > free_desc_count))
912  break;
913 
914  avail++;
915  next = (next + n_added) & mask;
916  used += n_added;
917  n_buffers_left--;
918  free_desc_count -= n_added;
919  }
920  }
921 
/* now the current frame */
922  while (n_left && free_desc_count)
923  {
924  u16 n_added = 0;
925 
926  n_added =
927  add_buffer_to_slot (vm, node, vif, vring, buffers[0], free_desc_count,
928  avail, next, mask, hdr_sz, do_gso, csum_offload,
929  is_pci, is_tun, is_indirect, is_any_layout);
930 
931  if (PREDICT_FALSE (n_added == 0))
932  {
933  buffers++;
934  n_left--;
935  continue;
936  }
937  else if (PREDICT_FALSE (n_added > free_desc_count))
938  break;
939 
940  avail++;
941  next = (next + n_added) & mask;
942  used += n_added;
943  buffers++;
944  n_left--;
945  free_desc_count -= n_added;
946  }
947 
/* publish avail index and kick unless device suppressed notifications.
 * NOTE(review): the second half of the NO_NOTIFY condition (line 954)
 * is missing from this capture. */
948  if (n_left != n_vectors || n_buffers != n_buffers_left)
949  {
950  clib_atomic_store_seq_cst (&vring->avail->idx, avail);
951  vring->desc_next = next;
952  vring->desc_in_use = used;
953  if ((clib_atomic_load_seq_cst (&vring->used->flags) &
955  virtio_kick (vm, vring, vif);
956  }
957 
958  return n_left;
959 }
960 
/* Dispatch the tx burst to the packed or split ring implementation,
 * forwarding the compile-time gso/csum specialization flags. */
961 static u16
964  virtio_vring_t *vring, u32 *buffers,
965  u16 n_left, int packed, int do_gso,
966  int csum_offload)
967 {
968  if (packed)
969  return virtio_interface_tx_packed_gso_inline (vm, node, vif, type, vring,
970  buffers, n_left,
971  do_gso, csum_offload);
972  else
973  return virtio_interface_tx_split_gso_inline (vm, node, vif, type, vring,
974  buffers, n_left,
975  do_gso, csum_offload);
976 }
977 
/* Select the gso / csum-offload specialization for the burst.
 * NOTE(review): the condition lines (985, 987, 992) choosing between
 * the three cases — presumably hw-interface capability checks — are
 * missing from this capture. */
978 static u16
980  virtio_if_t *vif, virtio_vring_t *vring,
981  virtio_if_type_t type, u32 *buffers, u16 n_left,
982  int packed)
983 {
984  vnet_main_t *vnm = vnet_get_main ();
986 
988  return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
989  buffers, n_left, packed,
990  1 /* do_gso */ ,
991  1 /* checksum offload */ );
993  return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
994  buffers, n_left, packed,
995  0 /* no do_gso */ ,
996  1 /* checksum offload */ );
997  else
998  return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
999  buffers, n_left, packed,
1000  0 /* no do_gso */ ,
1001  0 /* no checksum offload */ );
1002 }
1003 
/* Device-class tx function. Picks a tx queue by thread index, runs
 * optional GRO coalescing, frees completed descriptors, then enqueues
 * the frame with up to two retries. Leftovers are stashed in the
 * packet-buffering FIFO (if enabled) or dropped as NO_FREE_SLOTS.
 * NOTE(review): the TX_FN macro line, vif lookup, queue lock/unlock and
 * related lines are missing from this capture. */
1006  vlib_frame_t * frame)
1007 {
1009  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
1011  u16 qid = vm->thread_index % vif->num_txqs;
1012  virtio_vring_t *vring = vec_elt_at_index (vif->txq_vrings, qid);
1013  u16 n_left = frame->n_vectors;
1014  u32 *buffers = vlib_frame_vector_args (frame);
1015  u32 to[GRO_TO_VECTOR_SIZE (n_left)];
1016  int packed = vif->is_packed;
1017  u16 n_vectors = frame->n_vectors;
1018 
1020 
/* optional software GRO: may shrink the vector into 'to' */
1021  if (vif->packet_coalesce)
1022  {
1023  n_vectors = n_left =
1024  vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
1025  buffers = to;
1026  }
1027 
1028  u16 retry_count = 2;
1029 
1030 retry:
1031  /* free consumed buffers */
1032  virtio_free_used_device_desc (vm, vring, node->node_index, packed);
1033 
/* &buffers[n_vectors - n_left] resumes where the previous pass stopped */
1034  if (vif->type == VIRTIO_IF_TYPE_TAP)
1035  n_left = virtio_interface_tx_inline (vm, node, vif, vring,
1036  VIRTIO_IF_TYPE_TAP,
1037  &buffers[n_vectors - n_left],
1038  n_left, packed);
1039  else if (vif->type == VIRTIO_IF_TYPE_PCI)
1040  n_left = virtio_interface_tx_inline (vm, node, vif, vring,
1041  VIRTIO_IF_TYPE_PCI,
1042  &buffers[n_vectors - n_left],
1043  n_left, packed);
1044  else if (vif->type == VIRTIO_IF_TYPE_TUN)
1045  n_left = virtio_interface_tx_inline (vm, node, vif, vring,
1046  VIRTIO_IF_TYPE_TUN,
1047  &buffers[n_vectors - n_left],
1048  n_left, packed);
1049  else
1050  ASSERT (0);
1051 
1052  if (n_left && retry_count--)
1053  goto retry;
1054 
/* stash what still did not fit, if buffering is enabled */
1055  if (vif->packet_buffering && n_left)
1056  {
1057  u16 n_buffered = virtio_vring_buffering_store_packets (vring->buffering,
1058  &buffers
1059  [n_vectors
1060  - n_left],
1061  n_left);
1062  n_left -= n_buffered;
1063  }
1064  if (n_left)
1065  virtio_interface_drop_inline (vm, vif, node->node_index,
1066  &buffers[n_vectors - n_left], n_left,
1067  VIRTIO_TX_ERROR_NO_FREE_SLOTS);
1068 
1070 
1071  return frame->n_vectors - n_left;
1072 }
1073 
/* rx_redirect_to_node callback: point the interface's rx path at
 * node_index, or restore the default next node when node_index is ~0.
 * NOTE(review): the vif lookup and the two vlib_node_add_next /
 * per_interface_next_index assignment lines are missing from this
 * capture. */
1074 static void
1076  u32 node_index)
1077 {
1078  virtio_main_t *apm = &virtio_main;
1079  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1081 
1082  /* Shut off redirection */
1083  if (node_index == ~0)
1084  {
1086  return;
1087  }
1088 
1091  node_index);
1092 }
1093 
/* clear_counters callback — intentionally a no-op for virtio. */
1094 static void
1096 {
1097  /* Nothing for now */
1098 }
1099 
/* Enable rx interrupts: clear EVENT_F_DISABLE on packed rings.
 * NOTE(review): the split-ring else branch (line 1106) is missing from
 * this capture. */
1100 static void
1102 {
1103  if (vif->is_packed)
1104  vring->driver_event->flags &= ~VRING_EVENT_F_DISABLE;
1105  else
1107 }
1108 
/* Disable rx interrupts (polling mode): set EVENT_F_DISABLE on packed
 * rings. NOTE(review): the split-ring else branch (line 1115) is
 * missing from this capture. */
1109 static void
1111 {
1112  if (vif->is_packed)
1113  vring->driver_event->flags |= VRING_EVENT_F_DISABLE;
1114  else
1116 }
1117 
/* rx_mode_change callback: switch a queue between polling and
 * interrupt mode. PCI devices without interrupt support are forced to
 * polling. A global interrupt-queue count gates a side effect (its
 * body, lines 1141-1143 / 1153-1155, is missing from this capture —
 * likely toggling the gro/buffering maintenance process). */
1118 static clib_error_t *
1121 {
1122  vlib_main_t *vm = vnm->vlib_main;
1123  virtio_main_t *mm = &virtio_main;
1124  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1126  virtio_vring_t *rx_vring = vec_elt_at_index (vif->rxq_vrings, qid);
1127 
1128  if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
1129  {
1130  virtio_set_rx_polling (vif, rx_vring);
1131  return clib_error_return (0, "interrupt mode is not supported");
1132  }
1133 
1134  if (mode == VNET_HW_IF_RX_MODE_POLLING)
1135  {
1136  if (vif->packet_coalesce || vif->packet_buffering)
1137  {
1138  if (mm->interrupt_queues_count > 0)
1139  mm->interrupt_queues_count--;
1140  if (mm->interrupt_queues_count == 0)
1144  }
1145  virtio_set_rx_polling (vif, rx_vring);
1146  }
1147  else
1148  {
1149  if (vif->packet_coalesce || vif->packet_buffering)
1150  {
1151  mm->interrupt_queues_count++;
1152  if (mm->interrupt_queues_count == 1)
1156  }
1157  virtio_set_rx_interrupt (vif, rx_vring);
1158  }
1159 
1160  rx_vring->mode = mode;
1161 
1162  return 0;
1163 }
1164 
/* admin_up_down callback: track the admin state in vif->flags.
 * NOTE(review): the vif lookup and the hw link-state update lines
 * (1170, 1175-1176, 1181) are missing from this capture. */
1165 static clib_error_t *
1167 {
1168  virtio_main_t *mm = &virtio_main;
1169  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1171 
1172  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
1173  {
1174  vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
1177  }
1178  else
1179  {
1180  vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;
1182  }
1183  return 0;
1184 }
1185 
/* subif_add_del callback — sub-interfaces not supported; always OK. */
1186 static clib_error_t *
1188  u32 hw_if_index,
1189  struct vnet_sw_interface_t *st, int is_add)
1190 {
1191  /* Nothing for now */
1192  return 0;
1193 }
1194 
/* Device-class registration wiring the callbacks above into vnet.
 * NOTE(review): the VNET_DEVICE_CLASS (virtio_device_class) = { opening
 * line (1196) is missing from this capture. */
1195 /* *INDENT-OFF* */
1197  .name = "virtio",
1198  .format_device_name = format_virtio_device_name,
1199  .format_device = format_virtio_device,
1200  .format_tx_trace = format_virtio_tx_trace,
1201  .tx_function_n_errors = VIRTIO_TX_N_ERROR,
1202  .tx_function_error_strings = virtio_tx_func_error_strings,
1203  .rx_redirect_to_node = virtio_set_interface_next_node,
1204  .clear_counters = virtio_clear_hw_interface_counters,
1205  .admin_up_down_function = virtio_interface_admin_up_down,
1206  .subif_add_del_function = virtio_subif_add_del_function,
1207  .rx_mode_change_function = virtio_interface_rx_mode_change,
1208 };
1209 
1210 /* *INDENT-ON* */
1211 
1212 /*
1213  * fd.io coding-style-patch-verification: ON
1214  *
1215  * Local Variables:
1216  * eval: (c-set-style "gnu")
1217  * End:
1218  */
u32 per_interface_next_index
Definition: virtio.h:133
gro_flow_table_t * flow_table
Definition: virtio.h:110
vlib_buffer_t buffer
Definition: device.c:80
vnet_buffer_oflags_t
Definition: buffer.h:118
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:133
static void set_checksum_offsets(vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr, const int is_l2)
Definition: device.c:287
vlib_node_registration_t virtio_input_node
(constructor) VLIB_REGISTER_NODE (virtio_input_node)
Definition: node.c:498
static uword vlib_buffer_get_current_pa(vlib_main_t *vm, vlib_buffer_t *b)
Definition: buffer_funcs.h:494
static_always_inline u16 virtio_vring_n_buffers(virtio_vring_buffering_t *buffering)
VNET_DEVICE_CLASS_TX_FN() virtio_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: device.c:1004
vring_desc_event_t * device_event
Definition: virtio.h:79
virtio_if_t * interfaces
Definition: virtio.h:220
static u16 add_buffer_to_slot_packed(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_vring_t *vring, u32 bi, u16 next, int hdr_sz, int do_gso, int csum_offload, int is_pci, int is_tun, int is_indirect, int is_any_layout)
Definition: device.c:584
#define CLIB_UNUSED(x)
Definition: clib.h:90
#define VIRTIO_TX_MAX_CHAIN_LEN
Definition: device.c:34
static clib_error_t * virtio_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: device.c:1166
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:982
#define VIRTIO_NET_HDR_F_NEEDS_CSUM
Definition: virtio_std.h:142
static u16 add_buffer_to_slot(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_vring_t *vring, u32 bi, u16 free_desc_count, u16 avail, u16 next, u16 mask, int hdr_sz, int do_gso, int csum_offload, int is_pci, int is_tun, int is_indirect, int is_any_layout)
Definition: device.c:377
#define vnet_buffer2(b)
Definition: buffer.h:499
vnet_interface_main_t interface_main
Definition: vnet.h:81
static clib_error_t * virtio_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_if_rx_mode mode)
Definition: device.c:1119
#define PREDICT_TRUE(x)
Definition: clib.h:125
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:119
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:140
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
static u16 virtio_interface_tx_split_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, u32 *buffers, u16 n_left, int do_gso, int csum_offload)
Definition: device.c:853
vring_used_elem_t ring[0]
Definition: virtio_std.h:121
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
vnet_hw_interface_capabilities_t caps
Definition: interface.h:645
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
Definition: lock.h:129
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
static u16 virtio_interface_tx_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_vring_t *vring, virtio_if_type_t type, u32 *buffers, u16 n_left, int packed)
Definition: device.c:979
u32 thread_index
Definition: main.h:213
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:122
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: nat44_ei.c:3048
static heap_elt_t * last(heap_header_t *h)
Definition: heap.c:53
static void virtio_free_used_device_desc_split(vlib_main_t *vm, virtio_vring_t *vring, uword node_index)
Definition: device.c:169
#define VIRTIO_EVENT_START_TIMER
Definition: virtio.h:60
static u32 format_get_indent(u8 *s)
Definition: format.h:72
#define STRUCT_OFFSET_OF(t, f)
Definition: clib.h:73
static_always_inline u32 virtio_vring_buffering_read_from_front(virtio_vring_buffering_t *buffering)
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
Definition: counter.h:74
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
Definition: interface.c:513
struct _tcp_header tcp_header_t
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1177
unsigned char u8
Definition: types.h:56
vlib_buffer_t ** b
static u16 virtio_interface_tx_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, u32 *buffers, u16 n_left, int packed, int do_gso, int csum_offload)
Definition: device.c:962
#define VIRTIO_FEATURE(X)
Definition: virtio_std.h:69
vring_desc_t * desc
Definition: virtio.h:71
unsigned int u32
Definition: types.h:88
static void virtio_set_rx_polling(virtio_if_t *vif, virtio_vring_t *vring)
Definition: device.c:1110
#define VRING_AVAIL_F_NO_INTERRUPT
Definition: virtio_std.h:93
u64 features
Definition: virtio.h:131
VNET_DEVICE_CLASS(af_xdp_device_class)
u32 hw_if_index
Definition: virtio.h:152
static clib_error_t * virtio_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
Definition: device.c:1187
return frame n_vectors
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
u8 support_int_mode
Definition: virtio.h:206
description fragment has unexpected format
Definition: map.api:433
static void virtio_free_used_device_desc(vlib_main_t *vm, virtio_vring_t *vring, uword node_index, int packed)
Definition: device.c:276
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
clib_spinlock_t lockp
Definition: virtio.h:66
#define clib_error_return(e, args...)
Definition: error.h:99
vnet_main_t * vnet_get_main(void)
#define VNET_DEVICE_CLASS_TX_FN(devclass)
Definition: interface.h:317
#define foreach_virtio_tx_func_error
Definition: device.c:36
bool is_ip6
Definition: ip.api:43
static void vlib_buffer_free_from_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring.
vl_api_fib_path_type_t type
Definition: fib_types.api:123
#define VRING_USED_F_NO_NOTIFY
Definition: virtio_std.h:92
Definition: cJSON.c:88
u16 * next
u32 * to
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:553
format_function_t format_vnet_buffer
Definition: buffer.h:515
vlib_main_t * vlib_main
Definition: vnet.h:111
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
Definition: node_funcs.h:1019
vring_avail_t * avail
Definition: virtio.h:73
vring_desc_event_t * driver_event
Definition: virtio.h:78
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:708
static void virtio_find_free_desc(virtio_vring_t *vring, u16 size, u16 mask, u16 req, u16 next, u32 *first_free_desc_index, u16 *free_desc_count)
Definition: device.c:817
unsigned short u16
Definition: types.h:57
vnet_hw_if_rx_mode mode
Definition: virtio.h:108
u32 size
Definition: vhost_user.h:125
static u8 * format_virtio_tx_trace(u8 *s, va_list *va)
Definition: device.c:84
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:257
static u8 * format_virtio_device(u8 *s, va_list *args)
Definition: device.c:60
#define PREDICT_FALSE(x)
Definition: clib.h:124
vl_api_ip4_address_t ip4
Definition: one.api:376
vnet_main_t vnet_main
Definition: misc.c:43
vlib_simple_counter_main_t * sw_if_counters
Definition: interface.h:1022
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
u32 node_index
Node index.
Definition: node.h:479
u32 interrupt_queues_count
Definition: virtio.h:216
static void virtio_free_used_device_desc_packed(vlib_main_t *vm, virtio_vring_t *vring, uword node_index)
Definition: device.c:235
#define VIRTIO_NET_HDR_GSO_TCPV4
Definition: virtio_std.h:146
u32 n_left
vl_api_tunnel_mode_t mode
Definition: gre.api:48
int packet_coalesce
Definition: virtio.h:156
static void virtio_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: device.c:1075
vring_used_t * used
Definition: virtio.h:72
u16 desc_next
Definition: virtio.h:87
static void virtio_set_rx_interrupt(virtio_if_t *vif, virtio_vring_t *vring)
Definition: device.c:1101
u16 virtio_net_hdr_sz
Definition: virtio.h:149
u8 slot
Definition: pci_types.api:22
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
Definition: format.c:97
virtio_vring_t * rxq_vrings
Definition: virtio.h:136
format_function_t format_virtio_device_name
Definition: virtio.h:247
u16 last_used_idx
Definition: virtio.h:88
vlib_node_registration_t virtio_send_interrupt_node
(constructor) VLIB_REGISTER_NODE (virtio_send_interrupt_node)
#define clib_atomic_load_seq_cst(a)
Definition: atomics.h:55
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:261
vl_api_pnat_mask_t mask
Definition: pnat.api:45
u32 flags
Definition: virtio.h:132
static void set_gso_offsets(vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr, const int is_l2)
Definition: device.c:337
virtio_if_type_t type
Definition: virtio.h:150
static void virtio_tx_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b0, u32 bi, int is_tun)
Definition: device.c:105
vring_packed_desc_t * packed_desc
Definition: virtio.h:77
#define ASSERT(truth)
u16 avail_wrap_counter
Definition: virtio.h:100
static_always_inline u32 vnet_gro_inline(vlib_main_t *vm, gro_flow_table_t *flow_table, u32 *from, u16 n_left_from, u32 *to)
coalesce buffers with flow tables
Definition: gro_func.h:559
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:201
u32 buffer_index
Definition: device.c:77
u32 n_buffers
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
Definition: string.h:92
virtio_tx_func_error_t
Definition: device.c:45
static_always_inline void clib_memset_u8(void *p, u8 val, uword count)
Definition: string.h:441
static uword pointer_to_uword(const void *p)
Definition: types.h:131
#define VRING_DESC_F_NEXT
Definition: virtio_std.h:73
virtio_main_t virtio_main
Definition: virtio.c:37
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:38
#define VRING_TX_OUT_OF_ORDER
Definition: virtio.h:105
static_always_inline void vnet_generic_header_offset_parser(vlib_buffer_t *b0, generic_header_offset_t *gho, int is_l2, int is_ip4, int is_ip6)
virtio_vring_buffering_t * buffering
Definition: virtio.h:109
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
u16 used_wrap_counter
Definition: virtio.h:101
Definition: defs.h:47
#define clib_atomic_store_seq_cst(a, b)
Definition: atomics.h:53
static void virtio_interface_drop_inline(vlib_main_t *vm, virtio_if_t *vif, uword node_index, u32 *buffers, u16 n, virtio_tx_func_error_t error)
Definition: device.c:141
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:149
int packet_buffering
Definition: virtio.h:157
u32 instance
Definition: gre.api:51
static void virtio_clear_hw_interface_counters(u32 instance)
Definition: device.c:1095
#define GRO_TO_VECTOR_SIZE(X)
Definition: gro.h:27
VLIB buffer representation.
Definition: buffer.h:111
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:301
node node_index
static_always_inline u8 * format_generic_header_offset(u8 *s, va_list *args)
u32 * buffers
Definition: virtio.h:82
static_always_inline u16 virtio_vring_buffering_store_packets(virtio_vring_buffering_t *buffering, u32 *bi, u16 n_store)
#define vnet_buffer(b)
Definition: buffer.h:437
u32 sw_if_index
Definition: virtio.h:153
nat44_ei_main_t * nm
#define VNET_HW_INTERFACE_CAP_SUPPORTS_L4_TX_CKSUM
Definition: interface.h:555
#define VRING_DESC_F_USED
Definition: virtio_std.h:78
vnet_hw_if_rx_mode
Definition: interface.h:53
generic_header_offset_t gho
Definition: device.c:79
int is_packed
Definition: virtio.h:211
#define VIRTIO_EVENT_STOP_TIMER
Definition: virtio.h:61
u8 count
Definition: dhcp.api:208
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:137
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:628
static uword virtio_interface_tx_packed_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, u32 *buffers, u16 n_left, const int do_gso, const int csum_offload)
Definition: device.c:732
virtio_if_type_t
Definition: virtio.h:50
static void virtio_memset_ring_u32(u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Definition: device.c:153
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
Definition: lock.h:106
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
static u16 ip4_header_checksum(ip4_header_t *i)
Definition: ip4_packet.h:314
u16 desc_in_use
Definition: virtio.h:86
#define VRING_DESC_F_INDIRECT
Definition: virtio_std.h:75
static char * virtio_tx_func_error_strings[]
Definition: device.c:53
#define VIRTIO_NET_HDR_GSO_TCPV6
Definition: virtio_std.h:148
#define VRING_DESC_F_AVAIL
Definition: virtio_std.h:77
static_always_inline void virtio_kick(vlib_main_t *vm, virtio_vring_t *vring, virtio_if_t *vif)
Definition: virtio.h:251
static_always_inline void clib_memset_u32(void *p, u32 val, uword count)
Definition: string.h:349