FD.io VPP  v19.04.4-rc0-5-ge88582fac
Vector Packet Processing
vhost_user_input.c
/*
 *------------------------------------------------------------------
 * vhost-user-input
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

/*
 * When an RX queue is down but active, received packets
 * must be discarded. This value controls up to how many
 * packets will be discarded during each round.
 */
#define VHOST_USER_DOWN_DISCARD_COUNT 256

/*
 * When the number of available buffers gets under this threshold,
 * the RX node will start discarding packets.
 */
#define VHOST_USER_RX_BUFFER_STARVATION 32

/*
 * On the receive side, the host should free descriptors as soon
 * as possible in order to avoid TX drops in the VM.
 * This value controls the number of copy operations that are stacked
 * before copy is done for all and descriptors are given back to
 * the guest.
 * The value 64 was obtained by testing (48 and 128 were not as good).
 */
#define VHOST_USER_RX_COPY_THRESHOLD 64

extern vlib_node_registration_t vhost_user_input_node;
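
/*
 * A note on the index arithmetic used throughout this file: avail->idx,
 * used->idx, last_avail_idx and last_used_idx are free-running 16-bit
 * counters, and only (idx & qsz_mask) selects a ring slot. For example,
 * with a 256-entry ring (qsz_mask == 255), last_avail_idx == 0xffff and
 * avail->idx == 0x0001 leave (u16) (0x0001 - 0xffff) == 2 descriptors
 * to process, starting at slot 0xffff & 255 == 255; the unsigned
 * subtraction stays correct across wrap-around.
 */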

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error")  \
  _(NO_BUFFER, "no available buffer")  \
  _(MMAP_FAIL, "mmap failure")  \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table")  \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
    VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;

static __clib_unused char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};

static_always_inline void
vhost_user_rx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * txvq,
		     u16 last_avail_idx)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->desc[desc_current];
  if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
    }
  if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
    }
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
    }
}

static_always_inline u32
vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		       u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
	return 1;

      while (PREDICT_TRUE (copy_len >= 4))
	{
	  src0 = src2;
	  src1 = src3;

	  if (PREDICT_FALSE
	      (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
	    return 1;

	  CLIB_PREFETCH (src2, 64, LOAD);
	  CLIB_PREFETCH (src3, 64, LOAD);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	return 1;
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}
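
/*
 * The 4-deep loop above is software-pipelined: while the copies for
 * cpy[0] and cpy[1] execute, the guest mappings for cpy[2] and cpy[3]
 * are resolved and their source cachelines prefetched, hiding the
 * map_guest_mem() and memory latency one iteration ahead. A failed
 * mapping aborts the whole batch with a non-zero return value, which
 * the caller reports as MMAP_FAIL.
 */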

/**
 * Try to discard packets from the tx ring (VPP RX path).
 * Returns the number of discarded packets.
 */
static_always_inline u32
vhost_user_rx_discard_packet (vlib_main_t * vm,
			      vhost_user_intf_t * vui,
			      vhost_user_vring_t * txvq, u32 discard_max)
{
  /*
   * On the RX side, each packet corresponds to one descriptor
   * (it is the same whether it is a shallow descriptor, chained, or indirect).
   * Therefore, discarding a packet is like discarding a descriptor.
   */
  u32 discarded_packets = 0;
  u32 avail_idx = txvq->avail->idx;
  u16 mask = txvq->qsz_mask;
  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;
  while (discarded_packets != discard_max)
    {
      if (avail_idx == last_avail_idx)
	goto out;

      u16 desc_chain_head = txvq->avail->ring[last_avail_idx & mask];
      last_avail_idx++;
      txvq->used->ring[last_used_idx & mask].id = desc_chain_head;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
      last_used_idx++;
      discarded_packets++;
    }

out:
  txvq->last_avail_idx = last_avail_idx;
  txvq->last_used_idx = last_used_idx;
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);
  return discarded_packets;
}
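
/*
 * A discarded packet is still returned to the guest as a used element,
 * but with len == 0, i.e. the descriptor chain head is recycled without
 * any data having been written to it. For example, with discard_max of
 * 256 but only 3 descriptors pending, the loop exits at avail_idx and
 * the function returns 3.
 */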

/*
 * In case of overflow, we need to rewind the array of allocated buffers.
 */
static_always_inline void
vhost_user_input_rewind_buffers (vlib_main_t * vm,
				 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
{
  u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
  vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
  b_current->current_length = 0;
  b_current->flags = 0;
  while (b_current != b_head)
    {
      cpu->rx_buffers_len++;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_current = vlib_get_buffer (vm, bi_current);
      b_current->current_length = 0;
      b_current->flags = 0;
    }
  cpu->rx_buffers_len++;
}
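
/*
 * Note that rx_buffers is consumed from the end of the array
 * (rx_buffers_len is decremented for each buffer taken), so the most
 * recently taken buffer is rx_buffers[rx_buffers_len]. Rewinding walks
 * from there back to b_head, clearing lengths and flags and bumping
 * rx_buffers_len so that the whole chain, head included, returns to
 * the free pool.
 */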

static_always_inline u32
vhost_user_if_input (vlib_main_t * vm,
		     vhost_user_main_t * vum,
		     vhost_user_intf_t * vui,
		     u16 qid, vlib_node_runtime_t * node,
		     vnet_hw_interface_rx_mode mode)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u32 current_config_index = ~(u32) 0;
  u16 mask = txvq->qsz_mask;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->avail == 0))
    goto done;

  {
    /* do we have pending interrupts ? */
    vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
    f64 now = vlib_time_now (vm);

    if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
      vhost_user_send_call (vm, txvq);

    if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
      vhost_user_send_call (vm, rxvq);
  }

  /*
   * Adaptive mode is optimized to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver we do not want interrupts.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode. We must then tell the driver we want interrupts again.
   */
  if (PREDICT_FALSE (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) &&
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used->flags = VRING_USED_F_NO_NOTIFY;
    }

  if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
    goto done;

  n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);

  /* nothing to do */
  if (PREDICT_FALSE (n_left == 0))
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      vhost_user_rx_discard_packet (vm, vui, txvq,
				    VHOST_USER_DOWN_DISCARD_COUNT);
      goto done;
    }

  if (PREDICT_FALSE (n_left == (mask + 1)))
    {
      /*
       * Informational error logging when VPP is not
       * receiving packets fast enough.
       */
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
    }

  if (n_left > VLIB_FRAME_SIZE)
    n_left = VLIB_FRAME_SIZE;

  /*
   * For small packets (<2kB), we will not need more than one vlib buffer
   * per packet. If packets are bigger, we will just yield at some point
   * in the loop and come back later. This is not an issue, since for big
   * packets the processing cost really comes from the memory copy.
   * The assumption is that big packets will fit in 40 buffers.
   */
  if (PREDICT_FALSE (cpu->rx_buffers_len < n_left + 1 ||
		     cpu->rx_buffers_len < 40))
    {
      u32 curr_len = cpu->rx_buffers_len;
      cpu->rx_buffers_len +=
	vlib_buffer_alloc (vm, cpu->rx_buffers + curr_len,
			   VHOST_USER_RX_BUFFERS_N - curr_len);

      if (PREDICT_FALSE
	  (cpu->rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION))
	{
	  /* In case of buffer starvation, discard some packets from the queue
	   * and log the event.
	   * We keep doing best effort for the remaining packets. */
	  u32 flush = (n_left + 1 > cpu->rx_buffers_len) ?
	    n_left + 1 - cpu->rx_buffers_len : 1;
	  flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);

	  n_left -= flush;
	  vlib_increment_simple_counter (vnet_main.
					 interface_main.sw_if_counters +
					 VNET_INTERFACE_COUNTER_DROP,
					 vm->thread_index, vui->sw_if_index,
					 flush);

	  vlib_error_count (vm, vhost_user_input_node.index,
			    VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
	}
    }
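
  /*
   * A worked example of the flush computation above: with n_left == 100
   * pending descriptors but only cpu->rx_buffers_len == 20 buffers left
   * after the allocation attempt, flush = 100 + 1 - 20 = 81 packets are
   * discarded (and counted as drops), so the remaining 19 descriptors
   * can still be serviced on a best-effort basis.
   */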

  if (PREDICT_FALSE (vnet_have_features (feature_arc_idx, vui->sw_if_index)))
    {
      vnet_feature_config_main_t *cm;
      cm = &fm->feature_config_mains[feature_arc_idx];
      current_config_index = vec_elt (cm->config_index_by_sw_if_index,
				      vui->sw_if_index);
      vnet_get_config_data (&cm->config_main, &current_config_index,
			    &next_index, 0);
    }

  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;

  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

  if (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT)
    {
      /* give some hints to ethernet-input */
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
      f = vlib_get_frame (vm, nf->frame);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = vui->sw_if_index;
      ef->hw_if_index = vui->hw_if_index;
      vlib_frame_no_append (f);
    }
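
  /*
   * ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX tells ethernet-input that all
   * buffers in this frame come from the same interface, so it can read
   * sw_if_index once from the frame scalar data instead of per packet,
   * and vlib_frame_no_append () keeps other writers from adding buffers
   * that would break this invariant.
   */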

  while (n_left > 0)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u16 desc_current;
      u32 desc_data_offset;
      vring_desc_t *desc_table = txvq->desc;

      if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
	{
	  /* Not enough rx_buffers
	   * Note: We yield on 1 so we don't need to do an additional
	   * check for the next buffer prefetch.
	   */
	  n_left = 0;
	  break;
	}

      desc_current = txvq->avail->ring[last_avail_idx & mask];
      cpu->rx_buffers_len--;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_head = b_current = vlib_get_buffer (vm, bi_current);
      to_next[0] = bi_current;	// We do that now so we can forget about bi_current
      to_next++;
      n_left_to_next--;

      vlib_prefetch_buffer_with_index
	(vm, cpu->rx_buffers[cpu->rx_buffers_len - 1], LOAD);

      /* Just preset the used descriptor id and length for later */
      txvq->used->ring[last_used_idx & mask].id = desc_current;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if (PREDICT_FALSE (n_trace))
	{
	  // TODO: next_index is not exactly known at that point
	  vlib_trace_buffer (vm, node, next_index, b_head,
			     /* follow_chain */ 0);
	  vhost_trace_t *t0 =
	    vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
	  vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
	  n_trace--;
	  vlib_set_trace_count (vm, node, n_trace);
	}

      /* This depends on the setup but is very consistent,
       * so the CPU branch predictor should do a pretty good job
       * at optimizing the decision. */
      if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
	{
	  desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
				      &map_hint);
	  desc_current = 0;
	  if (PREDICT_FALSE (desc_table == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	}

      if (PREDICT_TRUE (vui->is_any_layout) ||
	  (!(desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT)))
	{
	  /* ANYLAYOUT or single buffer */
	  desc_data_offset = vui->virtio_net_hdr_sz;
	}
      else
	{
	  /* CSR case without ANYLAYOUT, skip 1st buffer */
	  desc_data_offset = desc_table[desc_current].len;
	}
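
      /*
       * Example: with the 12-byte virtio_net_hdr_mrg_rxbuf_t header
       * (when mergeable rx buffers are negotiated) and ANYLAYOUT, packet
       * data starts at offset 12 within the first descriptor. Without
       * ANYLAYOUT, the first descriptor carries only the header, so
       * desc_data_offset is set to its full length and the loop below
       * immediately chains to the next descriptor.
       */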

      while (1)
	{
	  /* Get more input if necessary. Or end of packet. */
	  if (desc_data_offset == desc_table[desc_current].len)
	    {
	      if (PREDICT_FALSE (desc_table[desc_current].flags &
				 VIRTQ_DESC_F_NEXT))
		{
		  desc_current = desc_table[desc_current].next;
		  desc_data_offset = 0;
		}
	      else
		{
		  goto out;
		}
	    }

	  /* Get more output if necessary. Or end of packet. */
	  if (PREDICT_FALSE (b_current->current_length == buffer_data_size))
	    {
	      if (PREDICT_FALSE (cpu->rx_buffers_len == 0))
		{
		  /* Cancel speculation */
		  to_next--;
		  n_left_to_next++;

		  /*
		   * Check whether any buffers are left.
		   * If not, just rewind the used buffers and stop.
		   * Note: Scheduled copies are not cancelled. This is
		   * not an issue as they would still be valid. Useless,
		   * but valid.
		   */
		  vhost_user_input_rewind_buffers (vm, cpu, b_head);
		  n_left = 0;
		  goto stop;
		}

	      /* Get next output */
	      cpu->rx_buffers_len--;
	      u32 bi_next = cpu->rx_buffers[cpu->rx_buffers_len];
	      b_current->next_buffer = bi_next;
	      b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
	      bi_current = bi_next;
	      b_current = vlib_get_buffer (vm, bi_current);
	    }

	  /* Prepare a copy order executed later for the data */
	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  copy_len++;
	  u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;
	  cpy->len = buffer_data_size - b_current->current_length;
	  cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
	  cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
			      b_current->current_length);
	  cpy->src = desc_table[desc_current].addr + desc_data_offset;

	  desc_data_offset += cpy->len;

	  b_current->current_length += cpy->len;
	  b_head->total_length_not_including_first_buffer += cpy->len;
	}
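
      /*
       * Nothing has been copied at this point: each vhost_copy_t only
       * records a (guest source, buffer destination, length) triple.
       * The batch is executed by vhost_user_input_copy (), either below
       * when VHOST_USER_RX_COPY_THRESHOLD is reached, or after the
       * parsing loop ends.
       */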

    out:

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      /* consume the descriptor and return it as used */
      last_avail_idx++;
      last_used_idx++;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      b_head->error = 0;

      if (current_config_index != ~(u32) 0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

      n_left--;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we perform the copies from time to time in
       * order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
						    copy_len, &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_STORE_BARRIER ();
	  txvq->used->idx = last_used_idx;
	  vhost_user_log_dirty_ring (vui, txvq, idx);
	}
    }
stop:
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  txvq->last_used_idx = last_used_idx;
  txvq->last_avail_idx = last_avail_idx;

  /* Do the memory copies */
  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy, copy_len,
					    &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }

  /* give buffers back to driver */
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      txvq->n_since_last_int += n_rx_packets;

      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, txvq);
    }
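
  /*
   * Interrupts are coalesced: n_since_last_int accumulates across calls
   * and the guest is only signalled immediately once it exceeds
   * vum->coalesce_frames; otherwise the interrupt stays pending and is
   * fired by the int_deadline check at the top of this function, which
   * bounds the added latency.
   */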

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

done:
  return n_rx_packets;
}

VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * frame)
{
  vhost_user_main_t *vum = &vhost_user_main;
  uword n_rx_packets = 0;
  vhost_user_intf_t *vui;
  vnet_device_input_runtime_t *rt =
    (vnet_device_input_runtime_t *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  vec_foreach (dq, rt->devices_and_queues)
  {
    if ((node->state == VLIB_NODE_STATE_POLLING) ||
	clib_atomic_swap_acq_n (&dq->interrupt_pending, 0))
      {
	vui =
	  pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
	n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id, node,
					     dq->mode);
      }
  }

  return n_rx_packets;
}
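
/*
 * The dispatch function above walks every (device, queue) pair assigned
 * to this worker thread. In polling mode each queue is serviced on every
 * pass; in interrupt or adaptive mode a queue is serviced only when its
 * interrupt_pending flag was set by the guest kicking the queue, and the
 * atomic swap reads and clears that flag in a single step.
 */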

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vhost_user_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "vhost-user-input",
  .sibling_of = "device-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_vhost_trace,

  .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
  .error_strings = vhost_user_input_func_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */