FD.io VPP  v21.06
Vector Packet Processing
node.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>
#include <memif/private.h>

#define foreach_memif_input_error                                         \
  _ (BUFFER_ALLOC_FAIL, buffer_alloc, ERROR, "buffer allocation failed")  \
  _ (BAD_DESC, bad_desc, ERROR, "bad descriptor")                         \
  _ (NOT_IP, not_ip, INFO, "not ip packet")

typedef enum
{
#define _(f, n, s, d) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;

static vlib_error_desc_t memif_input_error_counters[] = {
#define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
  foreach_memif_input_error
#undef _
};
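/* The foreach_memif_input_error X-macro expands once per error into both the
 * memif_input_error_t enum (MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL,
 * MEMIF_INPUT_ERROR_BAD_DESC, MEMIF_INPUT_ERROR_NOT_IP) and the matching
 * vlib_error_desc_t counter table, so the two can never get out of sync. */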

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;

static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
              t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
              t->ring);
  return s;
}

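/* In IP mode there is no ethernet header to dispatch on, so peek at the
 * first nibble of the payload: 0x4 selects ip4-input, 0x6 selects ip6-input,
 * anything else is counted as "not ip" and dropped. */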
static_always_inline u32
memif_next_from_ip_hdr (vlib_node_runtime_t * node, vlib_buffer_t * b)
{
  u8 *ptr = vlib_buffer_get_current (b);
  u8 v = *ptr & 0xf0;

  if (PREDICT_TRUE (v == 0x40))
    return VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
  else if (PREDICT_TRUE (v == 0x60))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;

  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
  return VNET_DEVICE_INPUT_NEXT_DROP;
}

static_always_inline void
memif_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
                    memif_if_t * mif, vlib_buffer_t * b, u32 next, u16 qid,
                    uword * n_tracep)
{
  if (PREDICT_TRUE
      (b != 0 && vlib_trace_buffer (vm, node, next, b, /* follow_chain */ 0)))
    {
      memif_input_trace_t *tr;
      vlib_set_trace_count (vm, node, --(*n_tracep));
      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
      tr->next_index = next;
      tr->hw_if_index = mif->hw_if_index;
      tr->ring = qid;
    }
}

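/* Record a pending memcpy: source pointer into the shared-memory region,
 * length, and destination offset within the buffer identified by its index
 * in ptd->buffers.  The copies are executed later in one batched pass. */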
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
                   u16 buffer_offset, u16 buffer_vec_index)
{
  memif_copy_op_t *co;
  vec_add2_aligned (ptd->copy_ops, co, 1, CLIB_CACHE_LINE_BYTES);
  co->data = data;
  co->data_len = len;
  co->buffer_offset = buffer_offset;
  co->buffer_vec_index = buffer_vec_index;
}

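/* If a packet is larger than a single vlib buffer, trim the head buffer to
 * buffer_size and link the remaining bytes through the pre-allocated buffers
 * that follow it, building a standard VLIB buffer chain. */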
static_always_inline void
memif_add_to_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * buffers,
                    u32 buffer_size)
{
  vlib_buffer_t *seg = b;
  i32 bytes_left = b->current_length - buffer_size + b->current_data;

  if (PREDICT_TRUE (bytes_left <= 0))
    return;

  b->current_length -= bytes_left;
  b->total_length_not_including_first_buffer = bytes_left;

  while (bytes_left)
    {
      seg->flags |= VLIB_BUFFER_NEXT_PRESENT;
      seg->next_buffer = buffers[0];
      seg = vlib_get_buffer (vm, buffers[0]);
      buffers++;
      seg->current_data = 0;
      seg->current_length = clib_min (buffer_size, bytes_left);
      bytes_left -= seg->current_length;
    }
}

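/*
 * Copy-mode RX path.  One call drains a single queue of a single interface
 * and runs in phases: parse ring descriptors into packet/copy operation
 * vectors, allocate vlib buffers, execute the copies, fill in buffer
 * metadata, optionally trace, enqueue to the next node(s), bump interface
 * counters and finally refill the ring (M2S only) so the peer can keep
 * transmitting.
 */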
static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           memif_if_t * mif, memif_ring_type_t type, u16 qid,
                           memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u16 buffer_size = vlib_buffer_get_default_data_size (vm);
  uword n_trace;
  u16 nexts[MEMIF_RX_VECTOR_SZ], *next = nexts;
  u32 _to_next_bufs[MEMIF_RX_VECTOR_SZ], *to_next_bufs = _to_next_bufs, *bi;
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 n_left, n_left_to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
                                                   thread_index);
  vlib_buffer_t bt;
  u16 cur_slot, last_slot, ring_size, n_slots, mask;
  i16 start_offset;
  u16 n_buffers = 0, n_alloc;
  memif_copy_op_t *co;
  memif_packet_op_t *po;
  memif_region_index_t last_region = ~0;
  void *last_region_shm = 0;
  void *last_region_max = 0;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  /* assume that somebody will want to add ethernet header on the packet
     so start with IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;

  /* for S2M rings, we are consumers of packet buffers, and for M2S rings we
     are producers of empty buffers */
  cur_slot = (type == MEMIF_RING_S2M) ? mq->last_head : mq->last_tail;

  if (type == MEMIF_RING_S2M)
    last_slot = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE);
  else
    last_slot = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);

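  /* The acquire load above pairs with the peer's release store: descriptor
     contents written before the peer advanced head/tail are guaranteed to be
     visible before any slot in [cur_slot, last_slot) is read below. */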
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;

  /* construct copy and packet vector out of ring slots */
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      u32 dst_off, src_off, n_bytes_left;
      u16 s0;
      memif_desc_t *d0;
      void *mb0;
      po = ptd->packet_ops + n_rx_packets;
      n_rx_packets++;
      po->first_buffer_vec_index = n_buffers++;
      po->packet_len = 0;
      src_off = 0;
      dst_off = start_offset;

    next_slot:
      CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
                     CLIB_CACHE_LINE_BYTES, LOAD);
      s0 = cur_slot & mask;
      d0 = &ring->desc[s0];
      n_bytes_left = d0->length;

      /* slave resets buffer length,
       * so it can produce full size buffer for master
       */
      if (type == MEMIF_RING_M2S)
        d0->length = mif->run.buffer_size;

      po->packet_len += n_bytes_left;
      if (PREDICT_FALSE (last_region != d0->region))
        {
          last_region_shm = mif->regions[d0->region].shm;
          last_region = d0->region;
          last_region_max =
            last_region_shm + mif->regions[last_region].region_size;
        }
      mb0 = last_region_shm + d0->offset;

      if (PREDICT_FALSE (mb0 + n_bytes_left > last_region_max))
        vlib_error_count (vm, node->node_index, MEMIF_INPUT_ERROR_BAD_DESC, 1);
      else
        do
          {
            u32 dst_free = buffer_size - dst_off;
            if (dst_free == 0)
              {
                dst_off = 0;
                dst_free = buffer_size;
                n_buffers++;
              }
            u32 bytes_to_copy = clib_min (dst_free, n_bytes_left);
            memif_add_copy_op (ptd, mb0 + src_off, bytes_to_copy, dst_off,
                               n_buffers - 1);
            n_bytes_left -= bytes_to_copy;
            src_off += bytes_to_copy;
            dst_off += bytes_to_copy;
          }
        while (PREDICT_FALSE (n_bytes_left));

      cur_slot++;
      n_slots--;
      if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
        {
          src_off = 0;
          goto next_slot;
        }
    }

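  /* At this point ptd->packet_ops describes one entry per packet (first
     buffer index and total length) and ptd->copy_ops lists every memcpy
     needed to gather the (possibly chained) descriptors into n_buffers
     vlib buffers. */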
  /* allocate free buffers */
  vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
  n_alloc = vlib_buffer_alloc_from_pool (vm, ptd->buffers, n_buffers,
                                         mq->buffer_pool_index);
  if (PREDICT_FALSE (n_alloc != n_buffers))
    {
      if (n_alloc)
        vlib_buffer_free (vm, ptd->buffers, n_alloc);
      vlib_error_count (vm, node->node_index,
                        MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
      goto refill;
    }

  /* copy data */
  n_left = vec_len (ptd->copy_ops);
  co = ptd->copy_ops;
  while (n_left >= 8)
    {
      CLIB_PREFETCH (co[4].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[5].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[6].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[7].data, CLIB_CACHE_LINE_BYTES, LOAD);

      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);

      clib_memcpy_fast (b0->data + co[0].buffer_offset, co[0].data,
                        co[0].data_len);
      clib_memcpy_fast (b1->data + co[1].buffer_offset, co[1].data,
                        co[1].data_len);
      clib_memcpy_fast (b2->data + co[2].buffer_offset, co[2].data,
                        co[2].data_len);
      clib_memcpy_fast (b3->data + co[3].buffer_offset, co[3].data,
                        co[3].data_len);

      co += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      clib_memcpy_fast (b0->data + co[0].buffer_offset, co[0].data,
                        co[0].data_len);
      co += 1;
      n_left -= 1;
    }

  /* release slots from the ring */
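  /* For S2M rings this node is the consumer: advancing ring->tail with a
     release store hands the consumed slots back to the producer.  For M2S
     rings only the local cursor moves here; the slots are recycled when the
     ring is refilled with empty buffers below. */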
  if (type == MEMIF_RING_S2M)
    {
      __atomic_store_n (&ring->tail, cur_slot, __ATOMIC_RELEASE);
      mq->last_head = cur_slot;
    }
  else
    {
      mq->last_tail = cur_slot;
    }

  /* prepare buffer template and next indices */
  vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_RX] =
    mif->sw_if_index;
  vnet_buffer (&ptd->buffer_template)->feature_arc_index = 0;
  ptd->buffer_template.current_data = start_offset;
  ptd->buffer_template.current_config_index = 0;
  ptd->buffer_template.buffer_pool_index = mq->buffer_pool_index;
  ptd->buffer_template.ref_count = 1;

  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      if (mif->per_interface_next_index != ~0)
        next_index = mif->per_interface_next_index;
      else
        vnet_feature_start_device_input_x1 (mif->sw_if_index, &next_index,
                                            &ptd->buffer_template);

      vlib_get_new_next_frame (vm, node, next_index, to_next_bufs,
                               n_left_to_next);
      if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
        {
          vlib_next_frame_t *nf;
          vlib_frame_t *f;
          ethernet_input_frame_t *ef;
          nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
          f = vlib_get_frame (vm, nf->frame);
          f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

          ef = vlib_frame_scalar_args (f);
          ef->sw_if_index = mif->sw_if_index;
          ef->hw_if_index = mif->hw_if_index;
          vlib_frame_no_append (f);
        }
    }
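  /* Marking the ethernet-input frame with ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
     tells ethernet-input that every packet in it shares one sw_if_index,
     enabling its faster single-interface code path. */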

  /* process buffer metadata */
  u32 n_from = n_rx_packets;
  po = ptd->packet_ops;
  bi = to_next_bufs;

  /* copy template into local variable - will save per packet load */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);

  while (n_from >= 8)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[po[4].first_buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[po[5].first_buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[po[6].first_buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[po[7].first_buffer_vec_index]);

      vlib_prefetch_buffer_header (b0, STORE);
      vlib_prefetch_buffer_header (b1, STORE);
      vlib_prefetch_buffer_header (b2, STORE);
      vlib_prefetch_buffer_header (b3, STORE);

      /* enqueue buffer */
      u32 fbvi[4];
      fbvi[0] = po[0].first_buffer_vec_index;
      fbvi[1] = po[1].first_buffer_vec_index;
      fbvi[2] = po[2].first_buffer_vec_index;
      fbvi[3] = po[3].first_buffer_vec_index;

      bi[0] = ptd->buffers[fbvi[0]];
      bi[1] = ptd->buffers[fbvi[1]];
      bi[2] = ptd->buffers[fbvi[2]];
      bi[3] = ptd->buffers[fbvi[3]];

      b0 = vlib_get_buffer (vm, bi[0]);
      b1 = vlib_get_buffer (vm, bi[1]);
      b2 = vlib_get_buffer (vm, bi[2]);
      b3 = vlib_get_buffer (vm, bi[3]);

      vlib_buffer_copy_template (b0, &bt);
      vlib_buffer_copy_template (b1, &bt);
      vlib_buffer_copy_template (b2, &bt);
      vlib_buffer_copy_template (b3, &bt);

      b0->current_length = po[0].packet_len;
      n_rx_bytes += b0->current_length;
      b1->current_length = po[1].packet_len;
      n_rx_bytes += b1->current_length;
      b2->current_length = po[2].packet_len;
      n_rx_bytes += b2->current_length;
      b3->current_length = po[3].packet_len;
      n_rx_bytes += b3->current_length;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);
      memif_add_to_chain (vm, b1, ptd->buffers + fbvi[1] + 1, buffer_size);
      memif_add_to_chain (vm, b2, ptd->buffers + fbvi[2] + 1, buffer_size);
      memif_add_to_chain (vm, b3, ptd->buffers + fbvi[3] + 1, buffer_size);

      if (mode == MEMIF_INTERFACE_MODE_IP)
        {
          next[0] = memif_next_from_ip_hdr (node, b0);
          next[1] = memif_next_from_ip_hdr (node, b1);
          next[2] = memif_next_from_ip_hdr (node, b2);
          next[3] = memif_next_from_ip_hdr (node, b3);
        }

      /* next */
      n_from -= 4;
      po += 4;
      bi += 4;
      next += 4;
    }
  while (n_from)
    {
      u32 fbvi[4];
      /* enqueue buffer */
      fbvi[0] = po[0].first_buffer_vec_index;
      bi[0] = ptd->buffers[fbvi[0]];
      b0 = vlib_get_buffer (vm, bi[0]);
      vlib_buffer_copy_template (b0, &bt);
      b0->current_length = po->packet_len;
      n_rx_bytes += b0->current_length;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);

      if (mode == MEMIF_INTERFACE_MODE_IP)
        {
          next[0] = memif_next_from_ip_hdr (node, b0);
        }

      /* next */
      n_from -= 1;
      po += 1;
      bi += 1;
      next += 1;
    }

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      bi = to_next_bufs;
      next = nexts;
      u32 ni = next_index;
      while (n_trace && n_left)
        {
          vlib_buffer_t *b;
          memif_input_trace_t *tr;
          if (mode != MEMIF_INTERFACE_MODE_ETHERNET)
            ni = next[0];
          b = vlib_get_buffer (vm, bi[0]);
          if (PREDICT_TRUE
              (vlib_trace_buffer (vm, node, ni, b, /* follow_chain */ 0)))
            {
              tr = vlib_add_trace (vm, node, b, sizeof (*tr));
              tr->next_index = ni;
              tr->hw_if_index = mif->hw_if_index;
              tr->ring = qid;
              n_trace--;
            }

          /* next */
          n_left--;
          bi++;
          next++;
        }
      vlib_set_trace_count (vm, node, n_trace);
    }

  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      n_left_to_next -= n_rx_packets;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  else
    vlib_buffer_enqueue_to_next (vm, node, to_next_bufs, nexts, n_rx_packets);

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thread_index,
                                   mif->sw_if_index, n_rx_packets,
                                   n_rx_bytes);

  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);
  vec_reset_length (ptd->copy_ops);

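  /* Only M2S rings are refilled here: this side produces the empty buffers,
     re-advertising each free slot with a full run.buffer_size length and
     publishing the new head to the peer with a release store. */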
  if (type == MEMIF_RING_M2S)
    {
      u16 head = ring->head;
      n_slots = ring_size - head + mq->last_tail;

      while (n_slots--)
        {
          u16 s = head++ & mask;
          memif_desc_t *d = &ring->desc[s];
          d->length = mif->run.buffer_size;
        }

      __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);
    }

  return n_rx_packets;
}

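/*
 * Zero-copy RX path, used when this (slave) side has exposed its own vlib
 * buffer memory to the peer as the memif region: ring descriptors point
 * directly at VPP buffer data, so received packets are enqueued without any
 * copy, and the ring is refilled with freshly allocated vlib buffers.
 */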
static_always_inline uword
memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                              memif_if_t * mif, u16 qid,
                              memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u32 next_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 *to_next = 0, *buffers;
  u32 bi0, bi1, bi2, bi3;
  u16 slot, s0;
  memif_desc_t *d0;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
                                                   thread_index);
  u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
  i16 start_offset;
  u64 offset;
  u32 buffer_length;
  u16 n_alloc, n_from;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
    VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  /* assume that somebody will want to add ethernet header on the packet
     so start with IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
  buffer_length = vlib_buffer_get_default_data_size (vm) - start_offset;

  cur_slot = mq->last_tail;
  last_slot = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;

  /* process ring slots */
  vec_validate_aligned (ptd->buffers, MEMIF_RX_VECTOR_SZ,
                        CLIB_CACHE_LINE_BYTES);
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      vlib_buffer_t *hb;

      s0 = cur_slot & mask;
      bi0 = mq->buffers[s0];
      ptd->buffers[n_rx_packets++] = bi0;

      CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
                     CLIB_CACHE_LINE_BYTES, LOAD);
      d0 = &ring->desc[s0];
      hb = b0 = vlib_get_buffer (vm, bi0);
      b0->current_data = start_offset;
      b0->current_length = d0->length;
      n_rx_bytes += d0->length;

      cur_slot++;
      n_slots--;
      if (PREDICT_FALSE ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots))
        {
          hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
        next_slot:
          s0 = cur_slot & mask;
          d0 = &ring->desc[s0];
          bi0 = mq->buffers[s0];

          /* previous buffer */
          b0->next_buffer = bi0;
          b0->flags |= VLIB_BUFFER_NEXT_PRESENT;

          /* current buffer */
          b0 = vlib_get_buffer (vm, bi0);
          b0->current_data = start_offset;
          b0->current_length = d0->length;
          hb->total_length_not_including_first_buffer += d0->length;
          n_rx_bytes += d0->length;

          cur_slot++;
          n_slots--;
          if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
            goto next_slot;
        }
    }

  /* release slots from the ring */
  mq->last_tail = cur_slot;

  n_from = n_rx_packets;
  buffers = ptd->buffers;

  while (n_from)
    {
      u32 n_left_to_next;
      u32 next0, next1, next2, next3;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_from >= 8 && n_left_to_next >= 4)
        {
          b0 = vlib_get_buffer (vm, buffers[4]);
          b1 = vlib_get_buffer (vm, buffers[5]);
          b2 = vlib_get_buffer (vm, buffers[6]);
          b3 = vlib_get_buffer (vm, buffers[7]);
          vlib_prefetch_buffer_header (b0, STORE);
          vlib_prefetch_buffer_header (b1, STORE);
          vlib_prefetch_buffer_header (b2, STORE);
          vlib_prefetch_buffer_header (b3, STORE);

          /* enqueue buffer */
          to_next[0] = bi0 = buffers[0];
          to_next[1] = bi1 = buffers[1];
          to_next[2] = bi2 = buffers[2];
          to_next[3] = bi3 = buffers[3];
          to_next += 4;
          n_left_to_next -= 4;
          buffers += 4;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);

          vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
          vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
          vnet_buffer (b2)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b2)->sw_if_index[VLIB_TX] = ~0;
          vnet_buffer (b3)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b3)->sw_if_index[VLIB_TX] = ~0;

          if (mode == MEMIF_INTERFACE_MODE_IP)
            {
              next0 = memif_next_from_ip_hdr (node, b0);
              next1 = memif_next_from_ip_hdr (node, b1);
              next2 = memif_next_from_ip_hdr (node, b2);
              next3 = memif_next_from_ip_hdr (node, b3);
            }
          else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
            {
              if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
                {
                  next0 = mif->per_interface_next_index;
                  next1 = mif->per_interface_next_index;
                  next2 = mif->per_interface_next_index;
                  next3 = mif->per_interface_next_index;
                }
              else
                {
                  next0 = next1 = next2 = next3 = next_index;
                  /* redirect if feature path enabled */
                  vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                      &next0, b0);
                  vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                      &next1, b1);
                  vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                      &next2, b2);
                  vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                      &next3, b3);
                }
            }

          /* trace */
          if (PREDICT_FALSE (n_trace > 0))
            {
              memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
              if (PREDICT_FALSE (n_trace > 0))
                memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace);
              if (PREDICT_FALSE (n_trace > 0))
                memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace);
              if (PREDICT_FALSE (n_trace > 0))
                memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace);
            }

          /* enqueue */
          vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);

          /* next */
          n_from -= 4;
        }
      while (n_from && n_left_to_next)
        {
          /* enqueue buffer */
          to_next[0] = bi0 = buffers[0];
          to_next += 1;
          n_left_to_next--;
          buffers += 1;

          b0 = vlib_get_buffer (vm, bi0);
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;

          if (mode == MEMIF_INTERFACE_MODE_IP)
            {
              next0 = memif_next_from_ip_hdr (node, b0);
            }
          else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
            {
              if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
                next0 = mif->per_interface_next_index;
              else
                {
                  next0 = next_index;
                  /* redirect if feature path enabled */
                  vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                      &next0, b0);
                }
            }

          /* trace */
          if (PREDICT_FALSE (n_trace > 0))
            memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);

          /* enqueue */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);

          /* next */
          n_from--;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thread_index,
                                   mif->sw_if_index, n_rx_packets,
                                   n_rx_bytes);

  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);

  head = ring->head;
  n_slots = ring_size - head + mq->last_tail;
  slot = head & mask;

  n_slots &= ~7;

  if (n_slots < 32)
    goto done;

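  /* Refill in batches of 8: allocate fresh vlib buffers straight into the
     ring-ordered mq->buffers array, then point each descriptor at the new
     buffer's data.  Descriptor offsets are relative to the start of the
     region, hence the subtraction of the region base (plus start_offset)
     computed into 'offset'. */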
  memif_desc_t desc_template, *dt = &desc_template;
  clib_memset (dt, 0, sizeof (memif_desc_t));
  dt->length = buffer_length;

  n_alloc = vlib_buffer_alloc_to_ring_from_pool (
    vm, mq->buffers, slot, ring_size, n_slots, mq->buffer_pool_index);
  dt->region = mq->buffer_pool_index + 1;
  offset = (u64) mif->regions[dt->region].shm + start_offset;

  if (PREDICT_FALSE (n_alloc != n_slots))
    vlib_error_count (vm, node->node_index,
                      MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);

  head += n_alloc;

  while (n_alloc)
    {
      memif_desc_t *d = ring->desc + slot;
      u32 *bi = mq->buffers + slot;

      if (PREDICT_FALSE (((slot + 7 > mask) || (n_alloc < 8))))
        goto one_by_one;

      clib_memcpy_fast (d + 0, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 1, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 2, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 3, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 4, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 5, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 6, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 7, dt, sizeof (memif_desc_t));

      d[0].offset = (u64) vlib_get_buffer (vm, bi[0])->data - offset;
      d[1].offset = (u64) vlib_get_buffer (vm, bi[1])->data - offset;
      d[2].offset = (u64) vlib_get_buffer (vm, bi[2])->data - offset;
      d[3].offset = (u64) vlib_get_buffer (vm, bi[3])->data - offset;
      d[4].offset = (u64) vlib_get_buffer (vm, bi[4])->data - offset;
      d[5].offset = (u64) vlib_get_buffer (vm, bi[5])->data - offset;
      d[6].offset = (u64) vlib_get_buffer (vm, bi[6])->data - offset;
      d[7].offset = (u64) vlib_get_buffer (vm, bi[7])->data - offset;

      slot = (slot + 8) & mask;
      n_alloc -= 8;
      continue;

    one_by_one:
      clib_memcpy_fast (d, dt, sizeof (memif_desc_t));
      d[0].offset = (u64) vlib_get_buffer (vm, bi[0])->data - offset;

      slot = (slot + 1) & mask;
      n_alloc -= 1;
    }

  __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);

done:
  return n_rx_packets;
}

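/*
 * Input node dispatch.  The node is scheduled per RX queue via the rx-queue
 * poll vector; for each pending (interface, queue) pair it picks either the
 * zero-copy path or the copy path with the ring type implied by our role
 * (slave receives on M2S rings, master on S2M rings), specialised for IP or
 * ethernet mode.
 */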
VLIB_NODE_FN (memif_input_node) (vlib_main_t * vm,
				 vlib_node_runtime_t * node,
				 vlib_frame_t * frame)
{
  u32 n_rx = 0;
  memif_main_t *mm = &memif_main;
  memif_interface_mode_t mode_ip = MEMIF_INTERFACE_MODE_IP;
  memif_interface_mode_t mode_eth = MEMIF_INTERFACE_MODE_ETHERNET;

  vnet_hw_if_rxq_poll_vector_t *pv;
  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  for (int i = 0; i < vec_len (pv); i++)
    {
      memif_if_t *mif;
      u32 qid;
      mif = vec_elt_at_index (mm->interfaces, pv[i].dev_instance);
      qid = pv[i].queue_id;
      if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
          (mif->flags & MEMIF_IF_FLAG_CONNECTED))
        {
          if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
            {
              if (mif->mode == MEMIF_INTERFACE_MODE_IP)
                n_rx +=
                  memif_device_input_zc_inline (vm, node, mif, qid, mode_ip);
              else
                n_rx +=
                  memif_device_input_zc_inline (vm, node, mif, qid, mode_eth);
            }
          else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
            {
              if (mif->mode == MEMIF_INTERFACE_MODE_IP)
                n_rx += memif_device_input_inline (
                  vm, node, mif, MEMIF_RING_M2S, qid, mode_ip);
              else
                n_rx += memif_device_input_inline (
                  vm, node, mif, MEMIF_RING_M2S, qid, mode_eth);
            }
          else
            {
              if (mif->mode == MEMIF_INTERFACE_MODE_IP)
                n_rx += memif_device_input_inline (
                  vm, node, mif, MEMIF_RING_S2M, qid, mode_ip);
              else
                n_rx += memif_device_input_inline (
                  vm, node, mif, MEMIF_RING_S2M, qid, mode_eth);
            }
        }
    }

  return n_rx;
}

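/* Registered as an input node in interrupt state; as a sibling of
 * device-input it shares device-input's next-node indices, so the
 * VNET_DEVICE_INPUT_NEXT_* values used above resolve correctly. */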
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (memif_input_node) = {
  .name = "memif-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_counters = memif_input_error_counters,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */