FD.io VPP v21.06
Vector Packet Processing
input.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2018 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vlib/unix/unix.h>
20 #include <vlib/pci/pci.h>
21 #include <vnet/ethernet/ethernet.h>
22 #include <vnet/devices/devices.h>
23 #include <vnet/interface/rx_queue_funcs.h>
24 
25 #include <rdma/rdma.h>
26 
27 #define foreach_rdma_input_error \
28  _(BUFFER_ALLOC, "buffer alloc error")
29 
30 typedef enum
31 {
32 #define _(f,s) RDMA_INPUT_ERROR_##f,
33  foreach_rdma_input_error
34 #undef _
35  RDMA_INPUT_N_ERROR,
36 } rdma_input_error_t;
37 
38 static __clib_unused char *rdma_input_error_strings[] = {
39 #define _(n,s) s,
40  foreach_rdma_input_error
41 #undef _
42 };
43 
44 
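/* Fill one receive work request and its single scatter/gather entry; each
   WR is provisionally linked to the next array element and the final link
   is patched to NULL just before posting. */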
45 static_always_inline void
46 ibv_set_recv_wr_and_sge (struct ibv_recv_wr *w, struct ibv_sge *s, u64 va,
47  u32 data_size, u32 lkey)
48 {
49  s[0].addr = va;
50  s[0].length = data_size;
51  s[0].lkey = lkey;
52  w[0].next = w + 1;
53  w[0].sg_list = s;
54  w[0].num_sge = 1;
55 }
56 
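/* Legacy (non-striding) RQ: re-allocate and re-chain the additional buffers
   of multi-segment WQEs whose chains were consumed, rewrite the matching
   WQE data-segment addresses and return the number of chains completed. */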
57 static_always_inline u32
58 rdma_device_legacy_input_refill_additional (vlib_main_t * vm,
59  rdma_device_t * rd,
60  rdma_rxq_t * rxq,
61  rdma_per_thread_data_t * ptd,
62  vlib_buffer_t * bt,
63  u32 first_slot, u32 n_alloc)
64 {
65  int i;
66  u8 log_wqe_sz = rxq->log_wqe_sz;
67  u32 *bi = ptd->tmp_bi;
68  vlib_buffer_t **bufs = ptd->tmp_bufs;
69 
70  for (i = 0; i < n_alloc; i++)
71  {
72  u8 chain_sz = rxq->n_used_per_chain[first_slot + i];
73  u8 chain_sz_alloc;
74  mlx5dv_wqe_ds_t *current_wqe =
75  rxq->wqes + ((first_slot + i) << log_wqe_sz);
76  if (chain_sz == 0)
77  continue;
78  if (PREDICT_FALSE ((chain_sz_alloc =
79  vlib_buffer_alloc_from_pool (vm, bi, chain_sz,
80  rd->pool)) !=
81  chain_sz))
82  {
83  vlib_buffer_free (vm, bi, chain_sz_alloc);
84  break;
85  }
86  /*Build the chain */
87  vlib_get_buffers (vm, bi, bufs, chain_sz);
88  for (int j = 0; j < chain_sz - 1; j++)
89  {
90  vlib_buffer_copy_template (bufs[j], bt);
91  bufs[j]->next_buffer = bi[j + 1];
92  bufs[j]->flags |= VLIB_BUFFER_NEXT_PRESENT;
93  }
94  /* The chain starting at the second buffer is pre-initialised */
95  vlib_buffer_copy_template (bufs[chain_sz - 1], bt);
96  /* Stick with the already existing chain */
97  if (chain_sz < rxq->n_ds_per_wqe - 1)
98  {
99  bufs[chain_sz - 1]->next_buffer = rxq->second_bufs[first_slot + i];
100  bufs[chain_sz - 1]->flags |= VLIB_BUFFER_NEXT_PRESENT;
101  }
102  else
103  {
104  bufs[chain_sz - 1]->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
105  }
106 
107  /* Update the wqes */
108  for (int j = 0; j < chain_sz; j++)
109  {
110  u64 addr;
111  vlib_get_buffers_with_offset (vm, bi + j,
112  (void *) &addr, 1,
113  sizeof (vlib_buffer_t));
114  current_wqe[j + 1].addr = clib_host_to_net_u64 (addr);
115  }
116  rxq->n_used_per_chain[first_slot + i] = 0;
117  rxq->n_total_additional_segs -= chain_sz;
118  rxq->second_bufs[first_slot + i] = bi[0];
119  }
120  return i;
121 }
122 
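/* Refill the RX queue. On the mlx5dv direct-verbs path buffer addresses are
   written straight into the WQE data segments and the doorbell is rung; on
   the plain ibverbs path a linked list of work requests is built and posted
   with ibv_post_wq_recv(). Allocations are rounded to a multiple of 8 and
   of the strides-per-WQE count. */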
123 static_always_inline void
124 rdma_device_input_refill (vlib_main_t * vm, rdma_device_t * rd,
125  rdma_rxq_t * rxq, vlib_buffer_t * bt,
126  const int is_mlx5dv, const int is_striding)
127 {
128  u32 n_alloc, n;
129  u16 ring_space;
130  struct ibv_recv_wr wr[VLIB_FRAME_SIZE], *w = wr;
131  struct ibv_sge sge[VLIB_FRAME_SIZE], *s = sge;
132  rdma_per_thread_data_t *ptd =
133  &rdma_main.per_thread_data[vlib_get_thread_index ()];
134  u32 mask = rxq->size - 1;
135  u32 slot = rxq->tail & mask;
136  u32 *bufs = rxq->bufs + slot;
137  u32 data_size = rxq->buf_sz;
138  u32 lkey = rd->lkey;
139  const int log_stride_per_wqe = is_striding ? rxq->log_stride_per_wqe : 0;
140  const int log_wqe_sz = rxq->log_wqe_sz;
141 
142  /* In legacy mode, some buffer chains may still be incomplete */
143  if (PREDICT_FALSE
144  (is_mlx5dv && !is_striding && (rxq->incomplete_tail != rxq->tail)))
145  {
146  int n_incomplete = rxq->incomplete_tail - rxq->tail;
147  int n_completed =
148  rdma_device_legacy_input_refill_additional (vm, rd, rxq, ptd, bt,
149  slot,
150  n_incomplete);
151  rxq->tail += n_completed;
152  slot = rxq->tail & mask;
153  /* Don't start recycling head buffers if there are incomplete chains */
154  if (n_completed != n_incomplete)
155  return;
156  }
157 
158  /* refilled buffers must be a multiple of 8 and of strides per WQE */
159  u32 alloc_multiple = 1 << (clib_max (3, log_stride_per_wqe));
160 
161  ring_space = rxq->size - (rxq->tail - rxq->head);
162 
163  n_alloc = clib_min (VLIB_FRAME_SIZE, ring_space);
164 
165  /* do not bother to allocate if too small */
166  if (n_alloc < 2 * alloc_multiple)
167  return;
168 
169  /* avoid wrap-around logic in core loop */
170  n_alloc = clib_min (n_alloc, rxq->size - slot);
171 
172  n_alloc &= ~(alloc_multiple - 1); /* round to alloc_multiple */
173 
174  n = vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, rxq->size,
175  n_alloc, rd->pool);
176 
177  if (PREDICT_FALSE (n != n_alloc))
178  {
179  u32 n_free;
180  if (n < alloc_multiple)
181  {
182  if (n)
183  vlib_buffer_free_from_ring (vm, rxq->bufs, slot, rxq->size, n);
184  return;
185  }
186 
187  /* partial allocation, round and return rest */
188  n_free = n & (alloc_multiple - 1);
189  n -= n_free;
190  if (n_free)
191  vlib_buffer_free_from_ring (vm, rxq->bufs, (slot + n) & mask,
192  rxq->size, n_free);
193  }
194 
195  n_alloc = n;
196 
197  if (is_mlx5dv)
198  {
199  u64 __clib_aligned (32) va[8];
200 
201  /* slot does not necessarily correspond to the slot
202  in the wqes ring (in 16B words) */
203  u32 wqes_slot = slot << (log_wqe_sz - log_stride_per_wqe);
204  const u32 wqe_cnt = rxq->wqe_cnt;
205  mlx5dv_wqe_ds_t *wqe = rxq->wqes + wqes_slot;
206  const int wqe_sz = 1 << log_wqe_sz;
207  const int stride_per_wqe = 1 << log_stride_per_wqe;
208  int current_data_seg = 0;
209 
210  /* In legacy mode, this function only refills head descriptors for each
211  WQE, so RDMA_RXQ_MAX_CHAIN_SZ-1 data segments are skipped per WQE */
212  const int log_skip_wqe = is_striding ? 0 : log_wqe_sz;
213 
214  while (n >= 1)
215  {
216  vlib_get_buffers_with_offset (vm, rxq->bufs + slot, (void **) va, 8,
217  sizeof (vlib_buffer_t));
218 #ifdef CLIB_HAVE_VEC256
219  *(u64x4 *) va = u64x4_byte_swap (*(u64x4 *) va);
220  *(u64x4 *) (va + 4) = u64x4_byte_swap (*(u64x4 *) (va + 4));
221 #else
222  for (int i = 0; i < 8; i++)
223  va[i] = clib_host_to_net_u64 (va[i]);
224 #endif
225 
226  /* In striding RQ mode, the first 16B-word of the WQE is the SRQ header.
227  It is initialised as if it were a LINKED_LIST, as we have no guarantee
228  about what RDMA core does (CYCLIC_RQ or LINKED_LIST_RQ). In cyclic
229  mode, the SRQ header is ignored anyway... */
230 
231 /* *INDENT-OFF* */
232  if (is_striding && !(current_data_seg & (wqe_sz - 1)))
233  *(mlx5dv_wqe_srq_next_t *) wqe = (mlx5dv_wqe_srq_next_t)
234  {
235  .rsvd0 = {0},
236  .next_wqe_index = clib_host_to_net_u16 (((wqes_slot >> log_wqe_sz) + 1) & (wqe_cnt - 1)),
237  .signature = 0,
238  .rsvd1 = {0}
239  };
240 /* *INDENT-ON* */
241 
242  /* TODO: when log_skip_wqe > 2, hw_prefetcher doesn't work, lots of LLC store
243  misses occur for wqes, to be fixed... */
244  if (!is_striding || !(current_data_seg & ~(stride_per_wqe - 1)))
245  {
246  wqe[(0 << log_skip_wqe) + is_striding].addr = va[0];
247  wqe[(1 << log_skip_wqe) + is_striding].addr = va[1];
248  wqe[(2 << log_skip_wqe) + is_striding].addr = va[2];
249  wqe[(3 << log_skip_wqe) + is_striding].addr = va[3];
250  wqe[(4 << log_skip_wqe) + is_striding].addr = va[4];
251  wqe[(5 << log_skip_wqe) + is_striding].addr = va[5];
252  wqe[(6 << log_skip_wqe) + is_striding].addr = va[6];
253  wqe[(7 << log_skip_wqe) + is_striding].addr = va[7];
254  slot += 8;
255  n -= 8;
256  }
257  wqe += 8 << log_skip_wqe;
258  wqes_slot += 8 << log_skip_wqe;
259  current_data_seg += 8;
260  current_data_seg &= wqe_sz - 1;
261  }
262 
263  /* In legacy mode, there is some work required to finish building the SG lists */
264  if (!is_striding)
265  {
266  int first_slot = slot - n_alloc;
267  rxq->incomplete_tail += n_alloc;
268  if (PREDICT_FALSE (rxq->n_total_additional_segs))
269  n_alloc =
270  rdma_device_legacy_input_refill_additional (vm, rd, rxq, ptd,
271  bt, first_slot,
272  n_alloc);
273  }
274  CLIB_MEMORY_STORE_BARRIER ();
275  rxq->tail += n_alloc;
276  if (is_striding)
277  {
278  rxq->striding_wqe_tail += n_alloc >> log_stride_per_wqe;
279  rxq->wq_db[MLX5_RCV_DBR] =
280  clib_host_to_net_u32 (rxq->striding_wqe_tail);
281  }
282  else
283  rxq->wq_db[MLX5_RCV_DBR] = clib_host_to_net_u32 (rxq->tail);
284  return;
285  }
286 
287  while (n >= 8)
288  {
289  u64 va[8];
290  if (PREDICT_TRUE (n >= 16))
291  {
292  clib_prefetch_store (s + 16);
293  clib_prefetch_store (w + 16);
294  }
295 
296  vlib_get_buffers_with_offset (vm, bufs, (void **) va, 8,
297  sizeof (vlib_buffer_t));
298 
299  ibv_set_recv_wr_and_sge (w++, s++, va[0], data_size, lkey);
300  ibv_set_recv_wr_and_sge (w++, s++, va[1], data_size, lkey);
301  ibv_set_recv_wr_and_sge (w++, s++, va[2], data_size, lkey);
302  ibv_set_recv_wr_and_sge (w++, s++, va[3], data_size, lkey);
303  ibv_set_recv_wr_and_sge (w++, s++, va[4], data_size, lkey);
304  ibv_set_recv_wr_and_sge (w++, s++, va[5], data_size, lkey);
305  ibv_set_recv_wr_and_sge (w++, s++, va[6], data_size, lkey);
306  ibv_set_recv_wr_and_sge (w++, s++, va[7], data_size, lkey);
307 
308  bufs += 8;
309  n -= 8;
310  }
311 
312  w[-1].next = 0; /* fix next pointer in WR linked-list last item */
313 
314  n = n_alloc;
315  if (ibv_post_wq_recv (rxq->wq, wr, &w) != 0)
316  {
317  n = w - wr;
318  vlib_buffer_free_from_ring (vm, rxq->bufs, slot + n, rxq->size,
319  n_alloc - n);
320  }
321 
322  rxq->tail += n;
323 }
324 
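/* Add per-packet traces, up to the count requested by the trace
   infrastructure, recording next index, hw_if_index and CQE flags. */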
325 static_always_inline void
326 rdma_device_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
327  const rdma_device_t * rd, u32 n_left,
328  const u32 * bi, u32 next_index, u16 * cqe_flags,
329  int is_mlx5dv)
330 {
331  u32 n_trace = vlib_get_trace_count (vm, node);
332 
333  if (PREDICT_TRUE (0 == n_trace))
334  return;
335 
336  while (n_trace && n_left)
337  {
338  vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
339  if (PREDICT_TRUE
340  (vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0)))
341  {
342  rdma_input_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
343  tr->next_index = next_index;
344  tr->hw_if_index = rd->hw_if_index;
345  tr->cqe_flags = is_mlx5dv ? clib_net_to_host_u16 (cqe_flags[0]) : 0;
346  n_trace--;
347  }
348  /* next */
349  n_left--;
350  cqe_flags++;
351  bi++;
352  }
353  vlib_set_trace_count (vm, node, n_trace);
354 }
355 
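/* When the next node is ethernet-input, mark the frame as coming from a
   single sw_if_index (and as IPv4-checksum-verified when the device already
   validated it) so ethernet-input can use its optimized path. */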
356 static_always_inline void
357 rdma_device_input_ethernet (vlib_main_t * vm, vlib_node_runtime_t * node,
358  const rdma_device_t * rd, u32 next_index,
359  int skip_ip4_cksum)
360 {
361  vlib_next_frame_t *nf;
362  vlib_frame_t *f;
363  ethernet_input_frame_t *ef;
364 
365  if (PREDICT_FALSE (VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT != next_index))
366  return;
367 
368  nf =
369  vlib_node_runtime_get_next_frame (vm, node,
370  VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT);
371  f = vlib_get_frame (vm, nf->frame);
372  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
373  if (skip_ip4_cksum)
374  f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
375 
376  ef = vlib_frame_scalar_args (f);
377  ef->sw_if_index = rd->sw_if_index;
378  ef->hw_if_index = rd->hw_if_index;
379 }
380 
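/* ibverbs path: apply the buffer template and set each buffer length from
   its work completion; returns the total number of bytes received. */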
381 static_always_inline u32
382 rdma_device_input_bufs (vlib_main_t * vm, const rdma_device_t * rd,
383  vlib_buffer_t ** b, struct ibv_wc *wc,
384  u32 n_left_from, vlib_buffer_t * bt)
385 {
386  u32 n_rx_bytes = 0;
387 
388  while (n_left_from >= 4)
389  {
390  if (PREDICT_TRUE (n_left_from >= 8))
391  {
392  CLIB_PREFETCH (&wc[4 + 0], CLIB_CACHE_LINE_BYTES, LOAD);
393  CLIB_PREFETCH (&wc[4 + 1], CLIB_CACHE_LINE_BYTES, LOAD);
394  CLIB_PREFETCH (&wc[4 + 2], CLIB_CACHE_LINE_BYTES, LOAD);
395  CLIB_PREFETCH (&wc[4 + 3], CLIB_CACHE_LINE_BYTES, LOAD);
396  vlib_prefetch_buffer_header (b[4 + 0], STORE);
397  vlib_prefetch_buffer_header (b[4 + 1], STORE);
398  vlib_prefetch_buffer_header (b[4 + 2], STORE);
399  vlib_prefetch_buffer_header (b[4 + 3], STORE);
400  }
401 
402  vlib_buffer_copy_template (b[0], bt);
403  vlib_buffer_copy_template (b[1], bt);
404  vlib_buffer_copy_template (b[2], bt);
405  vlib_buffer_copy_template (b[3], bt);
406 
407  n_rx_bytes += b[0]->current_length = wc[0].byte_len;
408  n_rx_bytes += b[1]->current_length = wc[1].byte_len;
409  n_rx_bytes += b[2]->current_length = wc[2].byte_len;
410  n_rx_bytes += b[3]->current_length = wc[3].byte_len;
411 
412  b += 4;
413  wc += 4;
414  n_left_from -= 4;
415  }
416 
417  while (n_left_from >= 1)
418  {
419  vlib_buffer_copy_template (b[0], bt);
420  n_rx_bytes += b[0]->current_length = wc[0].byte_len;
421 
422  b += 1;
423  wc += 1;
424  n_left_from -= 1;
425  }
426 
427  return n_rx_bytes;
428 }
429 
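/* Copy the byte counts carried by a compressed CQE session's mini-CQE array
   into byte_cnt, skipping the first 'skip' entries. */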
430 static_always_inline void
431 process_mini_cqes (rdma_rxq_t * rxq, u32 skip, u32 n_left, u32 cq_ci,
432  u32 mask, u32 * byte_cnt)
433 {
434  mlx5dv_mini_cqe_t *mcqe;
435  u32 mcqe_array_index = (cq_ci + 1) & mask;
436  mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
437 
438  mcqe_array_index = cq_ci;
439 
440  if (skip)
441  {
442  u32 n = skip & ~7;
443 
444  if (n)
445  {
446  mcqe_array_index = (mcqe_array_index + n) & mask;
447  mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
448  skip -= n;
449  }
450 
451  if (skip)
452  {
453  n = clib_min (8 - skip, n_left);
454  for (int i = 0; i < n; i++)
455  byte_cnt[i] = mcqe[skip + i].byte_count;
456  mcqe_array_index = (mcqe_array_index + 8) & mask;
457  mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
458  n_left -= n;
459  byte_cnt += n;
460  }
461 
462  }
463 
464  while (n_left >= 8)
465  {
466  for (int i = 0; i < 8; i++)
467  byte_cnt[i] = mcqe[i].byte_count;
468 
469  n_left -= 8;
470  byte_cnt += 8;
471  mcqe_array_index = (mcqe_array_index + 8) & mask;
472  mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
473  }
474 
475  if (n_left)
476  {
477  for (int i = 0; i < n_left; i++)
478  byte_cnt[i] = mcqe[i].byte_count;
479  }
480 }
481 
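/* Set the opcode/owner byte on n_left consecutive CQEs. */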
482 static_always_inline void
483 cqe_set_owner (mlx5dv_cqe_t * cqe, u32 n_left, u8 owner)
484 {
485  while (n_left >= 8)
486  {
487  cqe[0].opcode_cqefmt_se_owner = owner;
488  cqe[1].opcode_cqefmt_se_owner = owner;
489  cqe[2].opcode_cqefmt_se_owner = owner;
490  cqe[3].opcode_cqefmt_se_owner = owner;
491  cqe[4].opcode_cqefmt_se_owner = owner;
492  cqe[5].opcode_cqefmt_se_owner = owner;
493  cqe[6].opcode_cqefmt_se_owner = owner;
494  cqe[7].opcode_cqefmt_se_owner = owner;
495  n_left -= 8;
496  cqe += 8;
497  }
498  while (n_left)
499  {
500  cqe[0].opcode_cqefmt_se_owner = owner;
501  n_left--;
502  cqe++;
503  }
504 }
505 
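/* Reset the owner bytes of the CQEs covered by a mini-CQE array (the first
   one is reset by hardware), taking care of CQ ring wrap-around. */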
506 static_always_inline void
507 compressed_cqe_reset_owner (rdma_rxq_t * rxq, u32 n_mini_cqes, u32 cq_ci,
508  u32 mask, u32 log2_cq_size)
509 {
510  u8 owner;
511  u32 offset, cq_size = 1 << log2_cq_size;
512 
513 
514  /* first CQE is reset by hardware */
515  cq_ci++;
516  n_mini_cqes--;
517 
518  offset = cq_ci & mask;
519  owner = 0xf0 | ((cq_ci >> log2_cq_size) & 1);
520 
521  if (offset + n_mini_cqes < cq_size)
522  {
523  cqe_set_owner (rxq->cqes + offset, n_mini_cqes, owner);
524  }
525  else
526  {
527  u32 n = cq_size - offset;
528  cqe_set_owner (rxq->cqes + offset, n, owner);
529  cqe_set_owner (rxq->cqes, n_mini_cqes - n, owner ^ 1);
530  }
531 
532 }
533 
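/* Direct-verbs CQ poll: walk the CQ ring checking the owner bit, handle both
   regular and compressed CQEs (including sessions that span two frames),
   collect byte counts and CQE flags, and update the CQ doorbell. Returns the
   number of packets received. */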
534 static_always_inline uword
535 rdma_device_poll_cq_mlx5dv (rdma_device_t * rd, rdma_rxq_t * rxq,
536  u32 * byte_cnt, u16 * cqe_flags)
537 {
538  u32 n_rx_packets = 0;
539  u32 log2_cq_size = rxq->log2_cq_size;
540  u32 mask = pow2_mask (log2_cq_size);
541  u32 cq_ci = rxq->cq_ci;
542 
543  if (rxq->n_mini_cqes_left)
544  {
545  /* partially processed mini-cqe array */
546  u32 n_mini_cqes = rxq->n_mini_cqes;
547  u32 n_mini_cqes_left = rxq->n_mini_cqes_left;
548  process_mini_cqes (rxq, n_mini_cqes - n_mini_cqes_left,
549  n_mini_cqes_left, cq_ci, mask, byte_cnt);
550  compressed_cqe_reset_owner (rxq, n_mini_cqes, cq_ci, mask,
551  log2_cq_size);
552  clib_memset_u16 (cqe_flags, rxq->last_cqe_flags, n_mini_cqes_left);
553  n_rx_packets = n_mini_cqes_left;
554  byte_cnt += n_mini_cqes_left;
555  cqe_flags += n_mini_cqes_left;
556  rxq->n_mini_cqes_left = 0;
557  rxq->cq_ci = cq_ci = cq_ci + n_mini_cqes;
558  }
559 
560  while (n_rx_packets < VLIB_FRAME_SIZE)
561  {
562  u8 cqe_last_byte, owner;
563  mlx5dv_cqe_t *cqe = rxq->cqes + (cq_ci & mask);
564 
565  clib_prefetch_load (rxq->cqes + ((cq_ci + 8) & mask));
566 
567  owner = (cq_ci >> log2_cq_size) & 1;
568  cqe_last_byte = cqe->opcode_cqefmt_se_owner;
569 
570  if ((cqe_last_byte & 0x1) != owner)
571  break;
572 
573  cqe_last_byte &= 0xfc; /* remove owner and solicited bits */
574 
575  if (cqe_last_byte == 0x2c) /* OPCODE = 0x2 (Responder Send), Format = 0x3 (Compressed CQE) */
576  {
577  u32 n_mini_cqes = clib_net_to_host_u32 (cqe->mini_cqe_num);
578  u32 n_left = VLIB_FRAME_SIZE - n_rx_packets;
579  u16 flags = cqe->flags;
580 
581  if (n_left >= n_mini_cqes)
582  {
583  process_mini_cqes (rxq, 0, n_mini_cqes, cq_ci, mask, byte_cnt);
584  clib_memset_u16 (cqe_flags, flags, n_mini_cqes);
585  compressed_cqe_reset_owner (rxq, n_mini_cqes, cq_ci, mask,
586  log2_cq_size);
587  n_rx_packets += n_mini_cqes;
588  byte_cnt += n_mini_cqes;
589  cqe_flags += n_mini_cqes;
590  cq_ci += n_mini_cqes;
591  }
592  else
593  {
594  process_mini_cqes (rxq, 0, n_left, cq_ci, mask, byte_cnt);
595  clib_memset_u16 (cqe_flags, flags, n_left);
596  n_rx_packets = VLIB_FRAME_SIZE;
597  rxq->n_mini_cqes = n_mini_cqes;
598  rxq->n_mini_cqes_left = n_mini_cqes - n_left;
599  rxq->last_cqe_flags = flags;
600  goto done;
601  }
602  continue;
603  }
604 
605  if (cqe_last_byte == 0x20) /* OPCODE = 0x2 (Responder Send), Format = 0x0 (no inline data) */
606  {
607  byte_cnt[0] = cqe->byte_cnt;
608  cqe_flags[0] = cqe->flags;
609  n_rx_packets++;
610  cq_ci++;
611  byte_cnt++;
612  continue;
613  }
614 
615  rd->flags |= RDMA_DEVICE_F_ERROR;
616  break;
617  }
618 
619 done:
620  if (n_rx_packets)
621  rxq->cq_db[0] = rxq->cq_ci = cq_ci;
622  return n_rx_packets;
623 }
624 
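/* Striding RQ: accumulate the number of RX segments (consumed strides) from
   the byte_count words and report whether the slow path is needed, i.e. a
   packet spans several strides or a filler CQE is present. */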
625 static_always_inline int
626 rdma_device_mlx5dv_striding_rq_parse_bc (int n_rx_packets, int *n_rx_segs,
627  u32 * bc)
628 {
629 /* Determine if slow path is needed */
630  int filler = 0;
631  for (int i = 0; i < n_rx_packets; i++)
632  {
633  *n_rx_segs +=
634  (bc[i] & CQE_BC_CONSUMED_STRIDES_MASK) >>
635  CQE_BC_CONSUMED_STRIDES_SHIFT;
636  filler |= ! !(bc[i] & CQE_BC_FILLER_MASK);
637  }
638  return n_rx_packets != *n_rx_segs || filler;
639 }
640 
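/* Legacy RQ: return 1 if any packet is larger than a single buffer, in which
   case buffer chains must be rebuilt. */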
641 static_always_inline int
642 rdma_device_mlx5dv_legacy_rq_slow_path_needed (u32 buf_sz, int n_rx_packets,
643  u32 * bc)
644 {
645 #if defined CLIB_HAVE_VEC256
646  u32x8 thresh8 = u32x8_splat (buf_sz);
647  for (int i = 0; i < n_rx_packets; i += 8)
648  if (!u32x8_is_all_zero (*(u32x8 *) (bc + i) > thresh8))
649  return 1;
650 #elif defined CLIB_HAVE_VEC128
651  u32x4 thresh4 = u32x4_splat (buf_sz);
652  for (int i = 0; i < n_rx_packets; i += 4)
653  if (!u32x4_is_all_zero (*(u32x4 *) (bc + i) > thresh4))
654  return 1;
655 #else
656  while (n_rx_packets)
657  {
658  if (*bc > buf_sz)
659  return 1;
660  bc++;
661  n_rx_packets--;
662  }
663 #endif
664 
665  return 0;
666 }
667 
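/* Check that every packet is IPv4 with the L3_OK flag set, so the IPv4
   checksum check can be skipped downstream, and convert the byte counts from
   network to host byte order. */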
668 static_always_inline int
669 rdma_device_mlx5dv_l3_validate_and_swap_bc (rdma_per_thread_data_t
670  * ptd, int n_rx_packets, u32 * bc)
671 {
672  u16 mask = CQE_FLAG_L3_HDR_TYPE_MASK | CQE_FLAG_L3_OK;
673  u16 match =
674  CQE_FLAG_L3_HDR_TYPE_IP4 << CQE_FLAG_L3_HDR_TYPE_SHIFT | CQE_FLAG_L3_OK;
675  /* verify that all ip4 packets have l3_ok flag set and convert packet
676  length from network to host byte order */
677  int skip_ip4_cksum = 1;
678 
679 #if defined CLIB_HAVE_VEC256
680  u16x16 mask16 = u16x16_splat (mask);
681  u16x16 match16 = u16x16_splat (match);
682  u16x16 r = { };
683 
684  for (int i = 0; i * 16 < n_rx_packets; i++)
685  r |= (ptd->cqe_flags16[i] & mask16) != match16;
686 
687  if (!u16x16_is_all_zero (r))
688  skip_ip4_cksum = 0;
689 
690  for (int i = 0; i < n_rx_packets; i += 8)
691  *(u32x8 *) (bc + i) = u32x8_byte_swap (*(u32x8 *) (bc + i));
692 #elif defined CLIB_HAVE_VEC128
693  u16x8 mask8 = u16x8_splat (mask);
694  u16x8 match8 = u16x8_splat (match);
695  u16x8 r = { };
696 
697  for (int i = 0; i * 8 < n_rx_packets; i++)
698  r |= (ptd->cqe_flags8[i] & mask8) != match8;
699 
700  if (!u16x8_is_all_zero (r))
701  skip_ip4_cksum = 0;
702 
703  for (int i = 0; i < n_rx_packets; i += 4)
704  *(u32x4 *) (bc + i) = u32x4_byte_swap (*(u32x4 *) (bc + i));
705 #else
706  for (int i = 0; i < n_rx_packets; i++)
707  if ((ptd->cqe_flags[i] & mask) != match)
708  skip_ip4_cksum = 0;
709 
710  for (int i = 0; i < n_rx_packets; i++)
711  bc[i] = clib_net_to_host_u32 (bc[i]);
712 #endif
713  return skip_ip4_cksum;
714 }
715 
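/* Fast path: every element is a single-segment packet; copy the buffer
   indices out of the RX ring, apply the buffer template and set the lengths
   from the masked byte counts. */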
716 static_always_inline u32
717 rdma_device_mlx5dv_fast_input (vlib_main_t * vm, rdma_rxq_t * rxq,
718  vlib_buffer_t ** bufs,
719  u32 qs_mask, vlib_buffer_t * bt,
720  u32 * to_next, u32 n_rx_segs, u32 * bc,
721  u32 bc_mask)
722 {
723  vlib_buffer_t **b = bufs;
724  u32 n_left = n_rx_segs;
725  u32 n_rx_bytes = 0;
726  vlib_buffer_copy_indices_from_ring (to_next, rxq->bufs,
727  rxq->head & qs_mask, rxq->size,
728  n_rx_segs);
729  rxq->head += n_rx_segs;
730  vlib_get_buffers (vm, to_next, bufs, n_rx_segs);
731  while (n_left >= 8)
732  {
733  clib_prefetch_store (b[4]);
734  vlib_buffer_copy_template (b[0], bt);
735  n_rx_bytes += b[0]->current_length = bc[0] & bc_mask;
736  clib_prefetch_store (b[5]);
737  vlib_buffer_copy_template (b[1], bt);
738  n_rx_bytes += b[1]->current_length = bc[1] & bc_mask;
739  clib_prefetch_store (b[6]);
740  vlib_buffer_copy_template (b[2], bt);
741  n_rx_bytes += b[2]->current_length = bc[2] & bc_mask;
742  clib_prefetch_store (b[7]);
743  vlib_buffer_copy_template (b[3], bt);
744  n_rx_bytes += b[3]->current_length = bc[3] & bc_mask;
745  /* next */
746  bc += 4;
747  b += 4;
748  n_left -= 4;
749  }
750  while (n_left)
751  {
752  vlib_buffer_copy_template (b[0], bt);
753  n_rx_bytes += b[0]->current_length = bc[0] & bc_mask;
754  /* next */
755  bc++;
756  b++;
757  n_left--;
758  }
759  return n_rx_bytes;
760 }
761 
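/* Legacy RQ slow path: head buffers whose length exceeds buf_sz describe
   chained packets; walk the pre-built chains, set per-segment lengths,
   terminate each chain and record how many segments must be re-allocated at
   the next refill. */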
762 static_always_inline void
763 rdma_device_mlx5dv_legacy_rq_fix_chains (vlib_main_t * vm, rdma_rxq_t * rxq,
764  vlib_buffer_t ** bufs, u32 qs_mask,
765  u32 n)
766 {
767  u32 buf_sz = rxq->buf_sz;
768  uword slot = (rxq->head - n) & qs_mask;
769  u32 *second = &rxq->second_bufs[slot];
770  u32 n_wrap_around = (slot + n) & (qs_mask + 1) ? (slot + n) & qs_mask : 0;
771  u8 *n_used_per_chain = &rxq->n_used_per_chain[slot];
772  n -= n_wrap_around;
773 wrap_around:
774  while (n > 0)
775  {
776  u16 total_length = bufs[0]->current_length;
777  if (total_length > buf_sz)
778  {
779  vlib_buffer_t *current_buf = bufs[0];
780  u8 current_chain_sz = 0;
781  current_buf->current_length = buf_sz;
782  total_length -= buf_sz;
783  current_buf->total_length_not_including_first_buffer = total_length;
784  current_buf->flags |= VLIB_BUFFER_NEXT_PRESENT;
785  current_buf->next_buffer = second[0];
786  do
787  {
788  current_buf = vlib_get_buffer (vm, current_buf->next_buffer);
789  current_buf->current_length = clib_min (buf_sz, total_length);
790  total_length -= current_buf->current_length;
791  current_chain_sz++;
792  }
793  while (total_length > 0);
794  current_buf->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
795  second[0] = current_buf->next_buffer;
796  current_buf->next_buffer = 0;
797  rxq->n_total_additional_segs += current_chain_sz;
798  n_used_per_chain[0] = current_chain_sz;
799  }
800  bufs++;
801  second++;
802  n_used_per_chain++;
803  n--;
804  }
805  if (PREDICT_FALSE (n_wrap_around))
806  {
807  n = n_wrap_around;
808  n_wrap_around = 0;
809  second = rxq->second_bufs;
810  n_used_per_chain = rxq->n_used_per_chain;
811  goto wrap_around;
812  }
813 }
814 
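/* Striding RQ receive: use the single-segment fast path when possible,
   otherwise rebuild multi-segment buffer chains from the consumed strides
   and free the buffers of filler CQEs. Returns the number of bytes
   received. */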
815 static_always_inline u32
816 rdma_device_mlx5dv_striding_rq_input (vlib_main_t * vm,
817  rdma_per_thread_data_t * ptd,
818  rdma_rxq_t * rxq,
819  vlib_buffer_t * bt, u32 * to_next,
820  int n_rx_segs, int *n_rx_packets,
821  u32 * bc, int slow_path_needed)
822 {
823  u32 mask = rxq->size - 1;
824  u32 n_rx_bytes = 0;
825  if (PREDICT_TRUE (!slow_path_needed))
826  {
827  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
828  n_rx_bytes +=
829  rdma_device_mlx5dv_fast_input (vm, rxq, bufs, mask, bt, to_next,
830  n_rx_segs, bc, CQE_BC_BYTE_COUNT_MASK);
831  }
832  else /* Slow path with multiseg */
833  {
834  vlib_buffer_t *pkt_head; /*Current head buffer */
835  vlib_buffer_t *pkt_prev; /* Buffer processed at the previous iteration */
836  u32 pkt_head_idx;
837  vlib_buffer_t **pkt;
838  uword n_segs_remaining = 0; /*Remaining strides in current buffer */
839  u32 n_bytes_remaining = 0; /*Remaining bytes in current buffer */
840  u32 *next_in_frame = to_next;
841  u32 *next_to_free = ptd->to_free_buffers;
842  bt->current_length = vlib_buffer_get_default_data_size (vm);
843  do
844  {
845  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
846  u32 n_left = clib_min (n_rx_segs, VLIB_FRAME_SIZE);
847  n_rx_segs -= n_left;
848  vlib_buffer_copy_indices_from_ring (ptd->current_segs,
849  rxq->bufs, rxq->head & mask,
850  rxq->size, n_left);
851  rxq->head += n_left;
852  vlib_get_buffers (vm, ptd->current_segs, bufs, n_left);
853  pkt = bufs;
854  while (n_left > 0)
855  {
856  /* Initialize the current buffer as full size */
857  vlib_buffer_copy_template (pkt[0], bt);
858  if (!n_segs_remaining) /* No pending chain */
859  {
860  n_segs_remaining =
861  (bc[0] & CQE_BC_CONSUMED_STRIDES_MASK) >>
862  CQE_BC_CONSUMED_STRIDES_SHIFT;
863  pkt_head = pkt[0];
864  pkt_head_idx = ptd->current_segs[pkt - bufs];
865  n_bytes_remaining = bc[0] & CQE_BC_BYTE_COUNT_MASK;
866  pkt_head->total_length_not_including_first_buffer =
867  n_segs_remaining >
868  1 ? n_bytes_remaining - pkt[0]->current_length : 0;
869  }
870  else /* Perform chaining if it's a continuation buffer */
871  {
872  pkt_prev->next_buffer = ptd->current_segs[pkt - bufs];
873  pkt_prev->flags |= VLIB_BUFFER_NEXT_PRESENT;
874  pkt[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
875  }
876  if (n_segs_remaining == 1) /* Last buffer of the chain */
877  {
878  pkt[0]->current_length = n_bytes_remaining;
879  if (bc[0] & CQE_BC_FILLER_MASK)
880  {
881  (next_to_free++)[0] = pkt_head_idx;
882  (*n_rx_packets)--;
883  }
884 
885  else
886  {
887  (next_in_frame++)[0] = pkt_head_idx;
888  n_rx_bytes +=
889  pkt_head->current_length +
890  pkt_head->total_length_not_including_first_buffer;
891  }
892  /*Go to next CQE */
893  bc++;
894  }
895  else
896  {
897  n_bytes_remaining -= pkt[0]->current_length;
898  pkt_prev = pkt[0];
899  }
900  n_segs_remaining--;
901  n_left--;
902  pkt++;
903  }
904 
905  }
906  while (n_rx_segs > 0);
907  vlib_buffer_free (vm, ptd->to_free_buffers,
908  next_to_free - ptd->to_free_buffers);
909  }
910  return n_rx_bytes;
911 }
912 
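/* Per-queue receive: poll the CQ (direct verbs or plain ibverbs), build the
   buffer template, enqueue packets towards the next node, update counters
   and traces, then refill the RX queue. */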
913 static_always_inline uword
914 rdma_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
915  vlib_frame_t * frame, rdma_device_t * rd,
916  u16 qid, const int use_mlx5dv)
917 {
918  rdma_main_t *rm = &rdma_main;
919  vnet_main_t *vnm = vnet_get_main ();
920  rdma_per_thread_data_t *ptd = vec_elt_at_index (rm->per_thread_data,
921  vm->thread_index);
922  rdma_rxq_t *rxq = vec_elt_at_index (rd->rxqs, qid);
923  struct ibv_wc wc[VLIB_FRAME_SIZE];
924  u32 __clib_aligned (32) byte_cnts[VLIB_FRAME_SIZE];
925  vlib_buffer_t bt;
926  u32 next_index, *to_next, n_left_to_next, n_rx_bytes = 0;
927  int n_rx_packets, skip_ip4_cksum = 0;
928  u32 mask = rxq->size - 1;
929  const int is_striding = ! !(rd->flags & RDMA_DEVICE_F_STRIDING_RQ);
930 
931  if (use_mlx5dv)
932  n_rx_packets = rdma_device_poll_cq_mlx5dv (rd, rxq, byte_cnts,
933  ptd->cqe_flags);
934  else
935  n_rx_packets = ibv_poll_cq (rxq->cq, VLIB_FRAME_SIZE, wc);
936 
937  /* init buffer template */
938  vlib_buffer_copy_template (&bt, &ptd->buffer_template);
939  vnet_buffer (&bt)->sw_if_index[VLIB_RX] = rd->sw_if_index;
940  bt.buffer_pool_index = rd->pool;
941 
942  if (PREDICT_FALSE (n_rx_packets <= 0))
943  goto refill;
944 
945  /* update buffer template for input feature arcs if any */
946  next_index = rd->per_interface_next_index;
947  if (PREDICT_FALSE (vnet_device_input_have_features (rd->sw_if_index)))
948  vnet_feature_start_device_input_x1 (rd->sw_if_index, &next_index, &bt);
949 
950  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
951 
952  if (use_mlx5dv)
953  {
954  u32 *bc = byte_cnts;
955  int slow_path_needed;
956  skip_ip4_cksum =
957  rdma_device_mlx5dv_l3_validate_and_swap_bc (ptd, n_rx_packets, bc);
958  if (is_striding)
959  {
960  int n_rx_segs = 0;
961  slow_path_needed =
962  rdma_device_mlx5dv_striding_rq_parse_bc (n_rx_packets,
963  &n_rx_segs, bc);
964  n_rx_bytes =
965  rdma_device_mlx5dv_striding_rq_input (vm, ptd, rxq, &bt,
966  to_next, n_rx_segs,
967  &n_rx_packets, bc,
968  slow_path_needed);
969  }
970  else
971  {
972  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
973  slow_path_needed =
974  rdma_device_mlx5dv_legacy_rq_slow_path_needed (rxq->buf_sz,
975  n_rx_packets, bc);
976  n_rx_bytes = rdma_device_mlx5dv_fast_input (
977  vm, rxq, bufs, mask, &bt, to_next, n_rx_packets, bc, ~0);
978 
979  /* If there are chained buffers, some of the head buffers have a current
980  length larger than buf_sz: fix them up */
981  if (PREDICT_FALSE (slow_path_needed))
982  rdma_device_mlx5dv_legacy_rq_fix_chains (vm, rxq, bufs, mask,
983  n_rx_packets);
984  }
985  }
986  else
987  {
988  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
989  vlib_buffer_copy_indices_from_ring (to_next, rxq->bufs,
990  rxq->head & mask,
991  rxq->size, n_rx_packets);
992  vlib_get_buffers (vm, to_next, bufs, n_rx_packets);
993  rxq->head += n_rx_packets;
994  n_rx_bytes =
995  rdma_device_input_bufs (vm, rd, bufs, wc, n_rx_packets, &bt);
996 
997  }
998 
999  rdma_device_input_ethernet (vm, node, rd, next_index, skip_ip4_cksum);
1000  vlib_put_next_frame (vm, node, next_index, n_left_to_next - n_rx_packets);
1001  rdma_device_input_trace (vm, node, rd, n_rx_packets, to_next,
1002  next_index, ptd->cqe_flags, use_mlx5dv);
1003  /* reset flags to zero for the next run */
1004  if (use_mlx5dv)
1005  clib_memset_u16 (ptd->cqe_flags, 0, VLIB_FRAME_SIZE);
1006  vlib_increment_combined_counter (vnm->interface_main.
1007  combined_sw_if_counters +
1008  VNET_INTERFACE_COUNTER_RX,
1009  vm->thread_index, rd->hw_if_index,
1010  n_rx_packets, n_rx_bytes);
1011 refill:
1012  rdma_device_input_refill (vm, rd, rxq, &bt, use_mlx5dv, is_striding);
1013  return n_rx_packets;
1014 }
1015 
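/* Input node: iterate over the RX queues polled by this thread and process
   each one, skipping interfaces that are down or in error state. */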
1016 VLIB_NODE_FN (rdma_input_node) (vlib_main_t * vm,
1017  vlib_node_runtime_t * node,
1018  vlib_frame_t * frame)
1019 {
1020  u32 n_rx = 0;
1021  rdma_main_t *rm = &rdma_main;
1022  vnet_hw_if_rxq_poll_vector_t *pv;
1023  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
1024  for (int i = 0; i < vec_len (pv); i++)
1025  {
1026  rdma_device_t *rd;
1027  rd = vec_elt_at_index (rm->devices, pv[i].dev_instance);
1028  if (PREDICT_TRUE (rd->flags & RDMA_DEVICE_F_ADMIN_UP) == 0)
1029  continue;
1030 
1031  if (PREDICT_TRUE (rd->flags & RDMA_DEVICE_F_ERROR))
1032  continue;
1033 
1034  if (PREDICT_TRUE (rd->flags & RDMA_DEVICE_F_MLX5DV))
1035  n_rx +=
1036  rdma_device_input_inline (vm, node, frame, rd, pv[i].queue_id, 1);
1037  else
1038  n_rx +=
1039  rdma_device_input_inline (vm, node, frame, rd, pv[i].queue_id, 0);
1040  }
1041  return n_rx;
1042 }
1043 
1044 /* *INDENT-OFF* */
1045 VLIB_REGISTER_NODE (rdma_input_node) = {
1046  .name = "rdma-input",
1047  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
1048  .sibling_of = "device-input",
1049  .format_trace = format_rdma_input_trace,
1050  .type = VLIB_NODE_TYPE_INPUT,
1051  .state = VLIB_NODE_STATE_DISABLED,
1052  .n_errors = RDMA_INPUT_N_ERROR,
1053  .error_strings = rdma_input_error_strings,
1054 };
1055 
1056 /* *INDENT-ON* */
1057 
1058 
1059 /*
1060  * fd.io coding-style-patch-verification: ON
1061  *
1062  * Local Variables:
1063  * eval: (c-set-style "gnu")
1064  * End:
1065  */