FD.io VPP  v20.09-64-g4f7b92f0a
Vector Packet Processing
input.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * pg_input.c: buffer generator input
17  *
18  * Copyright (c) 2008 Eliot Dresselhaus
19  *
20  * Permission is hereby granted, free of charge, to any person obtaining
21  * a copy of this software and associated documentation files (the
22  * "Software"), to deal in the Software without restriction, including
23  * without limitation the rights to use, copy, modify, merge, publish,
24  * distribute, sublicense, and/or sell copies of the Software, and to
25  * permit persons to whom the Software is furnished to do so, subject to
26  * the following conditions:
27  *
28  * The above copyright notice and this permission notice shall be
29  * included in all copies or substantial portions of the Software.
30  *
31  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38  */
39 
40  /*
41  * To be honest, the packet generator needs an extreme
42  * makeover. Two key assumptions which drove the current implementation
43  * are no longer true. First, buffer managers implement a
44  * post-TX recycle list. Second, that packet generator performance
45  * is first-order important.
46  */
47 
48 #include <vlib/vlib.h>
49 #include <vnet/pg/pg.h>
50 #include <vnet/vnet.h>
51 #include <vnet/ethernet/ethernet.h>
52 #include <vnet/feature/feature.h>
53 #include <vnet/ip/ip4_packet.h>
54 #include <vnet/ip/ip6_packet.h>
55 #include <vnet/udp/udp_packet.h>
56 #include <vnet/devices/devices.h>
57 #include <vnet/gso/gro_func.h>
58 
/*
 * Verify that a buffer's payload still matches the stream's fixed packet
 * template under the fixed-data mask, starting at data_offset.
 * Returns 1 on match; on mismatch logs the buffer, the first differing
 * index and the is/mask/expect byte strings, and returns 0.
 *
 * NOTE(review): the signature line was lost in extraction; reconstructed
 * from the visible call sites (b, s, data_offset, n_bytes) — verify
 * against upstream VPP.
 */
static int
validate_buffer_data2 (vlib_buffer_t * b, pg_stream_t * s,
		       u32 data_offset, u32 n_bytes)
{
  u8 *bd, *pd, *pm;
  u32 i;

  bd = b->data;					/* actual buffer bytes */
  pd = s->fixed_packet_data + data_offset;	/* expected bytes */
  pm = s->fixed_packet_data_mask + data_offset;	/* which bits are fixed */

  /* Clamp n_bytes so the comparison never runs past the template vector. */
  if (pd + n_bytes >= vec_end (s->fixed_packet_data))
    n_bytes = (pd < vec_end (s->fixed_packet_data)
	       ? vec_end (s->fixed_packet_data) - pd : 0);

  /* Only bits set in the mask are required to match the template. */
  for (i = 0; i < n_bytes; i++)
    if ((bd[i] & pm[i]) != pd[i])
      break;

  if (i >= n_bytes)
    return 1;

  clib_warning ("buffer %U", format_vnet_buffer, b);
  clib_warning ("differ at index %d", i);
  clib_warning ("is %U", format_hex_bytes, bd, n_bytes);
  clib_warning ("mask %U", format_hex_bytes, pm, n_bytes);
  clib_warning ("expect %U", format_hex_bytes, pd, n_bytes);
  return 0;
}
88 
/*
 * Validate the entire fixed-data region of a buffer (offset 0).
 * NOTE(review): signature line lost in extraction; reconstructed — verify.
 */
static int
validate_buffer_data (vlib_buffer_t * b, pg_stream_t * s)
{
  return validate_buffer_data2 (b, s, 0, s->buffer_bytes);
}
94 
95 always_inline void
96 set_1 (void *a0,
97  u64 v0, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order)
98 {
99  ASSERT (v0 >= v_min && v0 <= v_max);
100  if (n_bits == BITS (u8))
101  {
102  ((u8 *) a0)[0] = v0;
103  }
104  else if (n_bits == BITS (u16))
105  {
106  if (is_net_byte_order)
107  v0 = clib_host_to_net_u16 (v0);
108  clib_mem_unaligned (a0, u16) = v0;
109  }
110  else if (n_bits == BITS (u32))
111  {
112  if (is_net_byte_order)
113  v0 = clib_host_to_net_u32 (v0);
114  clib_mem_unaligned (a0, u32) = v0;
115  }
116  else if (n_bits == BITS (u64))
117  {
118  if (is_net_byte_order)
119  v0 = clib_host_to_net_u64 (v0);
120  clib_mem_unaligned (a0, u64) = v0;
121  }
122 }
123 
124 always_inline void
125 set_2 (void *a0, void *a1,
126  u64 v0, u64 v1,
127  u64 v_min, u64 v_max,
128  u32 n_bits, u32 is_net_byte_order, u32 is_increment)
129 {
130  ASSERT (v0 >= v_min && v0 <= v_max);
131  ASSERT (v1 >= v_min && v1 <= (v_max + is_increment));
132  if (n_bits == BITS (u8))
133  {
134  ((u8 *) a0)[0] = v0;
135  ((u8 *) a1)[0] = v1;
136  }
137  else if (n_bits == BITS (u16))
138  {
139  if (is_net_byte_order)
140  {
141  v0 = clib_host_to_net_u16 (v0);
142  v1 = clib_host_to_net_u16 (v1);
143  }
144  clib_mem_unaligned (a0, u16) = v0;
145  clib_mem_unaligned (a1, u16) = v1;
146  }
147  else if (n_bits == BITS (u32))
148  {
149  if (is_net_byte_order)
150  {
151  v0 = clib_host_to_net_u32 (v0);
152  v1 = clib_host_to_net_u32 (v1);
153  }
154  clib_mem_unaligned (a0, u32) = v0;
155  clib_mem_unaligned (a1, u32) = v1;
156  }
157  else if (n_bits == BITS (u64))
158  {
159  if (is_net_byte_order)
160  {
161  v0 = clib_host_to_net_u64 (v0);
162  v1 = clib_host_to_net_u64 (v1);
163  }
164  clib_mem_unaligned (a0, u64) = v0;
165  clib_mem_unaligned (a1, u64) = v1;
166  }
167 }
168 
/*
 * Write the fixed value v_min of width n_bits at byte_offset into every
 * buffer in the list.  Dual-loop structure: pairs are processed while at
 * least 4 buffers remain so b2/b3 can be prefetched for the next round.
 *
 * NOTE(review): the "static_always_inline void / do_set_fixed (pg_main_t
 * * pg," header lines and the vm declaration were lost in extraction;
 * reconstructed from upstream VPP — verify.
 */
static_always_inline void
do_set_fixed (pg_main_t * pg,
	      pg_stream_t * s,
	      u32 * buffers,
	      u32 n_buffers,
	      u32 n_bits,
	      u32 byte_offset, u32 is_net_byte_order, u64 v_min, u64 v_max)
{
  vlib_main_t *vm = vlib_get_main ();	/* reconstructed — verify */

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      /* Only 2 consumed per iteration; b2/b3 are prefetch targets. */
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      set_2 (a0, a1, v_min, v_min, v_min, v_max, n_bits, is_net_byte_order,
	     /* is_increment */ 0);

      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  /* Remaining 0..3 buffers, one at a time. */
  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      set_1 (a0, v_min, v_min, v_max, n_bits, is_net_byte_order);

      ASSERT (validate_buffer_data (b0, s));
    }
}
219 
/*
 * Write an incrementing sequence v, v+1, v+2, ... of width n_bits at
 * byte_offset into each buffer, wrapping to v_min after v_max.
 * Optionally accumulates the sum of written values into *sum_result
 * (used by the caller to bump interface byte counters).
 * Returns the next value of v to continue from.
 *
 * NOTE(review): the "static_always_inline u64 / do_set_increment
 * (pg_main_t * pg," header lines and the vm declaration were lost in
 * extraction; reconstructed from upstream VPP — verify.
 */
static_always_inline u64
do_set_increment (pg_main_t * pg,
		  pg_stream_t * s,
		  u32 * buffers,
		  u32 n_buffers,
		  u32 n_bits,
		  u32 byte_offset,
		  u32 is_net_byte_order,
		  u32 want_sum, u64 * sum_result, u64 v_min, u64 v_max, u64 v)
{
  vlib_main_t *vm = vlib_get_main ();	/* reconstructed — verify */
  u64 sum = 0;

  ASSERT (v >= v_min && v <= v_max);

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;
      u64 v_old;

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      /* Only 2 consumed per iteration; b2/b3 are prefetch targets. */
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      /* Optimistically write v_old and v_old+1, assuming no wrap. */
      v_old = v;
      v = v_old + 2;
      v = v > v_max ? v_min : v;
      set_2 (a0, a1,
	     v_old + 0, v_old + 1, v_min, v_max, n_bits, is_net_byte_order,
	     /* is_increment */ 1);

      if (want_sum)
	sum += 2 * v_old + 1;

      /* Slow path: v_old+1 overflowed v_max — undo and re-write with wrap. */
      if (PREDICT_FALSE (v_old + 1 > v_max))
	{
	  if (want_sum)
	    sum -= 2 * v_old + 1;

	  v = v_old;
	  set_1 (a0, v + 0, v_min, v_max, n_bits, is_net_byte_order);
	  if (want_sum)
	    sum += v;
	  v += 1;

	  v = v > v_max ? v_min : v;
	  set_1 (a1, v + 0, v_min, v_max, n_bits, is_net_byte_order);
	  if (want_sum)
	    sum += v;
	  v += 1;
	}

      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  /* Remaining 0..3 buffers, one at a time. */
  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;
      u64 v_old;

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      v_old = v;
      if (want_sum)
	sum += v_old;
      v += 1;
      v = v > v_max ? v_min : v;

      ASSERT (v_old >= v_min && v_old <= v_max);
      set_1 (a0, v_old, v_min, v_max, n_bits, is_net_byte_order);

      ASSERT (validate_buffer_data (b0, s));
    }

  if (want_sum)
    *sum_result = sum;

  return v;
}
314 
/*
 * Write uniformly-distributed random values in [v_min, v_max] of width
 * n_bits at byte_offset into each buffer.  Random bytes come from the
 * vlib random buffer; a power-of-2 mask plus up to two conditional
 * subtractions of the range size folds them into [v_min, v_max].
 * Optionally accumulates the sum of written values into *sum_result.
 *
 * NOTE(review): the "static_always_inline void / do_set_random
 * (pg_main_t * pg," header lines and the vm declaration were lost in
 * extraction; reconstructed from upstream VPP — verify.
 */
static_always_inline void
do_set_random (pg_main_t * pg,
	       pg_stream_t * s,
	       u32 * buffers,
	       u32 n_buffers,
	       u32 n_bits,
	       u32 byte_offset,
	       u32 is_net_byte_order,
	       u32 want_sum, u64 * sum_result, u64 v_min, u64 v_max)
{
  vlib_main_t *vm = vlib_get_main ();	/* reconstructed — verify */
  u64 v_diff = v_max - v_min + 1;
  u64 r_mask = max_pow2 (v_diff) - 1;
  u64 v0, v1;
  u64 sum = 0;
  void *random_data;

  /* Grab enough random bytes for all n_buffers values up front. */
  random_data = clib_random_buffer_get_data
    (&vm->random_buffer, n_buffers * n_bits / BITS (u8));

  v0 = v1 = v_min;

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;
      u64 r0 = 0, r1 = 0;	/* warnings be gone */

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      /* Only 2 consumed per iteration; b2/b3 are prefetch targets. */
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      /* Pull two raw random words of the edit's width. */
      switch (n_bits)
	{
#define _(n) \
	  case BITS (u##n): \
	    { \
	      u##n * r = random_data; \
	      r0 = r[0]; \
	      r1 = r[1]; \
	      random_data = r + 2; \
	    } \
	  break;

	  _(8);
	  _(16);
	  _(32);
	  _(64);

#undef _
	}

      /* Add power of 2 sized random number which may be out of range. */
      v0 += r0 & r_mask;
      v1 += r1 & r_mask;

      /* Twice should be enough to reduce to v_min .. v_max range. */
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v1 = v1 > v_max ? v1 - v_diff : v1;
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v1 = v1 > v_max ? v1 - v_diff : v1;

      if (want_sum)
	sum += v0 + v1;

      set_2 (a0, a1, v0, v1, v_min, v_max, n_bits, is_net_byte_order,
	     /* is_increment */ 0);

      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  /* Remaining 0..3 buffers, one at a time. */
  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;
      u64 r0 = 0;		/* warnings be gone */

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      switch (n_bits)
	{
#define _(n) \
	  case BITS (u##n): \
	    { \
	      u##n * r = random_data; \
	      r0 = r[0]; \
	      random_data = r + 1; \
	    } \
	  break;

	  _(8);
	  _(16);
	  _(32);
	  _(64);

#undef _
	}

      /* Add power of 2 sized random number which may be out of range. */
      v0 += r0 & r_mask;

      /* Twice should be enough to reduce to v_min .. v_max range. */
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v0 = v0 > v_max ? v0 - v_diff : v0;

      if (want_sum)
	sum += v0;

      set_1 (a0, v0, v_min, v_max, n_bits, is_net_byte_order);

      ASSERT (validate_buffer_data (b0, s));
    }

  if (want_sum)
    *sum_result = sum;
}
444 
445 #define _(i,t) \
446  clib_mem_unaligned (a##i, t) = \
447  clib_host_to_net_##t ((clib_net_to_host_mem_##t (a##i) &~ mask) \
448  | (v##i << shift))
449 
450 always_inline void
451 setbits_1 (void *a0,
452  u64 v0,
453  u64 v_min, u64 v_max,
454  u32 max_bits, u32 n_bits, u64 mask, u32 shift)
455 {
456  ASSERT (v0 >= v_min && v0 <= v_max);
457  if (max_bits == BITS (u8))
458  ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
459 
460  else if (max_bits == BITS (u16))
461  {
462  _(0, u16);
463  }
464  else if (max_bits == BITS (u32))
465  {
466  _(0, u32);
467  }
468  else if (max_bits == BITS (u64))
469  {
470  _(0, u64);
471  }
472 }
473 
/*
 * Dual-buffer variant of setbits_1: read-modify-write the same bit
 * field (mask/shift within a max_bits word, network byte order) at a0
 * with v0 and at a1 with v1.  Uses the `_` macro defined above.
 * When is_increment is set, v1 may be one past v_max; the caller
 * rewrites the wrapped values afterwards.
 */
always_inline void
setbits_2 (void *a0, void *a1,
	   u64 v0, u64 v1,
	   u64 v_min, u64 v_max,
	   u32 max_bits, u32 n_bits, u64 mask, u32 shift, u32 is_increment)
{
  ASSERT (v0 >= v_min && v0 <= v_max);
  ASSERT (v1 >= v_min && v1 <= v_max + is_increment);
  if (max_bits == BITS (u8))
    {
      ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
      ((u8 *) a1)[0] = (((u8 *) a1)[0] & ~mask) | (v1 << shift);
    }

  else if (max_bits == BITS (u16))
    {
      _(0, u16);
      _(1, u16);
    }
  else if (max_bits == BITS (u32))
    {
      _(0, u32);
      _(1, u32);
    }
  else if (max_bits == BITS (u64))
    {
      _(0, u64);
      _(1, u64);
    }
}
504 
505 #undef _
506 
/*
 * Bit-field variant of do_set_fixed: write the fixed value v_min into
 * the (mask, shift) field of the max_bits word at byte_offset in every
 * buffer.  Same prefetching dual-loop structure as do_set_fixed.
 *
 * NOTE(review): the "static_always_inline void / do_setbits_fixed
 * (pg_main_t * pg," header lines and the vm declaration were lost in
 * extraction; reconstructed from upstream VPP — verify.
 */
static_always_inline void
do_setbits_fixed (pg_main_t * pg,
		  pg_stream_t * s,
		  u32 * buffers,
		  u32 n_buffers,
		  u32 max_bits,
		  u32 n_bits,
		  u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
{
  vlib_main_t *vm = vlib_get_main ();	/* reconstructed — verify */

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      /* Only 2 consumed per iteration; b2/b3 are prefetch targets. */
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      setbits_2 (a0, a1,
		 v_min, v_min, v_min, v_max, max_bits, n_bits, mask, shift,
		 /* is_increment */ 0);

      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  /* Remaining 0..3 buffers, one at a time. */
  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      setbits_1 (a0, v_min, v_min, v_max, max_bits, n_bits, mask, shift);
      ASSERT (validate_buffer_data (b0, s));
    }
}
558 
/*
 * Bit-field variant of do_set_increment: write v, v+1, ... into the
 * (mask, shift) field at byte_offset, wrapping to v_min after v_max.
 * Returns the next value of v.  No sum accumulation (bit fields never
 * feed the byte counters).
 *
 * NOTE(review): the "static_always_inline u64 / do_setbits_increment
 * (pg_main_t * pg," header lines and the vm declaration were lost in
 * extraction; reconstructed from upstream VPP — verify.
 */
static_always_inline u64
do_setbits_increment (pg_main_t * pg,
		      pg_stream_t * s,
		      u32 * buffers,
		      u32 n_buffers,
		      u32 max_bits,
		      u32 n_bits,
		      u32 byte_offset,
		      u64 v_min, u64 v_max, u64 v, u64 mask, u32 shift)
{
  vlib_main_t *vm = vlib_get_main ();	/* reconstructed — verify */

  ASSERT (v >= v_min && v <= v_max);

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;
      u64 v_old;

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      /* Only 2 consumed per iteration; b2/b3 are prefetch targets. */
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      /* Optimistically write v_old and v_old+1, assuming no wrap. */
      v_old = v;
      v = v_old + 2;
      v = v > v_max ? v_min : v;
      setbits_2 (a0, a1,
		 v_old + 0, v_old + 1,
		 v_min, v_max, max_bits, n_bits, mask, shift,
		 /* is_increment */ 1);

      /* Slow path: v_old+1 overflowed v_max — re-write with wrap. */
      if (PREDICT_FALSE (v_old + 1 > v_max))
	{
	  v = v_old;
	  setbits_1 (a0, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
	  v += 1;

	  v = v > v_max ? v_min : v;
	  setbits_1 (a1, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
	  v += 1;
	}
      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  /* Remaining 0..3 buffers, one at a time. */
  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;
      u64 v_old;

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      v_old = v;
      v = v_old + 1;
      v = v > v_max ? v_min : v;

      ASSERT (v_old >= v_min && v_old <= v_max);
      setbits_1 (a0, v_old, v_min, v_max, max_bits, n_bits, mask, shift);

      ASSERT (validate_buffer_data (b0, s));
    }

  return v;
}
637 
/*
 * Bit-field variant of do_set_random: write uniformly-distributed
 * random values in [v_min, v_max] into the (mask, shift) field at
 * byte_offset.  Same mask-and-fold range reduction as do_set_random.
 *
 * NOTE(review): the "static_always_inline void / do_setbits_random
 * (pg_main_t * pg," header lines and the vm declaration were lost in
 * extraction; reconstructed from upstream VPP — verify.
 */
static_always_inline void
do_setbits_random (pg_main_t * pg,
		   pg_stream_t * s,
		   u32 * buffers,
		   u32 n_buffers,
		   u32 max_bits,
		   u32 n_bits,
		   u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
{
  vlib_main_t *vm = vlib_get_main ();	/* reconstructed — verify */
  u64 v_diff = v_max - v_min + 1;
  u64 r_mask = max_pow2 (v_diff) - 1;
  u64 v0, v1;
  void *random_data;

  /* Grab enough random bytes for all n_buffers fields up front. */
  random_data = clib_random_buffer_get_data
    (&vm->random_buffer, n_buffers * max_bits / BITS (u8));
  v0 = v1 = v_min;

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;
      u64 r0 = 0, r1 = 0;	/* warnings be gone */

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      /* Only 2 consumed per iteration; b2/b3 are prefetch targets. */
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      /* Pull two raw random words of the containing word's width. */
      switch (max_bits)
	{
#define _(n) \
	  case BITS (u##n): \
	    { \
	      u##n * r = random_data; \
	      r0 = r[0]; \
	      r1 = r[1]; \
	      random_data = r + 2; \
	    } \
	  break;

	  _(8);
	  _(16);
	  _(32);
	  _(64);

#undef _
	}

      /* Add power of 2 sized random number which may be out of range. */
      v0 += r0 & r_mask;
      v1 += r1 & r_mask;

      /* Twice should be enough to reduce to v_min .. v_max range. */
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v1 = v1 > v_max ? v1 - v_diff : v1;
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v1 = v1 > v_max ? v1 - v_diff : v1;

      setbits_2 (a0, a1, v0, v1, v_min, v_max, max_bits, n_bits, mask, shift,
		 /* is_increment */ 0);

      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  /* Remaining 0..3 buffers, one at a time. */
  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;
      u64 r0 = 0;		/* warnings be gone */

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      switch (max_bits)
	{
#define _(n) \
	  case BITS (u##n): \
	    { \
	      u##n * r = random_data; \
	      r0 = r[0]; \
	      random_data = r + 1; \
	    } \
	  break;

	  _(8);
	  _(16);
	  _(32);
	  _(64);

#undef _
	}

      /* Add power of 2 sized random number which may be out of range. */
      v0 += r0 & r_mask;

      /* Twice should be enough to reduce to v_min .. v_max range. */
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v0 = v0 > v_max ? v0 - v_diff : v0;

      setbits_1 (a0, v0, v_min, v_max, max_bits, n_bits, mask, shift);

      ASSERT (validate_buffer_data (b0, s));
    }
}
755 
/*
 * Apply one edit (fixed / increment / random) spanning bits
 * [lo_bit, hi_bit) of the buffer to all buffers.  If the edit is
 * byte-aligned on both ends it dispatches to the whole-word do_set_*
 * helpers; otherwise it computes a containing power-of-2 word plus
 * (mask, shift) and dispatches to the do_setbits_* helpers.
 * Returns the updated increment cursor v.
 *
 * NOTE(review): the "do_it (pg_main_t * pg," signature line was lost in
 * extraction; reconstructed from the visible recursive call sites — verify.
 */
static u64
do_it (pg_main_t * pg,
       pg_stream_t * s,
       u32 * buffers,
       u32 n_buffers,
       u32 lo_bit, u32 hi_bit,
       u64 v_min, u64 v_max, u64 v, pg_edit_type_t edit_type)
{
  u32 max_bits, l0, l1, h1, start_bit;

  /* Degenerate range: treat as a fixed edit. */
  if (v_min == v_max)
    edit_type = PG_EDIT_FIXED;

  l0 = lo_bit / BITS (u8);	/* byte offset of low end */
  l1 = lo_bit % BITS (u8);	/* bit remainder at low end */
  h1 = hi_bit % BITS (u8);	/* bit remainder at high end */

  start_bit = l0 * BITS (u8);

  max_bits = hi_bit - start_bit;
  ASSERT (max_bits <= 64);

#define _(n) \
  case (n): \
  if (edit_type == PG_EDIT_INCREMENT) \
    v = do_set_increment (pg, s, buffers, n_buffers, \
			  BITS (u##n), \
			  l0, \
			  /* is_net_byte_order */ 1, \
			  /* want sum */ 0, 0, \
			  v_min, v_max, \
			  v); \
  else if (edit_type == PG_EDIT_RANDOM) \
    do_set_random (pg, s, buffers, n_buffers, \
		   BITS (u##n), \
		   l0, \
		   /* is_net_byte_order */ 1, \
		   /* want sum */ 0, 0, \
		   v_min, v_max); \
  else /* edit_type == PG_EDIT_FIXED */ \
    do_set_fixed (pg, s, buffers, n_buffers, \
		  BITS (u##n), \
		  l0, \
		  /* is_net_byte_order */ 1, \
		  v_min, v_max); \
  goto done;

  /* Fast path: edit is byte-aligned at both ends. */
  if (l1 == 0 && h1 == 0)
    {
      switch (max_bits)
	{
	  _(8);
	  _(16);
	  _(32);
	  _(64);
	}
    }

#undef _

  /* Slow path: sub-byte alignment — build a mask/shift inside the
     smallest power-of-2 word that contains the edit. */
  {
    u64 mask;
    u32 shift = l1;
    u32 n_bits = max_bits;

    max_bits = clib_max (max_pow2 (n_bits), 8);

    mask = ((u64) 1 << (u64) n_bits) - 1;
    mask &= ~(((u64) 1 << (u64) shift) - 1);

    mask <<= max_bits - n_bits;
    shift += max_bits - n_bits;

    switch (max_bits)
      {
#define _(n) \
      case (n): \
	if (edit_type == PG_EDIT_INCREMENT) \
	  v = do_setbits_increment (pg, s, buffers, n_buffers, \
				    BITS (u##n), n_bits, \
				    l0, v_min, v_max, v, \
				    mask, shift); \
	else if (edit_type == PG_EDIT_RANDOM) \
	  do_setbits_random (pg, s, buffers, n_buffers, \
			     BITS (u##n), n_bits, \
			     l0, v_min, v_max, \
			     mask, shift); \
	else /* edit_type == PG_EDIT_FIXED */ \
	  do_setbits_fixed (pg, s, buffers, n_buffers, \
			    BITS (u##n), n_bits, \
			    l0, v_min, v_max, \
			    mask, shift); \
	goto done;

	_(8);
	_(16);
	_(32);
	_(64);

#undef _
      }
  }

done:
  return v;
}
862 
/*
 * Set vlib current_length on each generated buffer according to the
 * stream's packet-size policy (fixed / increment / random), then bump
 * the RX combined counter on the stream's interface by the total bytes.
 * Reuses the do_set_* helpers by treating current_length as the target
 * "field" (host byte order, want_sum=1 to collect total bytes).
 *
 * NOTE(review): the function-name line, the assignment to
 * s->last_increment_packet_size, and the counter-update statements were
 * lost in extraction; reconstructed from upstream VPP — verify.
 */
static void
pg_generate_set_lengths (pg_main_t * pg,
			 pg_stream_t * s, u32 * buffers, u32 n_buffers)
{
  u64 v_min, v_max, length_sum;
  pg_edit_type_t edit_type;

  v_min = s->min_packet_bytes;
  v_max = s->max_packet_bytes;
  edit_type = s->packet_size_edit_type;

  if (edit_type == PG_EDIT_INCREMENT)
    s->last_increment_packet_size	/* reconstructed — verify */
      = do_set_increment (pg, s, buffers, n_buffers,
			  8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
			  STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
			  /* is_net_byte_order */ 0,
			  /* want sum */ 1, &length_sum,
			  v_min, v_max, s->last_increment_packet_size);

  else if (edit_type == PG_EDIT_RANDOM)
    do_set_random (pg, s, buffers, n_buffers,
		   8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
		   STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
		   /* is_net_byte_order */ 0,
		   /* want sum */ 1, &length_sum,
		   v_min, v_max);

  else				/* edit_type == PG_EDIT_FIXED */
    {
      do_set_fixed (pg, s, buffers, n_buffers,
		    8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
		    STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
		    /* is_net_byte_order */ 0,
		    v_min, v_max);
      length_sum = v_min * n_buffers;
    }

  /* Account the generated bytes as RX on the pg interface.
     NOTE(review): this counter block was partially lost in extraction;
     reconstructed from upstream VPP — verify. */
  {
    vnet_main_t *vnm = vnet_get_main ();
    vnet_interface_main_t *im = &vnm->interface_main;
    vnet_sw_interface_t *si =
      vnet_get_sw_interface (vnm, s->sw_if_index[VLIB_RX]);

    vlib_increment_combined_counter (im->combined_sw_if_counters
				     + VNET_INTERFACE_COUNTER_RX,
				     vlib_get_thread_index (),
				     si->sw_if_index, n_buffers, length_sum);
  }

}
914 
/*
 * For streams whose packets span multiple chained buffers: walk each
 * packet's buffer chain distributing the total packet length
 * (pre-set in the head buffer's current_length) across the chain,
 * fixing NEXT_PRESENT flags and freeing fully-unused tail buffers.
 *
 * NOTE(review): the function-name line and the vm declaration were lost
 * in extraction; reconstructed from upstream VPP — verify.  Also note
 * `unused_buffers` is a function-static vector — presumably safe because
 * generation runs per-thread on its own streams; confirm before reuse.
 */
static void
pg_generate_fix_multi_buffer_lengths (pg_main_t * pg,
				      pg_stream_t * s,
				      u32 * buffers, u32 n_buffers)
{
  vlib_main_t *vm = vlib_get_main ();	/* reconstructed — verify */
  pg_buffer_index_t *pbi;
  uword n_bytes_left;
  static u32 *unused_buffers = 0;

  while (n_buffers > 0)
    {
      vlib_buffer_t *b;
      u32 bi;

      bi = buffers[0];
      b = vlib_get_buffer (vm, bi);

      /* Current length here is length of whole packet. */
      n_bytes_left = b->current_length;

      pbi = s->buffer_indices;
      while (1)
	{
	  uword n = clib_min (n_bytes_left, s->buffer_bytes);

	  b->current_length = n;
	  n_bytes_left -= n;
	  if (n_bytes_left > 0)
	    b->flags |= VLIB_BUFFER_NEXT_PRESENT;
	  else
	    b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

	  /* Return unused buffers to fifos. */
	  if (n == 0)
	    vec_add1 (unused_buffers, bi);

	  pbi++;
	  if (pbi >= vec_end (s->buffer_indices))
	    break;

	  bi = b->next_buffer;
	  b = vlib_get_buffer (vm, bi);
	}
      ASSERT (n_bytes_left == 0);

      buffers += 1;
      n_buffers -= 1;
    }

  if (vec_len (unused_buffers) > 0)
    {
      vlib_buffer_free_no_next (vm, unused_buffers, vec_len (unused_buffers));
      _vec_len (unused_buffers) = 0;
    }
}
971 
/*
 * Apply all of the stream's non-fixed edits (increment / random) to the
 * freshly generated buffers, then run any per-group edit functions
 * (e.g. to compute IP lengths and checksums).
 *
 * NOTE(review): the function-name line, the vec_foreach header and the
 * assignment target e->last_increment_value were lost in extraction;
 * reconstructed from upstream VPP — verify.
 */
static void
pg_generate_edit (pg_main_t * pg,
		  pg_stream_t * s, u32 * buffers, u32 n_buffers)
{
  pg_edit_t *e;

  vec_foreach (e, s->non_fixed_edits)	/* reconstructed — verify */
  {
    switch (e->type)
      {
      case PG_EDIT_RANDOM:
      case PG_EDIT_INCREMENT:
	{
	  u32 lo_bit, hi_bit;
	  u64 v_min, v_max;

	  v_min = pg_edit_get_value (e, PG_EDIT_LO);
	  v_max = pg_edit_get_value (e, PG_EDIT_HI);

	  /* Translate the edit's lsb offset into absolute bit positions
	     within the vlib buffer's data area. */
	  hi_bit = (BITS (u8) * STRUCT_OFFSET_OF (vlib_buffer_t, data)
		    + BITS (u8) + e->lsb_bit_offset);
	  lo_bit = hi_bit - e->n_bits;

	  e->last_increment_value	/* reconstructed — verify */
	    = do_it (pg, s, buffers, n_buffers, lo_bit, hi_bit, v_min, v_max,
		     e->last_increment_value, e->type);
	}
	break;

      case PG_EDIT_UNSPECIFIED:
	break;

      default:
	/* Should not be any fixed edits left. */
	ASSERT (0);
	break;
      }
  }

  /* Call any edit functions to e.g. compute IP lengths, checksums, ... */
  {
    int i;
    for (i = vec_len (s->edit_groups) - 1; i >= 0; i--)
      {
	pg_edit_group_t *g = s->edit_groups + i;
	if (g->edit_function)
	  g->edit_function (pg, s, g, buffers, n_buffers);
      }
  }
}
1022 
/*
 * Chain each buffer in `buffers` to the corresponding buffer in
 * `next_buffers`: set NEXT_PRESENT and next_buffer on every head.
 * Processed two at a time with prefetch of the following pair.
 *
 * NOTE(review): the function-name line and the vm declaration were lost
 * in extraction; reconstructed from upstream VPP — verify.
 */
static void
pg_set_next_buffer_pointers (pg_main_t * pg,
			     pg_stream_t * s,
			     u32 * buffers, u32 * next_buffers, u32 n_buffers)
{
  vlib_main_t *vm = vlib_get_main ();	/* reconstructed — verify */

  while (n_buffers >= 4)
    {
      u32 ni0, ni1;
      vlib_buffer_t *b0, *b1;

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      ni0 = next_buffers[0];
      ni1 = next_buffers[1];

      vlib_prefetch_buffer_with_index (vm, buffers[2], WRITE);
      vlib_prefetch_buffer_with_index (vm, buffers[3], WRITE);

      b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b1->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b0->next_buffer = ni0;
      b1->next_buffer = ni1;

      buffers += 2;
      next_buffers += 2;
      n_buffers -= 2;
    }

  /* Remaining 0..3 buffers, one at a time. */
  while (n_buffers > 0)
    {
      u32 ni0;
      vlib_buffer_t *b0;

      b0 = vlib_get_buffer (vm, buffers[0]);
      ni0 = next_buffers[0];
      buffers += 1;
      next_buffers += 1;
      n_buffers -= 1;

      b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b0->next_buffer = ni0;
    }
}
1068 
/*
 * Initialize a batch of freshly-allocated buffers for a stream: set
 * buffer flags and RX/TX sw_if_index, and either copy the stream's
 * fixed packet data into each buffer (set_data != 0) or assert that
 * the existing contents still match the template (debug builds).
 * data_offset/n_data select which slice of the fixed data this buffer
 * level holds (for multi-buffer packets).
 *
 * NOTE(review): the "static_always_inline void / init_buffers_inline
 * (vlib_main_t * vm," header lines were lost in extraction;
 * reconstructed from the visible pg_stream_fill_helper call — verify.
 */
static_always_inline void
init_buffers_inline (vlib_main_t * vm,
		     pg_stream_t * s,
		     u32 * buffers,
		     u32 n_buffers, u32 data_offset, u32 n_data, u32 set_data)
{
  u32 n_left, *b;
  u8 *data, *mask;

  /* Replay streams are filled by pg_stream_fill_replay instead. */
  ASSERT (s->replay_packet_templates == 0);

  data = s->fixed_packet_data + data_offset;
  mask = s->fixed_packet_data_mask + data_offset;
  /* Clamp n_data so copies never run past the template vector. */
  if (data + n_data >= vec_end (s->fixed_packet_data))
    n_data = (data < vec_end (s->fixed_packet_data)
	      ? vec_end (s->fixed_packet_data) - data : 0);
  if (n_data > 0)
    {
      ASSERT (data + n_data <= vec_end (s->fixed_packet_data));
      ASSERT (mask + n_data <= vec_end (s->fixed_packet_data_mask));
    }

  n_left = n_buffers;
  b = buffers;

  while (n_left >= 4)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, b[2], STORE);
      vlib_prefetch_buffer_with_index (vm, b[3], STORE);

      bi0 = b[0];
      bi1 = b[1];
      b += 2;
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);
      b0->flags |= s->buffer_flags;
      b1->flags |= s->buffer_flags;

      vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	vnet_buffer (b1)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];

      vnet_buffer (b0)->sw_if_index[VLIB_TX] =
	vnet_buffer (b1)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];

      if (set_data)
	{
	  clib_memcpy_fast (b0->data, data, n_data);
	  clib_memcpy_fast (b1->data, data, n_data);
	}
      else
	{
	  ASSERT (validate_buffer_data2 (b0, s, data_offset, n_data));
	  ASSERT (validate_buffer_data2 (b1, s, data_offset, n_data));
	}
    }

  /* Remaining 0..3 buffers, one at a time. */
  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;

      bi0 = b[0];
      b += 1;
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      b0->flags |= s->buffer_flags;
      vnet_buffer (b0)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];

      if (set_data)
	clib_memcpy_fast (b0->data, data, n_data);
      else
	ASSERT (validate_buffer_data2 (b0, s, data_offset, n_data));
    }
}
1151 
/*
 * Allocate and initialize up to n_alloc buffers for one buffer level
 * (bi) of a stream.  Chains them to next_buffers when given, and for
 * the head level (bi == s->buffer_indices) also applies packet
 * lengths, multi-buffer fixups and the stream's edits.
 * Returns the number of buffers actually allocated (may be < n_alloc).
 *
 * NOTE(review): the "pg_stream_fill_helper (pg_main_t * pg," signature
 * line, the vm declaration and one further line were lost in
 * extraction; reconstructed from upstream VPP — verify.
 */
static u32
pg_stream_fill_helper (pg_main_t * pg,
		       pg_stream_t * s,
		       pg_buffer_index_t * bi,
		       u32 * buffers, u32 * next_buffers, u32 n_alloc)
{
  vlib_main_t *vm = vlib_get_main ();	/* reconstructed — verify */
  uword is_start_of_packet = bi == s->buffer_indices;
  u32 n_allocated;

  n_allocated = vlib_buffer_alloc (vm, buffers, n_alloc);
  if (n_allocated == 0)
    return 0;

  /*
   * We can't assume we got all the buffers we asked for...
   * This never worked until recently.
   */
  n_alloc = n_allocated;

  /* Reinitialize buffers */
  init_buffers_inline
    (vm, s,
     buffers,
     n_alloc, (bi - s->buffer_indices) * s->buffer_bytes /* data offset */ ,
     s->buffer_bytes,
     /* set_data */ 1);

  if (next_buffers)
    pg_set_next_buffer_pointers (pg, s, buffers, next_buffers, n_alloc);

  if (is_start_of_packet)
    {
      /* Head buffers carry packet-level state: lengths first, then
         chain fixups, then the variable edits. */
      pg_generate_set_lengths (pg, s, buffers, n_alloc);
      if (vec_len (s->buffer_indices) > 1)
	pg_generate_fix_multi_buffer_lengths (pg, s, buffers, n_alloc);

      pg_generate_edit (pg, s, buffers, n_alloc);
    }

  return n_alloc;
}
1196 
/*
 * Fill the stream's head FIFO with n_alloc packets replayed from the
 * stream's pcap templates.  Two passes over the (circular) template
 * list: first to count how many vlib buffers the next n_alloc packets
 * need, then to copy each template into a fresh buffer chain and push
 * the head buffer onto the FIFO.  Updates the interface RX counters
 * and the circular replay cursor.  Returns n_alloc, or 0 on allocation
 * failure.
 *
 * NOTE(review): the signature line, several local declarations
 * (vm/im/si), the i/l cursor initializations, the counter-update call
 * and the cursor modulo were lost in extraction; reconstructed from
 * upstream VPP — verify each marked line.
 */
static u32
pg_stream_fill_replay (pg_main_t * pg, pg_stream_t * s, u32 n_alloc)
{
  pg_buffer_index_t *bi;
  u32 n_left, i, l;
  u32 buffer_alloc_request = 0;
  u32 buffer_alloc_result;
  u32 current_buffer_index;
  u32 *buffers;
  vlib_main_t *vm = vlib_get_main ();	/* reconstructed — verify */
  vnet_main_t *vnm = vnet_get_main ();
  u32 buf_sz = vlib_buffer_get_default_data_size (vm);
  vnet_interface_main_t *im = &vnm->interface_main;	/* reconstructed */
  vnet_sw_interface_t *si;	/* reconstructed — verify */

  /* Reuse this thread's scratch vector of buffer indices. */
  buffers = pg->replay_buffers_by_thread[vm->thread_index];
  vec_reset_length (buffers);
  bi = s->buffer_indices;

  n_left = n_alloc;
  i = s->current_replay_packet_index;	/* reconstructed — verify */
  l = vec_len (s->replay_packet_templates);	/* reconstructed — verify */

  /* Figure out how many buffers we need */
  while (n_left > 0)
    {
      u8 *d0;

      d0 = vec_elt (s->replay_packet_templates, i);
      /* Packets larger than one buffer need a chain: round up. */
      buffer_alloc_request += (vec_len (d0) + (buf_sz - 1)) / buf_sz;

      i = ((i + 1) == l) ? 0 : i + 1;	/* circular template cursor */
      n_left--;
    }

  ASSERT (buffer_alloc_request > 0);
  vec_validate (buffers, buffer_alloc_request - 1);

  /* Allocate that many buffers */
  buffer_alloc_result = vlib_buffer_alloc (vm, buffers, buffer_alloc_request);
  if (buffer_alloc_result < buffer_alloc_request)
    {
      clib_warning ("alloc failure, got %d not %d", buffer_alloc_result,
		    buffer_alloc_request);
      vlib_buffer_free_no_next (vm, buffers, buffer_alloc_result);
      pg->replay_buffers_by_thread[vm->thread_index] = buffers;
      return 0;
    }

  /* Now go generate the buffers, and add them to the FIFO */
  n_left = n_alloc;

  current_buffer_index = 0;
  i = s->current_replay_packet_index;	/* reconstructed — verify */
  l = vec_len (s->replay_packet_templates);	/* reconstructed — verify */
  while (n_left > 0)
    {
      u8 *d0;
      int not_last;
      u32 data_offset;
      u32 bytes_to_copy, bytes_this_chunk;
      vlib_buffer_t *b;

      d0 = vec_elt (s->replay_packet_templates, i);
      data_offset = 0;
      bytes_to_copy = vec_len (d0);

      /* Add head chunk to pg fifo */
      clib_fifo_add1 (bi->buffer_fifo, buffers[current_buffer_index]);

      /* Copy the data */
      while (bytes_to_copy)
	{
	  bytes_this_chunk = clib_min (bytes_to_copy, buf_sz);
	  ASSERT (current_buffer_index < vec_len (buffers));
	  b = vlib_get_buffer (vm, buffers[current_buffer_index]);
	  clib_memcpy_fast (b->data, d0 + data_offset, bytes_this_chunk);
	  vnet_buffer (b)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
	  vnet_buffer (b)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];
	  b->flags = s->buffer_flags;
	  b->next_buffer = 0;
	  b->current_data = 0;
	  b->current_length = bytes_this_chunk;

	  /* Chain to the next allocated buffer if the template
	     doesn't fit in this one. */
	  not_last = bytes_this_chunk < bytes_to_copy;
	  if (not_last)
	    {
	      ASSERT (current_buffer_index < (vec_len (buffers) - 1));
	      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
	      b->next_buffer = buffers[current_buffer_index + 1];
	    }
	  bytes_to_copy -= bytes_this_chunk;
	  data_offset += bytes_this_chunk;
	  current_buffer_index++;
	}

      i = ((i + 1) == l) ? 0 : i + 1;	/* circular template cursor */
      n_left--;
    }

  /* Update the interface counters */
  si = vnet_get_sw_interface (vnm, s->sw_if_index[VLIB_RX]);
  l = 0;
  for (i = 0; i < n_alloc; i++)
    l += vlib_buffer_index_length_in_chain (vm, buffers[i]);
  vlib_increment_combined_counter (im->combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX,
				   vlib_get_thread_index (),
				   si->sw_if_index, n_alloc, l);

  s->current_replay_packet_index += n_alloc;
  s->current_replay_packet_index %=	/* reconstructed — verify */
    vec_len (s->replay_packet_templates);

  pg->replay_buffers_by_thread[vm->thread_index] = buffers;
  return n_alloc;
}
1313 
1314 
1315 static u32
1316 pg_stream_fill (pg_main_t * pg, pg_stream_t * s, u32 n_buffers)
1317 {
1318  pg_buffer_index_t *bi;
1319  word i, n_in_fifo, n_alloc, n_free, n_added;
1320  u32 *tail, *start, *end, *last_tail, *last_start;
1321 
1322  bi = s->buffer_indices;
1323 
1324  n_in_fifo = clib_fifo_elts (bi->buffer_fifo);
1325  if (n_in_fifo >= n_buffers)
1326  return n_in_fifo;
1327 
1328  n_alloc = n_buffers - n_in_fifo;
1329 
1330  /* Round up, but never generate more than limit. */
1331  n_alloc = clib_max (VLIB_FRAME_SIZE, n_alloc);
1332 
1333  if (s->n_packets_limit > 0
1334  && s->n_packets_generated + n_in_fifo + n_alloc >= s->n_packets_limit)
1335  {
1336  n_alloc = s->n_packets_limit - s->n_packets_generated - n_in_fifo;
1337  if (n_alloc < 0)
1338  n_alloc = 0;
1339  }
1340 
1341  /*
1342  * Handle pcap replay directly
1343  */
1344  if (s->replay_packet_templates)
1345  return pg_stream_fill_replay (pg, s, n_alloc);
1346 
1347  /* All buffer fifos should have the same size. */
1348  if (CLIB_DEBUG > 0)
1349  {
1350  uword l = ~0, e;
1351  vec_foreach (bi, s->buffer_indices)
1352  {
1353  e = clib_fifo_elts (bi->buffer_fifo);
1354  if (bi == s->buffer_indices)
1355  l = e;
1356  ASSERT (l == e);
1357  }
1358  }
1359 
1360  last_tail = last_start = 0;
1361  n_added = n_alloc;
1362 
1363  for (i = vec_len (s->buffer_indices) - 1; i >= 0; i--)
1364  {
1365  bi = vec_elt_at_index (s->buffer_indices, i);
1366 
1367  n_free = clib_fifo_free_elts (bi->buffer_fifo);
1368  if (n_free < n_alloc)
1369  clib_fifo_resize (bi->buffer_fifo, n_alloc - n_free);
1370 
1371  tail = clib_fifo_advance_tail (bi->buffer_fifo, n_alloc);
1372  start = bi->buffer_fifo;
1373  end = clib_fifo_end (bi->buffer_fifo);
1374 
1375  if (tail + n_alloc <= end)
1376  {
1377  n_added =
1378  pg_stream_fill_helper (pg, s, bi, tail, last_tail, n_alloc);
1379  }
1380  else
1381  {
1382  u32 n = clib_min (end - tail, n_alloc);
1383  n_added = pg_stream_fill_helper (pg, s, bi, tail, last_tail, n);
1384 
1385  if (n_added == n && n_alloc > n_added)
1386  {
1387  n_added += pg_stream_fill_helper
1388  (pg, s, bi, start, last_start, n_alloc - n_added);
1389  }
1390  }
1391 
1392  if (PREDICT_FALSE (n_added < n_alloc))
1393  tail = clib_fifo_advance_tail (bi->buffer_fifo, n_added - n_alloc);
1394 
1395  last_tail = tail;
1396  last_start = start;
1397 
1398  /* Verify that pkts in the fifo are properly allocated */
1399  }
1400 
1401  return n_in_fifo + n_added;
1402 }
1403 
1404 typedef struct
1405 {
1407 
1410 
1411  /* Use pre data for packet data. */
1414 
/* Trace formatter for the pg-input node: renders one pg_input_trace_t
   record (stream name or index, packet length, rx sw_if_index, and a
   dump of the captured buffer head) into format string s.
   NOTE(review): this listing was extracted from generated HTML; a few
   source lines are missing below and are flagged where they occur. */
1415 static u8 *
1416 format_pg_input_trace (u8 * s, va_list * va)
1417 {
1418  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
1419  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
1420  pg_input_trace_t *t = va_arg (*va, pg_input_trace_t *);
1421  pg_main_t *pg = &pg_main;
1422  pg_stream_t *stream;
1423  vlib_node_t *n;
1424  u32 indent = format_get_indent (s);
1425 
     /* Look up the stream only if its pool slot is still live; the
        stream may have been deleted since the trace was captured. */
1426  stream = 0;
1427  if (!pool_is_free_index (pg->streams, t->stream_index))
1428  stream = pool_elt_at_index (pg->streams, t->stream_index);
1429 
1430  if (stream)
1431  s = format (s, "stream %v", pg->streams[t->stream_index].name);
1432  else
1433  s = format (s, "stream %d", t->stream_index);
1434 
1435  s = format (s, ", %d bytes", t->packet_length);
1436  s = format (s, ", sw_if_index %d", t->sw_if_index);
1437 
     /* NOTE(review): source line 1439 (the %U argument pair for this
        format call — presumably format_vnet_buffer on t->buffer) was
        dropped by extraction; confirm against upstream input.c. */
1438  s = format (s, "\n%U%U",
1440 
1441  s = format (s, "\n%U", format_white_space, indent);
1442 
     /* Prefer the target node's own buffer formatter when available. */
1443  n = 0;
1444  if (stream)
1445  n = vlib_get_node (vm, stream->node_index);
1446 
1447  if (n && n->format_buffer)
1448  s = format (s, "%U", n->format_buffer,
1449  t->buffer.pre_data, sizeof (t->buffer.pre_data));
1450  else
     /* NOTE(review): source line 1452 (likely format_hex_bytes and
        t->buffer.pre_data) was dropped by extraction. */
1451  s = format (s, "%U",
1453  ARRAY_LEN (t->buffer.pre_data));
1454  return s;
1455 }
1456 
/* Record trace entries for n_buffers generated buffers: two at a time
   in the main loop, then a single-buffer cleanup loop.  For each buffer
   it calls vlib_trace_buffer(), allocates a pg_input_trace_t record and
   copies the buffer header plus head bytes into it.
   NOTE(review): extracted listing — the signature head line (1458) and
   several interior lines are missing; flagged where they occur. */
1457 static void
1459  vlib_node_runtime_t * node, u32 stream_index, u32 next_index,
1460  u32 * buffers, u32 n_buffers)
1461 {
     /* NOTE(review): line 1462 (likely `vlib_main_t *vm = ...`) was
        dropped by extraction. */
1463  u32 *b, n_left;
1464 
1465  n_left = n_buffers;
1466  b = buffers;
1467 
     /* Dual-buffer loop: process pairs for throughput. */
1468  while (n_left >= 2)
1469  {
1470  u32 bi0, bi1;
1471  vlib_buffer_t *b0, *b1;
1472  pg_input_trace_t *t0, *t1;
1473 
1474  bi0 = b[0];
1475  bi1 = b[1];
1476  b += 2;
1477  n_left -= 2;
1478 
1479  b0 = vlib_get_buffer (vm, bi0);
1480  b1 = vlib_get_buffer (vm, bi1);
1481 
1482  vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 1);
1483  vlib_trace_buffer (vm, node, next_index, b1, /* follow_chain */ 1);
1484 
1485  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
1486  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
1487 
1488  t0->stream_index = stream_index;
1489  t1->stream_index = stream_index;
1490 
     /* NOTE(review): lines 1491-1492 (presumably the packet_length
        assignments via vlib_buffer_length_in_chain) were dropped. */
1493 
1494  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1495  t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1496 
     /* Copy the buffer metadata, excluding the pre_data region. */
1497  clib_memcpy_fast (&t0->buffer, b0,
1498  sizeof (b0[0]) - sizeof (b0->pre_data));
1499  clib_memcpy_fast (&t1->buffer, b1,
1500  sizeof (b1[0]) - sizeof (b1->pre_data));
1501 
     /* NOTE(review): lines 1502/1504 (the clib_memcpy_fast calls that
        copy packet head bytes into t->buffer.pre_data) were dropped. */
1503  sizeof (t0->buffer.pre_data));
1505  sizeof (t1->buffer.pre_data));
1506  }
1507 
     /* Single-buffer cleanup loop for the remainder. */
1508  while (n_left >= 1)
1509  {
1510  u32 bi0;
1511  vlib_buffer_t *b0;
1512  pg_input_trace_t *t0;
1513 
1514  bi0 = b[0];
1515  b += 1;
1516  n_left -= 1;
1517 
1518  b0 = vlib_get_buffer (vm, bi0);
1519 
1520  vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 1);
1521  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
1522 
1523  t0->stream_index = stream_index;
     /* NOTE(review): line 1524 (packet_length assignment) was dropped. */
1525  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1526  clib_memcpy_fast (&t0->buffer, b0,
1527  sizeof (b0[0]) - sizeof (b0->pre_data));
     /* NOTE(review): line 1528 (pre_data copy call head) was dropped. */
1529  sizeof (t0->buffer.pre_data));
1530  }
1531 }
1532 
/* Parse each buffer's Ethernet/VLAN/IP headers and set checksum-offload
   (and, for chained TCP buffers with gso_enabled, GSO) metadata flags.
   NOTE(review): extracted listing — the signature head (lines 1533-1534,
   `static_always_inline void fill_buffer_offload_flags (vlib_main_t *vm,
   u32 *buffers, u32 n_buffers,` per the symbol index) is missing. */
1535  int gso_enabled, u32 gso_size)
1536 {
1537  for (int i = 0; i < n_buffers; i++)
1538  {
1539  vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[i]);
1540  u8 l4_proto = 0;
1541 
     /* NOTE(review): line 1543 (the vlib_buffer_get_current cast that
        initializes eh) was dropped by extraction. */
1542  ethernet_header_t *eh =
1544  u16 ethertype = clib_net_to_host_u16 (eh->type);
1545  u16 l2hdr_sz = sizeof (ethernet_header_t);
1546 
     /* Skip over up to two VLAN tags (802.1Q / QinQ). */
1547  if (ethernet_frame_is_tagged (ethertype))
1548  {
1549  ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);
1550 
1551  ethertype = clib_net_to_host_u16 (vlan->type);
1552  l2hdr_sz += sizeof (*vlan);
1553  if (ethertype == ETHERNET_TYPE_VLAN)
1554  {
1555  vlan++;
1556  ethertype = clib_net_to_host_u16 (vlan->type);
1557  l2hdr_sz += sizeof (*vlan);
1558  }
1559  }
1560 
1561  vnet_buffer (b0)->l2_hdr_offset = 0;
1562  vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
1563 
     /* IPv4: l4 offset depends on the variable-length IHL. */
1564  if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
1565  {
1566  ip4_header_t *ip4 =
1567  (ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
1568  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
1569  l4_proto = ip4->protocol;
1570  b0->flags |=
1571  (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
1572  b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID
1573  | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
1574  VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
1575  }
     /* IPv6: fixed 40-byte header; extension headers not walked. */
1576  else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
1577  {
1578  ip6_header_t *ip6 =
1579  (ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
1580  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
1581  /* FIXME IPv6 EH traversal */
1582  l4_proto = ip6->protocol;
1583  b0->flags |=
1584  (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
1585  VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
1586  VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
1587  }
1588 
1589  if (l4_proto == IP_PROTOCOL_TCP)
1590  {
1591  b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
1592 
1593  /* only set GSO flag for chained buffers */
1594  if (gso_enabled && (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
1595  {
1596  b0->flags |= VNET_BUFFER_F_GSO;
     /* NOTE(review): line 1598 (the cast initializing tcp from the
        buffer at l4_hdr_offset) was dropped by extraction. */
1597  tcp_header_t *tcp =
1599  vnet_buffer (b0)->l4_hdr_offset);
1600  vnet_buffer2 (b0)->gso_l4_hdr_sz = tcp_header_bytes (tcp);
1601  vnet_buffer2 (b0)->gso_size = gso_size;
1602  }
1603  }
1604  else if (l4_proto == IP_PROTOCOL_UDP)
1605  {
1606  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
1607  }
1608  }
1609 }
1610 
/* Core generation path: pull up to n_packets_to_generate buffer indices
   from the stream's head FIFO, hand them to the next node frame by frame,
   apply device-input feature-arc redirection and offload flags, and trace
   a sample if tracing is armed.  Returns the number of packets emitted.
   NOTE(review): extracted listing — the signature head (line 1612,
   `pg_generate_packets (vlib_node_runtime_t * node,` per the symbol
   index) and several interior lines are missing; flagged below. */
1611 static uword
1613  pg_main_t * pg,
1614  pg_stream_t * s, uword n_packets_to_generate)
1615 {
     /* NOTE(review): line 1616 (likely `vlib_main_t *vm = ...`) dropped. */
1617  u32 *to_next, n_this_frame, n_left, n_trace, n_packets_in_fifo;
1618  uword n_packets_generated;
1619  pg_buffer_index_t *bi, *bi0;
1620  u32 next_index = s->next_index;
     /* NOTE(review): lines 1621-1622 (declarations of `pi` — the
        pg_interface_t used below — and the feature main `fm`) dropped. */
1623  u8 feature_arc_index = fm->device_input_feature_arc_index;
1624  cm = &fm->feature_config_mains[feature_arc_index];
1625  u32 current_config_index = ~(u32) 0;
     /* NOTE(review): line 1626 dropped. */
1627  int i;
1628 
1629  bi0 = s->buffer_indices;
1630 
     /* Refill the FIFOs first; we can only emit what is queued. */
1631  n_packets_in_fifo = pg_stream_fill (pg, s, n_packets_to_generate);
1632  n_packets_to_generate = clib_min (n_packets_in_fifo, n_packets_to_generate);
1633  n_packets_generated = 0;
1634 
     /* If device-input features are enabled on the rx interface, fetch
        the first feature config and redirect next_index through the arc. */
1635  if (PREDICT_FALSE
1636  (vnet_have_features (feature_arc_index, s->sw_if_index[VLIB_RX])))
1637  {
1638  current_config_index =
     /* NOTE(review): line 1639 (rhs of the assignment above) dropped. */
1640  vnet_get_config_data (&cm->config_main, &current_config_index,
1641  &next_index, 0);
1642  }
1643 
     /* NOTE(review): line 1645 (body of the coalesce branch — presumably
        scheduling the GRO flow table) dropped. */
1644  if (PREDICT_FALSE (pi->coalesce_enabled))
1646 
1647  while (n_packets_to_generate > 0)
1648  {
1649  u32 *head, *start, *end;
1650 
     /* NOTE(review): lines 1650-1651 (the condition selecting the
        ethernet-input fast path with scalar frame args) dropped. */
1652  {
1653  vlib_next_frame_t *nf;
1654  vlib_frame_t *f;
     /* NOTE(review): line 1655 (ethernet_input_frame_t *ef decl) dropped. */
1656  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left);
1657  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
1658  f = vlib_get_frame (vm, nf->frame);
     /* NOTE(review): line 1659 (likely setting f->flags, e.g.
        ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX) dropped. */
1660 
1661  ef = vlib_frame_scalar_args (f);
1662  ef->sw_if_index = pi->sw_if_index;
1663  ef->hw_if_index = pi->hw_if_index;
     /* NOTE(review): line 1664 (likely vlib_frame_no_append (f)) dropped. */
1665  }
1666  else
1667  vlib_get_next_frame (vm, node, next_index, to_next, n_left);
1668 
1669  n_this_frame = n_packets_to_generate;
1670  if (n_this_frame > n_left)
1671  n_this_frame = n_left;
1672 
     /* Copy indices out of the head fifo, handling wrap-around. */
1673  start = bi0->buffer_fifo;
1674  end = clib_fifo_end (bi0->buffer_fifo);
1675  head = clib_fifo_head (bi0->buffer_fifo);
1676 
1677  if (head + n_this_frame <= end)
1678  vlib_buffer_copy_indices (to_next, head, n_this_frame);
1679  else
1680  {
1681  u32 n = end - head;
1682  vlib_buffer_copy_indices (to_next + 0, head, n);
1683  vlib_buffer_copy_indices (to_next + n, start, n_this_frame - n);
1684  }
1685 
     /* Non-replay streams advance every segment fifo in lock-step;
        replay streams only maintain the head fifo. */
1686  if (s->replay_packet_templates == 0)
1687  {
1688  vec_foreach (bi, s->buffer_indices)
1689  clib_fifo_advance_head (bi->buffer_fifo, n_this_frame);
1690  }
1691  else
1692  {
1693  clib_fifo_advance_head (bi0->buffer_fifo, n_this_frame);
1694  }
1695 
     /* Stamp feature-arc bookkeeping on each buffer when features are on. */
1696  if (current_config_index != ~(u32) 0)
1697  for (i = 0; i < n_this_frame; i++)
1698  {
1699  vlib_buffer_t *b;
1700  b = vlib_get_buffer (vm, to_next[i]);
1701  b->current_config_index = current_config_index;
1702  vnet_buffer (b)->feature_arc_index = feature_arc_index;
1703  }
1704 
1705  if (pi->gso_enabled ||
1706  (s->buffer_flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
1707  VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
1708  VNET_BUFFER_F_OFFLOAD_IP_CKSUM)))
1709  {
1710  fill_buffer_offload_flags (vm, to_next, n_this_frame,
1711  pi->gso_enabled, pi->gso_size);
1712  }
1713 
     /* Trace at most the remaining armed trace count from this frame. */
1714  n_trace = vlib_get_trace_count (vm, node);
1715  if (n_trace > 0)
1716  {
1717  u32 n = clib_min (n_trace, n_this_frame);
1718  pg_input_trace (pg, node, s - pg->streams, next_index, to_next, n);
1719  vlib_set_trace_count (vm, node, n_trace - n);
1720  }
1721  n_packets_to_generate -= n_this_frame;
1722  n_packets_generated += n_this_frame;
1723  n_left -= n_this_frame;
     /* Debug-only sanity check on buffer chaining of emitted packets. */
1724  if (CLIB_DEBUG > 0)
1725  {
1726  int i;
1727  vlib_buffer_t *b;
1728 
1729  for (i = 0; i < n_this_frame; i++)
1730  {
1731  b = vlib_get_buffer (vm, to_next[i]);
     /* NOTE(review): line 1733 (second operand of the || in this
        ASSERT, likely a minimum-segment-size check) dropped. */
1732  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0 ||
1734  }
1735  }
1736  vlib_put_next_frame (vm, node, next_index, n_left);
1737  }
1738 
1739  return n_packets_generated;
1740 }
1741 
/* Per-stream dispatch: apply the packet-count limit and the
   packets-per-second rate limit, then generate at most one frame's worth
   of packets via pg_generate_packets().  Returns packets generated.
   NOTE(review): extracted listing — the signature line (1743,
   `pg_input_stream (vlib_node_runtime_t * node, pg_main_t * pg,
   pg_stream_t * s)` per the symbol index) is missing. */
1742 static uword
1744 {
     /* NOTE(review): line 1745 (likely `vlib_main_t *vm = ...`) dropped. */
1746  uword n_packets;
1747  f64 time_now, dt;
1748 
     /* Limit reached: disable the stream and emit nothing. */
1749  if (s->n_packets_limit > 0 && s->n_packets_generated >= s->n_packets_limit)
1750  {
1751  pg_stream_enable_disable (pg, s, /* want_enabled */ 0);
1752  return 0;
1753  }
1754 
1755  /* Apply rate limit. */
1756  time_now = vlib_time_now (vm);
1757  if (s->time_last_generate == 0)
1758  s->time_last_generate = time_now;
1759 
1760  dt = time_now - s->time_last_generate;
1761  s->time_last_generate = time_now;
1762 
1763  n_packets = VLIB_FRAME_SIZE;
1764  if (s->rate_packets_per_second > 0)
1765  {
     /* NOTE(review): line 1766 (accumulator update, presumably
        `s->packet_accumulator += dt * s->rate_packets_per_second;`)
        was dropped by extraction. */
1767  n_packets = s->packet_accumulator;
1768 
1769  /* Never allow accumulator to grow if we get behind. */
1770  s->packet_accumulator -= n_packets;
1771  }
1772 
1773  /* Apply fixed limit. */
1774  if (s->n_packets_limit > 0
1775  && s->n_packets_generated + n_packets > s->n_packets_limit)
1776  n_packets = s->n_packets_limit - s->n_packets_generated;
1777 
1778  /* Generate up to one frame's worth of packets. */
1779  if (n_packets > s->n_max_frame)
1780  n_packets = s->n_max_frame;
1781 
1782  if (n_packets > 0)
1783  n_packets = pg_generate_packets (node, pg, s, n_packets);
1784 
1785  s->n_packets_generated += n_packets;
1786 
1787  return n_packets;
1788 }
1789 
/* pg-input node function: iterate the streams enabled for this worker
   and generate packets for each.  Returns the total packet count for
   this dispatch.
   NOTE(review): extracted listing — the signature line (1791,
   `pg_input (vlib_main_t * vm, vlib_node_runtime_t * node,
   vlib_frame_t * frame)` per the symbol index) is missing. */
1790 uword
1792 {
1793  uword i;
1794  pg_main_t *pg = &pg_main;
1795  uword n_packets = 0;
1796  u32 worker_index = 0;
1797 
     /* Streams are partitioned per worker; main thread uses index 0. */
1798  if (vlib_num_workers ())
1799  worker_index = vlib_get_current_worker_index ();
1800 
1801  /* *INDENT-OFF* */
1802  clib_bitmap_foreach (i, pg->enabled_streams[worker_index], ({
1803  pg_stream_t *s = vec_elt_at_index (pg->streams, i);
1804  n_packets += pg_input_stream (node, pg, s);
1805  }));
1806  /* *INDENT-ON* */
1807 
1808  return n_packets;
1809 }
1810 
/* Node registration for pg-input: an input node, sibling of
   device-input, disabled until a stream is enabled.
   NOTE(review): extracted listing — the opening line (1812,
   `VLIB_REGISTER_NODE (pg_input_node) = {`) and line 1814 (likely a
   `.flags` initializer) were dropped by extraction. */
1811 /* *INDENT-OFF* */
1813  .function = pg_input,
1815  .name = "pg-input",
1816  .sibling_of = "device-input",
1817  .type = VLIB_NODE_TYPE_INPUT,
1818 
1819  .format_trace = format_pg_input_trace,
1820 
1821  /* Input node will be left disabled until a stream is active. */
1822  .state = VLIB_NODE_STATE_DISABLED,
1823 };
1824 /* *INDENT-ON* */
1825 
1826 /*
1827  * fd.io coding-style-patch-verification: ON
1828  *
1829  * Local Variables:
1830  * eval: (c-set-style "gnu")
1831  * End:
1832  */
vnet_config_main_t config_main
Definition: feature.h:82
u32 sw_if_index
Definition: pg.h:304
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:509
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
Definition: edit.h:64
static uword pg_generate_packets(vlib_node_runtime_t *node, pg_main_t *pg, pg_stream_t *s, uword n_packets_to_generate)
Definition: input.c:1612
u32 next_index
Definition: pg.h:151
#define PG_EDIT_LO
Definition: edit.h:83
u64 n_packets_limit
Definition: pg.h:160
#define clib_min(x, y)
Definition: clib.h:327
#define clib_fifo_head(v)
Definition: fifo.h:254
#define CLIB_UNUSED(x)
Definition: clib.h:87
u8 * fixed_packet_data
Definition: pg.h:121
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:193
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
static uword clib_fifo_elts(void *v)
Definition: fifo.h:66
vnet_main_t * vnet_get_main(void)
Definition: misc.c:46
static void setbits_1(void *a0, u64 v0, u64 v_min, u64 v_max, u32 max_bits, u32 n_bits, u64 mask, u32 shift)
Definition: input.c:451
#define vnet_buffer2(b)
Definition: buffer.h:482
Definition: pg.h:324
static void pg_generate_fix_multi_buffer_lengths(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers)
Definition: input.c:916
vnet_interface_main_t interface_main
Definition: vnet.h:59
static u64 do_it(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 lo_bit, u32 hi_bit, u64 v_min, u64 v_max, u64 v, pg_edit_type_t edit_type)
Definition: input.c:757
#define PREDICT_TRUE(x)
Definition: clib.h:121
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:110
unsigned long u64
Definition: types.h:89
pg_edit_group_t * edit_groups
Definition: pg.h:107
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
#define clib_fifo_advance_tail(f, n_elts)
Definition: fifo.h:161
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
Definition: node.h:305
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:333
#define clib_fifo_resize(f, n_elts)
Definition: fifo.h:101
static void * clib_random_buffer_get_data(clib_random_buffer_t *b, uword n_bytes)
Definition: random_buffer.h:83
void(* edit_function)(struct pg_main_t *pg, struct pg_stream_t *s, struct pg_edit_group_t *g, u32 *buffers, u32 n_buffers)
Definition: pg.h:74
pg_edit_type_t
Definition: edit.h:46
u32 thread_index
Definition: main.h:249
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, vlib_frame_t *f)
Definition: node_funcs.h:269
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:592
static_always_inline u64 do_set_increment(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 n_bits, u32 byte_offset, u32 is_net_byte_order, u32 want_sum, u64 *sum_result, u64 v_min, u64 v_max, u64 v)
Definition: input.c:221
static void pg_generate_set_lengths(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers)
Definition: input.c:864
u8 coalesce_enabled
Definition: pg.h:309
u32 n_max_frame
Definition: pg.h:163
static u32 format_get_indent(u8 *s)
Definition: format.h:72
#define STRUCT_OFFSET_OF(t, f)
Definition: clib.h:70
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
vlib_main_t * vm
Definition: in2out_ed.c:1582
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
u64 last_increment_value
Definition: edit.h:87
static_always_inline int vnet_have_features(u8 arc, u32 sw_if_index)
Definition: feature.h:251
pg_buffer_index_t * buffer_indices
Definition: pg.h:173
u32 hw_if_index
Definition: pg.h:304
static_always_inline void init_buffers_inline(vlib_main_t *vm, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 data_offset, u32 n_data, u32 set_data)
Definition: input.c:1070
u16 mask
Definition: flow_types.api:52
pg_edit_type_t packet_size_edit_type
Definition: pg.h:109
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:402
struct _tcp_header tcp_header_t
unsigned char u8
Definition: types.h:56
u8 * fixed_packet_data_mask
Definition: pg.h:121
u8 data[128]
Definition: ipsec_types.api:89
static_always_inline void do_set_fixed(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 n_bits, u32 byte_offset, u32 is_net_byte_order, u64 v_min, u64 v_max)
Definition: input.c:170
static void set_1(void *a0, u64 v0, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order)
Definition: input.c:96
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
gro_flow_table_t * flow_table
Definition: pg.h:310
double f64
Definition: types.h:142
#define fm
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:136
static void pg_generate_edit(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers)
Definition: input.c:973
#define PG_EDIT_HI
Definition: edit.h:84
u32 gso_size
Definition: pg.h:312
#define static_always_inline
Definition: clib.h:108
static void pg_set_next_buffer_pointers(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 *next_buffers, u32 n_buffers)
Definition: input.c:1024
i64 word
Definition: types.h:111
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index The first 64 bytes of buffer contains most header informatio...
Definition: buffer_funcs.h:476
static_always_inline u64 do_setbits_increment(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 max_bits, u32 n_bits, u32 byte_offset, u64 v_min, u64 v_max, u64 v, u64 mask, u32 shift)
Definition: input.c:560
vl_api_ip6_address_t ip6
Definition: one.api:424
static_always_inline void do_setbits_fixed(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 max_bits, u32 n_bits, u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
Definition: input.c:508
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
Definition: ethernet.h:52
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:881
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
static uword clib_fifo_free_elts(void *v)
Definition: fifo.h:82
pg_edit_type_t type
Definition: edit.h:66
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u8 * format_hex_bytes(u8 *s, va_list *va)
Definition: std-formats.c:84
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
Definition: node_funcs.h:396
static uword pg_input_stream(vlib_node_runtime_t *node, pg_main_t *pg, pg_stream_t *s)
Definition: input.c:1743
i32 lsb_bit_offset
Definition: edit.h:73
unsigned int u32
Definition: types.h:88
static u32 vlib_get_current_worker_index()
Definition: threads.h:395
#define vec_end(v)
End (last data address) of vector.
static int validate_buffer_data(vlib_buffer_t *b, pg_stream_t *s)
Definition: input.c:90
#define VLIB_FRAME_SIZE
Definition: node.h:377
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
Definition: node_funcs.h:317
u32 buffer_bytes
Definition: pg.h:125
vnet_crypto_main_t * cm
Definition: quic_crypto.c:53
f64 packet_accumulator
Definition: pg.h:171
u32 last_increment_packet_size
Definition: pg.h:131
#define clib_bitmap_foreach(i, ai, body)
Macro to iterate across set bits in a bitmap.
Definition: bitmap.h:361
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:534
u32 pg_if_index
Definition: pg.h:134
format_function_t format_vnet_buffer
Definition: buffer.h:498
u32 current_replay_packet_index
Definition: pg.h:177
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:677
u32 ** replay_buffers_by_thread
Definition: pg.h:340
u8 * name
Definition: pg.h:98
unsigned short u16
Definition: types.h:57
static u64 pg_edit_get_value(pg_edit_t *e, int hi_or_lo)
Definition: edit.h:173
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
static void * vnet_get_config_data(vnet_config_main_t *cm, u32 *config_index, u32 *next_index, u32 n_data_bytes)
Definition: config.h:123
#define PREDICT_FALSE(x)
Definition: clib.h:120
#define always_inline
Definition: ipsec.h:28
vl_api_ip4_address_t ip4
Definition: one.api:376
static_always_inline void vnet_gro_flow_table_schedule_node_on_dispatcher(vlib_main_t *vm, gro_flow_table_t *flow_table)
Definition: gro_func.h:385
format_function_t * format_buffer
Definition: node.h:357
static void pg_input_trace(pg_main_t *pg, vlib_node_runtime_t *node, u32 stream_index, u32 next_index, u32 *buffers, u32 n_buffers)
Definition: input.c:1458
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:391
uword ** enabled_streams
Definition: pg.h:330
u32 n_bits
Definition: edit.h:79
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
Definition: buffer_funcs.h:96
vlib_buffer_t buffer
Definition: input.c:1412
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
static void vlib_buffer_free_no_next(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers, does not free the buffer chain for each buffer.
Definition: buffer_funcs.h:954
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:219
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
static_always_inline void do_set_random(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 n_bits, u32 byte_offset, u32 is_net_byte_order, u32 want_sum, u64 *sum_result, u64 v_min, u64 v_max)
Definition: input.c:316
u8 gso_enabled
Definition: pg.h:311
static u32 pg_stream_fill(pg_main_t *pg, pg_stream_t *s, u32 n_buffers)
Definition: input.c:1316
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
u32 min_packet_bytes
Definition: pg.h:112
u32 max_packet_bytes
Definition: pg.h:112
#define clib_warning(format, args...)
Definition: error.h:59
u8 data[]
Packet data.
Definition: buffer.h:181
static void set_2(void *a0, void *a1, u64 v0, u64 v1, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order, u32 is_increment)
Definition: input.c:125
static uword max_pow2(uword x)
Definition: clib.h:243
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
Definition: node_funcs.h:311
#define pool_is_free_index(P, I)
Use free bitmap to query whether given index is free.
Definition: pool.h:299
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
Definition: buffer.h:147
#define ARRAY_LEN(x)
Definition: clib.h:67
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:483
pg_edit_t * non_fixed_edits
Definition: pg.h:116
u8 ** replay_packet_templates
Definition: pg.h:175
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1582
static uword clib_fifo_advance_head(void *v, uword n_elts)
Definition: fifo.h:169
static int validate_buffer_data2(vlib_buffer_t *b, pg_stream_t *s, u32 data_offset, u32 n_bytes)
Definition: input.c:60
pg_stream_t * streams
Definition: pg.h:327
#define ASSERT(truth)
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:178
vlib_frame_t * frame
Definition: node.h:405
u16 flags
Definition: node.h:387
void pg_stream_enable_disable(pg_main_t *pg, pg_stream_t *s, int is_enable)
Definition: stream.c:49
static_always_inline int ethernet_frame_is_tagged(u16 type)
Definition: ethernet.h:78
static uword vlib_buffer_index_length_in_chain(vlib_main_t *vm, u32 bi)
Get length in bytes of the buffer index buffer chain.
Definition: buffer_funcs.h:422
#define clib_mem_unaligned(pointer, type)
Definition: types.h:155
Definition: pg.h:95
#define clib_max(x, y)
Definition: clib.h:320
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
#define clib_fifo_end(v)
Definition: fifo.h:63
#define vec_elt(v, i)
Get vector value at index i.
u8 device_input_feature_arc_index
Feature arc index for device-input.
Definition: feature.h:112
static_always_inline void do_setbits_random(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 max_bits, u32 n_bits, u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
Definition: input.c:639
u32 node_index
Definition: pg.h:145
Definition: defs.h:47
u32 sw_if_index[VLIB_N_RX_TX]
Definition: pg.h:142
#define clib_fifo_add1(f, e)
Definition: fifo.h:192
static_always_inline void vlib_buffer_copy_indices(u32 *dst, u32 *src, u32 n_indices)
Definition: buffer_funcs.h:102
static u32 pg_stream_fill_replay(pg_main_t *pg, pg_stream_t *s, u32 n_alloc)
Definition: input.c:1198
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1583
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
u32 packet_length
Definition: input.c:1408
#define VLIB_BUFFER_MIN_CHAIN_SEG_SIZE
Definition: buffer.h:58
f64 rate_packets_per_second
Definition: pg.h:167
static u8 * format_pg_input_trace(u8 *s, va_list *va)
Definition: input.c:1416
#define vnet_buffer(b)
Definition: buffer.h:417
u32 buffer_flags
Definition: pg.h:128
static int tcp_header_bytes(tcp_header_t *t)
Definition: tcp_packet.h:93
static u32 pg_stream_fill_helper(pg_main_t *pg, pg_stream_t *s, pg_buffer_index_t *bi, u32 *buffers, u32 *next_buffers, u32 n_alloc)
Definition: input.c:1153
static u32 vlib_num_workers()
Definition: threads.h:377
#define STRUCT_SIZE_OF(t, f)
Definition: clib.h:72
u64 n_packets_generated
Definition: pg.h:156
vlib_node_registration_t pg_input_node
(constructor) VLIB_REGISTER_NODE (pg_input_node)
Definition: input.c:1812
pg_main_t pg_main
Definition: init.c:44
static vlib_node_t * vlib_get_node(vlib_main_t *vm, u32 i)
Get vlib node by index.
Definition: node_funcs.h:85
#define vec_foreach(var, vec)
Vector iterator.
f64 end
end of the time range
Definition: mactime.api:44
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:577
u8 si
Definition: lisp_types.api:47
static void vlib_frame_no_append(vlib_frame_t *f)
Definition: node_funcs.h:277
static int ip4_header_bytes(const ip4_header_t *i)
Definition: ip4_packet.h:190
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:209
vnet_feature_config_main_t * feature_config_mains
feature config main objects
Definition: feature.h:100
u32 stream_index
Definition: input.c:1406
uword pg_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: input.c:1791
#define BITS(x)
Definition: clib.h:66
f64 time_last_generate
Definition: pg.h:169
vnet_feature_main_t feature_main
Definition: feature.c:18
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
clib_random_buffer_t random_buffer
Definition: main.h:243
static void setbits_2(void *a0, void *a1, u64 v0, u64 v1, u64 v_min, u64 v_max, u32 max_bits, u32 n_bits, u64 mask, u32 shift, u32 is_increment)
Definition: input.c:475
pg_interface_t * interfaces
Definition: pg.h:336
static_always_inline void fill_buffer_offload_flags(vlib_main_t *vm, u32 *buffers, u32 n_buffers, int gso_enabled, u32 gso_size)
Definition: input.c:1534
Definition: defs.h:46
u32 * buffer_fifo
Definition: pg.h:91