FD.io VPP  v20.01-48-g3e0dafb74
Vector Packet Processing
input.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * pg_input.c: buffer generator input
17  *
18  * Copyright (c) 2008 Eliot Dresselhaus
19  *
20  * Permission is hereby granted, free of charge, to any person obtaining
21  * a copy of this software and associated documentation files (the
22  * "Software"), to deal in the Software without restriction, including
23  * without limitation the rights to use, copy, modify, merge, publish,
24  * distribute, sublicense, and/or sell copies of the Software, and to
25  * permit persons to whom the Software is furnished to do so, subject to
26  * the following conditions:
27  *
28  * The above copyright notice and this permission notice shall be
29  * included in all copies or substantial portions of the Software.
30  *
31  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38  */
39 
40  /*
41  * To be honest, the packet generator needs an extreme
42  * makeover. Two key assumptions which drove the current implementation
43  * are no longer true. First, buffer managers implement a
44  * post-TX recycle list. Second, that packet generator performance
45  * is first-order important.
46  */
47 
48 #include <vlib/vlib.h>
49 #include <vnet/pg/pg.h>
50 #include <vnet/vnet.h>
51 #include <vnet/ethernet/ethernet.h>
52 #include <vnet/feature/feature.h>
53 #include <vnet/ip/ip4_packet.h>
54 #include <vnet/ip/ip6_packet.h>
55 #include <vnet/udp/udp_packet.h>
56 #include <vnet/devices/devices.h>
57 
58 static int
60  u32 data_offset, u32 n_bytes)
61 {
62  u8 *bd, *pd, *pm;
63  u32 i;
64 
65  bd = b->data;
66  pd = s->fixed_packet_data + data_offset;
67  pm = s->fixed_packet_data_mask + data_offset;
68 
69  if (pd + n_bytes >= vec_end (s->fixed_packet_data))
70  n_bytes = (pd < vec_end (s->fixed_packet_data)
71  ? vec_end (s->fixed_packet_data) - pd : 0);
72 
73  for (i = 0; i < n_bytes; i++)
74  if ((bd[i] & pm[i]) != pd[i])
75  break;
76 
77  if (i >= n_bytes)
78  return 1;
79 
80  clib_warning ("buffer %U", format_vnet_buffer, b);
81  clib_warning ("differ at index %d", i);
82  clib_warning ("is %U", format_hex_bytes, bd, n_bytes);
83  clib_warning ("mask %U", format_hex_bytes, pm, n_bytes);
84  clib_warning ("expect %U", format_hex_bytes, pd, n_bytes);
85  return 0;
86 }
87 
88 static int
90 {
91  return validate_buffer_data2 (b, s, 0, s->buffer_bytes);
92 }
93 
94 always_inline void
95 set_1 (void *a0,
96  u64 v0, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order)
97 {
98  ASSERT (v0 >= v_min && v0 <= v_max);
99  if (n_bits == BITS (u8))
100  {
101  ((u8 *) a0)[0] = v0;
102  }
103  else if (n_bits == BITS (u16))
104  {
105  if (is_net_byte_order)
106  v0 = clib_host_to_net_u16 (v0);
107  clib_mem_unaligned (a0, u16) = v0;
108  }
109  else if (n_bits == BITS (u32))
110  {
111  if (is_net_byte_order)
112  v0 = clib_host_to_net_u32 (v0);
113  clib_mem_unaligned (a0, u32) = v0;
114  }
115  else if (n_bits == BITS (u64))
116  {
117  if (is_net_byte_order)
118  v0 = clib_host_to_net_u64 (v0);
119  clib_mem_unaligned (a0, u64) = v0;
120  }
121 }
122 
123 always_inline void
124 set_2 (void *a0, void *a1,
125  u64 v0, u64 v1,
126  u64 v_min, u64 v_max,
127  u32 n_bits, u32 is_net_byte_order, u32 is_increment)
128 {
129  ASSERT (v0 >= v_min && v0 <= v_max);
130  ASSERT (v1 >= v_min && v1 <= (v_max + is_increment));
131  if (n_bits == BITS (u8))
132  {
133  ((u8 *) a0)[0] = v0;
134  ((u8 *) a1)[0] = v1;
135  }
136  else if (n_bits == BITS (u16))
137  {
138  if (is_net_byte_order)
139  {
140  v0 = clib_host_to_net_u16 (v0);
141  v1 = clib_host_to_net_u16 (v1);
142  }
143  clib_mem_unaligned (a0, u16) = v0;
144  clib_mem_unaligned (a1, u16) = v1;
145  }
146  else if (n_bits == BITS (u32))
147  {
148  if (is_net_byte_order)
149  {
150  v0 = clib_host_to_net_u32 (v0);
151  v1 = clib_host_to_net_u32 (v1);
152  }
153  clib_mem_unaligned (a0, u32) = v0;
154  clib_mem_unaligned (a1, u32) = v1;
155  }
156  else if (n_bits == BITS (u64))
157  {
158  if (is_net_byte_order)
159  {
160  v0 = clib_host_to_net_u64 (v0);
161  v1 = clib_host_to_net_u64 (v1);
162  }
163  clib_mem_unaligned (a0, u64) = v0;
164  clib_mem_unaligned (a1, u64) = v1;
165  }
166 }
167 
170  pg_stream_t * s,
171  u32 * buffers,
172  u32 n_buffers,
173  u32 n_bits,
174  u32 byte_offset, u32 is_net_byte_order, u64 v_min, u64 v_max)
175 {
177 
178  while (n_buffers >= 4)
179  {
180  vlib_buffer_t *b0, *b1, *b2, *b3;
181  void *a0, *a1;
182 
183  b0 = vlib_get_buffer (vm, buffers[0]);
184  b1 = vlib_get_buffer (vm, buffers[1]);
185  b2 = vlib_get_buffer (vm, buffers[2]);
186  b3 = vlib_get_buffer (vm, buffers[3]);
187  buffers += 2;
188  n_buffers -= 2;
189 
190  a0 = (void *) b0 + byte_offset;
191  a1 = (void *) b1 + byte_offset;
192  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
193  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
194 
195  set_2 (a0, a1, v_min, v_min, v_min, v_max, n_bits, is_net_byte_order,
196  /* is_increment */ 0);
197 
198  ASSERT (validate_buffer_data (b0, s));
199  ASSERT (validate_buffer_data (b1, s));
200  }
201 
202  while (n_buffers > 0)
203  {
204  vlib_buffer_t *b0;
205  void *a0;
206 
207  b0 = vlib_get_buffer (vm, buffers[0]);
208  buffers += 1;
209  n_buffers -= 1;
210 
211  a0 = (void *) b0 + byte_offset;
212 
213  set_1 (a0, v_min, v_min, v_max, n_bits, is_net_byte_order);
214 
215  ASSERT (validate_buffer_data (b0, s));
216  }
217 }
218 
221  pg_stream_t * s,
222  u32 * buffers,
223  u32 n_buffers,
224  u32 n_bits,
225  u32 byte_offset,
226  u32 is_net_byte_order,
227  u32 want_sum, u64 * sum_result, u64 v_min, u64 v_max, u64 v)
228 {
230  u64 sum = 0;
231 
232  ASSERT (v >= v_min && v <= v_max);
233 
234  while (n_buffers >= 4)
235  {
236  vlib_buffer_t *b0, *b1, *b2, *b3;
237  void *a0, *a1;
238  u64 v_old;
239 
240  b0 = vlib_get_buffer (vm, buffers[0]);
241  b1 = vlib_get_buffer (vm, buffers[1]);
242  b2 = vlib_get_buffer (vm, buffers[2]);
243  b3 = vlib_get_buffer (vm, buffers[3]);
244  buffers += 2;
245  n_buffers -= 2;
246 
247  a0 = (void *) b0 + byte_offset;
248  a1 = (void *) b1 + byte_offset;
249  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
250  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
251 
252  v_old = v;
253  v = v_old + 2;
254  v = v > v_max ? v_min : v;
255  set_2 (a0, a1,
256  v_old + 0, v_old + 1, v_min, v_max, n_bits, is_net_byte_order,
257  /* is_increment */ 1);
258 
259  if (want_sum)
260  sum += 2 * v_old + 1;
261 
262  if (PREDICT_FALSE (v_old + 1 > v_max))
263  {
264  if (want_sum)
265  sum -= 2 * v_old + 1;
266 
267  v = v_old;
268  set_1 (a0, v + 0, v_min, v_max, n_bits, is_net_byte_order);
269  if (want_sum)
270  sum += v;
271  v += 1;
272 
273  v = v > v_max ? v_min : v;
274  set_1 (a1, v + 0, v_min, v_max, n_bits, is_net_byte_order);
275  if (want_sum)
276  sum += v;
277  v += 1;
278  }
279 
280  ASSERT (validate_buffer_data (b0, s));
281  ASSERT (validate_buffer_data (b1, s));
282  }
283 
284  while (n_buffers > 0)
285  {
286  vlib_buffer_t *b0;
287  void *a0;
288  u64 v_old;
289 
290  b0 = vlib_get_buffer (vm, buffers[0]);
291  buffers += 1;
292  n_buffers -= 1;
293 
294  a0 = (void *) b0 + byte_offset;
295 
296  v_old = v;
297  if (want_sum)
298  sum += v_old;
299  v += 1;
300  v = v > v_max ? v_min : v;
301 
302  ASSERT (v_old >= v_min && v_old <= v_max);
303  set_1 (a0, v_old, v_min, v_max, n_bits, is_net_byte_order);
304 
305  ASSERT (validate_buffer_data (b0, s));
306  }
307 
308  if (want_sum)
309  *sum_result = sum;
310 
311  return v;
312 }
313 
316  pg_stream_t * s,
317  u32 * buffers,
318  u32 n_buffers,
319  u32 n_bits,
320  u32 byte_offset,
321  u32 is_net_byte_order,
322  u32 want_sum, u64 * sum_result, u64 v_min, u64 v_max)
323 {
325  u64 v_diff = v_max - v_min + 1;
326  u64 r_mask = max_pow2 (v_diff) - 1;
327  u64 v0, v1;
328  u64 sum = 0;
329  void *random_data;
330 
331  random_data = clib_random_buffer_get_data
332  (&vm->random_buffer, n_buffers * n_bits / BITS (u8));
333 
334  v0 = v1 = v_min;
335 
336  while (n_buffers >= 4)
337  {
338  vlib_buffer_t *b0, *b1, *b2, *b3;
339  void *a0, *a1;
340  u64 r0 = 0, r1 = 0; /* warnings be gone */
341 
342  b0 = vlib_get_buffer (vm, buffers[0]);
343  b1 = vlib_get_buffer (vm, buffers[1]);
344  b2 = vlib_get_buffer (vm, buffers[2]);
345  b3 = vlib_get_buffer (vm, buffers[3]);
346  buffers += 2;
347  n_buffers -= 2;
348 
349  a0 = (void *) b0 + byte_offset;
350  a1 = (void *) b1 + byte_offset;
351  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
352  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
353 
354  switch (n_bits)
355  {
356 #define _(n) \
357  case BITS (u##n): \
358  { \
359  u##n * r = random_data; \
360  r0 = r[0]; \
361  r1 = r[1]; \
362  random_data = r + 2; \
363  } \
364  break;
365 
366  _(8);
367  _(16);
368  _(32);
369  _(64);
370 
371 #undef _
372  }
373 
374  /* Add power of 2 sized random number which may be out of range. */
375  v0 += r0 & r_mask;
376  v1 += r1 & r_mask;
377 
378  /* Twice should be enough to reduce to v_min .. v_max range. */
379  v0 = v0 > v_max ? v0 - v_diff : v0;
380  v1 = v1 > v_max ? v1 - v_diff : v1;
381  v0 = v0 > v_max ? v0 - v_diff : v0;
382  v1 = v1 > v_max ? v1 - v_diff : v1;
383 
384  if (want_sum)
385  sum += v0 + v1;
386 
387  set_2 (a0, a1, v0, v1, v_min, v_max, n_bits, is_net_byte_order,
388  /* is_increment */ 0);
389 
390  ASSERT (validate_buffer_data (b0, s));
391  ASSERT (validate_buffer_data (b1, s));
392  }
393 
394  while (n_buffers > 0)
395  {
396  vlib_buffer_t *b0;
397  void *a0;
398  u64 r0 = 0; /* warnings be gone */
399 
400  b0 = vlib_get_buffer (vm, buffers[0]);
401  buffers += 1;
402  n_buffers -= 1;
403 
404  a0 = (void *) b0 + byte_offset;
405 
406  switch (n_bits)
407  {
408 #define _(n) \
409  case BITS (u##n): \
410  { \
411  u##n * r = random_data; \
412  r0 = r[0]; \
413  random_data = r + 1; \
414  } \
415  break;
416 
417  _(8);
418  _(16);
419  _(32);
420  _(64);
421 
422 #undef _
423  }
424 
425  /* Add power of 2 sized random number which may be out of range. */
426  v0 += r0 & r_mask;
427 
428  /* Twice should be enough to reduce to v_min .. v_max range. */
429  v0 = v0 > v_max ? v0 - v_diff : v0;
430  v0 = v0 > v_max ? v0 - v_diff : v0;
431 
432  if (want_sum)
433  sum += v0;
434 
435  set_1 (a0, v0, v_min, v_max, n_bits, is_net_byte_order);
436 
437  ASSERT (validate_buffer_data (b0, s));
438  }
439 
440  if (want_sum)
441  *sum_result = sum;
442 }
443 
/* Read-modify-write helper used by setbits_1 / setbits_2 below
   (#undef'd after them).  Loads the possibly-unaligned value at a##i,
   converts from network to host byte order, clears the bits selected by
   `mask', ORs in v##i shifted into place, then stores it back in
   network byte order.  `mask' and `shift' are taken from the caller's
   scope. */
#define _(i,t) \
  clib_mem_unaligned (a##i, t) = \
    clib_host_to_net_##t ((clib_net_to_host_mem_##t (a##i) &~ mask) \
			  | (v##i << shift))
448 
449 always_inline void
450 setbits_1 (void *a0,
451  u64 v0,
452  u64 v_min, u64 v_max,
453  u32 max_bits, u32 n_bits, u64 mask, u32 shift)
454 {
455  ASSERT (v0 >= v_min && v0 <= v_max);
456  if (max_bits == BITS (u8))
457  ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
458 
459  else if (max_bits == BITS (u16))
460  {
461  _(0, u16);
462  }
463  else if (max_bits == BITS (u32))
464  {
465  _(0, u32);
466  }
467  else if (max_bits == BITS (u64))
468  {
469  _(0, u64);
470  }
471 }
472 
473 always_inline void
474 setbits_2 (void *a0, void *a1,
475  u64 v0, u64 v1,
476  u64 v_min, u64 v_max,
477  u32 max_bits, u32 n_bits, u64 mask, u32 shift, u32 is_increment)
478 {
479  ASSERT (v0 >= v_min && v0 <= v_max);
480  ASSERT (v1 >= v_min && v1 <= v_max + is_increment);
481  if (max_bits == BITS (u8))
482  {
483  ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
484  ((u8 *) a1)[0] = (((u8 *) a1)[0] & ~mask) | (v1 << shift);
485  }
486 
487  else if (max_bits == BITS (u16))
488  {
489  _(0, u16);
490  _(1, u16);
491  }
492  else if (max_bits == BITS (u32))
493  {
494  _(0, u32);
495  _(1, u32);
496  }
497  else if (max_bits == BITS (u64))
498  {
499  _(0, u64);
500  _(1, u64);
501  }
502 }
503 
504 #undef _
505 
508  pg_stream_t * s,
509  u32 * buffers,
510  u32 n_buffers,
511  u32 max_bits,
512  u32 n_bits,
513  u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
514 {
516 
517  while (n_buffers >= 4)
518  {
519  vlib_buffer_t *b0, *b1, *b2, *b3;
520  void *a0, *a1;
521 
522  b0 = vlib_get_buffer (vm, buffers[0]);
523  b1 = vlib_get_buffer (vm, buffers[1]);
524  b2 = vlib_get_buffer (vm, buffers[2]);
525  b3 = vlib_get_buffer (vm, buffers[3]);
526  buffers += 2;
527  n_buffers -= 2;
528 
529  a0 = (void *) b0 + byte_offset;
530  a1 = (void *) b1 + byte_offset;
531  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
532  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
533 
534  setbits_2 (a0, a1,
535  v_min, v_min, v_min, v_max, max_bits, n_bits, mask, shift,
536  /* is_increment */ 0);
537 
538  ASSERT (validate_buffer_data (b0, s));
539  ASSERT (validate_buffer_data (b1, s));
540  }
541 
542  while (n_buffers > 0)
543  {
544  vlib_buffer_t *b0;
545  void *a0;
546 
547  b0 = vlib_get_buffer (vm, buffers[0]);
548  buffers += 1;
549  n_buffers -= 1;
550 
551  a0 = (void *) b0 + byte_offset;
552 
553  setbits_1 (a0, v_min, v_min, v_max, max_bits, n_bits, mask, shift);
554  ASSERT (validate_buffer_data (b0, s));
555  }
556 }
557 
560  pg_stream_t * s,
561  u32 * buffers,
562  u32 n_buffers,
563  u32 max_bits,
564  u32 n_bits,
565  u32 byte_offset,
566  u64 v_min, u64 v_max, u64 v, u64 mask, u32 shift)
567 {
569 
570  ASSERT (v >= v_min && v <= v_max);
571 
572  while (n_buffers >= 4)
573  {
574  vlib_buffer_t *b0, *b1, *b2, *b3;
575  void *a0, *a1;
576  u64 v_old;
577 
578  b0 = vlib_get_buffer (vm, buffers[0]);
579  b1 = vlib_get_buffer (vm, buffers[1]);
580  b2 = vlib_get_buffer (vm, buffers[2]);
581  b3 = vlib_get_buffer (vm, buffers[3]);
582  buffers += 2;
583  n_buffers -= 2;
584 
585  a0 = (void *) b0 + byte_offset;
586  a1 = (void *) b1 + byte_offset;
587  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
588  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
589 
590  v_old = v;
591  v = v_old + 2;
592  v = v > v_max ? v_min : v;
593  setbits_2 (a0, a1,
594  v_old + 0, v_old + 1,
595  v_min, v_max, max_bits, n_bits, mask, shift,
596  /* is_increment */ 1);
597 
598  if (PREDICT_FALSE (v_old + 1 > v_max))
599  {
600  v = v_old;
601  setbits_1 (a0, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
602  v += 1;
603 
604  v = v > v_max ? v_min : v;
605  setbits_1 (a1, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
606  v += 1;
607  }
608  ASSERT (validate_buffer_data (b0, s));
609  ASSERT (validate_buffer_data (b1, s));
610  }
611 
612  while (n_buffers > 0)
613  {
614  vlib_buffer_t *b0;
615  void *a0;
616  u64 v_old;
617 
618  b0 = vlib_get_buffer (vm, buffers[0]);
619  buffers += 1;
620  n_buffers -= 1;
621 
622  a0 = (void *) b0 + byte_offset;
623 
624  v_old = v;
625  v = v_old + 1;
626  v = v > v_max ? v_min : v;
627 
628  ASSERT (v_old >= v_min && v_old <= v_max);
629  setbits_1 (a0, v_old, v_min, v_max, max_bits, n_bits, mask, shift);
630 
631  ASSERT (validate_buffer_data (b0, s));
632  }
633 
634  return v;
635 }
636 
639  pg_stream_t * s,
640  u32 * buffers,
641  u32 n_buffers,
642  u32 max_bits,
643  u32 n_bits,
644  u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
645 {
647  u64 v_diff = v_max - v_min + 1;
648  u64 r_mask = max_pow2 (v_diff) - 1;
649  u64 v0, v1;
650  void *random_data;
651 
652  random_data = clib_random_buffer_get_data
653  (&vm->random_buffer, n_buffers * max_bits / BITS (u8));
654  v0 = v1 = v_min;
655 
656  while (n_buffers >= 4)
657  {
658  vlib_buffer_t *b0, *b1, *b2, *b3;
659  void *a0, *a1;
660  u64 r0 = 0, r1 = 0; /* warnings be gone */
661 
662  b0 = vlib_get_buffer (vm, buffers[0]);
663  b1 = vlib_get_buffer (vm, buffers[1]);
664  b2 = vlib_get_buffer (vm, buffers[2]);
665  b3 = vlib_get_buffer (vm, buffers[3]);
666  buffers += 2;
667  n_buffers -= 2;
668 
669  a0 = (void *) b0 + byte_offset;
670  a1 = (void *) b1 + byte_offset;
671  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
672  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
673 
674  switch (max_bits)
675  {
676 #define _(n) \
677  case BITS (u##n): \
678  { \
679  u##n * r = random_data; \
680  r0 = r[0]; \
681  r1 = r[1]; \
682  random_data = r + 2; \
683  } \
684  break;
685 
686  _(8);
687  _(16);
688  _(32);
689  _(64);
690 
691 #undef _
692  }
693 
694  /* Add power of 2 sized random number which may be out of range. */
695  v0 += r0 & r_mask;
696  v1 += r1 & r_mask;
697 
698  /* Twice should be enough to reduce to v_min .. v_max range. */
699  v0 = v0 > v_max ? v0 - v_diff : v0;
700  v1 = v1 > v_max ? v1 - v_diff : v1;
701  v0 = v0 > v_max ? v0 - v_diff : v0;
702  v1 = v1 > v_max ? v1 - v_diff : v1;
703 
704  setbits_2 (a0, a1, v0, v1, v_min, v_max, max_bits, n_bits, mask, shift,
705  /* is_increment */ 0);
706 
707  ASSERT (validate_buffer_data (b0, s));
708  ASSERT (validate_buffer_data (b1, s));
709  }
710 
711  while (n_buffers > 0)
712  {
713  vlib_buffer_t *b0;
714  void *a0;
715  u64 r0 = 0; /* warnings be gone */
716 
717  b0 = vlib_get_buffer (vm, buffers[0]);
718  buffers += 1;
719  n_buffers -= 1;
720 
721  a0 = (void *) b0 + byte_offset;
722 
723  switch (max_bits)
724  {
725 #define _(n) \
726  case BITS (u##n): \
727  { \
728  u##n * r = random_data; \
729  r0 = r[0]; \
730  random_data = r + 1; \
731  } \
732  break;
733 
734  _(8);
735  _(16);
736  _(32);
737  _(64);
738 
739 #undef _
740  }
741 
742  /* Add power of 2 sized random number which may be out of range. */
743  v0 += r0 & r_mask;
744 
745  /* Twice should be enough to reduce to v_min .. v_max range. */
746  v0 = v0 > v_max ? v0 - v_diff : v0;
747  v0 = v0 > v_max ? v0 - v_diff : v0;
748 
749  setbits_1 (a0, v0, v_min, v_max, max_bits, n_bits, mask, shift);
750 
751  ASSERT (validate_buffer_data (b0, s));
752  }
753 }
754 
755 static u64
757  pg_stream_t * s,
758  u32 * buffers,
759  u32 n_buffers,
760  u32 lo_bit, u32 hi_bit,
761  u64 v_min, u64 v_max, u64 v, pg_edit_type_t edit_type)
762 {
763  u32 max_bits, l0, l1, h1, start_bit;
764 
765  if (v_min == v_max)
766  edit_type = PG_EDIT_FIXED;
767 
768  l0 = lo_bit / BITS (u8);
769  l1 = lo_bit % BITS (u8);
770  h1 = hi_bit % BITS (u8);
771 
772  start_bit = l0 * BITS (u8);
773 
774  max_bits = hi_bit - start_bit;
775  ASSERT (max_bits <= 64);
776 
777 #define _(n) \
778  case (n): \
779  if (edit_type == PG_EDIT_INCREMENT) \
780  v = do_set_increment (pg, s, buffers, n_buffers, \
781  BITS (u##n), \
782  l0, \
783  /* is_net_byte_order */ 1, \
784  /* want sum */ 0, 0, \
785  v_min, v_max, \
786  v); \
787  else if (edit_type == PG_EDIT_RANDOM) \
788  do_set_random (pg, s, buffers, n_buffers, \
789  BITS (u##n), \
790  l0, \
791  /* is_net_byte_order */ 1, \
792  /* want sum */ 0, 0, \
793  v_min, v_max); \
794  else /* edit_type == PG_EDIT_FIXED */ \
795  do_set_fixed (pg, s, buffers, n_buffers, \
796  BITS (u##n), \
797  l0, \
798  /* is_net_byte_order */ 1, \
799  v_min, v_max); \
800  goto done;
801 
802  if (l1 == 0 && h1 == 0)
803  {
804  switch (max_bits)
805  {
806  _(8);
807  _(16);
808  _(32);
809  _(64);
810  }
811  }
812 
813 #undef _
814 
815  {
816  u64 mask;
817  u32 shift = l1;
818  u32 n_bits = max_bits;
819 
820  max_bits = clib_max (max_pow2 (n_bits), 8);
821 
822  mask = ((u64) 1 << (u64) n_bits) - 1;
823  mask &= ~(((u64) 1 << (u64) shift) - 1);
824 
825  mask <<= max_bits - n_bits;
826  shift += max_bits - n_bits;
827 
828  switch (max_bits)
829  {
830 #define _(n) \
831  case (n): \
832  if (edit_type == PG_EDIT_INCREMENT) \
833  v = do_setbits_increment (pg, s, buffers, n_buffers, \
834  BITS (u##n), n_bits, \
835  l0, v_min, v_max, v, \
836  mask, shift); \
837  else if (edit_type == PG_EDIT_RANDOM) \
838  do_setbits_random (pg, s, buffers, n_buffers, \
839  BITS (u##n), n_bits, \
840  l0, v_min, v_max, \
841  mask, shift); \
842  else /* edit_type == PG_EDIT_FIXED */ \
843  do_setbits_fixed (pg, s, buffers, n_buffers, \
844  BITS (u##n), n_bits, \
845  l0, v_min, v_max, \
846  mask, shift); \
847  goto done;
848 
849  _(8);
850  _(16);
851  _(32);
852  _(64);
853 
854 #undef _
855  }
856  }
857 
858 done:
859  return v;
860 }
861 
862 static void
864  pg_stream_t * s, u32 * buffers, u32 n_buffers)
865 {
866  u64 v_min, v_max, length_sum;
867  pg_edit_type_t edit_type;
868 
869  v_min = s->min_packet_bytes;
870  v_max = s->max_packet_bytes;
871  edit_type = s->packet_size_edit_type;
872 
873  if (edit_type == PG_EDIT_INCREMENT)
875  = do_set_increment (pg, s, buffers, n_buffers,
876  8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
877  STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
878  /* is_net_byte_order */ 0,
879  /* want sum */ 1, &length_sum,
880  v_min, v_max, s->last_increment_packet_size);
881 
882  else if (edit_type == PG_EDIT_RANDOM)
883  do_set_random (pg, s, buffers, n_buffers,
884  8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
885  STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
886  /* is_net_byte_order */ 0,
887  /* want sum */ 1, &length_sum,
888  v_min, v_max);
889 
890  else /* edit_type == PG_EDIT_FIXED */
891  {
892  do_set_fixed (pg, s, buffers, n_buffers,
893  8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
894  STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
895  /* is_net_byte_order */ 0,
896  v_min, v_max);
897  length_sum = v_min * n_buffers;
898  }
899 
900  {
901  vnet_main_t *vnm = vnet_get_main ();
903  vnet_sw_interface_t *si =
905 
909  si->sw_if_index, n_buffers, length_sum);
910  }
911 
912 }
913 
914 static void
916  pg_stream_t * s,
917  u32 * buffers, u32 n_buffers)
918 {
920  pg_buffer_index_t *pbi;
921  uword n_bytes_left;
922  static u32 *unused_buffers = 0;
923 
924  while (n_buffers > 0)
925  {
926  vlib_buffer_t *b;
927  u32 bi;
928 
929  bi = buffers[0];
930  b = vlib_get_buffer (vm, bi);
931 
932  /* Current length here is length of whole packet. */
933  n_bytes_left = b->current_length;
934 
935  pbi = s->buffer_indices;
936  while (1)
937  {
938  uword n = clib_min (n_bytes_left, s->buffer_bytes);
939 
940  b->current_length = n;
941  n_bytes_left -= n;
942  if (n_bytes_left > 0)
943  b->flags |= VLIB_BUFFER_NEXT_PRESENT;
944  else
945  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
946 
947  /* Return unused buffers to fifos. */
948  if (n == 0)
949  vec_add1 (unused_buffers, bi);
950 
951  pbi++;
952  if (pbi >= vec_end (s->buffer_indices))
953  break;
954 
955  bi = b->next_buffer;
956  b = vlib_get_buffer (vm, bi);
957  }
958  ASSERT (n_bytes_left == 0);
959 
960  buffers += 1;
961  n_buffers -= 1;
962  }
963 
964  if (vec_len (unused_buffers) > 0)
965  {
966  vlib_buffer_free_no_next (vm, unused_buffers, vec_len (unused_buffers));
967  _vec_len (unused_buffers) = 0;
968  }
969 }
970 
971 static void
973  pg_stream_t * s, u32 * buffers, u32 n_buffers)
974 {
975  pg_edit_t *e;
976 
978  {
979  switch (e->type)
980  {
981  case PG_EDIT_RANDOM:
982  case PG_EDIT_INCREMENT:
983  {
984  u32 lo_bit, hi_bit;
985  u64 v_min, v_max;
986 
987  v_min = pg_edit_get_value (e, PG_EDIT_LO);
988  v_max = pg_edit_get_value (e, PG_EDIT_HI);
989 
990  hi_bit = (BITS (u8) * STRUCT_OFFSET_OF (vlib_buffer_t, data)
991  + BITS (u8) + e->lsb_bit_offset);
992  lo_bit = hi_bit - e->n_bits;
993 
995  = do_it (pg, s, buffers, n_buffers, lo_bit, hi_bit, v_min, v_max,
996  e->last_increment_value, e->type);
997  }
998  break;
999 
1000  case PG_EDIT_UNSPECIFIED:
1001  break;
1002 
1003  default:
1004  /* Should not be any fixed edits left. */
1005  ASSERT (0);
1006  break;
1007  }
1008  }
1009 
1010  /* Call any edit functions to e.g. completely IP lengths, checksums, ... */
1011  {
1012  int i;
1013  for (i = vec_len (s->edit_groups) - 1; i >= 0; i--)
1014  {
1015  pg_edit_group_t *g = s->edit_groups + i;
1016  if (g->edit_function)
1017  g->edit_function (pg, s, g, buffers, n_buffers);
1018  }
1019  }
1020 }
1021 
1022 static void
1024  pg_stream_t * s,
1025  u32 * buffers, u32 * next_buffers, u32 n_buffers)
1026 {
1028 
1029  while (n_buffers >= 4)
1030  {
1031  u32 ni0, ni1;
1032  vlib_buffer_t *b0, *b1;
1033 
1034  b0 = vlib_get_buffer (vm, buffers[0]);
1035  b1 = vlib_get_buffer (vm, buffers[1]);
1036  ni0 = next_buffers[0];
1037  ni1 = next_buffers[1];
1038 
1039  vlib_prefetch_buffer_with_index (vm, buffers[2], WRITE);
1040  vlib_prefetch_buffer_with_index (vm, buffers[3], WRITE);
1041 
1042  b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
1043  b1->flags |= VLIB_BUFFER_NEXT_PRESENT;
1044  b0->next_buffer = ni0;
1045  b1->next_buffer = ni1;
1046 
1047  buffers += 2;
1048  next_buffers += 2;
1049  n_buffers -= 2;
1050  }
1051 
1052  while (n_buffers > 0)
1053  {
1054  u32 ni0;
1055  vlib_buffer_t *b0;
1056 
1057  b0 = vlib_get_buffer (vm, buffers[0]);
1058  ni0 = next_buffers[0];
1059  buffers += 1;
1060  next_buffers += 1;
1061  n_buffers -= 1;
1062 
1063  b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
1064  b0->next_buffer = ni0;
1065  }
1066 }
1067 
1070  pg_stream_t * s,
1071  u32 * buffers,
1072  u32 n_buffers, u32 data_offset, u32 n_data, u32 set_data)
1073 {
1074  u32 n_left, *b;
1075  u8 *data, *mask;
1076 
1077  ASSERT (s->replay_packet_templates == 0);
1078 
1079  data = s->fixed_packet_data + data_offset;
1080  mask = s->fixed_packet_data_mask + data_offset;
1081  if (data + n_data >= vec_end (s->fixed_packet_data))
1082  n_data = (data < vec_end (s->fixed_packet_data)
1083  ? vec_end (s->fixed_packet_data) - data : 0);
1084  if (n_data > 0)
1085  {
1086  ASSERT (data + n_data <= vec_end (s->fixed_packet_data));
1087  ASSERT (mask + n_data <= vec_end (s->fixed_packet_data_mask));
1088  }
1089 
1090  n_left = n_buffers;
1091  b = buffers;
1092 
1093  while (n_left >= 4)
1094  {
1095  u32 bi0, bi1;
1096  vlib_buffer_t *b0, *b1;
1097 
1098  /* Prefetch next iteration. */
1099  vlib_prefetch_buffer_with_index (vm, b[2], STORE);
1100  vlib_prefetch_buffer_with_index (vm, b[3], STORE);
1101 
1102  bi0 = b[0];
1103  bi1 = b[1];
1104  b += 2;
1105  n_left -= 2;
1106 
1107  b0 = vlib_get_buffer (vm, bi0);
1108  b1 = vlib_get_buffer (vm, bi1);
1109 
1110  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1111  vnet_buffer (b1)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
1112 
1113  vnet_buffer (b0)->sw_if_index[VLIB_TX] =
1114  vnet_buffer (b1)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];
1115 
1116  if (set_data)
1117  {
1118  clib_memcpy_fast (b0->data, data, n_data);
1119  clib_memcpy_fast (b1->data, data, n_data);
1120  }
1121  else
1122  {
1123  ASSERT (validate_buffer_data2 (b0, s, data_offset, n_data));
1124  ASSERT (validate_buffer_data2 (b1, s, data_offset, n_data));
1125  }
1126  }
1127 
1128  while (n_left >= 1)
1129  {
1130  u32 bi0;
1131  vlib_buffer_t *b0;
1132 
1133  bi0 = b[0];
1134  b += 1;
1135  n_left -= 1;
1136 
1137  b0 = vlib_get_buffer (vm, bi0);
1138  vnet_buffer (b0)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
1139  vnet_buffer (b0)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];
1140 
1141  if (set_data)
1142  clib_memcpy_fast (b0->data, data, n_data);
1143  else
1144  ASSERT (validate_buffer_data2 (b0, s, data_offset, n_data));
1145  }
1146 }
1147 
1148 static u32
1150  pg_stream_t * s,
1151  pg_buffer_index_t * bi,
1152  u32 * buffers, u32 * next_buffers, u32 n_alloc)
1153 {
1155  uword is_start_of_packet = bi == s->buffer_indices;
1156  u32 n_allocated;
1157 
1159 
1160  n_allocated = vlib_buffer_alloc (vm, buffers, n_alloc);
1161  if (n_allocated == 0)
1162  return 0;
1163 
1164  /*
1165  * We can't assume we got all the buffers we asked for...
1166  * This never worked until recently.
1167  */
1168  n_alloc = n_allocated;
1169 
1170  /* Reinitialize buffers */
1172  (vm, s,
1173  buffers,
1174  n_alloc, (bi - s->buffer_indices) * s->buffer_bytes /* data offset */ ,
1175  s->buffer_bytes,
1176  /* set_data */ 1);
1177 
1178  if (next_buffers)
1179  pg_set_next_buffer_pointers (pg, s, buffers, next_buffers, n_alloc);
1180 
1181  if (is_start_of_packet)
1182  {
1183  pg_generate_set_lengths (pg, s, buffers, n_alloc);
1184  if (vec_len (s->buffer_indices) > 1)
1185  pg_generate_fix_multi_buffer_lengths (pg, s, buffers, n_alloc);
1186 
1187  pg_generate_edit (pg, s, buffers, n_alloc);
1188  }
1189 
1190  return n_alloc;
1191 }
1192 
1193 static u32
1195 {
1196  pg_buffer_index_t *bi;
1197  u32 n_left, i, l;
1198  u32 buffer_alloc_request = 0;
1199  u32 buffer_alloc_result;
1200  u32 current_buffer_index;
1201  u32 *buffers;
1203  vnet_main_t *vnm = vnet_get_main ();
1204  u32 buf_sz = vlib_buffer_get_default_data_size (vm);
1206  vnet_sw_interface_t *si;
1207 
1208  buffers = pg->replay_buffers_by_thread[vm->thread_index];
1209  vec_reset_length (buffers);
1210  bi = s->buffer_indices;
1211 
1212  n_left = n_alloc;
1215 
1216  /* Figure out how many buffers we need */
1217  while (n_left > 0)
1218  {
1219  u8 *d0;
1220 
1221  d0 = vec_elt (s->replay_packet_templates, i);
1222  buffer_alloc_request += (vec_len (d0) + (buf_sz - 1)) / buf_sz;
1223 
1224  i = ((i + 1) == l) ? 0 : i + 1;
1225  n_left--;
1226  }
1227 
1228  ASSERT (buffer_alloc_request > 0);
1229  vec_validate (buffers, buffer_alloc_request - 1);
1230 
1231  /* Allocate that many buffers */
1232  buffer_alloc_result = vlib_buffer_alloc (vm, buffers, buffer_alloc_request);
1233  if (buffer_alloc_result < buffer_alloc_request)
1234  {
1235  clib_warning ("alloc failure, got %d not %d", buffer_alloc_result,
1236  buffer_alloc_request);
1237  vlib_buffer_free_no_next (vm, buffers, buffer_alloc_result);
1238  pg->replay_buffers_by_thread[vm->thread_index] = buffers;
1239  return 0;
1240  }
1241 
1242  /* Now go generate the buffers, and add them to the FIFO */
1243  n_left = n_alloc;
1244 
1245  current_buffer_index = 0;
1248  while (n_left > 0)
1249  {
1250  u8 *d0;
1251  int not_last;
1252  u32 data_offset;
1253  u32 bytes_to_copy, bytes_this_chunk;
1254  vlib_buffer_t *b;
1255 
1256  d0 = vec_elt (s->replay_packet_templates, i);
1257  data_offset = 0;
1258  bytes_to_copy = vec_len (d0);
1259 
1260  /* Add head chunk to pg fifo */
1261  clib_fifo_add1 (bi->buffer_fifo, buffers[current_buffer_index]);
1262 
1263  /* Copy the data */
1264  while (bytes_to_copy)
1265  {
1266  bytes_this_chunk = clib_min (bytes_to_copy, buf_sz);
1267  ASSERT (current_buffer_index < vec_len (buffers));
1268  b = vlib_get_buffer (vm, buffers[current_buffer_index]);
1269  clib_memcpy_fast (b->data, d0 + data_offset, bytes_this_chunk);
1270  vnet_buffer (b)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
1271  vnet_buffer (b)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];
1272  b->flags = 0;
1273  b->next_buffer = 0;
1274  b->current_data = 0;
1275  b->current_length = bytes_this_chunk;
1276 
1277  not_last = bytes_this_chunk < bytes_to_copy;
1278  if (not_last)
1279  {
1280  ASSERT (current_buffer_index < (vec_len (buffers) - 1));
1281  b->flags |= VLIB_BUFFER_NEXT_PRESENT;
1282  b->next_buffer = buffers[current_buffer_index + 1];
1283  }
1284  bytes_to_copy -= bytes_this_chunk;
1285  data_offset += bytes_this_chunk;
1286  current_buffer_index++;
1287  }
1288 
1289  i = ((i + 1) == l) ? 0 : i + 1;
1290  n_left--;
1291  }
1292 
1293  /* Update the interface counters */
1294  si = vnet_get_sw_interface (vnm, s->sw_if_index[VLIB_RX]);
1295  l = 0;
1296  for (i = 0; i < n_alloc; i++)
1297  l += vlib_buffer_index_length_in_chain (vm, buffers[i]);
1301  si->sw_if_index, n_alloc, l);
1302 
1303  s->current_replay_packet_index += n_alloc;
1305 
1306  pg->replay_buffers_by_thread[vm->thread_index] = buffers;
1307  return n_alloc;
1308 }
1309 
1310 
1311 static u32
1312 pg_stream_fill (pg_main_t * pg, pg_stream_t * s, u32 n_buffers)
1313 {
1314  pg_buffer_index_t *bi;
1315  word i, n_in_fifo, n_alloc, n_free, n_added;
1316  u32 *tail, *start, *end, *last_tail, *last_start;
1317 
1318  bi = s->buffer_indices;
1319 
1320  n_in_fifo = clib_fifo_elts (bi->buffer_fifo);
1321  if (n_in_fifo >= n_buffers)
1322  return n_in_fifo;
1323 
1324  n_alloc = n_buffers - n_in_fifo;
1325 
1326  /* Round up, but never generate more than limit. */
1327  n_alloc = clib_max (VLIB_FRAME_SIZE, n_alloc);
1328 
1329  if (s->n_packets_limit > 0
1330  && s->n_packets_generated + n_in_fifo + n_alloc >= s->n_packets_limit)
1331  {
1332  n_alloc = s->n_packets_limit - s->n_packets_generated - n_in_fifo;
1333  if (n_alloc < 0)
1334  n_alloc = 0;
1335  }
1336 
1337  /*
1338  * Handle pcap replay directly
1339  */
1340  if (s->replay_packet_templates)
1341  return pg_stream_fill_replay (pg, s, n_alloc);
1342 
1343  /* All buffer fifos should have the same size. */
1344  if (CLIB_DEBUG > 0)
1345  {
1346  uword l = ~0, e;
1347  vec_foreach (bi, s->buffer_indices)
1348  {
1349  e = clib_fifo_elts (bi->buffer_fifo);
1350  if (bi == s->buffer_indices)
1351  l = e;
1352  ASSERT (l == e);
1353  }
1354  }
1355 
1356  last_tail = last_start = 0;
1357  n_added = n_alloc;
1358 
1359  for (i = vec_len (s->buffer_indices) - 1; i >= 0; i--)
1360  {
1361  bi = vec_elt_at_index (s->buffer_indices, i);
1362 
1363  n_free = clib_fifo_free_elts (bi->buffer_fifo);
1364  if (n_free < n_alloc)
1365  clib_fifo_resize (bi->buffer_fifo, n_alloc - n_free);
1366 
1367  tail = clib_fifo_advance_tail (bi->buffer_fifo, n_alloc);
1368  start = bi->buffer_fifo;
1369  end = clib_fifo_end (bi->buffer_fifo);
1370 
1371  if (tail + n_alloc <= end)
1372  {
1373  n_added =
1374  pg_stream_fill_helper (pg, s, bi, tail, last_tail, n_alloc);
1375  }
1376  else
1377  {
1378  u32 n = clib_min (end - tail, n_alloc);
1379  n_added = pg_stream_fill_helper (pg, s, bi, tail, last_tail, n);
1380 
1381  if (n_added == n && n_alloc > n_added)
1382  {
1383  n_added += pg_stream_fill_helper
1384  (pg, s, bi, start, last_start, n_alloc - n_added);
1385  }
1386  }
1387 
1388  if (PREDICT_FALSE (n_added < n_alloc))
1389  tail = clib_fifo_advance_tail (bi->buffer_fifo, n_added - n_alloc);
1390 
1391  last_tail = tail;
1392  last_start = start;
1393 
1394  /* Verify that pkts in the fifo are properly allocated */
1395  }
1396 
1397  return n_in_fifo + n_added;
1398 }
1399 
1400 typedef struct
1401 {
1403 
1406 
1407  /* Use pre data for packet data. */
1410 
1411 static u8 *
1412 format_pg_input_trace (u8 * s, va_list * va)
1413 {
1414  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
1415  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
1416  pg_input_trace_t *t = va_arg (*va, pg_input_trace_t *);
1417  pg_main_t *pg = &pg_main;
1418  pg_stream_t *stream;
1419  vlib_node_t *n;
1420  u32 indent = format_get_indent (s);
1421 
1422  stream = 0;
1423  if (!pool_is_free_index (pg->streams, t->stream_index))
1424  stream = pool_elt_at_index (pg->streams, t->stream_index);
1425 
1426  if (stream)
1427  s = format (s, "stream %v", pg->streams[t->stream_index].name);
1428  else
1429  s = format (s, "stream %d", t->stream_index);
1430 
1431  s = format (s, ", %d bytes", t->packet_length);
1432  s = format (s, ", sw_if_index %d", t->sw_if_index);
1433 
1434  s = format (s, "\n%U%U",
1436 
1437  s = format (s, "\n%U", format_white_space, indent);
1438 
1439  n = 0;
1440  if (stream)
1441  n = vlib_get_node (vm, stream->node_index);
1442 
1443  if (n && n->format_buffer)
1444  s = format (s, "%U", n->format_buffer,
1445  t->buffer.pre_data, sizeof (t->buffer.pre_data));
1446  else
1447  s = format (s, "%U",
1449  ARRAY_LEN (t->buffer.pre_data));
1450  return s;
1451 }
1452 
1453 static void
1455  vlib_node_runtime_t * node, u32 stream_index, u32 next_index,
1456  u32 * buffers, u32 n_buffers)
1457 {
1459  u32 *b, n_left;
1460 
1461  n_left = n_buffers;
1462  b = buffers;
1463 
1464  while (n_left >= 2)
1465  {
1466  u32 bi0, bi1;
1467  vlib_buffer_t *b0, *b1;
1468  pg_input_trace_t *t0, *t1;
1469 
1470  bi0 = b[0];
1471  bi1 = b[1];
1472  b += 2;
1473  n_left -= 2;
1474 
1475  b0 = vlib_get_buffer (vm, bi0);
1476  b1 = vlib_get_buffer (vm, bi1);
1477 
1478  vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 1);
1479  vlib_trace_buffer (vm, node, next_index, b1, /* follow_chain */ 1);
1480 
1481  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
1482  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
1483 
1484  t0->stream_index = stream_index;
1485  t1->stream_index = stream_index;
1486 
1489 
1490  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1491  t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1492 
1493  clib_memcpy_fast (&t0->buffer, b0,
1494  sizeof (b0[0]) - sizeof (b0->pre_data));
1495  clib_memcpy_fast (&t1->buffer, b1,
1496  sizeof (b1[0]) - sizeof (b1->pre_data));
1497 
1499  sizeof (t0->buffer.pre_data));
1501  sizeof (t1->buffer.pre_data));
1502  }
1503 
1504  while (n_left >= 1)
1505  {
1506  u32 bi0;
1507  vlib_buffer_t *b0;
1508  pg_input_trace_t *t0;
1509 
1510  bi0 = b[0];
1511  b += 1;
1512  n_left -= 1;
1513 
1514  b0 = vlib_get_buffer (vm, bi0);
1515 
1516  vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 1);
1517  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
1518 
1519  t0->stream_index = stream_index;
1521  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1522  clib_memcpy_fast (&t0->buffer, b0,
1523  sizeof (b0[0]) - sizeof (b0->pre_data));
1525  sizeof (t0->buffer.pre_data));
1526  }
1527 }
1528 
1530 fill_gso_buffer_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
1531  u32 packet_data_size)
1532 {
1533 
1534  for (int i = 0; i < n_buffers; i++)
1535  {
1536  vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[i]);
1537  u8 l4_proto = 0;
1538  u8 l4_hdr_sz = 0;
1539 
1540  ethernet_header_t *eh =
1542  u16 ethertype = clib_net_to_host_u16 (eh->type);
1543  u16 l2hdr_sz = sizeof (ethernet_header_t);
1544 
1545  if (ethernet_frame_is_tagged (ethertype))
1546  {
1547  ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);
1548 
1549  ethertype = clib_net_to_host_u16 (vlan->type);
1550  l2hdr_sz += sizeof (*vlan);
1551  if (ethertype == ETHERNET_TYPE_VLAN)
1552  {
1553  vlan++;
1554  ethertype = clib_net_to_host_u16 (vlan->type);
1555  l2hdr_sz += sizeof (*vlan);
1556  }
1557  }
1558 
1559  vnet_buffer (b0)->l2_hdr_offset = 0;
1560  vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
1561 
1562  if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
1563  {
1564  ip4_header_t *ip4 =
1565  (ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
1566  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
1567  l4_proto = ip4->protocol;
1568  b0->flags |=
1569  (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
1570  b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID
1571  | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
1572  VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
1573  }
1574  else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
1575  {
1576  ip6_header_t *ip6 =
1577  (ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
1578  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
1579  /* FIXME IPv6 EH traversal */
1580  l4_proto = ip6->protocol;
1581  b0->flags |=
1582  (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
1583  VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
1584  VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
1585  }
1586  if (l4_proto == IP_PROTOCOL_TCP)
1587  {
1588  b0->flags |= (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_GSO);
1590  vnet_buffer
1591  (b0)->l4_hdr_offset);
1592  l4_hdr_sz = tcp_header_bytes (tcp);
1593  tcp->checksum = 0;
1594  vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
1595  vnet_buffer2 (b0)->gso_size = packet_data_size;
1596  }
1597  else if (l4_proto == IP_PROTOCOL_UDP)
1598  {
1599  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
1601  vnet_buffer
1602  (b0)->l4_hdr_offset);
1603  vnet_buffer2 (b0)->gso_l4_hdr_sz = sizeof (*udp);
1604  udp->checksum = 0;
1605  }
1606  }
1607 }
1608 
1609 static uword
1611  pg_main_t * pg,
1612  pg_stream_t * s, uword n_packets_to_generate)
1613 {
1615  u32 *to_next, n_this_frame, n_left, n_trace, n_packets_in_fifo;
1616  uword n_packets_generated;
1617  pg_buffer_index_t *bi, *bi0;
1618  u32 next_index = s->next_index;
1621  u8 feature_arc_index = fm->device_input_feature_arc_index;
1622  cm = &fm->feature_config_mains[feature_arc_index];
1623  u32 current_config_index = ~(u32) 0;
1625  int i;
1626 
1627  bi0 = s->buffer_indices;
1628 
1629  n_packets_in_fifo = pg_stream_fill (pg, s, n_packets_to_generate);
1630  n_packets_to_generate = clib_min (n_packets_in_fifo, n_packets_to_generate);
1631  n_packets_generated = 0;
1632 
1633  if (PREDICT_FALSE
1634  (vnet_have_features (feature_arc_index, s->sw_if_index[VLIB_RX])))
1635  {
1636  current_config_index =
1638  vnet_get_config_data (&cm->config_main, &current_config_index,
1639  &next_index, 0);
1640  }
1641 
1642  while (n_packets_to_generate > 0)
1643  {
1644  u32 *head, *start, *end;
1645 
1647  {
1648  vlib_next_frame_t *nf;
1649  vlib_frame_t *f;
1651  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left);
1652  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
1653  f = vlib_get_frame (vm, nf->frame);
1655 
1656  ef = vlib_frame_scalar_args (f);
1657  ef->sw_if_index = pi->sw_if_index;
1658  ef->hw_if_index = pi->hw_if_index;
1660  }
1661  else
1662  vlib_get_next_frame (vm, node, next_index, to_next, n_left);
1663 
1664  n_this_frame = n_packets_to_generate;
1665  if (n_this_frame > n_left)
1666  n_this_frame = n_left;
1667 
1668  start = bi0->buffer_fifo;
1669  end = clib_fifo_end (bi0->buffer_fifo);
1670  head = clib_fifo_head (bi0->buffer_fifo);
1671 
1672  if (head + n_this_frame <= end)
1673  vlib_buffer_copy_indices (to_next, head, n_this_frame);
1674  else
1675  {
1676  u32 n = end - head;
1677  vlib_buffer_copy_indices (to_next + 0, head, n);
1678  vlib_buffer_copy_indices (to_next + n, start, n_this_frame - n);
1679  }
1680 
1681  if (s->replay_packet_templates == 0)
1682  {
1683  vec_foreach (bi, s->buffer_indices)
1684  clib_fifo_advance_head (bi->buffer_fifo, n_this_frame);
1685  }
1686  else
1687  {
1688  clib_fifo_advance_head (bi0->buffer_fifo, n_this_frame);
1689  }
1690 
1691  if (current_config_index != ~(u32) 0)
1692  for (i = 0; i < n_this_frame; i++)
1693  {
1694  vlib_buffer_t *b;
1695  b = vlib_get_buffer (vm, to_next[i]);
1696  b->current_config_index = current_config_index;
1697  vnet_buffer (b)->feature_arc_index = feature_arc_index;
1698  }
1699 
1700  if (pi->gso_enabled)
1701  fill_gso_buffer_flags (vm, to_next, n_this_frame, pi->gso_size);
1702 
1703  n_trace = vlib_get_trace_count (vm, node);
1704  if (n_trace > 0)
1705  {
1706  u32 n = clib_min (n_trace, n_this_frame);
1707  pg_input_trace (pg, node, s - pg->streams, next_index, to_next, n);
1708  vlib_set_trace_count (vm, node, n_trace - n);
1709  }
1710  n_packets_to_generate -= n_this_frame;
1711  n_packets_generated += n_this_frame;
1712  n_left -= n_this_frame;
1713  if (CLIB_DEBUG > 0)
1714  {
1715  int i;
1716  vlib_buffer_t *b;
1717 
1718  for (i = 0; i < n_this_frame; i++)
1719  {
1720  b = vlib_get_buffer (vm, to_next[i]);
1721  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0 ||
1723  }
1724  }
1725  vlib_put_next_frame (vm, node, next_index, n_left);
1726  }
1727 
1728  return n_packets_generated;
1729 }
1730 
1731 static uword
1733 {
1735  uword n_packets;
1736  f64 time_now, dt;
1737 
1738  if (s->n_packets_limit > 0 && s->n_packets_generated >= s->n_packets_limit)
1739  {
1740  pg_stream_enable_disable (pg, s, /* want_enabled */ 0);
1741  return 0;
1742  }
1743 
1744  /* Apply rate limit. */
1745  time_now = vlib_time_now (vm);
1746  if (s->time_last_generate == 0)
1747  s->time_last_generate = time_now;
1748 
1749  dt = time_now - s->time_last_generate;
1750  s->time_last_generate = time_now;
1751 
1752  n_packets = VLIB_FRAME_SIZE;
1753  if (s->rate_packets_per_second > 0)
1754  {
1756  n_packets = s->packet_accumulator;
1757 
1758  /* Never allow accumulator to grow if we get behind. */
1759  s->packet_accumulator -= n_packets;
1760  }
1761 
1762  /* Apply fixed limit. */
1763  if (s->n_packets_limit > 0
1764  && s->n_packets_generated + n_packets > s->n_packets_limit)
1765  n_packets = s->n_packets_limit - s->n_packets_generated;
1766 
1767  /* Generate up to one frame's worth of packets. */
1768  if (n_packets > s->n_max_frame)
1769  n_packets = s->n_max_frame;
1770 
1771  if (n_packets > 0)
1772  n_packets = pg_generate_packets (node, pg, s, n_packets);
1773 
1774  s->n_packets_generated += n_packets;
1775 
1776  return n_packets;
1777 }
1778 
1779 uword
1781 {
1782  uword i;
1783  pg_main_t *pg = &pg_main;
1784  uword n_packets = 0;
1785  u32 worker_index = 0;
1786 
1787  if (vlib_num_workers ())
1788  worker_index = vlib_get_current_worker_index ();
1789 
1790  /* *INDENT-OFF* */
1791  clib_bitmap_foreach (i, pg->enabled_streams[worker_index], ({
1792  pg_stream_t *s = vec_elt_at_index (pg->streams, i);
1793  n_packets += pg_input_stream (node, pg, s);
1794  }));
1795  /* *INDENT-ON* */
1796 
1797  return n_packets;
1798 }
1799 
1800 /* *INDENT-OFF* */
1802  .function = pg_input,
1804  .name = "pg-input",
1805  .sibling_of = "device-input",
1806  .type = VLIB_NODE_TYPE_INPUT,
1807 
1808  .format_trace = format_pg_input_trace,
1809 
1810  /* Input node will be left disabled until a stream is active. */
1811  .state = VLIB_NODE_STATE_DISABLED,
1812 };
1813 /* *INDENT-ON* */
1814 
1815 /*
1816  * fd.io coding-style-patch-verification: ON
1817  *
1818  * Local Variables:
1819  * eval: (c-set-style "gnu")
1820  * End:
1821  */
vnet_config_main_t config_main
Definition: feature.h:82
u32 sw_if_index
Definition: pg.h:300
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:440
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
Definition: edit.h:64
static uword pg_generate_packets(vlib_node_runtime_t *node, pg_main_t *pg, pg_stream_t *s, uword n_packets_to_generate)
Definition: input.c:1610
u32 next_index
Definition: pg.h:147
#define PG_EDIT_LO
Definition: edit.h:83
u64 n_packets_limit
Definition: pg.h:156
#define clib_min(x, y)
Definition: clib.h:295
#define clib_fifo_head(v)
Definition: fifo.h:254
#define CLIB_UNUSED(x)
Definition: clib.h:82
u8 * fixed_packet_data
Definition: pg.h:120
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:187
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
static uword clib_fifo_elts(void *v)
Definition: fifo.h:66
vnet_main_t * vnet_get_main(void)
Definition: misc.c:46
static void setbits_1(void *a0, u64 v0, u64 v_min, u64 v_max, u32 max_bits, u32 n_bits, u64 mask, u32 shift)
Definition: input.c:450
#define vnet_buffer2(b)
Definition: buffer.h:467
Definition: pg.h:318
static void pg_generate_fix_multi_buffer_lengths(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers)
Definition: input.c:915
vnet_interface_main_t interface_main
Definition: vnet.h:56
static u64 do_it(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 lo_bit, u32 hi_bit, u64 v_min, u64 v_max, u64 v, pg_edit_type_t edit_type)
Definition: input.c:756
#define PREDICT_TRUE(x)
Definition: clib.h:112
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:110
unsigned long u64
Definition: types.h:89
pg_edit_group_t * edit_groups
Definition: pg.h:106
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
#define clib_fifo_advance_tail(f, n_elts)
Definition: fifo.h:161
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
Definition: node.h:306
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:279
#define clib_fifo_resize(f, n_elts)
Definition: fifo.h:101
static void * clib_random_buffer_get_data(clib_random_buffer_t *b, uword n_bytes)
Definition: random_buffer.h:78
void(* edit_function)(struct pg_main_t *pg, struct pg_stream_t *s, struct pg_edit_group_t *g, u32 *buffers, u32 n_buffers)
Definition: pg.h:73
pg_edit_type_t
Definition: edit.h:46
u32 thread_index
Definition: main.h:218
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, vlib_frame_t *f)
Definition: node_funcs.h:216
u8 data[0]
Packet data.
Definition: buffer.h:181
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:523
static_always_inline u64 do_set_increment(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 n_bits, u32 byte_offset, u32 is_net_byte_order, u32 want_sum, u64 *sum_result, u64 v_min, u64 v_max, u64 v)
Definition: input.c:220
static void pg_generate_set_lengths(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers)
Definition: input.c:863
u32 n_max_frame
Definition: pg.h:159
int i
static u32 format_get_indent(u8 *s)
Definition: format.h:72
#define STRUCT_OFFSET_OF(t, f)
Definition: clib.h:65
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
u64 last_increment_value
Definition: edit.h:87
static_always_inline int vnet_have_features(u8 arc, u32 sw_if_index)
Definition: feature.h:248
pg_buffer_index_t * buffer_indices
Definition: pg.h:169
u32 hw_if_index
Definition: pg.h:300
static_always_inline void init_buffers_inline(vlib_main_t *vm, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 data_offset, u32 n_data, u32 set_data)
Definition: input.c:1069
pg_edit_type_t packet_size_edit_type
Definition: pg.h:108
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:366
struct _tcp_header tcp_header_t
unsigned char u8
Definition: types.h:56
u8 * fixed_packet_data_mask
Definition: pg.h:120
static_always_inline void do_set_fixed(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 n_bits, u32 byte_offset, u32 is_net_byte_order, u64 v_min, u64 v_max)
Definition: input.c:169
static void set_1(void *a0, u64 v0, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order)
Definition: input.c:95
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
double f64
Definition: types.h:142
#define fm
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:130
static void pg_generate_edit(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers)
Definition: input.c:972
#define PG_EDIT_HI
Definition: edit.h:84
u32 gso_size
Definition: pg.h:306
#define static_always_inline
Definition: clib.h:99
static void pg_set_next_buffer_pointers(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 *next_buffers, u32 n_buffers)
Definition: input.c:1023
i64 word
Definition: types.h:111
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index The first 64 bytes of buffer contains most header informatio...
Definition: buffer_funcs.h:440
static_always_inline u64 do_setbits_increment(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 max_bits, u32 n_bits, u32 byte_offset, u64 v_min, u64 v_max, u64 v, u64 mask, u32 shift)
Definition: input.c:559
static_always_inline void do_setbits_fixed(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 max_bits, u32 n_bits, u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
Definition: input.c:507
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
Definition: ethernet.h:52
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:863
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
static uword clib_fifo_free_elts(void *v)
Definition: fifo.h:82
pg_edit_type_t type
Definition: edit.h:66
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u8 * format_hex_bytes(u8 *s, va_list *va)
Definition: std-formats.c:84
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
Definition: node_funcs.h:343
static uword pg_input_stream(vlib_node_runtime_t *node, pg_main_t *pg, pg_stream_t *s)
Definition: input.c:1732
i32 lsb_bit_offset
Definition: edit.h:73
unsigned int u32
Definition: types.h:88
static u32 vlib_get_current_worker_index()
Definition: threads.h:390
#define vec_end(v)
End (last data address) of vector.
static int validate_buffer_data(vlib_buffer_t *b, pg_stream_t *s)
Definition: input.c:89
#define VLIB_FRAME_SIZE
Definition: node.h:378
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
Definition: node_funcs.h:264
u32 buffer_bytes
Definition: pg.h:124
vnet_crypto_main_t * cm
Definition: quic_crypto.c:41
f64 packet_accumulator
Definition: pg.h:167
u32 last_increment_packet_size
Definition: pg.h:127
#define clib_bitmap_foreach(i, ai, body)
Macro to iterate across set bits in a bitmap.
Definition: bitmap.h:361
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:519
u32 pg_if_index
Definition: pg.h:130
format_function_t format_vnet_buffer
Definition: buffer.h:483
u32 current_replay_packet_index
Definition: pg.h:173
u32 ** replay_buffers_by_thread
Definition: pg.h:334
u8 * name
Definition: pg.h:97
unsigned short u16
Definition: types.h:57
static u64 pg_edit_get_value(pg_edit_t *e, int hi_or_lo)
Definition: edit.h:173
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
static void * vnet_get_config_data(vnet_config_main_t *cm, u32 *config_index, u32 *next_index, u32 n_data_bytes)
Definition: config.h:122
#define PREDICT_FALSE(x)
Definition: clib.h:111
#define always_inline
Definition: ipsec.h:28
format_function_t * format_buffer
Definition: node.h:358
static void pg_input_trace(pg_main_t *pg, vlib_node_runtime_t *node, u32 stream_index, u32 next_index, u32 *buffers, u32 n_buffers)
Definition: input.c:1454
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:338
vlib_main_t * vm
Definition: in2out_ed.c:1810
uword ** enabled_streams
Definition: pg.h:324
u32 n_bits
Definition: edit.h:79
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
Definition: buffer_funcs.h:96
u8 ip6[16]
Definition: one.api:477
vlib_buffer_t buffer
Definition: input.c:1408
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
static void vlib_buffer_free_no_next(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers, does not free the buffer chain for each buffer.
Definition: buffer_funcs.h:907
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:218
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
static_always_inline void do_set_random(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 n_bits, u32 byte_offset, u32 is_net_byte_order, u32 want_sum, u64 *sum_result, u64 v_min, u64 v_max)
Definition: input.c:315
u8 gso_enabled
Definition: pg.h:305
static u32 pg_stream_fill(pg_main_t *pg, pg_stream_t *s, u32 n_buffers)
Definition: input.c:1312
u32 min_packet_bytes
Definition: pg.h:111
u32 max_packet_bytes
Definition: pg.h:111
#define clib_warning(format, args...)
Definition: error.h:59
static void set_2(void *a0, void *a1, u64 v0, u64 v1, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order, u32 is_increment)
Definition: input.c:124
static uword max_pow2(uword x)
Definition: clib.h:226
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
Definition: node_funcs.h:258
#define pool_is_free_index(P, I)
Use free bitmap to query whether given index is free.
Definition: pool.h:284
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
Definition: buffer.h:147
#define ARRAY_LEN(x)
Definition: clib.h:62
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:456
pg_edit_t * non_fixed_edits
Definition: pg.h:115
u8 ** replay_packet_templates
Definition: pg.h:171
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1810
static uword clib_fifo_advance_head(void *v, uword n_elts)
Definition: fifo.h:169
static int validate_buffer_data2(vlib_buffer_t *b, pg_stream_t *s, u32 data_offset, u32 n_bytes)
Definition: input.c:59
pg_stream_t * streams
Definition: pg.h:321
static_always_inline void fill_gso_buffer_flags(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u32 packet_data_size)
Definition: input.c:1530
#define ASSERT(truth)
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:178
u8 data[128]
Definition: ipsec_types.api:87
vlib_frame_t * frame
Definition: node.h:406
u16 flags
Definition: node.h:388
void pg_stream_enable_disable(pg_main_t *pg, pg_stream_t *s, int is_enable)
Definition: stream.c:49
static_always_inline int ethernet_frame_is_tagged(u16 type)
Definition: ethernet.h:78
static uword vlib_buffer_index_length_in_chain(vlib_main_t *vm, u32 bi)
Get length in bytes of the buffer index buffer chain.
Definition: buffer_funcs.h:386
#define clib_mem_unaligned(pointer, type)
Definition: types.h:155
Definition: pg.h:94
#define clib_max(x, y)
Definition: clib.h:288
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
#define clib_fifo_end(v)
Definition: fifo.h:63
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
#define vec_elt(v, i)
Get vector value at index i.
u8 device_input_feature_arc_index
Feature arc index for device-input.
Definition: feature.h:112
static_always_inline void do_setbits_random(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 max_bits, u32 n_bits, u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
Definition: input.c:638
u32 node_index
Definition: pg.h:141
Definition: defs.h:47
u32 sw_if_index[VLIB_N_RX_TX]
Definition: pg.h:138
#define clib_fifo_add1(f, e)
Definition: fifo.h:192
static_always_inline void vlib_buffer_copy_indices(u32 *dst, u32 *src, u32 n_indices)
Definition: buffer_funcs.h:102
static u32 pg_stream_fill_replay(pg_main_t *pg, pg_stream_t *s, u32 n_alloc)
Definition: input.c:1194
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
u32 packet_length
Definition: input.c:1404
#define VLIB_BUFFER_MIN_CHAIN_SEG_SIZE
Definition: buffer.h:58
f64 rate_packets_per_second
Definition: pg.h:163
static u8 * format_pg_input_trace(u8 *s, va_list *va)
Definition: input.c:1412
#define vnet_buffer(b)
Definition: buffer.h:408
static int tcp_header_bytes(tcp_header_t *t)
Definition: tcp_packet.h:93
static u32 pg_stream_fill_helper(pg_main_t *pg, pg_stream_t *s, pg_buffer_index_t *bi, u32 *buffers, u32 *next_buffers, u32 n_alloc)
Definition: input.c:1149
static u32 vlib_num_workers()
Definition: threads.h:372
#define STRUCT_SIZE_OF(t, f)
Definition: clib.h:67
u64 n_packets_generated
Definition: pg.h:152
vlib_node_registration_t pg_input_node
(constructor) VLIB_REGISTER_NODE (pg_input_node)
Definition: input.c:1801
pg_main_t pg_main
Definition: init.c:44
static vlib_node_t * vlib_get_node(vlib_main_t *vm, u32 i)
Get vlib node by index.
Definition: node_funcs.h:59
#define vec_foreach(var, vec)
Vector iterator.
f64 end
end of the time range
Definition: mactime.api:44
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1811
static void vlib_frame_no_append(vlib_frame_t *f)
Definition: node_funcs.h:224
static int ip4_header_bytes(const ip4_header_t *i)
Definition: ip4_packet.h:235
u32 ip4
Definition: one.api:440
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:203
vnet_feature_config_main_t * feature_config_mains
feature config main objects
Definition: feature.h:100
u32 stream_index
Definition: input.c:1402
uword pg_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: input.c:1780
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:630
#define BITS(x)
Definition: clib.h:61
f64 time_last_generate
Definition: pg.h:165
vnet_feature_main_t feature_main
Definition: feature.c:19
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
clib_random_buffer_t random_buffer
Definition: main.h:212
static void setbits_2(void *a0, void *a1, u64 v0, u64 v1, u64 v_min, u64 v_max, u32 max_bits, u32 n_bits, u64 mask, u32 shift, u32 is_increment)
Definition: input.c:474
pg_interface_t * interfaces
Definition: pg.h:330
Definition: defs.h:46
u32 * buffer_fifo
Definition: pg.h:90