FD.io VPP v21.06-1-gbb7418cf9 (Vector Packet Processing) - input.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * pg_input.c: buffer generator input
17  *
18  * Copyright (c) 2008 Eliot Dresselhaus
19  *
20  * Permission is hereby granted, free of charge, to any person obtaining
21  * a copy of this software and associated documentation files (the
22  * "Software"), to deal in the Software without restriction, including
23  * without limitation the rights to use, copy, modify, merge, publish,
24  * distribute, sublicense, and/or sell copies of the Software, and to
25  * permit persons to whom the Software is furnished to do so, subject to
26  * the following conditions:
27  *
28  * The above copyright notice and this permission notice shall be
29  * included in all copies or substantial portions of the Software.
30  *
31  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38  */
39 
40  /*
41  * To be honest, the packet generator needs an extreme
42  * makeover. Two key assumptions which drove the current implementation
 43  * are no longer true. First, that buffer managers implement a
44  * post-TX recycle list. Second, that packet generator performance
45  * is first-order important.
46  */
47 
48 #include <vlib/vlib.h>
49 #include <vnet/pg/pg.h>
50 #include <vnet/vnet.h>
51 #include <vnet/ethernet/ethernet.h>
52 #include <vnet/feature/feature.h>
53 #include <vnet/ip/ip4_packet.h>
54 #include <vnet/ip/ip6_packet.h>
55 #include <vnet/udp/udp_packet.h>
56 #include <vnet/devices/devices.h>
57 #include <vnet/gso/gro_func.h>
58 
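/*
 * validate_buffer_data2: debug-time check that a buffer still matches the
 * stream's fixed packet data under the per-byte mask.  A byte passes only
 * when (data & mask) equals the expected byte, so a 0xff mask byte demands
 * an exact match while cleared mask bits are ignored.
 */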
 59 static int
 60 validate_buffer_data2 (vlib_buffer_t * b, pg_stream_t * s,
 61  u32 data_offset, u32 n_bytes)
62 {
63  u8 *bd, *pd, *pm;
64  u32 i;
65 
66  bd = b->data;
67  pd = s->fixed_packet_data + data_offset;
68  pm = s->fixed_packet_data_mask + data_offset;
69 
70  if (pd + n_bytes >= vec_end (s->fixed_packet_data))
71  n_bytes = (pd < vec_end (s->fixed_packet_data)
72  ? vec_end (s->fixed_packet_data) - pd : 0);
73 
74  for (i = 0; i < n_bytes; i++)
75  if ((bd[i] & pm[i]) != pd[i])
76  break;
77 
78  if (i >= n_bytes)
79  return 1;
80 
81  clib_warning ("buffer %U", format_vnet_buffer, b);
82  clib_warning ("differ at index %d", i);
83  clib_warning ("is %U", format_hex_bytes, bd, n_bytes);
84  clib_warning ("mask %U", format_hex_bytes, pm, n_bytes);
85  clib_warning ("expect %U", format_hex_bytes, pd, n_bytes);
86  return 0;
87 }
88 
 89 static int
 90 validate_buffer_data (vlib_buffer_t * b, pg_stream_t * s)
 91 {
92  return validate_buffer_data2 (b, s, 0, s->buffer_bytes);
93 }
94 
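/*
 * set_1 / set_2 store one or two values of n_bits width at (possibly
 * unaligned) addresses, converting to network byte order first when
 * requested.  Callers keep the values inside [v_min, v_max]; the second
 * value may reach v_max + 1 in the increment case, hence the relaxed
 * ASSERT in set_2.
 */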
95 always_inline void
96 set_1 (void *a0,
97  u64 v0, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order)
98 {
99  ASSERT (v0 >= v_min && v0 <= v_max);
100  if (n_bits == BITS (u8))
101  {
102  ((u8 *) a0)[0] = v0;
103  }
104  else if (n_bits == BITS (u16))
105  {
106  if (is_net_byte_order)
107  v0 = clib_host_to_net_u16 (v0);
108  clib_mem_unaligned (a0, u16) = v0;
109  }
110  else if (n_bits == BITS (u32))
111  {
112  if (is_net_byte_order)
113  v0 = clib_host_to_net_u32 (v0);
114  clib_mem_unaligned (a0, u32) = v0;
115  }
116  else if (n_bits == BITS (u64))
117  {
118  if (is_net_byte_order)
119  v0 = clib_host_to_net_u64 (v0);
120  clib_mem_unaligned (a0, u64) = v0;
121  }
122 }
123 
124 always_inline void
125 set_2 (void *a0, void *a1,
126  u64 v0, u64 v1,
127  u64 v_min, u64 v_max,
128  u32 n_bits, u32 is_net_byte_order, u32 is_increment)
129 {
130  ASSERT (v0 >= v_min && v0 <= v_max);
131  ASSERT (v1 >= v_min && v1 <= (v_max + is_increment));
132  if (n_bits == BITS (u8))
133  {
134  ((u8 *) a0)[0] = v0;
135  ((u8 *) a1)[0] = v1;
136  }
137  else if (n_bits == BITS (u16))
138  {
139  if (is_net_byte_order)
140  {
141  v0 = clib_host_to_net_u16 (v0);
142  v1 = clib_host_to_net_u16 (v1);
143  }
144  clib_mem_unaligned (a0, u16) = v0;
145  clib_mem_unaligned (a1, u16) = v1;
146  }
147  else if (n_bits == BITS (u32))
148  {
149  if (is_net_byte_order)
150  {
151  v0 = clib_host_to_net_u32 (v0);
152  v1 = clib_host_to_net_u32 (v1);
153  }
154  clib_mem_unaligned (a0, u32) = v0;
155  clib_mem_unaligned (a1, u32) = v1;
156  }
157  else if (n_bits == BITS (u64))
158  {
159  if (is_net_byte_order)
160  {
161  v0 = clib_host_to_net_u64 (v0);
162  v1 = clib_host_to_net_u64 (v1);
163  }
164  clib_mem_unaligned (a0, u64) = v0;
165  clib_mem_unaligned (a1, u64) = v1;
166  }
167 }
168 
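/*
 * The do_set_{fixed,increment,random} workers below rewrite a whole
 * byte-aligned field in every buffer, two buffers per iteration with a
 * write prefetch of the following two.  The increment and random variants
 * can also return the sum of the values written, which the packet-length
 * path uses to update interface byte counters.
 */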
 169 static_always_inline void
 170 do_set_fixed (pg_main_t * pg,
 171  pg_stream_t * s,
172  u32 * buffers,
173  u32 n_buffers,
174  u32 n_bits,
175  u32 byte_offset, u32 is_net_byte_order, u64 v_min, u64 v_max)
176 {
177  vlib_main_t *vm = vlib_get_main ();
178 
179  while (n_buffers >= 4)
180  {
181  vlib_buffer_t *b0, *b1, *b2, *b3;
182  void *a0, *a1;
183 
184  b0 = vlib_get_buffer (vm, buffers[0]);
185  b1 = vlib_get_buffer (vm, buffers[1]);
186  b2 = vlib_get_buffer (vm, buffers[2]);
187  b3 = vlib_get_buffer (vm, buffers[3]);
188  buffers += 2;
189  n_buffers -= 2;
190 
191  a0 = (void *) b0 + byte_offset;
192  a1 = (void *) b1 + byte_offset;
193  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
194  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
195 
196  set_2 (a0, a1, v_min, v_min, v_min, v_max, n_bits, is_net_byte_order,
197  /* is_increment */ 0);
198 
199  ASSERT (validate_buffer_data (b0, s));
200  ASSERT (validate_buffer_data (b1, s));
201  }
202 
203  while (n_buffers > 0)
204  {
205  vlib_buffer_t *b0;
206  void *a0;
207 
208  b0 = vlib_get_buffer (vm, buffers[0]);
209  buffers += 1;
210  n_buffers -= 1;
211 
212  a0 = (void *) b0 + byte_offset;
213 
214  set_1 (a0, v_min, v_min, v_max, n_bits, is_net_byte_order);
215 
216  ASSERT (validate_buffer_data (b0, s));
217  }
218 }
219 
 220 static_always_inline u64
 221 do_set_increment (pg_main_t * pg,
 222  pg_stream_t * s,
223  u32 * buffers,
224  u32 n_buffers,
225  u32 n_bits,
226  u32 byte_offset,
227  u32 is_net_byte_order,
228  u32 want_sum, u64 * sum_result, u64 v_min, u64 v_max, u64 v)
229 {
230  vlib_main_t *vm = vlib_get_main ();
231  u64 sum = 0;
232 
233  ASSERT (v >= v_min && v <= v_max);
234 
235  while (n_buffers >= 4)
236  {
237  vlib_buffer_t *b0, *b1, *b2, *b3;
238  void *a0, *a1;
239  u64 v_old;
240 
241  b0 = vlib_get_buffer (vm, buffers[0]);
242  b1 = vlib_get_buffer (vm, buffers[1]);
243  b2 = vlib_get_buffer (vm, buffers[2]);
244  b3 = vlib_get_buffer (vm, buffers[3]);
245  buffers += 2;
246  n_buffers -= 2;
247 
248  a0 = (void *) b0 + byte_offset;
249  a1 = (void *) b1 + byte_offset;
250  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
251  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
252 
253  v_old = v;
254  v = v_old + 2;
255  v = v > v_max ? v_min : v;
256  set_2 (a0, a1,
257  v_old + 0, v_old + 1, v_min, v_max, n_bits, is_net_byte_order,
258  /* is_increment */ 1);
259 
260  if (want_sum)
261  sum += 2 * v_old + 1;
262 
263  if (PREDICT_FALSE (v_old + 1 > v_max))
264  {
265  if (want_sum)
266  sum -= 2 * v_old + 1;
267 
268  v = v_old;
269  set_1 (a0, v + 0, v_min, v_max, n_bits, is_net_byte_order);
270  if (want_sum)
271  sum += v;
272  v += 1;
273 
274  v = v > v_max ? v_min : v;
275  set_1 (a1, v + 0, v_min, v_max, n_bits, is_net_byte_order);
276  if (want_sum)
277  sum += v;
278  v += 1;
279  }
280 
281  ASSERT (validate_buffer_data (b0, s));
282  ASSERT (validate_buffer_data (b1, s));
283  }
284 
285  while (n_buffers > 0)
286  {
287  vlib_buffer_t *b0;
288  void *a0;
289  u64 v_old;
290 
291  b0 = vlib_get_buffer (vm, buffers[0]);
292  buffers += 1;
293  n_buffers -= 1;
294 
295  a0 = (void *) b0 + byte_offset;
296 
297  v_old = v;
298  if (want_sum)
299  sum += v_old;
300  v += 1;
301  v = v > v_max ? v_min : v;
302 
303  ASSERT (v_old >= v_min && v_old <= v_max);
304  set_1 (a0, v_old, v_min, v_max, n_bits, is_net_byte_order);
305 
306  ASSERT (validate_buffer_data (b0, s));
307  }
308 
309  if (want_sum)
310  *sum_result = sum;
311 
312  return v;
313 }
314 
 315 static_always_inline void
 316 do_set_random (pg_main_t * pg,
 317  pg_stream_t * s,
318  u32 * buffers,
319  u32 n_buffers,
320  u32 n_bits,
321  u32 byte_offset,
322  u32 is_net_byte_order,
323  u32 want_sum, u64 * sum_result, u64 v_min, u64 v_max)
324 {
325  vlib_main_t *vm = vlib_get_main ();
326  u64 v_diff = v_max - v_min + 1;
327  u64 r_mask = max_pow2 (v_diff) - 1;
328  u64 v0, v1;
329  u64 sum = 0;
330  void *random_data;
331 
332  random_data = clib_random_buffer_get_data
333  (&vm->random_buffer, n_buffers * n_bits / BITS (u8));
334 
335  v0 = v1 = v_min;
336 
337  while (n_buffers >= 4)
338  {
339  vlib_buffer_t *b0, *b1, *b2, *b3;
340  void *a0, *a1;
341  u64 r0 = 0, r1 = 0; /* warnings be gone */
342 
343  b0 = vlib_get_buffer (vm, buffers[0]);
344  b1 = vlib_get_buffer (vm, buffers[1]);
345  b2 = vlib_get_buffer (vm, buffers[2]);
346  b3 = vlib_get_buffer (vm, buffers[3]);
347  buffers += 2;
348  n_buffers -= 2;
349 
350  a0 = (void *) b0 + byte_offset;
351  a1 = (void *) b1 + byte_offset;
352  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
353  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
354 
355  switch (n_bits)
356  {
357 #define _(n) \
358  case BITS (u##n): \
359  { \
360  u##n * r = random_data; \
361  r0 = r[0]; \
362  r1 = r[1]; \
363  random_data = r + 2; \
364  } \
365  break;
366 
367  _(8);
368  _(16);
369  _(32);
370  _(64);
371 
372 #undef _
373  }
374 
375  /* Add power of 2 sized random number which may be out of range. */
376  v0 += r0 & r_mask;
377  v1 += r1 & r_mask;
378 
379  /* Twice should be enough to reduce to v_min .. v_max range. */
380  v0 = v0 > v_max ? v0 - v_diff : v0;
381  v1 = v1 > v_max ? v1 - v_diff : v1;
382  v0 = v0 > v_max ? v0 - v_diff : v0;
383  v1 = v1 > v_max ? v1 - v_diff : v1;
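 /* Worked example: v_min = 1, v_max = 6 gives v_diff = 6 and r_mask = 7,
  * so v0 can reach at most 6 + 7 = 13; one subtraction of v_diff leaves 7,
  * a second leaves 1, back inside [v_min, v_max].  The subtraction only
  * fires while the value exceeds v_max, so it never undershoots v_min. */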
384 
385  if (want_sum)
386  sum += v0 + v1;
387 
388  set_2 (a0, a1, v0, v1, v_min, v_max, n_bits, is_net_byte_order,
389  /* is_increment */ 0);
390 
391  ASSERT (validate_buffer_data (b0, s));
392  ASSERT (validate_buffer_data (b1, s));
393  }
394 
395  while (n_buffers > 0)
396  {
397  vlib_buffer_t *b0;
398  void *a0;
399  u64 r0 = 0; /* warnings be gone */
400 
401  b0 = vlib_get_buffer (vm, buffers[0]);
402  buffers += 1;
403  n_buffers -= 1;
404 
405  a0 = (void *) b0 + byte_offset;
406 
407  switch (n_bits)
408  {
409 #define _(n) \
410  case BITS (u##n): \
411  { \
412  u##n * r = random_data; \
413  r0 = r[0]; \
414  random_data = r + 1; \
415  } \
416  break;
417 
418  _(8);
419  _(16);
420  _(32);
421  _(64);
422 
423 #undef _
424  }
425 
426  /* Add power of 2 sized random number which may be out of range. */
427  v0 += r0 & r_mask;
428 
429  /* Twice should be enough to reduce to v_min .. v_max range. */
430  v0 = v0 > v_max ? v0 - v_diff : v0;
431  v0 = v0 > v_max ? v0 - v_diff : v0;
432 
433  if (want_sum)
434  sum += v0;
435 
436  set_1 (a0, v0, v_min, v_max, n_bits, is_net_byte_order);
437 
438  ASSERT (validate_buffer_data (b0, s));
439  }
440 
441  if (want_sum)
442  *sum_result = sum;
443 }
444 
445 #define _(i,t) \
446  clib_mem_unaligned (a##i, t) = \
447  clib_host_to_net_##t ((clib_net_to_host_mem_##t (a##i) &~ mask) \
448  | (v##i << shift))
449 
450 always_inline void
451 setbits_1 (void *a0,
452  u64 v0,
453  u64 v_min, u64 v_max,
454  u32 max_bits, u32 n_bits, u64 mask, u32 shift)
455 {
456  ASSERT (v0 >= v_min && v0 <= v_max);
457  if (max_bits == BITS (u8))
458  ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
459 
460  else if (max_bits == BITS (u16))
461  {
462  _(0, u16);
463  }
464  else if (max_bits == BITS (u32))
465  {
466  _(0, u32);
467  }
468  else if (max_bits == BITS (u64))
469  {
470  _(0, u64);
471  }
472 }
473 
474 always_inline void
475 setbits_2 (void *a0, void *a1,
476  u64 v0, u64 v1,
477  u64 v_min, u64 v_max,
478  u32 max_bits, u32 n_bits, u64 mask, u32 shift, u32 is_increment)
479 {
480  ASSERT (v0 >= v_min && v0 <= v_max);
481  ASSERT (v1 >= v_min && v1 <= v_max + is_increment);
482  if (max_bits == BITS (u8))
483  {
484  ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
485  ((u8 *) a1)[0] = (((u8 *) a1)[0] & ~mask) | (v1 << shift);
486  }
487 
488  else if (max_bits == BITS (u16))
489  {
490  _(0, u16);
491  _(1, u16);
492  }
493  else if (max_bits == BITS (u32))
494  {
495  _(0, u32);
496  _(1, u32);
497  }
498  else if (max_bits == BITS (u64))
499  {
500  _(0, u64);
501  _(1, u64);
502  }
503 }
504 
505 #undef _
506 
 507 static_always_inline void
 508 do_setbits_fixed (pg_main_t * pg,
 509  pg_stream_t * s,
510  u32 * buffers,
511  u32 n_buffers,
512  u32 max_bits,
513  u32 n_bits,
514  u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
515 {
516  vlib_main_t *vm = vlib_get_main ();
517 
518  while (n_buffers >= 4)
519  {
520  vlib_buffer_t *b0, *b1, *b2, *b3;
521  void *a0, *a1;
522 
523  b0 = vlib_get_buffer (vm, buffers[0]);
524  b1 = vlib_get_buffer (vm, buffers[1]);
525  b2 = vlib_get_buffer (vm, buffers[2]);
526  b3 = vlib_get_buffer (vm, buffers[3]);
527  buffers += 2;
528  n_buffers -= 2;
529 
530  a0 = (void *) b0 + byte_offset;
531  a1 = (void *) b1 + byte_offset;
532  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
533  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
534 
535  setbits_2 (a0, a1,
536  v_min, v_min, v_min, v_max, max_bits, n_bits, mask, shift,
537  /* is_increment */ 0);
538 
539  ASSERT (validate_buffer_data (b0, s));
540  ASSERT (validate_buffer_data (b1, s));
541  }
542 
543  while (n_buffers > 0)
544  {
545  vlib_buffer_t *b0;
546  void *a0;
547 
548  b0 = vlib_get_buffer (vm, buffers[0]);
549  buffers += 1;
550  n_buffers -= 1;
551 
552  a0 = (void *) b0 + byte_offset;
553 
554  setbits_1 (a0, v_min, v_min, v_max, max_bits, n_bits, mask, shift);
555  ASSERT (validate_buffer_data (b0, s));
556  }
557 }
558 
 559 static_always_inline u64
 560 do_setbits_increment (pg_main_t * pg,
 561  pg_stream_t * s,
562  u32 * buffers,
563  u32 n_buffers,
564  u32 max_bits,
565  u32 n_bits,
566  u32 byte_offset,
567  u64 v_min, u64 v_max, u64 v, u64 mask, u32 shift)
568 {
569  vlib_main_t *vm = vlib_get_main ();
570 
571  ASSERT (v >= v_min && v <= v_max);
572 
573  while (n_buffers >= 4)
574  {
575  vlib_buffer_t *b0, *b1, *b2, *b3;
576  void *a0, *a1;
577  u64 v_old;
578 
579  b0 = vlib_get_buffer (vm, buffers[0]);
580  b1 = vlib_get_buffer (vm, buffers[1]);
581  b2 = vlib_get_buffer (vm, buffers[2]);
582  b3 = vlib_get_buffer (vm, buffers[3]);
583  buffers += 2;
584  n_buffers -= 2;
585 
586  a0 = (void *) b0 + byte_offset;
587  a1 = (void *) b1 + byte_offset;
588  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
589  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
590 
591  v_old = v;
592  v = v_old + 2;
593  v = v > v_max ? v_min : v;
594  setbits_2 (a0, a1,
595  v_old + 0, v_old + 1,
596  v_min, v_max, max_bits, n_bits, mask, shift,
597  /* is_increment */ 1);
598 
599  if (PREDICT_FALSE (v_old + 1 > v_max))
600  {
601  v = v_old;
602  setbits_1 (a0, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
603  v += 1;
604 
605  v = v > v_max ? v_min : v;
606  setbits_1 (a1, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
607  v += 1;
608  }
609  ASSERT (validate_buffer_data (b0, s));
610  ASSERT (validate_buffer_data (b1, s));
611  }
612 
613  while (n_buffers > 0)
614  {
615  vlib_buffer_t *b0;
616  void *a0;
617  u64 v_old;
618 
619  b0 = vlib_get_buffer (vm, buffers[0]);
620  buffers += 1;
621  n_buffers -= 1;
622 
623  a0 = (void *) b0 + byte_offset;
624 
625  v_old = v;
626  v = v_old + 1;
627  v = v > v_max ? v_min : v;
628 
629  ASSERT (v_old >= v_min && v_old <= v_max);
630  setbits_1 (a0, v_old, v_min, v_max, max_bits, n_bits, mask, shift);
631 
632  ASSERT (validate_buffer_data (b0, s));
633  }
634 
635  return v;
636 }
637 
 638 static_always_inline void
 639 do_setbits_random (pg_main_t * pg,
 640  pg_stream_t * s,
641  u32 * buffers,
642  u32 n_buffers,
643  u32 max_bits,
644  u32 n_bits,
645  u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
646 {
647  vlib_main_t *vm = vlib_get_main ();
648  u64 v_diff = v_max - v_min + 1;
649  u64 r_mask = max_pow2 (v_diff) - 1;
650  u64 v0, v1;
651  void *random_data;
652 
653  random_data = clib_random_buffer_get_data
654  (&vm->random_buffer, n_buffers * max_bits / BITS (u8));
655  v0 = v1 = v_min;
656 
657  while (n_buffers >= 4)
658  {
659  vlib_buffer_t *b0, *b1, *b2, *b3;
660  void *a0, *a1;
661  u64 r0 = 0, r1 = 0; /* warnings be gone */
662 
663  b0 = vlib_get_buffer (vm, buffers[0]);
664  b1 = vlib_get_buffer (vm, buffers[1]);
665  b2 = vlib_get_buffer (vm, buffers[2]);
666  b3 = vlib_get_buffer (vm, buffers[3]);
667  buffers += 2;
668  n_buffers -= 2;
669 
670  a0 = (void *) b0 + byte_offset;
671  a1 = (void *) b1 + byte_offset;
672  CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
673  CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
674 
675  switch (max_bits)
676  {
677 #define _(n) \
678  case BITS (u##n): \
679  { \
680  u##n * r = random_data; \
681  r0 = r[0]; \
682  r1 = r[1]; \
683  random_data = r + 2; \
684  } \
685  break;
686 
687  _(8);
688  _(16);
689  _(32);
690  _(64);
691 
692 #undef _
693  }
694 
695  /* Add power of 2 sized random number which may be out of range. */
696  v0 += r0 & r_mask;
697  v1 += r1 & r_mask;
698 
699  /* Twice should be enough to reduce to v_min .. v_max range. */
700  v0 = v0 > v_max ? v0 - v_diff : v0;
701  v1 = v1 > v_max ? v1 - v_diff : v1;
702  v0 = v0 > v_max ? v0 - v_diff : v0;
703  v1 = v1 > v_max ? v1 - v_diff : v1;
704 
705  setbits_2 (a0, a1, v0, v1, v_min, v_max, max_bits, n_bits, mask, shift,
706  /* is_increment */ 0);
707 
708  ASSERT (validate_buffer_data (b0, s));
709  ASSERT (validate_buffer_data (b1, s));
710  }
711 
712  while (n_buffers > 0)
713  {
714  vlib_buffer_t *b0;
715  void *a0;
716  u64 r0 = 0; /* warnings be gone */
717 
718  b0 = vlib_get_buffer (vm, buffers[0]);
719  buffers += 1;
720  n_buffers -= 1;
721 
722  a0 = (void *) b0 + byte_offset;
723 
724  switch (max_bits)
725  {
726 #define _(n) \
727  case BITS (u##n): \
728  { \
729  u##n * r = random_data; \
730  r0 = r[0]; \
731  random_data = r + 1; \
732  } \
733  break;
734 
735  _(8);
736  _(16);
737  _(32);
738  _(64);
739 
740 #undef _
741  }
742 
743  /* Add power of 2 sized random number which may be out of range. */
744  v0 += r0 & r_mask;
745 
746  /* Twice should be enough to reduce to v_min .. v_max range. */
747  v0 = v0 > v_max ? v0 - v_diff : v0;
748  v0 = v0 > v_max ? v0 - v_diff : v0;
749 
750  setbits_1 (a0, v0, v_min, v_max, max_bits, n_bits, mask, shift);
751 
752  ASSERT (validate_buffer_data (b0, s));
753  }
754 }
755 
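/*
 * do_it() applies a single edit to a vector of buffers: byte-aligned
 * fields of 8/16/32/64 bits go straight to the do_set_* workers above,
 * while sub-byte or unaligned bit ranges are widened to the enclosing
 * power-of-two quantity and handled by the masked do_setbits_* workers.
 */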
756 static u64
 757 do_it (pg_main_t * pg,
 758  pg_stream_t * s,
759  u32 * buffers,
760  u32 n_buffers,
761  u32 lo_bit, u32 hi_bit,
762  u64 v_min, u64 v_max, u64 v, pg_edit_type_t edit_type)
763 {
764  u32 max_bits, l0, l1, h1, start_bit;
765 
766  if (v_min == v_max)
767  edit_type = PG_EDIT_FIXED;
768 
769  l0 = lo_bit / BITS (u8);
770  l1 = lo_bit % BITS (u8);
771  h1 = hi_bit % BITS (u8);
772 
773  start_bit = l0 * BITS (u8);
774 
775  max_bits = hi_bit - start_bit;
776  ASSERT (max_bits <= 64);
777 
778 #define _(n) \
779  case (n): \
780  if (edit_type == PG_EDIT_INCREMENT) \
781  v = do_set_increment (pg, s, buffers, n_buffers, \
782  BITS (u##n), \
783  l0, \
784  /* is_net_byte_order */ 1, \
785  /* want sum */ 0, 0, \
786  v_min, v_max, \
787  v); \
788  else if (edit_type == PG_EDIT_RANDOM) \
789  do_set_random (pg, s, buffers, n_buffers, \
790  BITS (u##n), \
791  l0, \
792  /* is_net_byte_order */ 1, \
793  /* want sum */ 0, 0, \
794  v_min, v_max); \
795  else /* edit_type == PG_EDIT_FIXED */ \
796  do_set_fixed (pg, s, buffers, n_buffers, \
797  BITS (u##n), \
798  l0, \
799  /* is_net_byte_order */ 1, \
800  v_min, v_max); \
801  goto done;
802 
803  if (l1 == 0 && h1 == 0)
804  {
805  switch (max_bits)
806  {
807  _(8);
808  _(16);
809  _(32);
810  _(64);
811  }
812  }
813 
814 #undef _
815 
816  {
817  u64 mask;
818  u32 shift = l1;
819  u32 n_bits = max_bits;
820 
821  max_bits = clib_max (max_pow2 (n_bits), 8);
822 
823  mask = ((u64) 1 << (u64) n_bits) - 1;
824  mask &= ~(((u64) 1 << (u64) shift) - 1);
825 
826  mask <<= max_bits - n_bits;
827  shift += max_bits - n_bits;
828 
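 /* Example: an edit covering the high nibble of a single byte
  * (lo_bit % 8 == 4, hi_bit on the byte boundary) arrives here with
  * n_bits = 8 and shift = 4; mask = ((1 << 8) - 1) & ~((1 << 4) - 1)
  * = 0xf0 and max_bits stays 8, so the setbits_* workers rewrite only
  * that nibble. */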
829  switch (max_bits)
830  {
831 #define _(n) \
832  case (n): \
833  if (edit_type == PG_EDIT_INCREMENT) \
834  v = do_setbits_increment (pg, s, buffers, n_buffers, \
835  BITS (u##n), n_bits, \
836  l0, v_min, v_max, v, \
837  mask, shift); \
838  else if (edit_type == PG_EDIT_RANDOM) \
839  do_setbits_random (pg, s, buffers, n_buffers, \
840  BITS (u##n), n_bits, \
841  l0, v_min, v_max, \
842  mask, shift); \
843  else /* edit_type == PG_EDIT_FIXED */ \
844  do_setbits_fixed (pg, s, buffers, n_buffers, \
845  BITS (u##n), n_bits, \
846  l0, v_min, v_max, \
847  mask, shift); \
848  goto done;
849 
850  _(8);
851  _(16);
852  _(32);
853  _(64);
854 
855 #undef _
856  }
857  }
858 
859 done:
860  return v;
861 }
862 
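/*
 * pg_generate_set_lengths() treats vlib_buffer_t.current_length itself as
 * an editable field: fixed, incrementing or random packet sizes are written
 * straight into each buffer in host byte order, and the summed lengths feed
 * the interface rx combined counter below.
 */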
863 static void
 864 pg_generate_set_lengths (pg_main_t * pg,
 865  pg_stream_t * s, u32 * buffers, u32 n_buffers)
866 {
867  u64 v_min, v_max, length_sum;
868  pg_edit_type_t edit_type;
869 
870  v_min = s->min_packet_bytes;
871  v_max = s->max_packet_bytes;
872  edit_type = s->packet_size_edit_type;
873 
874  if (edit_type == PG_EDIT_INCREMENT)
 875  s->last_increment_packet_size
 876  = do_set_increment (pg, s, buffers, n_buffers,
877  8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
878  STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
879  /* is_net_byte_order */ 0,
880  /* want sum */ 1, &length_sum,
881  v_min, v_max, s->last_increment_packet_size);
882 
883  else if (edit_type == PG_EDIT_RANDOM)
884  do_set_random (pg, s, buffers, n_buffers,
885  8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
886  STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
887  /* is_net_byte_order */ 0,
888  /* want sum */ 1, &length_sum,
889  v_min, v_max);
890 
891  else /* edit_type == PG_EDIT_FIXED */
892  {
893  do_set_fixed (pg, s, buffers, n_buffers,
894  8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
895  STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
896  /* is_net_byte_order */ 0,
897  v_min, v_max);
898  length_sum = v_min * n_buffers;
899  }
900 
901  {
902  vnet_main_t *vnm = vnet_get_main ();
 903  vnet_interface_main_t *im = &vnm->interface_main;
 904  vnet_sw_interface_t *si =
 905  vnet_get_sw_interface (vnm, s->sw_if_index[VLIB_RX]);
 906 
 907  vlib_increment_combined_counter (im->combined_sw_if_counters
 908  + VNET_INTERFACE_COUNTER_RX,
 909  vlib_get_thread_index (),
 910  si->sw_if_index, n_buffers, length_sum);
911  }
912 
913 }
914 
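/*
 * For streams whose packets span several buffers,
 * pg_generate_fix_multi_buffer_lengths() re-splits each packet's total
 * length across the chain: every segment is capped at s->buffer_bytes,
 * VLIB_BUFFER_NEXT_PRESENT is set or cleared accordingly, and segments
 * left with no data are returned to the buffer pool.
 */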
915 static void
 916 pg_generate_fix_multi_buffer_lengths (pg_main_t * pg,
 917  pg_stream_t * s,
918  u32 * buffers, u32 n_buffers)
919 {
920  vlib_main_t *vm = vlib_get_main ();
921  pg_buffer_index_t *pbi;
922  uword n_bytes_left;
923  static u32 *unused_buffers = 0;
924 
925  while (n_buffers > 0)
926  {
927  vlib_buffer_t *b;
928  u32 bi;
929 
930  bi = buffers[0];
931  b = vlib_get_buffer (vm, bi);
932 
933  /* Current length here is length of whole packet. */
934  n_bytes_left = b->current_length;
935 
936  pbi = s->buffer_indices;
937  while (1)
938  {
939  uword n = clib_min (n_bytes_left, s->buffer_bytes);
940 
941  b->current_length = n;
942  n_bytes_left -= n;
943  if (n_bytes_left > 0)
944  b->flags |= VLIB_BUFFER_NEXT_PRESENT;
945  else
946  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
947 
948  /* Return unused buffers to fifos. */
949  if (n == 0)
950  vec_add1 (unused_buffers, bi);
951 
952  pbi++;
953  if (pbi >= vec_end (s->buffer_indices))
954  break;
955 
956  bi = b->next_buffer;
957  b = vlib_get_buffer (vm, bi);
958  }
959  ASSERT (n_bytes_left == 0);
960 
961  buffers += 1;
962  n_buffers -= 1;
963  }
964 
965  if (vec_len (unused_buffers) > 0)
966  {
967  vlib_buffer_free_no_next (vm, unused_buffers, vec_len (unused_buffers));
968  _vec_len (unused_buffers) = 0;
969  }
970 }
971 
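/*
 * pg_generate_edit() applies every non-fixed edit (increment or random)
 * to the freshly filled buffers via do_it(), then invokes the per-group
 * edit functions, last group first, to complete computed fields such as
 * IP lengths and checksums.
 */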
972 static void
 973 pg_generate_edit (pg_main_t * pg,
 974  pg_stream_t * s, u32 * buffers, u32 n_buffers)
975 {
976  pg_edit_t *e;
977 
 978  vec_foreach (e, s->non_fixed_edits)
 979  {
980  switch (e->type)
981  {
982  case PG_EDIT_RANDOM:
983  case PG_EDIT_INCREMENT:
984  {
985  u32 lo_bit, hi_bit;
986  u64 v_min, v_max;
987 
988  v_min = pg_edit_get_value (e, PG_EDIT_LO);
989  v_max = pg_edit_get_value (e, PG_EDIT_HI);
990 
991  hi_bit = (BITS (u8) * STRUCT_OFFSET_OF (vlib_buffer_t, data)
992  + BITS (u8) + e->lsb_bit_offset);
993  lo_bit = hi_bit - e->n_bits;
994 
 995  e->last_increment_value
 996  = do_it (pg, s, buffers, n_buffers, lo_bit, hi_bit, v_min, v_max,
997  e->last_increment_value, e->type);
998  }
999  break;
1000 
1001  case PG_EDIT_UNSPECIFIED:
1002  break;
1003 
1004  default:
1005  /* Should not be any fixed edits left. */
1006  ASSERT (0);
1007  break;
1008  }
1009  }
1010 
 1011  /* Call any edit functions to e.g. complete IP lengths, checksums, ... */
1012  {
1013  int i;
1014  for (i = vec_len (s->edit_groups) - 1; i >= 0; i--)
1015  {
1016  pg_edit_group_t *g = s->edit_groups + i;
1017  if (g->edit_function)
1018  g->edit_function (pg, s, g, buffers, n_buffers);
1019  }
1020  }
1021 }
1022 
1023 static void
 1024 pg_set_next_buffer_pointers (pg_main_t * pg,
 1025  pg_stream_t * s,
1026  u32 * buffers, u32 * next_buffers, u32 n_buffers)
1027 {
1028  vlib_main_t *vm = vlib_get_main ();
1029 
1030  while (n_buffers >= 4)
1031  {
1032  u32 ni0, ni1;
1033  vlib_buffer_t *b0, *b1;
1034 
1035  b0 = vlib_get_buffer (vm, buffers[0]);
1036  b1 = vlib_get_buffer (vm, buffers[1]);
1037  ni0 = next_buffers[0];
1038  ni1 = next_buffers[1];
1039 
1040  vlib_prefetch_buffer_with_index (vm, buffers[2], WRITE);
1041  vlib_prefetch_buffer_with_index (vm, buffers[3], WRITE);
1042 
1043  b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
1044  b1->flags |= VLIB_BUFFER_NEXT_PRESENT;
1045  b0->next_buffer = ni0;
1046  b1->next_buffer = ni1;
1047 
1048  buffers += 2;
1049  next_buffers += 2;
1050  n_buffers -= 2;
1051  }
1052 
1053  while (n_buffers > 0)
1054  {
1055  u32 ni0;
1056  vlib_buffer_t *b0;
1057 
1058  b0 = vlib_get_buffer (vm, buffers[0]);
1059  ni0 = next_buffers[0];
1060  buffers += 1;
1061  next_buffers += 1;
1062  n_buffers -= 1;
1063 
1064  b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
1065  b0->next_buffer = ni0;
1066  }
1067 }
1068 
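/*
 * init_buffers_inline() stamps each buffer with the stream's flags and
 * rx/tx sw_if_index and, when set_data is set, copies in the slice of
 * fixed packet data belonging to this position in the chain; otherwise it
 * only ASSERTs (debug builds) that the data is still intact.
 */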
 1069 static_always_inline void
 1070 init_buffers_inline (vlib_main_t * vm,
 1071  pg_stream_t * s,
1072  u32 * buffers,
1073  u32 n_buffers, u32 data_offset, u32 n_data, u32 set_data)
1074 {
1075  u32 n_left, *b;
1076  u8 *data, *mask;
1077 
1078  ASSERT (s->replay_packet_templates == 0);
1079 
1080  data = s->fixed_packet_data + data_offset;
1081  mask = s->fixed_packet_data_mask + data_offset;
1082  if (data + n_data >= vec_end (s->fixed_packet_data))
1083  n_data = (data < vec_end (s->fixed_packet_data)
1084  ? vec_end (s->fixed_packet_data) - data : 0);
1085  if (n_data > 0)
1086  {
1087  ASSERT (data + n_data <= vec_end (s->fixed_packet_data));
1088  ASSERT (mask + n_data <= vec_end (s->fixed_packet_data_mask));
1089  }
1090 
1091  n_left = n_buffers;
1092  b = buffers;
1093 
1094  while (n_left >= 4)
1095  {
1096  u32 bi0, bi1;
1097  vlib_buffer_t *b0, *b1;
1098 
1099  /* Prefetch next iteration. */
1100  vlib_prefetch_buffer_with_index (vm, b[2], STORE);
1101  vlib_prefetch_buffer_with_index (vm, b[3], STORE);
1102 
1103  bi0 = b[0];
1104  bi1 = b[1];
1105  b += 2;
1106  n_left -= 2;
1107 
1108  b0 = vlib_get_buffer (vm, bi0);
1109  b1 = vlib_get_buffer (vm, bi1);
1110  b0->flags |= s->buffer_flags;
1111  b1->flags |= s->buffer_flags;
1112 
1113  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1114  vnet_buffer (b1)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
1115 
1116  vnet_buffer (b0)->sw_if_index[VLIB_TX] =
1117  vnet_buffer (b1)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];
1118 
1119  if (set_data)
1120  {
1121  clib_memcpy_fast (b0->data, data, n_data);
1122  clib_memcpy_fast (b1->data, data, n_data);
1123  }
1124  else
1125  {
1126  ASSERT (validate_buffer_data2 (b0, s, data_offset, n_data));
1127  ASSERT (validate_buffer_data2 (b1, s, data_offset, n_data));
1128  }
1129  }
1130 
1131  while (n_left >= 1)
1132  {
1133  u32 bi0;
1134  vlib_buffer_t *b0;
1135 
1136  bi0 = b[0];
1137  b += 1;
1138  n_left -= 1;
1139 
1140  b0 = vlib_get_buffer (vm, bi0);
1141  b0->flags |= s->buffer_flags;
1142  vnet_buffer (b0)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
1143  vnet_buffer (b0)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];
1144 
1145  if (set_data)
1146  clib_memcpy_fast (b0->data, data, n_data);
1147  else
1148  ASSERT (validate_buffer_data2 (b0, s, data_offset, n_data));
1149  }
1150 }
1151 
1152 static u32
 1153 pg_stream_fill_helper (pg_main_t * pg,
 1154  pg_stream_t * s,
1155  pg_buffer_index_t * bi,
1156  u32 * buffers, u32 * next_buffers, u32 n_alloc)
1157 {
1158  vlib_main_t *vm = vlib_get_main ();
1159  uword is_start_of_packet = bi == s->buffer_indices;
1160  u32 n_allocated;
1161 
1163 
1164  n_allocated = vlib_buffer_alloc (vm, buffers, n_alloc);
1165  if (n_allocated == 0)
1166  return 0;
1167 
1168  /*
1169  * We can't assume we got all the buffers we asked for...
1170  * This never worked until recently.
1171  */
1172  n_alloc = n_allocated;
1173 
1174  /* Reinitialize buffers */
 1175  init_buffers_inline
 1176  (vm, s,
1177  buffers,
1178  n_alloc, (bi - s->buffer_indices) * s->buffer_bytes /* data offset */ ,
1179  s->buffer_bytes,
1180  /* set_data */ 1);
1181 
1182  if (next_buffers)
1183  pg_set_next_buffer_pointers (pg, s, buffers, next_buffers, n_alloc);
1184 
1185  if (is_start_of_packet)
1186  {
1187  pg_generate_set_lengths (pg, s, buffers, n_alloc);
1188  if (vec_len (s->buffer_indices) > 1)
1189  pg_generate_fix_multi_buffer_lengths (pg, s, buffers, n_alloc);
1190 
1191  pg_generate_edit (pg, s, buffers, n_alloc);
1192  }
1193 
1194  return n_alloc;
1195 }
1196 
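/*
 * pcap replay path: first walk the packet templates to work out how many
 * buf_sz-sized buffers the next n_alloc packets require, allocate them in
 * one shot, then copy template data into chained buffers, enqueue each
 * chain head on the stream's fifo and update the interface rx counters.
 */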
1197 static u32
 1198 pg_stream_fill_replay (pg_main_t * pg, pg_stream_t * s, u32 n_alloc)
 1199 {
1200  pg_buffer_index_t *bi;
1201  u32 n_left, i, l;
1202  u32 buffer_alloc_request = 0;
1203  u32 buffer_alloc_result;
1204  u32 current_buffer_index;
1205  u32 *buffers;
1206  vlib_main_t *vm = vlib_get_main ();
1207  vnet_main_t *vnm = vnet_get_main ();
1208  u32 buf_sz = vlib_buffer_get_default_data_size (vm);
 1209  vnet_interface_main_t *im = &vnm->interface_main;
 1210  vnet_sw_interface_t *si;
 1211 
1212  buffers = pg->replay_buffers_by_thread[vm->thread_index];
1213  vec_reset_length (buffers);
1214  bi = s->buffer_indices;
1215 
1216  n_left = n_alloc;
 1217  i = s->current_replay_packet_index;
 1218  l = vec_len (s->replay_packet_templates);
 1219 
1220  /* Figure out how many buffers we need */
1221  while (n_left > 0)
1222  {
1223  u8 *d0;
1224 
1225  d0 = vec_elt (s->replay_packet_templates, i);
1226  buffer_alloc_request += (vec_len (d0) + (buf_sz - 1)) / buf_sz;
1227 
1228  i = ((i + 1) == l) ? 0 : i + 1;
1229  n_left--;
1230  }
1231 
1232  ASSERT (buffer_alloc_request > 0);
1233  vec_validate (buffers, buffer_alloc_request - 1);
1234 
1235  /* Allocate that many buffers */
1236  buffer_alloc_result = vlib_buffer_alloc (vm, buffers, buffer_alloc_request);
1237  if (buffer_alloc_result < buffer_alloc_request)
1238  {
1239  clib_warning ("alloc failure, got %d not %d", buffer_alloc_result,
1240  buffer_alloc_request);
1241  vlib_buffer_free_no_next (vm, buffers, buffer_alloc_result);
1242  pg->replay_buffers_by_thread[vm->thread_index] = buffers;
1243  return 0;
1244  }
1245 
1246  /* Now go generate the buffers, and add them to the FIFO */
1247  n_left = n_alloc;
1248 
1249  current_buffer_index = 0;
 1250  i = s->current_replay_packet_index;
 1251  l = vec_len (s->replay_packet_templates);
 1252  while (n_left > 0)
1253  {
1254  u8 *d0;
1255  int not_last;
1256  u32 data_offset;
1257  u32 bytes_to_copy, bytes_this_chunk;
1258  vlib_buffer_t *b;
1259 
1260  d0 = vec_elt (s->replay_packet_templates, i);
1261  data_offset = 0;
1262  bytes_to_copy = vec_len (d0);
1263 
1264  /* Add head chunk to pg fifo */
1265  clib_fifo_add1 (bi->buffer_fifo, buffers[current_buffer_index]);
1266 
1267  /* Copy the data */
1268  while (bytes_to_copy)
1269  {
1270  bytes_this_chunk = clib_min (bytes_to_copy, buf_sz);
1271  ASSERT (current_buffer_index < vec_len (buffers));
1272  b = vlib_get_buffer (vm, buffers[current_buffer_index]);
1273  clib_memcpy_fast (b->data, d0 + data_offset, bytes_this_chunk);
1274  vnet_buffer (b)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
1275  vnet_buffer (b)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];
1276  b->flags = s->buffer_flags;
1277  b->next_buffer = 0;
1278  b->current_data = 0;
1279  b->current_length = bytes_this_chunk;
1280 
1281  not_last = bytes_this_chunk < bytes_to_copy;
1282  if (not_last)
1283  {
1284  ASSERT (current_buffer_index < (vec_len (buffers) - 1));
1285  b->flags |= VLIB_BUFFER_NEXT_PRESENT;
1286  b->next_buffer = buffers[current_buffer_index + 1];
1287  }
1288  bytes_to_copy -= bytes_this_chunk;
1289  data_offset += bytes_this_chunk;
1290  current_buffer_index++;
1291  }
1292 
1293  i = ((i + 1) == l) ? 0 : i + 1;
1294  n_left--;
1295  }
1296 
1297  /* Update the interface counters */
1298  si = vnet_get_sw_interface (vnm, s->sw_if_index[VLIB_RX]);
1299  l = 0;
1300  for (i = 0; i < n_alloc; i++)
1301  l += vlib_buffer_index_length_in_chain (vm, buffers[i]);
 1302  vlib_increment_combined_counter (im->combined_sw_if_counters
 1303  + VNET_INTERFACE_COUNTER_RX,
 1304  vlib_get_thread_index (),
 1305  si->sw_if_index, n_alloc, l);
1306 
1307  s->current_replay_packet_index += n_alloc;
 1308  s->current_replay_packet_index %= vec_len (s->replay_packet_templates);
 1309 
1310  pg->replay_buffers_by_thread[vm->thread_index] = buffers;
1311  return n_alloc;
1312 }
1313 
1314 
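/*
 * pg_stream_fill() tops up the per-segment buffer fifos so at least
 * n_buffers packets are ready: the refill is rounded up to a full frame,
 * clipped to the stream's packet-count limit, handed to the replay path
 * for pcap streams, and otherwise generated segment by segment from the
 * last fifo to the first so next-buffer pointers can be linked as it goes.
 */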
1315 static u32
 1316 pg_stream_fill (pg_main_t * pg, pg_stream_t * s, u32 n_buffers)
 1317 {
1318  pg_buffer_index_t *bi;
1319  word i, n_in_fifo, n_alloc, n_free, n_added;
1320  u32 *tail, *start, *end, *last_tail, *last_start;
1321 
1322  bi = s->buffer_indices;
1323 
1324  n_in_fifo = clib_fifo_elts (bi->buffer_fifo);
1325  if (n_in_fifo >= n_buffers)
1326  return n_in_fifo;
1327 
1328  n_alloc = n_buffers - n_in_fifo;
1329 
1330  /* Round up, but never generate more than limit. */
1331  n_alloc = clib_max (VLIB_FRAME_SIZE, n_alloc);
1332 
1333  if (s->n_packets_limit > 0
1334  && s->n_packets_generated + n_in_fifo + n_alloc >= s->n_packets_limit)
1335  {
1336  n_alloc = s->n_packets_limit - s->n_packets_generated - n_in_fifo;
1337  if (n_alloc < 0)
1338  n_alloc = 0;
1339  }
1340 
1341  /*
1342  * Handle pcap replay directly
1343  */
1344  if (s->replay_packet_templates)
1345  return pg_stream_fill_replay (pg, s, n_alloc);
1346 
1347  /* All buffer fifos should have the same size. */
1348  if (CLIB_DEBUG > 0)
1349  {
1350  uword l = ~0, e;
1351  vec_foreach (bi, s->buffer_indices)
1352  {
1353  e = clib_fifo_elts (bi->buffer_fifo);
1354  if (bi == s->buffer_indices)
1355  l = e;
1356  ASSERT (l == e);
1357  }
1358  }
1359 
1360  last_tail = last_start = 0;
1361  n_added = n_alloc;
1362 
1363  for (i = vec_len (s->buffer_indices) - 1; i >= 0; i--)
1364  {
1365  bi = vec_elt_at_index (s->buffer_indices, i);
1366 
1367  n_free = clib_fifo_free_elts (bi->buffer_fifo);
1368  if (n_free < n_alloc)
1369  clib_fifo_resize (bi->buffer_fifo, n_alloc - n_free);
1370 
1371  tail = clib_fifo_advance_tail (bi->buffer_fifo, n_alloc);
1372  start = bi->buffer_fifo;
1373  end = clib_fifo_end (bi->buffer_fifo);
1374 
1375  if (tail + n_alloc <= end)
1376  {
1377  n_added =
1378  pg_stream_fill_helper (pg, s, bi, tail, last_tail, n_alloc);
1379  }
1380  else
1381  {
1382  u32 n = clib_min (end - tail, n_alloc);
1383  n_added = pg_stream_fill_helper (pg, s, bi, tail, last_tail, n);
1384 
1385  if (n_added == n && n_alloc > n_added)
1386  {
1387  n_added += pg_stream_fill_helper
1388  (pg, s, bi, start, last_start, n_alloc - n_added);
1389  }
1390  }
1391 
1392  if (PREDICT_FALSE (n_added < n_alloc))
1393  tail = clib_fifo_advance_tail (bi->buffer_fifo, n_added - n_alloc);
1394 
1395  last_tail = tail;
1396  last_start = start;
1397 
1398  /* Verify that pkts in the fifo are properly allocated */
1399  }
1400 
1401  return n_in_fifo + n_added;
1402 }
1403 
1404 typedef struct
1405 {
 1406  u32 stream_index;
 1407 
 1408  u32 packet_length;
 1409  u32 sw_if_index;
 1410 
 1411  /* Use pre data for packet data. */
 1412  vlib_buffer_t buffer;
 1413 } pg_input_trace_t;
 1414 
1415 static u8 *
1416 format_pg_input_trace (u8 * s, va_list * va)
1417 {
1418  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
1419  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
1420  pg_input_trace_t *t = va_arg (*va, pg_input_trace_t *);
1421  pg_main_t *pg = &pg_main;
1422  pg_stream_t *stream;
1423  vlib_node_t *n;
1424  u32 indent = format_get_indent (s);
1425 
1426  stream = 0;
1427  if (!pool_is_free_index (pg->streams, t->stream_index))
1428  stream = pool_elt_at_index (pg->streams, t->stream_index);
1429 
1430  if (stream)
1431  s = format (s, "stream %v", pg->streams[t->stream_index].name);
1432  else
1433  s = format (s, "stream %d", t->stream_index);
1434 
1435  s = format (s, ", %d bytes", t->packet_length);
1436  s = format (s, ", sw_if_index %d", t->sw_if_index);
1437 
1438  s = format (s, "\n%U%U",
 1439  format_white_space, indent, format_vnet_buffer, &t->buffer);
 1440 
1441  s = format (s, "\n%U", format_white_space, indent);
1442 
1443  n = 0;
1444  if (stream)
1445  n = vlib_get_node (vm, stream->node_index);
1446 
1447  if (n && n->format_buffer)
1448  s = format (s, "%U", n->format_buffer,
1449  t->buffer.pre_data, sizeof (t->buffer.pre_data));
1450  else
1451  s = format (s, "%U",
 1452  format_hex_bytes, t->buffer.pre_data,
 1453  ARRAY_LEN (t->buffer.pre_data));
1454  return s;
1455 }
1456 
1457 static int
 1458 pg_input_trace (pg_main_t * pg,
 1459  vlib_node_runtime_t * node, u32 stream_index, u32 next_index,
1460  u32 * buffers, const u32 n_buffers, const u32 n_trace)
1461 {
1462  vlib_main_t *vm = vlib_get_main ();
1463  u32 *b, n_left;
1464  u32 n_trace0 = 0, n_trace1 = 0;
1465 
1466  n_left = clib_min (n_buffers, n_trace);
1467  b = buffers;
1468 
1469  while (n_left >= 2)
1470  {
1471  u32 bi0, bi1;
1472  vlib_buffer_t *b0, *b1;
1473  pg_input_trace_t *t0, *t1;
1474 
1475  bi0 = b[0];
1476  bi1 = b[1];
1477  b += 2;
1478  n_left -= 2;
1479 
1480  b0 = vlib_get_buffer (vm, bi0);
1481  b1 = vlib_get_buffer (vm, bi1);
1482 
1483  n_trace0 +=
1484  vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 1);
1485  n_trace1 +=
1486  vlib_trace_buffer (vm, node, next_index, b1, /* follow_chain */ 1);
1487 
1488  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
1489  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
1490 
1491  t0->stream_index = stream_index;
1492  t1->stream_index = stream_index;
1493 
 1494  t0->packet_length = vlib_buffer_length_in_chain (vm, b0);
 1495  t1->packet_length = vlib_buffer_length_in_chain (vm, b1);
 1496 
1497  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1498  t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1499 
1500  clib_memcpy_fast (&t0->buffer, b0,
1501  sizeof (b0[0]) - sizeof (b0->pre_data));
1502  clib_memcpy_fast (&t1->buffer, b1,
1503  sizeof (b1[0]) - sizeof (b1->pre_data));
1504 
 1505  clib_memcpy_fast (t0->buffer.pre_data, b0->data,
 1506  sizeof (t0->buffer.pre_data));
 1507  clib_memcpy_fast (t1->buffer.pre_data, b1->data,
 1508  sizeof (t1->buffer.pre_data));
1509  }
1510 
1511  while (n_left >= 1)
1512  {
1513  u32 bi0;
1514  vlib_buffer_t *b0;
1515  pg_input_trace_t *t0;
1516 
1517  bi0 = b[0];
1518  b += 1;
1519  n_left -= 1;
1520 
1521  b0 = vlib_get_buffer (vm, bi0);
1522 
1523  n_trace0 +=
1524  vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 1);
1525  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
1526 
1527  t0->stream_index = stream_index;
 1528  t0->packet_length = vlib_buffer_length_in_chain (vm, b0);
 1529  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1530  clib_memcpy_fast (&t0->buffer, b0,
1531  sizeof (b0[0]) - sizeof (b0->pre_data));
 1532  clib_memcpy_fast (t0->buffer.pre_data, b0->data,
 1533  sizeof (t0->buffer.pre_data));
1534  }
1535 
1536  return n_trace - n_trace0 - n_trace1;
1537 }
1538 
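/*
 * fill_buffer_offload_flags() parses each generated packet just far enough
 * (ethernet plus up to two VLAN tags, then IPv4 or IPv6) to set the
 * l2/l3/l4 header offsets, the requested checksum-offload flags and, for
 * chained TCP packets with GSO enabled, the gso_size / gso_l4_hdr_sz
 * metadata.
 */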
 1539 static_always_inline void
 1540 fill_buffer_offload_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
 1541  u32 buffer_oflags, int gso_enabled, u32 gso_size)
1542 {
1543  for (int i = 0; i < n_buffers; i++)
1544  {
1545  vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[i]);
1546  u8 l4_proto = 0;
1547  vnet_buffer_oflags_t oflags = 0;
1548 
1549  ethernet_header_t *eh =
 1550  (ethernet_header_t *) vlib_buffer_get_current (b0);
 1551  u16 ethertype = clib_net_to_host_u16 (eh->type);
1552  u16 l2hdr_sz = sizeof (ethernet_header_t);
1553 
1554  if (ethernet_frame_is_tagged (ethertype))
1555  {
1556  ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);
1557 
1558  ethertype = clib_net_to_host_u16 (vlan->type);
1559  l2hdr_sz += sizeof (*vlan);
1560  if (ethertype == ETHERNET_TYPE_VLAN)
1561  {
1562  vlan++;
1563  ethertype = clib_net_to_host_u16 (vlan->type);
1564  l2hdr_sz += sizeof (*vlan);
1565  }
1566  }
1567 
1568  vnet_buffer (b0)->l2_hdr_offset = 0;
1569  vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
1570 
1571  if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
1572  {
1573  ip4_header_t *ip4 =
1574  (ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
1575  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
1576  l4_proto = ip4->protocol;
1577  b0->flags |=
1578  (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
1579  VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
1580  VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
1581  if (buffer_oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
1582  oflags |= VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
1583  }
1584  else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
1585  {
1586  ip6_header_t *ip6 =
1587  (ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
1588  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
1589  /* FIXME IPv6 EH traversal */
1590  l4_proto = ip6->protocol;
1591  b0->flags |=
1592  (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
1593  VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
1594  VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
1595  }
1596 
1597  if (l4_proto == IP_PROTOCOL_TCP)
1598  {
1599  if (buffer_oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
1600  oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
1601 
1602  /* only set GSO flag for chained buffers */
1603  if (gso_enabled && (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
1604  {
1605  b0->flags |= VNET_BUFFER_F_GSO;
1606  tcp_header_t *tcp =
 1607  (tcp_header_t *) (vlib_buffer_get_current (b0) +
 1608  vnet_buffer (b0)->l4_hdr_offset);
1609  vnet_buffer2 (b0)->gso_l4_hdr_sz = tcp_header_bytes (tcp);
1610  vnet_buffer2 (b0)->gso_size = gso_size;
1611  }
1612  }
1613  else if (l4_proto == IP_PROTOCOL_UDP)
1614  {
1615  if (buffer_oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
1616  oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
1617  }
1618 
1619  if (oflags)
1620  vnet_buffer_offload_flags_set (b0, oflags);
1621  }
1622 }
1623 
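/*
 * pg_generate_packets() moves up to n_packets_to_generate buffer indices
 * from the head fifo into frames for the next node: it resolves the
 * device-input feature arc when one is active, fills in the ethernet-input
 * frame scalar arguments (sw_if_index / hw_if_index), applies offload and
 * trace metadata, and advances the fifos it consumed from.
 */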
1624 static uword
 1625 pg_generate_packets (vlib_node_runtime_t * node,
 1626  pg_main_t * pg,
1627  pg_stream_t * s, uword n_packets_to_generate)
1628 {
1629  vlib_main_t *vm = vlib_get_main ();
1630  u32 *to_next, n_this_frame, n_left, n_trace, n_packets_in_fifo;
1631  uword n_packets_generated;
1632  pg_buffer_index_t *bi, *bi0;
1633  u32 next_index = s->next_index;
 1634  vnet_feature_main_t *fm = &feature_main;
 1635  vnet_feature_config_main_t *cm;
 1636  u8 feature_arc_index = fm->device_input_feature_arc_index;
1637  cm = &fm->feature_config_mains[feature_arc_index];
1638  u32 current_config_index = ~(u32) 0;
1639  pg_interface_t *pi;
1640  int i;
1641 
1642  pi = pool_elt_at_index (pg->interfaces,
 1643  pg->if_id_by_sw_if_index[s->sw_if_index[VLIB_RX]]);
 1644  bi0 = s->buffer_indices;
1645 
1646  n_packets_in_fifo = pg_stream_fill (pg, s, n_packets_to_generate);
1647  n_packets_to_generate = clib_min (n_packets_in_fifo, n_packets_to_generate);
1648  n_packets_generated = 0;
1649 
1650  if (PREDICT_FALSE
1651  (vnet_have_features (feature_arc_index, s->sw_if_index[VLIB_RX])))
1652  {
1653  current_config_index =
 1654  vec_elt (cm->config_index_by_sw_if_index, s->sw_if_index[VLIB_RX]);
 1655  vnet_get_config_data (&cm->config_main, &current_config_index,
1656  &next_index, 0);
1657  }
1658 
1659  if (PREDICT_FALSE (pi->coalesce_enabled))
 1660  vnet_gro_flow_table_schedule_node_on_dispatcher (vm, pi->flow_table);
 1661 
1662  while (n_packets_to_generate > 0)
1663  {
1664  u32 *head, *start, *end;
1665 
 1666  if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
 1667  {
 1668  vlib_next_frame_t *nf;
 1669  vlib_frame_t *f;
 1670  ethernet_input_frame_t *ef;
 1671  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left);
 1672  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
 1673  f = vlib_get_frame (vm, nf->frame);
 1674  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
 1675 
 1676  ef = vlib_frame_scalar_args (f);
 1677  ef->sw_if_index = pi->sw_if_index;
 1678  ef->hw_if_index = pi->hw_if_index;
 1679  vlib_frame_no_append (f);
 1680  }
1681  else
1682  vlib_get_next_frame (vm, node, next_index, to_next, n_left);
1683 
1684  n_this_frame = n_packets_to_generate;
1685  if (n_this_frame > n_left)
1686  n_this_frame = n_left;
1687 
1688  start = bi0->buffer_fifo;
1689  end = clib_fifo_end (bi0->buffer_fifo);
1690  head = clib_fifo_head (bi0->buffer_fifo);
1691 
1692  if (head + n_this_frame <= end)
1693  vlib_buffer_copy_indices (to_next, head, n_this_frame);
1694  else
1695  {
1696  u32 n = end - head;
1697  vlib_buffer_copy_indices (to_next + 0, head, n);
1698  vlib_buffer_copy_indices (to_next + n, start, n_this_frame - n);
1699  }
1700 
1701  if (s->replay_packet_templates == 0)
1702  {
1703  vec_foreach (bi, s->buffer_indices)
1704  clib_fifo_advance_head (bi->buffer_fifo, n_this_frame);
1705  }
1706  else
1707  {
1708  clib_fifo_advance_head (bi0->buffer_fifo, n_this_frame);
1709  }
1710 
1711  if (current_config_index != ~(u32) 0)
1712  for (i = 0; i < n_this_frame; i++)
1713  {
1714  vlib_buffer_t *b;
1715  b = vlib_get_buffer (vm, to_next[i]);
1716  b->current_config_index = current_config_index;
1717  vnet_buffer (b)->feature_arc_index = feature_arc_index;
1718  }
1719 
1720  if (pi->gso_enabled || (s->buffer_flags & VNET_BUFFER_F_OFFLOAD))
1721  {
1722  fill_buffer_offload_flags (vm, to_next, n_this_frame,
1723  s->buffer_oflags, pi->gso_enabled,
1724  pi->gso_size);
1725  }
1726 
1727  n_trace = vlib_get_trace_count (vm, node);
1728  if (PREDICT_FALSE (n_trace > 0))
1729  {
1730  n_trace =
1731  pg_input_trace (pg, node, s - pg->streams, next_index, to_next,
1732  n_this_frame, n_trace);
1733  vlib_set_trace_count (vm, node, n_trace);
1734  }
1735  n_packets_to_generate -= n_this_frame;
1736  n_packets_generated += n_this_frame;
1737  n_left -= n_this_frame;
1738  if (CLIB_DEBUG > 0)
1739  {
1740  int i;
1741  vlib_buffer_t *b;
1742 
1743  for (i = 0; i < n_this_frame; i++)
1744  {
1745  b = vlib_get_buffer (vm, to_next[i]);
1746  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0 ||
 1747  b->current_length >= VLIB_BUFFER_MIN_CHAIN_SEG_SIZE);
 1748  }
1749  }
1750  vlib_put_next_frame (vm, node, next_index, n_left);
1751  }
1752 
1753  return n_packets_generated;
1754 }
1755 
1756 static uword
 1757 pg_input_stream (vlib_node_runtime_t * node, pg_main_t * pg, pg_stream_t * s)
 1758 {
1759  vlib_main_t *vm = vlib_get_main ();
1760  uword n_packets;
1761  f64 time_now, dt;
1762 
1763  if (s->n_packets_limit > 0 && s->n_packets_generated >= s->n_packets_limit)
1764  {
1765  pg_stream_enable_disable (pg, s, /* want_enabled */ 0);
1766  return 0;
1767  }
1768 
1769  /* Apply rate limit. */
1770  time_now = vlib_time_now (vm);
1771  if (s->time_last_generate == 0)
1772  s->time_last_generate = time_now;
1773 
1774  dt = time_now - s->time_last_generate;
1775  s->time_last_generate = time_now;
1776 
1777  n_packets = VLIB_FRAME_SIZE;
1778  if (s->rate_packets_per_second > 0)
1779  {
 1780  s->packet_accumulator += dt * s->rate_packets_per_second;
 1781  n_packets = s->packet_accumulator;
1782 
1783  /* Never allow accumulator to grow if we get behind. */
1784  s->packet_accumulator -= n_packets;
1785  }
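 /* The accumulator carries the fractional packet count between calls, so
  * the long-run rate converges on rate_packets_per_second: at 100 pps and
  * a 4 ms dispatch interval each call adds 0.4 packets and a packet is
  * emitted every two to three calls. */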
1786 
1787  /* Apply fixed limit. */
1788  if (s->n_packets_limit > 0
1789  && s->n_packets_generated + n_packets > s->n_packets_limit)
1790  n_packets = s->n_packets_limit - s->n_packets_generated;
1791 
1792  /* Generate up to one frame's worth of packets. */
1793  if (n_packets > s->n_max_frame)
1794  n_packets = s->n_max_frame;
1795 
1796  if (n_packets > 0)
1797  n_packets = pg_generate_packets (node, pg, s, n_packets);
1798 
1799  s->n_packets_generated += n_packets;
1800 
1801  return n_packets;
1802 }
1803 
1804 uword
 1805 pg_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
 1806 {
1807  uword i;
1808  pg_main_t *pg = &pg_main;
1809  uword n_packets = 0;
1810  u32 worker_index = 0;
1811 
1812  if (vlib_num_workers ())
1813  worker_index = vlib_get_current_worker_index ();
1814 
1815  /* *INDENT-OFF* */
1816  clib_bitmap_foreach (i, pg->enabled_streams[worker_index]) {
1817  pg_stream_t *s = vec_elt_at_index (pg->streams, i);
1818  n_packets += pg_input_stream (node, pg, s);
1819  }
1820  /* *INDENT-ON* */
1821 
1822  return n_packets;
1823 }
1824 
1825 /* *INDENT-OFF* */
 1826 VLIB_REGISTER_NODE (pg_input_node) = {
 1827  .function = pg_input,
 1828  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
1829  .name = "pg-input",
1830  .sibling_of = "device-input",
1831  .type = VLIB_NODE_TYPE_INPUT,
1832 
1833  .format_trace = format_pg_input_trace,
1834 
1835  /* Input node will be left disabled until a stream is active. */
1836  .state = VLIB_NODE_STATE_DISABLED,
1837 };
1838 /* *INDENT-ON* */
1839 
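/*
 * pg-input-mac-filter: an optional device-input feature that screens
 * multicast destination MACs against the interface's allowed_mcast_macs
 * list; packets that pass continue along the feature arc, everything else
 * keeps next index 0, i.e. error-drop.
 */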
 1840 VLIB_NODE_FN (pg_input_mac_filter) (vlib_main_t * vm,
 1841  vlib_node_runtime_t * node,
 1842  vlib_frame_t * frame)
 1843 {
 1844  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
 1845  u16 nexts[VLIB_FRAME_SIZE], *next;
 1846  pg_main_t *pg = &pg_main;
1847  u32 n_left, *from;
1848 
1849  from = vlib_frame_vector_args (frame);
1850  n_left = frame->n_vectors;
1851  next = nexts;
1852 
1853  clib_memset_u16 (next, 0, VLIB_FRAME_SIZE);
1854 
1855  vlib_get_buffers (vm, from, bufs, n_left);
1856 
1857  while (n_left)
1858  {
1859  const ethernet_header_t *eth;
1860  pg_interface_t *pi;
1861  mac_address_t in;
1862 
1863  pi = pool_elt_at_index
1864  (pg->interfaces,
 1865  pg->if_id_by_sw_if_index[vnet_buffer (b[0])->sw_if_index[VLIB_RX]]);
 1866  eth = vlib_buffer_get_current (b[0]);
1867 
1868  mac_address_from_bytes (&in, eth->dst_address);
1869 
 1870  if (PREDICT_FALSE (ethernet_address_cast (in.bytes)))
 1871  {
1872  mac_address_t *allowed;
1873 
1874  if (0 != vec_len (pi->allowed_mcast_macs))
1875  {
1876  vec_foreach (allowed, pi->allowed_mcast_macs)
1877  {
1878  if (0 != mac_address_cmp (allowed, &in))
1879  break;
1880  }
1881 
1882  if (vec_is_member (allowed, pi->allowed_mcast_macs))
1883  vnet_feature_next_u16 (&next[0], b[0]);
1884  }
1885  }
1886 
1887  b += 1;
1888  next += 1;
1889  n_left -= 1;
1890  }
1891 
1892  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
1893 
1894  return (frame->n_vectors);
1895 }
1896 
1897 /* *INDENT-OFF* */
 1898 VLIB_REGISTER_NODE (pg_input_mac_filter) = {
 1899  .name = "pg-input-mac-filter",
1900  .vector_size = sizeof (u32),
1901  .format_trace = format_pg_input_trace,
1902  .n_next_nodes = 1,
1903  .next_nodes = {
1904  [0] = "error-drop",
1905  },
1906 };
1907 VNET_FEATURE_INIT (pg_input_mac_filter_feat, static) = {
1908  .arc_name = "device-input",
1909  .node_name = "pg-input-mac-filter",
1910 };
1911 /* *INDENT-ON* */
1912 
1913 static clib_error_t *
 1914 pg_input_mac_filter_cfg (vlib_main_t * vm,
 1915  unformat_input_t * input, vlib_cli_command_t * cmd)
1916 {
1917  unformat_input_t _line_input, *line_input = &_line_input;
1918  u32 sw_if_index = ~0;
1919  int is_enable = 1;
1920 
1921  if (!unformat_user (input, unformat_line_input, line_input))
1922  return 0;
1923 
1924  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
1925  {
1926  if (unformat (line_input, "%U",
 1927  unformat_vnet_sw_interface,
 1928  vnet_get_main (), &sw_if_index))
1929  ;
1930  else if (unformat (line_input, "%U",
1931  unformat_vlib_enable_disable, &is_enable))
1932  ;
1933  else
1934  return clib_error_create ("unknown input `%U'",
1935  format_unformat_error, line_input);
1936  }
1937  unformat_free (line_input);
1938 
1939  if (~0 == sw_if_index)
1940  return clib_error_create ("specify interface");
1941 
1942  vnet_feature_enable_disable ("device-input",
1943  "pg-input-mac-filter",
1944  sw_if_index, is_enable, 0, 0);
1945 
1946  return NULL;
1947 }
1948 
1949 /* *INDENT-OFF* */
1950 VLIB_CLI_COMMAND (enable_streams_cli, static) = {
1951  .path = "packet-generator mac-filter",
1952  .short_help = "packet-generator mac-filter <INTERFACE> <on|off>",
1953  .function = pg_input_mac_filter_cfg,
1954 };
1955 /* *INDENT-ON* */
1956 
1957 
1958 /*
1959  * fd.io coding-style-patch-verification: ON
1960  *
1961  * Local Variables:
1962  * eval: (c-set-style "gnu")
1963  * End:
1964  */
vnet_config_main_t config_main
Definition: feature.h:82
u32 sw_if_index
Definition: pg.h:308
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:524
vnet_buffer_oflags_t
Definition: buffer.h:118
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:133
Definition: edit.h:64
static uword pg_generate_packets(vlib_node_runtime_t *node, pg_main_t *pg, pg_stream_t *s, uword n_packets_to_generate)
Definition: input.c:1625
u32 next_index
Definition: pg.h:155
vlib_node_registration_t pg_input_mac_filter
(constructor) VLIB_REGISTER_NODE (pg_input_mac_filter)
Definition: input.c:1898
#define PG_EDIT_LO
Definition: edit.h:83
u64 n_packets_limit
Definition: pg.h:164
#define clib_min(x, y)
Definition: clib.h:342
#define clib_fifo_head(v)
Definition: fifo.h:254
#define CLIB_UNUSED(x)
Definition: clib.h:90
u32 n_free
u8 * fixed_packet_data
Definition: pg.h:122
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:212
mac_address_t * allowed_mcast_macs
Definition: pg.h:320
static int pg_input_trace(pg_main_t *pg, vlib_node_runtime_t *node, u32 stream_index, u32 next_index, u32 *buffers, const u32 n_buffers, const u32 n_trace)
Definition: input.c:1458
static uword clib_fifo_elts(void *v)
Definition: fifo.h:66
static void setbits_1(void *a0, u64 v0, u64 v_min, u64 v_max, u32 max_bits, u32 n_bits, u64 mask, u32 shift)
Definition: input.c:451
#define vnet_buffer2(b)
Definition: buffer.h:499
Definition: pg.h:330
static void pg_generate_fix_multi_buffer_lengths(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers)
Definition: input.c:916
vlib_buffer_copy_indices(to, tmp, n_free)
vnet_interface_main_t interface_main
Definition: vnet.h:81
static u64 do_it(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 lo_bit, u32 hi_bit, u64 v_min, u64 v_max, u64 v, pg_edit_type_t edit_type)
Definition: input.c:757
u32 n_bytes
#define PREDICT_TRUE(x)
Definition: clib.h:125
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:119
unsigned long u64
Definition: types.h:89
pg_edit_group_t * edit_groups
Definition: pg.h:108
u16 nexts[VLIB_FRAME_SIZE]
vlib_increment_combined_counter(ccm, ti, sw_if_index, n_buffers, n_bytes)
vnet_feature_config_main_t * cm
#define clib_fifo_advance_tail(f, n_elts)
Definition: fifo.h:161
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
Definition: node.h:296
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:325
#define clib_fifo_resize(f, n_elts)
Definition: fifo.h:101
static void * clib_random_buffer_get_data(clib_random_buffer_t *b, uword n_bytes)
Definition: random_buffer.h:83
#define clib_bitmap_foreach(i, ai)
Macro to iterate across set bits in a bitmap.
Definition: bitmap.h:361
void(* edit_function)(struct pg_main_t *pg, struct pg_stream_t *s, struct pg_edit_group_t *g, u32 *buffers, u32 n_buffers)
Definition: pg.h:75
pg_edit_type_t
Definition: edit.h:46
u32 thread_index
Definition: main.h:213
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:122
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, vlib_frame_t *f)
Definition: node_funcs.h:273
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: nat44_ei.c:3048
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:607
static_always_inline u64 do_set_increment(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 n_bits, u32 byte_offset, u32 is_net_byte_order, u32 want_sum, u64 *sum_result, u64 v_min, u64 v_max, u64 v)
Definition: input.c:221
static void pg_generate_set_lengths(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers)
Definition: input.c:864
u8 coalesce_enabled
Definition: pg.h:313
u32 n_max_frame
Definition: pg.h:167
uword unformat_user(unformat_input_t *input, unformat_function_t *func,...)
Definition: unformat.c:989
static u32 format_get_indent(u8 *s)
Definition: format.h:72
#define STRUCT_OFFSET_OF(t, f)
Definition: clib.h:73
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
unformat_function_t unformat_vnet_sw_interface
u64 last_increment_value
Definition: edit.h:87
static_always_inline int vnet_have_features(u8 arc, u32 sw_if_index)
Definition: feature.h:251
pg_buffer_index_t * buffer_indices
Definition: pg.h:177
u32 hw_if_index
Definition: pg.h:308
static_always_inline void init_buffers_inline(vlib_main_t *vm, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 data_offset, u32 n_data, u32 set_data)
Definition: input.c:1070
#define VLIB_NODE_FN(node)
Definition: node.h:202
pg_edit_type_t packet_size_edit_type
Definition: pg.h:110
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:433
struct _tcp_header tcp_header_t
unsigned char u8
Definition: types.h:56
vlib_buffer_t ** b
u8 * fixed_packet_data_mask
Definition: pg.h:122
u8 data[128]
Definition: ipsec_types.api:92
static_always_inline void do_set_fixed(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 n_bits, u32 byte_offset, u32 is_net_byte_order, u64 v_min, u64 v_max)
Definition: input.c:170
static void set_1(void *a0, u64 v0, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order)
Definition: input.c:96
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
gro_flow_table_t * flow_table
Definition: pg.h:314
double f64
Definition: types.h:142
static_always_inline void vnet_feature_next_u16(u16 *next0, vlib_buffer_t *b0)
Definition: feature.h:328
unsigned int u32
Definition: types.h:88
static void pg_generate_edit(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers)
Definition: input.c:973
#define PG_EDIT_HI
Definition: edit.h:84
vlib_frame_t * f
vnet_feature_main_t * fm
u32 gso_size
Definition: pg.h:316
#define static_always_inline
Definition: clib.h:112
static void pg_set_next_buffer_pointers(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 *next_buffers, u32 n_buffers)
Definition: input.c:1024
i64 word
Definition: types.h:111
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index The first 64 bytes of buffer contains most header informatio...
Definition: buffer_funcs.h:507
static_always_inline u64 do_setbits_increment(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 max_bits, u32 n_bits, u32 byte_offset, u64 v_min, u64 v_max, u64 v, u64 mask, u32 shift)
Definition: input.c:560
vlib_get_buffers(vm, from, b, n_left_from)
vl_api_ip6_address_t ip6
Definition: one.api:424
static_always_inline void do_setbits_fixed(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 max_bits, u32 n_bits, u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
Definition: input.c:508
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
Definition: ethernet.h:52
u8 dst_address[6]
Definition: packet.h:55
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:1023
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
static uword clib_fifo_free_elts(void *v)
Definition: fifo.h:82
description fragment has unexpected format
Definition: map.api:433
pg_edit_type_t type
Definition: edit.h:66
#define vec_elt_at_index(v, i)
Get vector value at index i, checking that i is in bounds.
u8 * format_hex_bytes(u8 *s, va_list *va)
Definition: std-formats.c:84
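format_hex_bytes() is a user format function, invoked through the %U conversion of the clib format() family. A small sketch of that convention (dump_bytes() is a made-up helper; the pointer-then-count argument order is assumed here):

#include <stdio.h>
#include <vppinfra/format.h>

static void
dump_bytes (u8 * data, u32 n_bytes)
{
  /* %U calls the given format function with the arguments that follow it */
  u8 *s = format (0, "payload: %U", format_hex_bytes, data, n_bytes);

  fformat (stderr, "%v\n", s);  /* %v prints a u8 vector string */
  vec_free (s);
}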
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
Definition: node_funcs.h:400
vnet_main_t * vnet_get_main(void)
static uword pg_input_stream(vlib_node_runtime_t *node, pg_main_t *pg, pg_stream_t *s)
Definition: input.c:1757
i32 lsb_bit_offset
Definition: edit.h:73
uword * if_id_by_sw_if_index
Definition: pg.h:344
static u32 vlib_get_current_worker_index()
Definition: threads.h:372
#define vec_end(v)
End (last data address) of vector.
#define clib_error_create(args...)
Definition: error.h:96
vlib_buffer_enqueue_to_next(vm, node, from,(u16 *) nexts, frame->n_vectors)
static int validate_buffer_data(vlib_buffer_t *b, pg_stream_t *s)
Definition: input.c:90
#define VLIB_FRAME_SIZE
Definition: node.h:369
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
Definition: node_funcs.h:321
static clib_error_t * pg_input_mac_filter_cfg(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
Definition: input.c:1914
unformat_function_t unformat_line_input
Definition: format.h:275
u32 buffer_bytes
Definition: pg.h:126
f64 packet_accumulator
Definition: pg.h:175
u32 last_increment_packet_size
Definition: pg.h:135
u16 * next
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:553
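pool_elt_at_index() is part of the clib pool allocator, which hands out fixed-size slots addressed by index. A hedged sketch of the surrounding pattern (my_elt_t and pool_demo() are made-up names; pool_get(), pool_put() and pool_free() are the companion macros):

#include <vppinfra/pool.h>

typedef struct
{
  u32 a, b;
} my_elt_t;

static void
pool_demo (void)
{
  my_elt_t *pool = 0, *e;
  u32 index;

  pool_get (pool, e);                    /* allocate a slot, reusing free ones first */
  index = e - pool;                      /* the pool index is the element offset */
  e->a = 1;
  e->b = 2;

  e = pool_elt_at_index (pool, index);   /* checked lookup by index */
  pool_put (pool, e);                    /* return the slot to the free list */
  pool_free (pool);
}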
vl_api_interface_index_t sw_if_index
Definition: wireguard.api:34
format_function_t format_vnet_buffer
Definition: buffer.h:515
static_always_inline void mac_address_from_bytes(mac_address_t *mac, const u8 *bytes)
Definition: mac_address.h:92
u32 buffer_oflags
Definition: pg.h:132
u32 current_replay_packet_index
Definition: pg.h:181
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:708
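vlib_buffer_alloc() may return fewer buffers than requested, so callers must use its return value. A minimal sketch, assuming the buffers are simply released again with vlib_buffer_free() rather than handed to a graph node:

#include <vlib/vlib.h>

static u32
alloc_and_release (vlib_main_t * vm)
{
  u32 buffers[32];
  u32 n = vlib_buffer_alloc (vm, buffers, 32);  /* n may be < 32 on pool exhaustion */

  /* ... initialize packet data here ... */

  vlib_buffer_free (vm, buffers, n);            /* frees each buffer and its chain */
  return n;
}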
u32 ** replay_buffers_by_thread
Definition: pg.h:347
static __clib_warn_unused_result int vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:153
u8 * name
Definition: pg.h:99
struct _unformat_input_t unformat_input_t
unsigned short u16
Definition: types.h:57
static u64 pg_edit_get_value(pg_edit_t *e, int hi_or_lo)
Definition: edit.h:173
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:257
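vlib_buffer_get_current() returns b->data + b->current_data, i.e. the start of the data still to be processed. A hedged sketch that peeks at an Ethernet header and then advances past it (peek_ethertype() is a made-up helper):

#include <vlib/vlib.h>
#include <vnet/ethernet/ethernet.h>

static u16
peek_ethertype (vlib_buffer_t * b)
{
  ethernet_header_t *e = vlib_buffer_get_current (b);
  u16 type = clib_net_to_host_u16 (e->type);

  /* move the current-data pointer past the L2 header */
  vlib_buffer_advance (b, sizeof (ethernet_header_t));
  return type;
}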
static void * vnet_get_config_data(vnet_config_main_t *cm, u32 *config_index, u32 *next_index, u32 n_data_bytes)
Definition: config.h:123
int vnet_feature_enable_disable(const char *arc_name, const char *node_name, u32 sw_if_index, int enable_disable, void *feature_config, u32 n_feature_config_bytes)
#define PREDICT_FALSE(x)
Definition: clib.h:124
vl_api_ip4_address_t ip4
Definition: one.api:376
static_always_inline void vnet_gro_flow_table_schedule_node_on_dispatcher(vlib_main_t *vm, gro_flow_table_t *flow_table)
Definition: gro_func.h:409
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
format_function_t * format_buffer
Definition: node.h:349
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:395
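vlib_get_next_frame() and vlib_put_next_frame() bracket the standard enqueue loop of a graph node: reserve space in the outgoing frame, copy buffer indices into it, then hand it back with the number of unused slots. A skeleton of that loop (demo_node_fn() and the single next index are placeholders, not symbols from input.c):

#include <vlib/vlib.h>

static uword
demo_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 next_index = 0;

  while (n_left_from > 0)
    {
      u32 *to_next, n_left_to_next;

      /* reserve space in the frame going to next_index */
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          to_next[0] = from[0];
          to_next += 1;
          from += 1;
          n_left_to_next -= 1;
          n_left_from -= 1;
        }

      /* hand the filled frame back, reporting the unused slots */
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}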
u32 n_left
uword ** enabled_streams
Definition: pg.h:336
u32 n_bits
Definition: edit.h:79
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
Definition: buffer_funcs.h:122
static uword ethernet_address_cast(const u8 *a)
Definition: packet.h:67
vlib_buffer_t buffer
Definition: input.c:1412
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
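VLIB_REGISTER_NODE() is a constructor macro that adds a node to the graph at load time. A hypothetical registration pairing with the dispatch skeleton sketched above ("demo-node", the error string and the single drop next are invented for illustration):

#include <vlib/vlib.h>

static uword demo_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame);

static char *demo_error_strings[] = {
  "demo packets processed",
};

VLIB_REGISTER_NODE (demo_node) = {
  .function = demo_node_fn,
  .name = "demo-node",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (demo_error_strings),
  .error_strings = demo_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};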
static void vlib_buffer_free_no_next(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers; does not free the buffer chain for each buffer.
Definition: buffer_funcs.h:999
#define UNFORMAT_END_OF_INPUT
Definition: format.h:137
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:208
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
static_always_inline void do_set_random(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 n_bits, u32 byte_offset, u32 is_net_byte_order, u32 want_sum, u64 *sum_result, u64 v_min, u64 v_max)
Definition: input.c:316
u8 gso_enabled
Definition: pg.h:315
static u32 pg_stream_fill(pg_main_t *pg, pg_stream_t *s, u32 n_buffers)
Definition: input.c:1316
u16x4 i
Definition: vector_sse42.h:261
vl_api_pnat_mask_t mask
Definition: pnat.api:45
u32 min_packet_bytes
Definition: pg.h:113
u32 max_packet_bytes
Definition: pg.h:113
#define clib_warning(format, args...)
Definition: error.h:59
u8 data[]
Packet data.
Definition: buffer.h:204
vnet_interface_main_t * im
vnet_feature_main_t feature_main
static void set_2(void *a0, void *a1, u64 v0, u64 v1, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order, u32 is_increment)
Definition: input.c:125
static uword max_pow2(uword x)
Definition: clib.h:258
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
Definition: node_funcs.h:315
#define pool_is_free_index(P, I)
Use free bitmap to query whether given index is free.
Definition: pool.h:302
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
Definition: buffer.h:156
#define ARRAY_LEN(x)
Definition: clib.h:70
pg_edit_t * non_fixed_edits
Definition: pg.h:117
u8 ** replay_packet_templates
Definition: pg.h:179
static uword clib_fifo_advance_head(void *v, uword n_elts)
Definition: fifo.h:169
static int validate_buffer_data2(vlib_buffer_t *b, pg_stream_t *s, u32 data_offset, u32 n_bytes)
Definition: input.c:60
#define VLIB_CLI_COMMAND(x,...)
Definition: cli.h:163
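VLIB_CLI_COMMAND() registers a debug-CLI command the same way VLIB_REGISTER_NODE() registers a node. A minimal, hypothetical command (the path, help text and handler are invented):

#include <vlib/vlib.h>

static clib_error_t *
demo_show_fn (vlib_main_t * vm, unformat_input_t * input,
              vlib_cli_command_t * cmd)
{
  vlib_cli_output (vm, "hello from the demo command");
  return 0;
}

VLIB_CLI_COMMAND (demo_show_command, static) = {
  .path = "show demo",
  .short_help = "show demo",
  .function = demo_show_fn,
};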
pg_stream_t * streams
Definition: pg.h:333
#define ASSERT(truth)
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:201
vnet_sw_interface_t * si
vlib_frame_t * frame
Definition: node.h:397
#define always_inline
Definition: rdma_mlx5dv.h:23
u16 flags
Definition: node.h:379
void pg_stream_enable_disable(pg_main_t *pg, pg_stream_t *s, int is_enable)
Definition: stream.c:49
static_always_inline int ethernet_frame_is_tagged(u16 type)
Definition: ethernet.h:78
#define vec_is_member(v, e)
True if given pointer is within given vector.
vlib_put_next_frame(vm, node, next_index, 0)
u32 n_buffers
static uword vlib_buffer_index_length_in_chain(vlib_main_t *vm, u32 bi)
Get length in bytes of the buffer chain identified by buffer index.
Definition: buffer_funcs.h:453
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
Definition: string.h:92
#define clib_mem_unaligned(pointer, type)
Definition: types.h:155
nat44_ei_hairpin_src_next_t next_index
Definition: pg.h:96
VNET_FEATURE_INIT(pg_input_mac_filter_feat, static)
#define clib_max(x, y)
Definition: clib.h:335
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:38
#define clib_fifo_end(v)
Definition: fifo.h:63
static_always_inline void clib_memset_u16(void *p, u16 val, uword count)
Definition: string.h:395
#define vec_elt(v, i)
Get vector value at index i.
u8 device_input_feature_arc_index
Feature arc index for device-input.
Definition: feature.h:112
static_always_inline void do_setbits_random(pg_main_t *pg, pg_stream_t *s, u32 *buffers, u32 n_buffers, u32 max_bits, u32 n_bits, u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
Definition: input.c:639
u32 node_index
Definition: pg.h:149
Definition: defs.h:47
u32 sw_if_index[VLIB_N_RX_TX]
Definition: pg.h:146
#define clib_fifo_add1(f, e)
Definition: fifo.h:192
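clib_fifo_add1() pushes one element onto a clib FIFO, the structure the packet generator uses to queue buffer indices per stream (see buffer_fifo below); clib_fifo_free_elts() and clib_fifo_advance_head(), listed elsewhere on this page, report free space and consume elements in bulk. A hedged single-element sketch (fifo_demo() is a made-up helper; clib_fifo_sub1(), clib_fifo_elts() and clib_fifo_free() are assumed companions from fifo.h):

#include <vppinfra/fifo.h>

static void
fifo_demo (void)
{
  u32 *fifo = 0, x = 0;

  clib_fifo_add1 (fifo, 10);       /* push one element onto the tail */
  clib_fifo_add1 (fifo, 20);

  while (clib_fifo_elts (fifo) > 0)
    {
      clib_fifo_sub1 (fifo, x);    /* pop one element from the head */
      (void) x;                    /* ... use x here ... */
    }

  clib_fifo_free (fifo);
}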
static u32 pg_stream_fill_replay(pg_main_t *pg, pg_stream_t *s, u32 n_alloc)
Definition: input.c:1198
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
uword unformat_vlib_enable_disable(unformat_input_t *input, va_list *args)
Definition: format.c:116
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:149
VLIB buffer representation.
Definition: buffer.h:111
u64 uword
Definition: types.h:112
static void unformat_free(unformat_input_t *i)
Definition: format.h:155
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:301
u32 packet_length
Definition: input.c:1408
#define VLIB_BUFFER_MIN_CHAIN_SEG_SIZE
Definition: buffer.h:58
f64 rate_packets_per_second
Definition: pg.h:171
static u8 * format_pg_input_trace(u8 *s, va_list *va)
Definition: input.c:1416
#define vnet_buffer(b)
Definition: buffer.h:437
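vnet_buffer(b) casts the buffer's opaque metadata area to the vnet layout, which carries per-buffer fields such as the RX/TX interface indices. A one-line sketch of the most common access (rx_interface_of() is a made-up wrapper):

#include <vnet/vnet.h>

static u32
rx_interface_of (vlib_buffer_t * b)
{
  /* the software interface the packet was received on */
  return vnet_buffer (b)->sw_if_index[VLIB_RX];
}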
u8 * format_unformat_error(u8 *s, va_list *va)
Definition: unformat.c:91
u32 buffer_flags
Definition: pg.h:129
static int tcp_header_bytes(tcp_header_t *t)
Definition: tcp_packet.h:93
static u32 pg_stream_fill_helper(pg_main_t *pg, pg_stream_t *s, pg_buffer_index_t *bi, u32 *buffers, u32 *next_buffers, u32 n_alloc)
Definition: input.c:1153
static u32 vlib_num_workers()
Definition: threads.h:354
#define STRUCT_SIZE_OF(t, f)
Definition: clib.h:75
u64 n_packets_generated
Definition: pg.h:160
vlib_node_registration_t pg_input_node
(constructor) VLIB_REGISTER_NODE (pg_input_node)
Definition: input.c:1826
pg_main_t pg_main
Definition: init.c:44
static vlib_node_t * vlib_get_node(vlib_main_t *vm, u32 i)
Get vlib node by index.
Definition: node_funcs.h:86
#define vec_foreach(var, vec)
Vector iterator.
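vec_foreach() walks a vector by element pointer and pairs naturally with the NULL-tolerant vec_len() shown earlier. A short sketch (sum_all() is illustrative only):

#include <vppinfra/vec.h>

static u64
sum_all (u32 * v)
{
  u32 *x;
  u64 sum = 0;

  /* x points at each element in turn; a NULL vector yields zero iterations */
  vec_foreach (x, v)
    sum += x[0];

  return sum;
}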
static_always_inline void vnet_buffer_offload_flags_set(vlib_buffer_t *b, vnet_buffer_oflags_t oflags)
Definition: buffer.h:522
f64 end
End of the time range.
Definition: mactime.api:44
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:628
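vlib_add_trace() attaches a per-packet trace record that the node's trace formatter (format_pg_input_trace above) later renders. A hedged sketch of the usual guard-and-record pattern inside a dispatch loop (demo_trace_t and maybe_trace() are invented names):

#include <vlib/vlib.h>

typedef struct
{
  u32 buffer_length;
} demo_trace_t;

static_always_inline void
maybe_trace (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_t * b)
{
  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
    {
      demo_trace_t *t = vlib_add_trace (vm, node, b, sizeof (*t));
      t->buffer_length = b->current_length;
    }
}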
static void vlib_frame_no_append(vlib_frame_t *f)
Definition: node_funcs.h:281
static int ip4_header_bytes(const ip4_header_t *i)
Definition: ip4_packet.h:190
static_always_inline int mac_address_cmp(const mac_address_t *a, const mac_address_t *b)
Definition: mac_address.h:134
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:226
vnet_feature_config_main_t * feature_config_mains
feature config main objects
Definition: feature.h:100
u32 stream_index
Definition: input.c:1406
uword pg_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: input.c:1805
#define BITS(x)
Definition: clib.h:69
f64 time_last_generate
Definition: pg.h:173
vlib_buffer_t * bufs[VLIB_FRAME_SIZE]
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
clib_random_buffer_t random_buffer
Definition: main.h:210
static void setbits_2(void *a0, void *a1, u64 v0, u64 v1, u64 v_min, u64 v_max, u32 max_bits, u32 n_bits, u64 mask, u32 shift, u32 is_increment)
Definition: input.c:475
pg_interface_t * interfaces
Definition: pg.h:342
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:978
Definition: defs.h:46
static_always_inline void fill_buffer_offload_flags(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u32 buffer_oflags, int gso_enabled, u32 gso_size)
Definition: input.c:1540
static uword unformat_check_input(unformat_input_t *i)
Definition: format.h:163
u32 * buffer_fifo
Definition: pg.h:92