FD.io VPP v16.12-rc0-308-g931be3a (Vector Packet Processing)
hqos.c
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <string.h>
#include <fcntl.h>

#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/bitmap.h>

#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/dpdk/dpdk.h>

#include <vlib/unix/physmem.h>
#include <vlib/pci/pci.h>
#include <vlibmemory/api.h>
#include <vlibmemory/vl_memory_msg_enum.h> /* enumerate all vlib messages */

#define vl_typedefs /* define message structures */
#include <vlibmemory/vl_memory_api_h.h>
#undef vl_typedefs

/* instantiate all the print functions we know about */
#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
#define vl_printfun
#include <vlibmemory/vl_memory_api_h.h>
#undef vl_printfun

#include "dpdk_priv.h"

dpdk_main_t dpdk_main;

/***
 *
 * HQoS default configuration values
 *
 ***/
56 
57 static dpdk_device_config_hqos_t hqos_params_default = {
58  .hqos_thread_valid = 0,
59 
60  .swq_size = 4096,
61  .burst_enq = 256,
62  .burst_deq = 220,
63 
64  /*
65  * Packet field to identify the subport.
66  *
67  * Default value: Since only one subport is defined by default (see below:
68  * n_subports_per_port = 1), the subport ID is hardcoded to 0.
69  */
70  .pktfield0_slabpos = 0,
71  .pktfield0_slabmask = 0,
72 
  /*
   * Packet field to identify the pipe.
   *
   * Default value: Assuming Ethernet/IPv4/UDP packets, UDP payload bits 12 .. 23
   */
  .pktfield1_slabpos = 40,
  .pktfield1_slabmask = 0x0000000FFF000000LLU,

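  /*
   * For illustration (assuming the default Ethernet/IPv4/UDP layout): the
   * 8-byte slab at byte offset 40 spans the last two UDP header bytes plus
   * payload bytes 0..5. After the big-endian load in BITFIELD below, mask
   * 0x0000000FFF000000 keeps slab bits 35..24 (payload bits 12..23), and the
   * shift by ctz(mask) = 24 right-aligns them into a pipe ID in 0..4095.
   */
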
  /*
   * Packet field used as index into the TC translation table to identify
   * the traffic class and queue.
   *
   * Default value: Assuming Ethernet/IPv4 packets, IPv4 DSCP field
   */
  .pktfield2_slabpos = 8,
  .pktfield2_slabmask = 0x00000000000000FCLLU,
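  /*
   * For illustration (assuming the default Ethernet/IPv4 layout): the slab
   * at byte offset 8 ends at packet byte 15, the IPv4 TOS byte (14-byte
   * Ethernet header + 1). Mask 0xFC keeps the six DSCP bits, and the shift
   * by ctz(0xFC) = 2 yields a 6-bit DSCP value (0..63) indexing tc_table.
   */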
  .tc_table = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
  },
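  /*
   * Each tc_table entry encodes (traffic_class << 2) | queue: entry >> 2
   * selects one of 4 traffic classes and entry & 0x3 one of 4 queues, as
   * decoded in dpdk_hqos_metadata_set () below.
   */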

  /* port */
  .port = {
    .name = NULL, /* Set at init */
    .socket = 0, /* Set at init */
    .rate = 1250000000, /* Assuming 10GbE port (bytes/second) */
    .mtu = 14 + 1500, /* Assuming Ethernet/IPv4 pkt (Ethernet FCS not included) */
    .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
    .n_subports_per_port = 1,
    .n_pipes_per_subport = 4096,
    .qsize = {64, 64, 64, 64},
    .pipe_profiles = NULL, /* Set at config */
    .n_pipe_profiles = 1,

#ifdef RTE_SCHED_RED
    .red_params = {
      /* Traffic Class 0 - Colors Green / Yellow / Red */
      [0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

      /* Traffic Class 1 - Colors Green / Yellow / Red */
      [1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

      /* Traffic Class 2 - Colors Green / Yellow / Red */
      [2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

      /* Traffic Class 3 - Colors Green / Yellow / Red */
      [3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}
    },
#endif /* RTE_SCHED_RED */
  },
};
145 
146 static struct rte_sched_subport_params hqos_subport_params_default = {
147  .tb_rate = 1250000000, /* 10GbE line rate (measured in bytes/second) */
148  .tb_size = 1000000,
149  .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
150  .tc_period = 10,
151 };
152 
static struct rte_sched_pipe_params hqos_pipe_params_default = {
  .tb_rate = 305175, /* 10GbE line rate divided by 4K pipes */
  .tb_size = 1000000,
  .tc_rate = {305175, 305175, 305175, 305175},
  .tc_period = 40,
#ifdef RTE_SCHED_SUBPORT_TC_OV
  .tc_ov_weight = 1,
#endif
  .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
};
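
/*
 * Note on the default pipe tb_rate: 1,250,000,000 bytes/s / 4,096 pipes
 * = 305,175.78 bytes/s, truncated to 305175.
 */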

/***
 *
 * HQoS configuration
 *
 ***/

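/*
 * Check that mask selects a single run of contiguous set bits whose width
 * matches log2 (n), i.e. that it can encode values 0 .. n-1 (n is expected
 * to be a power of two). Returns 0 if valid, a negative code otherwise.
 */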
int
dpdk_hqos_validate_mask (u64 mask, u32 n)
{
  int count = __builtin_popcountll (mask);
  int pos_lead = sizeof (u64) * 8 - __builtin_clzll (mask);
  int pos_trail = __builtin_ctzll (mask);
  int count_expected = __builtin_popcount (n - 1);

  /* Handle the exceptions */
  if (n == 0)
    return -1; /* Error */

  if ((mask == 0) && (n == 1))
    return 0; /* OK */

  if (((mask == 0) && (n != 1)) || ((mask != 0) && (n == 1)))
    return -2; /* Error */

  /* Check that mask is contiguous */
  if ((pos_lead - pos_trail) != count)
    return -3; /* Error */

  /* Check that mask contains the expected number of bits set */
  if (count != count_expected)
    return -4; /* Error */

  return 0; /* OK */
}
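
/*
 * For example, with the defaults above: 4096 pipes need a 12-bit field, so
 * dpdk_hqos_validate_mask (0x0000000FFF000000LLU, 4096) returns 0, while a
 * non-contiguous mask such as 0xF0F fails the contiguity check with -3.
 */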

void
dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
                                              hqos, u32 pipe_profile_id)
{
  memcpy (&hqos->pipe[pipe_profile_id], &hqos_pipe_params_default,
          sizeof (hqos_pipe_params_default));
}

void
dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos)
{
  struct rte_sched_subport_params *subport_params;
  struct rte_sched_pipe_params *pipe_params;
  u32 *pipe_map;
  u32 i;

  memcpy (hqos, &hqos_params_default, sizeof (hqos_params_default));

  /* pipe */
  vec_add2 (hqos->pipe, pipe_params, hqos->port.n_pipe_profiles);

  for (i = 0; i < vec_len (hqos->pipe); i++)
    memcpy (&pipe_params[i],
            &hqos_pipe_params_default, sizeof (hqos_pipe_params_default));

  hqos->port.pipe_profiles = hqos->pipe;

  /* subport */
  vec_add2 (hqos->subport, subport_params, hqos->port.n_subports_per_port);

  for (i = 0; i < vec_len (hqos->subport); i++)
    memcpy (&subport_params[i],
            &hqos_subport_params_default,
            sizeof (hqos_subport_params_default));

  /* pipe -> pipe profile map: all pipes default to profile 0 */
  vec_add2 (hqos->pipe_map,
            pipe_map,
            hqos->port.n_subports_per_port * hqos->port.n_pipes_per_subport);

  for (i = 0; i < vec_len (hqos->pipe_map); i++)
    pipe_map[i] = 0;
}

/***
 *
 * HQoS init
 *
 ***/

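/*
 * Set up the HQoS scheduler hierarchy for one device: allocate the
 * per-worker and per-HQoS-thread state, create one software queue (SWQ)
 * per worker thread, then configure the rte_sched port, its subports and
 * their pipes from the supplied configuration.
 */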
clib_error_t *
dpdk_port_setup_hqos (dpdk_device_t * xd, dpdk_device_config_hqos_t * hqos)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  char name[32];
  u32 subport_id, i;
  int rv;

  /* Detect the set of worker threads */
  int worker_thread_first = 0;
  int worker_thread_count = 0;

  uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
  vlib_thread_registration_t *tr =
    p ? (vlib_thread_registration_t *) p[0] : 0;

  if (tr && tr->count > 0)
    {
      worker_thread_first = tr->first_index;
      worker_thread_count = tr->count;
    }

  /* Allocate the per-thread device data array */
  vec_validate_aligned (xd->hqos_wt, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  memset (xd->hqos_wt, 0, tm->n_vlib_mains * sizeof (xd->hqos_wt[0]));

  vec_validate (xd->hqos_ht, 0);
  memset (xd->hqos_ht, 0, sizeof (xd->hqos_ht[0]));

  /* Allocate space for one SWQ per worker thread in the I/O TX thread data structure */
  vec_validate (xd->hqos_ht->swq, worker_thread_count - 1);

  /* SWQ */
  for (i = 0; i < worker_thread_count; i++)
    {
      u32 swq_flags = RING_F_SP_ENQ | RING_F_SC_DEQ;

      snprintf (name, sizeof (name), "SWQ-worker%u-to-device%u", i,
                xd->device_index);
      xd->hqos_ht->swq[i] =
        rte_ring_create (name, hqos->swq_size, xd->cpu_socket, swq_flags);
      if (xd->hqos_ht->swq[i] == NULL)
        return clib_error_return (0,
                                  "SWQ-worker%u-to-device%u: rte_ring_create err",
                                  i, xd->device_index);
    }
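
  /*
   * Each ring is single-producer/single-consumer (RING_F_SP_ENQ |
   * RING_F_SC_DEQ): exactly one worker enqueues to it and only the HQoS
   * thread dequeues from it, so neither side needs locking.
   */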

  /*
   * HQoS
   */

  /* HQoS port */
  snprintf (name, sizeof (name), "HQoS%u", xd->device_index);
  hqos->port.name = strdup (name);
  if (hqos->port.name == NULL)
    return clib_error_return (0, "HQoS%u: strdup err", xd->device_index);

  hqos->port.socket = rte_eth_dev_socket_id (xd->device_index);
  if (hqos->port.socket == SOCKET_ID_ANY)
    hqos->port.socket = 0;

  xd->hqos_ht->hqos = rte_sched_port_config (&hqos->port);
  if (xd->hqos_ht->hqos == NULL)
    return clib_error_return (0, "HQoS%u: rte_sched_port_config err",
                              xd->device_index);

  /* HQoS subport */
  for (subport_id = 0; subport_id < hqos->port.n_subports_per_port;
       subport_id++)
    {
      u32 pipe_id;

      rv =
        rte_sched_subport_config (xd->hqos_ht->hqos, subport_id,
                                  &hqos->subport[subport_id]);
      if (rv)
        return clib_error_return (0,
                                  "HQoS%u subport %u: rte_sched_subport_config err (%d)",
                                  xd->device_index, subport_id, rv);

      /* HQoS pipe */
      for (pipe_id = 0; pipe_id < hqos->port.n_pipes_per_subport; pipe_id++)
        {
          u32 pos = subport_id * hqos->port.n_pipes_per_subport + pipe_id;
          u32 profile_id = hqos->pipe_map[pos];

          rv =
            rte_sched_pipe_config (xd->hqos_ht->hqos, subport_id, pipe_id,
                                   profile_id);
          if (rv)
            return clib_error_return (0,
                                      "HQoS%u subport %u pipe %u: rte_sched_pipe_config err (%d)",
                                      xd->device_index, subport_id, pipe_id,
                                      rv);
        }
    }

  /* Set up per-thread device data for the I/O TX thread */
  xd->hqos_ht->hqos_burst_enq = hqos->burst_enq;
  xd->hqos_ht->hqos_burst_deq = hqos->burst_deq;
  vec_validate (xd->hqos_ht->pkts_enq, 2 * hqos->burst_enq - 1);
  vec_validate (xd->hqos_ht->pkts_deq, hqos->burst_deq - 1);
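  /*
   * pkts_enq is sized for 2 * burst_enq mbufs: up to burst_enq - 1 packets
   * can be left over from a previous scan, and one SWQ dequeue can add up
   * to burst_enq more on top of them (see the run-time loops below).
   */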
  xd->hqos_ht->pkts_enq_len = 0;
  xd->hqos_ht->swq_pos = 0;
  xd->hqos_ht->flush_count = 0;

  /* Set up per-thread device data for each worker thread */
  for (i = 0; i < worker_thread_count; i++)
    {
      u32 tid = worker_thread_first + i;

      xd->hqos_wt[tid].swq = xd->hqos_ht->swq[i];
      xd->hqos_wt[tid].hqos_field0_slabpos = hqos->pktfield0_slabpos;
      xd->hqos_wt[tid].hqos_field0_slabmask = hqos->pktfield0_slabmask;
      xd->hqos_wt[tid].hqos_field0_slabshr =
        __builtin_ctzll (hqos->pktfield0_slabmask);
      xd->hqos_wt[tid].hqos_field1_slabpos = hqos->pktfield1_slabpos;
      xd->hqos_wt[tid].hqos_field1_slabmask = hqos->pktfield1_slabmask;
      xd->hqos_wt[tid].hqos_field1_slabshr =
        __builtin_ctzll (hqos->pktfield1_slabmask);
      xd->hqos_wt[tid].hqos_field2_slabpos = hqos->pktfield2_slabpos;
      xd->hqos_wt[tid].hqos_field2_slabmask = hqos->pktfield2_slabmask;
      xd->hqos_wt[tid].hqos_field2_slabshr =
        __builtin_ctzll (hqos->pktfield2_slabmask);
      memcpy (xd->hqos_wt[tid].hqos_tc_table, hqos->tc_table,
              sizeof (hqos->tc_table));
    }

  return 0;
}

/***
 *
 * HQoS run-time
 *
 ***/

/*
 * dpdk_hqos_thread - Contains the main loop of an HQoS thread.
 *
 * w - Information for the current thread
 */
static_always_inline void
dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 cpu_index = vm->cpu_index;
  u32 dev_pos;

  dev_pos = 0;
  while (1)
    {
      vlib_worker_thread_barrier_check ();

      u32 n_devs = vec_len (dm->devices_by_hqos_cpu[cpu_index]);
      if (dev_pos >= n_devs)
        dev_pos = 0;

      dpdk_device_and_queue_t *dq =
        vec_elt_at_index (dm->devices_by_hqos_cpu[cpu_index], dev_pos);
      dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);

      dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
      u32 device_index = xd->device_index;
      u16 queue_id = dq->queue_id;

      struct rte_mbuf **pkts_enq = hqos->pkts_enq;
      u32 pkts_enq_len = hqos->pkts_enq_len;
      u32 swq_pos = hqos->swq_pos;
      u32 n_swq = vec_len (hqos->swq), i;
      u32 flush_count = hqos->flush_count;

      for (i = 0; i < n_swq; i++)
        {
          /* Get current SWQ for this device */
          struct rte_ring *swq = hqos->swq[swq_pos];

          /* Read SWQ burst to packet buffer of this device */
          pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
                                                     (void **)
                                                     &pkts_enq[pkts_enq_len],
                                                     hqos->hqos_burst_enq);

          /* Get next SWQ for this device */
          swq_pos++;
          if (swq_pos >= n_swq)
            swq_pos = 0;
          hqos->swq_pos = swq_pos;

          /* HWQ TX enqueue when burst available */
          if (pkts_enq_len >= hqos->hqos_burst_enq)
            {
              u32 n_pkts = rte_eth_tx_burst (device_index,
                                             (uint16_t) queue_id,
                                             pkts_enq,
                                             (uint16_t) pkts_enq_len);

              /* Free any packets the NIC did not accept */
              for (; n_pkts < pkts_enq_len; n_pkts++)
                rte_pktmbuf_free (pkts_enq[n_pkts]);

              pkts_enq_len = 0;
              flush_count = 0;
              break;
            }
        }
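
      /*
       * Flush: a partial burst is carried in pkts_enq across scan rounds;
       * once it has waited HQOS_FLUSH_COUNT_THRESHOLD rounds without
       * filling, it is pushed out anyway so that packets are not held back
       * indefinitely under light traffic.
       */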
      if (pkts_enq_len)
        {
          flush_count++;
          if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
            {
              rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

              pkts_enq_len = 0;
              flush_count = 0;
            }
        }
      hqos->pkts_enq_len = pkts_enq_len;
      hqos->flush_count = flush_count;

      /* Advance to next device */
      dev_pos++;
    }
}

static_always_inline void
dpdk_hqos_thread_internal (vlib_main_t * vm)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 cpu_index = vm->cpu_index;
  u32 dev_pos;

  dev_pos = 0;
  while (1)
    {
      vlib_worker_thread_barrier_check ();

      u32 n_devs = vec_len (dm->devices_by_hqos_cpu[cpu_index]);
      if (PREDICT_FALSE (n_devs == 0))
        {
          dev_pos = 0;
          continue;
        }
      if (dev_pos >= n_devs)
        dev_pos = 0;

      dpdk_device_and_queue_t *dq =
        vec_elt_at_index (dm->devices_by_hqos_cpu[cpu_index], dev_pos);
      dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);

      dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
      u32 device_index = xd->device_index;
      u16 queue_id = dq->queue_id;

      struct rte_mbuf **pkts_enq = hqos->pkts_enq;
      struct rte_mbuf **pkts_deq = hqos->pkts_deq;
      u32 pkts_enq_len = hqos->pkts_enq_len;
      u32 swq_pos = hqos->swq_pos;
      u32 n_swq = vec_len (hqos->swq), i;
      u32 flush_count = hqos->flush_count;

      /*
       * SWQ dequeue and HQoS enqueue for current device
       */
      for (i = 0; i < n_swq; i++)
        {
          /* Get current SWQ for this device */
          struct rte_ring *swq = hqos->swq[swq_pos];

          /* Read SWQ burst to packet buffer of this device */
          pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
                                                     (void **)
                                                     &pkts_enq[pkts_enq_len],
                                                     hqos->hqos_burst_enq);

          /* Get next SWQ for this device */
          swq_pos++;
          if (swq_pos >= n_swq)
            swq_pos = 0;
          hqos->swq_pos = swq_pos;

          /* HQoS enqueue when burst available */
          if (pkts_enq_len >= hqos->hqos_burst_enq)
            {
              rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

              pkts_enq_len = 0;
              flush_count = 0;
              break;
            }
        }
      if (pkts_enq_len)
        {
          flush_count++;
          if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
            {
              rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

              pkts_enq_len = 0;
              flush_count = 0;
            }
        }
      hqos->pkts_enq_len = pkts_enq_len;
      hqos->flush_count = flush_count;

      /*
       * HQoS dequeue and HWQ TX enqueue for current device
       */
      {
        u32 pkts_deq_len, n_pkts;

        pkts_deq_len = rte_sched_port_dequeue (hqos->hqos,
                                               pkts_deq,
                                               hqos->hqos_burst_deq);

        for (n_pkts = 0; n_pkts < pkts_deq_len;)
          n_pkts += rte_eth_tx_burst (device_index,
                                      (uint16_t) queue_id,
                                      &pkts_deq[n_pkts],
                                      (uint16_t) (pkts_deq_len - n_pkts));
      }
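
      /*
       * Note: unlike the bypass path above, nothing is freed here;
       * rte_eth_tx_burst is retried until the NIC accepts every packet the
       * scheduler released.
       */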

      /* Advance to next device */
      dev_pos++;
    }
}

void
dpdk_hqos_thread (vlib_worker_thread_t * w)
{
  vlib_main_t *vm;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  dpdk_main_t *dm = &dpdk_main;

  vm = vlib_get_main ();

  ASSERT (vm->cpu_index == os_get_cpu_number ());

  clib_time_init (&vm->clib_time);
  clib_mem_set_heap (w->thread_mheap);

  /* Wait until the dpdk init sequence is complete */
  while (tm->worker_thread_release == 0)
    vlib_worker_thread_barrier_check ();

  if (vec_len (dm->devices_by_hqos_cpu[vm->cpu_index]) == 0)
    return
      clib_error
      ("current I/O TX thread does not have any devices assigned to it");

  if (DPDK_HQOS_DBG_BYPASS)
    dpdk_hqos_thread_internal_hqos_dbg_bypass (vm);
  else
    dpdk_hqos_thread_internal (vm);
}

void
dpdk_hqos_thread_fn (void *arg)
{
  vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
  vlib_worker_thread_init (w);
  dpdk_hqos_thread (w);
}

/* *INDENT-OFF* */
VLIB_REGISTER_THREAD (hqos_thread_reg, static) =
{
  .name = "hqos-threads",
  .short_name = "hqos-threads",
  .function = dpdk_hqos_thread_fn,
};
/* *INDENT-ON* */
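
/*
 * For illustration (not part of this file): HQoS threads are typically
 * enabled from startup.conf by pinning them to cores and flagging the
 * device, along the lines of:
 *
 *   cpu  { main-core 0  corelist-workers 1-2  corelist-hqos-threads 3 }
 *   dpdk { dev 0000:02:00.0 { hqos } }
 *
 * The exact stanza names depend on the VPP release.
 */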

/*
 * HQoS run-time code to be called by the worker threads
 */
#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr)  \
({                                                           \
  u64 slab = *((u64 *) &byte_array[slab_pos]);               \
  u64 val = (rte_be_to_cpu_64 (slab) & slab_mask) >> slab_shr; \
  val;                                                       \
})
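
/*
 * Worked example: for a byte_array starting 0x45 0x00 ... (an IPv4 header),
 * BITFIELD (byte_array, 0, 0xF000000000000000LLU, 60) loads bytes 0..7 as a
 * big-endian u64, keeps the top nibble (the IP version field) and shifts it
 * down, returning 4.
 */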

#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, traffic_class, queue, color) \
  ((((u64) (queue)) & 0x3) |                                 \
   ((((u64) (traffic_class)) & 0x3) << 2) |                  \
   ((((u64) (color)) & 0x3) << 4) |                          \
   ((((u64) (subport)) & 0xFFFF) << 16) |                    \
   ((((u64) (pipe)) & 0xFFFFFFFF) << 32))
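
/*
 * Resulting 64-bit scheduler word, stored into the mbuf sched field below:
 * bits 0..1 queue, 2..3 traffic class, 4..5 color, 16..31 subport,
 * 32..63 pipe. This matches the rte_sched_port_hierarchy bitfield layout
 * that rte_sched reads back when classifying (in this DPDK generation).
 */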

void
dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
                        struct rte_mbuf **pkts, u32 n_pkts)
{
  u32 i;

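  /*
   * Main loop, unrolled four packets per iteration so the independent
   * field extractions can overlap: read subport, pipe and DSCP via
   * BITFIELD, translate DSCP to (traffic class, queue) through tc_table,
   * and pack the result into each mbuf's sched word.
   */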
  for (i = 0; i < (n_pkts & (~0x3)); i += 4)
    {
      struct rte_mbuf *pkt0 = pkts[i];
      struct rte_mbuf *pkt1 = pkts[i + 1];
      struct rte_mbuf *pkt2 = pkts[i + 2];
      struct rte_mbuf *pkt3 = pkts[i + 3];

      u8 *pkt0_data = rte_pktmbuf_mtod (pkt0, u8 *);
      u8 *pkt1_data = rte_pktmbuf_mtod (pkt1, u8 *);
      u8 *pkt2_data = rte_pktmbuf_mtod (pkt2, u8 *);
      u8 *pkt3_data = rte_pktmbuf_mtod (pkt3, u8 *);

      u64 pkt0_subport = BITFIELD (pkt0_data, hqos->hqos_field0_slabpos,
                                   hqos->hqos_field0_slabmask,
                                   hqos->hqos_field0_slabshr);
      u64 pkt0_pipe = BITFIELD (pkt0_data, hqos->hqos_field1_slabpos,
                                hqos->hqos_field1_slabmask,
                                hqos->hqos_field1_slabshr);
      u64 pkt0_dscp = BITFIELD (pkt0_data, hqos->hqos_field2_slabpos,
                                hqos->hqos_field2_slabmask,
                                hqos->hqos_field2_slabshr);
      u32 pkt0_tc = hqos->hqos_tc_table[pkt0_dscp & 0x3F] >> 2;
      u32 pkt0_tc_q = hqos->hqos_tc_table[pkt0_dscp & 0x3F] & 0x3;

      u64 pkt1_subport = BITFIELD (pkt1_data, hqos->hqos_field0_slabpos,
                                   hqos->hqos_field0_slabmask,
                                   hqos->hqos_field0_slabshr);
      u64 pkt1_pipe = BITFIELD (pkt1_data, hqos->hqos_field1_slabpos,
                                hqos->hqos_field1_slabmask,
                                hqos->hqos_field1_slabshr);
      u64 pkt1_dscp = BITFIELD (pkt1_data, hqos->hqos_field2_slabpos,
                                hqos->hqos_field2_slabmask,
                                hqos->hqos_field2_slabshr);
      u32 pkt1_tc = hqos->hqos_tc_table[pkt1_dscp & 0x3F] >> 2;
      u32 pkt1_tc_q = hqos->hqos_tc_table[pkt1_dscp & 0x3F] & 0x3;

      u64 pkt2_subport = BITFIELD (pkt2_data, hqos->hqos_field0_slabpos,
                                   hqos->hqos_field0_slabmask,
                                   hqos->hqos_field0_slabshr);
      u64 pkt2_pipe = BITFIELD (pkt2_data, hqos->hqos_field1_slabpos,
                                hqos->hqos_field1_slabmask,
                                hqos->hqos_field1_slabshr);
      u64 pkt2_dscp = BITFIELD (pkt2_data, hqos->hqos_field2_slabpos,
                                hqos->hqos_field2_slabmask,
                                hqos->hqos_field2_slabshr);
      u32 pkt2_tc = hqos->hqos_tc_table[pkt2_dscp & 0x3F] >> 2;
      u32 pkt2_tc_q = hqos->hqos_tc_table[pkt2_dscp & 0x3F] & 0x3;

      u64 pkt3_subport = BITFIELD (pkt3_data, hqos->hqos_field0_slabpos,
                                   hqos->hqos_field0_slabmask,
                                   hqos->hqos_field0_slabshr);
      u64 pkt3_pipe = BITFIELD (pkt3_data, hqos->hqos_field1_slabpos,
                                hqos->hqos_field1_slabmask,
                                hqos->hqos_field1_slabshr);
      u64 pkt3_dscp = BITFIELD (pkt3_data, hqos->hqos_field2_slabpos,
                                hqos->hqos_field2_slabmask,
                                hqos->hqos_field2_slabshr);
      u32 pkt3_tc = hqos->hqos_tc_table[pkt3_dscp & 0x3F] >> 2;
      u32 pkt3_tc_q = hqos->hqos_tc_table[pkt3_dscp & 0x3F] & 0x3;

      u64 pkt0_sched = RTE_SCHED_PORT_HIERARCHY (pkt0_subport,
                                                 pkt0_pipe,
                                                 pkt0_tc, pkt0_tc_q, 0);
      u64 pkt1_sched = RTE_SCHED_PORT_HIERARCHY (pkt1_subport,
                                                 pkt1_pipe,
                                                 pkt1_tc, pkt1_tc_q, 0);
      u64 pkt2_sched = RTE_SCHED_PORT_HIERARCHY (pkt2_subport,
                                                 pkt2_pipe,
                                                 pkt2_tc, pkt2_tc_q, 0);
      u64 pkt3_sched = RTE_SCHED_PORT_HIERARCHY (pkt3_subport,
                                                 pkt3_pipe,
                                                 pkt3_tc, pkt3_tc_q, 0);

      pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
      pkt0->hash.sched.hi = pkt0_sched >> 32;
      pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
      pkt1->hash.sched.hi = pkt1_sched >> 32;
      pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
      pkt2->hash.sched.hi = pkt2_sched >> 32;
      pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
      pkt3->hash.sched.hi = pkt3_sched >> 32;
    }

  for (; i < n_pkts; i++)
    {
      struct rte_mbuf *pkt = pkts[i];

      u8 *pkt_data = rte_pktmbuf_mtod (pkt, u8 *);

      u64 pkt_subport = BITFIELD (pkt_data, hqos->hqos_field0_slabpos,
                                  hqos->hqos_field0_slabmask,
                                  hqos->hqos_field0_slabshr);
      u64 pkt_pipe = BITFIELD (pkt_data, hqos->hqos_field1_slabpos,
                               hqos->hqos_field1_slabmask,
                               hqos->hqos_field1_slabshr);
      u64 pkt_dscp = BITFIELD (pkt_data, hqos->hqos_field2_slabpos,
                               hqos->hqos_field2_slabmask,
                               hqos->hqos_field2_slabshr);
      u32 pkt_tc = hqos->hqos_tc_table[pkt_dscp & 0x3F] >> 2;
      u32 pkt_tc_q = hqos->hqos_tc_table[pkt_dscp & 0x3F] & 0x3;

      u64 pkt_sched = RTE_SCHED_PORT_HIERARCHY (pkt_subport,
                                                pkt_pipe,
                                                pkt_tc, pkt_tc_q, 0);

      pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
      pkt->hash.sched.hi = pkt_sched >> 32;
    }
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */