FD.io VPP v18.04-17-g3a0d853 (Vector Packet Processing)
hqos.c
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <string.h>
#include <fcntl.h>

#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/bitmap.h>

#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <dpdk/device/dpdk.h>

#include <vlib/pci/pci.h>
#include <vlibmemory/api.h>
#include <vlibmemory/vl_memory_msg_enum.h>  /* enumerate all vlib messages */

#define vl_typedefs  /* define message structures */
#include <vlibmemory/vl_memory_api_h.h>
#undef vl_typedefs

/* instantiate all the print functions we know about */
#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
#define vl_printfun
#include <vlibmemory/vl_memory_api_h.h>
#undef vl_printfun

#include <dpdk/device/dpdk_priv.h>
/***
 *
 * HQoS default configuration values
 *
 ***/

static dpdk_device_config_hqos_t hqos_params_default = {
  .hqos_thread_valid = 0,

  .swq_size = 4096,
  .burst_enq = 256,
  .burst_deq = 220,

  /*
   * Packet field to identify the subport.
   *
   * Default value: Since only one subport is defined by default (see below:
   * n_subports_per_port = 1), the subport ID is hardcoded to 0.
   */
  .pktfield0_slabpos = 0,
  .pktfield0_slabmask = 0,

  /*
   * Packet field to identify the pipe.
   *
   * Default value: Assuming Ethernet/IPv4/UDP packets, UDP payload bits
   * 12 .. 23 (see the illustration after this initializer).
   */
  .pktfield1_slabpos = 40,
  .pktfield1_slabmask = 0x0000000FFF000000LLU,

  /*
   * Packet field used as index into the TC translation table to identify
   * the traffic class and queue.
   *
   * Default value: Assuming Ethernet/IPv4 packets, IPv4 DSCP field
   */
  .pktfield2_slabpos = 8,
  .pktfield2_slabmask = 0x00000000000000FCLLU,
  .tc_table = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
  },

  /* port */
  .port = {
    .name = NULL,        /* Set at init */
    .socket = 0,         /* Set at init */
    .rate = 1250000000,  /* Assuming 10GbE port */
    .mtu = 14 + 1500,    /* Assuming Ethernet/IPv4 pkt (Ethernet FCS not included) */
    .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
    .n_subports_per_port = 1,
    .n_pipes_per_subport = 4096,
    .qsize = {64, 64, 64, 64},
    .pipe_profiles = NULL,  /* Set at config */
    .n_pipe_profiles = 1,

#ifdef RTE_SCHED_RED
    .red_params = {
      /* Traffic Class 0 - Colors Green / Yellow / Red */
      [0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

      /* Traffic Class 1 - Colors Green / Yellow / Red */
      [1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

      /* Traffic Class 2 - Colors Green / Yellow / Red */
      [2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

      /* Traffic Class 3 - Colors Green / Yellow / Red */
      [3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}
    },
#endif /* RTE_SCHED_RED */
  },
};
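
/*
 * Illustration (added; not part of the original source): how the slab
 * fields above select packet bytes.  A "slab" is the 8-byte word loaded
 * at byte offset slabpos and converted from network to host byte order;
 * slabmask then keeps a contiguous bit range out of that word (see the
 * BITFIELD macro near the end of this file).  The helper below is a
 * hypothetical restatement of that logic as a function.
 */
static inline u64
hqos_slab_extract_sketch (u8 * pkt, u32 slabpos, u64 slabmask)
{
  u64 slab = *((u64 *) &pkt[slabpos]);	/* packet bytes slabpos .. slabpos+7 */
  /* after byte-swapping, pkt[slabpos] is the MSB and pkt[slabpos+7] the LSB */
  return (rte_be_to_cpu_64 (slab) & slabmask) >> __builtin_ctzll (slabmask);
}

/*
 * With the defaults above, pktfield1 (slabpos = 40, mask bits 24..35 of
 * the slab covering packet bytes 40..47) selects the low nibble of packet
 * byte 43 plus all of byte 44, i.e. UDP payload bits 12..23 of an
 * Ethernet/IPv4/UDP frame: a 12-bit pipe ID matching
 * n_pipes_per_subport = 4096 = 2^12.
 */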

static struct rte_sched_subport_params hqos_subport_params_default = {
  .tb_rate = 1250000000,  /* 10GbE line rate (measured in bytes/second) */
  .tb_size = 1000000,
  .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
  .tc_period = 10,
};

static struct rte_sched_pipe_params hqos_pipe_params_default = {
  .tb_rate = 305175,  /* 10GbE line rate divided by 4K pipes */
  .tb_size = 1000000,
  .tc_rate = {305175, 305175, 305175, 305175},
  .tc_period = 40,
#ifdef RTE_SCHED_SUBPORT_TC_OV
  .tc_ov_weight = 1,
#endif
  .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
};
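
/*
 * Worked arithmetic (added for clarity): the default pipe rate follows
 * from the default port rate and pipe count above:
 * 1250000000 bytes/s / 4096 pipes = 305175.78... bytes/s, truncated to
 * 305175.  Each traffic class also defaults to the full pipe rate, so by
 * default the classes are constrained only by the pipe token bucket.
 */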

/***
 *
 * HQoS configuration
 *
 ***/

int
dpdk_hqos_validate_mask (u64 mask, u32 n)
{
  int count = __builtin_popcountll (mask);
  int pos_lead = sizeof (u64) * 8 - __builtin_clzll (mask);
  int pos_trail = __builtin_ctzll (mask);
  int count_expected = __builtin_popcount (n - 1);

  /* Handle the exceptions */
  if (n == 0)
    return -1;			/* Error */

  if ((mask == 0) && (n == 1))
    return 0;			/* OK */

  if (((mask == 0) && (n != 1)) || ((mask != 0) && (n == 1)))
    return -2;			/* Error */

  /* Check that mask is contiguous */
  if ((pos_lead - pos_trail) != count)
    return -3;			/* Error */

  /* Check that mask contains the expected number of bits set */
  if (count != count_expected)
    return -4;			/* Error */

  return 0;			/* OK */
}
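
/*
 * Illustration (added; hypothetical helper, not part of the original
 * source): expected verdicts of the validator above for a few masks.
 */
static inline void
hqos_validate_mask_examples_sketch (void)
{
  /* 12 contiguous mask bits match n = 4096 = 2^12 pipes */
  ASSERT (dpdk_hqos_validate_mask (0x0000000FFF000000LLU, 4096) == 0);
  /* a single entity (n = 1) requires a zero mask */
  ASSERT (dpdk_hqos_validate_mask (0, 1) == 0);
  /* non-contiguous bits are rejected */
  ASSERT (dpdk_hqos_validate_mask (0xF0F, 4096) == -3);
  /* contiguous but wrong width: 8 bits set where 12 are expected */
  ASSERT (dpdk_hqos_validate_mask (0xFF, 4096) == -4);
}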

void
dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
					      hqos, u32 pipe_profile_id)
{
  memcpy (&hqos->pipe[pipe_profile_id], &hqos_pipe_params_default,
	  sizeof (hqos_pipe_params_default));
}

void
dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos)
{
  struct rte_sched_subport_params *subport_params;
  struct rte_sched_pipe_params *pipe_params;
  u32 *pipe_map;
  u32 i;

  memcpy (hqos, &hqos_params_default, sizeof (hqos_params_default));

  /* pipe */
  vec_add2 (hqos->pipe, pipe_params, hqos->port.n_pipe_profiles);

  for (i = 0; i < vec_len (hqos->pipe); i++)
    memcpy (&pipe_params[i],
	    &hqos_pipe_params_default, sizeof (hqos_pipe_params_default));

  hqos->port.pipe_profiles = hqos->pipe;

  /* subport */
  vec_add2 (hqos->subport, subport_params, hqos->port.n_subports_per_port);

  for (i = 0; i < vec_len (hqos->subport); i++)
    memcpy (&subport_params[i],
	    &hqos_subport_params_default,
	    sizeof (hqos_subport_params_default));

  /* pipe map: default every pipe to profile 0 */
  vec_add2 (hqos->pipe_map,
	    pipe_map,
	    hqos->port.n_subports_per_port * hqos->port.n_pipes_per_subport);

  for (i = 0; i < vec_len (hqos->pipe_map); i++)
    pipe_map[i] = 0;
}

/***
 *
 * HQoS init
 *
 ***/

clib_error_t *
dpdk_port_setup_hqos (dpdk_device_t * xd, dpdk_device_config_hqos_t * hqos)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  char name[32];
  u32 subport_id, i;
  int rv;

  /* Detect the set of worker threads */
  int worker_thread_first = 0;
  int worker_thread_count = 0;

  uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
  vlib_thread_registration_t *tr =
    p ? (vlib_thread_registration_t *) p[0] : 0;

  if (tr && tr->count > 0)
    {
      worker_thread_first = tr->first_index;
      worker_thread_count = tr->count;
    }

  /* Allocate the per-thread device data array */
  vec_validate_aligned (xd->hqos_wt, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  memset (xd->hqos_wt, 0, tm->n_vlib_mains * sizeof (xd->hqos_wt[0]));

  vec_validate_aligned (xd->hqos_ht, 0, CLIB_CACHE_LINE_BYTES);
  memset (xd->hqos_ht, 0, sizeof (xd->hqos_ht[0]));

  /* Allocate space in the I/O TX thread data structure for one SWQ per
   * worker thread, plus one for the main thread */
  vec_validate (xd->hqos_ht->swq, worker_thread_count);

  /* SWQ */
  for (i = 0; i < worker_thread_count + 1; i++)
    {
      u32 swq_flags = RING_F_SP_ENQ | RING_F_SC_DEQ;

      snprintf (name, sizeof (name), "SWQ-worker%u-to-device%u", i,
		xd->device_index);
      xd->hqos_ht->swq[i] =
	rte_ring_create (name, hqos->swq_size, xd->cpu_socket, swq_flags);
      if (xd->hqos_ht->swq[i] == NULL)
	return clib_error_return (0,
				  "SWQ-worker%u-to-device%u: rte_ring_create err",
				  i, xd->device_index);
    }

  /*
   * HQoS
   */

  /* HQoS port */
  snprintf (name, sizeof (name), "HQoS%u", xd->device_index);
  hqos->port.name = strdup (name);
  if (hqos->port.name == NULL)
    return clib_error_return (0, "HQoS%u: strdup err", xd->device_index);

  hqos->port.socket = rte_eth_dev_socket_id (xd->device_index);
  if (hqos->port.socket == SOCKET_ID_ANY)
    hqos->port.socket = 0;

  xd->hqos_ht->hqos = rte_sched_port_config (&hqos->port);
  if (xd->hqos_ht->hqos == NULL)
    return clib_error_return (0, "HQoS%u: rte_sched_port_config err",
			      xd->device_index);

  /* HQoS subport */
  for (subport_id = 0; subport_id < hqos->port.n_subports_per_port;
       subport_id++)
    {
      u32 pipe_id;

      rv =
	rte_sched_subport_config (xd->hqos_ht->hqos, subport_id,
				  &hqos->subport[subport_id]);
      if (rv)
	return clib_error_return (0,
				  "HQoS%u subport %u: rte_sched_subport_config err (%d)",
				  xd->device_index, subport_id, rv);

      /* HQoS pipe */
      for (pipe_id = 0; pipe_id < hqos->port.n_pipes_per_subport; pipe_id++)
	{
	  u32 pos = subport_id * hqos->port.n_pipes_per_subport + pipe_id;
	  u32 profile_id = hqos->pipe_map[pos];

	  rv =
	    rte_sched_pipe_config (xd->hqos_ht->hqos, subport_id, pipe_id,
				   profile_id);
	  if (rv)
	    return clib_error_return (0,
				      "HQoS%u subport %u pipe %u: rte_sched_pipe_config err (%d)",
				      xd->device_index, subport_id, pipe_id,
				      rv);
	}
    }

  /* Set up per-thread device data for the I/O TX thread */
  xd->hqos_ht->hqos_burst_enq = hqos->burst_enq;
  xd->hqos_ht->hqos_burst_deq = hqos->burst_deq;
  vec_validate (xd->hqos_ht->pkts_enq, 2 * hqos->burst_enq - 1);
  vec_validate (xd->hqos_ht->pkts_deq, hqos->burst_deq - 1);
  xd->hqos_ht->pkts_enq_len = 0;
  xd->hqos_ht->swq_pos = 0;
  xd->hqos_ht->flush_count = 0;

  /* Set up per-thread device data for each worker thread */
  for (i = 0; i < worker_thread_count + 1; i++)
    {
      u32 tid;
      if (i)
	tid = worker_thread_first + (i - 1);
      else
	tid = i;

      xd->hqos_wt[tid].swq = xd->hqos_ht->swq[i];
      xd->hqos_wt[tid].hqos_field0_slabpos = hqos->pktfield0_slabpos;
      xd->hqos_wt[tid].hqos_field0_slabmask = hqos->pktfield0_slabmask;
      xd->hqos_wt[tid].hqos_field0_slabshr =
	__builtin_ctzll (hqos->pktfield0_slabmask);
      xd->hqos_wt[tid].hqos_field1_slabpos = hqos->pktfield1_slabpos;
      xd->hqos_wt[tid].hqos_field1_slabmask = hqos->pktfield1_slabmask;
      xd->hqos_wt[tid].hqos_field1_slabshr =
	__builtin_ctzll (hqos->pktfield1_slabmask);
      xd->hqos_wt[tid].hqos_field2_slabpos = hqos->pktfield2_slabpos;
      xd->hqos_wt[tid].hqos_field2_slabmask = hqos->pktfield2_slabmask;
      xd->hqos_wt[tid].hqos_field2_slabshr =
	__builtin_ctzll (hqos->pktfield2_slabmask);
      memcpy (xd->hqos_wt[tid].hqos_tc_table, hqos->tc_table,
	      sizeof (hqos->tc_table));
    }

  return 0;
}
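
/*
 * Usage sketch (added; hypothetical call sequence, not part of this
 * file): a device configuration is typically seeded with the defaults
 * and then applied to the port, roughly:
 *
 *   dpdk_device_config_hqos_default (&devconf->hqos);
 *   ...
 *   clib_error_t *error = dpdk_port_setup_hqos (xd, &devconf->hqos);
 *   if (error)
 *     clib_error_report (error);
 */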

/***
 *
 * HQoS run-time
 *
 ***/
/*
 * dpdk_hqos_thread - Contains the main loop of an HQoS thread.
 *
 * w
 *     Information for the current thread
 */
static_always_inline void
dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 thread_index = vm->thread_index;
  u32 dev_pos;

  dev_pos = 0;
  while (1)
    {
      vlib_worker_thread_barrier_check ();

      u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]);
      if (dev_pos >= n_devs)
	dev_pos = 0;

      dpdk_device_and_queue_t *dq =
	vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos);
      dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);

      dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
      u32 device_index = xd->device_index;
      u16 queue_id = dq->queue_id;

      struct rte_mbuf **pkts_enq = hqos->pkts_enq;
      u32 pkts_enq_len = hqos->pkts_enq_len;
      u32 swq_pos = hqos->swq_pos;
      u32 n_swq = vec_len (hqos->swq), i;
      u32 flush_count = hqos->flush_count;

      for (i = 0; i < n_swq; i++)
	{
	  /* Get current SWQ for this device */
	  struct rte_ring *swq = hqos->swq[swq_pos];

	  /* Read SWQ burst to packet buffer of this device */
	  pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
						     (void **)
						     &pkts_enq[pkts_enq_len],
						     hqos->hqos_burst_enq, 0);

	  /* Get next SWQ for this device */
	  swq_pos++;
	  if (swq_pos >= n_swq)
	    swq_pos = 0;
	  hqos->swq_pos = swq_pos;

	  /* HWQ TX enqueue when burst available */
	  if (pkts_enq_len >= hqos->hqos_burst_enq)
	    {
	      u32 n_pkts = rte_eth_tx_burst (device_index,
					     (uint16_t) queue_id,
					     pkts_enq,
					     (uint16_t) pkts_enq_len);

	      for (; n_pkts < pkts_enq_len; n_pkts++)
		rte_pktmbuf_free (pkts_enq[n_pkts]);

	      pkts_enq_len = 0;
	      flush_count = 0;
	      break;
	    }
	}
      /* Push a pending partial burst once it has waited too many rounds */
      if (pkts_enq_len)
	{
	  flush_count++;
	  if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
	    {
	      rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

	      pkts_enq_len = 0;
	      flush_count = 0;
	    }
	}
      hqos->pkts_enq_len = pkts_enq_len;
      hqos->flush_count = flush_count;

      /* Advance to next device */
      dev_pos++;
    }
}

static_always_inline void
dpdk_hqos_thread_internal (vlib_main_t * vm)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 thread_index = vm->thread_index;
  u32 dev_pos;

  dev_pos = 0;
  while (1)
    {
      vlib_worker_thread_barrier_check ();

      u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]);
      if (PREDICT_FALSE (n_devs == 0))
	{
	  dev_pos = 0;
	  continue;
	}
      if (dev_pos >= n_devs)
	dev_pos = 0;

      dpdk_device_and_queue_t *dq =
	vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos);
      dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);

      dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
      u32 device_index = xd->device_index;
      u16 queue_id = dq->queue_id;

      struct rte_mbuf **pkts_enq = hqos->pkts_enq;
      struct rte_mbuf **pkts_deq = hqos->pkts_deq;
      u32 pkts_enq_len = hqos->pkts_enq_len;
      u32 swq_pos = hqos->swq_pos;
      u32 n_swq = vec_len (hqos->swq), i;
      u32 flush_count = hqos->flush_count;

      /*
       * SWQ dequeue and HQoS enqueue for current device
       */
      for (i = 0; i < n_swq; i++)
	{
	  /* Get current SWQ for this device */
	  struct rte_ring *swq = hqos->swq[swq_pos];

	  /* Read SWQ burst to packet buffer of this device */
	  pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
						     (void **)
						     &pkts_enq[pkts_enq_len],
						     hqos->hqos_burst_enq, 0);

	  /* Get next SWQ for this device */
	  swq_pos++;
	  if (swq_pos >= n_swq)
	    swq_pos = 0;
	  hqos->swq_pos = swq_pos;

	  /* HQoS enqueue when burst available */
	  if (pkts_enq_len >= hqos->hqos_burst_enq)
	    {
	      rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

	      pkts_enq_len = 0;
	      flush_count = 0;
	      break;
	    }
	}
      /* Push a pending partial burst once it has waited too many rounds */
      if (pkts_enq_len)
	{
	  flush_count++;
	  if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
	    {
	      rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

	      pkts_enq_len = 0;
	      flush_count = 0;
	    }
	}
      hqos->pkts_enq_len = pkts_enq_len;
      hqos->flush_count = flush_count;

      /*
       * HQoS dequeue and HWQ TX enqueue for current device
       */
      {
	u32 pkts_deq_len, n_pkts;

	pkts_deq_len = rte_sched_port_dequeue (hqos->hqos,
					       pkts_deq,
					       hqos->hqos_burst_deq);

	for (n_pkts = 0; n_pkts < pkts_deq_len;)
	  n_pkts += rte_eth_tx_burst (device_index,
				      (uint16_t) queue_id,
				      &pkts_deq[n_pkts],
				      (uint16_t) (pkts_deq_len - n_pkts));
      }

      /* Advance to next device */
      dev_pos++;
    }
}

void
dpdk_hqos_thread (vlib_worker_thread_t * w)
{
  vlib_main_t *vm;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  dpdk_main_t *dm = &dpdk_main;

  vm = vlib_get_main ();

  ASSERT (vm->thread_index == vlib_get_thread_index ());

  clib_time_init (&vm->clib_time);
  clib_mem_set_heap (w->thread_mheap);

  /* Wait until the dpdk init sequence is complete */
  while (tm->worker_thread_release == 0)
    vlib_worker_thread_barrier_check ();

  if (vec_len (dm->devices_by_hqos_cpu[vm->thread_index]) == 0)
    return
      clib_error
      ("current I/O TX thread does not have any devices assigned to it");

  if (DPDK_HQOS_DBG_BYPASS)
    dpdk_hqos_thread_internal_hqos_dbg_bypass (vm);
  else
    dpdk_hqos_thread_internal (vm);
}

void
dpdk_hqos_thread_fn (void *arg)
{
  vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
  vlib_worker_thread_init (w);
  dpdk_hqos_thread (w);
}

/* *INDENT-OFF* */
VLIB_REGISTER_THREAD (hqos_thread_reg, static) =
{
  .name = "hqos-threads",
  .short_name = "hqos-threads",
  .function = dpdk_hqos_thread_fn,
};
/* *INDENT-ON* */

/*
 * HQoS run-time code to be called by the worker threads
 */
#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr)     \
({                                                              \
  u64 slab = *((u64 *) &byte_array[slab_pos]);                  \
  u64 val = (rte_be_to_cpu_64 (slab) & slab_mask) >> slab_shr;  \
  val;                                                          \
})

#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, traffic_class, queue, color) \
  ((((u64) (queue)) & 0x3) |                  \
   ((((u64) (traffic_class)) & 0x3) << 2) |   \
   ((((u64) (color)) & 0x3) << 4) |           \
   ((((u64) (subport)) & 0xFFFF) << 16) |     \
   ((((u64) (pipe)) & 0xFFFFFFFF) << 32))
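
/*
 * Worked example (added for clarity): for hypothetical values
 * subport = 0, pipe = 5, traffic_class = 2, queue = 1, color = 0, the
 * macro above packs:
 *
 *   queue:   (1 & 0x3)              = 0x1
 *   tc:      (2 & 0x3) << 2         = 0x8
 *   color:   (0 & 0x3) << 4         = 0x0
 *   subport: (0 & 0xFFFF) << 16     = 0x0
 *   pipe:    (5 & 0xFFFFFFFF) << 32 = 0x500000000
 *
 * giving 0x500000009.  dpdk_hqos_metadata_set() below stores the low and
 * high 32 bits in mbuf hash.sched.lo/hi for the scheduler to consume.
 */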

void
dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
			struct rte_mbuf **pkts, u32 n_pkts)
{
  u32 i;

  /* Main loop: four packets per iteration (hand-unrolled) */
  for (i = 0; i < (n_pkts & (~0x3)); i += 4)
    {
      struct rte_mbuf *pkt0 = pkts[i];
      struct rte_mbuf *pkt1 = pkts[i + 1];
      struct rte_mbuf *pkt2 = pkts[i + 2];
      struct rte_mbuf *pkt3 = pkts[i + 3];

      u8 *pkt0_data = rte_pktmbuf_mtod (pkt0, u8 *);
      u8 *pkt1_data = rte_pktmbuf_mtod (pkt1, u8 *);
      u8 *pkt2_data = rte_pktmbuf_mtod (pkt2, u8 *);
      u8 *pkt3_data = rte_pktmbuf_mtod (pkt3, u8 *);

      u64 pkt0_subport = BITFIELD (pkt0_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt0_pipe = BITFIELD (pkt0_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt0_dscp = BITFIELD (pkt0_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt0_tc = hqos->hqos_tc_table[pkt0_dscp & 0x3F] >> 2;
      u32 pkt0_tc_q = hqos->hqos_tc_table[pkt0_dscp & 0x3F] & 0x3;

      u64 pkt1_subport = BITFIELD (pkt1_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt1_pipe = BITFIELD (pkt1_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt1_dscp = BITFIELD (pkt1_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt1_tc = hqos->hqos_tc_table[pkt1_dscp & 0x3F] >> 2;
      u32 pkt1_tc_q = hqos->hqos_tc_table[pkt1_dscp & 0x3F] & 0x3;

      u64 pkt2_subport = BITFIELD (pkt2_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt2_pipe = BITFIELD (pkt2_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt2_dscp = BITFIELD (pkt2_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt2_tc = hqos->hqos_tc_table[pkt2_dscp & 0x3F] >> 2;
      u32 pkt2_tc_q = hqos->hqos_tc_table[pkt2_dscp & 0x3F] & 0x3;

      u64 pkt3_subport = BITFIELD (pkt3_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt3_pipe = BITFIELD (pkt3_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt3_dscp = BITFIELD (pkt3_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt3_tc = hqos->hqos_tc_table[pkt3_dscp & 0x3F] >> 2;
      u32 pkt3_tc_q = hqos->hqos_tc_table[pkt3_dscp & 0x3F] & 0x3;

      u64 pkt0_sched = RTE_SCHED_PORT_HIERARCHY (pkt0_subport,
						 pkt0_pipe,
						 pkt0_tc, pkt0_tc_q, 0);
      u64 pkt1_sched = RTE_SCHED_PORT_HIERARCHY (pkt1_subport,
						 pkt1_pipe,
						 pkt1_tc, pkt1_tc_q, 0);
      u64 pkt2_sched = RTE_SCHED_PORT_HIERARCHY (pkt2_subport,
						 pkt2_pipe,
						 pkt2_tc, pkt2_tc_q, 0);
      u64 pkt3_sched = RTE_SCHED_PORT_HIERARCHY (pkt3_subport,
						 pkt3_pipe,
						 pkt3_tc, pkt3_tc_q, 0);

      pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
      pkt0->hash.sched.hi = pkt0_sched >> 32;
      pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
      pkt1->hash.sched.hi = pkt1_sched >> 32;
      pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
      pkt2->hash.sched.hi = pkt2_sched >> 32;
      pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
      pkt3->hash.sched.hi = pkt3_sched >> 32;
    }

  /* Tail: remaining 0..3 packets */
  for (; i < n_pkts; i++)
    {
      struct rte_mbuf *pkt = pkts[i];

      u8 *pkt_data = rte_pktmbuf_mtod (pkt, u8 *);

      u64 pkt_subport = BITFIELD (pkt_data, hqos->hqos_field0_slabpos,
				  hqos->hqos_field0_slabmask,
				  hqos->hqos_field0_slabshr);
      u64 pkt_pipe = BITFIELD (pkt_data, hqos->hqos_field1_slabpos,
			       hqos->hqos_field1_slabmask,
			       hqos->hqos_field1_slabshr);
      u64 pkt_dscp = BITFIELD (pkt_data, hqos->hqos_field2_slabpos,
			       hqos->hqos_field2_slabmask,
			       hqos->hqos_field2_slabshr);
      u32 pkt_tc = hqos->hqos_tc_table[pkt_dscp & 0x3F] >> 2;
      u32 pkt_tc_q = hqos->hqos_tc_table[pkt_dscp & 0x3F] & 0x3;

      u64 pkt_sched = RTE_SCHED_PORT_HIERARCHY (pkt_subport,
						pkt_pipe,
						pkt_tc, pkt_tc_q, 0);

      pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
      pkt->hash.sched.hi = pkt_sched >> 32;
    }
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */