FD.io VPP  v21.06-1-gbb7418cf9
Vector Packet Processing
avf_fdir_lib.c
Go to the documentation of this file.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2020 Intel and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vppinfra/mem.h>
19 #include "avf_advanced_flow.h"
20 
21 #define AVF_FDIR_IPV6_TC_OFFSET 20
22 #define AVF_IPV6_TC_MASK (0xFF << AVF_FDIR_IPV6_TC_OFFSET)
23 #define AVF_FDIR_MAX_QREGION_SIZE 128
24 
25 /*
26  * Return the last (most-significant) bit set.
27  */
28 static inline int
30 {
31  return (x == 0) ? 0 : 32 - count_leading_zeros (x);
32 }
33 
34 static inline int
36 {
37  const u16 *w = (const u16 *) ea;
38 
39  return (w[0] | w[1] | w[2]) == 0;
40 }
41 
42 int
43 avf_fdir_rcfg_create (struct avf_fdir_conf **rcfg, int tunnel_level, u16 vsi,
44  u16 nrxq)
45 {
46  (*rcfg) = clib_mem_alloc (sizeof (**rcfg));
47  if ((*rcfg) == NULL)
48  {
49  return -1;
50  }
51 
52  clib_memset (*rcfg, 0, sizeof (**rcfg));
53 
54  (*rcfg)->add_fltr.rule_cfg.proto_hdrs.tunnel_level = tunnel_level;
55  (*rcfg)->vsi = vsi;
56  (*rcfg)->nb_rx_queues = nrxq;
57 
58  return 0;
59 }
60 
/*
 * Release a rule-config object previously obtained from
 * avf_fdir_rcfg_create.  Always returns 0.
 */
int
avf_fdir_rcfg_destroy (struct avf_fdir_conf *rcfg)
{
  clib_mem_free (rcfg);

  return 0;
}
68 
69 int
70 avf_fdir_rcfg_set_hdr (struct avf_fdir_conf *rcfg, int layer,
71  enum virtchnl_proto_hdr_type hdr)
72 {
73  struct virtchnl_proto_hdrs *hdrs;
74 
75  hdrs = &rcfg->add_fltr.rule_cfg.proto_hdrs;
76  if (layer >= VIRTCHNL_MAX_NUM_PROTO_HDRS)
77  return -1;
78 
79  hdrs->proto_hdr[layer].type = hdr;
80 
81  return 0;
82 }
83 
84 int
85 avf_fdir_rcfg_set_field (struct avf_fdir_conf *rcfg, int layer,
86  struct avf_flow_item *item,
87  struct avf_flow_error *error)
88 {
89  const struct avf_ipv4_hdr *ipv4_spec, *ipv4_mask;
90  const struct avf_ipv6_hdr *ipv6_spec, *ipv6_mask;
91  const struct avf_udp_hdr *udp_spec, *udp_mask;
92  const struct avf_tcp_hdr *tcp_spec, *tcp_mask;
93  const struct avf_sctp_hdr *sctp_spec, *sctp_mask;
94  const struct avf_gtp_hdr *gtp_spec, *gtp_mask;
95  const struct avf_gtp_psc_hdr *gtp_psc_spec, *gtp_psc_mask;
96  const struct avf_l2tpv3oip_hdr *l2tpv3oip_spec, *l2tpv3oip_mask;
97  const struct avf_esp_hdr *esp_spec, *esp_mask;
98  const struct avf_ah_hdr *ah_spec, *ah_mask;
99  const struct avf_pfcp_hdr *pfcp_spec, *pfcp_mask;
100  const struct avf_flow_eth_hdr *eth_spec, *eth_mask;
101 
102  struct virtchnl_proto_hdr *hdr;
104  u16 ether_type;
105  int ret = 0;
106 
107  u8 ipv6_addr_mask[16] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
108  0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
109 
110  hdr = &rcfg->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
111  type = item->type;
112 
113  switch (type)
114  {
116  eth_spec = item->spec;
117  eth_mask = item->mask;
118 
120 
121  if (eth_spec && eth_mask)
122  {
123  if (!ether_addr_is_zero (&eth_mask->src) ||
124  !ether_addr_is_zero (&eth_mask->dst))
125  {
126  ret = avf_flow_error_set (error, AVF_FAILURE,
128  "Invalid MAC_addr mask.");
129  return ret;
130  }
131 
132  if (eth_mask->type)
133  {
134  if (eth_mask->type != 0xffff)
135  {
136  ret = avf_flow_error_set (error, AVF_FAILURE,
138  "Invalid type mask.");
139  return ret;
140  }
141  }
142  }
143 
144  if (eth_spec && eth_mask && eth_mask->type)
145  {
146  ether_type = clib_net_to_host_u16 (eth_spec->type);
147  if (ether_type == AVF_ETHER_TYPE_IPV4 ||
148  ether_type == AVF_ETHER_TYPE_IPV6)
149  {
150  ret = avf_flow_error_set (error, AVF_FAILURE,
152  "Unsupported ether_type.");
153  return ret;
154  }
155 
157  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, ETH, ETHERTYPE);
158 
159  clib_memcpy (hdr->buffer, eth_spec, sizeof (*eth_spec));
160  }
161  break;
162 
164  ipv4_spec = item->spec;
165  ipv4_mask = item->mask;
167 
168  if (ipv4_spec && ipv4_mask)
169  {
170  if (ipv4_mask->version_ihl || ipv4_mask->total_length ||
171  ipv4_mask->packet_id || ipv4_mask->fragment_offset ||
172  ipv4_mask->hdr_checksum)
173  {
174  ret = avf_flow_error_set (error, AVF_FAILURE,
176  "Invalid IPv4 mask.");
177  return ret;
178  }
179 
180  if (ipv4_mask->type_of_service == 0xff)
181  {
182  rcfg->input_set |= AVF_INSET_IPV4_TOS;
183  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, DSCP);
184  }
185 
186  if (ipv4_mask->next_proto_id == 0xff)
187  {
189  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, PROT);
190  }
191 
192  if (ipv4_mask->time_to_live == 0xff)
193  {
194  rcfg->input_set |= AVF_INSET_IPV4_TTL;
195  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, TTL);
196  }
197 
198  if (ipv4_mask->src_addr == 0xffffffff)
199  {
200  rcfg->input_set |= AVF_INSET_IPV4_SRC;
201  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, SRC);
202  }
203 
204  if (ipv4_mask->dst_addr == 0xffffffff)
205  {
206  rcfg->input_set |= AVF_INSET_IPV4_DST;
207  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, DST);
208  }
209 
210  clib_memcpy (hdr->buffer, ipv4_spec, sizeof (*ipv4_spec));
211  }
212  break;
213 
215  ipv6_spec = item->spec;
216  ipv6_mask = item->mask;
218 
219  if (ipv6_spec && ipv6_mask)
220  {
221  if (ipv6_mask->payload_len)
222  {
223  ret = avf_flow_error_set (error, AVF_FAILURE,
225  "Invalid IPv6 mask");
226  return ret;
227  }
228 
229  if ((ipv6_mask->vtc_flow &
230  clib_host_to_net_u32 (AVF_IPV6_TC_MASK)) ==
231  (clib_host_to_net_u32 (AVF_IPV6_TC_MASK)))
232  {
233  rcfg->input_set |= AVF_INSET_IPV6_TC;
234  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, TC);
235  }
236 
237  if (ipv6_mask->proto == 0xff)
238  {
240  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, PROT);
241  }
242 
243  if (ipv6_mask->hop_limits == 0xff)
244  {
246  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, HOP_LIMIT);
247  }
248 
249  if (!clib_memcmp (ipv6_mask->src_addr, ipv6_addr_mask,
250  sizeof (ipv6_mask->src_addr)))
251  {
252  rcfg->input_set |= AVF_INSET_IPV6_SRC;
253  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, SRC);
254  }
255  if (!clib_memcmp (ipv6_mask->dst_addr, ipv6_addr_mask,
256  sizeof (ipv6_mask->dst_addr)))
257  {
258  rcfg->input_set |= AVF_INSET_IPV6_DST;
259  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, DST);
260 
261  clib_memcpy (hdr->buffer, ipv6_spec, sizeof (*ipv6_spec));
262  }
263  }
264 
265  break;
266 
268  udp_spec = item->spec;
269  udp_mask = item->mask;
271 
272  if (udp_spec && udp_mask)
273  {
274  if (udp_mask->dgram_len || udp_mask->dgram_cksum)
275  {
276  ret = avf_flow_error_set (error, AVF_FAILURE,
278  "Invalid UDP mask");
279  return ret;
280  };
281 
282  if (udp_mask->src_port == 0xffff)
283  {
285  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, UDP, SRC_PORT);
286  }
287 
288  if (udp_mask->dst_port == 0xffff)
289  {
291  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, UDP, DST_PORT);
292  }
293 
294  clib_memcpy (hdr->buffer, udp_spec, sizeof (*udp_spec));
295  }
296  break;
297 
299  tcp_spec = item->spec;
300  tcp_mask = item->mask;
302 
303  if (tcp_spec && tcp_mask)
304  {
305  if (tcp_mask->sent_seq || tcp_mask->recv_ack || tcp_mask->data_off ||
306  tcp_mask->tcp_flags || tcp_mask->rx_win || tcp_mask->cksum ||
307  tcp_mask->tcp_urp)
308  {
309  ret = avf_flow_error_set (error, AVF_FAILURE,
311  "Invalid TCP mask");
312  return ret;
313  }
314 
315  if (tcp_mask->src_port == 0xffff)
316  {
318  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, TCP, SRC_PORT);
319  }
320 
321  if (tcp_mask->dst_port == 0xffff)
322  {
324  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, TCP, DST_PORT);
325  }
326 
327  clib_memcpy (hdr->buffer, tcp_spec, sizeof (*tcp_spec));
328  }
329 
330  break;
331 
333  sctp_spec = item->spec;
334  sctp_mask = item->mask;
336 
337  if (sctp_spec && sctp_mask)
338  {
339  if (sctp_mask->cksum)
340  {
341  ret = avf_flow_error_set (error, AVF_FAILURE,
343  "Invalid UDP mask");
344  return ret;
345  }
346 
347  if (sctp_mask->src_port == 0xffff)
348  {
350  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, SCTP, SRC_PORT);
351  }
352 
353  if (sctp_mask->dst_port == 0xffff)
354  {
356  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, SCTP, DST_PORT);
357  }
358 
359  clib_memcpy (hdr->buffer, sctp_spec, sizeof (*sctp_spec));
360  }
361  break;
362 
364  gtp_spec = item->spec;
365  gtp_mask = item->mask;
367 
368  if (gtp_spec && gtp_mask)
369  {
370  if (gtp_mask->v_pt_rsv_flags || gtp_mask->msg_type ||
371  gtp_mask->msg_len)
372  {
373  ret = avf_flow_error_set (error, AVF_FAILURE,
375  "Invalid GTP mask");
376  return ret;
377  }
378 
379  if (gtp_mask->teid == 0xffffffff)
380  {
382  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, GTPU_IP, TEID);
383  }
384 
385  clib_memcpy (hdr->buffer, gtp_spec, sizeof (*gtp_spec));
386  }
387 
388  break;
389 
391  gtp_psc_spec = item->spec;
392  gtp_psc_mask = item->mask;
394 
395  if (gtp_psc_spec && gtp_psc_mask)
396  {
397  if (gtp_psc_mask->qfi == 0xff)
398  {
399  rcfg->input_set |= AVF_INSET_GTPU_QFI;
400  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, GTPU_EH, QFI);
401  }
402 
403  clib_memcpy (hdr->buffer, gtp_psc_spec, sizeof (*gtp_psc_spec));
404  }
405 
406  break;
407 
409  l2tpv3oip_spec = item->spec;
410  l2tpv3oip_mask = item->mask;
412 
413  if (l2tpv3oip_spec && l2tpv3oip_mask)
414  {
415  if (l2tpv3oip_mask->session_id == 0xffffffff)
416  {
418  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, L2TPV3, SESS_ID);
419  }
420 
421  clib_memcpy (hdr->buffer, l2tpv3oip_spec, sizeof (*l2tpv3oip_spec));
422  }
423  break;
424 
426  esp_spec = item->spec;
427  esp_mask = item->mask;
429 
430  if (esp_spec && esp_mask)
431  {
432  if (esp_mask->spi == 0xffffffff)
433  {
434  rcfg->input_set |= AVF_INSET_ESP_SPI;
435  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, ESP, SPI);
436  }
437 
438  clib_memcpy (hdr->buffer, esp_spec, sizeof (*esp_spec));
439  }
440  break;
441 
443  ah_spec = item->spec;
444  ah_mask = item->mask;
446 
447  if (ah_spec && ah_mask)
448  {
449  if (ah_mask->spi == 0xffffffff)
450  {
451  rcfg->input_set |= AVF_INSET_AH_SPI;
452  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, AH, SPI);
453  }
454 
455  clib_memcpy (hdr->buffer, ah_spec, sizeof (*ah_spec));
456  }
457  break;
458 
460  pfcp_spec = item->spec;
461  pfcp_mask = item->mask;
463 
464  if (pfcp_spec && pfcp_mask)
465  {
466  if (pfcp_mask->s_field == 0xff)
467  {
469  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, PFCP, S_FIELD);
470  }
471 
472  clib_memcpy (hdr->buffer, pfcp_spec, sizeof (*pfcp_spec));
473  }
474  break;
475 
476  default:
478  item, "Invalid pattern item.");
479  return ret;
480  }
481 
482  return 0;
483 }
484 
485 int
486 avf_fdir_rcfg_act_queue (struct avf_fdir_conf *rcfg, int queue, int size,
487  int act_idx)
488 {
489  if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
490  return -AVF_FAILURE;
491 
492  struct virtchnl_filter_action *filter_action;
493 
494  filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
495  filter_action->type = VIRTCHNL_ACTION_QUEUE;
496  filter_action->act_conf.queue.index = queue;
497 
498  if (size == 1)
499  return 0;
500  else if (is_pow2 (size))
501  filter_action->act_conf.queue.region = fls_u32 (size) - 1;
502 
503  return 0;
504 }
505 
506 int
508  const struct avf_flow_action *act, int act_idx,
509  struct avf_flow_error *error)
510 {
511  const struct avf_flow_action_rss *rss = act->conf;
512  struct virtchnl_filter_action *filter_action;
513  u32 i;
514  int ret;
515 
516  filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
517 
518  if (rss->queue_num <= 1)
519  {
521  act, "Queue region size can't be 0 or 1.");
522  return ret;
523  }
524 
525  /* check if queue index for queue region is continuous */
526  for (i = 0; i < rss->queue_num - 1; i++)
527  {
528  if (rss->queue[i + 1] != rss->queue[i] + 1)
529  {
530  ret =
532  act, "Discontinuous queue region");
533  return ret;
534  }
535  }
536 
537  if (rss->queue[rss->queue_num - 1] >= rcfg->nb_rx_queues)
538  {
540  act, "Invalid queue region indexes.");
541  return ret;
542  }
543 
544  if (!(is_pow2 (rss->queue_num) &&
546  {
548  act,
549  "The region size should be any of the"
550  "following values: 1, 2, 4, 8, 16, 32"
551  ", 64, 128 as long as the total number of"
552  "queues do not exceed the VSI allocation");
553  return ret;
554  }
555 
556  filter_action->type = VIRTCHNL_ACTION_Q_REGION;
557  filter_action->act_conf.queue.index = rss->queue[0];
558  filter_action->act_conf.queue.region = fls_u32 (rss->queue_num) - 1;
559 
560  return 0;
561 }
562 
563 int
564 avf_fdir_rcfg_act_drop (struct avf_fdir_conf *rcfg, int act_idx)
565 {
566  struct virtchnl_filter_action *filter_action;
567 
568  if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
569  return -AVF_FAILURE;
570 
571  filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
572  filter_action->type = VIRTCHNL_ACTION_DROP;
573 
574  return 0;
575 }
576 
577 int
578 avf_fdir_rcfg_act_mark (struct avf_fdir_conf *rcfg, const u32 mark,
579  int act_idx)
580 {
581  struct virtchnl_filter_action *filter_action;
582  if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
583  return -AVF_FAILURE;
584 
585  filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
586 
587  filter_action->type = VIRTCHNL_ACTION_MARK;
588  filter_action->act_conf.mark_id = mark;
589 
590  return 0;
591 }
592 
593 int
595  struct avf_fdir_conf *rcfg)
596 {
597  int ret;
598  rcfg->add_fltr.vsi_id = rcfg->vsi;
599  rcfg->add_fltr.validate_only = 1;
600  struct virtchnl_fdir_add fdir_ret;
601 
602  ret =
604  sizeof (rcfg->add_fltr), &fdir_ret, sizeof (fdir_ret));
605 
606  if (ret != 0)
607  {
608  return ret;
609  }
610 
611  if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
612  {
613  ret = -fdir_ret.status;
614  }
615 
616  return ret;
617 }
618 
619 int
621 {
622  int ret;
623  rcfg->add_fltr.vsi_id = rcfg->vsi;
624  rcfg->add_fltr.validate_only = 0;
625  struct virtchnl_fdir_add fdir_ret;
626 
627  ret =
629  sizeof (rcfg->add_fltr), &fdir_ret, sizeof (fdir_ret));
630 
631  if (ret != 0)
632  {
633  return ret;
634  }
635 
636  rcfg->flow_id = fdir_ret.flow_id;
637 
638  if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
639  {
640  ret = -fdir_ret.status;
641  }
642 
643  return ret;
644 }
645 
646 int
648 {
649  int ret;
650  struct virtchnl_fdir_del fdir_ret;
651  rcfg->del_fltr.vsi_id = rcfg->vsi;
652  rcfg->del_fltr.flow_id = rcfg->flow_id;
653 
654  ret =
656  sizeof (rcfg->del_fltr), &fdir_ret, sizeof (fdir_ret));
657 
658  if (ret != 0)
659  {
660  return ret;
661  }
662 
663  if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
664  {
665  ret = -fdir_ret.status;
666  }
667 
668  return ret;
669 }
670 
671 int
673  struct avf_fdir_conf *rcfg,
674  struct avf_flow_error *error)
675 {
676  int act_idx = 0, ret = 0;
677  u32 dest_num = 0;
678  u32 mark_num = 0;
679  u32 act_num;
680  struct virtchnl_filter_action *filter_action;
681  const struct avf_flow_action_queue *act_q;
682  const struct avf_flow_action_mark *act_msk;
683 
684  struct virtchnl_fdir_rule *rule_cfg = &rcfg->add_fltr.rule_cfg;
685 
686  for (; actions->type != VIRTCHNL_ACTION_NONE; actions++, act_idx++)
687  {
688  switch (actions->type)
689  {
691  dest_num++;
692  filter_action = &rule_cfg->action_set.actions[act_idx];
693  filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
694  rule_cfg->action_set.count++;
695  break;
696 
698  dest_num++;
699  ret = avf_fdir_rcfg_act_drop (rcfg, act_idx);
700  if (ret)
701  return ret;
702 
703  rule_cfg->action_set.count++;
704  break;
705 
707  dest_num++;
708  act_q = actions->conf;
709 
710  if (act_q->index >= rcfg->nb_rx_queues)
711  {
712  ret = avf_flow_error_set (error, AVF_FAILURE,
714  "Invalid queue for FDIR.");
715  return -AVF_FAILURE;
716  }
717 
718  ret = avf_fdir_rcfg_act_queue (rcfg, act_q->index, 1, act_idx);
719  if (ret)
720  return ret;
721 
722  rule_cfg->action_set.count++;
723  break;
724 
726  dest_num++;
727  filter_action = &rule_cfg->action_set.actions[act_idx];
728  ret = avf_fdir_parse_action_qregion (rcfg, actions, act_idx, error);
729  if (ret)
730  return ret;
731 
732  rule_cfg->action_set.count++;
733  break;
734 
736  mark_num++;
737  act_msk = actions->conf;
738  rcfg->mark_flag = 1;
739 
740  ret = avf_fdir_rcfg_act_mark (rcfg, act_msk->id, act_idx);
741  if (ret)
742  return ret;
743 
744  rule_cfg->action_set.count++;
745  break;
746 
747  default:
748  ret =
750  actions, "Invalid action.");
751  return ret;
752  }
753  }
754 
755  if (dest_num >= 2)
756  {
758  actions, "Unsupported action combination");
759  return ret;
760  }
761 
762  if (mark_num >= 2)
763  {
765  actions, "Too many mark actions");
766  return ret;
767  }
768 
769  if (dest_num + mark_num == 0)
770  {
772  actions, "Empty action");
773  return ret;
774  }
775 
776  /* Mark only is equal to mark + passthru. */
777  act_num = rule_cfg->action_set.count;
778  if (dest_num == 0)
779  {
780  filter_action = &rule_cfg->action_set.actions[act_num];
781  filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
782  rule_cfg->action_set.count = ++act_num;
783  }
784 
785  return ret;
786 }
787 
788 int
790  struct avf_flow_item avf_items[],
791  struct avf_flow_error *error)
792 {
793  int layer = 0;
794  int ret = 0;
795  struct avf_flow_item *item;
796 
797  for (item = avf_items; item->type != VIRTCHNL_PROTO_HDR_NONE; item++)
798  {
799  ret = avf_fdir_rcfg_set_field (rcfg, layer, item, error);
800  if (ret)
801  return ret;
802 
803  rcfg->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
804  }
805 
806  return ret;
807 }
808 
809 int
811  enum avf_flow_error_type type, const void *cause,
812  const char *message)
813 {
814  if (error)
815  {
816  *error = (struct avf_flow_error){
817  .type = type,
818  .cause = cause,
819  .message = message,
820  };
821  }
822 
823  return code;
824 }
825 
826 char *
828 {
829  enum virtchnl_fdir_prgm_status status;
830  char *s = NULL;
831 
832  err_no = -err_no;
833 
834  if (err_no >= VIRTCHNL_FDIR_FAILURE_MAX)
835  return "Failed to program the rule due to other reasons";
836 
837  status = (enum virtchnl_fdir_prgm_status) err_no;
838  switch (status)
839  {
841  s = "Succeed in programming rule request by PF";
842  break;
844  s = "Failed to add rule request due to no hardware resource";
845  break;
847  s = "Failed to add rule request due to the rule is already existed";
848  break;
850  s = "Failed to add rule request due to the rule is conflict with "
851  "existing rule";
852  break;
854  s = "Failed to delete rule request due to this rule doesn't exist";
855  break;
857  s = "Failed to add rule request due to the hardware doesn't support";
858  break;
860  s = "Failed to add rule request due to time out for programming";
861  break;
863  s = "Succeed in programming rule request by PF";
864  break;
865  default:
866  s = "Failed to program the rule due to other reasons";
867  break;
868  }
869 
870  return s;
871 }
872 
873 /*
874  * fd.io coding-style-patch-verification: ON
875  *
876  * Local Variables:
877  * eval: (c-set-style "gnu")
878  * End:
879  */
u16 src_port
Source port.
#define AVF_INSET_IPV4_TTL
const void * cause
Object responsible for the error.
const u16 * queue
Queue indices to use.
u32 id
Integer value to return with packets.
int avf_fdir_parse_pattern(struct avf_fdir_conf *rcfg, struct avf_flow_item avf_items[], struct avf_flow_error *error)
Definition: avf_fdir_lib.c:789
#define AVF_INSET_PFCP_S_FIELD
const void * mask
Bit-mask applied to spec and last.
#define AVF_INSET_TCP_SRC_PORT
enum avf_flow_error_type type
Cause field and error types.
u16 payload_len
IP packet length - includes header size.
#define count_leading_zeros(x)
Definition: clib.h:160
static int ether_addr_is_zero(const struct avf_ether_addr *ea)
Definition: avf_fdir_lib.c:35
u8 tcp_flags
TCP flags.
#define AVF_INSET_ETHERTYPE
SCTP Header.
#define VIRTCHNL_MAX_NUM_PROTO_HDRS
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
Matches a L2TPv3 over IP header.
#define AVF_INSET_IPV4_TOS
virtchnl_proto_hdr_type
int avf_fdir_rcfg_act_queue(struct avf_fdir_conf *rcfg, int queue, int size, int act_idx)
Set action as to queue(group), conflict with drop action.
Definition: avf_fdir_lib.c:486
#define AVF_ETHER_TYPE_IPV4
IPv4 Protocol.
#define AVF_IPV6_TC_MASK
Definition: avf_fdir_lib.c:22
#define AVF_INSET_SCTP_SRC_PORT
u16 dst_port
Destin port.
u16 dst_port
TCP destination port.
#define AVF_INSET_UDP_SRC_PORT
int avf_fdir_parse_action_qregion(struct avf_fdir_conf *rcfg, const struct avf_flow_action *act, int act_idx, struct avf_flow_error *error)
Set action as to queue group, conflict with drop action.
Definition: avf_fdir_lib.c:507
enum virtchnl_action type
int count
specify where protocol header start from.
char * avf_fdir_prgm_error_decode(int err_no)
Definition: avf_fdir_lib.c:827
#define AVF_INSET_IPV6_TC
u8 v_pt_rsv_flags
Version (3b), protocol type (1b), reserved (1b), Extension header flag (1b), Sequence number flag (1b...
#define AVF_L2TPV3OIP_SESSION_ID
static int fls_u32(u32 x)
Definition: avf_fdir_lib.c:29
u8 time_to_live
time to live
unsigned char u8
Definition: types.h:56
avf_flow_error_type
Those headers used temporary, maybe OS packet definition can replace.
u16 cksum
TCP checksum.
unsigned int u32
Definition: types.h:88
#define clib_memcpy(d, s, n)
Definition: string.h:197
#define AVF_INSET_GTPU_QFI
enum virtchnl_fdir_prgm_status status
struct avf_ether_addr src
Source MAC.
u16 src_port
UDP source port.
Verbose error structure definition.
#define AVF_INSET_TCP_DST_PORT
UDP Header.
struct virtchnl_filter_action_set action_set
u8 version_ihl
version and header length
struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS]
int avf_fdir_parse_action(const struct avf_flow_action actions[], struct avf_fdir_conf *rcfg, struct avf_flow_error *error)
Definition: avf_fdir_lib.c:672
#define AVF_FAILURE
u8 proto
Protocol, next header.
const void * conf
Pointer to action configuration object.
u16 packet_id
packet ID
u32 vtc_flow
IP version, traffic class & flow label.
u32 spi
Security Parameters Index.
u8 qfi
QoS flow identifier.
#define AVF_INSET_SCTP_DST_PORT
int avf_fdir_rcfg_act_mark(struct avf_fdir_conf *rcfg, const u32 mark, int act_idx)
Set action as mark, it can co-exist with to queue(group) or drop action.
Definition: avf_fdir_lib.c:578
u16 total_length
length of packet
int avf_fdir_rule_destroy(struct avf_fdir_vc_ctx *ctx, struct avf_fdir_conf *rcfg)
Destroy a flow rule.
Definition: avf_fdir_lib.c:647
vl_api_fib_path_type_t type
Definition: fib_types.api:123
u8 dst_addr[16]
IP address of destination host(s).
Definition: cJSON.c:88
Match PFCP Header.
u8 hop_limits
Hop limits.
struct virtchnl_fdir_add add_fltr
u16 rx_win
RX flow control window.
u16 tcp_urp
TCP urgent pointer, if any.
virtchnl_fdir_prgm_status
#define AVF_INSET_ESP_SPI
#define AVF_INSET_GTPU_TEID
long ctx[MAX_CONNS]
Definition: main.c:144
const void * spec
Pointer to item specification structure.
unsigned short u16
Definition: types.h:57
u8 msg_type
Message type.
int avf_fdir_rcfg_set_field(struct avf_fdir_conf *rcfg, int layer, struct avf_flow_item *item, struct avf_flow_error *error)
Set a match field on specific protocol layer, if any match field already be set on this layer...
Definition: avf_fdir_lib.c:85
u32 size
Definition: vhost_user.h:125
ESP Header.
u16 dst_port
UDP destination port.
struct virtchnl_filter_action::@32::@33 queue
Specific pattern item.
u16 src_port
TCP source port.
u16 dgram_len
UDP datagram length.
u16 fragment_offset
fragmentation offset
u16 type
EtherType or TPID.
u32 sent_seq
TX data sequence number.
int avf_fdir_rcfg_destroy(struct avf_fdir_conf *rcfg)
Destroy a rule cfg object.
Definition: avf_fdir_lib.c:62
u32 recv_ack
RX data acknowledgment sequence number.
int avf_fdir_rcfg_act_drop(struct avf_fdir_conf *rcfg, int act_idx)
Set action as as drop, conflict with to queue(gropu) action.
Definition: avf_fdir_lib.c:564
TCP Header.
int avf_fdir_rcfg_validate(struct avf_fdir_vc_ctx *ctx, struct avf_fdir_conf *rcfg)
Validate a flow rule cfg, check with PF driver if the rule cfg is supportted or not.
Definition: avf_fdir_lib.c:594
IPv6 Header.
#define AVF_INSET_IPV6_NEXT_HDR
int avf_fdir_rcfg_create(struct avf_fdir_conf **rcfg, int tunnel_level, u16 vsi, u16 nrxq)
Create a rule cfg object.
Definition: avf_fdir_lib.c:43
#define clib_memcmp(s1, s2, m1)
Definition: string.h:734
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:261
Matches a GTPv1 header.
u32 session_id
Session ID.
u8 next_proto_id
protocol ID
#define AVF_INSET_UDP_DST_PORT
u8 src_addr[16]
IP address of source host.
int avf_fdir_rcfg_set_hdr(struct avf_fdir_conf *rcfg, int layer, enum virtchnl_proto_hdr_type hdr)
Set match potocol header on specific layer, it will overwrite is already be set.
Definition: avf_fdir_lib.c:70
#define AVF_INSET_AH_SPI
u32 dst_addr
destination address
IPv4 Header.
TCP(sport=80, dport=6872)
int avf_flow_error_set(struct avf_flow_error *error, int code, enum avf_flow_error_type type, const void *cause, const char *message)
Initialize flow error structure.
Definition: avf_fdir_lib.c:810
u16 index
Queue index to use.
cJSON * item
Definition: cJSON.h:222
#define AVF_FDIR_MAX_QREGION_SIZE
Definition: avf_fdir_lib.c:23
enum virtchnl_proto_hdr_type type
Item type.
int avf_fdir_rule_create(struct avf_fdir_vc_ctx *ctx, struct avf_fdir_conf *rcfg)
Create a flow rule, a FDIR rule is expected to be programmed into hardware if return success...
Definition: avf_fdir_lib.c:620
#define AVF_INSET_IPV4_PROTO
static void clib_mem_free(void *p)
Definition: mem.h:311
u32 cksum
Checksum.
struct virtchnl_fdir_rule rule_cfg
struct virtchnl_fdir_del del_fltr
struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]
union virtchnl_filter_action::@32 act_conf
u8 data_off
Data offset.
Matches a GTP PDU extension header with type 0x85.
#define AVF_INSET_IPV6_HOP_LIMIT
static void * clib_mem_alloc(uword size)
Definition: mem.h:253
avf_fdir_vc_op_t vc_op
u8 type_of_service
type of service
struct virtchnl_proto_hdrs proto_hdrs
static uword is_pow2(uword x)
Definition: clib.h:267
u32 teid
Tunnel endpoint identifier.
enum virtchnl_fdir_prgm_status status
#define AVF_INSET_IPV4_DST
#define AVF_INSET_IPV4_SRC
vl_api_flow_action_t actions
Definition: flow_types.api:224
#define AVF_ETHER_TYPE_IPV6
IPv6 Protocol.
u16 hdr_checksum
header checksum
#define VIRTCHNL_MAX_NUM_ACTIONS
#define AVF_INSET_IPV6_SRC
const char * message
Human-readable error message.
u32 queue_num
Number of entries in queue.
u32 src_addr
source address
u16 msg_len
Message length.
u16 dgram_cksum
UDP datagram checksum.
enum virtchnl_action type
Action type.
#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field)
enum virtchnl_proto_hdr_type type
struct avf_ether_addr dst
Destination MAC.
Match IP Authentication Header (AH), RFC 4302.
#define AVF_INSET_IPV6_DST