FD.io VPP  v21.06-1-gbb7418cf9
Vector Packet Processing
flow.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stdbool.h>
#include <vlib/vlib.h>
#include <vppinfra/ring.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>

#include <avf/avf.h>
#include <avf/avf_advanced_flow.h>

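/* check if flow is ethernet class */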
#define FLOW_IS_ETHERNET_CLASS(f) (f->type == VNET_FLOW_TYPE_ETHERNET)

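/* check if flow is ipv4 class */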
#define FLOW_IS_IPV4_CLASS(f) \
  ((f->type == VNET_FLOW_TYPE_IP4) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU) || \
   (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) || \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) || \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))

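/* check if flow is ipv6 class */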
#define FLOW_IS_IPV6_CLASS(f) \
  ((f->type == VNET_FLOW_TYPE_IP6) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN))

/* check if flow is L3 type */
#define FLOW_IS_L3_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4) || (f->type == VNET_FLOW_TYPE_IP6))

/* check if flow is L4 type */
#define FLOW_IS_L4_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L4 tunnel type */
#define FLOW_IS_L4_TUNNEL_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU))

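/*
 * Virtual channel op callback handed to the fdir library through
 * struct avf_fdir_vc_ctx.  The library invokes it to program (add) or
 * remove (del) a flow director rule; the request is forwarded to the
 * PF via avf_program_flow ().  Returns 0 on success, -1 on error.
 */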
int
avf_fdir_vc_op_callback (void *vc_hdl, enum virthnl_adv_ops vc_op, void *in,
                         u32 in_len, void *out, u32 out_len)
{
  u32 dev_instance = *(u32 *) vc_hdl;
  avf_device_t *ad = avf_get_device (dev_instance);
  clib_error_t *err = 0;
  int is_add;

  if (vc_op >= VIRTCHNL_ADV_OP_MAX)
    {
      return -1;
    }

  switch (vc_op)
    {
    case VIRTCHNL_ADV_OP_ADD_FDIR_FILTER:
      is_add = 1;
      break;
    case VIRTCHNL_ADV_OP_DEL_FDIR_FILTER:
      is_add = 0;
      break;
    default:
      avf_log_err (ad, "unsupported avf virtual channel opcode %u\n",
                   (u32) vc_op);
      return -1;
    }

  err = avf_program_flow (dev_instance, is_add, in, in_len, out, out_len);
  if (err != 0)
    {
      avf_log_err (ad, "avf fdir program failed: %U", format_clib_error, err);
      clib_error_free (err);
      return -1;
    }

  avf_log_debug (ad, "avf fdir program success");
  return 0;
}

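/*
 * Translate a vnet_flow_t into an AVF flow director rule: build the
 * protocol item chain (ethernet, then IPv4/IPv6, then an optional L4
 * or tunnel header), collect the actions, parse both into the rule
 * config, and program the rule through the virtual channel context.
 */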
static int
avf_flow_add (u32 dev_instance, vnet_flow_t *f, avf_flow_entry_t *fe)
{
  avf_device_t *ad = avf_get_device (dev_instance);
  int rv = 0;
  int ret = 0;
  u16 src_port = 0, dst_port = 0;
  u16 src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  bool fate = false;
  struct avf_flow_error error;

  int layer = 0;
  int action_count = 0;

  struct avf_fdir_vc_ctx vc_ctx;
  struct avf_fdir_conf *filter;
  struct avf_flow_item avf_items[VIRTCHNL_MAX_NUM_PROTO_HDRS];
  struct avf_flow_action avf_actions[VIRTCHNL_MAX_NUM_ACTIONS];

  struct avf_ipv4_hdr ip4_spec = {}, ip4_mask = {};
  struct avf_ipv6_hdr ip6_spec = {}, ip6_mask = {};
  struct avf_tcp_hdr tcp_spec = {}, tcp_mask = {};
  struct avf_udp_hdr udp_spec = {}, udp_mask = {};
  struct avf_gtp_hdr gtp_spec = {}, gtp_mask = {};
  struct avf_l2tpv3oip_hdr l2tpv3_spec = {}, l2tpv3_mask = {};
  struct avf_esp_hdr esp_spec = {}, esp_mask = {};
  struct avf_ah_hdr ah_spec = {}, ah_mask = {};

  struct avf_flow_action_queue act_q = {};
  struct avf_flow_action_mark act_msk = {};

  enum
  {
    FLOW_UNKNOWN_CLASS,
    FLOW_ETHERNET_CLASS,
    FLOW_IPV4_CLASS,
    FLOW_IPV6_CLASS,
  } flow_class = FLOW_UNKNOWN_CLASS;

  if (FLOW_IS_ETHERNET_CLASS (f))
    flow_class = FLOW_ETHERNET_CLASS;
  else if (FLOW_IS_IPV4_CLASS (f))
    flow_class = FLOW_IPV4_CLASS;
  else if (FLOW_IS_IPV6_CLASS (f))
    flow_class = FLOW_IPV6_CLASS;
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  ret = avf_fdir_rcfg_create (&filter, 0, ad->vsi_id, ad->n_rx_queues);
  if (ret)
    {
      rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  /* init a virtual channel context */
  vc_ctx.vc_hdl = &dev_instance;
  vc_ctx.vc_op = avf_fdir_vc_op_callback;

  clib_memset (avf_items, 0, sizeof (avf_items));
  clib_memset (avf_actions, 0, sizeof (avf_actions));

  /* Ethernet Layer */
  avf_items[layer].type = VIRTCHNL_PROTO_HDR_ETH;
  avf_items[layer].spec = NULL;
  avf_items[layer].mask = NULL;
  layer++;

  if (flow_class == FLOW_IPV4_CLASS)
    {
      vnet_flow_ip4_t *ip4_ptr = &f->ip4;

      /* IPv4 Layer */
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV4;
      avf_items[layer].spec = &ip4_spec;
      avf_items[layer].mask = &ip4_mask;
      layer++;

      if ((!ip4_ptr->src_addr.mask.as_u32) &&
          (!ip4_ptr->dst_addr.mask.as_u32) && (!ip4_ptr->protocol.mask))
        {
          ;
        }
      else
        {
          ip4_spec.src_addr = ip4_ptr->src_addr.addr.as_u32;
          ip4_mask.src_addr = ip4_ptr->src_addr.mask.as_u32;

          ip4_spec.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
          ip4_mask.dst_addr = ip4_ptr->dst_addr.mask.as_u32;

          ip4_spec.next_proto_id = ip4_ptr->protocol.prot;
          ip4_mask.next_proto_id = ip4_ptr->protocol.mask;
        }

      if (FLOW_IS_L4_TYPE (f))
        {
          vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;

          src_port = ip4_n_ptr->src_port.port;
          dst_port = ip4_n_ptr->dst_port.port;
          src_port_mask = ip4_n_ptr->src_port.mask;
          dst_port_mask = ip4_n_ptr->dst_port.mask;
        }

      protocol = ip4_ptr->protocol.prot;
    }
  else if (flow_class == FLOW_IPV6_CLASS)
    {
      vnet_flow_ip6_t *ip6_ptr = &f->ip6;

      /* IPv6 Layer */
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV6;
      avf_items[layer].spec = &ip6_spec;
      avf_items[layer].mask = &ip6_mask;
      layer++;

      if ((ip6_address_is_zero (&ip6_ptr->src_addr.mask)) &&
          (ip6_address_is_zero (&ip6_ptr->dst_addr.mask)) &&
          (!ip6_ptr->protocol.mask))
        {
          ;
        }
      else
        {
          clib_memcpy (ip6_spec.src_addr, &ip6_ptr->src_addr.addr,
                       ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
          clib_memcpy (ip6_mask.src_addr, &ip6_ptr->src_addr.mask,
                       ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
          clib_memcpy (ip6_spec.dst_addr, &ip6_ptr->dst_addr.addr,
                       ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
          clib_memcpy (ip6_mask.dst_addr, &ip6_ptr->dst_addr.mask,
                       ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
          ip6_spec.proto = ip6_ptr->protocol.prot;
          ip6_mask.proto = ip6_ptr->protocol.mask;
        }

      if (FLOW_IS_L4_TYPE (f))
        {
          vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;

          src_port = ip6_n_ptr->src_port.port;
          dst_port = ip6_n_ptr->dst_port.port;
          src_port_mask = ip6_n_ptr->src_port.mask;
          dst_port_mask = ip6_n_ptr->dst_port.mask;
        }

      protocol = ip6_ptr->protocol.prot;
    }

  if (FLOW_IS_L3_TYPE (f))
    goto pattern_end;

  /* Layer 4 */
  switch (protocol)
    {
    case IP_PROTOCOL_L2TP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_L2TPV3;
      avf_items[layer].spec = &l2tpv3_spec;
      avf_items[layer].mask = &l2tpv3_mask;
      layer++;

      vnet_flow_ip4_l2tpv3oip_t *l2tph = &f->ip4_l2tpv3oip;
      l2tpv3_spec.session_id = clib_host_to_net_u32 (l2tph->session_id);
      l2tpv3_mask.session_id = ~0;
      break;

    case IP_PROTOCOL_IPSEC_ESP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_ESP;
      avf_items[layer].spec = &esp_spec;
      avf_items[layer].mask = &esp_mask;
      layer++;

      vnet_flow_ip4_ipsec_esp_t *esph = &f->ip4_ipsec_esp;
      esp_spec.spi = clib_host_to_net_u32 (esph->spi);
      esp_mask.spi = ~0;
      break;

    case IP_PROTOCOL_IPSEC_AH:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_AH;
      avf_items[layer].spec = &ah_spec;
      avf_items[layer].mask = &ah_mask;
      layer++;

      vnet_flow_ip4_ipsec_ah_t *ah = &f->ip4_ipsec_ah;
      ah_spec.spi = clib_host_to_net_u32 (ah->spi);
      ah_mask.spi = ~0;
      break;

    case IP_PROTOCOL_TCP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_TCP;
      avf_items[layer].spec = &tcp_spec;
      avf_items[layer].mask = &tcp_mask;
      layer++;

      if (src_port_mask)
        {
          tcp_spec.src_port = clib_host_to_net_u16 (src_port);
          tcp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
        }
      if (dst_port_mask)
        {
          tcp_spec.dst_port = clib_host_to_net_u16 (dst_port);
          tcp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
        }
      break;

    case IP_PROTOCOL_UDP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_UDP;
      avf_items[layer].spec = &udp_spec;
      avf_items[layer].mask = &udp_mask;
      layer++;

      if (src_port_mask)
        {
          udp_spec.src_port = clib_host_to_net_u16 (src_port);
          udp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
        }
      if (dst_port_mask)
        {
          udp_spec.dst_port = clib_host_to_net_u16 (dst_port);
          udp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
        }

      /* handle the UDP tunnels */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
        {
          avf_items[layer].type = VIRTCHNL_PROTO_HDR_GTPU_IP;
          avf_items[layer].spec = &gtp_spec;
          avf_items[layer].mask = &gtp_mask;
          layer++;

          vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
          gtp_spec.teid = clib_host_to_net_u32 (gu->teid);
          gtp_mask.teid = ~0;
        }
      break;

    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

pattern_end:
  /* pattern end flag */
  avf_items[layer].type = VIRTCHNL_PROTO_HDR_NONE;
  ret = avf_fdir_parse_pattern (filter, avf_items, &error);
  if (ret)
    {
      avf_log_err (ad, "avf fdir parse pattern failed: %s", error.message);
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Action */
  /* Only one 'fate' can be assigned */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_QUEUE;
      avf_actions[action_count].conf = &act_q;

      act_q.index = f->redirect_queue;
      fate = true;
      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_DROP;
      avf_actions[action_count].conf = NULL;

      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      else
        fate = true;

      action_count++;
    }

  if (fate == false)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_PASSTHRU;
      avf_actions[action_count].conf = NULL;

      fate = true;
      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_MARK;
      avf_actions[action_count].conf = &act_msk;
      action_count++;

      act_msk.id = fe->mark;
    }

405 
406  /* action end flag */
407  avf_actions[action_count].type = VIRTCHNL_ACTION_NONE;
408 
409  /* parse action */
410  ret = avf_fdir_parse_action (avf_actions, filter, &error);
411  if (ret)
412  {
413  avf_log_err (ad, "avf fdir parse action failed: %s", error.message);
414  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
415  goto done;
416  }

  /* create flow rule, save rule */
  ret = avf_fdir_rule_create (&vc_ctx, filter);

  if (ret)
    {
      avf_log_err (ad, "avf fdir rule create failed: %s",
                   avf_fdir_prgm_error_decode (ret));
      rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }
  else
    {
      fe->rcfg = filter;
    }
done:

  return rv;
}

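/*
 * vnet flow device op entry point.  On VNET_FLOW_DEV_OP_ADD_FLOW a
 * flow entry is allocated, a mark is reserved when the actions need
 * per-packet metadata (mark, redirect-to-node, buffer-advance), and
 * the rule is programmed; on VNET_FLOW_DEV_OP_DEL_FLOW the rule and
 * its lookup entry are destroyed.
 */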
int
avf_flow_ops_fn (vnet_main_t *vm, vnet_flow_dev_op_t op, u32 dev_instance,
                 u32 flow_index, uword *private_data)
{
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  avf_device_t *ad = avf_get_device (dev_instance);
  avf_flow_entry_t *fe = NULL;
  avf_flow_lookup_entry_t *fle = NULL;
  int rv = 0;

  if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  if (op == VNET_FLOW_DEV_OP_ADD_FLOW)
    {
      pool_get (ad->flow_entries, fe);
      fe->flow_index = flow->index;

      /* if we need to mark packets, assign one mark */
      if (flow->actions &
          (VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
           VNET_FLOW_ACTION_BUFFER_ADVANCE))
        {
          /* reserve slot 0 */
          if (ad->flow_lookup_entries == 0)
            pool_get_aligned (ad->flow_lookup_entries, fle,
                              CLIB_CACHE_LINE_BYTES);
          pool_get_aligned (ad->flow_lookup_entries, fle,
                            CLIB_CACHE_LINE_BYTES);
          fe->mark = fle - ad->flow_lookup_entries;

          /* install entry in the lookup table */
          clib_memset (fle, -1, sizeof (*fle));
          if (flow->actions & VNET_FLOW_ACTION_MARK)
            fle->flow_id = flow->mark_flow_id;
          if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
            fle->next_index = flow->redirect_device_input_next_index;
          if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
            fle->buffer_advance = flow->buffer_advance;

          if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) == 0)
            {
              ad->flags |= AVF_DEVICE_F_RX_FLOW_OFFLOAD;
            }
        }
      else
        fe->mark = 0;

      switch (flow->type)
        {
        case VNET_FLOW_TYPE_IP4:
        case VNET_FLOW_TYPE_IP6:
        case VNET_FLOW_TYPE_IP4_N_TUPLE:
        case VNET_FLOW_TYPE_IP6_N_TUPLE:
        case VNET_FLOW_TYPE_IP4_GTPU:
        case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
        case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
        case VNET_FLOW_TYPE_IP4_IPSEC_AH:
          if ((rv = avf_flow_add (dev_instance, flow, fe)))
            goto done;
          break;
        default:
          rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
          goto done;
        }

      *private_data = fe - ad->flow_entries;
    }
  else if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (ad->flow_entries, *private_data);

      struct avf_fdir_vc_ctx ctx;
      ctx.vc_hdl = &dev_instance;
      ctx.vc_op = avf_fdir_vc_op_callback;

      rv = avf_fdir_rule_destroy (&ctx, fe->rcfg);
      if (rv)
        return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
        {
          fle = pool_elt_at_index (ad->flow_lookup_entries, fe->mark);
          clib_memset (fle, -1, sizeof (*fle));
          pool_put_index (ad->flow_lookup_entries, fe->mark);
        }

      (void) avf_fdir_rcfg_destroy (fe->rcfg);
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (ad->flow_entries, fe);
      goto disable_rx_offload;
    }
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  if (rv)
    {
      if (fe)
        {
          clib_memset (fe, 0, sizeof (*fe));
          pool_put (ad->flow_entries, fe);
        }

      if (fle)
        {
          clib_memset (fle, -1, sizeof (*fle));
          pool_put (ad->flow_lookup_entries, fle);
        }
    }
disable_rx_offload:
  if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) != 0 &&
      pool_elts (ad->flow_entries) == 0)
    {
      ad->flags &= ~AVF_DEVICE_F_RX_FLOW_OFFLOAD;
    }

  return rv;
}

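/*
 * Usage sketch: one way a caller can reach avf_flow_ops_fn () above is
 * through the generic vnet flow API declared in vnet/flow/flow.h.  The
 * values below are illustrative only, and `vnm' and `hw_if_index' are
 * assumed to name an AVF interface with FDIR capability.
 *
 *   vnet_flow_t flow = {
 *     .type = VNET_FLOW_TYPE_IP4_N_TUPLE,
 *     .actions = VNET_FLOW_ACTION_REDIRECT_TO_QUEUE,
 *     .redirect_queue = 1,
 *   };
 *   flow.ip4_n_tuple.protocol.prot = IP_PROTOCOL_UDP;
 *   flow.ip4_n_tuple.protocol.mask = 0xff;
 *   flow.ip4_n_tuple.dst_port.port = 4789;
 *   flow.ip4_n_tuple.dst_port.mask = 0xffff;
 *
 *   u32 flow_index;
 *   if (vnet_flow_add (vnm, &flow, &flow_index) == 0)
 *     vnet_flow_enable (vnm, flow_index, hw_if_index);
 */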
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */