FD.io VPP  v20.01-48-g3e0dafb74
Vector Packet Processing
flow.c
/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vlib/unix/cj.h>
#include <assert.h>

#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>
#include <dpdk/device/dpdk.h>

#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>

/* check if flow is L2 flow */
#define FLOW_IS_L2_LAYER(f) \
  (f->type == VNET_FLOW_TYPE_ETHERNET)

/* check if flow is L4 type */
#define FLOW_IS_L4_LAYER(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))

/* check if flow is L4 tunnel type */
#define FLOW_IS_L4_TUNNEL_LAYER(f) \
  ((f->type >= VNET_FLOW_TYPE_IP4_VXLAN) && \
   (f->type <= VNET_FLOW_TYPE_IP6_GTPU_IP6))

/* constant structs */
static const struct rte_flow_attr ingress = {.ingress = 1 };

static inline bool
mac_address_is_all_zero (const u8 addr[6])
{
  int i = 0;

  for (i = 0; i < 6; i++)
    if (addr[i] != 0)
      return false;

  return true;
}
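
/*
 * dpdk_flow_add translates a vnet_flow_t into an rte_flow pattern and
 * action list.  Throughout the function, two-element arrays (eth, ip4,
 * ip6, udp, tcp, gtp, raw) hold the match specification in element [0]
 * and the corresponding mask in element [1]; item->spec and item->mask
 * are then pointed at those elements.
 */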

static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_eth eth[2] = { };
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv4 inner_ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_ipv6 inner_ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_item_gtp gtp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_action_queue queue = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
  bool fate = false;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];
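
  /*
   * Each raw[] element overlays an rte_flow_item_raw header with enough
   * trailing bytes (val[raw_sz ...]) to hold a vxlan_header_t; raw[0]
   * carries the spec and raw[1] the mask for the RAW pattern item built
   * in the VXLAN branch below.
   */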

  u16 src_port, dst_port, src_port_mask, dst_port_mask;
  u8 protocol;
  int rv = 0;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  if (f->type == VNET_FLOW_TYPE_ETHERNET)
    {
      vnet_flow_ethernet_t *te = &f->ethernet;

      clib_memset (&eth[0], 0, sizeof (eth[0]));
      clib_memset (&eth[1], 0, sizeof (eth[1]));

      /* check if SMAC/DMAC/Ether_type assigned */
      if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
        {
          clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
                            sizeof (eth[0].dst));
          clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
        }

      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
        {
          clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
                            sizeof (eth[0].src));
          clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
        }

      if (te->eth_hdr.type)
        {
          eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
          eth[1].type = clib_host_to_net_u16 (0xFFFF);
        }

      item->spec = eth;
      item->mask = eth + 1;
    }
  else
    {
      item->spec = NULL;
      item->mask = NULL;
    }

  /* VLAN */
  if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
      (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = NULL;
      item->mask = NULL;
    }

  if (FLOW_IS_L2_LAYER (f))
    goto pattern_end;

  /* IP */
  vec_add2 (items, item, 1);
  if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;

      if (!clib_memcmp (&t6->src_addr.mask, &zero_addr, 16) &&
          !clib_memcmp (&t6->dst_addr.mask, &zero_addr, 16))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
          clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
          clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
          clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
          item->spec = ip6;
          item->mask = ip6 + 1;
        }

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!t4->src_addr.mask.as_u32 && !t4->dst_addr.mask.as_u32)
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
          ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
          ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
          ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
          item->spec = ip4;
          item->mask = ip4 + 1;
        }

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Layer 4 */
  vec_add2 (items, item, 1);
  if (protocol == IP_PROTOCOL_UDP)
    {
      item->type = RTE_FLOW_ITEM_TYPE_UDP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = udp;
          item->mask = udp + 1;
        }
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      item->type = RTE_FLOW_ITEM_TYPE_TCP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }

      tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->spec = tcp;
      item->mask = tcp + 1;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Tunnel header match */
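  /*
   * For VXLAN the outer headers are matched with the ETH/IPV4/UDP items
   * above; the VXLAN header itself is matched with a RAW item whose
   * pattern is relative to the end of the previous (UDP) item, so it
   * lines up with the start of the VXLAN header in the packet.
   */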
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
        .flags = VXLAN_FLAGS_I,
        .vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
        .flags = 0xff,
        .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
    {
      vnet_flow_ip4_gtpc_t *gc = &f->ip4_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;

      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
        {
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV4;

          vnet_flow_ip4_gtpu_ip4_t *gu4 = &f->ip4_gtpu_ip4;
          if (!gu4->inner_src_addr.mask.as_u32 &&
              !gu4->inner_dst_addr.mask.as_u32)
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
              inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
              inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
              inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
              item->spec = inner_ip4;
              item->mask = inner_ip4 + 1;
            }
        }
      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
        {
          ip6_address_t zero_addr;
          vnet_flow_ip4_gtpu_ip6_t *gu6 = &f->ip4_gtpu_ip6;

          clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV6;

          if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
              !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
                                &gu6->inner_src_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
                                &gu6->inner_src_addr.mask, 16);
              clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
                                &gu6->inner_dst_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
                                &gu6->inner_dst_addr.mask, 16);
              item->spec = inner_ip6;
              item->mask = inner_ip6 + 1;
            }
        }
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPC)
    {
      vnet_flow_ip6_gtpc_t *gc = &f->ip6_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPU)
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;

      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
        {
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV4;

          vnet_flow_ip6_gtpu_ip4_t *gu4 = &f->ip6_gtpu_ip4;

          if (!gu4->inner_src_addr.mask.as_u32 &&
              !gu4->inner_dst_addr.mask.as_u32)
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
              inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
              inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
              inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
              item->spec = inner_ip4;
              item->mask = inner_ip4 + 1;
            }
        }

      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
        {
          ip6_address_t zero_addr;
          vnet_flow_ip6_gtpu_ip6_t *gu6 = &f->ip6_gtpu_ip6;

          clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV6;

          if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
              !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
                                &gu6->inner_src_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
                                &gu6->inner_src_addr.mask, 16);
              clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
                                &gu6->inner_dst_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
                                &gu6->inner_dst_addr.mask, 16);
              item->spec = inner_ip6;
              item->mask = inner_ip6 + 1;
            }

        }
    }

pattern_end:
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;

  /* Actions */
  /* Only one 'fate' can be assigned */
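  /*
   * QUEUE and DROP are "fate" actions and are mutually exclusive; if the
   * caller requested neither, a PASSTHRU action is appended so the rule
   * still ends with a fate.  MARK is not a fate action and may be
   * combined with any of them.
   */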
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      vec_add2 (actions, action, 1);
      queue.index = f->redirect_queue;
      action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
      action->conf = &queue;
      fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_DROP;
      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      else
        fate = true;
    }
  if (fate == false)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
    }

  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      vec_add2 (actions, action, 1);
      mark.id = fe->mark;
      action->type = RTE_FLOW_ACTION_TYPE_MARK;
      action->conf = &mark;
    }

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;

  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
                          &xd->last_flow_error);

  if (rv)
    {
      if (rv == -EINVAL)
        rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      else if (rv == -EEXIST)
        rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
      else
        rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
                                &xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}

int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
                  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;

  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
        pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }
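
  /*
   * Lookup entries freed on flow deletion are parked (see the DEL_FLOW
   * path below) instead of being returned to the pool immediately; they
   * are recycled here only once the main loop counter has moved on, so
   * packets already marked with the old id cannot hit a reused entry.
   */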

  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
                                  &xd->last_flow_error)))
        return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
        {
          /* make sure no action is taken for in-flight (marked) packets */
          fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
          clib_memset (fle, -1, sizeof (*fle));
          vec_add1 (xd->parked_lookup_indexes, fe->mark);
          xd->parked_loop_count = dm->vlib_main->main_loop_count;
        }

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }

  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
                       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
                       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 */
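      /*
       * mark 0 means "no mark": flows that need no lookup entry keep
       * fe->mark = 0 (see the else branch below), so pool index 0 must
       * never be handed out to a real flow.
       */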
      if (xd->flow_lookup_entries == 0)
        pool_get_aligned (xd->flow_lookup_entries, fle,
                          CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
        fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
        fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
        fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;

  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }
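
  /*
   * Adding the first flow turns on RX flow offload, which requires the
   * device to be reconfigured via dpdk_device_setup(); the flag is
   * cleared again under disable_rx_offload below once the last flow
   * entry has been removed.
   */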

  switch (flow->type)
    {
    case VNET_FLOW_TYPE_ETHERNET:
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
    case VNET_FLOW_TYPE_IP4_GTPC:
    case VNET_FLOW_TYPE_IP4_GTPU:
    case VNET_FLOW_TYPE_IP4_GTPU_IP4:
    case VNET_FLOW_TYPE_IP4_GTPU_IP6:
    case VNET_FLOW_TYPE_IP6_GTPC:
    case VNET_FLOW_TYPE_IP6_GTPU:
    case VNET_FLOW_TYPE_IP6_GTPU_IP4:
    case VNET_FLOW_TYPE_IP6_GTPU_IP6:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
        goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
        {
          clib_memset (fle, -1, sizeof (*fle));
          pool_put (xd->flow_lookup_entries, fle);
        }
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}

u8 *
format_dpdk_flow (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;

  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
                  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
                  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
                  xd->last_flow_error.message ? xd->last_flow_error.message :
                  "n/a");
      return s;
    }

  if (private_data >= vec_len (xd->flow_entries))
    return format (s, "unknown flow");

  fe = vec_elt_at_index (xd->flow_entries, private_data);
  s = format (s, "mark %u", fe->mark);
  return s;
}

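/*
 * Usage sketch (not part of this file, shown for orientation): callers do
 * not invoke dpdk_flow_ops_fn directly; it is reached through the generic
 * vnet flow layer.  Assuming the vnet_flow_add()/vnet_flow_enable() API
 * from vnet/flow/flow.h and a valid hw_if_index, adding an IPv4 n-tuple
 * flow that redirects matching UDP packets to RX queue 1 would look
 * roughly like:
 *
 *   vnet_main_t *vnm = vnet_get_main ();
 *   vnet_flow_t flow = { 0 };
 *   u32 flow_index;
 *
 *   flow.type = VNET_FLOW_TYPE_IP4_N_TUPLE;
 *   flow.actions = VNET_FLOW_ACTION_REDIRECT_TO_QUEUE;
 *   flow.redirect_queue = 1;
 *   flow.ip4_n_tuple.protocol = IP_PROTOCOL_UDP;
 *   flow.ip4_n_tuple.dst_port.port = 4789;
 *   flow.ip4_n_tuple.dst_port.mask = 0xffff;
 *
 *   vnet_flow_add (vnm, &flow, &flow_index);
 *   vnet_flow_enable (vnm, flow_index, hw_if_index);
 *
 * vnet_flow_enable() then invokes the device's flow ops function
 * (dpdk_flow_ops_fn here) with VNET_FLOW_DEV_OP_ADD_FLOW.
 */
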
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */