FD.io VPP  v19.04.2-12-g66b1689
Vector Packet Processing
ioam_cache_node.c
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * This file implements caching of the iOAM header and reattaching it to the
 * response message by performing request-response matching.
 * This works for TCP SYN/SYN-ACK exchanges.
 * The feature is used for anycast server selection: the cached iOAM data is
 * used to measure the complete round-trip network path and thereby help
 * select a server.
 * Two graph nodes are defined to:
 * 1. process packets that contain an iOAM header and cache it
 * 2. process TCP SYN-ACKs and reattach the iOAM header from the
 *    cache entry corresponding to the TCP SYN
 * These graph nodes are attached to the vnet graph based on the
 * ioam cache and classifier configs.
 * e.g.
 * If db06::06 is the anycast service IP6 address:
 *
 * set ioam ip6 cache
 *
 * Apply this classifier on the interface where requests for the anycast service are received:
 * classify session acl-hit-next ip6-node ip6-lookup table-index 0 match l3 ip6 dst db06::06
 *    ioam-decap anycast <<< ioam-decap is hooked to cache when set ioam ip6 cache is enabled
 *
 * Apply this classifier on the interface where responses from the anycast service are received:
 * classify session acl-hit-next ip6-node ip6-add-from-cache-hop-by-hop table-index 0 match l3
 *    ip6 src db06::06 ioam-encap anycast-response
 *
 */
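/*
 * Summary of the request/response matching implemented by the two node
 * functions below:
 *  - ip6-ioam-cache: for a pure TCP SYN, the iOAM hop-by-hop header is
 *    cached via ioam_cache_add(), stored with the acknowledgement number
 *    the peer is expected to return (SYN sequence number + 1); the SYN is
 *    then sent to ip6-pop-hop-by-hop, which strips the options.
 *  - ip6-add-from-cache-hop-by-hop: for a TCP SYN-ACK or RST, the cache is
 *    searched via ioam_cache_lookup() using the packet's ack number; on a
 *    hit, the cached rewrite plus an SR header are inserted ahead of the
 *    payload before the packet continues to ip6-lookup.
 */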
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>
#include <vnet/ip/ip.h>
#include <ioam/ip6/ioam_cache.h>
#include <vnet/ip/ip6_hop_by_hop.h>

typedef struct
{
  u32 next_index;
  u32 flow_label;
} cache_trace_t;

/* packet trace format function */
static u8 *
format_cache_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  cache_trace_t *t = va_arg (*args, cache_trace_t *);

  s = format (s, "CACHE: flow_label %d, next index %d",
              t->flow_label, t->next_index);
  return s;
}

#define foreach_cache_error \
_(RECORDED, "ip6 iOAM headers cached")

typedef enum
{
#define _(sym,str) CACHE_ERROR_##sym,
  foreach_cache_error
#undef _
  CACHE_N_ERROR,
} cache_error_t;

static char *cache_error_strings[] = {
#define _(sym,string) string,
  foreach_cache_error
#undef _
};

typedef enum
{
  IOAM_CACHE_NEXT_POP_HBYH,
  IOAM_CACHE_N_NEXT,
} cache_next_t;

static uword
ip6_ioam_cache_node_fn (vlib_main_t * vm,
                        vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  cache_next_t next_index;
  u32 recorded = 0;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      // TODO: Dual loop
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *p0;
          u32 next0 = IOAM_CACHE_NEXT_POP_HBYH;
          ip6_header_t *ip0;
          ip6_hop_by_hop_header_t *hbh0;
          tcp_header_t *tcp0;
          u32 tcp_offset0;

          /* speculatively enqueue p0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (p0);
          if (IP_PROTOCOL_TCP ==
              ip6_locate_header (p0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
            {
              tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
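              /* Only a pure SYN (SYN set, ACK clear) carries the iOAM
               * header to cache; the entry is stored keyed with
               * seq_number + 1 so that the peer's SYN-ACK, whose ack
               * number equals that value, can find it again. */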
              if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
                  (tcp0->flags & TCP_FLAG_ACK) == 0)
                {
                  /* Cache the ioam hbh header */
                  hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
                  if (0 == ioam_cache_add (p0,
                                           ip0,
                                           clib_net_to_host_u16
                                           (tcp0->src_port),
                                           clib_net_to_host_u16
                                           (tcp0->dst_port), hbh0,
                                           clib_net_to_host_u32
                                           (tcp0->seq_number) + 1))
                    {
                      recorded++;
                    }
                }
            }
          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (p0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  cache_trace_t *t =
                    vlib_add_trace (vm, node, p0, sizeof (*t));
                  t->flow_label =
                    clib_net_to_host_u32
                    (ip0->ip_version_traffic_class_and_flow_label);
                  t->next_index = next0;
                }
            }
          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, ioam_cache_node.index,
                               CACHE_ERROR_RECORDED, recorded);
  return frame->n_vectors;
}

/*
 * Node for IP6 iOAM header cache
 */
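/*
 * The node's single next hop is "ip6-pop-hop-by-hop": once the iOAM header
 * has been cached, the hop-by-hop options are removed before the packet
 * continues on its way.
 */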
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ioam_cache_node) =
{
  .function = ip6_ioam_cache_node_fn,
  .name = "ip6-ioam-cache",
  .vector_size = sizeof (u32),
  .format_trace = format_cache_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (cache_error_strings),
  .error_strings = cache_error_strings,
  .n_next_nodes = IOAM_CACHE_N_NEXT,
  /* edit / add dispositions here */
  .next_nodes =
  {
    [IOAM_CACHE_NEXT_POP_HBYH] = "ip6-pop-hop-by-hop"
  },
};
/* *INDENT-ON* */

typedef struct
{
  u32 next_index;
} ip6_add_from_cache_hbh_trace_t;

/* packet trace format function */
static u8 *
format_ip6_add_from_cache_hbh_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip6_add_from_cache_hbh_trace_t *t = va_arg (*args,
                                              ip6_add_from_cache_hbh_trace_t
                                              *);

  s = format (s, "IP6_ADD_FROM_CACHE_HBH: next index %d", t->next_index);
  return s;
}

#define foreach_ip6_add_from_cache_hbh_error \
_(PROCESSED, "Pkts w/ added ip6 hop-by-hop options")

typedef enum
{
#define _(sym,str) IP6_ADD_FROM_CACHE_HBH_ERROR_##sym,
  foreach_ip6_add_from_cache_hbh_error
#undef _
  IP6_ADD_FROM_CACHE_HBH_N_ERROR,
} ip6_add_from_cache_hbh_error_t;

static char *ip6_add_from_cache_hbh_error_strings[] = {
#define _(sym,string) string,
  foreach_ip6_add_from_cache_hbh_error
#undef _
};

#define foreach_ip6_ioam_cache_input_next \
  _(IP6_LOOKUP, "ip6-lookup") \
  _(DROP, "error-drop")

typedef enum
{
#define _(s,n) IP6_IOAM_CACHE_INPUT_NEXT_##s,
  foreach_ip6_ioam_cache_input_next
#undef _
  IP6_IOAM_CACHE_INPUT_N_NEXT,
} ip6_ioam_cache_input_next_t;

VLIB_NODE_FN (ip6_add_from_cache_hbh_node) (vlib_main_t * vm,
                                            vlib_node_runtime_t * node,
                                            vlib_frame_t * frame)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  u32 n_left_from, *from, *to_next;
  ip_lookup_next_t next_index;
  u32 processed = 0;
  u8 *rewrite = 0;
  u32 rewrite_len = 0;
  u32 sr_rewrite_len = vec_len (cm->sr_rewrite_template);
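  /* sr_rewrite_len is the size of the preformed SR header template kept in
   * ioam_cache_main; the template is appended after the cached hop-by-hop
   * options further below. */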

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      // TODO: Dual loop
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0;
          ip6_header_t *ip0;
          ip6_hop_by_hop_header_t *hbh0 = 0;
          ip6_sr_header_t *srh0 = 0;
          u64 *copy_src0, *copy_dst0;
          u16 new_l0;
          tcp_header_t *tcp0;
          u32 tcp_offset0;
          ioam_cache_entry_t *entry = 0;

          next0 = IP6_IOAM_CACHE_INPUT_NEXT_IP6_LOOKUP;
          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          ip0 = vlib_buffer_get_current (b0);
          if (IP_PROTOCOL_TCP !=
              ip6_locate_header (b0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
            {
              goto TRACE0;
            }
          tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
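          /* Only SYN-ACKs (and RSTs) are matched against the cache, using
           * the packet's ack number, which corresponds to the seq + 1 key
           * stored by the caching node. A cache miss for such a packet
           * means there is nothing to reattach and it is dropped; any other
           * TCP packet simply continues to ip6-lookup. */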
          if (((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
               (tcp0->flags & TCP_FLAG_ACK) == TCP_FLAG_ACK) ||
              (tcp0->flags & TCP_FLAG_RST) == TCP_FLAG_RST)
            {
              if (0 != (entry = ioam_cache_lookup (ip0,
                                                   clib_net_to_host_u16
                                                   (tcp0->src_port),
                                                   clib_net_to_host_u16
                                                   (tcp0->dst_port),
                                                   clib_net_to_host_u32
                                                   (tcp0->ack_number))))
                {
                  rewrite = entry->ioam_rewrite_string;
                  rewrite_len = vec_len (rewrite);
                }
              else
                {
                  next0 = IP6_IOAM_CACHE_INPUT_NEXT_DROP;
                  goto TRACE0;
                }
            }
          else
            goto TRACE0;

          /* Copy the ip header left by the required amount */
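          /* An ip6_header_t is 40 bytes, hence the five u64 copies below;
           * the header is shifted left to make room for the cached rewrite
           * plus the SR header template. */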
          copy_dst0 = (u64 *) (((u8 *) ip0) - (rewrite_len + sr_rewrite_len));
          copy_src0 = (u64 *) ip0;

          copy_dst0[0] = copy_src0[0];
          copy_dst0[1] = copy_src0[1];
          copy_dst0[2] = copy_src0[2];
          copy_dst0[3] = copy_src0[3];
          copy_dst0[4] = copy_src0[4];
          vlib_buffer_advance (b0, -(word) (rewrite_len + sr_rewrite_len));
          ip0 = vlib_buffer_get_current (b0);

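          /* Reinsert the cached hop-by-hop options directly after the ip6
           * header and append the SR header template. segments[0] receives
           * the packet's original destination, segments[1] the caching
           * node's own address (taken from the cached rewrite at
           * my_address_offset), and the ip6 destination is rewritten to the
           * cached next hop; presumably this steers the response back
           * through this node so the complete round-trip path can be
           * measured (see the file header comment). */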
          hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
          srh0 = (ip6_sr_header_t *) ((u8 *) hbh0 + rewrite_len);
          /* $$$ tune, rewrite_len is a multiple of 8 */
          clib_memcpy_fast (hbh0, rewrite, rewrite_len);
          clib_memcpy_fast (srh0, cm->sr_rewrite_template, sr_rewrite_len);
          /* Copy dst address into the DA slot in the segment list */
          clib_memcpy_fast (srh0->segments, ip0->dst_address.as_u64,
                            sizeof (ip6_address_t));
          /* Rewrite the ip6 dst address with the first hop */
          clib_memcpy_fast (ip0->dst_address.as_u64, entry->next_hop.as_u64,
                            sizeof (ip6_address_t));
          clib_memcpy_fast (&srh0->segments[1],
                            (u8 *) hbh0 + entry->my_address_offset,
                            sizeof (ip6_address_t));
          ioam_cache_entry_free (entry);

          /* Patch the protocol chain, insert the h-b-h (type 0) header */
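          /* Resulting next-header chain: ip6 (protocol 0 = hop-by-hop) ->
           * hop-by-hop options -> routing header (IPPROTO_IPV6_ROUTE) ->
           * original upper-layer protocol. */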
          srh0->protocol = ip0->protocol;
          hbh0->protocol = IPPROTO_IPV6_ROUTE;
          ip0->protocol = 0;
          new_l0 =
            clib_net_to_host_u16 (ip0->payload_length) + rewrite_len +
            sr_rewrite_len;
          ip0->payload_length = clib_host_to_net_u16 (new_l0);
          processed++;
        TRACE0:
          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                             && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              ip6_add_from_cache_hbh_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->next_index = next0;
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, cm->ip6_add_from_cache_hbh_node_index,
                               IP6_ADD_FROM_CACHE_HBH_ERROR_PROCESSED,
                               processed);
  return frame->n_vectors;
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_add_from_cache_hbh_node) =
{
  .name = "ip6-add-from-cache-hop-by-hop",
  .vector_size = sizeof (u32),
  .format_trace = format_ip6_add_from_cache_hbh_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (ip6_add_from_cache_hbh_error_strings),
  .error_strings = ip6_add_from_cache_hbh_error_strings,
  /* See ip/lookup.h */
  .n_next_nodes = IP6_IOAM_CACHE_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [IP6_IOAM_CACHE_INPUT_NEXT_##s] = n,
    foreach_ip6_ioam_cache_input_next
#undef _
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */