FD.io VPP  v19.04-6-g6f05f72
Vector Packet Processing
session.c
1 /*
2  * Copyright (c) 2017-2019 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief Session and session manager
18  */
19 
20 #include <vnet/session/session.h>
21 #include <vnet/session/session_debug.h>
22 #include <vnet/session/application.h>
23 #include <vnet/dpo/load_balance.h>
24 #include <vnet/fib/ip4_fib.h>
25 
26 session_main_t session_main;
27 
28 static inline int
29 session_send_evt_to_thread (void *data, void *args, u32 thread_index,
30  session_evt_type_t evt_type)
31 {
32  session_event_t *evt;
33  svm_msg_q_msg_t msg;
34  svm_msg_q_t *mq;
35  u32 tries = 0, max_tries;
36 
37  mq = session_main_get_vpp_event_queue (thread_index);
38  while (svm_msg_q_try_lock (mq))
39  {
40  max_tries = vlib_get_current_process (vlib_get_main ())? 1e6 : 3;
41  if (tries++ == max_tries)
42  {
43  SESSION_DBG ("failed to enqueue evt");
44  return -1;
45  }
46  }
47  if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
48  {
49  svm_msg_q_unlock (mq);
50  return -2;
51  }
52  msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
53  if (PREDICT_FALSE (svm_msg_q_msg_is_invalid (&msg)))
54  {
55  svm_msg_q_unlock (mq);
56  return -2;
57  }
58  evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
59  evt->event_type = evt_type;
60  switch (evt_type)
61  {
62  case SESSION_CTRL_EVT_RPC:
63  evt->rpc_args.fp = data;
64  evt->rpc_args.arg = args;
65  break;
66  case SESSION_IO_EVT_TX:
67  case SESSION_IO_EVT_TX_FLUSH:
68  case SESSION_IO_EVT_BUILTIN_RX:
69  evt->session_index = *(u32 *) data;
70  break;
71  case SESSION_IO_EVT_BUILTIN_TX:
72  case SESSION_CTRL_EVT_CLOSE:
73  evt->session_handle = session_handle ((session_t *) data);
74  break;
75  default:
76  clib_warning ("evt unhandled!");
77  svm_msg_q_unlock (mq);
78  return -1;
79  }
80 
81  svm_msg_q_add_and_unlock (mq, &msg);
82  return 0;
83 }
84 
85 int
86 session_send_io_evt_to_thread (svm_fifo_t * f, session_evt_type_t evt_type)
87 {
88  return session_send_evt_to_thread (&f->master_session_index, 0,
89  f->master_thread_index, evt_type);
90 }
91 
92 int
93 session_send_io_evt_to_thread_custom (void *data, u32 thread_index,
94  session_evt_type_t evt_type)
95 {
96  return session_send_evt_to_thread (data, 0, thread_index, evt_type);
97 }
98 
99 int
100 session_send_ctrl_evt_to_thread (session_t * s, session_evt_type_t evt_type)
101 {
102  /* only event supported for now is disconnect */
103  ASSERT (evt_type == SESSION_CTRL_EVT_CLOSE);
104  return session_send_evt_to_thread (s, 0, s->thread_index,
105  SESSION_CTRL_EVT_CLOSE);
106 }
107 
108 void
109 session_send_rpc_evt_to_thread_force (u32 thread_index, void *fp,
110  void *rpc_args)
111 {
112  session_send_evt_to_thread (fp, rpc_args, thread_index,
113  SESSION_CTRL_EVT_RPC);
114 }
115 
116 void
117 session_send_rpc_evt_to_thread (u32 thread_index, void *fp, void *rpc_args)
118 {
119  if (thread_index != vlib_get_thread_index ())
120  session_send_rpc_evt_to_thread_force (thread_index, fp, rpc_args);
121  else
122  {
123  void (*fnp) (void *) = fp;
124  fnp (rpc_args);
125  }
126 }
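
/*
 * Editor's note: illustrative sketch added for this page, not part of
 * session.c. It shows how the event helpers above might be used to run a
 * function on another thread or to signal the thread that owns a fifo.
 * All example_* names are hypothetical, and thread 1 is assumed to exist.
 */
__clib_unused static void
example_rpc_handler (void *arg)
{
  clib_warning ("rpc ran on thread %d", (int) vlib_get_thread_index ());
}

__clib_unused static void
example_signal_other_threads (svm_fifo_t * f)
{
  /* Run example_rpc_handler on worker 1 (or inline if already on it) */
  session_send_rpc_evt_to_thread (1 /* thread_index */, example_rpc_handler,
                                  0 /* rpc_args */);
  /* Ask the fifo owner's session queue node to service this session's tx */
  session_send_io_evt_to_thread (f, SESSION_IO_EVT_TX);
}
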
127 
128 static void
129 session_program_transport_close (session_t * s)
130 {
131  u32 thread_index = vlib_get_thread_index ();
132  session_worker_t *wrk;
133  session_event_t *evt;
134 
135  /* If we are in the handler thread, or being called with the worker barrier
136  * held, just append a new event to pending disconnects vector. */
137  if (vlib_thread_is_main_w_barrier () || thread_index == s->thread_index)
138  {
139  wrk = session_main_get_worker (s->thread_index);
140  vec_add2 (wrk->pending_disconnects, evt, 1);
141  clib_memset (evt, 0, sizeof (*evt));
142  evt->session_handle = session_handle (s);
143  evt->event_type = SESSION_CTRL_EVT_CLOSE;
144  }
145  else
146  session_send_ctrl_evt_to_thread (s, SESSION_CTRL_EVT_CLOSE);
147 }
148 
149 session_t *
150 session_alloc (u32 thread_index)
151 {
152  session_worker_t *wrk = &session_main.wrk[thread_index];
153  session_t *s;
154  u8 will_expand = 0;
155  pool_get_aligned_will_expand (wrk->sessions, will_expand,
156  CLIB_CACHE_LINE_BYTES);
157  /* If we have peekers, let them finish */
158  if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
159  {
160  clib_rwlock_writer_lock (&wrk->peekers_rw_locks);
161  pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
162  clib_rwlock_writer_unlock (&wrk->peekers_rw_locks);
163  }
164  else
165  {
166  pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
167  }
168  clib_memset (s, 0, sizeof (*s));
169  s->session_index = s - wrk->sessions;
170  s->thread_index = thread_index;
171  return s;
172 }
173 
174 void
175 session_free (session_t * s)
176 {
177  if (CLIB_DEBUG)
178  {
179  u8 thread_index = s->thread_index;
180  clib_memset (s, 0xFA, sizeof (*s));
181  pool_put (session_main.wrk[thread_index].sessions, s);
182  return;
183  }
184  SESSION_EVT_DBG (SESSION_EVT_FREE, s);
185  pool_put (session_main.wrk[s->thread_index].sessions, s);
186 }
187 
188 void
189 session_free_w_fifos (session_t * s)
190 {
191  segment_manager_dealloc_fifos (s->rx_fifo, s->tx_fifo);
192  session_free (s);
193 }
194 
195 /**
196  * Cleans up session and lookup table.
197  *
198  * Transport connection must still be valid.
199  */
200 static void
201 session_delete (session_t * s)
202 {
203  int rv;
204 
205  /* Delete from the main lookup table. */
206  if ((rv = session_lookup_del_session (s)))
207  clib_warning ("hash delete error, rv %d", rv);
208 
209  session_free_w_fifos (s);
210 }
211 
212 static session_t *
213 session_alloc_for_connection (transport_connection_t * tc)
214 {
215  session_t *s;
216  u32 thread_index = tc->thread_index;
217 
218  ASSERT (thread_index == vlib_get_thread_index ()
219  || transport_protocol_is_cl (tc->proto));
220 
221  s = session_alloc (thread_index);
222  s->session_type = session_type_from_proto_and_ip (tc->proto, tc->is_ip4);
223  s->session_state = SESSION_STATE_CLOSED;
224 
225  /* Attach transport to session and vice versa */
226  s->connection_index = tc->c_index;
227  tc->s_index = s->session_index;
228  return s;
229 }
230 
231 /**
232  * Discards bytes from buffer chain
233  *
234  * It discards n_bytes_to_drop starting at first buffer after chain_b
235  */
236 always_inline void
237 session_enqueue_discard_chain_bytes (vlib_main_t * vm, vlib_buffer_t * b,
238  vlib_buffer_t ** chain_b,
239  u32 n_bytes_to_drop)
240 {
241  vlib_buffer_t *next = *chain_b;
242  u32 to_drop = n_bytes_to_drop;
243  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
244  while (to_drop && (next->flags & VLIB_BUFFER_NEXT_PRESENT))
245  {
246  next = vlib_get_buffer (vm, next->next_buffer);
247  if (next->current_length > to_drop)
248  {
249  vlib_buffer_advance (next, to_drop);
250  to_drop = 0;
251  }
252  else
253  {
254  to_drop -= next->current_length;
255  next->current_length = 0;
256  }
257  }
258  *chain_b = next;
259 
260  if (to_drop == 0)
261  b->total_length_not_including_first_buffer -= n_bytes_to_drop;
262 }
263 
264 /**
265  * Enqueue buffer chain tail
266  */
267 always_inline int
268 session_enqueue_chain_tail (session_t * s, vlib_buffer_t * b,
269  u32 offset, u8 is_in_order)
270 {
271  vlib_buffer_t *chain_b;
272  u32 chain_bi, len, diff;
273  vlib_main_t *vm = vlib_get_main ();
274  u8 *data;
275  u32 written = 0;
276  int rv = 0;
277 
278  if (is_in_order && offset)
279  {
280  diff = offset - b->current_length;
281  if (diff > b->total_length_not_including_first_buffer)
282  return 0;
283  chain_b = b;
284  session_enqueue_discard_chain_bytes (vm, b, &chain_b, diff);
285  chain_bi = vlib_get_buffer_index (vm, chain_b);
286  }
287  else
288  chain_bi = b->next_buffer;
289 
290  do
291  {
292  chain_b = vlib_get_buffer (vm, chain_bi);
293  data = vlib_buffer_get_current (chain_b);
294  len = chain_b->current_length;
295  if (!len)
296  continue;
297  if (is_in_order)
298  {
299  rv = svm_fifo_enqueue_nowait (s->rx_fifo, len, data);
300  if (rv == len)
301  {
302  written += rv;
303  }
304  else if (rv < len)
305  {
306  return (rv > 0) ? (written + rv) : written;
307  }
308  else if (rv > len)
309  {
310  written += rv;
311 
312  /* written more than what was left in chain */
313  if (written > b->total_length_not_including_first_buffer)
314  return written;
315 
316  /* drop the bytes that have already been delivered */
317  session_enqueue_discard_chain_bytes (vm, b, &chain_b, rv - len);
318  }
319  }
320  else
321  {
322  rv = svm_fifo_enqueue_with_offset (s->rx_fifo, offset, len, data);
323  if (rv)
324  {
325  clib_warning ("failed to enqueue multi-buffer seg");
326  return -1;
327  }
328  offset += len;
329  }
330  }
331  while ((chain_bi = (chain_b->flags & VLIB_BUFFER_NEXT_PRESENT)
332  ? chain_b->next_buffer : 0));
333 
334  if (is_in_order)
335  return written;
336 
337  return 0;
338 }
339 
340 /*
341  * Enqueue data for delivery to session peer. Does not notify peer of enqueue
342  * event but on request can queue notification events for later delivery by
343  * calling stream_server_flush_enqueue_events().
344  *
345  * @param tc Transport connection to which data is to be enqueued
346  * @param b Buffer to be enqueued
347  * @param offset Offset at which to start enqueueing if out-of-order
348  * @param queue_event Flag to indicate if peer is to be notified or if event
349  * is to be queued. The former is useful when more data is
350  * enqueued and only one event is to be generated.
351  * @param is_in_order Flag to indicate if data is in order
352  * @return Number of bytes enqueued or a negative value if enqueueing failed.
353  */
354 int
355 session_enqueue_stream_connection (transport_connection_t * tc,
356  vlib_buffer_t * b, u32 offset,
357  u8 queue_event, u8 is_in_order)
358 {
359  session_t *s;
360  int enqueued = 0, rv, in_order_off;
361 
362  s = session_get (tc->s_index, tc->thread_index);
363 
364  if (is_in_order)
365  {
366  enqueued = svm_fifo_enqueue_nowait (s->rx_fifo,
367  b->current_length,
368  vlib_buffer_get_current (b));
369  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT)
370  && enqueued >= 0))
371  {
372  in_order_off = enqueued > b->current_length ? enqueued : 0;
373  rv = session_enqueue_chain_tail (s, b, in_order_off, 1);
374  if (rv > 0)
375  enqueued += rv;
376  }
377  }
378  else
379  {
380  rv = svm_fifo_enqueue_with_offset (s->rx_fifo, offset,
381  b->current_length,
382  vlib_buffer_get_current (b));
383  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && !rv))
384  session_enqueue_chain_tail (s, b, offset + b->current_length, 0);
385  /* if something was enqueued, report even this as success for ooo
386  * segment handling */
387  return rv;
388  }
389 
390  if (queue_event)
391  {
392  /* Queue RX event on this fifo. Eventually these will need to be flushed
393  * by calling stream_server_flush_enqueue_events () */
394  session_worker_t *wrk;
395 
396  wrk = session_main_get_worker (s->thread_index);
397  if (!(s->flags & SESSION_F_RX_EVT))
398  {
399  s->flags |= SESSION_F_RX_EVT;
400  vec_add1 (wrk->session_to_enqueue[tc->proto], s->session_index);
401  }
402  }
403 
404  return enqueued;
405 }
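
/*
 * Editor's note: illustrative sketch added for this page, not part of
 * session.c. A hypothetical transport RX path handing an in-order buffer to
 * the session layer and deferring the app notification so it can be batched
 * (see session_main_flush_enqueue_events below). example_transport_rx is
 * not a real VPP function.
 */
__clib_unused static int
example_transport_rx (transport_connection_t * tc, vlib_buffer_t * b)
{
  int wrote;

  /* queue_event = 1: mark the session for a later, batched RX notification */
  wrote = session_enqueue_stream_connection (tc, b, 0 /* offset */,
                                             1 /* queue_event */,
                                             1 /* in order */);
  if (wrote < 0)
    return -1;  /* fifo full: transport should flow-control the peer */
  return wrote; /* bytes accepted, possibly fewer than were in the buffer */
}
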
406 
407 int
408 session_enqueue_dgram_connection (session_t * s,
409  session_dgram_hdr_t * hdr,
410  vlib_buffer_t * b, u8 proto, u8 queue_event)
411 {
412  int enqueued = 0, rv, in_order_off;
413 
414  ASSERT (svm_fifo_max_enqueue (s->rx_fifo)
415  >= b->current_length + sizeof (*hdr));
416 
417  svm_fifo_enqueue_nowait (s->rx_fifo, sizeof (session_dgram_hdr_t),
418  (u8 *) hdr);
419  enqueued = svm_fifo_enqueue_nowait (s->rx_fifo, b->current_length,
420  vlib_buffer_get_current (b));
421  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && enqueued >= 0))
422  {
423  in_order_off = enqueued > b->current_length ? enqueued : 0;
424  rv = session_enqueue_chain_tail (s, b, in_order_off, 1);
425  if (rv > 0)
426  enqueued += rv;
427  }
428  if (queue_event)
429  {
430  /* Queue RX event on this fifo. Eventually these will need to be flushed
431  * by calling stream_server_flush_enqueue_events () */
432  session_worker_t *wrk;
433 
435  if (!(s->flags & SESSION_F_RX_EVT))
436  {
437  s->flags |= SESSION_F_RX_EVT;
438  vec_add1 (wrk->session_to_enqueue[proto], s->session_index);
439  }
440  }
441  return enqueued;
442 }
443 
444 int
445 session_tx_fifo_peek_bytes (transport_connection_t * tc, u8 * buffer,
446  u32 offset, u32 max_bytes)
447 {
448  session_t *s = session_get (tc->s_index, tc->thread_index);
449  return svm_fifo_peek (s->tx_fifo, offset, max_bytes, buffer);
450 }
451 
452 u32
453 session_tx_fifo_dequeue_drop (transport_connection_t * tc, u32 max_bytes)
454 {
455  session_t *s = session_get (tc->s_index, tc->thread_index);
456  return svm_fifo_dequeue_drop (s->tx_fifo, max_bytes);
457 }
458 
459 static inline int
460 session_notify_subscribers (u32 app_index, session_t * s,
461  svm_fifo_t * f, session_evt_type_t evt_type)
462 {
463  app_worker_t *app_wrk;
464  application_t *app;
465  int i;
466 
467  app = application_get (app_index);
468  if (!app)
469  return -1;
470 
471  for (i = 0; i < f->n_subscribers; i++)
472  {
473  app_wrk = application_get_worker (app, f->subscribers[i]);
474  if (!app_wrk)
475  continue;
476  if (app_worker_lock_and_send_event (app_wrk, s, evt_type))
477  return -1;
478  }
479 
480  return 0;
481 }
482 
483 /**
484  * Notify session peer that new data has been enqueued.
485  *
486  * @param s Stream session for which the event is to be generated.
487  * @param lock Flag to indicate if call should lock message queue.
488  *
489  * @return 0 on success or negative number if failed to send notification.
490  */
491 static inline int
492 session_enqueue_notify_inline (session_t * s)
493 {
494  app_worker_t *app_wrk;
495 
496  app_wrk = app_worker_get_if_valid (s->app_wrk_index);
497  if (PREDICT_FALSE (!app_wrk))
498  {
499  SESSION_DBG ("invalid s->app_index = %d", s->app_wrk_index);
500  return 0;
501  }
502 
503  /* *INDENT-OFF* */
504  SESSION_EVT_DBG(SESSION_EVT_ENQ, s, ({
505  ed->data[0] = SESSION_IO_EVT_RX;
506  ed->data[1] = svm_fifo_max_dequeue (s->rx_fifo);
507  }));
508  /* *INDENT-ON* */
509 
510  s->flags &= ~SESSION_F_RX_EVT;
511  if (PREDICT_FALSE (app_worker_lock_and_send_event (app_wrk, s,
512  SESSION_IO_EVT_RX)))
513  return -1;
514 
515  if (PREDICT_FALSE (svm_fifo_n_subscribers (s->rx_fifo)))
516  return session_notify_subscribers (app_wrk->app_index, s,
517  s->rx_fifo, SESSION_IO_EVT_RX);
518 
519  return 0;
520 }
521 
522 int
523 session_enqueue_notify (session_t * s)
524 {
525  return session_enqueue_notify_inline (s);
526 }
527 
528 int
529 session_dequeue_notify (session_t * s)
530 {
531  app_worker_t *app_wrk;
532 
533  app_wrk = app_worker_get_if_valid (s->app_wrk_index);
534  if (PREDICT_FALSE (!app_wrk))
535  return -1;
536 
537  if (PREDICT_FALSE (app_worker_lock_and_send_event (app_wrk, s,
538  SESSION_IO_EVT_TX)))
539  return -1;
540 
541  if (PREDICT_FALSE (s->tx_fifo->n_subscribers))
542  return session_notify_subscribers (app_wrk->app_index, s,
543  s->tx_fifo, SESSION_IO_EVT_TX);
544 
545  svm_fifo_clear_tx_ntf (s->tx_fifo);
546 
547  return 0;
548 }
549 
550 /**
551  * Flushes queue of sessions that are to be notified of new data
552  * enqueued events.
553  *
554  * @param thread_index Thread index for which the flush is to be performed.
555  * @return 0 on success or a positive number indicating the number of
556  * failures due to API queue being full.
557  */
558 int
559 session_main_flush_enqueue_events (u8 transport_proto, u32 thread_index)
560 {
561  session_worker_t *wrk = session_main_get_worker (thread_index);
562  session_t *s;
563  int i, errors = 0;
564  u32 *indices;
565 
566  indices = wrk->session_to_enqueue[transport_proto];
567 
568  for (i = 0; i < vec_len (indices); i++)
569  {
570  s = session_get_if_valid (indices[i], thread_index);
571  if (PREDICT_FALSE (!s))
572  {
573  errors++;
574  continue;
575  }
576 
578  errors++;
579  }
580 
581  vec_reset_length (indices);
582  wrk->session_to_enqueue[transport_proto] = indices;
583 
584  return errors;
585 }
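
/*
 * Editor's note: illustrative sketch added for this page, not part of
 * session.c. Transports that enqueue with queue_event = 1 typically flush
 * the pending notifications once per dispatch, e.g. at the end of their
 * input node. TRANSPORT_PROTO_TCP is used here only as an example protocol.
 */
__clib_unused static void
example_flush_rx_notifications (u32 thread_index)
{
  /* Return value is the number of sessions that could not be notified */
  if (session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP, thread_index))
    clib_warning ("some enqueue notifications could not be delivered");
}
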
586 
587 int
588 session_main_flush_all_enqueue_events (u8 transport_proto)
589 {
590  vlib_thread_main_t *vtm = vlib_get_thread_main ();
591  int i, errors = 0;
592  for (i = 0; i < 1 + vtm->n_threads; i++)
593  errors += session_main_flush_enqueue_events (transport_proto, i);
594  return errors;
595 }
596 
597 int
598 session_stream_connect_notify (transport_connection_t * tc, u8 is_fail)
599 {
600  u32 opaque = 0, new_ti, new_si;
601  app_worker_t *app_wrk;
602  session_t *s = 0;
603  u64 ho_handle;
604 
605  /*
606  * Find connection handle and cleanup half-open table
607  */
608  ho_handle = session_lookup_half_open_handle (tc);
609  if (ho_handle == HALF_OPEN_LOOKUP_INVALID_VALUE)
610  {
611  SESSION_DBG ("half-open was removed!");
612  return -1;
613  }
614  session_lookup_del_half_open (tc);
615 
616  /* Get the app's index from the handle we stored when opening connection
617  * and the opaque (api_context for external apps) from transport session
618  * index */
619  app_wrk = app_worker_get_if_valid (ho_handle >> 32);
620  if (!app_wrk)
621  return -1;
622 
623  opaque = tc->s_index;
624 
625  if (is_fail)
626  return app_worker_connect_notify (app_wrk, s, opaque);
627 
630  s->app_wrk_index = app_wrk->wrk_index;
631  new_si = s->session_index;
632  new_ti = s->thread_index;
633 
634  if (app_worker_init_connected (app_wrk, s))
635  {
636  session_free (s);
637  app_worker_connect_notify (app_wrk, 0, opaque);
638  return -1;
639  }
640 
641  if (app_worker_connect_notify (app_wrk, s, opaque))
642  {
643  s = session_get (new_si, new_ti);
644  session_free_w_fifos (s);
645  return -1;
646  }
647 
648  s = session_get (new_si, new_ti);
649  s->session_state = SESSION_STATE_READY;
650  session_lookup_add_connection (tc, session_handle (s));
651 
652  return 0;
653 }
654 
655 typedef struct _session_switch_pool_args
656 {
657  u32 session_index;
658  u32 thread_index;
659  u32 new_thread_index;
660  u32 new_session_index;
661 } session_switch_pool_args_t;
662 
663 static void
664 session_switch_pool (void *cb_args)
665 {
666  session_switch_pool_args_t *args = (session_switch_pool_args_t *) cb_args;
667  session_t *s;
668  ASSERT (args->thread_index == vlib_get_thread_index ());
669  s = session_get (args->session_index, args->thread_index);
670  s->tx_fifo->master_session_index = args->new_session_index;
671  s->tx_fifo->master_thread_index = args->new_thread_index;
672  transport_cleanup (session_get_transport_proto (s), s->connection_index,
673  s->thread_index);
674  session_free (s);
675  clib_mem_free (cb_args);
676 }
677 
678 /**
679  * Move dgram session to the right thread
680  */
681 int
682 session_dgram_connect_notify (transport_connection_t * tc,
683  u32 old_thread_index, session_t ** new_session)
684 {
685  session_t *new_s;
686  session_switch_pool_args_t *rpc_args;
687 
688  /*
689  * Clone half-open session to the right thread.
690  */
691  new_s = session_clone_safe (tc->s_index, old_thread_index);
692  new_s->connection_index = tc->c_index;
693  new_s->rx_fifo->master_session_index = new_s->session_index;
694  new_s->rx_fifo->master_thread_index = new_s->thread_index;
695  new_s->tx_fifo->master_session_index = new_s->session_index;
696  new_s->tx_fifo->master_thread_index = new_s->thread_index;
697 
698  /*
699  * Ask thread owning the old session to clean it up and make us the tx
700  * fifo owner
701  */
702  rpc_args = clib_mem_alloc (sizeof (*rpc_args));
703  rpc_args->new_session_index = new_s->session_index;
704  rpc_args->new_thread_index = new_s->thread_index;
705  rpc_args->session_index = tc->s_index;
706  rpc_args->thread_index = old_thread_index;
707  session_send_rpc_evt_to_thread (rpc_args->thread_index, session_switch_pool,
708  rpc_args);
709 
710  tc->s_index = new_s->session_index;
711  new_s->connection_index = tc->c_index;
712  *new_session = new_s;
713  return 0;
714 }
715 
716 /**
717  * Notification from transport that connection is being closed.
718  *
719  * A disconnect is sent to application but state is not removed. Once
720  * disconnect is acknowledged by application, session disconnect is called.
721  * Ultimately this leads to close being called on transport (passive close).
722  */
723 void
724 session_transport_closing_notify (transport_connection_t * tc)
725 {
726  app_worker_t *app_wrk;
727  session_t *s;
728 
729  s = session_get (tc->s_index, tc->thread_index);
730  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
731  return;
732  s->session_state = SESSION_STATE_TRANSPORT_CLOSING;
733  app_wrk = app_worker_get (s->app_wrk_index);
734  app_worker_close_notify (app_wrk, s);
735 }
736 
737 /**
738  * Notification from transport that connection is being deleted
739  *
740  * This removes the session if it is still valid. It should be called only on
741  * previously fully established sessions. For instance failed connects should
742  * call stream_session_connect_notify and indicate that the connect has
743  * failed.
744  */
745 void
746 session_transport_delete_notify (transport_connection_t * tc)
747 {
748  session_t *s;
749 
750  /* App might've been removed already */
751  if (!(s = session_get_if_valid (tc->s_index, tc->thread_index)))
752  return;
753 
754  /* Make sure we don't try to send anything more */
756 
757  switch (s->session_state)
758  {
760  /* Session was created but accept notification was not yet sent to the
761  * app. Cleanup everything. */
764  break;
767  /* If transport finishes or times out before we get a reply
768  * from the app, mark transport as closed and wait for reply
769  * before removing the session. Cleanup session table in advance
770  * because transport will soon be closed and closed sessions
771  * are assumed to have been removed from the lookup table */
774  break;
777  /* Cleanup lookup table as transport needs to still be valid.
778  * Program transport close to ensure that all session events
779  * have been cleaned up. Once transport close is called, the
780  * session is just removed because both transport and app have
781  * confirmed the close*/
785  break;
787  break;
789  session_delete (s);
790  break;
791  default:
792  clib_warning ("session state %u", s->session_state);
793  session_delete (s);
794  break;
795  }
796 }
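
/*
 * Editor's note: illustrative sketch added for this page, not part of
 * session.c. The expected calling sequence for a transport-initiated
 * (passive) close: session_transport_closing_notify when the peer starts
 * closing, then session_transport_delete_notify once the transport
 * connection is gone. The wrapper below is hypothetical.
 */
__clib_unused static void
example_transport_peer_closed (transport_connection_t * tc, u8 fully_closed)
{
  if (!fully_closed)
    session_transport_closing_notify (tc);	/* app notified, state kept */
  else
    session_transport_delete_notify (tc);	/* session cleaned up or parked
						   until the app confirms */
}
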
797 
798 /**
799  * Notification from transport that session can be closed
800  *
801  * Should be called by transport only if it was closed with non-empty
802  * tx fifo and once it decides to begin the closing procedure prior to
803  * issuing a delete notify. This gives the chance to the session layer
804  * to cleanup any outstanding events.
805  */
806 void
807 session_transport_closed_notify (transport_connection_t * tc)
808 {
809  session_t *s;
810 
811  if (!(s = session_get_if_valid (tc->s_index, tc->thread_index)))
812  return;
813 
814  /* If app close has not been received or has not yet resulted in
815  * a transport close, only mark the session transport as closed */
817  {
818  session_lookup_del_session (s);
819  s->session_state = SESSION_STATE_TRANSPORT_CLOSED;
820  }
821  else
822  session_delete (s);
823 }
824 
825 /**
826  * Notify application that connection has been reset.
827  */
828 void
829 session_transport_reset_notify (transport_connection_t * tc)
830 {
831  app_worker_t *app_wrk;
832  session_t *s;
833 
834  s = session_get (tc->s_index, tc->thread_index);
835  svm_fifo_dequeue_drop_all (s->tx_fifo);
836  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
837  return;
838  s->session_state = SESSION_STATE_TRANSPORT_CLOSING;
839  app_wrk = app_worker_get (s->app_wrk_index);
840  app_worker_reset_notify (app_wrk, s);
841 }
842 
843 int
844 session_stream_accept_notify (transport_connection_t * tc)
845 {
846  app_worker_t *app_wrk;
847  session_t *s;
848 
849  s = session_get (tc->s_index, tc->thread_index);
850  app_wrk = app_worker_get_if_valid (s->app_wrk_index);
851  if (!app_wrk)
852  return -1;
853  s->session_state = SESSION_STATE_ACCEPTING;
854  return app_worker_accept_notify (app_wrk, s);
855 }
856 
857 /**
858  * Accept a stream session. Optionally ping the server by callback.
859  */
860 int
861 session_stream_accept (transport_connection_t * tc, u32 listener_index,
862  u8 notify)
863 {
864  session_t *s;
865  int rv;
866 
867  s = session_alloc_for_connection (tc);
868  s->listener_index = listener_index;
869  s->session_state = SESSION_STATE_CREATED;
870 
871  if ((rv = app_worker_init_accepted (s)))
872  return rv;
873 
874  session_lookup_add_connection (tc, session_handle (s));
875 
876  /* Shoulder-tap the server */
877  if (notify)
878  {
879  app_worker_t *app_wrk = app_worker_get (s->app_wrk_index);
880  return app_worker_accept_notify (app_wrk, s);
881  }
882 
883  return 0;
884 }
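
/*
 * Editor's note: illustrative sketch added for this page, not part of
 * session.c. A hypothetical transport that just finished establishing a
 * connection against one of its listeners would allocate the session-layer
 * state like this.
 */
__clib_unused static int
example_transport_established (transport_connection_t * tc,
                               u32 listener_index)
{
  /* notify = 1: shoulder-tap the app's accept callback right away */
  if (session_stream_accept (tc, listener_index, 1 /* notify */))
    return -1;	/* app refused the session or fifo allocation failed */
  return 0;
}
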
885 
886 int
887 session_open_cl (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
888 {
889  transport_connection_t *tc;
890  transport_endpoint_cfg_t *tep;
891  app_worker_t *app_wrk;
892  session_handle_t sh;
893  session_t *s;
894  int rv;
895 
896  tep = session_endpoint_to_transport_cfg (rmt);
897  rv = transport_connect (rmt->transport_proto, tep);
898  if (rv < 0)
899  {
900  SESSION_DBG ("Transport failed to open connection.");
901  return VNET_API_ERROR_SESSION_CONNECT;
902  }
903 
904  tc = transport_get_half_open (rmt->transport_proto, (u32) rv);
905 
906  /* For dgram type of service, allocate session and fifos now */
907  app_wrk = app_worker_get (app_wrk_index);
908  s = session_alloc_for_connection (tc);
909  s->app_wrk_index = app_wrk->wrk_index;
910  s->session_state = SESSION_STATE_OPENED;
911  if (app_worker_init_connected (app_wrk, s))
912  {
913  session_free (s);
914  return -1;
915  }
916 
917  sh = session_handle (s);
918  session_lookup_add_connection (tc, sh);
919 
920  return app_worker_connect_notify (app_wrk, s, opaque);
921 }
922 
923 int
924 session_open_vc (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
925 {
926  transport_connection_t *tc;
927  transport_endpoint_cfg_t *tep;
928  u64 handle;
929  int rv;
930 
931  tep = session_endpoint_to_transport_cfg (rmt);
932  rv = transport_connect (rmt->transport_proto, tep);
933  if (rv < 0)
934  {
935  SESSION_DBG ("Transport failed to open connection.");
936  return VNET_API_ERROR_SESSION_CONNECT;
937  }
938 
939  tc = transport_get_half_open (rmt->transport_proto, (u32) rv);
940 
941  /* If transport offers a stream service, only allocate session once the
942  * connection has been established.
943  * Add connection to half-open table and save app and tc index. The
944  * latter is needed to help establish the connection while the former
945  * is needed when the connect notify comes and we have to notify the
946  * external app
947  */
948  handle = (((u64) app_wrk_index) << 32) | (u64) tc->c_index;
949  session_lookup_add_half_open (tc, handle);
950 
951  /* Store api_context (opaque) for when the reply comes. Not the nicest
952  * thing but better than allocating a separate half-open pool.
953  */
954  tc->s_index = opaque;
955  return 0;
956 }
957 
958 int
959 session_open_app (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
960 {
961  session_endpoint_cfg_t *sep = (session_endpoint_cfg_t *) rmt;
962  transport_endpoint_cfg_t *tep_cfg = session_endpoint_to_transport_cfg (sep);
963 
964  sep->app_wrk_index = app_wrk_index;
965  sep->opaque = opaque;
966 
967  return transport_connect (rmt->transport_proto, tep_cfg);
968 }
969 
970 typedef int (*session_open_service_fn) (u32, session_endpoint_t *, u32);
971 
972 /* *INDENT-OFF* */
973 static session_open_service_fn session_open_srv_fns[TRANSPORT_N_SERVICES] = {
974  session_open_vc,
975  session_open_cl,
976  session_open_app,
977 };
978 /* *INDENT-ON* */
979 
980 /**
981  * Ask transport to open connection to remote transport endpoint.
982  *
983  * Stores handle for matching request with reply since the call can be
984  * asynchronous. For instance, for TCP the 3-way handshake must complete
985  * before reply comes. Session is only created once connection is established.
986  *
987  * @param app_index Index of the application requesting the connect
988  * @param st Session type requested.
989  * @param tep Remote transport endpoint
990  * @param opaque Opaque data (typically, api_context) the application expects
991  * on open completion.
992  */
993 int
994 session_open (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
995 {
996  transport_service_type_t tst;
997  tst = transport_protocol_service_type (rmt->transport_proto);
998  return session_open_srv_fns[tst] (app_wrk_index, rmt, opaque);
999 }
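
/*
 * Editor's note: illustrative sketch added for this page, not part of
 * session.c. An app worker asking the session layer to open a connection;
 * api_context stands in for whatever opaque value the app wants echoed back
 * in its connected callback.
 */
__clib_unused static int
example_app_connect (u32 app_wrk_index, session_endpoint_t * rmt,
                     u32 api_context)
{
  /* For VC transports (e.g. TCP) this only starts the handshake; the
   * session is created later, in session_stream_connect_notify */
  return session_open (app_wrk_index, rmt, api_context);
}
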
1000 
1001 /**
1002  * Ask transport to listen on session endpoint.
1003  *
1004  * @param s Session for which listen will be called. Note that unlike
1005  * established sessions, listen sessions are not associated to a
1006  * thread.
1007  * @param sep Local endpoint to be listened on.
1008  */
1009 int
1010 session_listen (session_t * ls, session_endpoint_cfg_t * sep)
1011 {
1012  transport_endpoint_t *tep;
1013  u32 tc_index, s_index;
1014 
1015  /* Transport bind/listen */
1016  tep = session_endpoint_to_transport (sep);
1017  s_index = ls->session_index;
1018  tc_index = transport_start_listen (session_get_transport_proto (ls),
1019  s_index, tep);
1020 
1021  if (tc_index == (u32) ~ 0)
1022  return -1;
1023 
1024  /* Attach transport to session. Lookup tables are populated by the app
1025  * worker because local tables (for ct sessions) are not backed by a fib */
1026  ls = listen_session_get (s_index);
1027  ls->connection_index = tc_index;
1028 
1029  return 0;
1030 }
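
/*
 * Editor's note: illustrative sketch added for this page, not part of
 * session.c. Binding a listen session to a local endpoint and tearing it
 * down again; ls is assumed to be a listen session already allocated by the
 * app layer.
 */
__clib_unused static int
example_start_stop_listen (session_t * ls, session_endpoint_cfg_t * sep)
{
  if (session_listen (ls, sep))
    return -1;			/* transport refused to bind/listen */
  /* ... accept connections ... */
  return session_stop_listen (ls);
}
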
1031 
1032 /**
1033  * Ask transport to stop listening on local transport endpoint.
1034  *
1035  * @param s Session to stop listening on. It must be in state LISTENING.
1036  */
1037 int
1038 session_stop_listen (session_t * s)
1039 {
1040  transport_proto_t tp = session_get_transport_proto (s);
1041  transport_connection_t *tc;
1042 
1043  if (s->session_state != SESSION_STATE_LISTENING)
1044  return -1;
1045 
1046  tc = transport_get_listener (tp, s->connection_index);
1047  if (!tc)
1048  return VNET_API_ERROR_ADDRESS_NOT_IN_USE;
1049 
1050  session_lookup_del_connection (tc);
1051  transport_stop_listen (tp, s->connection_index);
1052  return 0;
1053 }
1054 
1055 /**
1056  * Initialize session closing procedure.
1057  *
1058  * Request is always sent to session node to ensure that all outstanding
1059  * requests are served before transport is notified.
1060  */
1061 void
1062 session_close (session_t * s)
1063 {
1064  if (!s)
1065  return;
1066 
1067  if (s->session_state >= SESSION_STATE_CLOSING)
1068  {
1069  /* Session will only be removed once both app and transport
1070  * acknowledge the close */
1071  if (s->session_state == SESSION_STATE_TRANSPORT_CLOSED)
1072  session_program_transport_close (s);
1073 
1074  /* Session already closed. Clear the tx fifo */
1075  if (s->session_state == SESSION_STATE_CLOSED)
1076  svm_fifo_dequeue_drop_all (s->tx_fifo);
1077  return;
1078  }
1079 
1080  s->session_state = SESSION_STATE_CLOSING;
1081  session_program_transport_close (s);
1082 }
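
/*
 * Editor's note: illustrative sketch added for this page, not part of
 * session.c. An app-initiated (active) close just calls session_close; the
 * transport is only notified from the session queue node, and the session
 * itself is freed later, once the transport confirms with a delete notify.
 */
__clib_unused static void
example_app_close (u32 session_index, u32 thread_index)
{
  session_t *s = session_get_if_valid (session_index, thread_index);
  if (s)
    session_close (s);
}
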
1083 
1084 /**
1085  * Notify transport the session can be disconnected. This should eventually
1086  * result in a delete notification that allows us to cleanup session state.
1087  * Called for both active/passive disconnects.
1088  *
1089  * Must be called from the session's thread.
1090  */
1091 void
1092 session_transport_close (session_t * s)
1093 {
1094  /* If transport is already closed, just free the session */
1095  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)
1096  {
1097  session_free_w_fifos (s);
1098  return;
1099  }
1100 
1101  /* If tx queue wasn't drained, change state to closed waiting for transport.
1102  * This way, the transport, if it so wishes, can continue to try sending the
1103  * outstanding data (in closed state it cannot). It MUST however at one
1104  * point, either after sending everything or after a timeout, call delete
1105  * notify. This will finally lead to the complete cleanup of the session.
1106  */
1107  if (svm_fifo_max_dequeue (s->tx_fifo))
1108  s->session_state = SESSION_STATE_CLOSED_WAITING;
1109  else
1110  s->session_state = SESSION_STATE_CLOSED;
1111 
1112  transport_close (session_get_transport_proto (s), s->connection_index,
1113  s->thread_index);
1114 }
1115 
1116 /**
1117  * Cleanup transport and session state.
1118  *
1119  * Notify transport of the cleanup and free the session. This should
1120  * be called only if transport reported some error and is already
1121  * closed.
1122  */
1123 void
1124 session_transport_cleanup (session_t * s)
1125 {
1126  s->session_state = SESSION_STATE_CLOSED;
1127 
1128  /* Delete from main lookup table before we axe the transport */
1129  session_lookup_del_session (s);
1130  transport_cleanup (session_get_transport_proto (s), s->connection_index,
1131  s->thread_index);
1132  /* Since we called cleanup, no delete notification will come. So, make
1133  * sure the session is properly freed. */
1134  session_free_w_fifos (s);
1135 }
1136 
1137 /**
1138  * Allocate event queues in the shared-memory segment
1139  *
1140  * That can either be a newly created memfd segment, that will need to be
1141  * mapped by all stack users, or the binary api's svm region. The latter is
1142  * assumed to be already mapped. NOTE that this assumption DOES NOT hold if
1143  * api clients bootstrap shm api over sockets (i.e. use memfd segments) and
1144  * vpp uses api svm region for event queues.
1145  */
1146 void
1147 session_vpp_event_queues_allocate (session_main_t * smm)
1148 {
1149  u32 evt_q_length = 2048, evt_size = sizeof (session_event_t);
1150  ssvm_private_t *eqs = &smm->evt_qs_segment;
1151  api_main_t *am = &api_main;
1152  uword eqs_size = 64 << 20;
1153  pid_t vpp_pid = getpid ();
1154  void *oldheap;
1155  int i;
1156 
1157  if (smm->configured_event_queue_length)
1158  evt_q_length = smm->configured_event_queue_length;
1159 
1160  if (smm->evt_qs_use_memfd_seg)
1161  {
1162  if (smm->evt_qs_segment_size)
1163  eqs_size = smm->evt_qs_segment_size;
1164 
1165  eqs->ssvm_size = eqs_size;
1166  eqs->i_am_master = 1;
1167  eqs->my_pid = vpp_pid;
1168  eqs->name = format (0, "%s%c", "evt-qs-segment", 0);
1169  eqs->requested_va = smm->session_baseva;
1170 
1171  if (ssvm_master_init (eqs, SSVM_SEGMENT_MEMFD))
1172  {
1173  clib_warning ("failed to initialize queue segment");
1174  return;
1175  }
1176  }
1177 
1178  if (smm->evt_qs_use_memfd_seg)
1179  oldheap = ssvm_push_heap (eqs->sh);
1180  else
1181  oldheap = svm_push_data_heap (am->vlib_rp);
1182 
1183  for (i = 0; i < vec_len (smm->wrk); i++)
1184  {
1185  svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
1186  svm_msg_q_ring_cfg_t rc[SESSION_MQ_N_RINGS] = {
1187  {evt_q_length, evt_size, 0}
1188  ,
1189  {evt_q_length >> 1, 256, 0}
1190  };
1191  cfg->consumer_pid = 0;
1192  cfg->n_rings = 2;
1193  cfg->q_nitems = evt_q_length;
1194  cfg->ring_cfgs = rc;
1195  smm->wrk[i].vpp_event_queue = svm_msg_q_alloc (cfg);
1196  if (smm->evt_qs_use_memfd_seg)
1197  {
1198  if (svm_msg_q_alloc_consumer_eventfd (smm->wrk[i].vpp_event_queue))
1199  clib_warning ("eventfd returned");
1200  }
1201  }
1202 
1203  if (smm->evt_qs_use_memfd_seg)
1204  ssvm_pop_heap (oldheap);
1205  else
1206  svm_pop_heap (oldheap);
1207 }
1208 
1209 ssvm_private_t *
1210 session_main_get_evt_q_segment (void)
1211 {
1212  session_main_t *smm = &session_main;
1213  if (smm->evt_qs_use_memfd_seg)
1214  return &smm->evt_qs_segment;
1215  return 0;
1216 }
1217 
1218 u64
1219 session_segment_handle (session_t * s)
1220 {
1221  svm_fifo_t *f;
1222 
1224  return SESSION_INVALID_HANDLE;
1225 
1226  f = s->rx_fifo;
1227  return segment_manager_make_segment_handle (f->segment_manager,
1228  f->segment_index);
1229 }
1230 
1231 /* *INDENT-OFF* */
1232 static session_fifo_rx_fn *session_tx_fns[TRANSPORT_TX_N_FNS] = {
1233  session_tx_fifo_peek_and_snd,
1234  session_tx_fifo_dequeue_and_snd,
1235  session_tx_fifo_dequeue_internal,
1236  session_tx_fifo_dequeue_and_snd
1237 };
1238 /* *INDENT-ON* */
1239 
1240 /**
1241  * Initialize session layer for given transport proto and ip version
1242  *
1243  * Allocates per session type (transport proto + ip version) data structures
1244  * and adds arc from session queue node to session type output node.
1245  */
1246 void
1248  const transport_proto_vft_t * vft, u8 is_ip4,
1249  u32 output_node)
1250 {
1251  session_main_t *smm = &session_main;
1252  session_type_t session_type;
1253  u32 next_index = ~0;
1254 
1255  session_type = session_type_from_proto_and_ip (transport_proto, is_ip4);
1256 
1257  vec_validate (smm->session_type_to_next, session_type);
1258  vec_validate (smm->session_tx_fns, session_type);
1259 
1260  /* *INDENT-OFF* */
1261  if (output_node != ~0)
1262  {
1263  foreach_vlib_main (({
1264  next_index = vlib_node_add_next (this_vlib_main,
1265  session_queue_node.index,
1266  output_node);
1267  }));
1268  }
1269  /* *INDENT-ON* */
1270 
1271  smm->session_type_to_next[session_type] = next_index;
1272  smm->session_tx_fns[session_type] = session_tx_fns[vft->tx_type];
1273 }
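
/*
 * Editor's note: illustrative sketch added for this page, not part of
 * session.c. A transport typically registers itself once per ip version from
 * its own init code; example_proto, example_vft and the output node indices
 * are hypothetical placeholders for the transport's own values.
 */
__clib_unused static void
example_register_transport (transport_proto_t example_proto,
                            const transport_proto_vft_t * example_vft,
                            u32 example_out4_node, u32 example_out6_node)
{
  session_register_transport (example_proto, example_vft, 1 /* is_ip4 */,
                              example_out4_node);
  session_register_transport (example_proto, example_vft, 0 /* is_ip4 */,
                              example_out6_node);
}
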
1274 
1275 transport_connection_t *
1276 session_get_transport (session_t * s)
1277 {
1278  if (s->session_state != SESSION_STATE_LISTENING)
1279  return transport_get_connection (session_get_transport_proto (s),
1280  s->connection_index, s->thread_index);
1281  else
1282  return transport_get_listener (session_get_transport_proto (s),
1283  s->connection_index);
1284 }
1285 
1286 transport_connection_t *
1287 listen_session_get_transport (session_t * s)
1288 {
1289  return transport_get_listener (session_get_transport_proto (s),
1290  s->connection_index);
1291 }
1292 
1293 void
1294 session_flush_frames_main_thread (vlib_main_t * vm)
1295 {
1296  ASSERT (vlib_get_thread_index () == 0);
1297  vlib_process_signal_event_mt (vm, session_queue_process_node.index,
1298  SESSION_Q_PROCESS_FLUSH_FRAMES, 0);
1299 }
1300 
1301 static clib_error_t *
1302 session_manager_main_enable (vlib_main_t * vm)
1303 {
1304  segment_manager_main_init_args_t _sm_args = { 0 }, *sm_args = &_sm_args;
1305  session_main_t *smm = &session_main;
1306  vlib_thread_main_t *vtm = vlib_get_thread_main ();
1307  u32 num_threads, preallocated_sessions_per_worker;
1308  session_worker_t *wrk;
1309  int i;
1310 
1311  num_threads = 1 /* main thread */ + vtm->n_threads;
1312 
1313  if (num_threads < 1)
1314  return clib_error_return (0, "n_thread_stacks not set");
1315 
1316  /* Allocate cache line aligned worker contexts */
1317  vec_validate_aligned (smm->wrk, num_threads - 1, CLIB_CACHE_LINE_BYTES);
1318 
1319  for (i = 0; i < num_threads; i++)
1320  {
1321  wrk = &smm->wrk[i];
1322  vec_validate (wrk->free_event_vector, 128);
1323  _vec_len (wrk->free_event_vector) = 0;
1324  vec_validate (wrk->pending_event_vector, 128);
1325  _vec_len (wrk->pending_event_vector) = 0;
1326  vec_validate (wrk->pending_disconnects, 128);
1327  _vec_len (wrk->pending_disconnects) = 0;
1328  vec_validate (wrk->postponed_event_vector, 128);
1329  _vec_len (wrk->postponed_event_vector) = 0;
1330 
1331  wrk->last_vlib_time = vlib_time_now (vlib_mains[i]);
1332  wrk->dispatch_period = 500e-6;
1333 
1334  if (num_threads > 1)
1335  clib_rwlock_init (&smm->wrk[i].peekers_rw_locks);
1336  }
1337 
1338 #if SESSION_DEBUG
1339  vec_validate (smm->last_event_poll_by_thread, num_threads - 1);
1340 #endif
1341 
1342  /* Allocate vpp event queues segment and queue */
1343  session_vpp_event_queues_allocate (smm);
1344 
1345  /* Initialize fifo segment main baseva and timeout */
1346  sm_args->baseva = smm->session_baseva + smm->evt_qs_segment_size;
1347  sm_args->size = smm->session_va_space_size;
1348  segment_manager_main_init (sm_args);
1349 
1350  /* Preallocate sessions */
1351  if (smm->preallocated_sessions)
1352  {
1353  if (num_threads == 1)
1354  {
1355  pool_init_fixed (smm->wrk[0].sessions, smm->preallocated_sessions);
1356  }
1357  else
1358  {
1359  int j;
1360  preallocated_sessions_per_worker =
1361  (1.1 * (f64) smm->preallocated_sessions /
1362  (f64) (num_threads - 1));
1363 
1364  for (j = 1; j < num_threads; j++)
1365  {
1366  pool_init_fixed (smm->wrk[j].sessions,
1367  preallocated_sessions_per_worker);
1368  }
1369  }
1370  }
1371 
1372  session_lookup_init ();
1373  app_namespaces_init ();
1374  transport_init ();
1375 
1376  smm->is_enabled = 1;
1377 
1378  /* Enable transports */
1379  transport_enable_disable (vm, 1);
1380  transport_init_tx_pacers_period ();
1381  return 0;
1382 }
1383 
1384 void
1385 session_node_enable_disable (u8 is_en)
1386 {
1387  u8 state = is_en ? VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED;
1388  vlib_thread_main_t *vtm = vlib_get_thread_main ();
1389  u8 have_workers = vtm->n_threads != 0;
1390 
1391  /* *INDENT-OFF* */
1392  foreach_vlib_main (({
1393  if (have_workers && ii == 0)
1394  {
1395  vlib_node_set_state (this_vlib_main, session_queue_process_node.index,
1396  state);
1397  if (is_en)
1398  {
1399  vlib_node_t *n = vlib_get_node (this_vlib_main,
1400  session_queue_process_node.index);
1401  vlib_start_process (this_vlib_main, n->runtime_index);
1402  }
1403  else
1404  {
1405  vlib_process_signal_event_mt (this_vlib_main,
1406  session_queue_process_node.index,
1407  SESSION_Q_PROCESS_STOP, 0);
1408  }
1409 
1410  continue;
1411  }
1412  vlib_node_set_state (this_vlib_main, session_queue_node.index,
1413  state);
1414  }));
1415  /* *INDENT-ON* */
1416 }
1417 
1418 clib_error_t *
1419 vnet_session_enable_disable (vlib_main_t * vm, u8 is_en)
1420 {
1421  clib_error_t *error = 0;
1422  if (is_en)
1423  {
1424  if (session_main.is_enabled)
1425  return 0;
1426 
1427  session_node_enable_disable (is_en);
1428  error = session_manager_main_enable (vm);
1429  }
1430  else
1431  {
1432  session_main.is_enabled = 0;
1433  session_node_enable_disable (is_en);
1434  }
1435 
1436  return error;
1437 }
1438 
1439 clib_error_t *
1440 session_manager_main_init (vlib_main_t * vm)
1441 {
1442  session_main_t *smm = &session_main;
1443  smm->session_baseva = HIGH_SEGMENT_BASEVA;
1444 #if (HIGH_SEGMENT_BASEVA > (4ULL << 30))
1445  smm->session_va_space_size = 128ULL << 30;
1446  smm->evt_qs_segment_size = 64 << 20;
1447 #else
1448  smm->session_va_space_size = 128 << 20;
1449  smm->evt_qs_segment_size = 1 << 20;
1450 #endif
1451  smm->is_enabled = 0;
1452  return 0;
1453 }
1454 
1455 VLIB_INIT_FUNCTION (session_manager_main_init);
1456 
1457 static clib_error_t *
1458 session_config_fn (vlib_main_t * vm, unformat_input_t * input)
1459 {
1460  session_main_t *smm = &session_main;
1461  u32 nitems;
1462  uword tmp;
1463 
1464  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1465  {
1466  if (unformat (input, "event-queue-length %d", &nitems))
1467  {
1468  if (nitems >= 2048)
1469  smm->configured_event_queue_length = nitems;
1470  else
1471  clib_warning ("event queue length %d too small, ignored", nitems);
1472  }
1473  else if (unformat (input, "preallocated-sessions %d",
1474  &smm->preallocated_sessions))
1475  ;
1476  else if (unformat (input, "v4-session-table-buckets %d",
1477  &smm->configured_v4_session_table_buckets))
1478  ;
1479  else if (unformat (input, "v4-halfopen-table-buckets %d",
1480  &smm->configured_v4_halfopen_table_buckets))
1481  ;
1482  else if (unformat (input, "v6-session-table-buckets %d",
1483  &smm->configured_v6_session_table_buckets))
1484  ;
1485  else if (unformat (input, "v6-halfopen-table-buckets %d",
1486  &smm->configured_v6_halfopen_table_buckets))
1487  ;
1488  else if (unformat (input, "v4-session-table-memory %U",
1489  unformat_memory_size, &tmp))
1490  {
1491  if (tmp >= 0x100000000)
1492  return clib_error_return (0, "memory size %llx (%lld) too large",
1493  tmp, tmp);
1494  smm->configured_v4_session_table_memory = tmp;
1495  }
1496  else if (unformat (input, "v4-halfopen-table-memory %U",
1497  unformat_memory_size, &tmp))
1498  {
1499  if (tmp >= 0x100000000)
1500  return clib_error_return (0, "memory size %llx (%lld) too large",
1501  tmp, tmp);
1502  smm->configured_v4_halfopen_table_memory = tmp;
1503  }
1504  else if (unformat (input, "v6-session-table-memory %U",
1505  unformat_memory_size, &tmp))
1506  {
1507  if (tmp >= 0x100000000)
1508  return clib_error_return (0, "memory size %llx (%lld) too large",
1509  tmp, tmp);
1510  smm->configured_v6_session_table_memory = tmp;
1511  }
1512  else if (unformat (input, "v6-halfopen-table-memory %U",
1513  unformat_memory_size, &tmp))
1514  {
1515  if (tmp >= 0x100000000)
1516  return clib_error_return (0, "memory size %llx (%lld) too large",
1517  tmp, tmp);
1518  smm->configured_v6_halfopen_table_memory = tmp;
1519  }
1520  else if (unformat (input, "local-endpoints-table-memory %U",
1521  unformat_memory_size, &tmp))
1522  {
1523  if (tmp >= 0x100000000)
1524  return clib_error_return (0, "memory size %llx (%lld) too large",
1525  tmp, tmp);
1526  smm->local_endpoints_table_memory = tmp;
1527  }
1528  else if (unformat (input, "local-endpoints-table-buckets %d",
1529  &smm->local_endpoints_table_buckets))
1530  ;
1531  else if (unformat (input, "evt_qs_memfd_seg"))
1532  smm->evt_qs_use_memfd_seg = 1;
1533  else if (unformat (input, "evt_qs_seg_size %U", unformat_memory_size,
1534  &smm->evt_qs_segment_size))
1535  ;
1536  else
1537  return clib_error_return (0, "unknown input `%U'",
1538  format_unformat_error, input);
1539  }
1540  return 0;
1541 }
1542 
1543 VLIB_CONFIG_FUNCTION (session_config_fn, "session");
1544 
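/*
 * Editor's note: illustrative example added for this page, not part of
 * session.c. The unformat calls in session_config_fn above correspond to a
 * startup.conf stanza along these lines (all values are arbitrary):
 *
 *   session {
 *     event-queue-length 4096
 *     preallocated-sessions 100000
 *     v4-session-table-buckets 100000
 *     v4-session-table-memory 64m
 *     evt_qs_memfd_seg
 *   }
 */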
1545 /*
1546  * fd.io coding-style-patch-verification: ON
1547  *
1548  * Local Variables:
1549  * eval: (c-set-style "gnu")
1550  * End:
1551  */
Parent listener session index if the result of an accept.