#define tcp_error(n,s) s,

#define foreach_tcp_state_next                  \
  _ (DROP, "error-drop")                        \
  _ (TCP4_OUTPUT, "tcp4-output")                \
  _ (TCP6_OUTPUT, "tcp6-output")

typedef enum _tcp_established_next
{
#define _(s,n) TCP_ESTABLISHED_NEXT_##s,
  /* ... */

typedef enum _tcp_rcv_process_next
{
#define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
  /* ... */

typedef enum _tcp_syn_sent_next
{
#define _(s,n) TCP_SYN_SENT_NEXT_##s,
  /* ... */

typedef enum _tcp_listen_next
{
#define _(s,n) TCP_LISTEN_NEXT_##s,
  /* ... */

typedef enum _tcp_state_next
{
#define _(s,n) TCP_NEXT_##s,
  /* ... */

#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT \
                                        : TCP_NEXT_TCP6_OUTPUT)

  /* ... segment-in-receive-window check ... */
  return seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd);
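The seq_leq/seq_lt/seq_gt comparisons used throughout this file are serial-number comparisons, so they stay correct when 32-bit sequence numbers wrap. A minimal sketch of the idea, using the usual signed-difference trick (function names here are illustrative, not the file's actual macros):

/* Illustrative only: wrap-safe sequence comparison via signed difference. */
#include <stdint.h>

static inline int
seq_lt_sketch (uint32_t s1, uint32_t s2)
{
  return (int32_t) (s1 - s2) < 0;
}

static inline int
seq_leq_sketch (uint32_t s1, uint32_t s2)
{
  return (int32_t) (s1 - s2) <= 0;
}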
  u8 opt_len, opts_len, kind;
  /* ... */
  data = (const u8 *) (th + 1);

  /* Clear all option flags except those negotiated on the SYN */
  to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE);

  for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
    {
      /* ... */
      if (opt_len < 2 || opt_len > opts_len)
        /* ... malformed option length ... */

      /* MSS option */
      to->flags |= TCP_OPTS_FLAG_MSS;
      to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
      /* ... */

      /* Window scale option */
      to->flags |= TCP_OPTS_FLAG_WSCALE;
      /* ... */

      /* Timestamp option */
      to->flags |= TCP_OPTS_FLAG_TSTAMP;
      to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
      to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
      /* ... */

      /* SACK permitted option */
      to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
      /* ... */

      /* SACK blocks: ignored unless SACK was negotiated and this is not a SYN */
      if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
        /* ... */

      to->flags |= TCP_OPTS_FLAG_SACK;
      /* ... */
      b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 4 * j));
      b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 4 * j));
    }
  /* ... PAWS: a timestamp older than TCP_PAWS_IDLE invalidates tsval_recent ... */
  tc0->tsval_recent = 0;
  /* ... */
  /* Remember the most recent valid peer timestamp (RFC 7323) */
  tc0->tsval_recent = tc0->opt.tsval;
  /* RTT estimator update */
  err = mrtt - tc->srtt;
  tc->srtt += err >> 3;
  /* ... */
  tc->rttvar += (clib_abs (err) - tc->rttvar) >> 2;
  /* ... first measurement ... */
  tc->rttvar = mrtt << 1;
  /* ... */
  /* Take a sample only if the timed segment is acked and no retransmission
   * backoff is in progress (Karn's algorithm) */
  if (tc->rtt_seq && seq_gt (ack, tc->rtt_seq) && !tc->rto_boff)
    /* ... */
  else if (tcp_opts_tstamp (&tc->opt) && tc->opt.tsecr && tc->bytes_acked)
    /* ... otherwise derive the sample from the echoed timestamp ... */
  /* Duplicate ACK: same ack as snd_una, data still outstanding, no window change */
  return ((vnet_buffer (b)->tcp.ack_number == tc->snd_una)
          && seq_gt (tc->snd_una_max, tc->snd_una)
          /* ... */
          && (new_snd_wnd == tc->snd_wnd));
  /* Unlink a hole from the scoreboard list (holes live in a pool and are
   * linked by index) */
  next->prev = hole->prev;
  /* ... */
  prev->next = hole->next;
  /* ... */
  sb->head = hole->next;
  /* ... */

  /* Allocate and link a new hole after prev */
  memset (hole, 0, sizeof (*hole));
  /* ... */
  hole_index = hole - sb->holes;
  /* ... */
  hole->prev = prev - sb->holes;
  hole->next = prev->next;
  /* ... */
  next->prev = hole_index;
  /* ... */
  prev->next = hole_index;
  /* ... */
  sb->head = hole_index;
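Because holes live in a pool that may reallocate, neighbors are recorded as pool indices rather than pointers, which is what the prev/next bookkeeping above maintains. A hedged sketch of the same index-linked insertion on a plain array (hole_t and INVALID_INDEX are illustrative names, not this file's types):

/* Illustrative only: doubly linked list threaded through an array by index. */
#include <stdint.h>

#define INVALID_INDEX ((uint32_t) ~0)

typedef struct
{
  uint32_t start, end;  /* sequence-space range of the hole */
  uint32_t prev, next;  /* indices into the hole array, or INVALID_INDEX */
} hole_t;

static void
hole_insert_after (hole_t *holes, uint32_t prev_index, uint32_t new_index)
{
  hole_t *h = &holes[new_index];
  h->prev = prev_index;
  h->next = holes[prev_index].next;
  if (h->next != INVALID_INDEX)
    holes[h->next].prev = new_index;
  holes[prev_index].next = new_index;
}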
  /* Remove SACK blocks that are invalid or already covered */
  if (seq_lt (blk->start, blk->end)
      && seq_gt (blk->start, tc->snd_una)
      && seq_gt (blk->start, ack) && seq_lt (blk->end, tc->snd_nxt))
    /* ... keep the block ... */
  vec_del1 (tc->opt.sacks, blk - tc->opt.sacks);
  /* ... */

  /* Add a block for the cumulative ack */
  if (seq_gt (ack, tc->snd_una))
    {
      tmp.start = tc->snd_una;
      /* ... */
    }

  if (vec_len (tc->opt.sacks) == 0)
    /* ... */

  /* Make sure blocks are ordered by start sequence */
  for (i = 0; i < vec_len (tc->opt.sacks); i++)
    for (j = i; j < vec_len (tc->opt.sacks); j++)
      if (seq_lt (tc->opt.sacks[j].start, tc->opt.sacks[i].start))
        {
          tmp = tc->opt.sacks[i];
          tc->opt.sacks[i] = tc->opt.sacks[j];
          tc->opt.sacks[j] = tmp;
        }

  /* Walk the scoreboard holes against the sorted blocks */
  while (hole && blk_index < vec_len (tc->opt.sacks))
    {
      blk = &tc->opt.sacks[blk_index];

      if (seq_leq (blk->start, hole->start))
        {
          /* Block covers the start of the hole */
          if (seq_geq (blk->end, hole->end))
            {
              /* Block covers the whole hole */
              /* ... */
              if (seq_lt (hole->end, ack))
                {
                  if (seq_lt (next_hole->start, ack))
                    sb->sacked_bytes -= next_hole->start - hole->end;
                  else
                    sb->sacked_bytes -= ack - hole->end;
                }
              /* ... */
            }
          else
            {
              /* Block covers only the beginning: shrink the hole from the left */
              sb->sacked_bytes += blk->end - hole->start;
              hole->start = blk->end;
              /* ... */
            }
        }
      else
        {
          /* Block starts inside the hole */
          if (seq_leq (blk->end, hole->end))
            {
              /* ... hole must be split ... */
              sb->sacked_bytes += blk->end - blk->start;
              /* ... */
              hole->end = blk->start - 1;
              /* ... */
            }
          else
            {
              sb->sacked_bytes += hole->end - blk->start + 1;
              hole->end = blk->start - 1;
              /* ... */
            }
        }
    }
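A small worked example may help fix the scoreboard semantics; the numbers are illustrative:

/* Illustrative example: with snd_una = 1000 and snd_una_max = 3000 there is
 * initially one hole [1000, 3000).  A SACK block [1500, 2000) splits it into
 * [1000, 1500) and [2000, 3000) and adds 500 to sb->sacked_bytes.  A later
 * cumulative ACK of 1500 then removes the first hole, and the accounting
 * above subtracts any previously sacked bytes the ACK now covers. */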
  /* Window update check (RFC 793): SND.WL1 < SEG.SEQ, or
   * SND.WL1 == SEG.SEQ and SND.WL2 <= SEG.ACK */
  if (tc->snd_wl1 < seq || (tc->snd_wl1 == seq && tc->snd_wl2 <= ack))
    {
      tc->snd_wnd = snd_wnd;
      /* ... also record snd_wl1/snd_wl2 ... */
    }
  /* Congestion control hooks */
  tc->cc_algo->congestion (tc);
  /* ... */
  tc->cc_algo->recovered (tc);
  /* ... */
  partial_ack = seq_lt (tc->snd_una, tc->snd_una_max);
  /* ... */
  tc->cc_algo->rcv_ack (tc);
  /* ... */
  tc->tsecr_last_ack = tc->opt.tsecr;
  /* ... */
  ASSERT (tc->snd_una == ack);
  /* ... */
  if (tc->opt.tsecr != tc->tsecr_last_ack)
    /* ... */
  tc->cc_algo->init (tc);
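The cc_algo calls above go through a per-connection table of callbacks, so congestion-control algorithms can be swapped without touching the input path. A hedged sketch of that shape (struct and field names are illustrative, not the file's actual types):

/* Illustrative only: congestion control as a table of event callbacks. */
struct conn_sketch;  /* stand-in for the connection type */

typedef struct
{
  void (*init) (struct conn_sketch *tc);        /* connection setup */
  void (*rcv_ack) (struct conn_sketch *tc);     /* an ack advanced snd_una */
  void (*congestion) (struct conn_sketch *tc);  /* loss / congestion signal */
  void (*recovered) (struct conn_sketch *tc);   /* leaving fast recovery */
} cc_algo_sketch_t;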
  /* ACK for something not yet sent */
  *error = TCP_ERROR_ACK_INVALID;
  /* ... */
  /* ACK for something already acknowledged */
  *error = TCP_ERROR_ACK_OLD;
  /* ... */
  new_snd_wnd = clib_net_to_host_u16 (th->window) << tc->snd_wscale;
  /* ... */
  *error = TCP_ERROR_ACK_DUP;
  /* ... */
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  /* ... */
  if (tc->snd_una == tc->snd_una_max)
    /* ... everything sent has been acknowledged ... */
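The send window above is the advertised 16-bit window shifted by the window scale negotiated on the SYN; for example, a raw window of 8192 with snd_wscale = 7 gives 8192 << 7 = 1,048,576 bytes. A minimal sketch of that computation (names are illustrative):

/* Illustrative only: effective send window from the advertised 16-bit
 * window and the negotiated window-scale shift (RFC 7323 caps it at 14). */
#include <stdint.h>

static inline uint32_t
effective_wnd_sketch (uint16_t advertised_wnd, uint8_t snd_wscale)
{
  return (uint32_t) advertised_wnd << snd_wscale;
}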
  /* Update the SACK list advertised to the peer */
  if (seq_lt (tc->rcv_nxt, start))
    /* ... */
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    /* ... */
    if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt)
        || seq_leq (tc->snd_sacks[i].start, end))
      /* ... */
  n_elts = clib_min (vec_len (tc->snd_sacks) - i, /* ... */);
  /* ... */
  tc->snd_sacks = new_list;
  /* Segment carries no data */
  return TCP_ERROR_PURE_ACK;
  /* ... */
  /* In-order enqueue; the fifo may also have consumed previously buffered
   * out-of-order data, in which case rcv_nxt advances past this segment */
  else if (written > data_len)
    {
      tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end + written - data_len;
      /* ... */
      tc->flags |= TCP_CONN_SNDACK;
    }
  /* ... */
  return TCP_ERROR_FIFO_FULL;
  /* ... */
  return TCP_ERROR_ENQUEUED;

  /* Out-of-order enqueue at an offset relative to rcv_nxt */
  offset = seq - tc->rcv_nxt;
  /* ... */
  return TCP_ERROR_FIFO_FULL;
  /* ... */
  return TCP_ERROR_ENQUEUED;
  /* Can we delay the ack? */
  && (tc->flags & TCP_CONN_SENT_RCV_WND0) == 0
  && (tc->flags & TCP_CONN_SNDACK) == 0
  /* ... */
  u16 n_data_bytes, u32 * next0)
  /* ... */
  if (tc->snt_dupacks < 3)
    /* ... */
  if (n_data_bytes == 0)
    /* ... */

  /* Flag the connection for a delayed ack if it is not already flagged.
   * Note: the precedence bug `!tc->flags & TCP_CONN_DELACK' is fixed here. */
  if (!(tc->flags & TCP_CONN_DELACK))
    {
      vec_add1 (tm->delack_connections[tc->c_thread_index], /* ... */);
      tc->flags |= TCP_CONN_DELACK;
    }
  /* ... */
  if ((tc->flags & TCP_CONN_BURSTACK) == 0)
    {
      /* ... */
      error = TCP_ERROR_ENQUEUED;
      /* ... */
      tc->flags |= TCP_CONN_BURSTACK;
    }
  tw_timer_wheel_16t_2w_512sl_t *tw;
  /* ... */
  tw = &tm->timer_wheels[thread_index];
  conns = tm->delack_connections[thread_index];
  for (i = 0; i < vec_len (conns); i++)
    {
      /* ... */
      tc->timers[TCP_TIMER_DELACK]
        = tw_timer_start_16t_2w_512sl (tw, conns[i], /* ... */);
    }
  u32 n_left_from, next_index, *from, *to_next;
  /* ... */
  while (n_left_from > 0)
    {
      /* ... */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          /* ... */
          u32 n_advance_bytes0, n_data_bytes0;
          u32 next0 = TCP_ESTABLISHED_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;
          /* ... */
          n_left_to_next -= 1;
          /* ... */
          error0 = TCP_ERROR_INVALID_CONNECTION;
          /* ... */
          n_data_bytes0 = clib_net_to_host_u16 (ip40->length) /* ... */;
          /* ... */
          n_advance_bytes0 += sizeof (ip60[0]);
          /* ... */
          error0 = TCP_ERROR_SEGMENT_INVALID;
          /* ... */
          tc0->state = TCP_STATE_CLOSE_WAIT;
          /* ... */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      /* ... */
    }
  /* ... flush session enqueue events; count fifo-full errors ... */
  vlib_node_increment_counter (vm, tcp4_established_node.index,
                               TCP_ERROR_EVENT_FIFO_FULL, errors);
  /* ... */
  vlib_node_increment_counter (vm, tcp6_established_node.index,
                               TCP_ERROR_EVENT_FIFO_FULL, errors);
VLIB_REGISTER_NODE (tcp4_established_node) =
{
  /* ... */
  .name = "tcp4-established",
  /* ... */
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
  /* ... */
};

VLIB_REGISTER_NODE (tcp6_established_node) =
{
  /* ... */
  .name = "tcp6-established",
  /* ... */
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
  /* ... */
};
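For context, the redefinitions of the `_' macro in the registrations above are conventionally used to populate each node's next_nodes table from the same foreach_tcp_state_next list that defined the enum. A hedged sketch of that expansion; the field and count names follow the usual VPP pattern and are not shown in the fragments of this listing:

  /* Illustrative only: indexing next_nodes by the TCP_ESTABLISHED_NEXT_* values. */
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },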
  u32 n_left_from, next_index, *from, *to_next;
  /* ... */
  u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP;
  /* ... */
  while (n_left_from > 0)
    {
      /* ... */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, ack0, seq0;
          /* ... */
          u32 n_advance_bytes0, n_data_bytes0;
          /* ... */
          u32 next0 = TCP_SYN_SENT_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;
          /* ... */
          n_left_to_next -= 1;
          /* ... */
          tc0 = tcp_half_open_connection_get (vnet_buffer (b0)->
                                              tcp.connection_index);
          /* ... */
          n_data_bytes0 = clib_net_to_host_u16 (ip40->length) /* ... */;
          /* ... */
          n_advance_bytes0 += sizeof (ip60[0]);
          /* ... */

          /* The ACK must be within (ISS, SND.NXT] (RFC 793) */
          if (ack0 <= tc0->iss || ack0 > tc0->snd_nxt)
            /* ... */
          if (tc0->snd_una > ack0)
            /* ... */

          /* Valid SYN or SYN-ACK: allocate a full connection and retire
           * the half-open one */
          pool_get (tm->connections[my_thread_index], new_tc0);
          /* ... */
          new_tc0->c_thread_index = my_thread_index;
          /* ... */
          pool_put (tm->half_open_connections, tc0);
          /* ... */
          new_tc0->irs = seq0;
          /* ... */
          new_tc0->tsval_recent = new_tc0->opt.tsval;
          /* ... */
          new_tc0->snd_wscale = new_tc0->opt.wscale;
          /* ... */
          new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window);
          new_tc0->snd_wl1 = seq0;
          new_tc0->snd_wl2 = ack0;
          /* ... */

          /* SYN-ACK: move straight to ESTABLISHED */
          new_tc0->snd_una = ack0;
          new_tc0->state = TCP_STATE_ESTABLISHED;
          /* ... */
          new_tc0->rcv_las = new_tc0->rcv_nxt;
          /* ... */
          new_tc0->flags |= TCP_CONN_SNDACK;
          /* ... */

          /* SYN only (simultaneous open): go to SYN_RCVD */
          new_tc0->state = TCP_STATE_SYN_RCVD;
          /* ... */
          if (error0 == TCP_ERROR_PURE_ACK)
            error0 = TCP_ERROR_SYN_ACKS_RCVD;
          /* ... */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      /* ... */
    }
  /* ... */
  vlib_node_increment_counter (vm, tcp4_syn_sent_node.index,
                               TCP_ERROR_EVENT_FIFO_FULL, errors);
  /* ... */
  vlib_node_increment_counter (vm, tcp6_syn_sent_node.index,
                               TCP_ERROR_EVENT_FIFO_FULL, errors);
VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
{
  /* ... */
  .name = "tcp4-syn-sent",
  /* ... */
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
  /* ... */
};

VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
{
  /* ... */
  .name = "tcp6-syn-sent",
  /* ... */
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
  /* ... */
};
  u32 n_left_from, next_index, *from, *to_next;
  /* ... */
  while (n_left_from > 0)
    {
      /* ... */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          /* ... */
          u32 n_advance_bytes0, n_data_bytes0;
          u32 next0 = TCP_RCV_PROCESS_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;
          /* ... */
          n_left_to_next -= 1;
          /* ... */
          error0 = TCP_ERROR_INVALID_CONNECTION;
          /* ... */
          n_data_bytes0 = clib_net_to_host_u16 (ip40->length) /* ... */;
          /* ... */
          n_advance_bytes0 += sizeof (ip60[0]);
          /* ... */

          /* Special treatment for CLOSED */
          switch (tc0->state)
            {
            case TCP_STATE_CLOSED:
              /* ... drop ... */
            }
          /* ... 1-4: validate SEQ, RST and SYN ... */
          error0 = TCP_ERROR_SEGMENT_INVALID;
          /* ... */

          /* 5: check the ACK field, per connection state */
          switch (tc0->state)
            {
            case TCP_STATE_SYN_RCVD:
              /* Our SYN-ACK has been acked */
              tc0->state = TCP_STATE_ESTABLISHED;
              /* ... */
              tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window) /* ... */;
              /* ... */
            case TCP_STATE_ESTABLISHED:
              /* ... */
            case TCP_STATE_FIN_WAIT_1:
              /* ... once our FIN is acked, move on */
              if (tc0->snd_una == tc0->snd_una_max)
                tc0->state = TCP_STATE_FIN_WAIT_2;
              /* ... */
            case TCP_STATE_FIN_WAIT_2:
              /* ... */
            case TCP_STATE_CLOSE_WAIT:
              /* ... */
            case TCP_STATE_CLOSING:
              /* ... */
              tc0->state = TCP_STATE_TIME_WAIT;
              /* ... */
            case TCP_STATE_LAST_ACK:
              /* ... ACK of our FIN: the connection is done */
              tc0->state = TCP_STATE_CLOSED;
              /* ... */
            case TCP_STATE_TIME_WAIT:
              /* ... */
            }

          /* 7: process the segment text */
          switch (tc0->state)
            {
            case TCP_STATE_ESTABLISHED:
            case TCP_STATE_FIN_WAIT_1:
            case TCP_STATE_FIN_WAIT_2:
              /* ... */
            case TCP_STATE_CLOSE_WAIT:
            case TCP_STATE_CLOSING:
            case TCP_STATE_LAST_ACK:
            case TCP_STATE_TIME_WAIT:
              /* ... */
            }

          /* 8: check the FIN bit */
          switch (tc0->state)
            {
            case TCP_STATE_ESTABLISHED:
            case TCP_STATE_SYN_RCVD:
              /* ... */
              tc0->state = TCP_STATE_CLOSE_WAIT;
              /* ... */
            case TCP_STATE_CLOSE_WAIT:
            case TCP_STATE_CLOSING:
            case TCP_STATE_LAST_ACK:
              /* ... */
            case TCP_STATE_FIN_WAIT_1:
              tc0->state = TCP_STATE_TIME_WAIT;
              /* ... */
            case TCP_STATE_FIN_WAIT_2:
              /* ... */
              tc0->state = TCP_STATE_TIME_WAIT;
              /* ... */
            case TCP_STATE_TIME_WAIT:
              /* ... */
            }
          /* ... */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      /* ... */
    }
  vlib_node_increment_counter (vm, tcp4_rcv_process_node.index,
                               TCP_ERROR_EVENT_FIFO_FULL, errors);
  /* ... */
  vlib_node_increment_counter (vm, tcp6_rcv_process_node.index,
                               TCP_ERROR_EVENT_FIFO_FULL, errors);
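For quick reference, the state changes visible in the fragments above line up with the RFC 793 close sequence; a hedged summary, restricted to the transitions actually shown:

/* FIN received:     ESTABLISHED / SYN_RCVD -> CLOSE_WAIT
 *                   FIN_WAIT_1, FIN_WAIT_2 -> TIME_WAIT
 * ACK of our FIN:   FIN_WAIT_1 -> FIN_WAIT_2, CLOSING -> TIME_WAIT,
 *                   LAST_ACK -> CLOSED */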
VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
{
  /* ... */
  .name = "tcp4-rcv-process",
  /* ... */
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
  /* ... */
};

VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
{
  /* ... */
  .name = "tcp6-rcv-process",
  /* ... */
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
  /* ... */
};
  u32 n_left_from, next_index, *from, *to_next;
  /* ... */
  u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP;
  /* ... */
  while (n_left_from > 0)
    {
      /* ... */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          /* ... */
          u32 error0 = TCP_ERROR_SYNS_RCVD, next0 = TCP_LISTEN_NEXT_DROP;
          /* ... */
          n_left_to_next -= 1;
          /* ... */

          /* Allocate a child connection for the incoming SYN */
          pool_get (tm->connections[my_thread_index], child0);
          memset (child0, 0, sizeof (*child0));
          /* ... */
          child0->c_c_index = child0 - tm->connections[my_thread_index];
          child0->c_lcl_port = lc0->c_lcl_port;
          child0->c_rmt_port = th0->src_port;
          child0->c_is_ip4 = is_ip4;
          child0->c_thread_index = my_thread_index;
          /* ... */
          error0 = TCP_ERROR_CREATE_SESSION_FAIL;
          /* ... */

          /* Initialize the child from the SYN */
          child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
          child0->state = TCP_STATE_SYN_RCVD;
          /* ... */
          child0->tsval_recent = child0->opt.tsval;
          /* ... */
          child0->snd_wscale = child0->opt.wscale;
          /* ... */
          child0->snd_wnd = clib_net_to_host_u16 (th0->window);
          child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
          child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
          /* ... */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      /* ... */
    }
VLIB_REGISTER_NODE (tcp4_listen_node) =
{
  /* ... */
  .name = "tcp4-listen",
  /* ... */
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
  /* ... */
};

VLIB_REGISTER_NODE (tcp6_listen_node) =
{
  /* ... */
  .name = "tcp6-listen",
  /* ... */
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
  /* ... */
};
typedef enum _tcp_input_next
{
  /* ... */

#define foreach_tcp4_input_next                 \
  _ (DROP, "error-drop")                        \
  _ (LISTEN, "tcp4-listen")                     \
  _ (RCV_PROCESS, "tcp4-rcv-process")           \
  _ (SYN_SENT, "tcp4-syn-sent")                 \
  _ (ESTABLISHED, "tcp4-established")           \
  _ (RESET, "tcp4-reset")

#define foreach_tcp6_input_next                 \
  _ (DROP, "error-drop")                        \
  _ (LISTEN, "tcp6-listen")                     \
  _ (RCV_PROCESS, "tcp6-rcv-process")           \
  _ (SYN_SENT, "tcp6-syn-sent")                 \
  _ (ESTABLISHED, "tcp6-established")           \
  _ (RESET, "tcp6-reset")

  /* ... trace formatting ... */
  s = format (s, "TCP: src-port %d dst-port %U%s\n",
              clib_net_to_host_u16 (t->src_port), /* ... */);

#define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)

  u32 n_left_from, next_index, *from, *to_next;
  while (n_left_from > 0)
    {
      /* ... */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          /* ... */
          n_left_to_next -= 1;
          /* ... */

          /* Look up the transport connection for this segment */
          tc0 = stream_session_lookup_transport4 (/* ... addresses, ports ... */
                                                  SESSION_TYPE_IP4_TCP,
                                                  my_thread_index);
          /* ... */
          tc0 = stream_session_lookup_transport6 (/* ... addresses, ports ... */
                                                  SESSION_TYPE_IP6_TCP,
                                                  my_thread_index);
          /* ... */
          vnet_buffer (b0)->tcp.connection_index = tc0->c_c_index;
          vnet_buffer (b0)->tcp.seq_number =
            clib_net_to_host_u32 (tcp0->seq_number);
          vnet_buffer (b0)->tcp.ack_number =
            clib_net_to_host_u32 (tcp0->ack_number);
          /* ... */

          /* Dispatch on (connection state, TCP flags) */
          next0 = tm->dispatch_table[tc0->state][flags0].next;
          error0 = tm->dispatch_table[tc0->state][flags0].error;
          /* ... */
          error0 = TCP_ERROR_NO_LISTENER;
          /* ... */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      /* ... */
    }
VLIB_REGISTER_NODE (tcp4_input_node) =
{
  /* ... */
  .name = "tcp4-input",
  /* ... */
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
  /* ... */
};

VLIB_REGISTER_NODE (tcp6_input_node) =
{
  /* ... */
  .name = "tcp6-input",
  /* ... */
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
  /* ... */
};
  /* Expire this thread's timers */
  tw_timer_expire_timers_16t_2w_512sl (&tm->timer_wheels[thread_index], now);

  /* ... */

  /* Initialize the dispatch table: everything is invalid by default */
  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
    for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
      {
        /* ... */
        tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
      }

#define _(t,f,n,e)                                              \
do {                                                            \
    tm->dispatch_table[TCP_STATE_##t][f].next = (n);            \
    tm->dispatch_table[TCP_STATE_##t][f].error = (e);           \
} while (0)