FD.io VPP  v17.01-9-ge7dcee4
Vector Packet Processing
ixge.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /*
17  * WARNING!
18  * This driver is not intended for production use and it is unsupported.
19  * It is provided for educational use only.
20  * Please use the supported DPDK driver instead.
21  */
22 
23 #if __x86_64__
24 #include <vppinfra/vector.h>
25 
26 #ifndef CLIB_HAVE_VEC128
27 #warning HACK: ixge driver won't really work, missing u32x4
28 typedef unsigned long long u32x4;
29 #endif
30 
31 #include <vlib/vlib.h>
32 #include <vlib/unix/unix.h>
33 #include <vlib/pci/pci.h>
34 #include <vnet/vnet.h>
35 #include <vnet/devices/nic/ixge.h>
36 #include <vnet/ethernet/ethernet.h>
37 
38 #define IXGE_ALWAYS_POLL 0
39 
40 #define EVENT_SET_FLAGS 0
41 #define IXGE_HWBP_RACE_ELOG 0
42 
43 #define PCI_VENDOR_ID_INTEL 0x8086
44 
45 /* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
46 #define XGE_PHY_DEV_TYPE_PMA_PMD 1
47 #define XGE_PHY_DEV_TYPE_PHY_XS 4
48 #define XGE_PHY_ID1 0x2
49 #define XGE_PHY_ID2 0x3
50 #define XGE_PHY_CONTROL 0x0
51 #define XGE_PHY_CONTROL_RESET (1 << 15)
52 
52 
53 ixge_main_t ixge_main;
54 static vlib_node_registration_t ixge_input_node;
55 static vlib_node_registration_t ixge_process_node;
56 
57 static void
58 ixge_semaphore_get (ixge_device_t * xd)
59 {
60  ixge_main_t *xm = &ixge_main;
61  vlib_main_t *vm = xm->vlib_main;
62  ixge_regs_t *r = xd->regs;
63  u32 i;
64 
65  i = 0;
66  while (!(r->software_semaphore & (1 << 0)))
67  {
68  if (i > 0)
69  vlib_process_suspend (vm, 100e-6);
70  i++;
71  }
72  do
73  {
74  r->software_semaphore |= 1 << 1;
75  }
76  while (!(r->software_semaphore & (1 << 1)));
77 }
78 
79 static void
80 ixge_semaphore_release (ixge_device_t * xd)
81 {
82  ixge_regs_t *r = xd->regs;
83  r->software_semaphore &= ~3;
84 }
85 
86 static void
87 ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
88 {
89  ixge_main_t *xm = &ixge_main;
90  vlib_main_t *vm = xm->vlib_main;
91  ixge_regs_t *r = xd->regs;
92  u32 fw_mask = sw_mask << 5;
93  u32 m, done = 0;
94 
95  while (!done)
96  {
97  ixge_semaphore_get (xd);
98  m = r->software_firmware_sync;
99  done = (m & fw_mask) == 0;
100  if (done)
101  r->software_firmware_sync = m | sw_mask;
102  ixge_semaphore_release (xd);
103  if (!done)
104  vlib_process_suspend (vm, 10e-3);
105  }
106 }
107 
108 static void
109 ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
110 {
111  ixge_regs_t *r = xd->regs;
112  ixge_semaphore_get (xd);
113  r->software_firmware_sync &= ~sw_mask;
114  ixge_semaphore_release (xd);
115 }
116 
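A usage sketch, not part of the original file: the sync mask taken by ixge_software_firmware_sync must be released by the same caller with the same mask. `example_phy_access` below is a hypothetical illustration of the pattern that the real callers (ixge_read_write_phy_reg, ixge_sfp_phy_init_from_eeprom) follow.

/* Illustrative only: bracket accesses to firmware-shared resources. */
static void
example_phy_access (ixge_device_t * xd)
{
  u32 sw_mask = 1 << (1 + xd->phy_index);	/* per-PHY semaphore bit */
  ixge_software_firmware_sync (xd, sw_mask);
  /* ... access PHY/EEPROM/analog-config registers here ... */
  ixge_software_firmware_sync_release (xd, sw_mask);
}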
117 u32
118 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
119  u32 v, u32 is_read)
120 {
121  ixge_regs_t *r = xd->regs;
122  const u32 busy_bit = 1 << 30;
123  u32 x;
124 
125  ASSERT (xd->phy_index < 2);
126  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));
127 
128  ASSERT (reg_index < (1 << 16));
129  ASSERT (dev_type < (1 << 5));
130  if (!is_read)
131  r->xge_mac.phy_data = v;
132 
133  /* Address cycle. */
134  x =
135    reg_index | (dev_type << 16)
136    | (xd->phys[xd->phy_index].mdio_address << 21);
137  r->xge_mac.phy_command = x | busy_bit;
138  /* Busy wait timed to take 28e-6 secs. No suspend. */
139  while (r->xge_mac.phy_command & busy_bit)
140  ;
141 
142  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
143  while (r->xge_mac.phy_command & busy_bit)
144  ;
145 
146  if (is_read)
147  v = r->xge_mac.phy_data >> 16;
148 
149  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));
150 
151  return v;
152 }
153 
154 static u32
155 ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
156 {
157  return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
158  1);
159 }
160 
161 static void
162 ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
163 {
164  (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
165  0);
166 }
167 
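A hedged usage sketch (assuming only the helpers above; `example_read_phy_id` is hypothetical): the clause-45 PHY identifier is composed from the two 16-bit ID registers, which is exactly what ixge_phy_init does further down in this file.

/* Sketch: compose the 32-bit PHY id from ID1/ID2 (illustrative). */
static u32
example_read_phy_id (ixge_device_t * xd)
{
  u32 id1 = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1);
  u32 id2 = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2);
  return (id1 << 16) | id2;
}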
168 static void
169 ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
170 {
171  ixge_main_t *xm = &ixge_main;
172  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
173  u32 v;
174 
175  v = 0;
176  v |= (sda != 0) << 3;
177  v |= (scl != 0) << 1;
178  xd->regs->i2c_control = v;
179 }
180 
181 static void
182 ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
183 {
184  ixge_main_t *xm = &ixge_main;
185  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
186  u32 v;
187 
188  v = xd->regs->i2c_control;
189  *sda = (v & (1 << 2)) != 0;
190  *scl = (v & (1 << 0)) != 0;
191 }
192 
193 static u16
194 ixge_read_eeprom (ixge_device_t * xd, u32 address)
195 {
196  ixge_regs_t *r = xd->regs;
197  u32 v;
198  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
199  /* Wait for done bit. */
200  while (!((v = r->eeprom_read) & (1 << 1)))
201  ;
202  return v >> 16;
203 }
204 
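The EEPROM is addressed in 16-bit words. A hypothetical helper, for illustration only (not in the driver), showing how a 32-bit quantity would span two consecutive word addresses:

static u32
example_read_eeprom_u32 (ixge_device_t * xd, u32 address)
{
  return (u32) ixge_read_eeprom (xd, address)
    | ((u32) ixge_read_eeprom (xd, address + 1) << 16);
}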
205 static void
206 ixge_sfp_enable_disable_laser (ixge_device_t * xd, uword enable)
207 {
208  u32 tx_disable_bit = 1 << 3;
209  if (enable)
210  xd->regs->sdp_control &= ~tx_disable_bit;
211  else
212  xd->regs->sdp_control |= tx_disable_bit;
213 }
214 
215 static void
216 ixge_sfp_enable_disable_10g (ixge_device_t * xd, uword enable)
217 {
218  u32 is_10g_bit = 1 << 5;
219  if (enable)
220  xd->regs->sdp_control |= is_10g_bit;
221  else
222  xd->regs->sdp_control &= ~is_10g_bit;
223 }
224 
225 static clib_error_t *
226 ixge_sfp_phy_init_from_eeprom (ixge_device_t * xd, u16 sfp_type)
227 {
228  u16 a, id, reg_values_addr = 0;
229 
230  a = ixge_read_eeprom (xd, 0x2b);
231  if (a == 0 || a == 0xffff)
232  return clib_error_create ("no init sequence in eeprom");
233 
234  while (1)
235  {
236  id = ixge_read_eeprom (xd, ++a);
237  if (id == 0xffff)
238  break;
239  reg_values_addr = ixge_read_eeprom (xd, ++a);
240  if (id == sfp_type)
241  break;
242  }
243  if (id != sfp_type)
244  return clib_error_create ("failed to find id 0x%x", sfp_type);
245 
246  ixge_software_firmware_sync (xd, 1 << 3);
247  while (1)
248  {
249  u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
250  if (v == 0xffff)
251  break;
252  xd->regs->core_analog_config = v;
253  }
254  ixge_software_firmware_sync_release (xd, 1 << 3);
255 
256  /* Make sure laser is off. We'll turn on the laser when
257  the interface is brought up. */
258  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
259  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);
260 
261  return 0;
262 }
263 
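The loop above implies an EEPROM layout: word 0x2b points at a list of (id, register-values address) pairs terminated by 0xffff, each values list is itself 0xffff-terminated, and both walks pre-increment the address. A standalone sketch of that walk, under those assumptions (demo_* names are hypothetical):

#include <assert.h>
#include <stdint.h>

static const uint16_t demo_eeprom[] = {
  2,				/* [0]: stands in for word 0x2b */
  0, 0,				/* [1..2]: padding */
  5, 7,				/* [3..4]: id 5 -> values at address 7 */
  0xffff,			/* [5]: terminates the id list */
  0, 0,				/* [6..7]: padding; values start at 7+1 */
  0x1234, 0xffff,		/* [8..9]: values, 0xffff-terminated */
};

int
main (void)
{
  uint16_t a = demo_eeprom[0], id, reg_values_addr = 0;
  while (1)
    {
      id = demo_eeprom[++a];
      if (id == 0xffff)
	break;
      reg_values_addr = demo_eeprom[++a];
      if (id == 5)
	break;
    }
  assert (id == 5 && reg_values_addr == 7);
  assert (demo_eeprom[reg_values_addr + 1] == 0x1234);
  return 0;
}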
264 static void
265 ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
266 {
267  u32 v;
268 
269  if (is_up)
270  {
271  /* pma/pmd 10g serial SFI. */
272  xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
273  xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;
274 
275  v = xd->regs->xge_mac.auto_negotiation_control;
276  v &= ~(7 << 13);
277  v |= (0 << 13);
278  /* Restart autoneg. */
279  v |= (1 << 12);
280  xd->regs->xge_mac.auto_negotiation_control = v;
281 
282  while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
283  ;
284 
285  v = xd->regs->xge_mac.auto_negotiation_control;
286 
287  /* link mode 10g sfi serdes */
288  v &= ~(7 << 13);
289  v |= (3 << 13);
290 
291  /* Restart autoneg. */
292  v |= (1 << 12);
293  xd->regs->xge_mac.auto_negotiation_control = v;
294 
295  xd->regs->xge_mac.link_status;
296  }
297 
298  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);
299 
300  /* Give time for link partner to notice that we're up. */
301  if (is_up && vlib_in_process_context (vlib_get_main ()))
302  {
303  vlib_process_suspend (vlib_get_main (), 300e-3);
304  }
305 }
306 
307 static ixge_dma_regs_t *
308 get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
309 {
310  ixge_regs_t *r = xd->regs;
311  ASSERT (qi < 128);
312  if (rt == VLIB_RX)
313  return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
314  else
315  return &r->tx_dma[qi];
316 }
317 
318 static clib_error_t *
319 ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
320 {
321  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
322  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
323  ixge_main_t *xm = &ixge_main;
324  ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
325  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
326 
327  if (is_up)
328  {
329  xd->regs->rx_enable |= 1;
330  xd->regs->tx_dma_control |= 1;
331  dr->control |= 1 << 25;
332  while (!(dr->control & (1 << 25)))
333  ;
334  }
335  else
336  {
337  xd->regs->rx_enable &= ~1;
338  xd->regs->tx_dma_control &= ~1;
339  }
340 
341  ixge_sfp_device_up_down (xd, is_up);
342 
343  return /* no error */ 0;
344 }
345 
346 static void
347 ixge_sfp_phy_init (ixge_device_t * xd)
348 {
349  ixge_phy_t *phy = xd->phys + xd->phy_index;
350  i2c_bus_t *ib = &xd->i2c_bus;
351 
352  ib->private_data = xd->device_index;
353  ib->put_bits = ixge_i2c_put_bits;
354  ib->get_bits = ixge_i2c_get_bits;
355  vlib_i2c_init (ib);
356 
357  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
358 
359  if (vlib_i2c_bus_timed_out (ib) || !sfp_eeprom_is_valid (&xd->sfp_eeprom))
360  xd->sfp_eeprom.id = SFP_ID_unknown;
361  else
362  {
363  /* FIXME 5 => SR/LR eeprom ID. */
364  clib_error_t *e =
365    ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->device_index);
366  if (e)
367  clib_error_report (e);
368  }
369 
370  phy->mdio_address = ~0;
371 }
372 
373 static void
374 ixge_phy_init (ixge_device_t * xd)
375 {
376  ixge_main_t *xm = &ixge_main;
377  vlib_main_t *vm = xm->vlib_main;
378  ixge_phy_t *phy = xd->phys + xd->phy_index;
379 
380  switch (xd->device_id)
381  {
382  case IXGE_82599_sfp:
383  case IXGE_82599_sfp_em:
384  case IXGE_82599_sfp_fcoe:
385  /* others? */
386  return ixge_sfp_phy_init (xd);
387 
388  default:
389  break;
390  }
391 
392  /* Probe address of phy. */
393  {
394  u32 i, v;
395 
396  phy->mdio_address = ~0;
397  for (i = 0; i < 32; i++)
398  {
399  phy->mdio_address = i;
400  v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1);
401  if (v != 0xffff && v != 0)
402  break;
403  }
404 
405  /* No PHY found? */
406  if (i >= 32)
407  return;
408  }
409 
410  phy->id =
411    ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16)
412     | ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));
413 
414  {
415  ELOG_TYPE_DECLARE (e) =
416  {
417  .function = (char *) __FUNCTION__,.format =
418  "ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
419  struct
420  {
421  u32 instance, id, address;
422  } *ed;
423  ed = ELOG_DATA (&vm->elog_main, e);
424  ed->instance = xd->device_index;
425  ed->id = phy->id;
426  ed->address = phy->mdio_address;
427  }
428 
429  /* Reset phy. */
430  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
431                      XGE_PHY_CONTROL_RESET);
432 
433  /* Wait for self-clearing reset bit to clear. */
434  do
435    {
436      vlib_process_suspend (vm, 1e-3);
437    }
438  while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL)
439         & XGE_PHY_CONTROL_RESET);
440 }
441 
442 static u8 *
443 format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
444 {
445  ixge_rx_from_hw_descriptor_t *d =
446    va_arg (*va, ixge_rx_from_hw_descriptor_t *);
447  u32 s0 = d->status[0], s2 = d->status[2];
448  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
449  uword indent = format_get_indent (s);
450 
451  s = format (s, "%s-owned",
452              (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
453              "hw");
454  s =
455    format (s, ", length this descriptor %d, l3 offset %d",
456            d->n_packet_bytes_this_descriptor,
457            IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
458  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
459    s = format (s, ", end-of-packet");
460 
461  s = format (s, "\n%U", format_white_space, indent);
462 
463  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR)
464    s = format (s, "layer2 error");
465 
466  if (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2)
467  {
468  s = format (s, "layer 2 type %d", (s0 & 0x1f));
469  return s;
470  }
471 
472  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN)
473  s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
474  format_white_space, indent);
475 
476  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
477  {
478  s = format (s, "ip4%s",
479  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
480              "");
481      if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
482        s = format (s, " checksum %s",
483                    (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
484                    "bad" : "ok");
485  }
486  if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
487  s = format (s, "ip6%s",
488  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
489  "");
490  is_tcp = is_udp = 0;
491  if ((is_ip = (is_ip4 | is_ip6)))
492  {
493  is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
494  is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
495  if (is_tcp)
496  s = format (s, ", tcp");
497  if (is_udp)
498  s = format (s, ", udp");
499  }
500 
501  if (is_tcp && (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED))
502    s = format (s, ", tcp checksum %s",
503                (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
504                "ok");
505  if (is_udp && (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
506    s = format (s, ", udp checksum %s",
507                (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
508                "ok");
509 
510  return s;
511 }
512 
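vppinfra format functions compose via "%U". A hedged usage fragment (the wrapper name is hypothetical; the call pattern is standard vppinfra):

/* Illustrative: pretty-print one received descriptor. */
static u8 *
example_format_rx_descriptor (ixge_dma_queue_t * dq, u32 i)
{
  return format (0, "%U", format_ixge_rx_from_hw_descriptor,
		 &dq->descriptors[i].rx_from_hw);
}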
513 static u8 *
514 format_ixge_tx_descriptor (u8 * s, va_list * va)
515 {
516  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
517  u32 s0 = d->status0, s1 = d->status1;
518  uword indent = format_get_indent (s);
519  u32 v;
520 
521  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
522  d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
523 
524  s = format (s, "\n%U", format_white_space, indent);
525 
526  if ((v = (s0 >> 0) & 3))
527  s = format (s, "reserved 0x%x, ", v);
528 
529  if ((v = (s0 >> 2) & 3))
530  s = format (s, "mac 0x%x, ", v);
531 
532  if ((v = (s0 >> 4) & 0xf) != 3)
533  s = format (s, "type 0x%x, ", v);
534 
535  s = format (s, "%s%s%s%s%s%s%s%s",
536  (s0 & (1 << 8)) ? "eop, " : "",
537  (s0 & (1 << 9)) ? "insert-fcs, " : "",
538  (s0 & (1 << 10)) ? "reserved26, " : "",
539  (s0 & (1 << 11)) ? "report-status, " : "",
540  (s0 & (1 << 12)) ? "reserved28, " : "",
541  (s0 & (1 << 13)) ? "is-advanced, " : "",
542  (s0 & (1 << 14)) ? "vlan-enable, " : "",
543  (s0 & (1 << 15)) ? "tx-segmentation, " : "");
544 
545  if ((v = s1 & 0xf) != 0)
546  s = format (s, "status 0x%x, ", v);
547 
548  if ((v = (s1 >> 4) & 0xf))
549  s = format (s, "context 0x%x, ", v);
550 
551  if ((v = (s1 >> 8) & 0x3f))
552  s = format (s, "options 0x%x, ", v);
553 
554  return s;
555 }
556 
557 typedef struct
558 {
559  ixge_descriptor_t before, after;
560 
561  u32 buffer_index;
562 
563  u16 device_index;
564 
565  u8 queue_index;
566 
567  u8 is_start_of_packet;
568 
569  /* Copy of VLIB buffer; packet data stored in pre_data. */
570  vlib_buffer_t buffer;
571 } ixge_rx_dma_trace_t;
572 
573 static u8 *
574 format_ixge_rx_dma_trace (u8 * s, va_list * va)
575 {
576  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
577  vlib_node_t *node = va_arg (*va, vlib_node_t *);
578  vnet_main_t *vnm = vnet_get_main ();
579  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
580  ixge_main_t *xm = &ixge_main;
581  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
582  format_function_t *f;
583  uword indent = format_get_indent (s);
584 
585  {
586  vnet_sw_interface_t *sw =
587    vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
588  s =
589  format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
590  t->queue_index);
591  }
592 
593  s = format (s, "\n%Ubefore: %U",
594              format_white_space, indent,
595              format_ixge_rx_from_hw_descriptor, &t->before);
596  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
597              format_white_space, indent,
598              t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);
599 
600  s = format (s, "\n%Ubuffer 0x%x: %U",
601              format_white_space, indent,
602              t->buffer_index, format_vlib_buffer, &t->buffer);
603 
604  s = format (s, "\n%U", format_white_space, indent);
605 
606  f = node->format_buffer;
607  if (!f || !t->is_start_of_packet)
608  f = format_hex_bytes;
609  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
610 
611  return s;
612 }
613 
614 #define foreach_ixge_error \
615  _ (none, "no error") \
616  _ (tx_full_drops, "tx ring full drops") \
617  _ (ip4_checksum_error, "ip4 checksum errors") \
618  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
619  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
620 
621 typedef enum
622 {
623 #define _(f,s) IXGE_ERROR_##f,
624  foreach_ixge_error
625 #undef _
626  IXGE_N_ERROR,
627 } ixge_error_t;
628 
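For reference, the X-macro above expands to:

typedef enum
{
  IXGE_ERROR_none,
  IXGE_ERROR_tx_full_drops,
  IXGE_ERROR_ip4_checksum_error,
  IXGE_ERROR_rx_alloc_fail,
  IXGE_ERROR_rx_alloc_no_physmem,
  IXGE_N_ERROR,
} ixge_error_t;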
629 always_inline void
630 ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
631                                        u32 s00, u32 s02,
632  u8 * next0, u8 * error0, u32 * flags0)
633 {
634  u8 is0_ip4, is0_ip6, n0, e0;
635  u32 f0;
636 
637  e0 = IXGE_ERROR_none;
638  n0 = IXGE_RX_NEXT_ETHERNET_INPUT;
639 
640  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
641  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
642 
643  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
644  ? IXGE_ERROR_ip4_checksum_error : e0);
645 
646  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
647  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
648 
649  n0 = (xd->per_interface_next_index != ~0) ?
650  xd->per_interface_next_index : n0;
651 
652  /* Check for error. */
653  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
654 
658 
662 
663  *error0 = e0;
664  *next0 = n0;
665  *flags0 = f0;
666 }
667 
668 always_inline void
669 ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
670                                        u32 s00, u32 s02,
671  u32 s10, u32 s12,
672  u8 * next0, u8 * error0, u32 * flags0,
673  u8 * next1, u8 * error1, u32 * flags1)
674 {
675  u8 is0_ip4, is0_ip6, n0, e0;
676  u8 is1_ip4, is1_ip6, n1, e1;
677  u32 f0, f1;
678 
679  e0 = e1 = IXGE_ERROR_none;
680  n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;
681 
682  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
683  is1_ip4 = s12 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
684 
685  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
686  n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
687 
688  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
689  ? IXGE_ERROR_ip4_checksum_error : e0);
690  e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
691  ? IXGE_ERROR_ip4_checksum_error : e1);
692 
693  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
694  is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
695 
696  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
697  n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
698 
699  n0 = (xd->per_interface_next_index != ~0) ?
700  xd->per_interface_next_index : n0;
701  n1 = (xd->per_interface_next_index != ~0) ?
702  xd->per_interface_next_index : n1;
703 
704  /* Check for error. */
705  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
706  n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
707 
708  *error0 = e0;
709  *error1 = e1;
710 
711  *next0 = n0;
712  *next1 = n1;
713 
720 
727 
728  *flags0 = f0;
729  *flags1 = f1;
730 }
731 
732 static void
733 ixge_rx_trace (ixge_main_t * xm,
734                ixge_device_t * xd,
735  ixge_dma_queue_t * dq,
736  ixge_descriptor_t * before_descriptors,
737  u32 * before_buffers,
738  ixge_descriptor_t * after_descriptors, uword n_descriptors)
739 {
740  vlib_main_t *vm = xm->vlib_main;
741  vlib_node_runtime_t *node = dq->rx.node;
742  ixge_rx_from_hw_descriptor_t *bd;
743  ixge_rx_to_hw_descriptor_t *ad;
744  u32 *b, n_left, is_sop, next_index_sop;
745 
746  n_left = n_descriptors;
747  b = before_buffers;
748  bd = &before_descriptors->rx_from_hw;
749  ad = &after_descriptors->rx_to_hw;
750  is_sop = dq->rx.is_start_of_packet;
751  next_index_sop = dq->rx.saved_start_of_packet_next_index;
752 
753  while (n_left >= 2)
754  {
755  u32 bi0, bi1, flags0, flags1;
756  vlib_buffer_t *b0, *b1;
757  ixge_rx_dma_trace_t *t0, *t1;
758  u8 next0, error0, next1, error1;
759 
760  bi0 = b[0];
761  bi1 = b[1];
762  n_left -= 2;
763 
764  b0 = vlib_get_buffer (vm, bi0);
765  b1 = vlib_get_buffer (vm, bi1);
766 
767  ixge_rx_next_and_error_from_status_x2 (xd,
768                                         bd[0].status[0], bd[0].status[2],
769  bd[1].status[0], bd[1].status[2],
770  &next0, &error0, &flags0,
771  &next1, &error1, &flags1);
772 
773  next_index_sop = is_sop ? next0 : next_index_sop;
774  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
775  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
776  t0->is_start_of_packet = is_sop;
777  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
778 
779  next_index_sop = is_sop ? next1 : next_index_sop;
780  vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
781  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
782  t1->is_start_of_packet = is_sop;
783  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
784 
785  t0->queue_index = dq->queue_index;
786  t1->queue_index = dq->queue_index;
787  t0->device_index = xd->device_index;
788  t1->device_index = xd->device_index;
789  t0->before.rx_from_hw = bd[0];
790  t1->before.rx_from_hw = bd[1];
791  t0->after.rx_to_hw = ad[0];
792  t1->after.rx_to_hw = ad[1];
793  t0->buffer_index = bi0;
794  t1->buffer_index = bi1;
795  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
796  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
797  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
798  sizeof (t0->buffer.pre_data));
799  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
800  sizeof (t1->buffer.pre_data));
801 
802  b += 2;
803  bd += 2;
804  ad += 2;
805  }
806 
807  while (n_left >= 1)
808  {
809  u32 bi0, flags0;
810  vlib_buffer_t *b0;
811  ixge_rx_dma_trace_t *t0;
812  u8 next0, error0;
813 
814  bi0 = b[0];
815  n_left -= 1;
816 
817  b0 = vlib_get_buffer (vm, bi0);
818 
819  ixge_rx_next_and_error_from_status_x1 (xd,
820                                         bd[0].status[0], bd[0].status[2],
821  &next0, &error0, &flags0);
822 
823  next_index_sop = is_sop ? next0 : next_index_sop;
824  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
825  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
826  t0->is_start_of_packet = is_sop;
827  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
828 
829  t0->queue_index = dq->queue_index;
830  t0->device_index = xd->device_index;
831  t0->before.rx_from_hw = bd[0];
832  t0->after.rx_to_hw = ad[0];
833  t0->buffer_index = bi0;
834  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
835  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
836  sizeof (t0->buffer.pre_data));
837 
838  b += 1;
839  bd += 1;
840  ad += 1;
841  }
842 }
843 
844 typedef struct
845 {
846  ixge_tx_descriptor_t descriptor;
847 
848  u32 buffer_index;
849 
850  u16 device_index;
851 
852  u8 queue_index;
853 
854  u8 is_start_of_packet;
855 
856  /* Copy of VLIB buffer; packet data stored in pre_data. */
857  vlib_buffer_t buffer;
858 } ixge_tx_dma_trace_t;
859 
860 static u8 *
861 format_ixge_tx_dma_trace (u8 * s, va_list * va)
862 {
863  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
864  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
865  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
866  vnet_main_t *vnm = vnet_get_main ();
867  ixge_main_t *xm = &ixge_main;
868  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
869  format_function_t *f;
870  uword indent = format_get_indent (s);
871 
872  {
873  vnet_sw_interface_t *sw =
874    vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
875  s =
876  format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
877  t->queue_index);
878  }
879 
880  s = format (s, "\n%Udescriptor: %U",
881              format_white_space, indent,
882              format_ixge_tx_descriptor, &t->descriptor);
883 
884  s = format (s, "\n%Ubuffer 0x%x: %U",
885              format_white_space, indent,
886              t->buffer_index, format_vlib_buffer, &t->buffer);
887 
888  s = format (s, "\n%U", format_white_space, indent);
889 
890  f = node->format_buffer;
891  if (!f || !t->is_start_of_packet)
892  f = format_hex_bytes;
893  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
894 
895  return s;
896 }
897 
898 typedef struct
899 {
900  vlib_node_runtime_t *node;
901 
902  u32 is_start_of_packet;
903 
904  u32 n_bytes_in_packet;
905 
906  ixge_tx_descriptor_t *start_of_packet_descriptor;
907 } ixge_tx_state_t;
908 
909 static void
910 ixge_tx_trace (ixge_main_t * xm,
911                ixge_device_t * xd,
912  ixge_dma_queue_t * dq,
913  ixge_tx_state_t * tx_state,
914  ixge_tx_descriptor_t * descriptors,
915  u32 * buffers, uword n_descriptors)
916 {
917  vlib_main_t *vm = xm->vlib_main;
918  vlib_node_runtime_t *node = tx_state->node;
919  ixge_tx_descriptor_t *d;
920  u32 *b, n_left, is_sop;
921 
922  n_left = n_descriptors;
923  b = buffers;
924  d = descriptors;
925  is_sop = tx_state->is_start_of_packet;
926 
927  while (n_left >= 2)
928  {
929  u32 bi0, bi1;
930  vlib_buffer_t *b0, *b1;
931  ixge_tx_dma_trace_t *t0, *t1;
932 
933  bi0 = b[0];
934  bi1 = b[1];
935  n_left -= 2;
936 
937  b0 = vlib_get_buffer (vm, bi0);
938  b1 = vlib_get_buffer (vm, bi1);
939 
940  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
941  t0->is_start_of_packet = is_sop;
942  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
943 
944  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
945  t1->is_start_of_packet = is_sop;
946  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
947 
948  t0->queue_index = dq->queue_index;
949  t1->queue_index = dq->queue_index;
950  t0->device_index = xd->device_index;
951  t1->device_index = xd->device_index;
952  t0->descriptor = d[0];
953  t1->descriptor = d[1];
954  t0->buffer_index = bi0;
955  t1->buffer_index = bi1;
956  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
957  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
958  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
959  sizeof (t0->buffer.pre_data));
960  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
961  sizeof (t1->buffer.pre_data));
962 
963  b += 2;
964  d += 2;
965  }
966 
967  while (n_left >= 1)
968  {
969  u32 bi0;
970  vlib_buffer_t *b0;
971  ixge_tx_dma_trace_t *t0;
972 
973  bi0 = b[0];
974  n_left -= 1;
975 
976  b0 = vlib_get_buffer (vm, bi0);
977 
978  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
979  t0->is_start_of_packet = is_sop;
980  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
981 
982  t0->queue_index = dq->queue_index;
983  t0->device_index = xd->device_index;
984  t0->descriptor = d[0];
985  t0->buffer_index = bi0;
986  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
987  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
988  sizeof (t0->buffer.pre_data));
989 
990  b += 1;
991  d += 1;
992  }
993 }
994 
995 always_inline uword
996 ixge_ring_sub (ixge_dma_queue_t * q, u32 i0, u32 i1)
997 {
998  i32 d = i1 - i0;
999  ASSERT (i0 < q->n_descriptors);
1000  ASSERT (i1 < q->n_descriptors);
1001  return d < 0 ? q->n_descriptors + d : d;
1002 }
1003 
1004 always_inline uword
1005 ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
1006 {
1007  u32 d = i0 + i1;
1008  ASSERT (i0 < q->n_descriptors);
1009  ASSERT (i1 < q->n_descriptors);
1010  d -= d >= q->n_descriptors ? q->n_descriptors : 0;
1011  return d;
1012 }
1013 
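A standalone sketch (hypothetical demo_* names) of the modular ring arithmetic the two helpers above implement: ixge_ring_sub measures how many descriptors lie between two indices, with explicit wrap-around instead of power-of-two masking.

#include <assert.h>
#include <stdint.h>

static uint32_t
demo_ring_sub (uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
  int32_t d = (int32_t) i1 - (int32_t) i0;
  return d < 0 ? n_descriptors + d : d;
}

int
main (void)
{
  /* No wrap: head 500, tail 510 -> 10 outstanding. */
  assert (demo_ring_sub (512, 500, 510) == 10);
  /* Wrapped: head 510, tail 4 -> 6 outstanding. */
  assert (demo_ring_sub (512, 510, 4) == 6);
  return 0;
}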
1014 always_inline uword
1015 ixge_tx_descriptor_matches_template (ixge_main_t * xm,
1016                                      ixge_tx_descriptor_t * d)
1017 {
1018  u32 cmp;
1019 
1020  cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
1021         ^ xm->tx_descriptor_template.status0);
1022  if (cmp)
1023  return 0;
1024  cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
1025         ^ xm->tx_descriptor_template.status1);
1026  if (cmp)
1027  return 0;
1028 
1029  return 1;
1030 }
1031 
1032 static uword
1033 ixge_tx_no_wrap (ixge_main_t * xm,
1034                  ixge_device_t * xd,
1035  ixge_dma_queue_t * dq,
1036  u32 * buffers,
1037  u32 start_descriptor_index,
1038  u32 n_descriptors, ixge_tx_state_t * tx_state)
1039 {
1040  vlib_main_t *vm = xm->vlib_main;
1041  ixge_tx_descriptor_t *d, *d_sop;
1042  u32 n_left = n_descriptors;
1043  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
1044  u32 *to_tx =
1045  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1046  u32 is_sop = tx_state->is_start_of_packet;
1047  u32 len_sop = tx_state->n_bytes_in_packet;
1048  u16 template_status = xm->tx_descriptor_template.status0;
1049  u32 descriptor_prefetch_rotor = 0;
1050 
1051  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1052  d = &dq->descriptors[start_descriptor_index].tx;
1053  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;
1054 
1055  while (n_left >= 4)
1056  {
1057  vlib_buffer_t *b0, *b1;
1058  u32 bi0, fi0, len0;
1059  u32 bi1, fi1, len1;
1060  u8 is_eop0, is_eop1;
1061 
1062  /* Prefetch next iteration. */
1063  vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
1064  vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
1065 
1066  if ((descriptor_prefetch_rotor & 0x3) == 0)
1067  CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
1068 
1069  descriptor_prefetch_rotor += 2;
1070 
1071  bi0 = buffers[0];
1072  bi1 = buffers[1];
1073 
1074  to_free[0] = fi0 = to_tx[0];
1075  to_tx[0] = bi0;
1076  to_free += fi0 != 0;
1077 
1078  to_free[0] = fi1 = to_tx[1];
1079  to_tx[1] = bi1;
1080  to_free += fi1 != 0;
1081 
1082  buffers += 2;
1083  n_left -= 2;
1084  to_tx += 2;
1085 
1086  b0 = vlib_get_buffer (vm, bi0);
1087  b1 = vlib_get_buffer (vm, bi1);
1088 
1089  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1090  is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1091 
1092  len0 = b0->current_length;
1093  len1 = b1->current_length;
1094 
1095       ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
1096       ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));
1097 
1098       d[0].buffer_address =
1099         vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
1100       d[1].buffer_address =
1101         vlib_get_buffer_data_physical_address (vm, bi1) + b1->current_data;
1102 
1103  d[0].n_bytes_this_buffer = len0;
1104  d[1].n_bytes_this_buffer = len1;
1105 
1106       d[0].status0 =
1107         template_status | (is_eop0 <<
1108                            IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1109       d[1].status0 =
1110         template_status | (is_eop1 <<
1111                            IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1112 
1113  len_sop = (is_sop ? 0 : len_sop) + len0;
1114       d_sop[0].status1 =
1115         IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
1116       d += 1;
1117  d_sop = is_eop0 ? d : d_sop;
1118 
1119  is_sop = is_eop0;
1120 
1121  len_sop = (is_sop ? 0 : len_sop) + len1;
1122       d_sop[0].status1 =
1123         IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
1124       d += 1;
1125  d_sop = is_eop1 ? d : d_sop;
1126 
1127  is_sop = is_eop1;
1128  }
1129 
1130  while (n_left > 0)
1131  {
1132  vlib_buffer_t *b0;
1133  u32 bi0, fi0, len0;
1134  u8 is_eop0;
1135 
1136  bi0 = buffers[0];
1137 
1138  to_free[0] = fi0 = to_tx[0];
1139  to_tx[0] = bi0;
1140  to_free += fi0 != 0;
1141 
1142  buffers += 1;
1143  n_left -= 1;
1144  to_tx += 1;
1145 
1146  b0 = vlib_get_buffer (vm, bi0);
1147 
1148  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1149 
1150  len0 = b0->current_length;
1151 
1152       ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
1153 
1154       d[0].buffer_address =
1155         vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
1156 
1157  d[0].n_bytes_this_buffer = len0;
1158 
1159  d[0].status0 =
1160         template_status | (is_eop0 <<
1161                            IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1162 
1163  len_sop = (is_sop ? 0 : len_sop) + len0;
1164       d_sop[0].status1 =
1165         IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
1166       d += 1;
1167  d_sop = is_eop0 ? d : d_sop;
1168 
1169  is_sop = is_eop0;
1170  }
1171 
1172  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
1173  {
1174       to_tx =
1175         vec_elt_at_index (dq->descriptor_buffer_indices,
1176                           start_descriptor_index);
1177  ixge_tx_trace (xm, xd, dq, tx_state,
1178  &dq->descriptors[start_descriptor_index].tx, to_tx,
1179  n_descriptors);
1180  }
1181 
1182  _vec_len (xm->tx_buffers_pending_free) =
1183  to_free - xm->tx_buffers_pending_free;
1184 
1185  /* When we are done d_sop can point to end of ring. Wrap it if so. */
1186  {
1187  ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;
1188 
1189  ASSERT (d_sop - d_start <= dq->n_descriptors);
1190  d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
1191  }
1192 
1193  tx_state->is_start_of_packet = is_sop;
1194  tx_state->start_of_packet_descriptor = d_sop;
1195  tx_state->n_bytes_in_packet = len_sop;
1196 
1197  return n_descriptors;
1198 }
1199 
1200 static uword
1201 ixge_interface_tx (vlib_main_t * vm,
1202                    vlib_node_runtime_t * node, vlib_frame_t * f)
1203 {
1204  ixge_main_t *xm = &ixge_main;
1205  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
1206  ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
1207  ixge_dma_queue_t *dq;
1208  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
1209  u32 queue_index = 0; /* fixme parameter */
1210  ixge_tx_state_t tx_state;
1211 
1212  tx_state.node = node;
1213  tx_state.is_start_of_packet = 1;
1214  tx_state.start_of_packet_descriptor = 0;
1215  tx_state.n_bytes_in_packet = 0;
1216 
1217  from = vlib_frame_vector_args (f);
1218 
1219  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
1220 
1221  dq->head_index = dq->tx.head_index_write_back[0];
1222 
1223  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
1224  n_left_tx = dq->n_descriptors - 1;
1225  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);
1226 
1227  _vec_len (xm->tx_buffers_pending_free) = 0;
1228 
1229  n_descriptors_to_tx = f->n_vectors;
1230  n_tail_drop = 0;
1231  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
1232  {
1233  i32 i, n_ok, i_eop, i_sop;
1234 
1235  i_sop = i_eop = ~0;
1236  for (i = n_left_tx - 1; i >= 0; i--)
1237  {
1238  vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
1239  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1240  {
1241  if (i_sop != ~0 && i_eop != ~0)
1242  break;
1243  i_eop = i;
1244  i_sop = i + 1;
1245  }
1246  }
1247  if (i == 0)
1248  n_ok = 0;
1249  else
1250  n_ok = i_eop + 1;
1251 
1252  {
1253  ELOG_TYPE_DECLARE (e) =
1254  {
1255  .function = (char *) __FUNCTION__,.format =
1256  "ixge %d, ring full to tx %d head %d tail %d",.format_args =
1257  "i2i2i2i2",};
1258  struct
1259  {
1260  u16 instance, to_tx, head, tail;
1261  } *ed;
1262  ed = ELOG_DATA (&vm->elog_main, e);
1263  ed->instance = xd->device_index;
1264  ed->to_tx = n_descriptors_to_tx;
1265  ed->head = dq->head_index;
1266  ed->tail = dq->tail_index;
1267  }
1268 
1269  if (n_ok < n_descriptors_to_tx)
1270  {
1271  n_tail_drop = n_descriptors_to_tx - n_ok;
1272  vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
1273  vlib_error_count (vm, ixge_input_node.index,
1274  IXGE_ERROR_tx_full_drops, n_tail_drop);
1275  }
1276 
1277  n_descriptors_to_tx = n_ok;
1278  }
1279 
1280  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;
1281 
1282  /* Process from tail to end of descriptor ring. */
1283  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
1284  {
1285  u32 n =
1286  clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
1287  n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
1288  from += n;
1289  n_descriptors_to_tx -= n;
1290  dq->tail_index += n;
1291  ASSERT (dq->tail_index <= dq->n_descriptors);
1292  if (dq->tail_index == dq->n_descriptors)
1293  dq->tail_index = 0;
1294  }
1295 
1296  if (n_descriptors_to_tx > 0)
1297  {
1298  u32 n =
1299  ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
1300  from += n;
1301  ASSERT (n == n_descriptors_to_tx);
1302  dq->tail_index += n;
1303  ASSERT (dq->tail_index <= dq->n_descriptors);
1304  if (dq->tail_index == dq->n_descriptors)
1305  dq->tail_index = 0;
1306  }
1307 
1308  /* We should only get full packets. */
1309  ASSERT (tx_state.is_start_of_packet);
1310 
1311  /* Report status when last descriptor is done. */
1312  {
1313  u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
1314     ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
1315     d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
1316   }
1317 
1318  /* Give new descriptors to hardware. */
1319  {
1320  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);
1321 
1321 
1322     CLIB_MEMORY_BARRIER ();
1323 
1324  dr->tail_index = dq->tail_index;
1325  }
1326 
1327  /* Free any buffers that are done. */
1328  {
1329  u32 n = _vec_len (xm->tx_buffers_pending_free);
1330  if (n > 0)
1331       {
1332         vlib_buffer_free_no_next (vm, xm->tx_buffers_pending_free, n);
1333         _vec_len (xm->tx_buffers_pending_free) = 0;
1334  ASSERT (dq->tx.n_buffers_on_ring >= n);
1335  dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
1336  }
1337  }
1338 
1339  return f->n_vectors;
1340 }
1341 
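A standalone sketch of the capacity rule used above ("head == tail means ring is empty", so at most n_descriptors - 1 slots are usable; otherwise a completely full ring would be indistinguishable from an empty one):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t n = 4096, head = 7, tail = 7, used;
  /* Enqueue the maximum n - 1 descriptors. */
  tail = (tail + (n - 1)) % n;
  used = (tail + n - head) % n;
  assert (used == n - 1);
  /* One more enqueue would make tail == head again, i.e. "empty". */
  return 0;
}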
1342 static uword
1343 ixge_rx_queue_no_wrap (ixge_main_t * xm,
1344                        ixge_device_t * xd,
1345  ixge_dma_queue_t * dq,
1346  u32 start_descriptor_index, u32 n_descriptors)
1347 {
1348  vlib_main_t *vm = xm->vlib_main;
1349  vlib_node_runtime_t *node = dq->rx.node;
1350  ixge_descriptor_t *d;
1351  static ixge_descriptor_t *d_trace_save;
1352  static u32 *d_trace_buffers;
1353  u32 n_descriptors_left = n_descriptors;
1354  u32 *to_rx =
1355  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1356  u32 *to_add;
1357  u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
1358  u32 bi_last = dq->rx.saved_last_buffer_index;
1359  u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
1360  u32 is_sop = dq->rx.is_start_of_packet;
1361  u32 next_index, n_left_to_next, *to_next;
1362  u32 n_packets = 0;
1363  u32 n_bytes = 0;
1364  u32 n_trace = vlib_get_trace_count (vm, node);
1365  vlib_buffer_t *b_last, b_dummy;
1366 
1367  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1368  d = &dq->descriptors[start_descriptor_index];
1369 
1370  b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_dummy;
1371  next_index = dq->rx.next_index;
1372 
1373  if (n_trace > 0)
1374  {
1375  u32 n = clib_min (n_trace, n_descriptors);
1376  if (d_trace_save)
1377  {
1378  _vec_len (d_trace_save) = 0;
1379  _vec_len (d_trace_buffers) = 0;
1380  }
1381  vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
1382  vec_add (d_trace_buffers, to_rx, n);
1383  }
1384 
1385  {
1386  uword l = vec_len (xm->rx_buffers_to_add);
1387 
1388  if (l < n_descriptors_left)
1389  {
1390  u32 n_to_alloc = 2 * dq->n_descriptors - l;
1391  u32 n_allocated;
1392 
1393  vec_resize (xm->rx_buffers_to_add, n_to_alloc);
1394 
1395  _vec_len (xm->rx_buffers_to_add) = l;
1396  n_allocated = vlib_buffer_alloc_from_free_list
1397         (vm, xm->rx_buffers_to_add + l, n_to_alloc,
1398          xm->vlib_buffer_free_list_index);
1399       _vec_len (xm->rx_buffers_to_add) += n_allocated;
1400 
1401  /* Handle transient allocation failure */
1402  if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
1403  {
1404  if (n_allocated == 0)
1405  vlib_error_count (vm, ixge_input_node.index,
1406  IXGE_ERROR_rx_alloc_no_physmem, 1);
1407  else
1408  vlib_error_count (vm, ixge_input_node.index,
1409  IXGE_ERROR_rx_alloc_fail, 1);
1410 
1411  n_descriptors_left = l + n_allocated;
1412  }
1413  n_descriptors = n_descriptors_left;
1414  }
1415 
1416  /* Add buffers from end of vector going backwards. */
1417  to_add = vec_end (xm->rx_buffers_to_add) - 1;
1418  }
1419 
1420  while (n_descriptors_left > 0)
1421  {
1422  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1423 
1424  while (n_descriptors_left >= 4 && n_left_to_next >= 2)
1425  {
1426  vlib_buffer_t *b0, *b1;
1427  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1428  u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
1429  u8 is_eop0, error0, next0;
1430  u8 is_eop1, error1, next1;
1431  ixge_descriptor_t d0, d1;
1432 
1433  vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
1434  vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
1435 
1436  CLIB_PREFETCH (d + 2, 32, STORE);
1437 
1438  d0.as_u32x4 = d[0].as_u32x4;
1439  d1.as_u32x4 = d[1].as_u32x4;
1440 
1441  s20 = d0.rx_from_hw.status[2];
1442  s21 = d1.rx_from_hw.status[2];
1443 
1444  s00 = d0.rx_from_hw.status[0];
1445  s01 = d1.rx_from_hw.status[0];
1446 
1447           if (!
1448               ((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
1449             goto found_hw_owned_descriptor_x2;
1450 
1451  bi0 = to_rx[0];
1452  bi1 = to_rx[1];
1453 
1454  ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
1455  fi0 = to_add[0];
1456  fi1 = to_add[-1];
1457 
1458  to_rx[0] = fi0;
1459  to_rx[1] = fi1;
1460  to_rx += 2;
1461  to_add -= 2;
1462 
1463  ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1464  vlib_buffer_is_known (vm, bi0));
1465  ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1466  vlib_buffer_is_known (vm, bi1));
1467  ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1468  vlib_buffer_is_known (vm, fi0));
1469  ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1470  vlib_buffer_is_known (vm, fi1));
1471 
1472  b0 = vlib_get_buffer (vm, bi0);
1473  b1 = vlib_get_buffer (vm, bi1);
1474 
1475  /*
1476  * Turn this on if you run into
1477  * "bad monkey" contexts, and you want to know exactly
1478  * which nodes they've visited... See main.c...
1479  */
1480           VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
1481           VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
1482 
1483           CLIB_PREFETCH (b0->data, CLIB_CACHE_LINE_BYTES, LOAD);
1484           CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
1485 
1486  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1487  is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1488 
1489  ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
1490  &next0, &error0, &flags0,
1491  &next1, &error1, &flags1);
1492 
1493  next0 = is_sop ? next0 : next_index_sop;
1494  next1 = is_eop0 ? next1 : next0;
1495  next_index_sop = next1;
1496 
1497  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1498  b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1499 
1500  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1501  vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1502  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1503  vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1504 
1505  b0->error = node->errors[error0];
1506  b1->error = node->errors[error1];
1507 
1508           len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1509           len1 = d1.rx_from_hw.n_packet_bytes_this_descriptor;
1510           n_bytes += len0 + len1;
1511  n_packets += is_eop0 + is_eop1;
1512 
1513  /* Give new buffers to hardware. */
1514           d0.rx_to_hw.tail_address =
1515             vlib_get_buffer_data_physical_address (vm, fi0);
1516           d1.rx_to_hw.tail_address =
1517             vlib_get_buffer_data_physical_address (vm, fi1);
1518           d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
1519           d1.rx_to_hw.head_address = d1.rx_to_hw.tail_address;
1520           d[0].as_u32x4 = d0.as_u32x4;
1521  d[1].as_u32x4 = d1.as_u32x4;
1522 
1523  d += 2;
1524  n_descriptors_left -= 2;
1525 
1526  /* Point to either l2 or l3 header depending on next. */
1527           l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
1528             ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
1529           l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
1530             ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;
1531 
1532  b0->current_length = len0 - l3_offset0;
1533  b1->current_length = len1 - l3_offset1;
1534  b0->current_data = l3_offset0;
1535  b1->current_data = l3_offset1;
1536 
1537  b_last->next_buffer = is_sop ? ~0 : bi0;
1538  b0->next_buffer = is_eop0 ? ~0 : bi1;
1539  bi_last = bi1;
1540  b_last = b1;
1541 
1542  if (CLIB_DEBUG > 0)
1543  {
1544  u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1545  u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1546 
1547  if (is_eop0)
1548  {
1549  u8 *msg = vlib_validate_buffer (vm, bi_sop0,
1550  /* follow_buffer_next */ 1);
1551  ASSERT (!msg);
1552  }
1553  if (is_eop1)
1554  {
1555  u8 *msg = vlib_validate_buffer (vm, bi_sop1,
1556  /* follow_buffer_next */ 1);
1557  ASSERT (!msg);
1558  }
1559  }
1560  if (0) /* "Dave" version */
1561  {
1562  u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1563  u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1564 
1565  if (is_eop0)
1566  {
1567  to_next[0] = bi_sop0;
1568  to_next++;
1569  n_left_to_next--;
1570 
1571  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1572  to_next, n_left_to_next,
1573  bi_sop0, next0);
1574  }
1575  if (is_eop1)
1576  {
1577  to_next[0] = bi_sop1;
1578  to_next++;
1579  n_left_to_next--;
1580 
1581  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1582  to_next, n_left_to_next,
1583  bi_sop1, next1);
1584  }
1585  is_sop = is_eop1;
1586  bi_sop = bi_sop1;
1587  }
1588  if (1) /* "Eliot" version */
1589  {
1590  /* Speculatively enqueue to cached next. */
1591  u8 saved_is_sop = is_sop;
1592  u32 bi_sop_save = bi_sop;
1593 
1594  bi_sop = saved_is_sop ? bi0 : bi_sop;
1595  to_next[0] = bi_sop;
1596  to_next += is_eop0;
1597  n_left_to_next -= is_eop0;
1598 
1599  bi_sop = is_eop0 ? bi1 : bi_sop;
1600  to_next[0] = bi_sop;
1601  to_next += is_eop1;
1602  n_left_to_next -= is_eop1;
1603 
1604  is_sop = is_eop1;
1605 
1606  if (PREDICT_FALSE
1607  (!(next0 == next_index && next1 == next_index)))
1608  {
1609  /* Undo speculation. */
1610  to_next -= is_eop0 + is_eop1;
1611  n_left_to_next += is_eop0 + is_eop1;
1612 
1613  /* Re-do both descriptors being careful about where we enqueue. */
1614  bi_sop = saved_is_sop ? bi0 : bi_sop_save;
1615  if (is_eop0)
1616  {
1617  if (next0 != next_index)
1618  vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1619  else
1620  {
1621  to_next[0] = bi_sop;
1622  to_next += 1;
1623  n_left_to_next -= 1;
1624  }
1625  }
1626 
1627  bi_sop = is_eop0 ? bi1 : bi_sop;
1628  if (is_eop1)
1629  {
1630  if (next1 != next_index)
1631  vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
1632  else
1633  {
1634  to_next[0] = bi_sop;
1635  to_next += 1;
1636  n_left_to_next -= 1;
1637  }
1638  }
1639 
1640  /* Switch cached next index when next for both packets is the same. */
1641  if (is_eop0 && is_eop1 && next0 == next1)
1642  {
1643  vlib_put_next_frame (vm, node, next_index,
1644  n_left_to_next);
1645  next_index = next0;
1646  vlib_get_next_frame (vm, node, next_index,
1647  to_next, n_left_to_next);
1648  }
1649  }
1650  }
1651  }
1652 
1653  /* Bail out of dual loop and proceed with single loop. */
1654  found_hw_owned_descriptor_x2:
1655 
1656  while (n_descriptors_left > 0 && n_left_to_next > 0)
1657  {
1658  vlib_buffer_t *b0;
1659  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1660  u8 is_eop0, error0, next0;
1661  ixge_descriptor_t d0;
1662 
1663  d0.as_u32x4 = d[0].as_u32x4;
1664 
1665  s20 = d0.rx_from_hw.status[2];
1666  s00 = d0.rx_from_hw.status[0];
1667 
1668       if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
1669         goto found_hw_owned_descriptor_x1;
1670 
1671  bi0 = to_rx[0];
1672  ASSERT (to_add >= xm->rx_buffers_to_add);
1673  fi0 = to_add[0];
1674 
1675  to_rx[0] = fi0;
1676  to_rx += 1;
1677  to_add -= 1;
1678 
1679  ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1680  vlib_buffer_is_known (vm, bi0));
1681  ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1682  vlib_buffer_is_known (vm, fi0));
1683 
1684  b0 = vlib_get_buffer (vm, bi0);
1685 
1686  /*
1687  * Turn this on if you run into
1688  * "bad monkey" contexts, and you want to know exactly
1689  * which nodes they've visited...
1690        */
1691       VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
1692 
1693       is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1694       ixge_rx_next_and_error_from_status_x1
1695         (xd, s00, s20, &next0, &error0, &flags0);
1696 
1697  next0 = is_sop ? next0 : next_index_sop;
1698  next_index_sop = next0;
1699 
1700  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1701 
1702  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1703  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1704 
1705  b0->error = node->errors[error0];
1706       b0->error = node->errors[error0];
1707       len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1708       n_bytes += len0;
1709  n_packets += is_eop0;
1710 
1711  /* Give new buffer to hardware. */
1712       d0.rx_to_hw.tail_address =
1713         vlib_get_buffer_data_physical_address (vm, fi0);
1714       d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
1715       d[0].as_u32x4 = d0.as_u32x4;
1716 
1717  d += 1;
1718  n_descriptors_left -= 1;
1719 
1720  /* Point to either l2 or l3 header depending on next. */
1721       l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
1722         ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
1723       b0->current_length = len0 - l3_offset0;
1724  b0->current_data = l3_offset0;
1725 
1726  b_last->next_buffer = is_sop ? ~0 : bi0;
1727  bi_last = bi0;
1728  b_last = b0;
1729 
1730  bi_sop = is_sop ? bi0 : bi_sop;
1731 
1732  if (CLIB_DEBUG > 0 && is_eop0)
1733  {
1734  u8 *msg =
1735  vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
1736  ASSERT (!msg);
1737  }
1738 
1739  if (0) /* "Dave" version */
1740  {
1741  if (is_eop0)
1742  {
1743  to_next[0] = bi_sop;
1744  to_next++;
1745  n_left_to_next--;
1746 
1747  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1748  to_next, n_left_to_next,
1749  bi_sop, next0);
1750  }
1751  }
1752  if (1) /* "Eliot" version */
1753  {
1754  if (PREDICT_TRUE (next0 == next_index))
1755  {
1756  to_next[0] = bi_sop;
1757  to_next += is_eop0;
1758  n_left_to_next -= is_eop0;
1759  }
1760  else
1761  {
1762  if (next0 != next_index && is_eop0)
1763  vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1764 
1765  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1766  next_index = next0;
1767  vlib_get_next_frame (vm, node, next_index,
1768  to_next, n_left_to_next);
1769  }
1770  }
1771  is_sop = is_eop0;
1772  }
1773  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1774  }
1775 
1776 found_hw_owned_descriptor_x1:
1777  if (n_descriptors_left > 0)
1778  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1779 
1780  _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
1781 
1782  {
1783  u32 n_done = n_descriptors - n_descriptors_left;
1784 
1785  if (n_trace > 0 && n_done > 0)
1786  {
1787  u32 n = clib_min (n_trace, n_done);
1788  ixge_rx_trace (xm, xd, dq,
1789  d_trace_save,
1790  d_trace_buffers,
1791  &dq->descriptors[start_descriptor_index], n);
1792  vlib_set_trace_count (vm, node, n_trace - n);
1793  }
1794  if (d_trace_save)
1795  {
1796  _vec_len (d_trace_save) = 0;
1797  _vec_len (d_trace_buffers) = 0;
1798  }
1799 
1800     /* Don't keep a reference to b_last if we don't have to.
1801        Otherwise we can over-write a next_buffer pointer after having
1802        already enqueued a packet. */
1803  if (is_sop)
1804  {
1805  b_last->next_buffer = ~0;
1806  bi_last = ~0;
1807  }
1808 
1809  dq->rx.n_descriptors_done_this_call = n_done;
1810  dq->rx.n_descriptors_done_total += n_done;
1811  dq->rx.is_start_of_packet = is_sop;
1812  dq->rx.saved_start_of_packet_buffer_index = bi_sop;
1813  dq->rx.saved_last_buffer_index = bi_last;
1814  dq->rx.saved_start_of_packet_next_index = next_index_sop;
1815  dq->rx.next_index = next_index;
1816  dq->rx.n_bytes += n_bytes;
1817 
1818  return n_packets;
1819  }
1820 }
1821 
1822 static uword
1823 ixge_rx_queue (ixge_main_t * xm,
1824                ixge_device_t * xd,
1825  vlib_node_runtime_t * node, u32 queue_index)
1826 {
1827  ixge_dma_queue_t *dq =
1828     vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
1829   ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, queue_index);
1830   uword n_packets = 0;
1831  u32 hw_head_index, sw_head_index;
1832 
1833  /* One time initialization. */
1834  if (!dq->rx.node)
1835  {
1836  dq->rx.node = node;
1837  dq->rx.is_start_of_packet = 1;
1838  dq->rx.saved_start_of_packet_buffer_index = ~0;
1839  dq->rx.saved_last_buffer_index = ~0;
1840  }
1841 
1842  dq->rx.next_index = node->cached_next_index;
1843 
1844  dq->rx.n_descriptors_done_total = 0;
1845  dq->rx.n_descriptors_done_this_call = 0;
1846  dq->rx.n_bytes = 0;
1847 
1848  /* Fetch head from hardware and compare to where we think we are. */
1849  hw_head_index = dr->head_index;
1850  sw_head_index = dq->head_index;
1851 
1852  if (hw_head_index == sw_head_index)
1853  goto done;
1854 
1855  if (hw_head_index < sw_head_index)
1856  {
1857  u32 n_tried = dq->n_descriptors - sw_head_index;
1858  n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
1859  sw_head_index =
1860  ixge_ring_add (dq, sw_head_index,
1861  dq->rx.n_descriptors_done_this_call);
1862 
1863  if (dq->rx.n_descriptors_done_this_call != n_tried)
1864  goto done;
1865  }
1866  if (hw_head_index >= sw_head_index)
1867  {
1868  u32 n_tried = hw_head_index - sw_head_index;
1869  n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
1870  sw_head_index =
1871  ixge_ring_add (dq, sw_head_index,
1872  dq->rx.n_descriptors_done_this_call);
1873  }
1874 
1875 done:
1876  dq->head_index = sw_head_index;
1877  dq->tail_index =
1878  ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
1879 
1880  /* Give tail back to hardware. */
1881   CLIB_MEMORY_BARRIER ();
1882 
1883  dr->tail_index = dq->tail_index;
1884 
1885   vlib_increment_combined_counter (vnet_main.
1886                                    interface_main.combined_sw_if_counters +
1887                                    VNET_INTERFACE_COUNTER_RX,
1888                                    0 /* cpu_index */ ,
1889  xd->vlib_sw_if_index, n_packets,
1890  dq->rx.n_bytes);
1891 
1892  return n_packets;
1893 }
1894 
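A standalone sketch of the wrap handling above: when the hardware head is behind the software head, the [sw_head, ring_end) range is processed first, then [0, hw_head). Hypothetical demo names:

#include <assert.h>
#include <stdint.h>

static uint32_t
demo_descriptors_ready (uint32_t hw_head, uint32_t sw_head, uint32_t n)
{
  if (hw_head >= sw_head)
    return hw_head - sw_head;		/* single contiguous range */
  return (n - sw_head) + hw_head;	/* two ranges: tail end + wrap */
}

int
main (void)
{
  assert (demo_descriptors_ready (10, 4, 512) == 6);
  assert (demo_descriptors_ready (3, 508, 512) == 7);
  return 0;
}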
1895 static void
1896 ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
1897 {
1898  vlib_main_t *vm = xm->vlib_main;
1899  ixge_regs_t *r = xd->regs;
1900 
1901  if (i != 20)
1902  {
1903  ELOG_TYPE_DECLARE (e) =
1904  {
1905  .function = (char *) __FUNCTION__,.format =
1906  "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
1907  16,.enum_strings =
1908  {
1909  "flow director",
1910  "rx miss",
1911  "pci exception",
1912  "mailbox",
1913  "link status change",
1914  "linksec key exchange",
1915  "manageability event",
1916  "reserved23",
1917  "sdp0",
1918  "sdp1",
1919  "sdp2",
1920  "sdp3",
1921  "ecc", "descriptor handler error", "tcp timer", "other",},};
1922  struct
1923  {
1924  u8 instance;
1925  u8 index;
1926  } *ed;
1927  ed = ELOG_DATA (&vm->elog_main, e);
1928  ed->instance = xd->device_index;
1929  ed->index = i - 16;
1930  }
1931  else
1932  {
1933  u32 v = r->xge_mac.link_status;
1934  uword is_up = (v & (1 << 30)) != 0;
1935 
1936  ELOG_TYPE_DECLARE (e) =
1937  {
1938  .function = (char *) __FUNCTION__,.format =
1939  "ixge %d, link status change 0x%x",.format_args = "i4i4",};
1940  struct
1941  {
1942  u32 instance, link_status;
1943  } *ed;
1944  ed = ELOG_DATA (&vm->elog_main, e);
1945  ed->instance = xd->device_index;
1946  ed->link_status = v;
1947       xd->link_status_at_last_link_change = v;
1948 
1949       vlib_process_signal_event (vm, ixge_process_node.index,
1950                                  EVENT_SET_FLAGS,
1951                                  ((is_up << 31) | xd->vlib_hw_if_index));
1952  }
1953 }
1954 
1955 always_inline uword
1956 clean_block (u32 * b, u32 * t, u32 n_left)
1957 {
1958  u32 *t0 = t;
1959 
1960  while (n_left >= 4)
1961  {
1962  u32 bi0, bi1, bi2, bi3;
1963 
1964  t[0] = bi0 = b[0];
1965  b[0] = 0;
1966  t += bi0 != 0;
1967 
1968  t[0] = bi1 = b[1];
1969  b[1] = 0;
1970  t += bi1 != 0;
1971 
1972  t[0] = bi2 = b[2];
1973  b[2] = 0;
1974  t += bi2 != 0;
1975 
1976  t[0] = bi3 = b[3];
1977  b[3] = 0;
1978  t += bi3 != 0;
1979 
1980  b += 4;
1981  n_left -= 4;
1982  }
1983 
1984  while (n_left > 0)
1985  {
1986  u32 bi0;
1987 
1988  t[0] = bi0 = b[0];
1989  b[0] = 0;
1990  t += bi0 != 0;
1991  b += 1;
1992  n_left -= 1;
1993  }
1994 
1995  return t - t0;
1996 }
1997 
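A standalone model of the clean_block contract (hypothetical demo names): compact the non-zero buffer indices from b into t, zeroing b as it goes, and return how many were copied.

#include <assert.h>
#include <stdint.h>

static uint32_t
demo_clean_block (uint32_t * b, uint32_t * t, uint32_t n_left)
{
  uint32_t *t0 = t;
  while (n_left--)
    {
      uint32_t bi = *b;
      *t = bi;
      *b++ = 0;
      t += bi != 0;		/* keep only non-zero entries */
    }
  return t - t0;
}

int
main (void)
{
  uint32_t ring[6] = { 7, 0, 3, 0, 0, 9 };
  uint32_t out[6];
  assert (demo_clean_block (ring, out, 6) == 3);
  assert (out[0] == 7 && out[1] == 3 && out[2] == 9);
  assert (ring[0] == 0 && ring[5] == 0);
  return 0;
}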
1998 static void
1999 ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
2000 {
2001  vlib_main_t *vm = xm->vlib_main;
2002  ixge_dma_queue_t *dq =
2003  vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
2004  u32 n_clean, *b, *t, *t0;
2005  i32 n_hw_owned_descriptors;
2006  i32 first_to_clean, last_to_clean;
2007  u64 hwbp_race = 0;
2008 
2009  /* Handle case where head write back pointer update
2010  * arrives after the interrupt during high PCI bus loads.
2011  */
2012  while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
2013  dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
2014  {
2015  hwbp_race++;
2016  if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
2017  {
2018  ELOG_TYPE_DECLARE (e) =
2019  {
2020  .function = (char *) __FUNCTION__,.format =
2021  "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
2022  = "i4i4i4i4",};
2023  struct
2024  {
2025  u32 instance, head_index, tail_index, n_buffers_on_ring;
2026  } *ed;
2027  ed = ELOG_DATA (&vm->elog_main, e);
2028  ed->instance = xd->device_index;
2029  ed->head_index = dq->head_index;
2030  ed->tail_index = dq->tail_index;
2031  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
2032  }
2033  }
2034 
2035  dq->head_index = dq->tx.head_index_write_back[0];
2036  n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
2037  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
2038  n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
2039 
2040  if (IXGE_HWBP_RACE_ELOG && hwbp_race)
2041  {
2042  ELOG_TYPE_DECLARE (e) =
2043  {
2044  .function = (char *) __FUNCTION__,.format =
2045  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
2046  = "i4i4i4i4i4",};
2047  struct
2048  {
2049  u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
2050  } *ed;
2051  ed = ELOG_DATA (&vm->elog_main, e);
2052  ed->instance = xd->device_index;
2053  ed->head_index = dq->head_index;
2054  ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
2055  ed->n_clean = n_clean;
2056  ed->retries = hwbp_race;
2057  }
2058 
2059  /*
2060  * This function used to wait until hardware owned zero descriptors.
2061  * At high PPS rates, that doesn't happen until the TX ring is
2062  * completely full of descriptors which need to be cleaned up.
2063  * That, in turn, causes TX ring-full drops and/or long RX service
2064  * interruptions.
2065  */
2066  if (n_clean == 0)
2067  return;
2068 
2069  /* Clean the n_clean descriptors prior to the reported hardware head */
2070  last_to_clean = dq->head_index - 1;
2071  last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
2072  last_to_clean;
2073 
2074  first_to_clean = (last_to_clean) - (n_clean - 1);
2075  first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
2076  first_to_clean;
2077 
2078   vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2079   t0 = t = xm->tx_buffers_pending_free;
2080  b = dq->descriptor_buffer_indices + first_to_clean;
2081 
2082  /* Wrap case: clean from first to end, then start to last */
2083  if (first_to_clean > last_to_clean)
2084  {
2085  t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
2086  first_to_clean = 0;
2087  b = dq->descriptor_buffer_indices;
2088  }
2089 
2090  /* Typical case: clean from first to last */
2091  if (first_to_clean <= last_to_clean)
2092  t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
2093 
2094  if (t > t0)
2095  {
2096  u32 n = t - t0;
2097  vlib_buffer_free_no_next (vm, t0, n);
2098  ASSERT (dq->tx.n_buffers_on_ring >= n);
2099  dq->tx.n_buffers_on_ring -= n;
2100  _vec_len (xm->tx_buffers_pending_free) = 0;
2101  }
2102 }
2103 
2104 /* RX queue interrupts 0 thru 7; TX 8 thru 15. */
2105 always_inline uword
2106 ixge_interrupt_is_rx_queue (uword i)
2107 {
2108  return i < 8;
2109 }
2110 
2111 always_inline uword
2112 ixge_interrupt_is_tx_queue (uword i)
2113 {
2114  return i >= 8 && i < 16;
2115 }
2116 
2117 always_inline uword
2118 ixge_tx_queue_to_interrupt (uword i)
2119 {
2120  return 8 + i;
2121 }
2122 
2123 always_inline uword
2124 ixge_rx_queue_to_interrupt (uword i)
2125 {
2126  return 0 + i;
2127 }
2128 
2129 always_inline uword
2130 ixge_interrupt_rx_queue (uword i)
2131 {
2132   ASSERT (ixge_interrupt_is_rx_queue (i));
2133   return i - 0;
2134 }
2135 
2136 always_inline uword
2137 ixge_interrupt_tx_queue (uword i)
2138 {
2139   ASSERT (ixge_interrupt_is_tx_queue (i));
2140   return i - 8;
2141 }
2142 
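The helpers above encode a fixed status-bit layout: bits 0-7 are RX queues, 8-15 are TX queues, and 16 and up are the "other" causes decoded by ixge_interrupt. A quick standalone check of the two inverse mappings:

#include <assert.h>

int
main (void)
{
  for (int q = 0; q < 8; q++)
    {
      int rx_bit = 0 + q;	/* ixge_rx_queue_to_interrupt */
      int tx_bit = 8 + q;	/* ixge_tx_queue_to_interrupt */
      assert (rx_bit < 8 && rx_bit - 0 == q);
      assert (tx_bit >= 8 && tx_bit < 16 && tx_bit - 8 == q);
    }
  return 0;
}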
2143 static uword
2144 ixge_device_input (ixge_main_t * xm,
2145                    ixge_device_t * xd, vlib_node_runtime_t * node)
2146 {
2147  ixge_regs_t *r = xd->regs;
2148  u32 i, s;
2149  uword n_rx_packets = 0;
2150 
2151   s = r->interrupt.status_write_1_to_clear;
2152   if (s)
2153     r->interrupt.status_write_1_to_clear = s;
2154 
2155  /* *INDENT-OFF* */
2156   foreach_set_bit (i, s, ({
2157     if (ixge_interrupt_is_rx_queue (i))
2158       n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));
2159 
2160  else if (ixge_interrupt_is_tx_queue (i))
2161  ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));
2162 
2163  else
2164  ixge_interrupt (xm, xd, i);
2165  }));
2166  /* *INDENT-ON* */
2167 
2168  return n_rx_packets;
2169 }
2170 
2171 static uword
2172 ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
2173 {
2174  ixge_main_t *xm = &ixge_main;
2175  ixge_device_t *xd;
2176  uword n_rx_packets = 0;
2177 
2178  if (node->state == VLIB_NODE_STATE_INTERRUPT)
2179  {
2180  uword i;
2181 
2182  /* Loop over devices with interrupts. */
2183  /* *INDENT-OFF* */
2184  foreach_set_bit (i, node->runtime_data[0], ({
2185  xd = vec_elt_at_index (xm->devices, i);
2186  n_rx_packets += ixge_device_input (xm, xd, node);
2187 
2188  /* Re-enable interrupts since we're going to stay in interrupt mode. */
2189  if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
2190  xd->regs->interrupt.enable_write_1_to_set = ~0;
2191  }));
2192  /* *INDENT-ON* */
2193 
2194  /* Clear mask of devices with pending interrupts. */
2195  node->runtime_data[0] = 0;
2196  }
2197  else
2198  {
2199  /* Poll all devices for input/interrupts. */
2200  vec_foreach (xd, xm->devices)
2201  {
2202  n_rx_packets += ixge_device_input (xm, xd, node);
2203 
2204  /* Re-enable interrupts when switching out of polling mode. */
2205         if (node->flags &
2206             VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
2207           xd->regs->interrupt.enable_write_1_to_set = ~0;
2208       }
2209  }
2210 
2211  return n_rx_packets;
2212 }
2213 
2214 static char *ixge_error_strings[] = {
2215 #define _(n,s) s,
2216   foreach_ixge_error
2217 #undef _
2218 };
2219 
2220 /* *INDENT-OFF* */
2221 VLIB_REGISTER_NODE (ixge_input_node, static) = {
2222  .function = ixge_input,
2223  .type = VLIB_NODE_TYPE_INPUT,
2224  .name = "ixge-input",
2225 
2226  /* Will be enabled if/when hardware is detected. */
2227  .state = VLIB_NODE_STATE_DISABLED,
2228 
2229  .format_buffer = format_ethernet_header_with_length,
2230  .format_trace = format_ixge_rx_dma_trace,
2231 
2232  .n_errors = IXGE_N_ERROR,
2233  .error_strings = ixge_error_strings,
2234 
2235  .n_next_nodes = IXGE_RX_N_NEXT,
2236  .next_nodes = {
2237  [IXGE_RX_NEXT_DROP] = "error-drop",
2238  [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
2239  [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
2240  [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
2241  },
2242 };
2243 
2246 /* *INDENT-ON* */
2247 
2248 static u8 *
2249 format_ixge_device_name (u8 * s, va_list * args)
2250 {
2251  u32 i = va_arg (*args, u32);
2252  ixge_main_t *xm = &ixge_main;
2253  ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
2254   return format (s, "TenGigabitEthernet%U",
2255                  format_vlib_pci_handle, &xd->pci_device.bus_address);
2256 }
2257 
2258 #define IXGE_COUNTER_IS_64_BIT (1 << 0)
2259 #define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)
2260 
2261 static u8 ixge_counter_flags[] = {
2262 #define _(a,f) 0,
2263 #define _64(a,f) IXGE_COUNTER_IS_64_BIT,
2264   foreach_ixge_counter
2265 #undef _
2266 #undef _64
2267 };
2268 
2269 static void
2270 ixge_update_counters (ixge_device_t * xd)
2271 {
2272  /* Byte offset for counter registers. */
2273  static u32 reg_offsets[] = {
2274 #define _(a,f) (a) / sizeof (u32),
2275 #define _64(a,f) _(a,f)
2276     foreach_ixge_counter
2277 #undef _
2278 #undef _64
2279  };
2280  volatile u32 *r = (volatile u32 *) xd->regs;
2281  int i;
2282 
2283  for (i = 0; i < ARRAY_LEN (xd->counters); i++)
2284  {
2285  u32 o = reg_offsets[i];
2286       xd->counters[i] += r[o];
2287       if (ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ)
2288         r[o] = 0;
2289       if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
2290         xd->counters[i] += (u64) r[o + 1] << (u64) 32;
2291  }
2292 }
2293 
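A standalone sketch of the 64-bit accumulation above: the low 32 bits live at register offset o, the high bits at o + 1.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t regs[2] = { 0xdeadbeef, 0x12 };	/* low, high halves */
  uint64_t counter = 0;
  counter += regs[0];
  counter += (uint64_t) regs[1] << 32;
  assert (counter == 0x12deadbeefULL);
  return 0;
}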
2294 static u8 *
2295 format_ixge_device_id (u8 * s, va_list * args)
2296 {
2297  u32 device_id = va_arg (*args, u32);
2298  char *t = 0;
2299  switch (device_id)
2300  {
2301 #define _(f,n) case n: t = #f; break;
2302       foreach_ixge_pci_device_id;
2303 #undef _
2304  default:
2305  t = 0;
2306  break;
2307  }
2308  if (t == 0)
2309  s = format (s, "unknown 0x%x", device_id);
2310  else
2311  s = format (s, "%s", t);
2312  return s;
2313 }
2314 
2315 static u8 *
2316 format_ixge_link_status (u8 * s, va_list * args)
2317 {
2318  ixge_device_t *xd = va_arg (*args, ixge_device_t *);
2319  u32 v = xd->link_status_at_last_link_change;
2320 
2321  s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
2322 
2323  {
2324  char *modes[] = {
2325  "1g", "10g parallel", "10g serial", "autoneg",
2326  };
2327  char *speeds[] = {
2328  "unknown", "100m", "1g", "10g",
2329  };
2330  s = format (s, ", mode %s, speed %s",
2331  modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
2332  }
2333 
2334  return s;
2335 }
2336 
2337 static u8 *
2338 format_ixge_device (u8 * s, va_list * args)
2339 {
2340  u32 dev_instance = va_arg (*args, u32);
2341  CLIB_UNUSED (int verbose) = va_arg (*args, int);
2342  ixge_main_t *xm = &ixge_main;
2343  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
2344  ixge_phy_t *phy = xd->phys + xd->phy_index;
2345  uword indent = format_get_indent (s);
2346 
2347  ixge_update_counters (xd);
2348  xd->link_status_at_last_link_change = xd->regs->xge_mac.link_status;
2349 
2350  s = format (s, "Intel 8259X: id %U\n%Ulink %U",
2351 	      format_ixge_device_id, xd->device_id,
2352 	      format_white_space, indent + 2, format_ixge_link_status, xd);
2353 
2354  {
2355 
2356  s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
2358  }
2359 
2360  s = format (s, "\n%U", format_white_space, indent + 2);
2361  if (phy->mdio_address != ~0)
2362  s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
2363  else if (xd->sfp_eeprom.id == SFP_ID_sfp)
2364  s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
2365  else
2366  s = format (s, "PHY not found");
2367 
2368  /* FIXME */
2369  {
2370    ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
2371    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
2372  u32 hw_head_index = dr->head_index;
2373  u32 sw_head_index = dq->head_index;
2374  u32 nitems;
2375 
2376  nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
2377  s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
2378  format_white_space, indent + 2, nitems, dq->n_descriptors);
2379 
2380  s = format (s, "\n%U%d buffers in driver rx cache",
2381  format_white_space, indent + 2,
2382  vec_len (xm->rx_buffers_to_add));
2383 
2384  s = format (s, "\n%U%d buffers on tx queue 0 ring",
2385  format_white_space, indent + 2,
2386  xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
2387  }
2388  {
2389  u32 i;
2390  u64 v;
2391  static char *names[] = {
2392 #define _(a,f) #f,
2393 #define _64(a,f) _(a,f)
2394       foreach_ixge_counter
2395 #undef _
2396 #undef _64
2397  };
2398 
2399  for (i = 0; i < ARRAY_LEN (names); i++)
2400  {
2401  v = xd->counters[i] - xd->counters_last_clear[i];
2402  if (v != 0)
2403  s = format (s, "\n%U%-40U%16Ld",
2404  format_white_space, indent + 2,
2405  format_c_identifier, names[i], v);
2406  }
2407  }
2408 
2409  return s;
2410 }
2411 
2412 static void
2413 ixge_clear_hw_interface_counters (u32 instance)
2414 {
2415  ixge_main_t *xm = &ixge_main;
2416  ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
2417  ixge_update_counters (xd);
2418  memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
2419 }
2420 
2421 /*
2422  * Dynamically redirect all packets from a specific interface
2423  * to the specified node.
2424  */
2425 static void
2426 ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
2427 			      u32 node_index)
2428 {
2429  ixge_main_t *xm = &ixge_main;
2430  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
2431  ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2432 
2433  /* Shut off redirection */
2434  if (node_index == ~0)
2435  {
2436  xd->per_interface_next_index = node_index;
2437  return;
2438  }
2439 
2440  xd->per_interface_next_index =
2441    vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
2442 }
2443 
2444 
2445 /* *INDENT-OFF* */
2446 VNET_DEVICE_CLASS (ixge_device_class) = {
2447  .name = "ixge",
2448  .tx_function = ixge_interface_tx,
2449  .format_device_name = format_ixge_device_name,
2450  .format_device = format_ixge_device,
2451  .format_tx_trace = format_ixge_tx_dma_trace,
2452  .clear_counters = ixge_clear_hw_interface_counters,
2453  .admin_up_down_function = ixge_interface_admin_up_down,
2454  .rx_redirect_to_node = ixge_set_interface_next_node,
2455  .flatten_output_chains = 1,
2456 };
2457 /* *INDENT-ON* */
2458 
2459 #define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
2460 
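/* Set up one rx or tx descriptor ring: allocate DMA-able descriptor memory,
   pre-fill rx descriptors with buffers (or tx descriptors from the template),
   then program the queue registers and enable DMA. */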
2461 static clib_error_t *
2462 ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
2463 {
2464  ixge_main_t *xm = &ixge_main;
2465  vlib_main_t *vm = xm->vlib_main;
2466  ixge_dma_queue_t *dq;
2467  clib_error_t *error = 0;
2468 
2469  vec_validate (xd->dma_queues[rt], queue_index);
2470  dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
2471 
2472  if (!xm->n_descriptors_per_cache_line)
2473    xm->n_descriptors_per_cache_line =
2474      CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
2475 
2476  if (!xm->n_bytes_in_rx_buffer)
2477    xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
2478  xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
2479  if (!xm->vlib_buffer_free_list_index)
2480    {
2481      xm->vlib_buffer_free_list_index =
2482 	vlib_buffer_get_or_create_free_list (vm, xm->n_bytes_in_rx_buffer,
2483 					     "ixge rx");
2484      ASSERT (xm->vlib_buffer_free_list_index != 0);
2485    }
2486 
2487  if (!xm->n_descriptors[rt])
2488  xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
2489 
2490  dq->queue_index = queue_index;
2491  dq->n_descriptors =
2492    round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
2493  dq->head_index = dq->tail_index = 0;
2494 
2495  dq->descriptors = vlib_physmem_alloc_aligned (vm, &error,
2496  dq->n_descriptors *
2497  sizeof (dq->descriptors[0]),
2498  128 /* per chip spec */ );
2499  if (error)
2500  return error;
2501 
2502  memset (dq->descriptors, 0,
2503  dq->n_descriptors * sizeof (dq->descriptors[0]));
2504  vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
2505 
2506  if (rt == VLIB_RX)
2507  {
2508  u32 n_alloc, i;
2509 
2510      n_alloc = vlib_buffer_alloc_from_free_list
2511 	(vm, dq->descriptor_buffer_indices,
2512 	 vec_len (dq->descriptor_buffer_indices),
2513 	 xm->vlib_buffer_free_list_index);
2514      ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
2515      for (i = 0; i < n_alloc; i++)
2516 	{
2517 	  vlib_buffer_t *b =
2518 	    vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]);
2519 	  b->current_data = 0;
2520 	  dq->descriptors[i].rx_to_hw.tail_address = vlib_get_buffer_data_physical_address (vm, dq->descriptor_buffer_indices[i]);
2521 	}
2522  }
2523  else
2524  {
2525  u32 i;
2526 
2527      dq->tx.head_index_write_back =
2528 	vlib_physmem_alloc (vm, &error, CLIB_CACHE_LINE_BYTES);
2529 
2530  for (i = 0; i < dq->n_descriptors; i++)
2531  dq->descriptors[i].tx = xm->tx_descriptor_template;
2532 
2533      vec_validate (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2534    }
2535 
2536  {
2537  ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
2538  u64 a;
2539 
2540    a = vlib_physmem_virtual_to_physical (vm, dq->descriptors);
2541    dr->descriptor_address[0] = a & 0xFFFFFFFF;
2542  dr->descriptor_address[1] = a >> (u64) 32;
2543  dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
2544  dq->head_index = dq->tail_index = 0;
2545 
2546  if (rt == VLIB_RX)
2547  {
2548  ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
2549  dr->rx_split_control =
2550  ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
2551  | ( /* lo free descriptor threshold (units of 64 descriptors) */
2552  (1 << 22)) | ( /* descriptor type: advanced one buffer */
2553  (1 << 25)) | ( /* drop if no descriptors available */
2554  (1 << 28)));
2555 
2556 	  /* Give hardware all but the last 16 cache lines' worth of descriptors. */
2557 	  dq->tail_index = dq->n_descriptors -
2558 	    16 * xm->n_descriptors_per_cache_line;
2559 	}
2560  else
2561  {
2562 	  /* Make sure it's initialized before the hardware can get to it. */
2563  dq->tx.head_index_write_back[0] = dq->head_index;
2564 
2565  a =
2566  vlib_physmem_virtual_to_physical (vm, dq->tx.head_index_write_back);
2567  dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
2568  dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
2569  }
2570 
2571  /* DMA on the 82599 does not work with bit [13] (rx data write relaxed
2572     ordering) or with the undocumented bit [12] set. */
2573  if (rt == VLIB_RX)
2574  dr->dca_control &= ~((1 << 13) | (1 << 12));
2575 
2576    CLIB_MEMORY_BARRIER ();
2577 
2578  if (rt == VLIB_TX)
2579  {
2580  xd->regs->tx_dma_control |= (1 << 0);
2581  dr->control |= ((32 << 0) /* prefetch threshold */
2582  | (64 << 8) /* host threshold */
2583  | (0 << 16) /* writeback threshold */ );
2584  }
2585 
2586  /* Enable this queue and wait for hardware to initialize
2587  before adding to tail. */
2588  if (rt == VLIB_TX)
2589  {
2590  dr->control |= 1 << 25;
2591  while (!(dr->control & (1 << 25)))
2592  ;
2593  }
2594 
2595  /* Set head/tail indices and enable DMA. */
2596  dr->head_index = dq->head_index;
2597  dr->tail_index = dq->tail_index;
2598  }
2599 
2600  return error;
2601 }
2602 
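/* Device flag-change callback: "accept all" maps to the unicast promiscuous
   bit (bit 9 of filter_control, FCTRL.UPE in 82599 datasheet terms). */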
2603 static u32
2604 ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
2605 {
2606  ixge_device_t *xd;
2607  ixge_regs_t *r;
2608  u32 old;
2609  ixge_main_t *xm = &ixge_main;
2610 
2611  xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2612  r = xd->regs;
2613 
2614  old = r->filter_control;
2615 
2616  if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
2617    r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
2618  else
2619  r->filter_control = old & ~(1 << 9);
2620 
2621  return old;
2622 }
2623 
2624 static void
2625 ixge_device_init (ixge_main_t * xm)
2626 {
2627  vnet_main_t *vnm = vnet_get_main ();
2628  ixge_device_t *xd;
2629 
2630  /* Reset chip(s). */
2631  vec_foreach (xd, xm->devices)
2632  {
2633  ixge_regs_t *r = xd->regs;
2634  const u32 reset_bit = (1 << 26) | (1 << 3);
2635 
2636  r->control |= reset_bit;
2637 
2638  /* No need to suspend. Timed to take ~1e-6 secs */
2639  while (r->control & reset_bit)
2640  ;
2641 
2642  /* Software loaded. */
2643  r->extended_control |= (1 << 28);
2644 
2645  ixge_phy_init (xd);
2646 
2647  /* Register ethernet interface. */
2648  {
2649  u8 addr8[6];
2650  u32 i, addr32[2];
2651  clib_error_t *error;
2652 
2653  addr32[0] = r->rx_ethernet_address0[0][0];
2654  addr32[1] = r->rx_ethernet_address0[0][1];
2655  for (i = 0; i < 6; i++)
2656  addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
2657 
2658      error = ethernet_register_interface
2659 	(vnm, ixge_device_class.index, xd->device_index,
2660 	 /* ethernet address */ addr8,
2661 	 &xd->vlib_hw_if_index, ixge_flag_change);
2662      if (error)
2663  clib_error_report (error);
2664  }
2665 
2666    {
2667      vnet_sw_interface_t *sw =
2668 	vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
2669      xd->vlib_sw_if_index = sw->sw_if_index;
2670  }
2671 
2672  ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
2673 
2674    xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;
2675 
2676  ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
2677 
2678  /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
2679  r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
2680  ixge_rx_queue_to_interrupt (0)) << 0);
2681 
2682  r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
2683  ixge_tx_queue_to_interrupt (0)) << 8);
2684 
2685  /* No use in getting too many interrupts:
2686     limit them to one per 3/4 of the ring at line rate
2687     with min-sized packets.
2688     Not needed in practice, since the kernel/vlib main loop already
2689     provides an adequate interrupt-limiting scheme. */
2690  if (0)
2691  {
2692  f64 line_rate_max_pps =
2693  10e9 / (8 * (64 + /* interframe padding */ 20));
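	  /* = 10e9 / (8 * 84) ~= 14.88e6 packets/sec for min-sized frames. */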
2694 	  ixge_throttle_queue_interrupt (r, 0,
2695 					 .75 * xm->n_descriptors[VLIB_RX] /
2696 					 line_rate_max_pps);
2697  }
2698 
2699  /* Accept all multicast and broadcast packets. Should really add them
2700  to the dst_ethernet_address register array. */
2701  r->filter_control |= (1 << 10) | (1 << 8);
2702 
2703  /* Enable frames up to size in mac frame size register. */
2704  r->xge_mac.control |= 1 << 2;
2705  r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
2706 
2707  /* Enable all interrupts. */
2708      if (!IXGE_ALWAYS_POLL)
2709 	r->interrupt.enable_write_1_to_set = ~0;
2710    }
2711 }
2712 
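/* Background process: initialize devices, then loop: debounce link-state
   events for 1 ms and refresh the counters at least every 30 seconds. */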
2713 static uword
2714 ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
2715 {
2716  vnet_main_t *vnm = vnet_get_main ();
2717  ixge_main_t *xm = &ixge_main;
2718  ixge_device_t *xd;
2719  uword event_type, *event_data = 0;
2720  f64 timeout, link_debounce_deadline;
2721 
2722  ixge_device_init (xm);
2723 
2724  /* Clear all counters. */
2725  vec_foreach (xd, xm->devices)
2726  {
2727  ixge_update_counters (xd);
2728  memset (xd->counters, 0, sizeof (xd->counters));
2729  }
2730 
2731  timeout = 30.0;
2732  link_debounce_deadline = 1e70;
2733 
2734  while (1)
2735  {
2736  /* 36 bit stat counters could overflow in ~50 secs.
2737  We poll every 30 secs to be conservative. */
2738       vlib_process_wait_for_event_or_clock (vm, timeout);
2739 
2740  event_type = vlib_process_get_events (vm, &event_data);
2741 
2742  switch (event_type)
2743  {
2744  case EVENT_SET_FLAGS:
2745  /* 1 ms */
2746  link_debounce_deadline = vlib_time_now (vm) + 1e-3;
2747  timeout = 1e-3;
2748  break;
2749 
2750  case ~0:
2751  /* No events found: timer expired. */
2752  if (vlib_time_now (vm) > link_debounce_deadline)
2753  {
2754  vec_foreach (xd, xm->devices)
2755  {
2756  ixge_regs_t *r = xd->regs;
2757  u32 v = r->xge_mac.link_status;
2758  uword is_up = (v & (1 << 30)) != 0;
2759 
2760 		vnet_hw_interface_set_flags
2761 		  (vnm, xd->vlib_hw_if_index,
2762  is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
2763  }
2764  link_debounce_deadline = 1e70;
2765  timeout = 30.0;
2766  }
2767  break;
2768 
2769  default:
2770  ASSERT (0);
2771  }
2772 
2773  if (event_data)
2774  _vec_len (event_data) = 0;
2775 
2776  /* Query stats every 30 secs. */
2777  {
2778  f64 now = vlib_time_now (vm);
2779  if (now - xm->time_last_stats_update > 30)
2780  {
2781  xm->time_last_stats_update = now;
2782  vec_foreach (xd, xm->devices) ixge_update_counters (xd);
2783  }
2784  }
2785  }
2786 
2787  return 0;
2788 }
2789 
2790 static vlib_node_registration_t ixge_process_node = {
2791   .function = ixge_process,
2792  .type = VLIB_NODE_TYPE_PROCESS,
2793  .name = "ixge-process",
2794 };
2795 
2796 clib_error_t *
2797 ixge_init (vlib_main_t * vm)
2798 {
2799  ixge_main_t *xm = &ixge_main;
2800  clib_error_t *error;
2801 
2802  xm->vlib_main = vm;
2803  memset (&xm->tx_descriptor_template, 0,
2804  sizeof (xm->tx_descriptor_template));
2805  memset (&xm->tx_descriptor_template_mask, 0,
2806 	  sizeof (xm->tx_descriptor_template_mask));
2807  xm->tx_descriptor_template.status0 =
2808    (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
2809     IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
2810     IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
2811  xm->tx_descriptor_template_mask.status0 = 0xffff;
2812  xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2813 
2814  xm->tx_descriptor_template_mask.status0 &=
2815    ~(IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
2816      | IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS);
2817  xm->tx_descriptor_template_mask.status1 &=
2818    ~(IXGE_TX_DESCRIPTOR_STATUS1_DONE);
2819 
2820  error = vlib_call_init_function (vm, pci_bus_init);
2821 
2822  return error;
2823 }
2824 
2825 VLIB_INIT_FUNCTION (ixge_init);
2826 
2827 
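/* PCI interrupt handler: mark the input node interrupt-pending and record
   which device interrupted in the node's runtime-data bitmask. */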
2828 static void
2829 ixge_pci_intr_handler (vlib_pci_device_t * dev)
2830 {
2831  ixge_main_t *xm = &ixge_main;
2832  vlib_main_t *vm = xm->vlib_main;
2833 
2834  vlib_node_set_interrupt_pending (vm, ixge_input_node.index);
2835 
2836  /* Let node know which device is interrupting. */
2837  {
2838    vlib_node_runtime_t *rt =
2839      vlib_node_get_runtime (vm, ixge_input_node.index);
2840    rt->runtime_data[0] |= 1 << dev->private_data;
2841  }
2842 }
2843 
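/* PCI probe: map BAR 0 registers, add a device slot, put the input node in
   poll or interrupt mode, then enable bus mastering and interrupts. */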
2844 static clib_error_t *
2845 ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
2846 {
2847  ixge_main_t *xm = &ixge_main;
2848  clib_error_t *error;
2849  void *r;
2850  ixge_device_t *xd;
2851 
2852  /* Device found: make sure we have dma memory. */
2853  if (unix_physmem_is_fake (vm))
2854  return clib_error_return (0, "no physical memory available");
2855 
2856  error = vlib_pci_map_resource (dev, 0, &r);
2857  if (error)
2858  return error;
2859 
2860  vec_add2 (xm->devices, xd, 1);
2861 
2862  if (vec_len (xm->devices) == 1)
2863  {
2864  ixge_input_node.function = ixge_input_multiarch_select ();
2865  }
2866 
2867  xd->pci_device = dev[0];
2868  xd->device_id = xd->pci_device.config0.header.device_id;
2869  xd->regs = r;
2870  xd->device_index = xd - xm->devices;
2871  xd->pci_function = dev->bus_address.function;
2872  xd->per_interface_next_index = ~0;
2873 
2874 
2875  /* Chip found so enable node. */
2876    {
2877      vlib_node_set_state (vm, ixge_input_node.index,
2878 			  (IXGE_ALWAYS_POLL
2879 			   ? VLIB_NODE_STATE_POLLING
2880 			   : VLIB_NODE_STATE_INTERRUPT));
2881 
2882  dev->private_data = xd->device_index;
2883  }
2884 
2885  if (vec_len (xm->devices) == 1)
2886    {
2887      vlib_register_node (vm, &ixge_process_node);
2888      xd->process_node_index = ixge_process_node.index;
2889    }
2890 
2891  error = vlib_pci_bus_master_enable (dev);
2892 
2893  if (error)
2894  return error;
2895 
2896  return vlib_pci_intr_enable (dev);
2897 }
2898 
2899 /* *INDENT-OFF* */
2900 PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
2901  .init_function = ixge_pci_init,
2902  .interrupt_handler = ixge_pci_intr_handler,
2903  .supported_devices = {
2904 #define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
2905   foreach_ixge_pci_device_id
2906 #undef _
2907  { 0 },
2908  },
2909 };
2910 /* *INDENT-ON* */
2911 
2912 void
2913 ixge_set_next_node (ixge_rx_next_t next, char *name)
2914 {
2915  vlib_node_registration_t *r = &ixge_input_node;
2916 
2917  switch (next)
2918  {
2919    case IXGE_RX_NEXT_IP4_INPUT:
2920    case IXGE_RX_NEXT_IP6_INPUT:
2921    case IXGE_RX_NEXT_ETHERNET_INPUT:
2922      r->next_nodes[next] = name;
2923  break;
2924 
2925  default:
2926  clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2927  break;
2928  }
2929 }
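/* Example (hypothetical caller): redirect all ip4 packets from ixge
   interfaces to a custom graph node before the input node runs:

     ixge_set_next_node (IXGE_RX_NEXT_IP4_INPUT, "my-ip4-node");

   where "my-ip4-node" is assumed to be a registered vlib node name. */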
2930 #endif
2931 
2932 /*
2933  * fd.io coding-style-patch-verification: ON
2934  *
2935  * Local Variables:
2936  * eval: (c-set-style "gnu")
2937  * End:
2938  */