1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_inet6.h"
42 /*********************************************************************
43 * Set this to one to display debug statistics
44 *********************************************************************/
/* Debug toggle: set to a non-zero value to display debug statistics. */
int ixgbe_display_debug_stats = 0;
47 /*********************************************************************
49 *********************************************************************/
/* Driver version string; appended to the device description in ixgbe_probe(). */
char ixgbe_driver_version[] = "2.4.4";
52 /*********************************************************************
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixgbe_strings
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 *********************************************************************/
62 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
86 /* required last entry */
90 /*********************************************************************
91 * Table of branding strings
92 *********************************************************************/
94 static char *ixgbe_strings[] = {
95 "Intel(R) PRO/10GbE PCI-Express Network Driver"
98 /*********************************************************************
100 *********************************************************************/
101 static int ixgbe_probe(device_t);
102 static int ixgbe_attach(device_t);
103 static int ixgbe_detach(device_t);
104 static int ixgbe_shutdown(device_t);
105 static void ixgbe_start(struct ifnet *);
106 static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
107 #if __FreeBSD_version >= 800000
108 static int ixgbe_mq_start(struct ifnet *, struct mbuf *);
109 static int ixgbe_mq_start_locked(struct ifnet *,
110 struct tx_ring *, struct mbuf *);
111 static void ixgbe_qflush(struct ifnet *);
113 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
114 static void ixgbe_init(void *);
115 static void ixgbe_init_locked(struct adapter *);
116 static void ixgbe_stop(void *);
117 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
118 static int ixgbe_media_change(struct ifnet *);
119 static void ixgbe_identify_hardware(struct adapter *);
120 static int ixgbe_allocate_pci_resources(struct adapter *);
121 static int ixgbe_allocate_msix(struct adapter *);
122 static int ixgbe_allocate_legacy(struct adapter *);
123 static int ixgbe_allocate_queues(struct adapter *);
124 static int ixgbe_setup_msix(struct adapter *);
125 static void ixgbe_free_pci_resources(struct adapter *);
126 static void ixgbe_local_timer(void *);
127 static int ixgbe_setup_interface(device_t, struct adapter *);
128 static void ixgbe_config_link(struct adapter *);
130 static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
131 static int ixgbe_setup_transmit_structures(struct adapter *);
132 static void ixgbe_setup_transmit_ring(struct tx_ring *);
133 static void ixgbe_initialize_transmit_units(struct adapter *);
134 static void ixgbe_free_transmit_structures(struct adapter *);
135 static void ixgbe_free_transmit_buffers(struct tx_ring *);
137 static int ixgbe_allocate_receive_buffers(struct rx_ring *);
138 static int ixgbe_setup_receive_structures(struct adapter *);
139 static int ixgbe_setup_receive_ring(struct rx_ring *);
140 static void ixgbe_initialize_receive_units(struct adapter *);
141 static void ixgbe_free_receive_structures(struct adapter *);
142 static void ixgbe_free_receive_buffers(struct rx_ring *);
143 static void ixgbe_setup_hw_rsc(struct rx_ring *);
145 static void ixgbe_enable_intr(struct adapter *);
146 static void ixgbe_disable_intr(struct adapter *);
147 static void ixgbe_update_stats_counters(struct adapter *);
148 static bool ixgbe_txeof(struct tx_ring *);
149 static bool ixgbe_rxeof(struct ix_queue *, int);
150 static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
151 static void ixgbe_set_promisc(struct adapter *);
152 static void ixgbe_set_multi(struct adapter *);
153 static void ixgbe_update_link_status(struct adapter *);
154 static void ixgbe_refresh_mbufs(struct rx_ring *, int);
155 static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
156 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
157 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
158 static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
159 struct ixgbe_dma_alloc *, int);
160 static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
161 static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
162 const char *, int *, int);
163 static bool ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
164 static bool ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
165 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
166 static void ixgbe_configure_ivars(struct adapter *);
167 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
169 static void ixgbe_setup_vlan_hw_support(struct adapter *);
170 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
171 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
173 static void ixgbe_add_hw_stats(struct adapter *adapter);
175 static __inline void ixgbe_rx_discard(struct rx_ring *, int);
176 static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
179 /* Support for pluggable optic modules */
180 static bool ixgbe_sfp_probe(struct adapter *);
181 static void ixgbe_setup_optics(struct adapter *);
/* Legacy (single vector) interrupt handler */
184 static void ixgbe_legacy_irq(void *);
186 /* The MSI/X Interrupt handlers */
187 static void ixgbe_msix_que(void *);
188 static void ixgbe_msix_link(void *);
190 /* Deferred interrupt tasklets */
191 static void ixgbe_handle_que(void *, int);
192 static void ixgbe_handle_link(void *, int);
193 static void ixgbe_handle_msf(void *, int);
194 static void ixgbe_handle_mod(void *, int);
197 static void ixgbe_atr(struct tx_ring *, struct mbuf *);
198 static void ixgbe_reinit_fdir(void *, int);
201 /*********************************************************************
202 * FreeBSD Device Interface Entry Points
203 *********************************************************************/
205 static device_method_t ixgbe_methods[] = {
206 /* Device interface */
207 DEVMETHOD(device_probe, ixgbe_probe),
208 DEVMETHOD(device_attach, ixgbe_attach),
209 DEVMETHOD(device_detach, ixgbe_detach),
210 DEVMETHOD(device_shutdown, ixgbe_shutdown),
214 static driver_t ixgbe_driver = {
215 "ix", ixgbe_methods, sizeof(struct adapter),
/* Register the driver on the PCI bus and declare its module dependencies. */
devclass_t ixgbe_devclass;
DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
/*
** TUNABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
/* Enable Adaptive Interrupt Moderation (loader tunable hw.ixgbe.enable_aim). */
static int ixgbe_enable_aim = TRUE;
TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
/* Ceiling on the per-vector interrupt rate (hw.ixgbe.max_interrupt_rate). */
static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 128;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
/*
** Smart speed setting, default to on
** this only works as a compile option
** right now as its during attach, set
** this to 'ixgbe_smart_speed_off' to
** disable.
*/
/* Compile-time smart-speed selection; see the comment above for how to disable. */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;
/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
/* Non-zero (default) enables MSI-X; can be forced off via hw.ixgbe.enable_msix. */
static int ixgbe_enable_msix = 1;
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, its off by default.
 */
266 static bool ixgbe_header_split = FALSE;
267 TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
/* Requested queue count; 0 (default) means autoconfigure (hw.ixgbe.num_queues). */
static int ixgbe_num_queues = 0;
TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
/* Number of TX descriptors per ring (loader tunable hw.ixgbe.txd). */
static int ixgbe_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
290 /* Keep running tab on them for sanity check */
/* Running count of attached ports; used by the mbuf-cluster sanity check in attach. */
static int ixgbe_total_ports;
/*
** For Flow Director: this is the
** number of TX packets we sample
** for the filter pool, this means
** every 20th packet will be probed.
**
** This feature can be disabled by
** setting this to 0.
*/
/* Flow Director TX sample rate: probe every Nth packet; 0 disables sampling. */
static int atr_sample_rate = 20;
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool, this variable controls
** the size of that pool:
** 0 = 64K, 1 = 128K, 2 = 256K
*/
/* Flow Director filter-pool size selector: 0 = 64K, 1 = 128K, 2 = 256K. */
static int fdir_pballoc = 1;
314 /*********************************************************************
315 * Device identification routine
317 * ixgbe_probe determines if the driver should be loaded on
318 * adapter based on PCI vendor/device id of the adapter.
320 * return BUS_PROBE_DEFAULT on success, positive on failure
321 *********************************************************************/
324 ixgbe_probe(device_t dev)
326 ixgbe_vendor_info_t *ent;
328 u16 pci_vendor_id = 0;
329 u16 pci_device_id = 0;
330 u16 pci_subvendor_id = 0;
331 u16 pci_subdevice_id = 0;
332 char adapter_name[256];
334 INIT_DEBUGOUT("ixgbe_probe: begin");
336 pci_vendor_id = pci_get_vendor(dev);
337 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
340 pci_device_id = pci_get_device(dev);
341 pci_subvendor_id = pci_get_subvendor(dev);
342 pci_subdevice_id = pci_get_subdevice(dev);
344 ent = ixgbe_vendor_info_array;
345 while (ent->vendor_id != 0) {
346 if ((pci_vendor_id == ent->vendor_id) &&
347 (pci_device_id == ent->device_id) &&
349 ((pci_subvendor_id == ent->subvendor_id) ||
350 (ent->subvendor_id == 0)) &&
352 ((pci_subdevice_id == ent->subdevice_id) ||
353 (ent->subdevice_id == 0))) {
354 sprintf(adapter_name, "%s, Version - %s",
355 ixgbe_strings[ent->index],
356 ixgbe_driver_version);
357 device_set_desc_copy(dev, adapter_name);
359 return (BUS_PROBE_DEFAULT);
366 /*********************************************************************
367 * Device initialization routine
369 * The attach entry point is called when the driver is being loaded.
370 * This routine identifies the type of hardware, allocates all resources
371 * and initializes the hardware.
373 * return 0 on success, positive on failure
374 *********************************************************************/
377 ixgbe_attach(device_t dev)
379 struct adapter *adapter;
385 INIT_DEBUGOUT("ixgbe_attach: begin");
387 if (resource_disabled("ixgbe", device_get_unit(dev))) {
388 device_printf(dev, "Disabled by device hint\n");
392 /* Allocate, clear, and link in our adapter structure */
393 adapter = device_get_softc(dev);
394 adapter->dev = adapter->osdep.dev = dev;
398 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
402 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
403 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
404 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
405 adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
407 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
408 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
409 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
410 &ixgbe_enable_aim, 1, "Interrupt Moderation");
413 ** Allow a kind of speed control by forcing the autoneg
414 ** advertised speed list to only a certain value, this
415 ** supports 1G on 82599 devices, and 100Mb on x540.
417 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
418 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
419 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
420 adapter, 0, ixgbe_set_advertise, "I", "Link Speed");
423 /* Set up the timer callout */
424 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
426 /* Determine hardware revision */
427 ixgbe_identify_hardware(adapter);
429 /* Do base PCI setup - map BAR0 */
430 if (ixgbe_allocate_pci_resources(adapter)) {
431 device_printf(dev, "Allocation of PCI resources failed\n");
436 /* Do descriptor calc and sanity checks */
437 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
438 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
439 device_printf(dev, "TXD config issue, using default!\n");
440 adapter->num_tx_desc = DEFAULT_TXD;
442 adapter->num_tx_desc = ixgbe_txd;
445 ** With many RX rings it is easy to exceed the
446 ** system mbuf allocation. Tuning nmbclusters
447 ** can alleviate this.
449 if (nmbclusters > 0 ) {
451 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
452 if (s > nmbclusters) {
453 device_printf(dev, "RX Descriptors exceed "
454 "system mbuf max, using default instead!\n");
455 ixgbe_rxd = DEFAULT_RXD;
459 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
460 ixgbe_rxd < MIN_TXD || ixgbe_rxd > MAX_TXD) {
461 device_printf(dev, "RXD config issue, using default!\n");
462 adapter->num_rx_desc = DEFAULT_RXD;
464 adapter->num_rx_desc = ixgbe_rxd;
466 /* Allocate our TX/RX Queues */
467 if (ixgbe_allocate_queues(adapter)) {
472 /* Allocate multicast array memory. */
473 adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
474 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
475 if (adapter->mta == NULL) {
476 device_printf(dev, "Can not allocate multicast setup array\n");
481 /* Initialize the shared code */
482 error = ixgbe_init_shared_code(hw);
483 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
485 ** No optics in this port, set up
486 ** so the timer routine will probe
487 ** for later insertion.
489 adapter->sfp_probe = TRUE;
491 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
492 device_printf(dev,"Unsupported SFP+ module detected!\n");
496 device_printf(dev,"Unable to initialize the shared code\n");
501 /* Make sure we have a good EEPROM before we read from it */
502 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
503 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
508 /* Get Hardware Flow Control setting */
509 hw->fc.requested_mode = ixgbe_fc_full;
510 adapter->fc = hw->fc.requested_mode;
511 hw->fc.pause_time = IXGBE_FC_PAUSE;
512 hw->fc.low_water = IXGBE_FC_LO;
513 hw->fc.high_water[0] = IXGBE_FC_HI;
514 hw->fc.send_xon = TRUE;
516 error = ixgbe_init_hw(hw);
517 if (error == IXGBE_ERR_EEPROM_VERSION) {
518 device_printf(dev, "This device is a pre-production adapter/"
519 "LOM. Please be aware there may be issues associated "
520 "with your hardware.\n If you are experiencing problems "
521 "please contact your Intel or hardware representative "
522 "who provided you with this hardware.\n");
523 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
524 device_printf(dev,"Unsupported SFP+ Module\n");
528 device_printf(dev,"Hardware Initialization Failure\n");
532 /* Detect and set physical type */
533 ixgbe_setup_optics(adapter);
535 if ((adapter->msix > 1) && (ixgbe_enable_msix))
536 error = ixgbe_allocate_msix(adapter);
538 error = ixgbe_allocate_legacy(adapter);
542 /* Setup OS specific network interface */
543 if (ixgbe_setup_interface(dev, adapter) != 0)
546 /* Sysctl for limiting the amount of work done in the taskqueue */
547 ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
548 "max number of rx packets to process", &adapter->rx_process_limit,
549 ixgbe_rx_process_limit);
551 /* Initialize statistics */
552 ixgbe_update_stats_counters(adapter);
554 /* Register for VLAN events */
555 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
556 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
557 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
558 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
560 /* Print PCIE bus type/speed/width info */
561 ixgbe_get_bus_info(hw);
562 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
563 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
564 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
565 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
566 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
567 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
570 if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
571 (hw->bus.speed == ixgbe_bus_speed_2500)) {
572 device_printf(dev, "PCI-Express bandwidth available"
573 " for this card\n is not sufficient for"
574 " optimal performance.\n");
575 device_printf(dev, "For optimal performance a x8 "
576 "PCIE, or x4 PCIE 2 slot is required.\n");
579 /* let hardware know driver is loaded */
580 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
581 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
582 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
584 ixgbe_add_hw_stats(adapter);
586 INIT_DEBUGOUT("ixgbe_attach: end");
589 ixgbe_free_transmit_structures(adapter);
590 ixgbe_free_receive_structures(adapter);
592 if (adapter->ifp != NULL)
593 if_free(adapter->ifp);
594 ixgbe_free_pci_resources(adapter);
595 free(adapter->mta, M_DEVBUF);
600 /*********************************************************************
601 * Device removal routine
603 * The detach entry point is called when the driver is being removed.
604 * This routine stops the adapter and deallocates all the resources
605 * that were allocated for driver operation.
607 * return 0 on success, positive on failure
608 *********************************************************************/
611 ixgbe_detach(device_t dev)
613 struct adapter *adapter = device_get_softc(dev);
614 struct ix_queue *que = adapter->queues;
617 INIT_DEBUGOUT("ixgbe_detach: begin");
619 /* Make sure VLANS are not using driver */
620 if (adapter->ifp->if_vlantrunk != NULL) {
621 device_printf(dev,"Vlan in use, detach first\n");
625 IXGBE_CORE_LOCK(adapter);
627 IXGBE_CORE_UNLOCK(adapter);
629 for (int i = 0; i < adapter->num_queues; i++, que++) {
631 taskqueue_drain(que->tq, &que->que_task);
632 taskqueue_free(que->tq);
636 /* Drain the Link queue */
638 taskqueue_drain(adapter->tq, &adapter->link_task);
639 taskqueue_drain(adapter->tq, &adapter->mod_task);
640 taskqueue_drain(adapter->tq, &adapter->msf_task);
642 taskqueue_drain(adapter->tq, &adapter->fdir_task);
644 taskqueue_free(adapter->tq);
647 /* let hardware know driver is unloading */
648 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
649 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
650 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
652 /* Unregister VLAN events */
653 if (adapter->vlan_attach != NULL)
654 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
655 if (adapter->vlan_detach != NULL)
656 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
658 ether_ifdetach(adapter->ifp);
659 callout_drain(&adapter->timer);
660 ixgbe_free_pci_resources(adapter);
661 bus_generic_detach(dev);
662 if_free(adapter->ifp);
664 ixgbe_free_transmit_structures(adapter);
665 ixgbe_free_receive_structures(adapter);
666 free(adapter->mta, M_DEVBUF);
668 IXGBE_CORE_LOCK_DESTROY(adapter);
672 /*********************************************************************
674 * Shutdown entry point
676 **********************************************************************/
679 ixgbe_shutdown(device_t dev)
681 struct adapter *adapter = device_get_softc(dev);
682 IXGBE_CORE_LOCK(adapter);
684 IXGBE_CORE_UNLOCK(adapter);
689 /*********************************************************************
690 * Transmit entry point
692 * ixgbe_start is called by the stack to initiate a transmit.
693 * The driver will remain in this routine as long as there are
694 * packets to transmit and transmit resources are available.
695 * In case resources are not available stack is notified and
696 * the packet is requeued.
697 **********************************************************************/
700 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
703 struct adapter *adapter = txr->adapter;
705 IXGBE_TX_LOCK_ASSERT(txr);
707 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
710 if (!adapter->link_active)
713 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
715 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
719 if (ixgbe_xmit(txr, &m_head)) {
722 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
723 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
726 /* Send a copy of the frame to the BPF listener */
727 ETHER_BPF_MTAP(ifp, m_head);
729 /* Set watchdog on */
730 txr->watchdog_time = ticks;
731 txr->queue_status = IXGBE_QUEUE_WORKING;
738 * Legacy TX start - called by the stack, this
739 * always uses the first tx ring, and should
740 * not be used with multiqueue tx enabled.
743 ixgbe_start(struct ifnet *ifp)
745 struct adapter *adapter = ifp->if_softc;
746 struct tx_ring *txr = adapter->tx_rings;
748 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
750 ixgbe_start_locked(txr, ifp);
751 IXGBE_TX_UNLOCK(txr);
756 #if __FreeBSD_version >= 800000
758 ** Multiqueue Transmit driver
762 ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
764 struct adapter *adapter = ifp->if_softc;
765 struct ix_queue *que;
769 /* Which queue to use */
770 if ((m->m_flags & M_FLOWID) != 0)
771 i = m->m_pkthdr.flowid % adapter->num_queues;
773 txr = &adapter->tx_rings[i];
774 que = &adapter->queues[i];
776 if (IXGBE_TX_TRYLOCK(txr)) {
777 err = ixgbe_mq_start_locked(ifp, txr, m);
778 IXGBE_TX_UNLOCK(txr);
780 err = drbr_enqueue(ifp, txr->br, m);
781 taskqueue_enqueue(que->tq, &que->que_task);
788 ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
790 struct adapter *adapter = txr->adapter;
792 int enqueued, err = 0;
794 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
795 IFF_DRV_RUNNING || adapter->link_active == 0) {
797 err = drbr_enqueue(ifp, txr->br, m);
803 next = drbr_dequeue(ifp, txr->br);
804 } else if (drbr_needs_enqueue(ifp, txr->br)) {
805 if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
807 next = drbr_dequeue(ifp, txr->br);
811 /* Process the queue */
812 while (next != NULL) {
813 if ((err = ixgbe_xmit(txr, &next)) != 0) {
815 err = drbr_enqueue(ifp, txr->br, next);
819 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
820 /* Send a copy of the frame to the BPF listener */
821 ETHER_BPF_MTAP(ifp, next);
822 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
824 if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
826 if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
827 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
830 next = drbr_dequeue(ifp, txr->br);
834 /* Set watchdog on */
835 txr->queue_status = IXGBE_QUEUE_WORKING;
836 txr->watchdog_time = ticks;
843 ** Flush all ring buffers
846 ixgbe_qflush(struct ifnet *ifp)
848 struct adapter *adapter = ifp->if_softc;
849 struct tx_ring *txr = adapter->tx_rings;
852 for (int i = 0; i < adapter->num_queues; i++, txr++) {
854 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
856 IXGBE_TX_UNLOCK(txr);
860 #endif /* __FreeBSD_version >= 800000 */
862 /*********************************************************************
865 * ixgbe_ioctl is called when the user wants to configure the
868 * return 0 on success, positive on failure
869 **********************************************************************/
872 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
874 struct adapter *adapter = ifp->if_softc;
875 struct ifreq *ifr = (struct ifreq *) data;
876 #if defined(INET) || defined(INET6)
877 struct ifaddr *ifa = (struct ifaddr *)data;
878 bool avoid_reset = FALSE;
886 if (ifa->ifa_addr->sa_family == AF_INET)
890 if (ifa->ifa_addr->sa_family == AF_INET6)
893 #if defined(INET) || defined(INET6)
895 ** Calling init results in link renegotiation,
896 ** so we avoid doing it when possible.
899 ifp->if_flags |= IFF_UP;
900 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
902 if (!(ifp->if_flags & IFF_NOARP))
903 arp_ifinit(ifp, ifa);
905 error = ether_ioctl(ifp, command, data);
909 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
910 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
913 IXGBE_CORE_LOCK(adapter);
914 ifp->if_mtu = ifr->ifr_mtu;
915 adapter->max_frame_size =
916 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
917 ixgbe_init_locked(adapter);
918 IXGBE_CORE_UNLOCK(adapter);
922 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
923 IXGBE_CORE_LOCK(adapter);
924 if (ifp->if_flags & IFF_UP) {
925 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
926 if ((ifp->if_flags ^ adapter->if_flags) &
927 (IFF_PROMISC | IFF_ALLMULTI)) {
928 ixgbe_set_promisc(adapter);
931 ixgbe_init_locked(adapter);
933 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
935 adapter->if_flags = ifp->if_flags;
936 IXGBE_CORE_UNLOCK(adapter);
940 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
941 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
942 IXGBE_CORE_LOCK(adapter);
943 ixgbe_disable_intr(adapter);
944 ixgbe_set_multi(adapter);
945 ixgbe_enable_intr(adapter);
946 IXGBE_CORE_UNLOCK(adapter);
951 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
952 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
956 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
957 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
958 if (mask & IFCAP_HWCSUM)
959 ifp->if_capenable ^= IFCAP_HWCSUM;
960 if (mask & IFCAP_TSO4)
961 ifp->if_capenable ^= IFCAP_TSO4;
962 if (mask & IFCAP_LRO)
963 ifp->if_capenable ^= IFCAP_LRO;
964 if (mask & IFCAP_VLAN_HWTAGGING)
965 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
966 if (mask & IFCAP_VLAN_HWFILTER)
967 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
968 if (mask & IFCAP_VLAN_HWTSO)
969 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
970 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
971 IXGBE_CORE_LOCK(adapter);
972 ixgbe_init_locked(adapter);
973 IXGBE_CORE_UNLOCK(adapter);
975 VLAN_CAPABILITIES(ifp);
980 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
981 error = ether_ioctl(ifp, command, data);
988 /*********************************************************************
991 * This routine is used in two ways. It is used by the stack as
992 * init entry point in network interface structure. It is also used
993 * by the driver as a hw/sw initialization routine to get to a
996 * return 0 on success, positive on failure
997 **********************************************************************/
998 #define IXGBE_MHADD_MFS_SHIFT 16
1001 ixgbe_init_locked(struct adapter *adapter)
1003 struct ifnet *ifp = adapter->ifp;
1004 device_t dev = adapter->dev;
1005 struct ixgbe_hw *hw = &adapter->hw;
1006 u32 k, txdctl, mhadd, gpie;
1009 mtx_assert(&adapter->core_mtx, MA_OWNED);
1010 INIT_DEBUGOUT("ixgbe_init: begin");
1011 hw->adapter_stopped = FALSE;
1012 ixgbe_stop_adapter(hw);
1013 callout_stop(&adapter->timer);
1015 /* reprogram the RAR[0] in case user changed it. */
1016 ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1018 /* Get the latest mac address, User can use a LAA */
1019 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
1020 IXGBE_ETH_LENGTH_OF_ADDRESS);
1021 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
1022 hw->addr_ctrl.rar_used_count = 1;
1024 /* Set the various hardware offload abilities */
1025 ifp->if_hwassist = 0;
1026 if (ifp->if_capenable & IFCAP_TSO4)
1027 ifp->if_hwassist |= CSUM_TSO;
1028 if (ifp->if_capenable & IFCAP_TXCSUM) {
1029 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1030 #if __FreeBSD_version >= 800000
1031 if (hw->mac.type != ixgbe_mac_82598EB)
1032 ifp->if_hwassist |= CSUM_SCTP;
1036 /* Prepare transmit descriptors and buffers */
1037 if (ixgbe_setup_transmit_structures(adapter)) {
1038 device_printf(dev,"Could not setup transmit structures\n");
1039 ixgbe_stop(adapter);
1044 ixgbe_initialize_transmit_units(adapter);
1046 /* Setup Multicast table */
1047 ixgbe_set_multi(adapter);
1050 ** Determine the correct mbuf pool
1051 ** for doing jumbo/headersplit
1053 if (adapter->max_frame_size <= 2048)
1054 adapter->rx_mbuf_sz = MCLBYTES;
1055 else if (adapter->max_frame_size <= 4096)
1056 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1057 else if (adapter->max_frame_size <= 9216)
1058 adapter->rx_mbuf_sz = MJUM9BYTES;
1060 adapter->rx_mbuf_sz = MJUM16BYTES;
1062 /* Prepare receive descriptors and buffers */
1063 if (ixgbe_setup_receive_structures(adapter)) {
1064 device_printf(dev,"Could not setup receive structures\n");
1065 ixgbe_stop(adapter);
1069 /* Configure RX settings */
1070 ixgbe_initialize_receive_units(adapter);
1072 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
1074 /* Enable Fan Failure Interrupt */
1075 gpie |= IXGBE_SDP1_GPIEN;
1077 /* Add for Module detection */
1078 if (hw->mac.type == ixgbe_mac_82599EB)
1079 gpie |= IXGBE_SDP2_GPIEN;
1081 /* Thermal Failure Detection */
1082 if (hw->mac.type == ixgbe_mac_X540)
1083 gpie |= IXGBE_SDP0_GPIEN;
1085 if (adapter->msix > 1) {
1086 /* Enable Enhanced MSIX mode */
1087 gpie |= IXGBE_GPIE_MSIX_MODE;
1088 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1091 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1094 if (ifp->if_mtu > ETHERMTU) {
1095 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1096 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1097 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1098 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1101 /* Now enable all the queues */
1103 for (int i = 0; i < adapter->num_queues; i++) {
1104 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1105 txdctl |= IXGBE_TXDCTL_ENABLE;
1106 /* Set WTHRESH to 8, burst writeback */
1107 txdctl |= (8 << 16);
1108 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1111 for (int i = 0; i < adapter->num_queues; i++) {
1112 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1113 if (hw->mac.type == ixgbe_mac_82598EB) {
1119 rxdctl &= ~0x3FFFFF;
1122 rxdctl |= IXGBE_RXDCTL_ENABLE;
1123 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1124 for (k = 0; k < 10; k++) {
1125 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1126 IXGBE_RXDCTL_ENABLE)
1132 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1135 /* Set up VLAN support and filter */
1136 ixgbe_setup_vlan_hw_support(adapter);
1138 /* Enable Receive engine */
1139 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1140 if (hw->mac.type == ixgbe_mac_82598EB)
1141 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1142 rxctrl |= IXGBE_RXCTRL_RXEN;
1143 ixgbe_enable_rx_dma(hw, rxctrl);
1145 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1147 /* Set up MSI/X routing */
1148 if (ixgbe_enable_msix) {
1149 ixgbe_configure_ivars(adapter);
1150 /* Set up auto-mask */
1151 if (hw->mac.type == ixgbe_mac_82598EB)
1152 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1154 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1155 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1157 } else { /* Simple settings for Legacy/MSI */
1158 ixgbe_set_ivar(adapter, 0, 0, 0);
1159 ixgbe_set_ivar(adapter, 0, 0, 1);
1160 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1164 /* Init Flow director */
1165 if (hw->mac.type != ixgbe_mac_82598EB) {
1166 u32 hdrm = 64 << fdir_pballoc;
1168 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1169 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1174 ** Check on any SFP devices that
1175 ** need to be kick-started
1177 if (hw->phy.type == ixgbe_phy_none) {
1178 int err = hw->phy.ops.identify(hw);
1179 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1181 "Unsupported SFP+ module type was detected.\n");
1186 /* Set moderation on the Link interrupt */
1187 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
1189 /* Config/Enable Link */
1190 ixgbe_config_link(adapter);
1192 /* And now turn on interrupts */
1193 ixgbe_enable_intr(adapter);
1195 /* Now inform the stack we're ready */
1196 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1197 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * ixgbe_init - ifnet if_init entry point.
 * Acquires the core lock and delegates to ixgbe_init_locked().
 * (NOTE: interior source lines are elided in this listing.)
 */
1203 ixgbe_init(void *arg)
1205 	struct adapter *adapter = arg;
1207 	IXGBE_CORE_LOCK(adapter);
1208 	ixgbe_init_locked(adapter);
1209 	IXGBE_CORE_UNLOCK(adapter);
1216 ** MSIX Interrupt Handlers and Tasklets
/*
 * ixgbe_enable_queue - unmask the MSI-X interrupt for one queue vector.
 * 82598 uses the single 32-bit EIMS register; newer MACs split the
 * 64-bit mask across EIMS_EX(0)/EIMS_EX(1).
 */
1221 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1223 	struct ixgbe_hw *hw = &adapter->hw;
/* NOTE(review): (1 << vector) is an int shift — undefined for vector >= 31;
 * should arguably be ((u64)1 << vector). Confirm max vector count. */
1224 	u64 queue = (u64)(1 << vector);
1227 	if (hw->mac.type == ixgbe_mac_82598EB) {
1228 	        mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1229 	        IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
/* Non-82598: low 32 bits -> EIMS_EX(0), high 32 bits -> EIMS_EX(1). */
1231 	        mask = (queue & 0xFFFFFFFF);
1233 	        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1234 	        mask = (queue >> 32);
1236 	        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1241 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1243 struct ixgbe_hw *hw = &adapter->hw;
1244 u64 queue = (u64)(1 << vector);
1247 if (hw->mac.type == ixgbe_mac_82598EB) {
1248 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1249 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1251 mask = (queue & 0xFFFFFFFF);
1253 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1254 mask = (queue >> 32);
1256 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * ixgbe_rearm_queues - software-trigger interrupts for the queues in
 * 'queues' by writing EICS (interrupt cause set); used by the watchdog
 * to kick queues that may have stalled.
 */
1261 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
1265 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1266 	        mask = (IXGBE_EIMS_RTX_QUEUE & queues);
1267 	        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1269 	        mask = (queues & 0xFFFFFFFF);
1270 	        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
1271 	        mask = (queues >> 32);
1272 	        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
/*
 * ixgbe_handle_que - deferred (taskqueue) per-queue service routine.
 * Runs RX cleanup, drains any stack-queued transmit work, and either
 * requeues itself (more work pending) or re-enables the queue interrupt.
 */
1278 ixgbe_handle_que(void *context, int pending)
1280 	struct ix_queue *que = context;
1281 	struct adapter *adapter = que->adapter;
1282 	struct tx_ring *txr = que->txr;
1283 	struct ifnet *ifp = adapter->ifp;
1286 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1287 	        more = ixgbe_rxeof(que, adapter->rx_process_limit);
/* FreeBSD >= 8 uses the multiqueue buf_ring path; older uses if_snd. */
1290 #if __FreeBSD_version >= 800000
1291 	        if (!drbr_empty(ifp, txr->br))
1292 	                ixgbe_mq_start_locked(ifp, txr, NULL);
1294 	        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1295 	                ixgbe_start_locked(txr, ifp);
1297 	        IXGBE_TX_UNLOCK(txr);
/* Still work left: reschedule ourselves rather than re-enabling the IRQ. */
1299 	                taskqueue_enqueue(que->tq, &que->que_task);
1304 	/* Reenable this interrupt */
1305 	ixgbe_enable_queue(adapter, que->msix);
1310 /*********************************************************************
1312 * Legacy Interrupt Service routine
1314 **********************************************************************/
/*
 * ixgbe_legacy_irq - INTx/MSI (non-MSI-X) interrupt service routine.
 * Reads EICR once to latch causes, services RX then TX (bounded by
 * MAX_LOOP), defers leftovers to the queue task, and checks the
 * fan-failure and link-change causes before re-enabling interrupts.
 */
1317 ixgbe_legacy_irq(void *arg)
1319 	struct ix_queue *que = arg;
1320 	struct adapter *adapter = que->adapter;
1321 	struct ixgbe_hw *hw = &adapter->hw;
1322 	struct tx_ring *txr = adapter->tx_rings;
1323 	bool more_tx, more_rx;
1324 	u32 reg_eicr, loop = MAX_LOOP;
1327 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
/* Spurious/shared interrupt: nothing latched, just re-enable and leave. */
1330 	if (reg_eicr == 0) {
1331 	        ixgbe_enable_intr(adapter);
1335 	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
1339 	        more_tx = ixgbe_txeof(txr);
1340 	} while (loop-- && more_tx);
1341 	IXGBE_TX_UNLOCK(txr);
1343 	if (more_rx || more_tx)
1344 	        taskqueue_enqueue(que->tq, &que->que_task);
1346 	/* Check for fan failure */
1347 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1348 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1349                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1350 		    "REPLACE IMMEDIATELY!!\n");
/* NOTE(review): writes EIMS (mask set) here, while ixgbe_msix_link clears
 * the same cause via EICR — confirm which register is intended. */
1351 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1354 	/* Link status change */
1355 	if (reg_eicr & IXGBE_EICR_LSC)
1356 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
1358 	ixgbe_enable_intr(adapter);
1363 /*********************************************************************
1365 * MSIX Queue Interrupt Service routine
1367 **********************************************************************/
/*
 * ixgbe_msix_que - MSI-X per-queue interrupt handler.
 * Services RX and TX for this queue, then (optionally) runs Adaptive
 * Interrupt Moderation: EITR is recomputed from the average frame size
 * observed since the last interrupt.  Finally either defers remaining
 * work to the queue task or re-enables this vector.
 */
1369 ixgbe_msix_que(void *arg)
1371 	struct ix_queue *que = arg;
1372 	struct adapter *adapter = que->adapter;
1373 	struct tx_ring *txr = que->txr;
1374 	struct rx_ring *rxr = que->rxr;
1375 	bool more_tx, more_rx;
1380 	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
1383 	more_tx = ixgbe_txeof(txr);
1385 	** Make certain that if the stack
1386 	** has anything queued the task gets
1387 	** scheduled to handle it.
1389 #if __FreeBSD_version < 800000
1390 	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
1392 	if (!drbr_empty(adapter->ifp, txr->br))
1395 	IXGBE_TX_UNLOCK(txr);
/* AIM disabled via tunable: skip straight to the requeue/re-enable logic. */
1399 	if (ixgbe_enable_aim == FALSE)
1402 	** Do Adaptive Interrupt Moderation:
1403         **  - Write out last calculated setting
1404 	**  - Calculate based on average size over
1405 	**    the last interval.
1407         if (que->eitr_setting)
1408 		IXGBE_WRITE_REG(&adapter->hw,
1409 		    IXGBE_EITR(que->msix), que->eitr_setting);
1411         que->eitr_setting = 0;
1413         /* Idle, do nothing */
1414         if ((txr->bytes == 0) && (rxr->bytes == 0))
/* newitr starts from avg bytes/packet on TX, then the max of TX/RX. */
1417 	if ((txr->bytes) && (txr->packets))
1418 		newitr = txr->bytes/txr->packets;
1419 	if ((rxr->bytes) && (rxr->packets))
1420 		newitr = max(newitr,
1421 		    (rxr->bytes / rxr->packets));
1422 	newitr += 24; /* account for hardware frame, crc */
1424 	/* set an upper boundary */
1425 	newitr = min(newitr, 3000);
1427 	/* Be nice to the mid range */
1428 	if ((newitr > 300) && (newitr < 1200))
1429 		newitr = (newitr / 3);
1431 		newitr = (newitr / 2);
/* 82598 packs the interval into both halves; newer MACs set WDIS instead. */
1433         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1434                 newitr |= newitr << 16;
1436                 newitr |= IXGBE_EITR_CNT_WDIS;
1438         /* save for next interrupt */
1439         que->eitr_setting = newitr;
1448 	if (more_tx || more_rx)
1449 		taskqueue_enqueue(que->tq, &que->que_task);
1450 	else /* Reenable this interrupt */
1451 		ixgbe_enable_queue(adapter, que->msix);
/*
 * ixgbe_msix_link - MSI-X "other"/link vector handler.
 * Dispatches the non-queue causes: link state change, flow-director
 * table reinit, ECC error, SFP module insertion / multispeed-fiber
 * events, fan failure (82598AT), and X540 over-temperature.  Re-enables
 * the OTHER cause mask on exit.
 */
1457 ixgbe_msix_link(void *arg)
1459 	struct adapter *adapter = arg;
1460 	struct ixgbe_hw *hw = &adapter->hw;
1463 	++adapter->link_irq;
1465 	/* First get the cause */
/* NOTE(review): cause is read from EICS here rather than EICR — the two
 * alias for reads on this hardware; confirm against the datasheet. */
1466 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1467 	/* Clear interrupt with write */
1468 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1470 	/* Link status change */
1471 	if (reg_eicr & IXGBE_EICR_LSC)
1472 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
1474 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1476 		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1477 			/* This is probably overkill :) */
/* Only one fdir reinit may be in flight; cmpset guards the transition. */
1478 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1480 			/* Clear the interrupt */
1481 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1482 			/* Turn off the interface */
1483 			adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1484 			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1487 		if (reg_eicr & IXGBE_EICR_ECC) {
1488                 	device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1489 			    "Please Reboot!!\n");
1490 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
/* SDP1 = multispeed-fiber event, SDP2 = module insertion on these MACs. */
1493 		if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
1494                 	/* Clear the interrupt */
1495                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1496 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1497 		} else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
1498                 	/* Clear the interrupt */
1499                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1500 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1504 	/* Check for fan failure */
1505 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1506 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1507                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1508 		    "REPLACE IMMEDIATELY!!\n");
1509 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1512 	/* Check for over temp condition */
1513 	if ((hw->mac.type == ixgbe_mac_X540) &&
1514 	    (reg_eicr & IXGBE_EICR_GPI_SDP0)) {
1515                 device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1516 		    "PHY IS SHUT DOWN!!\n");
1517                 device_printf(adapter->dev, "System shutdown required\n");
1518 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
1521 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1525 /*********************************************************************
1527 * Media Ioctl callback
1529 * This routine is called whenever the user queries the status of
1530 * the interface using ifconfig.
1532 **********************************************************************/
/*
 * ixgbe_media_status - ifmedia status callback (SIOCGIFMEDIA).
 * Refreshes link state under the core lock and reports active media:
 * speed is mapped from adapter->link_speed; 10G reports the probed
 * optics subtype.  Early-returns (unlocked) when the link is down.
 */
1534 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1536 	struct adapter *adapter = ifp->if_softc;
1538 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1539 	IXGBE_CORE_LOCK(adapter);
1540 	ixgbe_update_link_status(adapter);
1542 	ifmr->ifm_status = IFM_AVALID;
1543 	ifmr->ifm_active = IFM_ETHER;
1545 	if (!adapter->link_active) {
1546 		IXGBE_CORE_UNLOCK(adapter);
1550 	ifmr->ifm_status |= IFM_ACTIVE;
1552 	switch (adapter->link_speed) {
1553 		case IXGBE_LINK_SPEED_100_FULL:
1554 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1556 		case IXGBE_LINK_SPEED_1GB_FULL:
1557 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1559 		case IXGBE_LINK_SPEED_10GB_FULL:
1560 			ifmr->ifm_active |= adapter->optics | IFM_FDX;
1564 	IXGBE_CORE_UNLOCK(adapter);
1569 /*********************************************************************
1571 * Media Ioctl callback
1573 * This routine is called when the user changes speed/duplex using
1574 * media/mediopt option with ifconfig.
1576 **********************************************************************/
/*
 * ixgbe_media_change - ifmedia change callback (SIOCSIFMEDIA).
 * Only IFM_AUTO is accepted: autoneg is advertised at 100M/1G/10G.
 * Any explicit media subtype is rejected with a console message.
 */
1578 ixgbe_media_change(struct ifnet * ifp)
1580 	struct adapter *adapter = ifp->if_softc;
1581 	struct ifmedia *ifm = &adapter->media;
1583 	INIT_DEBUGOUT("ixgbe_media_change: begin");
1585 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1588 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1590 		adapter->hw.phy.autoneg_advertised =
1591 		    IXGBE_LINK_SPEED_100_FULL |
1592 		    IXGBE_LINK_SPEED_1GB_FULL |
1593 		    IXGBE_LINK_SPEED_10GB_FULL;
1596 		device_printf(adapter->dev, "Only auto media type\n");
1603 /*********************************************************************
1605 * This routine maps the mbufs to tx descriptors, allowing the
1606 * TX engine to transmit the packets.
1607 * - return 0 on success, positive on failure
1609 **********************************************************************/
/*
 * ixgbe_xmit - map an mbuf chain onto advanced TX descriptors.
 * Caller holds the TX lock.  Handles DMA mapping (with one m_defrag()
 * retry on EFBIG), descriptor-availability check, TSO / checksum
 * offload context setup, flow-director sampling, then writes the data
 * descriptors and bumps the tail register.  Returns 0 on success,
 * positive errno on failure (*m_headp may be freed/replaced).
 */
1612 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1614 	struct adapter *adapter = txr->adapter;
1615 	u32 olinfo_status = 0, cmd_type_len;
1617 	int             i, j, error, nsegs;
1618 	int		first, last = 0;
1619 	struct mbuf	*m_head;
/* VLA sized by the MAC-dependent max scatter count (see num_segs setup). */
1620 	bus_dma_segment_t segs[adapter->num_segs];
1622 	struct ixgbe_tx_buf *txbuf;
1623 	union ixgbe_adv_tx_desc *txd = NULL;
1627 	/* Basic descriptor defines */
1628         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1629 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1631 	if (m_head->m_flags & M_VLANTAG)
1632         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1635          * Important to capture the first descriptor
1636          * used because it will contain the index of
1637          * the one we tell the hardware to report back
1639 	first = txr->next_avail_desc;
1640 	txbuf = &txr->tx_buffers[first];
1644 	 * Map the packet for DMA.
1646 	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1647 	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
/* EFBIG: too many segments — defragment once and retry the load. */
1649 	if (error == EFBIG) {
1652 		m = m_defrag(*m_headp, M_DONTWAIT);
1654 			adapter->mbuf_defrag_failed++;
1662 		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1663 		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1665 		if (error == ENOMEM) {
1666 			adapter->no_tx_dma_setup++;
1668 		} else if (error != 0) {
1669 			adapter->no_tx_dma_setup++;
1674 	} else if (error == ENOMEM) {
1675 		adapter->no_tx_dma_setup++;
1677 	} else if (error != 0) {
1678 		adapter->no_tx_dma_setup++;
1684 	/* Make certain there are enough descriptors */
/* -2 keeps slack so head/tail never fully collide. */
1685 	if (nsegs > txr->tx_avail - 2) {
1686 		txr->no_desc_avail++;
1693 	** Set up the appropriate offload context
1694 	** this becomes the first descriptor of
1697 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1698 		if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1699 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1700 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1701 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1702 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1706 	} else if (ixgbe_tx_ctx_setup(txr, m_head))
1707 		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
/* IEEE1588 timestamping is stubbed out pending mtag support. */
1709 #ifdef IXGBE_IEEE1588
1710         /* This is changing soon to an mtag detection */
1711         if (we detect this mbuf has a TSTAMP mtag)
1712                 cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
1716 	/* Do the flow director magic */
1717 	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
1719 		if (txr->atr_count >= atr_sample_rate) {
1720 			ixgbe_atr(txr, m_head);
1725         /* Record payload length */
1727         	olinfo_status |= m_head->m_pkthdr.len <<
1728 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
/* Fill one data descriptor per DMA segment; ring index wraps at num_tx_desc. */
1730 	i = txr->next_avail_desc;
1731 	for (j = 0; j < nsegs; j++) {
1735 		txbuf = &txr->tx_buffers[i];
1736 		txd = &txr->tx_base[i];
1737 		seglen = segs[j].ds_len;
1738 		segaddr = htole64(segs[j].ds_addr);
1740 		txd->read.buffer_addr = segaddr;
1741 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
1742 		    cmd_type_len |seglen);
1743 		txd->read.olinfo_status = htole32(olinfo_status);
1744 		last = i; /* descriptor that will get completion IRQ */
1746 		if (++i == adapter->num_tx_desc)
1749 		txbuf->m_head = NULL;
1750 		txbuf->eop_index = -1;
/* Last descriptor carries EOP and requests writeback status (RS). */
1753 	txd->read.cmd_type_len |=
1754 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1755 	txr->tx_avail -= nsegs;
1756 	txr->next_avail_desc = i;
1758 	txbuf->m_head = m_head;
1759 	/* Swap the dma map between the first and last descriptor */
1760 	txr->tx_buffers[first].map = txbuf->map;
1762 	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1764         /* Set the index of the descriptor that will be marked done */
1765         txbuf = &txr->tx_buffers[first];
1766 	txbuf->eop_index = last;
1768         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1769             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1771 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1772 	 * hardware that this frame is available to transmit.
1774 	++txr->total_packets;
1775 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
1780 	bus_dmamap_unload(txr->txtag, txbuf->map);
/*
 * ixgbe_set_promisc - program FCTRL unicast/multicast promiscuous bits
 * from the interface flags: IFF_PROMISC sets UPE|MPE, IFF_ALLMULTI sets
 * MPE only; both are first cleared as the baseline.
 */
1786 ixgbe_set_promisc(struct adapter *adapter)
1789 	struct ifnet   *ifp = adapter->ifp;
1791 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1792 	reg_rctl &= (~IXGBE_FCTRL_UPE);
1793 	reg_rctl &= (~IXGBE_FCTRL_MPE);
1794 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1796 	if (ifp->if_flags & IFF_PROMISC) {
1797 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1798 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1799 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1800 		reg_rctl |= IXGBE_FCTRL_MPE;
1801 		reg_rctl &= ~IXGBE_FCTRL_UPE;
1802 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1808 /*********************************************************************
1811 * This routine is called whenever multicast address list is updated.
1813 **********************************************************************/
1814 #define IXGBE_RAR_ENTRIES 16
/*
 * ixgbe_set_multi - rebuild the hardware multicast filter from the
 * interface's multicast address list.  Collects AF_LINK addresses into
 * a flat byte array and hands it to the shared code via the
 * ixgbe_mc_array_itr iterator; FCTRL promisc bits are refreshed too.
 */
1817 ixgbe_set_multi(struct adapter *adapter)
1822 	struct ifmultiaddr *ifma;
1824 	struct ifnet   *ifp = adapter->ifp;
1826 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1829 	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1830 	    MAX_NUM_MULTICAST_ADDRESSES);
1832 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
/* NOTE(review): UPE|MPE is unconditionally OR'd here before the flag
 * checks — looks redundant with the branches below; confirm intent. */
1833 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1834 	if (ifp->if_flags & IFF_PROMISC)
1835 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1836 	else if (ifp->if_flags & IFF_ALLMULTI) {
1837 		fctrl |= IXGBE_FCTRL_MPE;
1838 		fctrl &= ~IXGBE_FCTRL_UPE;
1840 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1842 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Multicast list locking API changed at FreeBSD 8. */
1844 #if __FreeBSD_version < 800000
1847 	if_maddr_rlock(ifp);
1849 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1850 		if (ifma->ifma_addr->sa_family != AF_LINK)
1852 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1853 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1854 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1857 #if __FreeBSD_version < 800000
1858 	IF_ADDR_UNLOCK(ifp);
1860 	if_maddr_runlock(ifp);
1864 	ixgbe_update_mc_addr_list(&adapter->hw,
1865 	    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1871 * This is an iterator function now needed by the multicast
1872 * shared code. It simply feeds the shared code routine the
1873 * addresses in the array of ixgbe_set_multi() one by one.
/*
 * ixgbe_mc_array_itr - iterator callback for the shared code's
 * multicast-list update: yields one MAC address per call and advances
 * *update_ptr by one address width.
 */
1876 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1878 	u8 *addr = *update_ptr;
1882 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1883 	*update_ptr = newptr;
1888 /*********************************************************************
1891 * This routine checks for link status,updates statistics,
1892 * and runs the watchdog check.
1894 **********************************************************************/
/*
 * ixgbe_local_timer - 1 Hz callout run under the core lock.
 * Probes pluggable optics, refreshes link state and statistics, skips
 * the watchdog while TX is flow-control paused, and on a hung TX queue
 * dumps diagnostics and reinitializes the adapter.
 */
1897 ixgbe_local_timer(void *arg)
1899 	struct adapter *adapter = arg;
1900 	device_t	dev = adapter->dev;
1901 	struct tx_ring *txr = adapter->tx_rings;
1903 	mtx_assert(&adapter->core_mtx, MA_OWNED);
1905 	/* Check for pluggable optics */
1906 	if (adapter->sfp_probe)
1907 		if (!ixgbe_sfp_probe(adapter))
1908 			goto out; /* Nothing to do */
1910 	ixgbe_update_link_status(adapter);
1911 	ixgbe_update_stats_counters(adapter);
1914 	 * If the interface has been paused
1915 	 * then don't do the watchdog check
1917 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1921 	** Check status on the TX queues for a hang
1923 	for (int i = 0; i < adapter->num_queues; i++, txr++)
1924 		if (txr->queue_status == IXGBE_QUEUE_HUNG)
/* Normal path: kick all queues and rearm the callout for next second. */
1928 	ixgbe_rearm_queues(adapter, adapter->que_mask);
1929 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: dump ring state, mark down, count event, reinit. */
1933 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1934 	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1935 	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
1936 	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
1937 	device_printf(dev,"TX(%d) desc avail = %d,"
1938 	    "Next TX to Clean = %d\n",
1939 	    txr->me, txr->tx_avail, txr->next_to_clean);
1940 	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1941 	adapter->watchdog_events++;
1942 	ixgbe_init_locked(adapter);
1946 ** Note: this routine updates the OS on the link state
1947 ** the real check of the hardware only happens with
1948 ** a link interrupt.
/*
 * ixgbe_update_link_status - push the cached link state to the OS.
 * Only reports transitions (up->down / down->up); the real hardware
 * check happens in the link interrupt path.  On link-down, all TX
 * queues are reset to IDLE so the watchdog does not fire.
 */
1951 ixgbe_update_link_status(struct adapter *adapter)
1953 	struct ifnet	*ifp = adapter->ifp;
1954 	struct tx_ring *txr = adapter->tx_rings;
1955 	device_t dev = adapter->dev;
1958 	if (adapter->link_up){
1959 		if (adapter->link_active == FALSE) {
/* link_speed == 128 is the 10G encoding per the speed #defines. */
1961 				device_printf(dev,"Link is up %d Gbps %s \n",
1962 				    ((adapter->link_speed == 128)? 10:1),
1964 			adapter->link_active = TRUE;
1965 			if_link_state_change(ifp, LINK_STATE_UP);
1967 	} else { /* Link down */
1968 		if (adapter->link_active == TRUE) {
1970 				device_printf(dev,"Link is Down\n");
1971 			if_link_state_change(ifp, LINK_STATE_DOWN);
1972 			adapter->link_active = FALSE;
1973 			for (int i = 0; i < adapter->num_queues;
1975 				txr->queue_status = IXGBE_QUEUE_IDLE;
1983 /*********************************************************************
1985 * This routine disables all traffic on the adapter by issuing a
1986 * global reset on the MAC and deallocates TX/RX buffers.
1988 **********************************************************************/
/*
 * ixgbe_stop - quiesce the adapter (core lock held): disable
 * interrupts, mark the ifnet down, reset the MAC, turn off the TX
 * laser on multispeed fiber, stop the timer, and restore RAR[0] in
 * case a locally-administered address was set.
 */
1991 ixgbe_stop(void *arg)
1994 	struct adapter  *adapter = arg;
1995 	struct ixgbe_hw *hw = &adapter->hw;
1998 	mtx_assert(&adapter->core_mtx, MA_OWNED);
2000 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
2001 	ixgbe_disable_intr(adapter);
2003 	/* Tell the stack that the interface is no longer active */
2004 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/* Clear the stopped flag so ixgbe_stop_adapter actually runs the reset. */
2007 	hw->adapter_stopped = FALSE;
2008 	ixgbe_stop_adapter(hw);
2009 	/* Turn off the laser */
2010 	if (hw->phy.multispeed_fiber)
2011 		ixgbe_disable_tx_laser(hw);
2012 	callout_stop(&adapter->timer);
2014 	/* reprogram the RAR[0] in case user changed it. */
2015 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2021 /*********************************************************************
2023 * Determine hardware revision.
2025 **********************************************************************/
/*
 * ixgbe_identify_hardware - read PCI IDs into the shared-code hw
 * struct, resolve the MAC type, and set the per-MAC max scatter/gather
 * segment count (82598 vs 82599/newer).
 */
2027 ixgbe_identify_hardware(struct adapter *adapter)
2029 	device_t        dev = adapter->dev;
2030 	struct ixgbe_hw *hw = &adapter->hw;
2032 	/* Save off the information about this board */
2033 	hw->vendor_id = pci_get_vendor(dev);
2034 	hw->device_id = pci_get_device(dev);
2035 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2036 	hw->subsystem_vendor_id =
2037 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2038 	hw->subsystem_device_id =
2039 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2041 	/* We need this here to set the num_segs below */
2042 	ixgbe_set_mac_type(hw);
2044 	/* Pick up the 82599 and VF settings */
2045 	if (hw->mac.type != ixgbe_mac_82598EB) {
2046 		hw->phy.smart_speed = ixgbe_smart_speed;
2047 		adapter->num_segs = IXGBE_82599_SCATTER;
2049 		adapter->num_segs = IXGBE_82598_SCATTER;
2054 /*********************************************************************
2056 * Determine optic type
2058 **********************************************************************/
/*
 * ixgbe_setup_optics - map the reported physical layer to an ifmedia
 * subtype in adapter->optics.  Checks from most to least specific;
 * falls back to IFM_ETHER|IFM_AUTO when nothing matches.  (Each match
 * presumably returns early — those lines are elided in this listing.)
 */
2060 ixgbe_setup_optics(struct adapter *adapter)
2062 	struct ixgbe_hw *hw = &adapter->hw;
2065 	layer = ixgbe_get_supported_physical_layer(hw);
2067 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2068 		adapter->optics = IFM_10G_T;
2072 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2073 		adapter->optics = IFM_1000_T;
2077 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2078 	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2079 		adapter->optics = IFM_10G_LR;
2083 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2084 		adapter->optics = IFM_10G_SR;
2088 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2089 		adapter->optics = IFM_10G_TWINAX;
2093 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2094 	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2095 		adapter->optics = IFM_10G_CX4;
2099 	/* If we get here just set the default */
2100 	adapter->optics = IFM_ETHER | IFM_AUTO;
2104 /*********************************************************************
2106 * Setup the Legacy or MSI Interrupt handler
2108 **********************************************************************/
/*
 * ixgbe_allocate_legacy - set up the Legacy/MSI (single-vector)
 * interrupt: allocate the IRQ resource, create the per-queue and
 * link taskqueues, and attach ixgbe_legacy_irq as a fast handler.
 * On handler-registration failure both taskqueues are freed.
 */
2110 ixgbe_allocate_legacy(struct adapter *adapter)
2112 	device_t dev = adapter->dev;
2113 	struct		ix_queue *que = adapter->queues;
2117 	if (adapter->msix == 1)
2120 	/* We allocate a single interrupt resource */
2121 	adapter->res = bus_alloc_resource_any(dev,
2122             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2123 	if (adapter->res == NULL) {
2124 		device_printf(dev, "Unable to allocate bus resource: "
2130 	 * Try allocating a fast interrupt and the associated deferred
2131 	 * processing contexts.
2133 	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2134 	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2135             taskqueue_thread_enqueue, &que->tq);
2136 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2137             device_get_nameunit(adapter->dev));
2139 	/* Tasklets for Link, SFP and Multispeed Fiber */
2140 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2141 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2142 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2144 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2146 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2147 	    taskqueue_thread_enqueue, &adapter->tq);
2148 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2149 	    device_get_nameunit(adapter->dev));
2151 	if ((error = bus_setup_intr(dev, adapter->res,
2152             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2153             que, &adapter->tag)) != 0) {
2154 		device_printf(dev, "Failed to register fast interrupt "
2155 		    "handler: %d\n", error);
2156 		taskqueue_free(que->tq);
2157 		taskqueue_free(adapter->tq);
/* With one vector, every queue cause maps to this handler. */
2162 	/* For simplicity in the handlers */
2163 	adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
2169 /*********************************************************************
2171 * Setup MSIX Interrupt resources and handlers
2173 **********************************************************************/
/*
 * ixgbe_allocate_msix - allocate and wire up one MSI-X vector per
 * queue (handler ixgbe_msix_que, bound to a CPU when multiqueue) plus
 * a final link/"other" vector (handler ixgbe_msix_link), and create
 * the associated taskqueues.
 */
2175 ixgbe_allocate_msix(struct adapter *adapter)
2177 	device_t        dev = adapter->dev;
2178 	struct 		ix_queue *que = adapter->queues;
2179 	int 		error, rid, vector = 0;
2181 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
2183 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2184 		    RF_SHAREABLE | RF_ACTIVE);
2185 		if (que->res == NULL) {
2186 			device_printf(dev,"Unable to allocate"
2187 		    	    " bus resource: que interrupt [%d]\n", vector);
2190 		/* Set the handler function */
2191 		error = bus_setup_intr(dev, que->res,
2192 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2193 		    ixgbe_msix_que, que, &que->tag);
2196 			device_printf(dev, "Failed to register QUE handler");
2199 #if __FreeBSD_version >= 800504
2200 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
/* NOTE(review): (1 << que->msix) is an int shift before the u64 cast —
 * overflows for msix >= 31; should be ((u64)1 << que->msix). */
2203 		adapter->que_mask |= (u64)(1 << que->msix);
2205 		** Bind the msix vector, and thus the
2206 		** ring to the corresponding cpu.
2208 		if (adapter->num_queues > 1)
2209 			bus_bind_intr(dev, que->res, i);
2211 		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2212 		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2213 		    taskqueue_thread_enqueue, &que->tq);
2214 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2215 		    device_get_nameunit(adapter->dev));
/* The remaining (last) vector handles link and other causes. */
2220 	adapter->res = bus_alloc_resource_any(dev,
2221     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2222 	if (!adapter->res) {
2223 		device_printf(dev,"Unable to allocate"
2224             " bus resource: Link interrupt [%d]\n", rid);
2227 	/* Set the link handler function */
2228 	error = bus_setup_intr(dev, adapter->res,
2229 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2230 	    ixgbe_msix_link, adapter, &adapter->tag);
2232 		adapter->res = NULL;
2233 		device_printf(dev, "Failed to register LINK handler");
2236 #if __FreeBSD_version >= 800504
2237 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2239 	adapter->linkvec = vector;
2240 	/* Tasklets for Link, SFP and Multispeed Fiber */
2241 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2242 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2243 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2245 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2247 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2248 	    taskqueue_thread_enqueue, &adapter->tq);
2249 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2250 	    device_get_nameunit(adapter->dev));
2256 * Setup Either MSI/X or MSI
/*
 * ixgbe_setup_msix - probe for MSI-X capability and decide the vector
 * plan: map the MSI-X BAR (82598 BAR, else +4 for 82599), size the
 * queue count from ncpus / tunables (capped at 8 on autoconfig), and
 * request queues+1 vectors.  Falls back to MSI, then legacy INTx.
 * Returns the number of vectors obtained (elided return lines).
 */
2259 ixgbe_setup_msix(struct adapter *adapter)
2261 	device_t dev = adapter->dev;
2262 	int rid, want, queues, msgs;
2264 	/* Override by tuneable */
2265 	if (ixgbe_enable_msix == 0)
2268 	/* First try MSI/X */
2269 	rid = PCIR_BAR(MSIX_82598_BAR);
2270 	adapter->msix_mem = bus_alloc_resource_any(dev,
2271 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2272        	if (!adapter->msix_mem) {
2273 		rid += 4;	/* 82599 maps in higher BAR */
2274 		adapter->msix_mem = bus_alloc_resource_any(dev,
2275 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2277        	if (!adapter->msix_mem) {
2278 		/* May not be enabled */
2279 		device_printf(adapter->dev,
2280 		    "Unable to map MSIX table \n");
2284 	msgs = pci_msix_count(dev);
2285 	if (msgs == 0) { /* system has msix disabled */
2286 		bus_release_resource(dev, SYS_RES_MEMORY,
2287 		    rid, adapter->msix_mem);
2288 		adapter->msix_mem = NULL;
2292 	/* Figure out a reasonable auto config value */
2293 	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2295 	if (ixgbe_num_queues != 0)
2296 		queues = ixgbe_num_queues;
2297 	/* Set max queues to 8 when autoconfiguring */
2298 	else if ((ixgbe_num_queues == 0) && (queues > 8))
2302 	** Want one vector (RX/TX pair) per queue
2303 	** plus an additional for Link.
2309 		device_printf(adapter->dev,
2310 		    "MSIX Configuration Problem, "
2311 		    "%d vectors but %d queues wanted!\n",
2313 		return (0); /* Will go to Legacy setup */
2315 	if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
2316                	device_printf(adapter->dev,
2317 		    "Using MSIX interrupts with %d vectors\n", msgs);
2318 		adapter->num_queues = queues;
/* MSI-X failed or unavailable: try a single MSI, then legacy INTx. */
2322        	msgs = pci_msi_count(dev);
2323        	if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2324                	device_printf(adapter->dev,"Using an MSI interrupt\n");
2326                	device_printf(adapter->dev,"Using a Legacy interrupt\n");
/*
 * ixgbe_allocate_pci_resources - map BAR(0) register space, record the
 * bus tag/handle for the shared code, default to one queue, then run
 * MSI/MSI-X setup; adapter->msix receives the vector count (1 = MSI).
 */
2332 ixgbe_allocate_pci_resources(struct adapter *adapter)
2335 	device_t        dev = adapter->dev;
2338 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2341 	if (!(adapter->pci_mem)) {
2342 		device_printf(dev,"Unable to allocate bus resource: memory\n");
2346 	adapter->osdep.mem_bus_space_tag =
2347 		rman_get_bustag(adapter->pci_mem);
2348 	adapter->osdep.mem_bus_space_handle =
2349 		rman_get_bushandle(adapter->pci_mem);
/* Shared code accesses registers through this handle-backed pointer. */
2350 	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2352 	/* Legacy defaults */
2353 	adapter->num_queues = 1;
2354 	adapter->hw.back = &adapter->osdep;
2357 	** Now setup MSI or MSI/X, should
2358 	** return us the number of supported
2359 	** vectors. (Will be 1 for MSI)
2361 	adapter->msix = ixgbe_setup_msix(adapter);
/*
 * ixgbe_free_pci_resources - teardown counterpart of the allocate
 * paths: release per-queue IRQs, then the legacy/link IRQ, MSI
 * allocations, the MSI-X table BAR, and finally BAR(0).  Bails early
 * if attach failed before interrupt resources existed.
 */
2366 ixgbe_free_pci_resources(struct adapter * adapter)
2368 	struct 		ix_queue *que = adapter->queues;
2369 	device_t	dev = adapter->dev;
2372 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2373 		memrid = PCIR_BAR(MSIX_82598_BAR);
2375 		memrid = PCIR_BAR(MSIX_82599_BAR);
2378 	** There is a slight possibility of a failure mode
2379 	** in attach that will result in entering this function
2380 	** before interrupt resources have been initialized, and
2381 	** in that case we do not want to execute the loops below
2382 	** We can detect this reliably by the state of the adapter
2385 	if (adapter->res == NULL)
2389 	**  Release all msix queue resources:
2391 	for (int i = 0; i < adapter->num_queues; i++, que++) {
2392 		rid = que->msix + 1;
2393 		if (que->tag != NULL) {
2394 			bus_teardown_intr(dev, que->res, que->tag);
2397 		if (que->res != NULL)
2398 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2402 	/* Clean the Legacy or Link interrupt last */
2403 	if (adapter->linkvec) /* we are doing MSIX */
2404 		rid = adapter->linkvec + 1;
2406 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
2408 	if (adapter->tag != NULL) {
2409 		bus_teardown_intr(dev, adapter->res, adapter->tag);
2410 		adapter->tag = NULL;
2412 	if (adapter->res != NULL)
2413 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2417 	pci_release_msi(dev);
2419 	if (adapter->msix_mem != NULL)
2420 		bus_release_resource(dev, SYS_RES_MEMORY,
2421 		    memrid, adapter->msix_mem);
2423 	if (adapter->pci_mem != NULL)
2424 		bus_release_resource(dev, SYS_RES_MEMORY,
2425 		    PCIR_BAR(0), adapter->pci_mem);
2430 /*********************************************************************
2432 * Setup networking device structure and register an interface.
2434 **********************************************************************/
/*
 * ixgbe_setup_interface - allocate and initialize the ifnet structure,
 * attach it to the ethernet layer, advertise capabilities (checksum,
 * TSO4, jumbo, VLAN offloads), and register the supported media types.
 * NOTE(review): elided lines include the ifp declaration, the NULL
 * check around if_alloc(), and some continuation lines of the
 * capability ORs.
 */
2436 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2438 struct ixgbe_hw *hw = &adapter->hw;
2441 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2443 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2445 device_printf(dev, "can not allocate ifnet structure\n");
2448 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2449 ifp->if_mtu = ETHERMTU;
2450 ifp->if_baudrate = 1000000000;
2451 ifp->if_init = ixgbe_init;
2452 ifp->if_softc = adapter;
2453 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2454 ifp->if_ioctl = ixgbe_ioctl;
2455 ifp->if_start = ixgbe_start;
/* Multiqueue transmit entry points exist only on FreeBSD 8+ */
2456 #if __FreeBSD_version >= 800000
2457 ifp->if_transmit = ixgbe_mq_start;
2458 ifp->if_qflush = ixgbe_qflush;
2460 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
2462 ether_ifattach(ifp, adapter->hw.mac.addr);
2464 adapter->max_frame_size =
2465 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2468 * Tell the upper layer(s) we support long frames.
2470 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2472 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
2473 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2474 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2477 ifp->if_capenable = ifp->if_capabilities;
2479 /* Don't enable LRO by default */
2480 ifp->if_capabilities |= IFCAP_LRO;
2483 ** Don't turn this on by default, if vlans are
2484 ** created on another pseudo device (eg. lagg)
2485 ** then vlan events are not passed thru, breaking
2486 ** operation, but with HW FILTER off it works. If
2487 ** using vlans directly on the ixgbe driver you can
2488 ** enable this and get full hardware tag filtering.
2490 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2493 * Specify the media types supported by this adapter and register
2494 * callbacks to update media and link information
2496 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2497 ixgbe_media_status);
2498 ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
2499 ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
/* The 82598AT copper part also supports forced 1G modes */
2500 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2501 ifmedia_add(&adapter->media,
2502 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2503 ifmedia_add(&adapter->media,
2504 IFM_ETHER | IFM_1000_T, 0, NULL);
2506 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2507 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixgbe_config_link - bring up the link.  For SFP-based adapters this
 * defers to the module/multispeed-fiber taskqueue handlers; for other
 * media it queries current link state and capabilities, then calls the
 * MAC setup_link op with the advertised autoneg settings.
 */
2513 ixgbe_config_link(struct adapter *adapter)
2515 struct ixgbe_hw *hw = &adapter->hw;
2516 u32 autoneg, err = 0;
2517 bool sfp, negotiate;
2519 sfp = ixgbe_is_sfp(hw);
/* SFP path: kick setup into task context rather than configuring inline */
2522 if (hw->phy.multispeed_fiber) {
2523 hw->mac.ops.setup_sfp(hw);
2524 ixgbe_enable_tx_laser(hw);
2525 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2527 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2529 if (hw->mac.ops.check_link)
2530 err = ixgbe_check_link(hw, &autoneg,
2531 &adapter->link_up, FALSE);
2534 autoneg = hw->phy.autoneg_advertised;
/* If nothing is advertised, fall back to the hardware's capabilities */
2535 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2536 err = hw->mac.ops.get_link_capabilities(hw,
2537 &autoneg, &negotiate);
2540 if (hw->mac.ops.setup_link)
2541 err = hw->mac.ops.setup_link(hw, autoneg,
2542 negotiate, adapter->link_up);
2548 /********************************************************************
2549 * Manage DMA'able memory.
2550 *******************************************************************/
/*
 * ixgbe_dmamap_cb - bus_dmamap_load() callback: store the physical
 * address of the first (and only) DMA segment into the caller-supplied
 * bus_addr_t pointed to by arg.
 */
2552 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2556 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * ixgbe_dma_malloc - allocate a DMA-able memory region of 'size' bytes:
 * create a dedicated tag, allocate and map the memory, and record the
 * tag/map/vaddr/size in *dma.  On failure, tears down whatever was
 * created (the cleanup labels/gotos fall in lines elided from this
 * chunk) and NULLs the map and tag.
 */
2561 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2562 struct ixgbe_dma_alloc *dma, int mapflags)
2564 device_t dev = adapter->dev;
2567 r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2568 DBA_ALIGN, 0, /* alignment, bounds */
2569 BUS_SPACE_MAXADDR, /* lowaddr */
2570 BUS_SPACE_MAXADDR, /* highaddr */
2571 NULL, NULL, /* filter, filterarg */
2574 size, /* maxsegsize */
2575 BUS_DMA_ALLOCNOW, /* flags */
2576 NULL, /* lockfunc */
2577 NULL, /* lockfuncarg */
2580 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2584 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2585 BUS_DMA_NOWAIT, &dma->dma_map);
2587 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
/* Load the map; ixgbe_dmamap_cb stores the paddr (args elided here) */
2591 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2595 mapflags | BUS_DMA_NOWAIT);
2597 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2601 dma->dma_size = size;
/* Error unwind: free memory, then destroy the tag */
2604 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2606 bus_dma_tag_destroy(dma->dma_tag);
2608 dma->dma_map = NULL;
2609 dma->dma_tag = NULL;
/*
 * ixgbe_dma_free - release a region obtained from ixgbe_dma_malloc():
 * sync for CPU access, unload and free the mapping, destroy the tag.
 */
2614 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2616 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2617 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2618 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2619 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2620 bus_dma_tag_destroy(dma->dma_tag);
2624 /*********************************************************************
2626 * Allocate memory for the transmit and receive rings, and then
2627 * the descriptors associated with each, called only once at attach.
2629 **********************************************************************/
/*
 * ixgbe_allocate_queues - one-time attach-path allocation of the queue,
 * TX ring and RX ring arrays, the descriptor DMA areas for each ring,
 * and the per-ring buffer structures.  txconf/rxconf count how many
 * rings were fully set up so the error unwind (tail of this function)
 * can free exactly what was allocated.
 * NOTE(review): error-path labels and some goto statements fall on
 * lines elided from this chunk.
 */
2631 ixgbe_allocate_queues(struct adapter *adapter)
2633 device_t dev = adapter->dev;
2634 struct ix_queue *que;
2635 struct tx_ring *txr;
2636 struct rx_ring *rxr;
2637 int rsize, tsize, error = IXGBE_SUCCESS;
2638 int txconf = 0, rxconf = 0;
2640 /* First allocate the top level queue structs */
2641 if (!(adapter->queues =
2642 (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2643 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2644 device_printf(dev, "Unable to allocate queue memory\n");
2649 /* First allocate the TX ring struct memory */
2650 if (!(adapter->tx_rings =
2651 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2652 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2653 device_printf(dev, "Unable to allocate TX ring memory\n");
2658 /* Next allocate the RX */
2659 if (!(adapter->rx_rings =
2660 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2661 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2662 device_printf(dev, "Unable to allocate RX ring memory\n");
2667 /* For the ring itself */
2668 tsize = roundup2(adapter->num_tx_desc *
2669 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2672 * Now set up the TX queues, txconf is needed to handle the
2673 * possibility that things fail midcourse and we need to
2674 * undo memory gracefully
2676 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2677 /* Set up some basics */
2678 txr = &adapter->tx_rings[i];
2679 txr->adapter = adapter;
2682 /* Initialize the TX side lock */
2683 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2684 device_get_nameunit(dev), txr->me);
2685 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2687 if (ixgbe_dma_malloc(adapter, tsize,
2688 &txr->txdma, BUS_DMA_NOWAIT)) {
2690 "Unable to allocate TX Descriptor memory\n");
2694 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2695 bzero((void *)txr->tx_base, tsize);
2697 /* Now allocate transmit buffers for the ring */
2698 if (ixgbe_allocate_transmit_buffers(txr)) {
2700 "Critical Failure setting up transmit buffers\n");
2704 #if __FreeBSD_version >= 800000
2705 /* Allocate a buf ring */
2706 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2707 M_WAITOK, &txr->tx_mtx);
2708 if (txr->br == NULL) {
2710 "Critical Failure setting up buf ring\n");
2718 * Next the RX queues...
2720 rsize = roundup2(adapter->num_rx_desc *
2721 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2722 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2723 rxr = &adapter->rx_rings[i];
2724 /* Set up some basics */
2725 rxr->adapter = adapter;
2728 /* Initialize the RX side lock */
2729 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2730 device_get_nameunit(dev), rxr->me);
2731 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2733 if (ixgbe_dma_malloc(adapter, rsize,
2734 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2736 "Unable to allocate RxDescriptor memory\n");
2740 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2741 bzero((void *)rxr->rx_base, rsize);
2743 /* Allocate receive buffers for the ring*/
2744 if (ixgbe_allocate_receive_buffers(rxr)) {
2746 "Critical Failure setting up receive buffers\n");
2753 ** Finally set up the queue holding structs
2755 for (int i = 0; i < adapter->num_queues; i++) {
2756 que = &adapter->queues[i];
2757 que->adapter = adapter;
2758 que->txr = &adapter->tx_rings[i];
2759 que->rxr = &adapter->rx_rings[i];
/* Error unwind: free only the rings counted by rxconf/txconf */
2765 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2766 ixgbe_dma_free(adapter, &rxr->rxdma);
2768 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2769 ixgbe_dma_free(adapter, &txr->txdma);
2770 free(adapter->rx_rings, M_DEVBUF);
2772 free(adapter->tx_rings, M_DEVBUF);
2774 free(adapter->queues, M_DEVBUF);
2779 /*********************************************************************
2781 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2782 * the information needed to transmit a packet on the wire. This is
2783 * called only once at attach, setup is done every reset.
2785 **********************************************************************/
/*
 * ixgbe_allocate_transmit_buffers - attach-time allocation of the
 * per-descriptor tx_buffer array and a DMA map for each entry, plus
 * the shared TX DMA tag (sized for TSO with adapter->num_segs
 * segments).  On any failure, ixgbe_free_transmit_structures() undoes
 * partial work.
 */
2787 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2789 struct adapter *adapter = txr->adapter;
2790 device_t dev = adapter->dev;
2791 struct ixgbe_tx_buf *txbuf;
2795 * Setup DMA descriptor areas.
2797 if ((error = bus_dma_tag_create(NULL, /* parent */
2798 1, 0, /* alignment, bounds */
2799 BUS_SPACE_MAXADDR, /* lowaddr */
2800 BUS_SPACE_MAXADDR, /* highaddr */
2801 NULL, NULL, /* filter, filterarg */
2802 IXGBE_TSO_SIZE, /* maxsize */
2803 adapter->num_segs, /* nsegments */
2804 PAGE_SIZE, /* maxsegsize */
2806 NULL, /* lockfunc */
2807 NULL, /* lockfuncarg */
2809 device_printf(dev,"Unable to allocate TX DMA tag\n");
2813 if (!(txr->tx_buffers =
2814 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2815 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2816 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2821 /* Create the descriptor buffer dma maps */
2822 txbuf = txr->tx_buffers;
2823 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2824 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2826 device_printf(dev, "Unable to create TX DMA map\n");
2833 /* We free all, it handles case where we are in the middle */
2834 ixgbe_free_transmit_structures(adapter);
2838 /*********************************************************************
2840 * Initialize a transmit ring.
2842 **********************************************************************/
/*
 * ixgbe_setup_transmit_ring - (re)initialize one TX ring: zero the
 * descriptor area, reset head/tail indices, free any mbufs left from a
 * previous run, and mark all descriptors available.  Called on every
 * reset, under the TX lock (the matching IXGBE_TX_LOCK falls on an
 * elided line; the UNLOCK is visible at the end).
 */
2844 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2846 struct adapter *adapter = txr->adapter;
2847 struct ixgbe_tx_buf *txbuf;
2850 /* Clear the old ring contents */
2852 bzero((void *)txr->tx_base,
2853 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc;
2855 txr->next_avail_desc = 0;
2856 txr->next_to_clean = 0;
2858 /* Free any existing tx buffers. */
2859 txbuf = txr->tx_buffers;
2860 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2861 if (txbuf->m_head != NULL) {
2862 bus_dmamap_sync(txr->txtag, txbuf->map,
2863 BUS_DMASYNC_POSTWRITE);
2864 bus_dmamap_unload(txr->txtag, txbuf->map);
2865 m_freem(txbuf->m_head);
2866 txbuf->m_head = NULL;
2868 /* Clear the EOP index */
2869 txbuf->eop_index = -1;
2873 /* Set the rate at which we sample packets */
/* Flow Director (ATR) sampling is not available on 82598 */
2874 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2875 txr->atr_sample = atr_sample_rate;
2878 /* Set number of descriptors available */
2879 txr->tx_avail = adapter->num_tx_desc;
2881 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2882 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2883 IXGBE_TX_UNLOCK(txr);
2886 /*********************************************************************
2888 * Initialize all transmit rings.
2890 **********************************************************************/
/*
 * ixgbe_setup_transmit_structures - initialize every TX ring on the
 * adapter by calling ixgbe_setup_transmit_ring() on each in turn.
 */
2892 ixgbe_setup_transmit_structures(struct adapter *adapter)
2894 struct tx_ring *txr = adapter->tx_rings;
2896 for (int i = 0; i < adapter->num_queues; i++, txr++)
2897 ixgbe_setup_transmit_ring(txr);
2902 /*********************************************************************
2904 * Enable transmit unit.
2906 **********************************************************************/
/*
 * ixgbe_initialize_transmit_units - program the hardware TX side: per
 * ring, write the descriptor base/length and head/tail registers,
 * seed the TX command flags, and disable head writeback (register
 * layout differs between 82598 and 82599/X540).  Then, on non-82598
 * parts, enable DMA transmit and set MTQC with the arbiter briefly
 * disabled as the datasheet requires.
 */
2908 ixgbe_initialize_transmit_units(struct adapter *adapter)
2910 struct tx_ring *txr = adapter->tx_rings;
2911 struct ixgbe_hw *hw = &adapter->hw;
2913 /* Setup the Base and Length of the Tx Descriptor Ring */
2915 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2916 u64 tdba = txr->txdma.dma_paddr;
2919 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2920 (tdba & 0x00000000ffffffffULL));
2921 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2922 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2923 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2925 /* Setup the HW Tx Head and Tail descriptor pointers */
2926 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2927 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2929 /* Setup Transmit Descriptor Cmd Settings */
2930 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2931 txr->queue_status = IXGBE_QUEUE_IDLE;
2933 /* Disable Head Writeback */
2934 switch (hw->mac.type) {
2935 case ixgbe_mac_82598EB:
2936 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2938 case ixgbe_mac_82599EB:
2939 case ixgbe_mac_X540:
2941 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2944 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2945 switch (hw->mac.type) {
2946 case ixgbe_mac_82598EB:
2947 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2949 case ixgbe_mac_82599EB:
2950 case ixgbe_mac_X540:
2952 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2958 if (hw->mac.type != ixgbe_mac_82598EB) {
2959 u32 dmatxctl, rttdcs;
2960 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2961 dmatxctl |= IXGBE_DMATXCTL_TE;
2962 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2963 /* Disable arbiter to set MTQC */
2964 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2965 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2966 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2967 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2968 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2969 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2975 /*********************************************************************
2977 * Free all transmit rings.
2979 **********************************************************************/
/*
 * ixgbe_free_transmit_structures - release every TX ring: free the
 * per-ring buffers and descriptor DMA area (under the TX lock), then
 * destroy each ring's mutex and finally the ring array itself.
 */
2981 ixgbe_free_transmit_structures(struct adapter *adapter)
2983 struct tx_ring *txr = adapter->tx_rings;
2985 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2987 ixgbe_free_transmit_buffers(txr);
2988 ixgbe_dma_free(adapter, &txr->txdma);
2989 IXGBE_TX_UNLOCK(txr);
2990 IXGBE_TX_LOCK_DESTROY(txr);
2992 free(adapter->tx_rings, M_DEVBUF);
2995 /*********************************************************************
2997 * Free transmit ring related data structures.
2999 **********************************************************************/
/*
 * ixgbe_free_transmit_buffers - free all per-descriptor TX state for a
 * ring: any outstanding mbufs (with DMA sync/unload first), the DMA
 * maps, the tx_buffers array, the buf ring (FreeBSD 8+) and the TX
 * DMA tag.  Handles partially-initialized rings (NULL checks).
 */
3001 ixgbe_free_transmit_buffers(struct tx_ring *txr)
3003 struct adapter *adapter = txr->adapter;
3004 struct ixgbe_tx_buf *tx_buffer;
3007 INIT_DEBUGOUT("free_transmit_ring: begin");
3009 if (txr->tx_buffers == NULL)
3012 tx_buffer = txr->tx_buffers;
3013 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3014 if (tx_buffer->m_head != NULL) {
3015 bus_dmamap_sync(txr->txtag, tx_buffer->map,
3016 BUS_DMASYNC_POSTWRITE);
3017 bus_dmamap_unload(txr->txtag,
3019 m_freem(tx_buffer->m_head);
3020 tx_buffer->m_head = NULL;
3021 if (tx_buffer->map != NULL) {
3022 bus_dmamap_destroy(txr->txtag,
3024 tx_buffer->map = NULL;
/* No mbuf outstanding, but a map may still exist: unload + destroy it */
3026 } else if (tx_buffer->map != NULL) {
3027 bus_dmamap_unload(txr->txtag,
3029 bus_dmamap_destroy(txr->txtag,
3031 tx_buffer->map = NULL;
3034 #if __FreeBSD_version >= 800000
3035 if (txr->br != NULL)
3036 buf_ring_free(txr->br, M_DEVBUF);
3038 if (txr->tx_buffers != NULL) {
3039 free(txr->tx_buffers, M_DEVBUF);
3040 txr->tx_buffers = NULL;
3042 if (txr->txtag != NULL) {
3043 bus_dma_tag_destroy(txr->txtag);
3049 /*********************************************************************
3051 * Advanced Context Descriptor setup for VLAN or CSUM
3053 **********************************************************************/
/*
 * ixgbe_tx_ctx_setup - build an advanced context descriptor for VLAN
 * tagging and/or checksum offload of one outgoing mbuf.  Parses the
 * ethernet (and optional VLAN) header, then the L3 header, to fill
 * vlan_macip_lens and type_tucmd_mlhl, consuming one descriptor slot.
 * NOTE(review): declarations (ip, etype, ipproto, vtag) and some case
 * labels/breaks fall on lines elided from this chunk.
 */
3056 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
3058 struct adapter *adapter = txr->adapter;
3059 struct ixgbe_adv_tx_context_desc *TXD;
3060 struct ixgbe_tx_buf *tx_buffer;
3061 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3062 struct ether_vlan_header *eh;
3064 struct ip6_hdr *ip6;
3065 int ehdrlen, ip_hlen = 0;
3068 bool offload = TRUE;
3069 int ctxd = txr->next_avail_desc;
/* Nothing to offload? (checked against CSUM_OFFLOAD mask) */
3073 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
3076 tx_buffer = &txr->tx_buffers[ctxd];
3077 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3080 ** In advanced descriptors the vlan tag must
3081 ** be placed into the descriptor itself.
3083 if (mp->m_flags & M_VLANTAG) {
3084 vtag = htole16(mp->m_pkthdr.ether_vtag);
3085 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3086 } else if (offload == FALSE)
3090 * Determine where frame payload starts.
3091 * Jump over vlan headers if already present,
3092 * helpful for QinQ too.
3094 eh = mtod(mp, struct ether_vlan_header *);
3095 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3096 etype = ntohs(eh->evl_proto);
3097 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3099 etype = ntohs(eh->evl_encap_proto);
3100 ehdrlen = ETHER_HDR_LEN;
3103 /* Set the ether header length */
3104 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
/* L3 dispatch on ethertype (switch statement partially elided) */
3108 ip = (struct ip *)(mp->m_data + ehdrlen);
3109 ip_hlen = ip->ip_hl << 2;
3111 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3113 case ETHERTYPE_IPV6:
3114 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3115 ip_hlen = sizeof(struct ip6_hdr);
3116 ipproto = ip6->ip6_nxt;
3117 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
3124 vlan_macip_lens |= ip_hlen;
3125 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
/* L4 dispatch on ipproto: TCP / UDP / SCTP checksum offload bits */
3129 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
3130 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3134 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
3135 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
3138 #if __FreeBSD_version >= 800000
3140 if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
3141 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3149 /* Now copy bits into descriptor */
3150 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3151 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3152 TXD->seqnum_seed = htole32(0);
3153 TXD->mss_l4len_idx = htole32(0);
3155 tx_buffer->m_head = NULL;
3156 tx_buffer->eop_index = -1;
3158 /* We've consumed the first desc, adjust counters */
3159 if (++ctxd == adapter->num_tx_desc)
3161 txr->next_avail_desc = ctxd;
3167 /**********************************************************************
3169 * Setup work for hardware segmentation offload (TSO) on
3170 * adapters using advanced tx descriptors
3172 **********************************************************************/
/*
 * ixgbe_tso_setup - build an advanced context descriptor for TCP
 * segmentation offload (IPv4/TCP only).  Computes header lengths,
 * seeds the TCP pseudo-header checksum, and writes VLAN/MACLEN/IPLEN,
 * TUCMD and MSS/L4LEN fields.  *paylen returns the payload length
 * used later by the transmit descriptor.  Returns FALSE for non-TCP.
 */
3174 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
3176 struct adapter *adapter = txr->adapter;
3177 struct ixgbe_adv_tx_context_desc *TXD;
3178 struct ixgbe_tx_buf *tx_buffer;
3179 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3180 u32 mss_l4len_idx = 0;
3182 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
3183 struct ether_vlan_header *eh;
3189 * Determine where frame payload starts.
3190 * Jump over vlan headers if already present
3192 eh = mtod(mp, struct ether_vlan_header *);
3193 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
3194 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3196 ehdrlen = ETHER_HDR_LEN;
3198 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3199 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3202 ctxd = txr->next_avail_desc;
3203 tx_buffer = &txr->tx_buffers[ctxd];
3204 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3206 ip = (struct ip *)(mp->m_data + ehdrlen);
3207 if (ip->ip_p != IPPROTO_TCP)
3208 return FALSE; /* 0 */
3210 ip_hlen = ip->ip_hl << 2;
3211 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Pseudo-header checksum seed; hardware finishes the TCP checksum */
3212 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3213 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3214 tcp_hlen = th->th_off << 2;
3215 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
3217 /* This is used in the transmit desc in encap */
3218 *paylen = mp->m_pkthdr.len - hdrlen;
3220 /* VLAN MACLEN IPLEN */
3221 if (mp->m_flags & M_VLANTAG) {
3222 vtag = htole16(mp->m_pkthdr.ether_vtag);
3223 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3226 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3227 vlan_macip_lens |= ip_hlen;
3228 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3230 /* ADV DTYPE TUCMD */
3231 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3232 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3233 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3234 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3238 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
3239 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
3240 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3242 TXD->seqnum_seed = htole32(0);
3243 tx_buffer->m_head = NULL;
3244 tx_buffer->eop_index = -1;
/* Consume one descriptor slot, wrapping at ring end */
3246 if (++ctxd == adapter->num_tx_desc)
3250 txr->next_avail_desc = ctxd;
3256 ** This routine parses packet headers so that Flow
3257 ** Director can make a hashed filter table entry
3258 ** allowing traffic flows to be identified and kept
3259 ** on the same cpu. This would be a performance
3260 ** hit, but we only do it at IXGBE_FDIR_RATE of
/*
 * ixgbe_atr - Flow Director (ATR) sampling: parse the outgoing IPv4
 * TCP/UDP packet's addresses and ports into the atr hash dwords and
 * install a signature filter steering the return flow to this queue's
 * MSI-X vector.  Non-IPv4 packets are ignored.
 * NOTE(review): declarations (ip, th, uh, etype) and some switch
 * labels/returns fall on lines elided from this chunk.
 */
3264 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
3266 struct adapter *adapter = txr->adapter;
3267 struct ix_queue *que;
3271 struct ether_vlan_header *eh;
3272 union ixgbe_atr_hash_dword input = {.dword = 0};
3273 union ixgbe_atr_hash_dword common = {.dword = 0};
3274 int ehdrlen, ip_hlen;
3277 eh = mtod(mp, struct ether_vlan_header *);
3278 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3279 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3280 etype = eh->evl_proto;
3282 ehdrlen = ETHER_HDR_LEN;
3283 etype = eh->evl_encap_proto;
3286 /* Only handling IPv4 */
3287 if (etype != htons(ETHERTYPE_IP))
3290 ip = (struct ip *)(mp->m_data + ehdrlen);
3291 ip_hlen = ip->ip_hl << 2;
3293 /* check if we're UDP or TCP */
3296 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3297 /* src and dst are inverted */
3298 common.port.dst ^= th->th_sport;
3299 common.port.src ^= th->th_dport;
3300 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
3303 uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
3304 /* src and dst are inverted */
3305 common.port.dst ^= uh->uh_sport;
3306 common.port.src ^= uh->uh_dport;
3307 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
3313 input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
/* Tagged frames hash the VLAN ethertype; untagged hash the real etype */
3314 if (mp->m_pkthdr.ether_vtag)
3315 common.flex_bytes ^= htons(ETHERTYPE_VLAN);
3317 common.flex_bytes ^= etype;
3318 common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
3320 que = &adapter->queues[txr->me];
3322 ** This assumes the Rx queue and Tx
3323 ** queue are bound to the same CPU
3325 ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
3326 input, common, que->msix);
3330 /**********************************************************************
3332 * Examine each tx_buffer in the used queue. If the hardware is done
3333 * processing the packet then free associated resources. The
3334 * tx_buffer is put back on the free queue.
3336 **********************************************************************/
/*
 * ixgbe_txeof - transmit completion: walk the ring from next_to_clean,
 * and for each packet whose EOP descriptor has the DD (descriptor
 * done) bit set, free the mbuf and DMA map and return the descriptors
 * to the available pool.  Also drives the per-queue watchdog (HUNG if
 * nothing completed for too long) and clears IFF_DRV_OACTIVE once
 * enough space is free.  Caller must hold the TX lock.
 */
3338 ixgbe_txeof(struct tx_ring *txr)
3340 struct adapter *adapter = txr->adapter;
3341 struct ifnet *ifp = adapter->ifp;
3342 u32 first, last, done, processed;
3343 struct ixgbe_tx_buf *tx_buffer;
3344 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
3346 mtx_assert(&txr->tx_mtx, MA_OWNED);
/* Fast exit: ring already fully clean */
3348 if (txr->tx_avail == adapter->num_tx_desc) {
3349 txr->queue_status = IXGBE_QUEUE_IDLE;
3354 first = txr->next_to_clean;
3355 tx_buffer = &txr->tx_buffers[first];
3356 /* For cleanup we just use legacy struct */
3357 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3358 last = tx_buffer->eop_index;
3361 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3364 ** Get the index of the first descriptor
3365 ** BEYOND the EOP and call that 'done'.
3366 ** I do this so the comparison in the
3367 ** inner while loop below can be simple
3369 if (++last == adapter->num_tx_desc) last = 0;
3372 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3373 BUS_DMASYNC_POSTREAD);
3375 ** Only the EOP descriptor of a packet now has the DD
3376 ** bit set, this is what we look for...
3378 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
3379 /* We clean the range of the packet */
3380 while (first != done) {
3381 tx_desc->upper.data = 0;
3382 tx_desc->lower.data = 0;
3383 tx_desc->buffer_addr = 0;
3387 if (tx_buffer->m_head) {
3389 tx_buffer->m_head->m_pkthdr.len;
3390 bus_dmamap_sync(txr->txtag,
3392 BUS_DMASYNC_POSTWRITE);
3393 bus_dmamap_unload(txr->txtag,
3395 m_freem(tx_buffer->m_head);
3396 tx_buffer->m_head = NULL;
3397 tx_buffer->map = NULL;
3399 tx_buffer->eop_index = -1;
/* Record completion time for the watchdog below */
3400 txr->watchdog_time = ticks;
3402 if (++first == adapter->num_tx_desc)
3405 tx_buffer = &txr->tx_buffers[first];
3407 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3411 /* See if there is more work now */
3412 last = tx_buffer->eop_index;
3415 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3416 /* Get next done point */
3417 if (++last == adapter->num_tx_desc) last = 0;
3422 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3423 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3425 txr->next_to_clean = first;
3428 ** Watchdog calculation, we know there's
3429 ** work outstanding or the first return
3430 ** would have been taken, so none processed
3431 ** for too long indicates a hang.
3433 if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
3434 txr->queue_status = IXGBE_QUEUE_HUNG;
3437 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
3438 * it is OK to send packets. If there are no pending descriptors,
3439 * clear the timeout. Otherwise, if some descriptors have been freed,
3440 * restart the timeout.
3442 if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
3443 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3444 if (txr->tx_avail == adapter->num_tx_desc) {
3445 txr->queue_status = IXGBE_QUEUE_IDLE;
3453 /*********************************************************************
3455 * Refresh mbuf buffers for RX descriptor rings
3456 * - now keeps its own state so discards due to resource
3457 * exhaustion are unnecessary, if an mbuf cannot be obtained
3458 * it just returns, keeping its placeholder, thus it can simply
3459 * be recalled to try again.
3461 **********************************************************************/
/*
 * ixgbe_refresh_mbufs - replenish RX descriptors up to (but not
 * including) 'limit'.  For each slot, allocate a header mbuf (only
 * when header split is on) and/or a payload cluster if missing, load
 * their DMA maps, and write the addresses into the descriptor.  Keeps
 * its own position (next_to_refresh) so an allocation failure simply
 * leaves the placeholder for the next call.  Bumps the hardware tail
 * register only if anything was refreshed.
 */
3463 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
3465 struct adapter *adapter = rxr->adapter;
3466 bus_dma_segment_t hseg[1];
3467 bus_dma_segment_t pseg[1];
3468 struct ixgbe_rx_buf *rxbuf;
3469 struct mbuf *mh, *mp;
3470 int i, j, nsegs, error;
3471 bool refreshed = FALSE;
3473 i = j = rxr->next_to_refresh;
3474 /* Control the loop with one beyond */
3475 if (++j == adapter->num_rx_desc)
3478 while (j != limit) {
3479 rxbuf = &rxr->rx_buffers[i];
/* Without header split, skip the header-mbuf path entirely */
3480 if (rxr->hdr_split == FALSE)
3483 if (rxbuf->m_head == NULL) {
3484 mh = m_gethdr(M_DONTWAIT, MT_DATA);
3490 mh->m_pkthdr.len = mh->m_len = MHLEN;
3492 mh->m_flags |= M_PKTHDR;
3493 /* Get the memory mapping */
3494 error = bus_dmamap_load_mbuf_sg(rxr->htag,
3495 rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
3497 printf("Refresh mbufs: hdr dmamap load"
3498 " failure - %d\n", error);
3500 rxbuf->m_head = NULL;
3504 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3505 BUS_DMASYNC_PREREAD);
3506 rxr->rx_base[i].read.hdr_addr =
3507 htole64(hseg[0].ds_addr);
3510 if (rxbuf->m_pack == NULL) {
3511 mp = m_getjcl(M_DONTWAIT, MT_DATA,
3512 M_PKTHDR, adapter->rx_mbuf_sz);
3518 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3519 /* Get the memory mapping */
3520 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
3521 rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
3523 printf("Refresh mbufs: payload dmamap load"
3524 " failure - %d\n", error);
3526 rxbuf->m_pack = NULL;
3530 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3531 BUS_DMASYNC_PREREAD);
3532 rxr->rx_base[i].read.pkt_addr =
3533 htole64(pseg[0].ds_addr);
3536 /* Next is precalculated */
3538 rxr->next_to_refresh = i;
3539 if (++j == adapter->num_rx_desc)
3543 if (refreshed) /* Update hardware tail index */
3544 IXGBE_WRITE_REG(&adapter->hw,
3545 IXGBE_RDT(rxr->me), rxr->next_to_refresh);
3549 /*********************************************************************
3551 * Allocate memory for rx_buffer structures. Since we use one
3552 * rx_buffer per received packet, the maximum number of rx_buffer's
3553 * that we'll need is equal to the number of receive descriptors
3554 * that we've allocated.
3556 **********************************************************************/
/*
 * ixgbe_allocate_receive_buffers - attach-time allocation for one RX
 * ring: the rx_buffer array, two DMA tags (header tag limited to
 * MSIZE; payload tag up to MJUM16BYTES), and per-descriptor head and
 * packet DMA maps.  On failure, ixgbe_free_receive_structures()
 * releases whatever was created.
 */
3558 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3560 struct adapter *adapter = rxr->adapter;
3561 device_t dev = adapter->dev;
3562 struct ixgbe_rx_buf *rxbuf;
3563 int i, bsize, error;
3565 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3566 if (!(rxr->rx_buffers =
3567 (struct ixgbe_rx_buf *) malloc(bsize,
3568 M_DEVBUF, M_NOWAIT | M_ZERO))) {
3569 device_printf(dev, "Unable to allocate rx_buffer memory\n");
/* Header tag: small buffers for the header-split path */
3574 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3575 1, 0, /* alignment, bounds */
3576 BUS_SPACE_MAXADDR, /* lowaddr */
3577 BUS_SPACE_MAXADDR, /* highaddr */
3578 NULL, NULL, /* filter, filterarg */
3579 MSIZE, /* maxsize */
3581 MSIZE, /* maxsegsize */
3583 NULL, /* lockfunc */
3584 NULL, /* lockfuncarg */
3586 device_printf(dev, "Unable to create RX DMA tag\n");
/* Payload tag: jumbo clusters up to 16K */
3590 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3591 1, 0, /* alignment, bounds */
3592 BUS_SPACE_MAXADDR, /* lowaddr */
3593 BUS_SPACE_MAXADDR, /* highaddr */
3594 NULL, NULL, /* filter, filterarg */
3595 MJUM16BYTES, /* maxsize */
3597 MJUM16BYTES, /* maxsegsize */
3599 NULL, /* lockfunc */
3600 NULL, /* lockfuncarg */
3602 device_printf(dev, "Unable to create RX DMA tag\n");
3606 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
3607 rxbuf = &rxr->rx_buffers[i];
3608 error = bus_dmamap_create(rxr->htag,
3609 BUS_DMA_NOWAIT, &rxbuf->hmap);
3611 device_printf(dev, "Unable to create RX head map\n");
3614 error = bus_dmamap_create(rxr->ptag,
3615 BUS_DMA_NOWAIT, &rxbuf->pmap);
3617 device_printf(dev, "Unable to create RX pkt map\n");
3625 /* Frees all, but can handle partial completion */
3626 ixgbe_free_receive_structures(adapter);
3631 ** Used to detect a descriptor that has
3632 ** been merged by Hardware RSC.
/*
 * ixgbe_rsc_count - extract the RSC coalesced-descriptor count from an
 * advanced RX descriptor's writeback status word.
 */
3635 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
3637 return (le32toh(rx->wb.lower.lo_dword.data) &
3638 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
3641 /*********************************************************************
3643 * Initialize Hardware RSC (LRO) feature on 82599
3644 * for an RX ring, this is toggled by the LRO capability
3645 * even though it is transparent to the stack.
3647 **********************************************************************/
3649 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
3651 struct adapter *adapter = rxr->adapter;
3652 struct ixgbe_hw *hw = &adapter->hw;
3653 u32 rscctrl, rdrxctl;
3655 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3656 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3657 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3658 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
3659 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3661 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
3662 rscctrl |= IXGBE_RSCCTL_RSCEN;
3665 ** Limit the total number of descriptors that
3666 ** can be combined, so it does not exceed 64K
3668 if (adapter->rx_mbuf_sz == MCLBYTES)
3669 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3670 else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
3671 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
3672 else if (adapter->rx_mbuf_sz == MJUM9BYTES)
3673 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
3674 else /* Using 16K cluster */
3675 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
3677 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
3679 /* Enable TCP header recognition */
3680 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
3681 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
3682 IXGBE_PSRTYPE_TCPHDR));
3684 /* Disable RSC for ACK packets */
3685 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3686 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3693 ixgbe_free_receive_ring(struct rx_ring *rxr)
3695 struct adapter *adapter;
3696 struct ixgbe_rx_buf *rxbuf;
3699 adapter = rxr->adapter;
3700 for (i = 0; i < adapter->num_rx_desc; i++) {
3701 rxbuf = &rxr->rx_buffers[i];
3702 if (rxbuf->m_head != NULL) {
3703 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3704 BUS_DMASYNC_POSTREAD);
3705 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3706 rxbuf->m_head->m_flags |= M_PKTHDR;
3707 m_freem(rxbuf->m_head);
3709 if (rxbuf->m_pack != NULL) {
3710 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3711 BUS_DMASYNC_POSTREAD);
3712 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3713 rxbuf->m_pack->m_flags |= M_PKTHDR;
3714 m_freem(rxbuf->m_pack);
3716 rxbuf->m_head = NULL;
3717 rxbuf->m_pack = NULL;
3722 /*********************************************************************
3724 * Initialize a receive ring and its buffers.
3726 **********************************************************************/
3728 ixgbe_setup_receive_ring(struct rx_ring *rxr)
3730 struct adapter *adapter;
3733 struct ixgbe_rx_buf *rxbuf;
3734 bus_dma_segment_t pseg[1], hseg[1];
3735 struct lro_ctrl *lro = &rxr->lro;
3736 int rsize, nsegs, error = 0;
3738 adapter = rxr->adapter;
3742 /* Clear the ring contents */
3744 rsize = roundup2(adapter->num_rx_desc *
3745 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3746 bzero((void *)rxr->rx_base, rsize);
3748 /* Free current RX buffer structs and their mbufs */
3749 ixgbe_free_receive_ring(rxr);
3751 /* Configure header split? */
3752 if (ixgbe_header_split)
3753 rxr->hdr_split = TRUE;
3755 /* Now replenish the mbufs */
3756 for (int j = 0; j != adapter->num_rx_desc; ++j) {
3757 struct mbuf *mh, *mp;
3759 rxbuf = &rxr->rx_buffers[j];
3761 ** Don't allocate mbufs if not
3762 ** doing header split, its wasteful
3764 if (rxr->hdr_split == FALSE)
3767 /* First the header */
3768 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
3769 if (rxbuf->m_head == NULL) {
3773 m_adj(rxbuf->m_head, ETHER_ALIGN);
3775 mh->m_len = mh->m_pkthdr.len = MHLEN;
3776 mh->m_flags |= M_PKTHDR;
3777 /* Get the memory mapping */
3778 error = bus_dmamap_load_mbuf_sg(rxr->htag,
3779 rxbuf->hmap, rxbuf->m_head, hseg,
3780 &nsegs, BUS_DMA_NOWAIT);
3781 if (error != 0) /* Nothing elegant to do here */
3783 bus_dmamap_sync(rxr->htag,
3784 rxbuf->hmap, BUS_DMASYNC_PREREAD);
3785 /* Update descriptor */
3786 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
3789 /* Now the payload cluster */
3790 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
3791 M_PKTHDR, adapter->rx_mbuf_sz);
3792 if (rxbuf->m_pack == NULL) {
3797 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3798 /* Get the memory mapping */
3799 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
3800 rxbuf->pmap, mp, pseg,
3801 &nsegs, BUS_DMA_NOWAIT);
3804 bus_dmamap_sync(rxr->ptag,
3805 rxbuf->pmap, BUS_DMASYNC_PREREAD);
3806 /* Update descriptor */
3807 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
3811 /* Setup our descriptor indices */
3812 rxr->next_to_check = 0;
3813 rxr->next_to_refresh = 0;
3814 rxr->lro_enabled = FALSE;
3815 rxr->rx_split_packets = 0;
3817 rxr->discard = FALSE;
3819 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3820 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3823 ** Now set up the LRO interface:
3824 ** 82598 uses software LRO, the
3825 ** 82599 and X540 use a hardware assist.
3827 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
3828 (ifp->if_capenable & IFCAP_RXCSUM) &&
3829 (ifp->if_capenable & IFCAP_LRO))
3830 ixgbe_setup_hw_rsc(rxr);
3831 else if (ifp->if_capenable & IFCAP_LRO) {
3832 int err = tcp_lro_init(lro);
3834 device_printf(dev, "LRO Initialization failed!\n");
3837 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
3838 rxr->lro_enabled = TRUE;
3839 lro->ifp = adapter->ifp;
3842 IXGBE_RX_UNLOCK(rxr);
3846 ixgbe_free_receive_ring(rxr);
3847 IXGBE_RX_UNLOCK(rxr);
3851 /*********************************************************************
3853 * Initialize all receive rings.
3855 **********************************************************************/
3857 ixgbe_setup_receive_structures(struct adapter *adapter)
3859 struct rx_ring *rxr = adapter->rx_rings;
3862 for (j = 0; j < adapter->num_queues; j++, rxr++)
3863 if (ixgbe_setup_receive_ring(rxr))
3869 * Free RX buffers allocated so far, we will only handle
3870 * the rings that completed, the failing case will have
3871 * cleaned up for itself. 'j' failed, so its the terminus.
3873 for (int i = 0; i < j; ++i) {
3874 rxr = &adapter->rx_rings[i];
3875 ixgbe_free_receive_ring(rxr);
3881 /*********************************************************************
3883 * Setup receive registers and features.
3885 **********************************************************************/
3886 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3889 ixgbe_initialize_receive_units(struct adapter *adapter)
3891 struct rx_ring *rxr = adapter->rx_rings;
3892 struct ixgbe_hw *hw = &adapter->hw;
3893 struct ifnet *ifp = adapter->ifp;
3894 u32 bufsz, rxctrl, fctrl, srrctl, rxcsum;
3895 u32 reta, mrqc = 0, hlreg, random[10];
3899 * Make sure receives are disabled while
3900 * setting up the descriptor ring
3902 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3903 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3904 rxctrl & ~IXGBE_RXCTRL_RXEN);
3906 /* Enable broadcasts */
3907 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3908 fctrl |= IXGBE_FCTRL_BAM;
3909 fctrl |= IXGBE_FCTRL_DPF;
3910 fctrl |= IXGBE_FCTRL_PMCF;
3911 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3913 /* Set for Jumbo Frames? */
3914 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3915 if (ifp->if_mtu > ETHERMTU)
3916 hlreg |= IXGBE_HLREG0_JUMBOEN;
3918 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3919 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3921 bufsz = adapter->rx_mbuf_sz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3923 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3924 u64 rdba = rxr->rxdma.dma_paddr;
3926 /* Setup the Base and Length of the Rx Descriptor Ring */
3927 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
3928 (rdba & 0x00000000ffffffffULL));
3929 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
3930 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
3931 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3933 /* Set up the SRRCTL register */
3934 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
3935 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3936 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3938 if (rxr->hdr_split) {
3939 /* Use a standard mbuf for the header */
3940 srrctl |= ((IXGBE_RX_HDR <<
3941 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3942 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3943 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3945 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3946 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
3948 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3949 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
3950 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3953 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3954 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3955 IXGBE_PSRTYPE_UDPHDR |
3956 IXGBE_PSRTYPE_IPV4HDR |
3957 IXGBE_PSRTYPE_IPV6HDR;
3958 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3961 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3964 if (adapter->num_queues > 1) {
3968 /* set up random bits */
3969 arc4rand(&random, sizeof(random), 0);
3971 /* Set up the redirection table */
3972 for (i = 0, j = 0; i < 128; i++, j++) {
3973 if (j == adapter->num_queues) j = 0;
3974 reta = (reta << 8) | (j * 0x11);
3976 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3979 /* Now fill our hash function seeds */
3980 for (int i = 0; i < 10; i++)
3981 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
3983 /* Perform hash on these packet types */
3984 mrqc = IXGBE_MRQC_RSSEN
3985 | IXGBE_MRQC_RSS_FIELD_IPV4
3986 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3987 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3988 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3989 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3990 | IXGBE_MRQC_RSS_FIELD_IPV6
3991 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3992 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3993 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3994 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3996 /* RSS and RX IPP Checksum are mutually exclusive */
3997 rxcsum |= IXGBE_RXCSUM_PCSD;
4000 if (ifp->if_capenable & IFCAP_RXCSUM)
4001 rxcsum |= IXGBE_RXCSUM_PCSD;
4003 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
4004 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4006 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4011 /*********************************************************************
4013 * Free all receive rings.
4015 **********************************************************************/
4017 ixgbe_free_receive_structures(struct adapter *adapter)
4019 struct rx_ring *rxr = adapter->rx_rings;
4021 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4022 struct lro_ctrl *lro = &rxr->lro;
4023 ixgbe_free_receive_buffers(rxr);
4024 /* Free LRO memory */
4026 /* Free the ring memory as well */
4027 ixgbe_dma_free(adapter, &rxr->rxdma);
4030 free(adapter->rx_rings, M_DEVBUF);
4034 /*********************************************************************
4036 * Free receive ring data structures
4038 **********************************************************************/
4040 ixgbe_free_receive_buffers(struct rx_ring *rxr)
4042 struct adapter *adapter = rxr->adapter;
4043 struct ixgbe_rx_buf *rxbuf;
4045 INIT_DEBUGOUT("free_receive_structures: begin");
4047 /* Cleanup any existing buffers */
4048 if (rxr->rx_buffers != NULL) {
4049 for (int i = 0; i < adapter->num_rx_desc; i++) {
4050 rxbuf = &rxr->rx_buffers[i];
4051 if (rxbuf->m_head != NULL) {
4052 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
4053 BUS_DMASYNC_POSTREAD);
4054 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
4055 rxbuf->m_head->m_flags |= M_PKTHDR;
4056 m_freem(rxbuf->m_head);
4058 if (rxbuf->m_pack != NULL) {
4059 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
4060 BUS_DMASYNC_POSTREAD);
4061 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
4062 rxbuf->m_pack->m_flags |= M_PKTHDR;
4063 m_freem(rxbuf->m_pack);
4065 rxbuf->m_head = NULL;
4066 rxbuf->m_pack = NULL;
4067 if (rxbuf->hmap != NULL) {
4068 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
4071 if (rxbuf->pmap != NULL) {
4072 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
4076 if (rxr->rx_buffers != NULL) {
4077 free(rxr->rx_buffers, M_DEVBUF);
4078 rxr->rx_buffers = NULL;
4082 if (rxr->htag != NULL) {
4083 bus_dma_tag_destroy(rxr->htag);
4086 if (rxr->ptag != NULL) {
4087 bus_dma_tag_destroy(rxr->ptag);
4094 static __inline void
4095 ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
4099 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
4100 * should be computed by hardware. Also it should not have VLAN tag in
4103 if (rxr->lro_enabled &&
4104 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
4105 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
4106 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
4107 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
4108 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
4109 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
4111 * Send to the stack if:
4112 ** - LRO not enabled, or
4113 ** - no LRO resources, or
4114 ** - lro enqueue fails
4116 if (rxr->lro.lro_cnt != 0)
4117 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
4120 IXGBE_RX_UNLOCK(rxr);
4121 (*ifp->if_input)(ifp, m);
4125 static __inline void
4126 ixgbe_rx_discard(struct rx_ring *rxr, int i)
4128 struct ixgbe_rx_buf *rbuf;
4130 rbuf = &rxr->rx_buffers[i];
4132 if (rbuf->fmp != NULL) {/* Partial chain ? */
4133 rbuf->fmp->m_flags |= M_PKTHDR;
4139 ** With advanced descriptors the writeback
4140 ** clobbers the buffer addrs, so its easier
4141 ** to just free the existing mbufs and take
4142 ** the normal refresh path to get new buffers
4146 m_free(rbuf->m_head);
4147 rbuf->m_head = NULL;
4151 m_free(rbuf->m_pack);
4152 rbuf->m_pack = NULL;
4159 /*********************************************************************
4161 * This routine executes in interrupt context. It replenishes
4162 * the mbufs in the descriptor and sends data which has been
4163 * dma'ed into host memory to upper layer.
4165 * We loop at most count times if count is > 0, or until done if
4168 * Return TRUE for more work, FALSE for all clean.
4169 *********************************************************************/
/*
 * ixgbe_rxeof — RX completion processing for one queue, run in
 * interrupt context.  Walks the descriptor ring from next_to_check,
 * reassembling multi-descriptor frames (header-split and/or hardware
 * RSC chains), handing completed frames to LRO or directly to the
 * stack, and refreshing consumed buffer slots every 8 descriptors.
 * NOTE(review): the matching IXGBE_RX_LOCK appears on a line elided
 * from this chunk; only the UNLOCK is visible below — confirm against
 * the full source.
 */
4171 ixgbe_rxeof(struct ix_queue *que, int count)
4173 struct adapter *adapter = que->adapter;
4174 struct rx_ring *rxr = que->rxr;
4175 struct ifnet *ifp = adapter->ifp;
4176 struct lro_ctrl *lro = &rxr->lro;
4177 struct lro_entry *queued;
4178 int i, nextp, processed = 0;
4180 union ixgbe_adv_rx_desc *cur;
4181 struct ixgbe_rx_buf *rbuf, *nbuf;
/* Main cleanup loop: bounded by 'count' when positive */
4185 for (i = rxr->next_to_check; count != 0;) {
4186 struct mbuf *sendmp, *mh, *mp;
4188 u16 hlen, plen, hdr, vtag;
4191 /* Sync the ring. */
4192 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4193 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4195 cur = &rxr->rx_base[i];
4196 staterr = le32toh(cur->wb.upper.status_error);
/* Stop at the first descriptor the hardware has not written back */
4198 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
4200 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
4207 cur->wb.upper.status_error = 0;
4208 rbuf = &rxr->rx_buffers[i];
/* Pull length, packet type, header info and VLAN tag from writeback */
4212 plen = le16toh(cur->wb.upper.length);
4213 ptype = le32toh(cur->wb.lower.lo_dword.data) &
4214 IXGBE_RXDADV_PKTTYPE_MASK;
4215 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
4216 vtag = le16toh(cur->wb.upper.vlan);
4217 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
4219 /* Make sure bad packets are discarded */
4220 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
4223 rxr->rx_discarded++;
/* discard latches TRUE until EOP so the whole chain is dropped */
4225 rxr->discard = FALSE;
4227 rxr->discard = TRUE;
4228 ixgbe_rx_discard(rxr, i);
4233 ** On 82599 which supports a hardware
4234 ** LRO (called HW RSC), packets need
4235 ** not be fragmented across sequential
4236 ** descriptors, rather the next descriptor
4237 ** is indicated in bits of the descriptor.
4238 ** This also means that we might process
4239 ** more than one packet at a time, something
4240 ** that has never been true before, it
4241 ** required eliminating global chain pointers
4242 ** in favor of what we are doing here. -jfv
4246 ** Figure out the next descriptor
/* With HW RSC the chain's next slot comes from the NEXTP field,
 * otherwise frames continue in the sequential descriptor */
4249 if (rxr->hw_rsc == TRUE) {
4250 rsc = ixgbe_rsc_count(cur);
4251 rxr->rsc_num += (rsc - 1);
4253 if (rsc) { /* Get hardware index */
4255 IXGBE_RXDADV_NEXTP_MASK) >>
4256 IXGBE_RXDADV_NEXTP_SHIFT);
4257 } else { /* Just sequential */
4259 if (nextp == adapter->num_rx_desc)
4262 nbuf = &rxr->rx_buffers[nextp];
4266 ** The header mbuf is ONLY used when header
4267 ** split is enabled, otherwise we get normal
4268 ** behavior, ie, both header and payload
4269 ** are DMA'd into the payload buffer.
4271 ** Rather than using the fmp/lmp global pointers
4272 ** we now keep the head of a packet chain in the
4273 ** buffer struct and pass this along from one
4274 ** descriptor to the next, until we get EOP.
4276 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
4277 /* This must be an initial descriptor */
4278 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
4279 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
4280 if (hlen > IXGBE_RX_HDR)
4281 hlen = IXGBE_RX_HDR;
4283 mh->m_flags |= M_PKTHDR;
4285 mh->m_pkthdr.len = mh->m_len;
4286 /* Null buf pointer so it is refreshed */
4287 rbuf->m_head = NULL;
4289 ** Check the payload length, this
4290 ** could be zero if its a small
4296 mp->m_flags &= ~M_PKTHDR;
4298 mh->m_pkthdr.len += mp->m_len;
4299 /* Null buf pointer so it is refreshed */
4300 rbuf->m_pack = NULL;
4301 rxr->rx_split_packets++;
4304 ** Now create the forward
4305 ** chain so when complete
4309 /* stash the chain head */
4311 /* Make forward chain */
4313 mp->m_next = nbuf->m_pack;
4315 mh->m_next = nbuf->m_pack;
4317 /* Singlet, prepare to send */
4319 if ((adapter->num_vlans) &&
4320 (staterr & IXGBE_RXD_STAT_VP)) {
4321 sendmp->m_pkthdr.ether_vtag = vtag;
4322 sendmp->m_flags |= M_VLANTAG;
4327 ** Either no header split, or a
4328 ** secondary piece of a fragmented
4333 ** See if there is a stored head
4334 ** that determines what we are
4337 rbuf->m_pack = rbuf->fmp = NULL;
4339 if (sendmp != NULL) /* secondary frag */
4340 sendmp->m_pkthdr.len += mp->m_len;
4342 /* first desc of a non-ps chain */
4344 sendmp->m_flags |= M_PKTHDR;
4345 sendmp->m_pkthdr.len = mp->m_len;
4346 if (staterr & IXGBE_RXD_STAT_VP) {
4347 sendmp->m_pkthdr.ether_vtag = vtag;
4348 sendmp->m_flags |= M_VLANTAG;
4351 /* Pass the head pointer on */
4355 mp->m_next = nbuf->m_pack;
4359 /* Sending this frame? */
4361 sendmp->m_pkthdr.rcvif = ifp;
4364 /* capture data for AIM */
4365 rxr->bytes += sendmp->m_pkthdr.len;
4366 rxr->rx_bytes += sendmp->m_pkthdr.len;
4367 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
4368 ixgbe_rx_checksum(staterr, sendmp, ptype);
4369 #if __FreeBSD_version >= 800000
/* Flow id steers the frame back to this queue's context */
4370 sendmp->m_pkthdr.flowid = que->msix;
4371 sendmp->m_flags |= M_FLOWID;
4375 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4376 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4378 /* Advance our pointers to the next descriptor. */
4379 if (++i == adapter->num_rx_desc)
4382 /* Now send to the stack or do LRO */
4383 if (sendmp != NULL) {
4384 rxr->next_to_check = i;
4385 ixgbe_rx_input(rxr, ifp, sendmp, ptype);
/* re-read: ixgbe_rx_input drops the lock around if_input */
4386 i = rxr->next_to_check;
4389 /* Every 8 descriptors we go to refresh mbufs */
4390 if (processed == 8) {
4391 ixgbe_refresh_mbufs(rxr, i);
4396 /* Refresh any remaining buf structs */
4397 if (ixgbe_rx_unrefreshed(rxr))
4398 ixgbe_refresh_mbufs(rxr, i);
4400 rxr->next_to_check = i;
4403 * Flush any outstanding LRO work
4405 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
4406 SLIST_REMOVE_HEAD(&lro->lro_active, next);
4407 tcp_lro_flush(lro, queued);
4410 IXGBE_RX_UNLOCK(rxr);
4413 ** We still have cleaning to do?
4414 ** Schedule another interrupt if so.
4416 if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
4417 ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
4425 /*********************************************************************
4427 * Verify that the hardware indicated that the checksum is valid.
4428 * Inform the stack about the status of checksum so that stack
4429 * doesn't spend time verifying the checksum.
4431 *********************************************************************/
4433 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
4435 u16 status = (u16) staterr;
4436 u8 errors = (u8) (staterr >> 24);
4439 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
4440 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
4443 if (status & IXGBE_RXD_STAT_IPCS) {
4444 if (!(errors & IXGBE_RXD_ERR_IPE)) {
4445 /* IP Checksum Good */
4446 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4447 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4450 mp->m_pkthdr.csum_flags = 0;
4452 if (status & IXGBE_RXD_STAT_L4CS) {
4453 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4454 #if __FreeBSD_version >= 800000
4456 type = CSUM_SCTP_VALID;
4458 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
4459 mp->m_pkthdr.csum_flags |= type;
4461 mp->m_pkthdr.csum_data = htons(0xffff);
4469 ** This routine is run via an vlan config EVENT,
4470 ** it enables us to use the HW Filter table since
4471 ** we can get the vlan id. This just creates the
4472 ** entry in the soft version of the VFTA, init will
4473 ** repopulate the real table.
4476 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4478 struct adapter *adapter = ifp->if_softc;
4481 if (ifp->if_softc != arg) /* Not our event */
4484 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4487 IXGBE_CORE_LOCK(adapter);
4488 index = (vtag >> 5) & 0x7F;
4490 adapter->shadow_vfta[index] |= (1 << bit);
4491 ++adapter->num_vlans;
4492 ixgbe_init_locked(adapter);
4493 IXGBE_CORE_UNLOCK(adapter);
4497 ** This routine is run via an vlan
4498 ** unconfig EVENT, remove our entry
4499 ** in the soft vfta.
4502 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4504 struct adapter *adapter = ifp->if_softc;
4507 if (ifp->if_softc != arg)
4510 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4513 IXGBE_CORE_LOCK(adapter);
4514 index = (vtag >> 5) & 0x7F;
4516 adapter->shadow_vfta[index] &= ~(1 << bit);
4517 --adapter->num_vlans;
4518 /* Re-init to load the changes */
4519 ixgbe_init_locked(adapter);
4520 IXGBE_CORE_UNLOCK(adapter);
4524 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
4526 struct ifnet *ifp = adapter->ifp;
4527 struct ixgbe_hw *hw = &adapter->hw;
4532 ** We get here thru init_locked, meaning
4533 ** a soft reset, this has already cleared
4534 ** the VFTA and other state, so if there
4535 ** have been no vlan's registered do nothing.
4537 if (adapter->num_vlans == 0)
4541 ** A soft reset zero's out the VFTA, so
4542 ** we need to repopulate it now.
4544 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
4545 if (adapter->shadow_vfta[i] != 0)
4546 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
4547 adapter->shadow_vfta[i]);
4549 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4550 /* Enable the Filter Table if enabled */
4551 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
4552 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
4553 ctrl |= IXGBE_VLNCTRL_VFE;
4555 if (hw->mac.type == ixgbe_mac_82598EB)
4556 ctrl |= IXGBE_VLNCTRL_VME;
4557 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
4559 /* On 82599 the VLAN enable is per/queue in RXDCTL */
4560 if (hw->mac.type != ixgbe_mac_82598EB)
4561 for (int i = 0; i < adapter->num_queues; i++) {
4562 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
4563 ctrl |= IXGBE_RXDCTL_VME;
4564 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
4569 ixgbe_enable_intr(struct adapter *adapter)
4571 struct ixgbe_hw *hw = &adapter->hw;
4572 struct ix_queue *que = adapter->queues;
4573 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
4576 /* Enable Fan Failure detection */
4577 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4578 mask |= IXGBE_EIMS_GPI_SDP1;
4580 mask |= IXGBE_EIMS_ECC;
4581 mask |= IXGBE_EIMS_GPI_SDP0;
4582 mask |= IXGBE_EIMS_GPI_SDP1;
4583 mask |= IXGBE_EIMS_GPI_SDP2;
4585 mask |= IXGBE_EIMS_FLOW_DIR;
4589 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4591 /* With RSS we use auto clear */
4592 if (adapter->msix_mem) {
4593 mask = IXGBE_EIMS_ENABLE_MASK;
4594 /* Don't autoclear Link */
4595 mask &= ~IXGBE_EIMS_OTHER;
4596 mask &= ~IXGBE_EIMS_LSC;
4597 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4601 ** Now enable all queues, this is done separately to
4602 ** allow for handling the extended (beyond 32) MSIX
4603 ** vectors that can be used by 82599
4605 for (int i = 0; i < adapter->num_queues; i++, que++)
4606 ixgbe_enable_queue(adapter, que->msix);
4608 IXGBE_WRITE_FLUSH(hw);
4614 ixgbe_disable_intr(struct adapter *adapter)
4616 if (adapter->msix_mem)
4617 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4618 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
4619 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
4621 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
4622 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
4623 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
4625 IXGBE_WRITE_FLUSH(&adapter->hw);
4630 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
4634 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
4641 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
4643 pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
4650 ** Setup the correct IVAR register for a particular MSIX interrupt
4651 ** (yes this is all very magic and confusing :)
4652 ** - entry is the register array entry
4653 ** - vector is the MSIX vector for this queue
4654 ** - type is RX/TX/MISC
4657 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4659 struct ixgbe_hw *hw = &adapter->hw;
4662 vector |= IXGBE_IVAR_ALLOC_VAL;
4664 switch (hw->mac.type) {
4666 case ixgbe_mac_82598EB:
4668 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4670 entry += (type * 64);
4671 index = (entry >> 2) & 0x1F;
4672 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4673 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4674 ivar |= (vector << (8 * (entry & 0x3)));
4675 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4678 case ixgbe_mac_82599EB:
4679 case ixgbe_mac_X540:
4680 if (type == -1) { /* MISC IVAR */
4681 index = (entry & 1) * 8;
4682 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4683 ivar &= ~(0xFF << index);
4684 ivar |= (vector << index);
4685 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4686 } else { /* RX/TX IVARS */
4687 index = (16 * (entry & 1)) + (8 * type);
4688 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4689 ivar &= ~(0xFF << index);
4690 ivar |= (vector << index);
4691 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4700 ixgbe_configure_ivars(struct adapter *adapter)
4702 struct ix_queue *que = adapter->queues;
4705 if (ixgbe_max_interrupt_rate > 0)
4706 newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4710 for (int i = 0; i < adapter->num_queues; i++, que++) {
4711 /* First the RX queue entry */
4712 ixgbe_set_ivar(adapter, i, que->msix, 0);
4713 /* ... and the TX */
4714 ixgbe_set_ivar(adapter, i, que->msix, 1);
4715 /* Set an Initial EITR value */
4716 IXGBE_WRITE_REG(&adapter->hw,
4717 IXGBE_EITR(que->msix), newitr);
4720 /* For the Link interrupt */
4721 ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
4725 ** ixgbe_sfp_probe - called in the local timer to
4726 ** determine if a port had optics inserted.
4728 static bool ixgbe_sfp_probe(struct adapter *adapter)
4730 struct ixgbe_hw *hw = &adapter->hw;
4731 device_t dev = adapter->dev;
4732 bool result = FALSE;
4734 if ((hw->phy.type == ixgbe_phy_nl) &&
4735 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4736 s32 ret = hw->phy.ops.identify_sfp(hw);
4739 ret = hw->phy.ops.reset(hw);
4740 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4741 device_printf(dev,"Unsupported SFP+ module detected!");
4742 printf(" Reload driver with supported module.\n");
4743 adapter->sfp_probe = FALSE;
4746 device_printf(dev,"SFP+ module detected!\n");
4747 /* We now have supported optics */
4748 adapter->sfp_probe = FALSE;
4749 /* Set the optics type so system reports correctly */
4750 ixgbe_setup_optics(adapter);
4758 ** Tasklet handler for MSIX Link interrupts
4759 ** - do outside interrupt since it might sleep
4762 ixgbe_handle_link(void *context, int pending)
4764 struct adapter *adapter = context;
4766 ixgbe_check_link(&adapter->hw,
4767 &adapter->link_speed, &adapter->link_up, 0);
4768 ixgbe_update_link_status(adapter);
4772 ** Tasklet for handling SFP module interrupts
4775 ixgbe_handle_mod(void *context, int pending)
4777 struct adapter *adapter = context;
4778 struct ixgbe_hw *hw = &adapter->hw;
4779 device_t dev = adapter->dev;
4782 err = hw->phy.ops.identify_sfp(hw);
4783 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4785 "Unsupported SFP+ module type was detected.\n");
4788 err = hw->mac.ops.setup_sfp(hw);
4789 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4791 "Setup failure - unsupported SFP+ module type.\n");
4794 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
4800 ** Tasklet for handling MSF (multispeed fiber) interrupts
4803 ixgbe_handle_msf(void *context, int pending)
4805 struct adapter *adapter = context;
4806 struct ixgbe_hw *hw = &adapter->hw;
4810 autoneg = hw->phy.autoneg_advertised;
4811 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4812 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4813 if (hw->mac.ops.setup_link)
4814 hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
4820 ** Tasklet for reinitializing the Flow Director filter table
4823 ixgbe_reinit_fdir(void *context, int pending)
4825 struct adapter *adapter = context;
4826 struct ifnet *ifp = adapter->ifp;
4828 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
4830 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
4831 adapter->fdir_reinit = 0;
4832 /* Restart the interface */
4833 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4838 /**********************************************************************
4840 * Update the board statistics counters.
4842 **********************************************************************/
4844 ixgbe_update_stats_counters(struct adapter *adapter)
4846 struct ifnet *ifp = adapter->ifp;
4847 struct ixgbe_hw *hw = &adapter->hw;
4848 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4849 u64 total_missed_rx = 0;
4851 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4852 adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4853 adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4854 adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
4856 for (int i = 0; i < 8; i++) {
4858 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
4859 /* missed_rx tallies misses for the gprc workaround */
4861 /* global total per queue */
4862 adapter->stats.mpc[i] += mp;
4863 /* Running comprehensive total for stats display */
4864 total_missed_rx += adapter->stats.mpc[i];
4865 if (hw->mac.type == ixgbe_mac_82598EB)
4866 adapter->stats.rnbc[i] +=
4867 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
4868 adapter->stats.pxontxc[i] +=
4869 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
4870 adapter->stats.pxonrxc[i] +=
4871 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
4872 adapter->stats.pxofftxc[i] +=
4873 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
4874 adapter->stats.pxoffrxc[i] +=
4875 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
4876 adapter->stats.pxon2offc[i] +=
4877 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
4879 for (int i = 0; i < 16; i++) {
4880 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4881 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4882 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
4883 adapter->stats.qbrc[i] +=
4884 ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
4885 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
4886 adapter->stats.qbtc[i] +=
4887 ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
4888 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4890 adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4891 adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4892 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4894 /* Hardware workaround, gprc counts missed packets */
4895 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4896 adapter->stats.gprc -= missed_rx;
4898 if (hw->mac.type != ixgbe_mac_82598EB) {
4899 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4900 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4901 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4902 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4903 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4904 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4905 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4906 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4908 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4909 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4910 /* 82598 only has a counter in the high register */
4911 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4912 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4913 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4917 * Workaround: mprc hardware is incorrectly counting
4918 * broadcasts, so for now we subtract those.
4920 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4921 adapter->stats.bprc += bprc;
4922 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4923 if (hw->mac.type == ixgbe_mac_82598EB)
4924 adapter->stats.mprc -= bprc;
4926 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4927 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4928 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4929 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4930 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4931 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4933 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4934 adapter->stats.lxontxc += lxon;
4935 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4936 adapter->stats.lxofftxc += lxoff;
4937 total = lxon + lxoff;
4939 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4940 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4941 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4942 adapter->stats.gptc -= total;
4943 adapter->stats.mptc -= total;
4944 adapter->stats.ptc64 -= total;
4945 adapter->stats.gotc -= total * ETHER_MIN_LEN;
4947 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4948 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4949 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4950 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4951 adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4952 adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4953 adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4954 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4955 adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4956 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4957 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4958 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4959 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4960 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4961 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4962 adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4963 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4964 adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4965 /* Only read FCOE on 82599 */
4966 if (hw->mac.type != ixgbe_mac_82598EB) {
4967 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4968 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4969 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4970 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4971 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4974 /* Fill out the OS statistics structure */
4975 ifp->if_ipackets = adapter->stats.gprc;
4976 ifp->if_opackets = adapter->stats.gptc;
4977 ifp->if_ibytes = adapter->stats.gorc;
4978 ifp->if_obytes = adapter->stats.gotc;
4979 ifp->if_imcasts = adapter->stats.mprc;
4980 ifp->if_collisions = 0;
4983 ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
4984 adapter->stats.rlec;
4987 /** ixgbe_sysctl_tdh_handler - Handler function
4988 * Retrieves the TDH value from the hardware
4991 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4995 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4998 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4999 error = sysctl_handle_int(oidp, &val, 0, req);
5000 if (error || !req->newptr)
5005 /** ixgbe_sysctl_tdt_handler - Handler function
5006 * Retrieves the TDT value from the hardware
5009 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
5013 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
5016 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
5017 error = sysctl_handle_int(oidp, &val, 0, req);
5018 if (error || !req->newptr)
5023 /** ixgbe_sysctl_rdh_handler - Handler function
5024 * Retrieves the RDH value from the hardware
5027 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
5031 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
5034 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
5035 error = sysctl_handle_int(oidp, &val, 0, req);
5036 if (error || !req->newptr)
5041 /** ixgbe_sysctl_rdt_handler - Handler function
5042 * Retrieves the RDT value from the hardware
5045 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
5049 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
5052 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
5053 error = sysctl_handle_int(oidp, &val, 0, req);
5054 if (error || !req->newptr)
5060 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
5063 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
5064 unsigned int reg, usec, rate;
5066 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
5067 usec = ((reg & 0x0FF8) >> 3);
5069 rate = 1000000 / usec;
5072 error = sysctl_handle_int(oidp, &rate, 0, req);
5073 if (error || !req->newptr)
5079 * Add sysctl variables, one per statistic, to the system.
5082 ixgbe_add_hw_stats(struct adapter *adapter)
5085 device_t dev = adapter->dev;
5087 struct tx_ring *txr = adapter->tx_rings;
5088 struct rx_ring *rxr = adapter->rx_rings;
5090 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
5091 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
5092 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
5093 struct ixgbe_hw_stats *stats = &adapter->stats;
5095 struct sysctl_oid *stat_node, *queue_node;
5096 struct sysctl_oid_list *stat_list, *queue_list;
5098 #define QUEUE_NAME_LEN 32
5099 char namebuf[QUEUE_NAME_LEN];
5101 /* Driver Statistics */
5102 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
5103 CTLFLAG_RD, &adapter->dropped_pkts,
5104 "Driver dropped packets");
5105 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
5106 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
5107 "m_defrag() failed");
5108 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_dma_setup",
5109 CTLFLAG_RD, &adapter->no_tx_dma_setup,
5110 "Driver tx dma failure in xmit");
5111 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
5112 CTLFLAG_RD, &adapter->watchdog_events,
5113 "Watchdog timeouts");
5114 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tso_tx",
5115 CTLFLAG_RD, &adapter->tso_tx,
5117 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
5118 CTLFLAG_RD, &adapter->link_irq,
5119 "Link MSIX IRQ Handled");
5121 for (int i = 0; i < adapter->num_queues; i++, txr++) {
5122 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
5123 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5124 CTLFLAG_RD, NULL, "Queue Name");
5125 queue_list = SYSCTL_CHILDREN(queue_node);
5127 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
5128 CTLTYPE_UINT | CTLFLAG_RD, &adapter->queues[i],
5129 sizeof(&adapter->queues[i]),
5130 ixgbe_sysctl_interrupt_rate_handler, "IU",
5132 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
5133 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
5134 ixgbe_sysctl_tdh_handler, "IU",
5135 "Transmit Descriptor Head");
5136 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
5137 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
5138 ixgbe_sysctl_tdt_handler, "IU",
5139 "Transmit Descriptor Tail");
5140 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
5141 CTLFLAG_RD, &txr->no_desc_avail,
5142 "Queue No Descriptor Available");
5143 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
5144 CTLFLAG_RD, &txr->total_packets,
5145 "Queue Packets Transmitted");
5148 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
5149 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
5150 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5151 CTLFLAG_RD, NULL, "Queue Name");
5152 queue_list = SYSCTL_CHILDREN(queue_node);
5154 struct lro_ctrl *lro = &rxr->lro;
5156 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
5157 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5158 CTLFLAG_RD, NULL, "Queue Name");
5159 queue_list = SYSCTL_CHILDREN(queue_node);
5161 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
5162 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
5163 ixgbe_sysctl_rdh_handler, "IU",
5164 "Receive Descriptor Head");
5165 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
5166 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
5167 ixgbe_sysctl_rdt_handler, "IU",
5168 "Receive Descriptor Tail");
5169 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
5170 CTLFLAG_RD, &rxr->rx_packets,
5171 "Queue Packets Received");
5172 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
5173 CTLFLAG_RD, &rxr->rx_bytes,
5174 "Queue Bytes Received");
5175 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
5176 CTLFLAG_RD, &lro->lro_queued, 0,
5178 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
5179 CTLFLAG_RD, &lro->lro_flushed, 0,
5183 /* MAC stats get the own sub node */
5185 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
5186 CTLFLAG_RD, NULL, "MAC Statistics");
5187 stat_list = SYSCTL_CHILDREN(stat_node);
5189 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
5190 CTLFLAG_RD, &stats->crcerrs,
5192 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
5193 CTLFLAG_RD, &stats->illerrc,
5194 "Illegal Byte Errors");
5195 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
5196 CTLFLAG_RD, &stats->errbc,
5198 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
5199 CTLFLAG_RD, &stats->mspdc,
5200 "MAC Short Packets Discarded");
5201 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
5202 CTLFLAG_RD, &stats->mlfc,
5203 "MAC Local Faults");
5204 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
5205 CTLFLAG_RD, &stats->mrfc,
5206 "MAC Remote Faults");
5207 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
5208 CTLFLAG_RD, &stats->rlec,
5209 "Receive Length Errors");
5210 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_txd",
5211 CTLFLAG_RD, &stats->lxontxc,
5212 "Link XON Transmitted");
5213 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_rcvd",
5214 CTLFLAG_RD, &stats->lxonrxc,
5215 "Link XON Received");
5216 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_txd",
5217 CTLFLAG_RD, &stats->lxofftxc,
5218 "Link XOFF Transmitted");
5219 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_rcvd",
5220 CTLFLAG_RD, &stats->lxoffrxc,
5221 "Link XOFF Received");
5223 /* Packet Reception Stats */
5224 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
5225 CTLFLAG_RD, &stats->tor,
5226 "Total Octets Received");
5227 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
5228 CTLFLAG_RD, &stats->gorc,
5229 "Good Octets Received");
5230 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
5231 CTLFLAG_RD, &stats->tpr,
5232 "Total Packets Received");
5233 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
5234 CTLFLAG_RD, &stats->gprc,
5235 "Good Packets Received");
5236 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
5237 CTLFLAG_RD, &stats->mprc,
5238 "Multicast Packets Received");
5239 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
5240 CTLFLAG_RD, &stats->bprc,
5241 "Broadcast Packets Received");
5242 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
5243 CTLFLAG_RD, &stats->prc64,
5244 "64 byte frames received ");
5245 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
5246 CTLFLAG_RD, &stats->prc127,
5247 "65-127 byte frames received");
5248 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
5249 CTLFLAG_RD, &stats->prc255,
5250 "128-255 byte frames received");
5251 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
5252 CTLFLAG_RD, &stats->prc511,
5253 "256-511 byte frames received");
5254 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
5255 CTLFLAG_RD, &stats->prc1023,
5256 "512-1023 byte frames received");
5257 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
5258 CTLFLAG_RD, &stats->prc1522,
5259 "1023-1522 byte frames received");
5260 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
5261 CTLFLAG_RD, &stats->ruc,
5262 "Receive Undersized");
5263 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
5264 CTLFLAG_RD, &stats->rfc,
5265 "Fragmented Packets Received ");
5266 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
5267 CTLFLAG_RD, &stats->roc,
5268 "Oversized Packets Received");
5269 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
5270 CTLFLAG_RD, &stats->rjc,
5272 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
5273 CTLFLAG_RD, &stats->mngprc,
5274 "Management Packets Received");
5275 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
5276 CTLFLAG_RD, &stats->mngptc,
5277 "Management Packets Dropped");
5278 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
5279 CTLFLAG_RD, &stats->xec,
5282 /* Packet Transmission Stats */
5283 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
5284 CTLFLAG_RD, &stats->gotc,
5285 "Good Octets Transmitted");
5286 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
5287 CTLFLAG_RD, &stats->tpt,
5288 "Total Packets Transmitted");
5289 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
5290 CTLFLAG_RD, &stats->gptc,
5291 "Good Packets Transmitted");
5292 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
5293 CTLFLAG_RD, &stats->bptc,
5294 "Broadcast Packets Transmitted");
5295 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
5296 CTLFLAG_RD, &stats->mptc,
5297 "Multicast Packets Transmitted");
5298 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
5299 CTLFLAG_RD, &stats->mngptc,
5300 "Management Packets Transmitted");
5301 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
5302 CTLFLAG_RD, &stats->ptc64,
5303 "64 byte frames transmitted ");
5304 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
5305 CTLFLAG_RD, &stats->ptc127,
5306 "65-127 byte frames transmitted");
5307 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
5308 CTLFLAG_RD, &stats->ptc255,
5309 "128-255 byte frames transmitted");
5310 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
5311 CTLFLAG_RD, &stats->ptc511,
5312 "256-511 byte frames transmitted");
5313 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
5314 CTLFLAG_RD, &stats->ptc1023,
5315 "512-1023 byte frames transmitted");
5316 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
5317 CTLFLAG_RD, &stats->ptc1522,
5318 "1024-1522 byte frames transmitted");
5321 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_crc",
5322 CTLFLAG_RD, &stats->fccrc,
5324 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_last",
5325 CTLFLAG_RD, &stats->fclast,
5327 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_drpd",
5328 CTLFLAG_RD, &stats->fcoerpdc,
5329 "FCoE Packets Dropped");
5330 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_rcvd",
5331 CTLFLAG_RD, &stats->fcoeprc,
5332 "FCoE Packets Received");
5333 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_txd",
5334 CTLFLAG_RD, &stats->fcoeptc,
5335 "FCoE Packets Transmitted");
5336 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_rcvd",
5337 CTLFLAG_RD, &stats->fcoedwrc,
5338 "FCoE DWords Received");
5339 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_txd",
5340 CTLFLAG_RD, &stats->fcoedwtc,
5341 "FCoE DWords Transmitted");
5345 ** Set flow control using sysctl:
5346 ** Flow control values:
5353 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
5356 struct adapter *adapter = (struct adapter *) arg1;
5359 error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
5360 if ((error) || (req->newptr == NULL))
5363 /* Don't bother if it's not changed */
5364 if (adapter->fc == last)
5367 switch (adapter->fc) {
5368 case ixgbe_fc_rx_pause:
5369 case ixgbe_fc_tx_pause:
5371 adapter->hw.fc.requested_mode = adapter->fc;
5375 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5378 ixgbe_fc_enable(&adapter->hw, 0);
5383 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
5384 const char *description, int *limit, int value)
5387 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5388 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5389 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
5393 ** Control link advertise speed:
5395 ** 1 - advertise only 1G
5396 ** 2 - advertise 100Mb
5399 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
5402 struct adapter *adapter;
5404 struct ixgbe_hw *hw;
5405 ixgbe_link_speed speed, last;
5407 adapter = (struct adapter *) arg1;
5410 last = hw->phy.autoneg_advertised;
5412 error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
5414 if ((error) || (adapter->advertise == -1))
5417 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5418 (hw->phy.multispeed_fiber)))
5421 if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) {
5422 device_printf(dev, "Set Advertise: 100Mb on X540 only\n");
5426 if (adapter->advertise == 1)
5427 speed = IXGBE_LINK_SPEED_1GB_FULL;
5428 else if (adapter->advertise == 2)
5429 speed = IXGBE_LINK_SPEED_100_FULL;
5431 speed = IXGBE_LINK_SPEED_1GB_FULL |
5432 IXGBE_LINK_SPEED_10GB_FULL;
5434 if (speed == last) /* no change */
5437 hw->mac.autotry_restart = TRUE;
5438 hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);