1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_inet6.h"
42 /*********************************************************************
44 *********************************************************************/
45 char ixv_driver_version[] = "1.1.2";
47 /*********************************************************************
50 * Used by probe to select devices to load on
51 * Last field stores an index into ixv_strings
52 * Last entry must be all 0s
54 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
55 *********************************************************************/
57 static ixv_vendor_info_t ixv_vendor_info_array[] =
59 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61 /* required last entry */
65 /*********************************************************************
66 * Table of branding strings
67 *********************************************************************/
69 static char *ixv_strings[] = {
70 "Intel(R) PRO/10GbE Virtual Function Network Driver"
73 /*********************************************************************
75 *********************************************************************/
76 static int ixv_probe(device_t);
77 static int ixv_attach(device_t);
78 static int ixv_detach(device_t);
79 static int ixv_shutdown(device_t);
80 #if __FreeBSD_version < 800000
81 static void ixv_start(struct ifnet *);
82 static void ixv_start_locked(struct tx_ring *, struct ifnet *);
84 static int ixv_mq_start(struct ifnet *, struct mbuf *);
85 static int ixv_mq_start_locked(struct ifnet *,
86 struct tx_ring *, struct mbuf *);
87 static void ixv_qflush(struct ifnet *);
89 static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
90 static void ixv_init(void *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_stop(void *);
93 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
94 static int ixv_media_change(struct ifnet *);
95 static void ixv_identify_hardware(struct adapter *);
96 static int ixv_allocate_pci_resources(struct adapter *);
97 static int ixv_allocate_msix(struct adapter *);
98 static int ixv_allocate_queues(struct adapter *);
99 static int ixv_setup_msix(struct adapter *);
100 static void ixv_free_pci_resources(struct adapter *);
101 static void ixv_local_timer(void *);
102 static void ixv_setup_interface(device_t, struct adapter *);
103 static void ixv_config_link(struct adapter *);
105 static int ixv_allocate_transmit_buffers(struct tx_ring *);
106 static int ixv_setup_transmit_structures(struct adapter *);
107 static void ixv_setup_transmit_ring(struct tx_ring *);
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_free_transmit_structures(struct adapter *);
110 static void ixv_free_transmit_buffers(struct tx_ring *);
112 static int ixv_allocate_receive_buffers(struct rx_ring *);
113 static int ixv_setup_receive_structures(struct adapter *);
114 static int ixv_setup_receive_ring(struct rx_ring *);
115 static void ixv_initialize_receive_units(struct adapter *);
116 static void ixv_free_receive_structures(struct adapter *);
117 static void ixv_free_receive_buffers(struct rx_ring *);
119 static void ixv_enable_intr(struct adapter *);
120 static void ixv_disable_intr(struct adapter *);
121 static bool ixv_txeof(struct tx_ring *);
122 static bool ixv_rxeof(struct ix_queue *, int);
123 static void ixv_rx_checksum(u32, struct mbuf *, u32);
124 static void ixv_set_multi(struct adapter *);
125 static void ixv_update_link_status(struct adapter *);
126 static void ixv_refresh_mbufs(struct rx_ring *, int);
127 static int ixv_xmit(struct tx_ring *, struct mbuf **);
128 static int ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
129 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
130 static int ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
131 static int ixv_dma_malloc(struct adapter *, bus_size_t,
132 struct ixv_dma_alloc *, int);
133 static void ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
134 static void ixv_add_rx_process_limit(struct adapter *, const char *,
135 const char *, int *, int);
136 static bool ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
137 static bool ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
138 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
139 static void ixv_configure_ivars(struct adapter *);
140 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
142 static void ixv_setup_vlan_support(struct adapter *);
143 static void ixv_register_vlan(void *, struct ifnet *, u16);
144 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
146 static void ixv_save_stats(struct adapter *);
147 static void ixv_init_stats(struct adapter *);
148 static void ixv_update_stats(struct adapter *);
150 static __inline void ixv_rx_discard(struct rx_ring *, int);
151 static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
154 /* The MSI/X Interrupt handlers */
155 static void ixv_msix_que(void *);
156 static void ixv_msix_mbx(void *);
158 /* Deferred interrupt tasklets */
159 static void ixv_handle_que(void *, int);
160 static void ixv_handle_mbx(void *, int);
162 /*********************************************************************
163 * FreeBSD Device Interface Entry Points
164 *********************************************************************/
166 static device_method_t ixv_methods[] = {
167 /* Device interface */
168 DEVMETHOD(device_probe, ixv_probe),
169 DEVMETHOD(device_attach, ixv_attach),
170 DEVMETHOD(device_detach, ixv_detach),
171 DEVMETHOD(device_shutdown, ixv_shutdown),
175 static driver_t ixv_driver = {
176 "ix", ixv_methods, sizeof(struct adapter),
179 extern devclass_t ixgbe_devclass;
180 DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
181 MODULE_DEPEND(ixv, pci, 1, 1, 1);
182 MODULE_DEPEND(ixv, ether, 1, 1, 1);
185 ** TUNEABLE PARAMETERS:
189 ** AIM: Adaptive Interrupt Moderation
190 ** which means that the interrupt rate
191 ** is varied over time based on the
192 ** traffic for that interrupt vector
194 static int ixv_enable_aim = FALSE;
195 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
197 /* How many packets rxeof tries to clean at a time */
198 static int ixv_rx_process_limit = 128;
199 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
201 /* Flow control setting, default to full */
202 static int ixv_flow_control = ixgbe_fc_full;
203 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
206 * Header split: this causes the hardware to DMA
207 * the header into a seperate mbuf from the payload,
208 * it can be a performance win in some workloads, but
209 * in others it actually hurts, its off by default.
211 static bool ixv_header_split = FALSE;
212 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
215 ** Number of TX descriptors per ring,
216 ** setting higher than RX as this seems
217 ** the better performing choice.
219 static int ixv_txd = DEFAULT_TXD;
220 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
222 /* Number of RX descriptors per ring */
223 static int ixv_rxd = DEFAULT_RXD;
224 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
227 ** Shadow VFTA table, this is needed because
228 ** the real filter table gets cleared during
229 ** a soft reset and we need to repopulate it.
231 static u32 ixv_shadow_vfta[VFTA_SIZE];
233 /*********************************************************************
234 * Device identification routine
236 * ixv_probe determines if the driver should be loaded on
237 * adapter based on PCI vendor/device id of the adapter.
239 * return 0 on success, positive on failure
240 *********************************************************************/
243 ixv_probe(device_t dev)
245 ixv_vendor_info_t *ent;
247 u16 pci_vendor_id = 0;
248 u16 pci_device_id = 0;
249 u16 pci_subvendor_id = 0;
250 u16 pci_subdevice_id = 0;
251 char adapter_name[256];
254 pci_vendor_id = pci_get_vendor(dev);
255 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
258 pci_device_id = pci_get_device(dev);
259 pci_subvendor_id = pci_get_subvendor(dev);
260 pci_subdevice_id = pci_get_subdevice(dev);
262 ent = ixv_vendor_info_array;
263 while (ent->vendor_id != 0) {
264 if ((pci_vendor_id == ent->vendor_id) &&
265 (pci_device_id == ent->device_id) &&
267 ((pci_subvendor_id == ent->subvendor_id) ||
268 (ent->subvendor_id == 0)) &&
270 ((pci_subdevice_id == ent->subdevice_id) ||
271 (ent->subdevice_id == 0))) {
272 sprintf(adapter_name, "%s, Version - %s",
273 ixv_strings[ent->index],
275 device_set_desc_copy(dev, adapter_name);
283 /*********************************************************************
284 * Device initialization routine
286 * The attach entry point is called when the driver is being loaded.
287 * This routine identifies the type of hardware, allocates all resources
288 * and initializes the hardware.
290 * return 0 on success, positive on failure
291 *********************************************************************/
294 ixv_attach(device_t dev)
296 struct adapter *adapter;
300 INIT_DEBUGOUT("ixv_attach: begin");
302 /* Allocate, clear, and link in our adapter structure */
303 adapter = device_get_softc(dev);
304 adapter->dev = adapter->osdep.dev = dev;
308 IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
311 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
312 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
313 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
314 adapter, 0, ixv_sysctl_stats, "I", "Statistics");
316 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
317 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
318 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
319 adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
321 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
322 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
323 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
324 adapter, 0, ixv_set_flowcntl, "I", "Flow Control");
326 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
327 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
328 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
329 &ixv_enable_aim, 1, "Interrupt Moderation");
331 /* Set up the timer callout */
332 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
334 /* Determine hardware revision */
335 ixv_identify_hardware(adapter);
337 /* Do base PCI setup - map BAR0 */
338 if (ixv_allocate_pci_resources(adapter)) {
339 device_printf(dev, "Allocation of PCI resources failed\n");
344 /* Do descriptor calc and sanity checks */
345 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
346 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
347 device_printf(dev, "TXD config issue, using default!\n");
348 adapter->num_tx_desc = DEFAULT_TXD;
350 adapter->num_tx_desc = ixv_txd;
352 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
353 ixv_rxd < MIN_TXD || ixv_rxd > MAX_TXD) {
354 device_printf(dev, "RXD config issue, using default!\n");
355 adapter->num_rx_desc = DEFAULT_RXD;
357 adapter->num_rx_desc = ixv_rxd;
359 /* Allocate our TX/RX Queues */
360 if (ixv_allocate_queues(adapter)) {
366 ** Initialize the shared code: its
367 ** at this point the mac type is set.
369 error = ixgbe_init_shared_code(hw);
371 device_printf(dev,"Shared Code Initialization Failure\n");
376 /* Setup the mailbox */
377 ixgbe_init_mbx_params_vf(hw);
381 /* Get Hardware Flow Control setting */
382 hw->fc.requested_mode = ixgbe_fc_full;
383 hw->fc.pause_time = IXV_FC_PAUSE;
384 hw->fc.low_water = IXV_FC_LO;
385 hw->fc.high_water[0] = IXV_FC_HI;
386 hw->fc.send_xon = TRUE;
388 error = ixgbe_init_hw(hw);
390 device_printf(dev,"Hardware Initialization Failure\n");
395 error = ixv_allocate_msix(adapter);
399 /* Setup OS specific network interface */
400 ixv_setup_interface(dev, adapter);
402 /* Sysctl for limiting the amount of work done in the taskqueue */
403 ixv_add_rx_process_limit(adapter, "rx_processing_limit",
404 "max number of rx packets to process", &adapter->rx_process_limit,
405 ixv_rx_process_limit);
407 /* Do the stats setup */
408 ixv_save_stats(adapter);
409 ixv_init_stats(adapter);
411 /* Register for VLAN events */
412 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
413 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
414 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
415 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
417 INIT_DEBUGOUT("ixv_attach: end");
421 ixv_free_transmit_structures(adapter);
422 ixv_free_receive_structures(adapter);
424 ixv_free_pci_resources(adapter);
429 /*********************************************************************
430 * Device removal routine
432 * The detach entry point is called when the driver is being removed.
433 * This routine stops the adapter and deallocates all the resources
434 * that were allocated for driver operation.
436 * return 0 on success, positive on failure
437 *********************************************************************/
440 ixv_detach(device_t dev)
442 struct adapter *adapter = device_get_softc(dev);
443 struct ix_queue *que = adapter->queues;
445 INIT_DEBUGOUT("ixv_detach: begin");
447 /* Make sure VLANS are not using driver */
448 if (adapter->ifp->if_vlantrunk != NULL) {
449 device_printf(dev,"Vlan in use, detach first\n");
453 IXV_CORE_LOCK(adapter);
455 IXV_CORE_UNLOCK(adapter);
457 for (int i = 0; i < adapter->num_queues; i++, que++) {
459 taskqueue_drain(que->tq, &que->que_task);
460 taskqueue_free(que->tq);
464 /* Drain the Link queue */
466 taskqueue_drain(adapter->tq, &adapter->mbx_task);
467 taskqueue_free(adapter->tq);
470 /* Unregister VLAN events */
471 if (adapter->vlan_attach != NULL)
472 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
473 if (adapter->vlan_detach != NULL)
474 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
476 ether_ifdetach(adapter->ifp);
477 callout_drain(&adapter->timer);
478 ixv_free_pci_resources(adapter);
479 bus_generic_detach(dev);
480 if_free(adapter->ifp);
482 ixv_free_transmit_structures(adapter);
483 ixv_free_receive_structures(adapter);
485 IXV_CORE_LOCK_DESTROY(adapter);
489 /*********************************************************************
491 * Shutdown entry point
493 **********************************************************************/
495 ixv_shutdown(device_t dev)
497 struct adapter *adapter = device_get_softc(dev);
498 IXV_CORE_LOCK(adapter);
500 IXV_CORE_UNLOCK(adapter);
504 #if __FreeBSD_version < 800000
505 /*********************************************************************
506 * Transmit entry point
508 * ixv_start is called by the stack to initiate a transmit.
509 * The driver will remain in this routine as long as there are
510 * packets to transmit and transmit resources are available.
511 * In case resources are not available stack is notified and
512 * the packet is requeued.
513 **********************************************************************/
515 ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
518 struct adapter *adapter = txr->adapter;
520 IXV_TX_LOCK_ASSERT(txr);
522 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
525 if (!adapter->link_active)
528 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
530 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
534 if (ixv_xmit(txr, &m_head)) {
537 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
538 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
541 /* Send a copy of the frame to the BPF listener */
542 ETHER_BPF_MTAP(ifp, m_head);
544 /* Set watchdog on */
545 txr->watchdog_check = TRUE;
546 txr->watchdog_time = ticks;
553 * Legacy TX start - called by the stack, this
554 * always uses the first tx ring, and should
555 * not be used with multiqueue tx enabled.
558 ixv_start(struct ifnet *ifp)
560 struct adapter *adapter = ifp->if_softc;
561 struct tx_ring *txr = adapter->tx_rings;
563 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
565 ixv_start_locked(txr, ifp);
574 ** Multiqueue Transmit driver
578 ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
580 struct adapter *adapter = ifp->if_softc;
581 struct ix_queue *que;
585 /* Which queue to use */
586 if ((m->m_flags & M_FLOWID) != 0)
587 i = m->m_pkthdr.flowid % adapter->num_queues;
589 txr = &adapter->tx_rings[i];
590 que = &adapter->queues[i];
592 if (IXV_TX_TRYLOCK(txr)) {
593 err = ixv_mq_start_locked(ifp, txr, m);
596 err = drbr_enqueue(ifp, txr->br, m);
597 taskqueue_enqueue(que->tq, &que->que_task);
604 ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
606 struct adapter *adapter = txr->adapter;
608 int enqueued, err = 0;
610 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
611 IFF_DRV_RUNNING || adapter->link_active == 0) {
613 err = drbr_enqueue(ifp, txr->br, m);
617 /* Do a clean if descriptors are low */
618 if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
623 next = drbr_dequeue(ifp, txr->br);
624 } else if (drbr_needs_enqueue(ifp, txr->br)) {
625 if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
627 next = drbr_dequeue(ifp, txr->br);
631 /* Process the queue */
632 while (next != NULL) {
633 if ((err = ixv_xmit(txr, &next)) != 0) {
635 err = drbr_enqueue(ifp, txr->br, next);
639 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
640 /* Send a copy of the frame to the BPF listener */
641 ETHER_BPF_MTAP(ifp, next);
642 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
644 if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
645 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
648 next = drbr_dequeue(ifp, txr->br);
652 /* Set watchdog on */
653 txr->watchdog_check = TRUE;
654 txr->watchdog_time = ticks;
661 ** Flush all ring buffers
664 ixv_qflush(struct ifnet *ifp)
666 struct adapter *adapter = ifp->if_softc;
667 struct tx_ring *txr = adapter->tx_rings;
670 for (int i = 0; i < adapter->num_queues; i++, txr++) {
672 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
681 /*********************************************************************
684 * ixv_ioctl is called when the user wants to configure the
687 * return 0 on success, positive on failure
688 **********************************************************************/
691 ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
693 struct adapter *adapter = ifp->if_softc;
694 struct ifreq *ifr = (struct ifreq *) data;
695 #if defined(INET) || defined(INET6)
696 struct ifaddr *ifa = (struct ifaddr *)data;
699 bool avoid_reset = FALSE;
705 if (ifa->ifa_addr->sa_family == AF_INET)
709 if (ifa->ifa_addr->sa_family == AF_INET6)
713 ** Calling init results in link renegotiation,
714 ** so we avoid doing it when possible.
717 ifp->if_flags |= IFF_UP;
718 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
720 if (!(ifp->if_flags & IFF_NOARP))
721 arp_ifinit(ifp, ifa);
723 error = ether_ioctl(ifp, command, data);
727 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
728 if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
731 IXV_CORE_LOCK(adapter);
732 ifp->if_mtu = ifr->ifr_mtu;
733 adapter->max_frame_size =
734 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
735 ixv_init_locked(adapter);
736 IXV_CORE_UNLOCK(adapter);
740 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
741 IXV_CORE_LOCK(adapter);
742 if (ifp->if_flags & IFF_UP) {
743 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
744 ixv_init_locked(adapter);
746 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
748 adapter->if_flags = ifp->if_flags;
749 IXV_CORE_UNLOCK(adapter);
753 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
754 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
755 IXV_CORE_LOCK(adapter);
756 ixv_disable_intr(adapter);
757 ixv_set_multi(adapter);
758 ixv_enable_intr(adapter);
759 IXV_CORE_UNLOCK(adapter);
764 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
765 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
769 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
770 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
771 if (mask & IFCAP_HWCSUM)
772 ifp->if_capenable ^= IFCAP_HWCSUM;
773 if (mask & IFCAP_TSO4)
774 ifp->if_capenable ^= IFCAP_TSO4;
775 if (mask & IFCAP_LRO)
776 ifp->if_capenable ^= IFCAP_LRO;
777 if (mask & IFCAP_VLAN_HWTAGGING)
778 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
779 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
780 IXV_CORE_LOCK(adapter);
781 ixv_init_locked(adapter);
782 IXV_CORE_UNLOCK(adapter);
784 VLAN_CAPABILITIES(ifp);
789 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
790 error = ether_ioctl(ifp, command, data);
797 /*********************************************************************
800 * This routine is used in two ways. It is used by the stack as
801 * init entry point in network interface structure. It is also used
802 * by the driver as a hw/sw initialization routine to get to a
805 * return 0 on success, positive on failure
806 **********************************************************************/
807 #define IXGBE_MHADD_MFS_SHIFT 16
810 ixv_init_locked(struct adapter *adapter)
812 struct ifnet *ifp = adapter->ifp;
813 device_t dev = adapter->dev;
814 struct ixgbe_hw *hw = &adapter->hw;
817 INIT_DEBUGOUT("ixv_init: begin");
818 mtx_assert(&adapter->core_mtx, MA_OWNED);
819 hw->adapter_stopped = FALSE;
820 ixgbe_stop_adapter(hw);
821 callout_stop(&adapter->timer);
823 /* reprogram the RAR[0] in case user changed it. */
824 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
826 /* Get the latest mac address, User can use a LAA */
827 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
828 IXGBE_ETH_LENGTH_OF_ADDRESS);
829 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
830 hw->addr_ctrl.rar_used_count = 1;
832 /* Prepare transmit descriptors and buffers */
833 if (ixv_setup_transmit_structures(adapter)) {
834 device_printf(dev,"Could not setup transmit structures\n");
840 ixv_initialize_transmit_units(adapter);
842 /* Setup Multicast table */
843 ixv_set_multi(adapter);
846 ** Determine the correct mbuf pool
847 ** for doing jumbo/headersplit
849 if (ifp->if_mtu > ETHERMTU)
850 adapter->rx_mbuf_sz = MJUMPAGESIZE;
852 adapter->rx_mbuf_sz = MCLBYTES;
854 /* Prepare receive descriptors and buffers */
855 if (ixv_setup_receive_structures(adapter)) {
856 device_printf(dev,"Could not setup receive structures\n");
861 /* Configure RX settings */
862 ixv_initialize_receive_units(adapter);
864 /* Enable Enhanced MSIX mode */
865 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
866 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
867 gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
868 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
870 /* Set the various hardware offload abilities */
871 ifp->if_hwassist = 0;
872 if (ifp->if_capenable & IFCAP_TSO4)
873 ifp->if_hwassist |= CSUM_TSO;
874 if (ifp->if_capenable & IFCAP_TXCSUM) {
875 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
876 #if __FreeBSD_version >= 800000
877 ifp->if_hwassist |= CSUM_SCTP;
882 if (ifp->if_mtu > ETHERMTU) {
883 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
884 mhadd &= ~IXGBE_MHADD_MFS_MASK;
885 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
886 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
889 /* Set up VLAN offload and filter */
890 ixv_setup_vlan_support(adapter);
892 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
894 /* Set up MSI/X routing */
895 ixv_configure_ivars(adapter);
897 /* Set up auto-mask */
898 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
900 /* Set moderation on the Link interrupt */
901 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
904 ixv_init_stats(adapter);
906 /* Config/Enable Link */
907 ixv_config_link(adapter);
909 /* And now turn on interrupts */
910 ixv_enable_intr(adapter);
912 /* Now inform the stack we're ready */
913 ifp->if_drv_flags |= IFF_DRV_RUNNING;
914 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
922 struct adapter *adapter = arg;
924 IXV_CORE_LOCK(adapter);
925 ixv_init_locked(adapter);
926 IXV_CORE_UNLOCK(adapter);
933 ** MSIX Interrupt Handlers and Tasklets
938 ixv_enable_queue(struct adapter *adapter, u32 vector)
940 struct ixgbe_hw *hw = &adapter->hw;
941 u32 queue = 1 << vector;
944 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
945 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
949 ixv_disable_queue(struct adapter *adapter, u32 vector)
951 struct ixgbe_hw *hw = &adapter->hw;
952 u64 queue = (u64)(1 << vector);
955 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
956 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
960 ixv_rearm_queues(struct adapter *adapter, u64 queues)
962 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
963 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
968 ixv_handle_que(void *context, int pending)
970 struct ix_queue *que = context;
971 struct adapter *adapter = que->adapter;
972 struct tx_ring *txr = que->txr;
973 struct ifnet *ifp = adapter->ifp;
976 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
977 more = ixv_rxeof(que, adapter->rx_process_limit);
980 #if __FreeBSD_version >= 800000
981 if (!drbr_empty(ifp, txr->br))
982 ixv_mq_start_locked(ifp, txr, NULL);
984 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
985 ixv_start_locked(txr, ifp);
989 taskqueue_enqueue(que->tq, &que->que_task);
994 /* Reenable this interrupt */
995 ixv_enable_queue(adapter, que->msix);
999 /*********************************************************************
1001 * MSI Queue Interrupt Service routine
1003 **********************************************************************/
1005 ixv_msix_que(void *arg)
1007 struct ix_queue *que = arg;
1008 struct adapter *adapter = que->adapter;
1009 struct tx_ring *txr = que->txr;
1010 struct rx_ring *rxr = que->rxr;
1011 bool more_tx, more_rx;
1014 ixv_disable_queue(adapter, que->msix);
1017 more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1020 more_tx = ixv_txeof(txr);
1022 ** Make certain that if the stack
1023 ** has anything queued the task gets
1024 ** scheduled to handle it.
1026 #if __FreeBSD_version < 800000
1027 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
1029 if (!drbr_empty(adapter->ifp, txr->br))
1034 more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1038 if (ixv_enable_aim == FALSE)
1041 ** Do Adaptive Interrupt Moderation:
1042 ** - Write out last calculated setting
1043 ** - Calculate based on average size over
1044 ** the last interval.
1046 if (que->eitr_setting)
1047 IXGBE_WRITE_REG(&adapter->hw,
1048 IXGBE_VTEITR(que->msix),
1051 que->eitr_setting = 0;
1053 /* Idle, do nothing */
1054 if ((txr->bytes == 0) && (rxr->bytes == 0))
1057 if ((txr->bytes) && (txr->packets))
1058 newitr = txr->bytes/txr->packets;
1059 if ((rxr->bytes) && (rxr->packets))
1060 newitr = max(newitr,
1061 (rxr->bytes / rxr->packets));
1062 newitr += 24; /* account for hardware frame, crc */
1064 /* set an upper boundary */
1065 newitr = min(newitr, 3000);
1067 /* Be nice to the mid range */
1068 if ((newitr > 300) && (newitr < 1200))
1069 newitr = (newitr / 3);
1071 newitr = (newitr / 2);
1073 newitr |= newitr << 16;
1075 /* save for next interrupt */
1076 que->eitr_setting = newitr;
1085 if (more_tx || more_rx)
1086 taskqueue_enqueue(que->tq, &que->que_task);
1087 else /* Reenable this interrupt */
1088 ixv_enable_queue(adapter, que->msix);
1093 ixv_msix_mbx(void *arg)
1095 struct adapter *adapter = arg;
1096 struct ixgbe_hw *hw = &adapter->hw;
1101 /* First get the cause */
1102 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
1103 /* Clear interrupt with write */
1104 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
1106 /* Link status change */
1107 if (reg & IXGBE_EICR_LSC)
1108 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1110 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1114 /*********************************************************************
1116 * Media Ioctl callback
1118 * This routine is called whenever the user queries the status of
1119 * the interface using ifconfig.
1121 **********************************************************************/
1123 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1125 struct adapter *adapter = ifp->if_softc;
1127 INIT_DEBUGOUT("ixv_media_status: begin");
1128 IXV_CORE_LOCK(adapter);
1129 ixv_update_link_status(adapter);
1131 ifmr->ifm_status = IFM_AVALID;
1132 ifmr->ifm_active = IFM_ETHER;
1134 if (!adapter->link_active) {
1135 IXV_CORE_UNLOCK(adapter);
1139 ifmr->ifm_status |= IFM_ACTIVE;
1141 switch (adapter->link_speed) {
1142 case IXGBE_LINK_SPEED_1GB_FULL:
1143 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1145 case IXGBE_LINK_SPEED_10GB_FULL:
1146 ifmr->ifm_active |= IFM_FDX;
1150 IXV_CORE_UNLOCK(adapter);
1155 /*********************************************************************
1157 * Media Ioctl callback
1159 * This routine is called when the user changes speed/duplex using
1160 * media/mediopt option with ifconfig.
1162 **********************************************************************/
1164 ixv_media_change(struct ifnet * ifp)
1166 struct adapter *adapter = ifp->if_softc;
1167 struct ifmedia *ifm = &adapter->media;
1169 INIT_DEBUGOUT("ixv_media_change: begin");
1171 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1174 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1178 device_printf(adapter->dev, "Only auto media type\n");
1185 /*********************************************************************
1187 * This routine maps the mbufs to tx descriptors, allowing the
1188 * TX engine to transmit the packets.
1189 * - return 0 on success, positive on failure
1191 **********************************************************************/
1194 ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1196 struct adapter *adapter = txr->adapter;
1197 u32 olinfo_status = 0, cmd_type_len;
1199 int i, j, error, nsegs;
1200 int first, last = 0;
1201 struct mbuf *m_head;
1202 bus_dma_segment_t segs[32];
1204 struct ixv_tx_buf *txbuf, *txbuf_mapped;
1205 union ixgbe_adv_tx_desc *txd = NULL;
1209 /* Basic descriptor defines */
1210 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1211 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1213 if (m_head->m_flags & M_VLANTAG)
1214 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1217 * Important to capture the first descriptor
1218 * used because it will contain the index of
1219 * the one we tell the hardware to report back
1221 first = txr->next_avail_desc;
1222 txbuf = &txr->tx_buffers[first];
1223 txbuf_mapped = txbuf;
1227 * Map the packet for DMA.
1229 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1230 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1232 if (error == EFBIG) {
1235 m = m_defrag(*m_headp, M_DONTWAIT);
1237 adapter->mbuf_defrag_failed++;
1245 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1246 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1248 if (error == ENOMEM) {
1249 adapter->no_tx_dma_setup++;
1251 } else if (error != 0) {
1252 adapter->no_tx_dma_setup++;
1257 } else if (error == ENOMEM) {
1258 adapter->no_tx_dma_setup++;
1260 } else if (error != 0) {
1261 adapter->no_tx_dma_setup++;
1267 /* Make certain there are enough descriptors */
1268 if (nsegs > txr->tx_avail - 2) {
1269 txr->no_desc_avail++;
1276 ** Set up the appropriate offload context
1277 ** this becomes the first descriptor of
1280 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1281 if (ixv_tso_setup(txr, m_head, &paylen)) {
1282 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1283 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1284 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1285 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1289 } else if (ixv_tx_ctx_setup(txr, m_head))
1290 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1292 /* Record payload length */
1294 olinfo_status |= m_head->m_pkthdr.len <<
1295 IXGBE_ADVTXD_PAYLEN_SHIFT;
1297 i = txr->next_avail_desc;
1298 for (j = 0; j < nsegs; j++) {
1302 txbuf = &txr->tx_buffers[i];
1303 txd = &txr->tx_base[i];
1304 seglen = segs[j].ds_len;
1305 segaddr = htole64(segs[j].ds_addr);
1307 txd->read.buffer_addr = segaddr;
1308 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1309 cmd_type_len |seglen);
1310 txd->read.olinfo_status = htole32(olinfo_status);
1311 last = i; /* descriptor that will get completion IRQ */
1313 if (++i == adapter->num_tx_desc)
1316 txbuf->m_head = NULL;
1317 txbuf->eop_index = -1;
1320 txd->read.cmd_type_len |=
1321 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1322 txr->tx_avail -= nsegs;
1323 txr->next_avail_desc = i;
1325 txbuf->m_head = m_head;
1327 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1329 /* Set the index of the descriptor that will be marked done */
1330 txbuf = &txr->tx_buffers[first];
1331 txbuf->eop_index = last;
1333 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1334 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1336 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1337 * hardware that this frame is available to transmit.
1339 ++txr->total_packets;
1340 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
1345 bus_dmamap_unload(txr->txtag, txbuf->map);
1351 /*********************************************************************
1354 * This routine is called whenever multicast address list is updated.
1356 **********************************************************************/
1357 #define IXGBE_RAR_ENTRIES 16
1360 ixv_set_multi(struct adapter *adapter)
1362 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1364 struct ifmultiaddr *ifma;
1366 struct ifnet *ifp = adapter->ifp;
1368 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1370 #if __FreeBSD_version < 800000
1373 if_maddr_rlock(ifp);
1375 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1376 if (ifma->ifma_addr->sa_family != AF_LINK)
1378 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1379 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1380 IXGBE_ETH_LENGTH_OF_ADDRESS);
1383 #if __FreeBSD_version < 800000
1384 IF_ADDR_UNLOCK(ifp);
1386 if_maddr_runlock(ifp);
1391 ixgbe_update_mc_addr_list(&adapter->hw,
1392 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1398 * This is an iterator function now needed by the multicast
1399 * shared code. It simply feeds the shared code routine the
1400 * addresses in the array of ixv_set_multi() one by one.
1403 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1405 u8 *addr = *update_ptr;
1409 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1410 *update_ptr = newptr;
1414 /*********************************************************************
1417 * This routine checks for link status,updates statistics,
1418 * and runs the watchdog check.
1420 **********************************************************************/
1423 ixv_local_timer(void *arg)
1425 struct adapter *adapter = arg;
1426 device_t dev = adapter->dev;
1427 struct tx_ring *txr = adapter->tx_rings;
1430 mtx_assert(&adapter->core_mtx, MA_OWNED);
1432 ixv_update_link_status(adapter);
1435 ixv_update_stats(adapter);
1438 * If the interface has been paused
1439 * then don't do the watchdog check
1441 if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1444 ** Check for time since any descriptor was cleaned
1446 for (i = 0; i < adapter->num_queues; i++, txr++) {
1448 if (txr->watchdog_check == FALSE) {
1452 if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
1457 ixv_rearm_queues(adapter, adapter->que_mask);
1458 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1462 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1463 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1464 IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
1465 IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
1466 device_printf(dev,"TX(%d) desc avail = %d,"
1467 "Next TX to Clean = %d\n",
1468 txr->me, txr->tx_avail, txr->next_to_clean);
1469 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1470 adapter->watchdog_events++;
1472 ixv_init_locked(adapter);
1476 ** Note: this routine updates the OS on the link state
1477 ** the real check of the hardware only happens with
1478 ** a link interrupt.
1481 ixv_update_link_status(struct adapter *adapter)
1483 struct ifnet *ifp = adapter->ifp;
1484 struct tx_ring *txr = adapter->tx_rings;
1485 device_t dev = adapter->dev;
1488 if (adapter->link_up){
1489 if (adapter->link_active == FALSE) {
1491 device_printf(dev,"Link is up %d Gbps %s \n",
1492 ((adapter->link_speed == 128)? 10:1),
1494 adapter->link_active = TRUE;
1495 if_link_state_change(ifp, LINK_STATE_UP);
1497 } else { /* Link down */
1498 if (adapter->link_active == TRUE) {
1500 device_printf(dev,"Link is Down\n");
1501 if_link_state_change(ifp, LINK_STATE_DOWN);
1502 adapter->link_active = FALSE;
1503 for (int i = 0; i < adapter->num_queues;
1505 txr->watchdog_check = FALSE;
1513 /*********************************************************************
1515 * This routine disables all traffic on the adapter by issuing a
1516 * global reset on the MAC and deallocates TX/RX buffers.
1518 **********************************************************************/
1524 struct adapter *adapter = arg;
1525 struct ixgbe_hw *hw = &adapter->hw;
1528 mtx_assert(&adapter->core_mtx, MA_OWNED);
1530 INIT_DEBUGOUT("ixv_stop: begin\n");
1531 ixv_disable_intr(adapter);
1533 /* Tell the stack that the interface is no longer active */
1534 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1537 adapter->hw.adapter_stopped = FALSE;
1538 ixgbe_stop_adapter(hw);
1539 callout_stop(&adapter->timer);
1541 /* reprogram the RAR[0] in case user changed it. */
1542 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1548 /*********************************************************************
1550 * Determine hardware revision.
1552 **********************************************************************/
1554 ixv_identify_hardware(struct adapter *adapter)
1556 device_t dev = adapter->dev;
1560 ** Make sure BUSMASTER is set, on a VM under
1561 ** KVM it may not be and will break things.
1563 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1564 if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1565 (pci_cmd_word & PCIM_CMD_MEMEN))) {
1566 INIT_DEBUGOUT("Memory Access and/or Bus Master "
1567 "bits were not set!\n");
1568 pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1569 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1572 /* Save off the information about this board */
1573 adapter->hw.vendor_id = pci_get_vendor(dev);
1574 adapter->hw.device_id = pci_get_device(dev);
1575 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1576 adapter->hw.subsystem_vendor_id =
1577 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1578 adapter->hw.subsystem_device_id =
1579 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1584 /*********************************************************************
1586 * Setup MSIX Interrupt resources and handlers
1588 **********************************************************************/
1590 ixv_allocate_msix(struct adapter *adapter)
1592 device_t dev = adapter->dev;
1593 struct ix_queue *que = adapter->queues;
1594 int error, rid, vector = 0;
1596 for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
1598 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1599 RF_SHAREABLE | RF_ACTIVE);
1600 if (que->res == NULL) {
1601 device_printf(dev,"Unable to allocate"
1602 " bus resource: que interrupt [%d]\n", vector);
1605 /* Set the handler function */
1606 error = bus_setup_intr(dev, que->res,
1607 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1608 ixv_msix_que, que, &que->tag);
1611 device_printf(dev, "Failed to register QUE handler");
1614 #if __FreeBSD_version >= 800504
1615 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1618 adapter->que_mask |= (u64)(1 << que->msix);
1620 ** Bind the msix vector, and thus the
1621 ** ring to the corresponding cpu.
1623 if (adapter->num_queues > 1)
1624 bus_bind_intr(dev, que->res, i);
1626 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1627 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1628 taskqueue_thread_enqueue, &que->tq);
1629 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1630 device_get_nameunit(adapter->dev));
1635 adapter->res = bus_alloc_resource_any(dev,
1636 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1637 if (!adapter->res) {
1638 device_printf(dev,"Unable to allocate"
1639 " bus resource: MBX interrupt [%d]\n", rid);
1642 /* Set the mbx handler function */
1643 error = bus_setup_intr(dev, adapter->res,
1644 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1645 ixv_msix_mbx, adapter, &adapter->tag);
1647 adapter->res = NULL;
1648 device_printf(dev, "Failed to register LINK handler");
1651 #if __FreeBSD_version >= 800504
1652 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1654 adapter->mbxvec = vector;
1655 /* Tasklets for Mailbox */
1656 TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
1657 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1658 taskqueue_thread_enqueue, &adapter->tq);
1659 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1660 device_get_nameunit(adapter->dev));
1662 ** XXX - remove this when KVM/QEMU fix gets in...
1663 ** Due to a broken design QEMU will fail to properly
1664 ** enable the guest for MSIX unless the vectors in
1665 ** the table are all set up, so we must rewrite the
1666 ** ENABLE in the MSIX control register again at this
1667 ** point to cause it to successfully initialize us.
1669 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1671 pci_find_extcap(dev, PCIY_MSIX, &rid);
1672 rid += PCIR_MSIX_CTRL;
1673 msix_ctrl = pci_read_config(dev, rid, 2);
1674 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1675 pci_write_config(dev, rid, msix_ctrl, 2);
1682 * Setup MSIX resources, note that the VF
1683 * device MUST use MSIX, there is no fallback.
1686 ixv_setup_msix(struct adapter *adapter)
1688 device_t dev = adapter->dev;
1689 int rid, vectors, want = 2;
1692 /* First try MSI/X */
1694 adapter->msix_mem = bus_alloc_resource_any(dev,
1695 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1696 if (!adapter->msix_mem) {
1697 device_printf(adapter->dev,
1698 "Unable to map MSIX table \n");
1702 vectors = pci_msix_count(dev);
1704 bus_release_resource(dev, SYS_RES_MEMORY,
1705 rid, adapter->msix_mem);
1706 adapter->msix_mem = NULL;
1711 ** Want two vectors: one for a queue,
1712 ** plus an additional for mailbox.
1714 if (pci_alloc_msix(dev, &want) == 0) {
1715 device_printf(adapter->dev,
1716 "Using MSIX interrupts with %d vectors\n", want);
1720 device_printf(adapter->dev,"MSIX config error\n");
1726 ixv_allocate_pci_resources(struct adapter *adapter)
1729 device_t dev = adapter->dev;
1732 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1735 if (!(adapter->pci_mem)) {
1736 device_printf(dev,"Unable to allocate bus resource: memory\n");
1740 adapter->osdep.mem_bus_space_tag =
1741 rman_get_bustag(adapter->pci_mem);
1742 adapter->osdep.mem_bus_space_handle =
1743 rman_get_bushandle(adapter->pci_mem);
1744 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
1746 adapter->num_queues = 1;
1747 adapter->hw.back = &adapter->osdep;
1750 ** Now setup MSI/X, should
1751 ** return us the number of
1752 ** configured vectors.
1754 adapter->msix = ixv_setup_msix(adapter);
1755 if (adapter->msix == ENXIO)
1762 ixv_free_pci_resources(struct adapter * adapter)
1764 struct ix_queue *que = adapter->queues;
1765 device_t dev = adapter->dev;
1768 memrid = PCIR_BAR(MSIX_BAR);
1771 ** There is a slight possibility of a failure mode
1772 ** in attach that will result in entering this function
1773 ** before interrupt resources have been initialized, and
1774 ** in that case we do not want to execute the loops below
1775 ** We can detect this reliably by the state of the adapter
1778 if (adapter->res == NULL)
1782 ** Release all msix queue resources:
1784 for (int i = 0; i < adapter->num_queues; i++, que++) {
1785 rid = que->msix + 1;
1786 if (que->tag != NULL) {
1787 bus_teardown_intr(dev, que->res, que->tag);
1790 if (que->res != NULL)
1791 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1795 /* Clean the Legacy or Link interrupt last */
1796 if (adapter->mbxvec) /* we are doing MSIX */
1797 rid = adapter->mbxvec + 1;
1799 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1801 if (adapter->tag != NULL) {
1802 bus_teardown_intr(dev, adapter->res, adapter->tag);
1803 adapter->tag = NULL;
1805 if (adapter->res != NULL)
1806 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1810 pci_release_msi(dev);
1812 if (adapter->msix_mem != NULL)
1813 bus_release_resource(dev, SYS_RES_MEMORY,
1814 memrid, adapter->msix_mem);
1816 if (adapter->pci_mem != NULL)
1817 bus_release_resource(dev, SYS_RES_MEMORY,
1818 PCIR_BAR(0), adapter->pci_mem);
1823 /*********************************************************************
1825 * Setup networking device structure and register an interface.
1827 **********************************************************************/
1829 ixv_setup_interface(device_t dev, struct adapter *adapter)
1833 INIT_DEBUGOUT("ixv_setup_interface: begin");
1835 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1837 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1838 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1839 ifp->if_mtu = ETHERMTU;
1840 ifp->if_baudrate = 1000000000;
1841 ifp->if_init = ixv_init;
1842 ifp->if_softc = adapter;
1843 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1844 ifp->if_ioctl = ixv_ioctl;
1845 #if __FreeBSD_version >= 800000
1846 ifp->if_transmit = ixv_mq_start;
1847 ifp->if_qflush = ixv_qflush;
1849 ifp->if_start = ixv_start;
1851 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1853 ether_ifattach(ifp, adapter->hw.mac.addr);
1855 adapter->max_frame_size =
1856 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1859 * Tell the upper layer(s) we support long frames.
1861 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1863 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1864 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1865 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
1867 ifp->if_capenable = ifp->if_capabilities;
1870 * Specify the media types supported by this adapter and register
1871 * callbacks to update media and link information
1873 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1875 ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
1876 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1877 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1883 ixv_config_link(struct adapter *adapter)
1885 struct ixgbe_hw *hw = &adapter->hw;
1886 u32 autoneg, err = 0;
1887 bool negotiate = TRUE;
1889 if (hw->mac.ops.check_link)
1890 err = hw->mac.ops.check_link(hw, &autoneg,
1891 &adapter->link_up, FALSE);
1895 if (hw->mac.ops.setup_link)
1896 err = hw->mac.ops.setup_link(hw, autoneg,
1897 negotiate, adapter->link_up);
1902 /********************************************************************
1903 * Manage DMA'able memory.
1904 *******************************************************************/
1906 ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1910 *(bus_addr_t *) arg = segs->ds_addr;
1915 ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
1916 struct ixv_dma_alloc *dma, int mapflags)
1918 device_t dev = adapter->dev;
1921 r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
1922 DBA_ALIGN, 0, /* alignment, bounds */
1923 BUS_SPACE_MAXADDR, /* lowaddr */
1924 BUS_SPACE_MAXADDR, /* highaddr */
1925 NULL, NULL, /* filter, filterarg */
1928 size, /* maxsegsize */
1929 BUS_DMA_ALLOCNOW, /* flags */
1930 NULL, /* lockfunc */
1931 NULL, /* lockfuncarg */
1934 device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
1938 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1939 BUS_DMA_NOWAIT, &dma->dma_map);
1941 device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
1945 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1949 mapflags | BUS_DMA_NOWAIT);
1951 device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
1955 dma->dma_size = size;
1958 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1960 bus_dma_tag_destroy(dma->dma_tag);
1962 dma->dma_map = NULL;
1963 dma->dma_tag = NULL;
1968 ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
1970 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
1971 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1972 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1973 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1974 bus_dma_tag_destroy(dma->dma_tag);
1978 /*********************************************************************
1980 * Allocate memory for the transmit and receive rings, and then
1981 * the descriptors associated with each, called only once at attach.
1983 **********************************************************************/
1985 ixv_allocate_queues(struct adapter *adapter)
1987 device_t dev = adapter->dev;
1988 struct ix_queue *que;
1989 struct tx_ring *txr;
1990 struct rx_ring *rxr;
1991 int rsize, tsize, error = 0;
1992 int txconf = 0, rxconf = 0;
1994 /* First allocate the top level queue structs */
1995 if (!(adapter->queues =
1996 (struct ix_queue *) malloc(sizeof(struct ix_queue) *
1997 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1998 device_printf(dev, "Unable to allocate queue memory\n");
2003 /* First allocate the TX ring struct memory */
2004 if (!(adapter->tx_rings =
2005 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2006 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2007 device_printf(dev, "Unable to allocate TX ring memory\n");
2012 /* Next allocate the RX */
2013 if (!(adapter->rx_rings =
2014 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2015 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2016 device_printf(dev, "Unable to allocate RX ring memory\n");
2021 /* For the ring itself */
2022 tsize = roundup2(adapter->num_tx_desc *
2023 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2026 * Now set up the TX queues, txconf is needed to handle the
2027 * possibility that things fail midcourse and we need to
2028 * undo memory gracefully
2030 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2031 /* Set up some basics */
2032 txr = &adapter->tx_rings[i];
2033 txr->adapter = adapter;
2036 /* Initialize the TX side lock */
2037 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2038 device_get_nameunit(dev), txr->me);
2039 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2041 if (ixv_dma_malloc(adapter, tsize,
2042 &txr->txdma, BUS_DMA_NOWAIT)) {
2044 "Unable to allocate TX Descriptor memory\n");
2048 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2049 bzero((void *)txr->tx_base, tsize);
2051 /* Now allocate transmit buffers for the ring */
2052 if (ixv_allocate_transmit_buffers(txr)) {
2054 "Critical Failure setting up transmit buffers\n");
2058 #if __FreeBSD_version >= 800000
2059 /* Allocate a buf ring */
2060 txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
2061 M_WAITOK, &txr->tx_mtx);
2062 if (txr->br == NULL) {
2064 "Critical Failure setting up buf ring\n");
2072 * Next the RX queues...
2074 rsize = roundup2(adapter->num_rx_desc *
2075 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2076 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2077 rxr = &adapter->rx_rings[i];
2078 /* Set up some basics */
2079 rxr->adapter = adapter;
2082 /* Initialize the RX side lock */
2083 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2084 device_get_nameunit(dev), rxr->me);
2085 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2087 if (ixv_dma_malloc(adapter, rsize,
2088 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2090 "Unable to allocate RxDescriptor memory\n");
2094 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2095 bzero((void *)rxr->rx_base, rsize);
2097 /* Allocate receive buffers for the ring*/
2098 if (ixv_allocate_receive_buffers(rxr)) {
2100 "Critical Failure setting up receive buffers\n");
2107 ** Finally set up the queue holding structs
2109 for (int i = 0; i < adapter->num_queues; i++) {
2110 que = &adapter->queues[i];
2111 que->adapter = adapter;
2112 que->txr = &adapter->tx_rings[i];
2113 que->rxr = &adapter->rx_rings[i];
2119 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2120 ixv_dma_free(adapter, &rxr->rxdma);
2122 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2123 ixv_dma_free(adapter, &txr->txdma);
2124 free(adapter->rx_rings, M_DEVBUF);
2126 free(adapter->tx_rings, M_DEVBUF);
2128 free(adapter->queues, M_DEVBUF);
2134 /*********************************************************************
2136 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2137 * the information needed to transmit a packet on the wire. This is
2138 * called only once at attach, setup is done every reset.
2140 **********************************************************************/
2142 ixv_allocate_transmit_buffers(struct tx_ring *txr)
2144 struct adapter *adapter = txr->adapter;
2145 device_t dev = adapter->dev;
2146 struct ixv_tx_buf *txbuf;
2150 * Setup DMA descriptor areas.
2152 if ((error = bus_dma_tag_create(NULL, /* parent */
2153 1, 0, /* alignment, bounds */
2154 BUS_SPACE_MAXADDR, /* lowaddr */
2155 BUS_SPACE_MAXADDR, /* highaddr */
2156 NULL, NULL, /* filter, filterarg */
2157 IXV_TSO_SIZE, /* maxsize */
2159 PAGE_SIZE, /* maxsegsize */
2161 NULL, /* lockfunc */
2162 NULL, /* lockfuncarg */
2164 device_printf(dev,"Unable to allocate TX DMA tag\n");
2168 if (!(txr->tx_buffers =
2169 (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
2170 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2171 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2176 /* Create the descriptor buffer dma maps */
2177 txbuf = txr->tx_buffers;
2178 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2179 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2181 device_printf(dev, "Unable to create TX DMA map\n");
2188 /* We free all, it handles case where we are in the middle */
2189 ixv_free_transmit_structures(adapter);
2193 /*********************************************************************
2195 * Initialize a transmit ring.
2197 **********************************************************************/
2199 ixv_setup_transmit_ring(struct tx_ring *txr)
2201 struct adapter *adapter = txr->adapter;
2202 struct ixv_tx_buf *txbuf;
2205 /* Clear the old ring contents */
2207 bzero((void *)txr->tx_base,
2208 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2210 txr->next_avail_desc = 0;
2211 txr->next_to_clean = 0;
2213 /* Free any existing tx buffers. */
2214 txbuf = txr->tx_buffers;
2215 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2216 if (txbuf->m_head != NULL) {
2217 bus_dmamap_sync(txr->txtag, txbuf->map,
2218 BUS_DMASYNC_POSTWRITE);
2219 bus_dmamap_unload(txr->txtag, txbuf->map);
2220 m_freem(txbuf->m_head);
2221 txbuf->m_head = NULL;
2223 /* Clear the EOP index */
2224 txbuf->eop_index = -1;
2227 /* Set number of descriptors available */
2228 txr->tx_avail = adapter->num_tx_desc;
2230 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2231 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2235 /*********************************************************************
2237 * Initialize all transmit rings.
2239 **********************************************************************/
2241 ixv_setup_transmit_structures(struct adapter *adapter)
2243 struct tx_ring *txr = adapter->tx_rings;
2245 for (int i = 0; i < adapter->num_queues; i++, txr++)
2246 ixv_setup_transmit_ring(txr);
2251 /*********************************************************************
2253 * Enable transmit unit.
2255 **********************************************************************/
2257 ixv_initialize_transmit_units(struct adapter *adapter)
2259 struct tx_ring *txr = adapter->tx_rings;
2260 struct ixgbe_hw *hw = &adapter->hw;
2263 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2264 u64 tdba = txr->txdma.dma_paddr;
2267 /* Set WTHRESH to 8, burst writeback */
2268 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2269 txdctl |= (8 << 16);
2270 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2272 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2273 txdctl |= IXGBE_TXDCTL_ENABLE;
2274 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2276 /* Set the HW Tx Head and Tail indices */
2277 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
2278 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
2280 /* Setup Transmit Descriptor Cmd Settings */
2281 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2282 txr->watchdog_check = FALSE;
2284 /* Set Ring parameters */
2285 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
2286 (tdba & 0x00000000ffffffffULL));
2287 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
2288 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
2289 adapter->num_tx_desc *
2290 sizeof(struct ixgbe_legacy_tx_desc));
2291 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
2292 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2293 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
2300 /*********************************************************************
2302 * Free all transmit rings.
2304 **********************************************************************/
2306 ixv_free_transmit_structures(struct adapter *adapter)
2308 struct tx_ring *txr = adapter->tx_rings;
2310 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2312 ixv_free_transmit_buffers(txr);
2313 ixv_dma_free(adapter, &txr->txdma);
2315 IXV_TX_LOCK_DESTROY(txr);
2317 free(adapter->tx_rings, M_DEVBUF);
2320 /*********************************************************************
2322 * Free transmit ring related data structures.
2324 **********************************************************************/
2326 ixv_free_transmit_buffers(struct tx_ring *txr)
2328 struct adapter *adapter = txr->adapter;
2329 struct ixv_tx_buf *tx_buffer;
2332 INIT_DEBUGOUT("free_transmit_ring: begin");
2334 if (txr->tx_buffers == NULL)
2337 tx_buffer = txr->tx_buffers;
2338 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2339 if (tx_buffer->m_head != NULL) {
2340 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2341 BUS_DMASYNC_POSTWRITE);
2342 bus_dmamap_unload(txr->txtag,
2344 m_freem(tx_buffer->m_head);
2345 tx_buffer->m_head = NULL;
2346 if (tx_buffer->map != NULL) {
2347 bus_dmamap_destroy(txr->txtag,
2349 tx_buffer->map = NULL;
2351 } else if (tx_buffer->map != NULL) {
2352 bus_dmamap_unload(txr->txtag,
2354 bus_dmamap_destroy(txr->txtag,
2356 tx_buffer->map = NULL;
2359 #if __FreeBSD_version >= 800000
2360 if (txr->br != NULL)
2361 buf_ring_free(txr->br, M_DEVBUF);
2363 if (txr->tx_buffers != NULL) {
2364 free(txr->tx_buffers, M_DEVBUF);
2365 txr->tx_buffers = NULL;
2367 if (txr->txtag != NULL) {
2368 bus_dma_tag_destroy(txr->txtag);
2374 /*********************************************************************
2376 * Advanced Context Descriptor setup for VLAN or CSUM
2378 **********************************************************************/
2381 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2383 struct adapter *adapter = txr->adapter;
2384 struct ixgbe_adv_tx_context_desc *TXD;
2385 struct ixv_tx_buf *tx_buffer;
2386 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2387 struct ether_vlan_header *eh;
2389 struct ip6_hdr *ip6;
2390 int ehdrlen, ip_hlen = 0;
2393 bool offload = TRUE;
2394 int ctxd = txr->next_avail_desc;
2398 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2402 tx_buffer = &txr->tx_buffers[ctxd];
2403 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2406 ** In advanced descriptors the vlan tag must
2407 ** be placed into the descriptor itself.
2409 if (mp->m_flags & M_VLANTAG) {
2410 vtag = htole16(mp->m_pkthdr.ether_vtag);
2411 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2412 } else if (offload == FALSE)
2416 * Determine where frame payload starts.
2417 * Jump over vlan headers if already present,
2418 * helpful for QinQ too.
2420 eh = mtod(mp, struct ether_vlan_header *);
2421 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2422 etype = ntohs(eh->evl_proto);
2423 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2425 etype = ntohs(eh->evl_encap_proto);
2426 ehdrlen = ETHER_HDR_LEN;
2429 /* Set the ether header length */
2430 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2434 ip = (struct ip *)(mp->m_data + ehdrlen);
2435 ip_hlen = ip->ip_hl << 2;
2436 if (mp->m_len < ehdrlen + ip_hlen)
2439 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2441 case ETHERTYPE_IPV6:
2442 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2443 ip_hlen = sizeof(struct ip6_hdr);
2444 if (mp->m_len < ehdrlen + ip_hlen)
2446 ipproto = ip6->ip6_nxt;
2447 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2454 vlan_macip_lens |= ip_hlen;
2455 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2459 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2460 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2464 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2465 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2468 #if __FreeBSD_version >= 800000
2470 if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
2471 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2479 /* Now copy bits into descriptor */
2480 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2481 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2482 TXD->seqnum_seed = htole32(0);
2483 TXD->mss_l4len_idx = htole32(0);
2485 tx_buffer->m_head = NULL;
2486 tx_buffer->eop_index = -1;
2488 /* We've consumed the first desc, adjust counters */
2489 if (++ctxd == adapter->num_tx_desc)
2491 txr->next_avail_desc = ctxd;
2497 /**********************************************************************
2499 * Setup work for hardware segmentation offload (TSO) on
2500 * adapters using advanced tx descriptors
2502 **********************************************************************/
2504 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2506 struct adapter *adapter = txr->adapter;
2507 struct ixgbe_adv_tx_context_desc *TXD;
2508 struct ixv_tx_buf *tx_buffer;
2509 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2510 u32 mss_l4len_idx = 0;
2512 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
2513 struct ether_vlan_header *eh;
2519 * Determine where frame payload starts.
2520 * Jump over vlan headers if already present
2522 eh = mtod(mp, struct ether_vlan_header *);
2523 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2524 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2526 ehdrlen = ETHER_HDR_LEN;
2528 /* Ensure we have at least the IP+TCP header in the first mbuf. */
2529 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2532 ctxd = txr->next_avail_desc;
2533 tx_buffer = &txr->tx_buffers[ctxd];
2534 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2536 ip = (struct ip *)(mp->m_data + ehdrlen);
2537 if (ip->ip_p != IPPROTO_TCP)
2538 return FALSE; /* 0 */
2540 ip_hlen = ip->ip_hl << 2;
2541 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2542 th->th_sum = in_pseudo(ip->ip_src.s_addr,
2543 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2544 tcp_hlen = th->th_off << 2;
2545 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2547 /* This is used in the transmit desc in encap */
2548 *paylen = mp->m_pkthdr.len - hdrlen;
2550 /* VLAN MACLEN IPLEN */
2551 if (mp->m_flags & M_VLANTAG) {
2552 vtag = htole16(mp->m_pkthdr.ether_vtag);
2553 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2556 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2557 vlan_macip_lens |= ip_hlen;
2558 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2560 /* ADV DTYPE TUCMD */
2561 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2562 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2563 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2564 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2568 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2569 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2570 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2572 TXD->seqnum_seed = htole32(0);
2573 tx_buffer->m_head = NULL;
2574 tx_buffer->eop_index = -1;
2576 if (++ctxd == adapter->num_tx_desc)
2580 txr->next_avail_desc = ctxd;
2585 /**********************************************************************
2587 * Examine each tx_buffer in the used queue. If the hardware is done
2588 * processing the packet then free associated resources. The
2589 * tx_buffer is put back on the free queue.
2591 **********************************************************************/
2593 ixv_txeof(struct tx_ring *txr)
2595 struct adapter *adapter = txr->adapter;
2596 struct ifnet *ifp = adapter->ifp;
2597 u32 first, last, done;
2598 struct ixv_tx_buf *tx_buffer;
2599 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2601 mtx_assert(&txr->tx_mtx, MA_OWNED);
2603 if (txr->tx_avail == adapter->num_tx_desc)
2606 first = txr->next_to_clean;
2607 tx_buffer = &txr->tx_buffers[first];
2608 /* For cleanup we just use legacy struct */
2609 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2610 last = tx_buffer->eop_index;
2613 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2616 ** Get the index of the first descriptor
2617 ** BEYOND the EOP and call that 'done'.
2618 ** I do this so the comparison in the
2619 ** inner while loop below can be simple
2621 if (++last == adapter->num_tx_desc) last = 0;
2624 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2625 BUS_DMASYNC_POSTREAD);
2627 ** Only the EOP descriptor of a packet now has the DD
2628 ** bit set, this is what we look for...
2630 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2631 /* We clean the range of the packet */
2632 while (first != done) {
2633 tx_desc->upper.data = 0;
2634 tx_desc->lower.data = 0;
2635 tx_desc->buffer_addr = 0;
2638 if (tx_buffer->m_head) {
2639 bus_dmamap_sync(txr->txtag,
2641 BUS_DMASYNC_POSTWRITE);
2642 bus_dmamap_unload(txr->txtag,
2644 m_freem(tx_buffer->m_head);
2645 tx_buffer->m_head = NULL;
2646 tx_buffer->map = NULL;
2648 tx_buffer->eop_index = -1;
2649 txr->watchdog_time = ticks;
2651 if (++first == adapter->num_tx_desc)
2654 tx_buffer = &txr->tx_buffers[first];
2656 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2659 /* See if there is more work now */
2660 last = tx_buffer->eop_index;
2663 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2664 /* Get next done point */
2665 if (++last == adapter->num_tx_desc) last = 0;
2670 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2671 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2673 txr->next_to_clean = first;
2676 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2677 * it is OK to send packets. If there are no pending descriptors,
2678 * clear the timeout. Otherwise, if some descriptors have been freed,
2679 * restart the timeout.
2681 if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
2682 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2683 if (txr->tx_avail == adapter->num_tx_desc) {
2684 txr->watchdog_check = FALSE;
2692 /*********************************************************************
2694 * Refresh mbuf buffers for RX descriptor rings
2695 * - now keeps its own state so discards due to resource
2696 * exhaustion are unnecessary, if an mbuf cannot be obtained
2697 * it just returns, keeping its placeholder, thus it can simply
2698 * be recalled to try again.
2700 **********************************************************************/
2702 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
2704 struct adapter *adapter = rxr->adapter;
2705 bus_dma_segment_t hseg[1];
2706 bus_dma_segment_t pseg[1];
2707 struct ixv_rx_buf *rxbuf;
2708 struct mbuf *mh, *mp;
2709 int i, j, nsegs, error;
2710 bool refreshed = FALSE;
2712 i = j = rxr->next_to_refresh;
2713 /* Get the control variable, one beyond refresh point */
2714 if (++j == adapter->num_rx_desc)
2716 while (j != limit) {
2717 rxbuf = &rxr->rx_buffers[i];
2718 if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
2719 mh = m_gethdr(M_DONTWAIT, MT_DATA);
2722 mh->m_pkthdr.len = mh->m_len = MHLEN;
2724 mh->m_flags |= M_PKTHDR;
2725 m_adj(mh, ETHER_ALIGN);
2726 /* Get the memory mapping */
2727 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2728 rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
2730 printf("GET BUF: dmamap load"
2731 " failure - %d\n", error);
2736 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2737 BUS_DMASYNC_PREREAD);
2738 rxr->rx_base[i].read.hdr_addr =
2739 htole64(hseg[0].ds_addr);
2742 if (rxbuf->m_pack == NULL) {
2743 mp = m_getjcl(M_DONTWAIT, MT_DATA,
2744 M_PKTHDR, adapter->rx_mbuf_sz);
2750 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2751 /* Get the memory mapping */
2752 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2753 rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
2755 printf("GET BUF: dmamap load"
2756 " failure - %d\n", error);
2758 rxbuf->m_pack = NULL;
2762 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2763 BUS_DMASYNC_PREREAD);
2764 rxr->rx_base[i].read.pkt_addr =
2765 htole64(pseg[0].ds_addr);
2768 rxr->next_to_refresh = i = j;
2769 /* Calculate next index */
2770 if (++j == adapter->num_rx_desc)
2774 if (refreshed) /* update tail index */
2775 IXGBE_WRITE_REG(&adapter->hw,
2776 IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
2780 /*********************************************************************
2782 * Allocate memory for rx_buffer structures. Since we use one
2783 * rx_buffer per received packet, the maximum number of rx_buffer's
2784 * that we'll need is equal to the number of receive descriptors
2785 * that we've allocated.
2787 **********************************************************************/
2789 ixv_allocate_receive_buffers(struct rx_ring *rxr)
2791 struct adapter *adapter = rxr->adapter;
2792 device_t dev = adapter->dev;
2793 struct ixv_rx_buf *rxbuf;
2794 int i, bsize, error;
2796 bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
2797 if (!(rxr->rx_buffers =
2798 (struct ixv_rx_buf *) malloc(bsize,
2799 M_DEVBUF, M_NOWAIT | M_ZERO))) {
2800 device_printf(dev, "Unable to allocate rx_buffer memory\n");
2805 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2806 1, 0, /* alignment, bounds */
2807 BUS_SPACE_MAXADDR, /* lowaddr */
2808 BUS_SPACE_MAXADDR, /* highaddr */
2809 NULL, NULL, /* filter, filterarg */
2810 MSIZE, /* maxsize */
2812 MSIZE, /* maxsegsize */
2814 NULL, /* lockfunc */
2815 NULL, /* lockfuncarg */
2817 device_printf(dev, "Unable to create RX DMA tag\n");
2821 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2822 1, 0, /* alignment, bounds */
2823 BUS_SPACE_MAXADDR, /* lowaddr */
2824 BUS_SPACE_MAXADDR, /* highaddr */
2825 NULL, NULL, /* filter, filterarg */
2826 MJUMPAGESIZE, /* maxsize */
2828 MJUMPAGESIZE, /* maxsegsize */
2830 NULL, /* lockfunc */
2831 NULL, /* lockfuncarg */
2833 device_printf(dev, "Unable to create RX DMA tag\n");
2837 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
2838 rxbuf = &rxr->rx_buffers[i];
2839 error = bus_dmamap_create(rxr->htag,
2840 BUS_DMA_NOWAIT, &rxbuf->hmap);
2842 device_printf(dev, "Unable to create RX head map\n");
2845 error = bus_dmamap_create(rxr->ptag,
2846 BUS_DMA_NOWAIT, &rxbuf->pmap);
2848 device_printf(dev, "Unable to create RX pkt map\n");
2856 /* Frees all, but can handle partial completion */
2857 ixv_free_receive_structures(adapter);
2862 ixv_free_receive_ring(struct rx_ring *rxr)
2864 struct adapter *adapter;
2865 struct ixv_rx_buf *rxbuf;
2868 adapter = rxr->adapter;
2869 for (i = 0; i < adapter->num_rx_desc; i++) {
2870 rxbuf = &rxr->rx_buffers[i];
2871 if (rxbuf->m_head != NULL) {
2872 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2873 BUS_DMASYNC_POSTREAD);
2874 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
2875 rxbuf->m_head->m_flags |= M_PKTHDR;
2876 m_freem(rxbuf->m_head);
2878 if (rxbuf->m_pack != NULL) {
2879 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2880 BUS_DMASYNC_POSTREAD);
2881 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
2882 rxbuf->m_pack->m_flags |= M_PKTHDR;
2883 m_freem(rxbuf->m_pack);
2885 rxbuf->m_head = NULL;
2886 rxbuf->m_pack = NULL;
2891 /*********************************************************************
2893 * Initialize a receive ring and its buffers.
2895 **********************************************************************/
2897 ixv_setup_receive_ring(struct rx_ring *rxr)
2899 struct adapter *adapter;
2902 struct ixv_rx_buf *rxbuf;
2903 bus_dma_segment_t pseg[1], hseg[1];
2904 struct lro_ctrl *lro = &rxr->lro;
2905 int rsize, nsegs, error = 0;
2907 adapter = rxr->adapter;
2911 /* Clear the ring contents */
2913 rsize = roundup2(adapter->num_rx_desc *
2914 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2915 bzero((void *)rxr->rx_base, rsize);
2917 /* Free current RX buffer structs and their mbufs */
2918 ixv_free_receive_ring(rxr);
2920 /* Configure header split? */
2921 if (ixv_header_split)
2922 rxr->hdr_split = TRUE;
2924 /* Now replenish the mbufs */
2925 for (int j = 0; j != adapter->num_rx_desc; ++j) {
2926 struct mbuf *mh, *mp;
2928 rxbuf = &rxr->rx_buffers[j];
2930 ** Dont allocate mbufs if not
2931 ** doing header split, its wasteful
2933 if (rxr->hdr_split == FALSE)
2936 /* First the header */
2937 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
2938 if (rxbuf->m_head == NULL) {
2942 m_adj(rxbuf->m_head, ETHER_ALIGN);
2944 mh->m_len = mh->m_pkthdr.len = MHLEN;
2945 mh->m_flags |= M_PKTHDR;
2946 /* Get the memory mapping */
2947 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2948 rxbuf->hmap, rxbuf->m_head, hseg,
2949 &nsegs, BUS_DMA_NOWAIT);
2950 if (error != 0) /* Nothing elegant to do here */
2952 bus_dmamap_sync(rxr->htag,
2953 rxbuf->hmap, BUS_DMASYNC_PREREAD);
2954 /* Update descriptor */
2955 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
2958 /* Now the payload cluster */
2959 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
2960 M_PKTHDR, adapter->rx_mbuf_sz);
2961 if (rxbuf->m_pack == NULL) {
2966 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2967 /* Get the memory mapping */
2968 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2969 rxbuf->pmap, mp, pseg,
2970 &nsegs, BUS_DMA_NOWAIT);
2973 bus_dmamap_sync(rxr->ptag,
2974 rxbuf->pmap, BUS_DMASYNC_PREREAD);
2975 /* Update descriptor */
2976 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
2980 /* Setup our descriptor indices */
2981 rxr->next_to_check = 0;
2982 rxr->next_to_refresh = 0;
2983 rxr->lro_enabled = FALSE;
2984 rxr->rx_split_packets = 0;
2986 rxr->discard = FALSE;
2988 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2989 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2992 ** Now set up the LRO interface:
2994 if (ifp->if_capenable & IFCAP_LRO) {
2995 int err = tcp_lro_init(lro);
2997 device_printf(dev, "LRO Initialization failed!\n");
3000 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
3001 rxr->lro_enabled = TRUE;
3002 lro->ifp = adapter->ifp;
3009 ixv_free_receive_ring(rxr);
3014 /*********************************************************************
3016 * Initialize all receive rings.
3018 **********************************************************************/
3020 ixv_setup_receive_structures(struct adapter *adapter)
3022 struct rx_ring *rxr = adapter->rx_rings;
3025 for (j = 0; j < adapter->num_queues; j++, rxr++)
3026 if (ixv_setup_receive_ring(rxr))
3032 * Free RX buffers allocated so far, we will only handle
3033 * the rings that completed, the failing case will have
3034 * cleaned up for itself. 'j' failed, so its the terminus.
3036 for (int i = 0; i < j; ++i) {
3037 rxr = &adapter->rx_rings[i];
3038 ixv_free_receive_ring(rxr);
3044 /*********************************************************************
3046 * Setup receive registers and features.
3048 **********************************************************************/
3049 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3052 ixv_initialize_receive_units(struct adapter *adapter)
3054 struct rx_ring *rxr = adapter->rx_rings;
3055 struct ixgbe_hw *hw = &adapter->hw;
3056 struct ifnet *ifp = adapter->ifp;
3057 u32 bufsz, fctrl, rxcsum, hlreg;
3060 /* Enable broadcasts */
3061 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3062 fctrl |= IXGBE_FCTRL_BAM;
3063 fctrl |= IXGBE_FCTRL_DPF;
3064 fctrl |= IXGBE_FCTRL_PMCF;
3065 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3067 /* Set for Jumbo Frames? */
3068 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3069 if (ifp->if_mtu > ETHERMTU) {
3070 hlreg |= IXGBE_HLREG0_JUMBOEN;
3071 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3073 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3074 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3076 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3078 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3079 u64 rdba = rxr->rxdma.dma_paddr;
3082 /* Do the queue enabling first */
3083 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3084 rxdctl |= IXGBE_RXDCTL_ENABLE;
3085 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
3086 for (int k = 0; k < 10; k++) {
3087 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
3088 IXGBE_RXDCTL_ENABLE)
3095 /* Setup the Base and Length of the Rx Descriptor Ring */
3096 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
3097 (rdba & 0x00000000ffffffffULL));
3098 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
3100 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
3101 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3103 /* Set up the SRRCTL register */
3104 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
3105 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3106 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3108 if (rxr->hdr_split) {
3109 /* Use a standard mbuf for the header */
3110 reg |= ((IXV_RX_HDR <<
3111 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3112 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3113 reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3115 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3116 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
3118 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3119 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
3120 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
3121 adapter->num_rx_desc - 1);
3124 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3126 if (ifp->if_capenable & IFCAP_RXCSUM)
3127 rxcsum |= IXGBE_RXCSUM_PCSD;
3129 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3130 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3132 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3137 /*********************************************************************
3139 * Free all receive rings.
3141 **********************************************************************/
3143 ixv_free_receive_structures(struct adapter *adapter)
3145 struct rx_ring *rxr = adapter->rx_rings;
3147 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3148 struct lro_ctrl *lro = &rxr->lro;
3149 ixv_free_receive_buffers(rxr);
3150 /* Free LRO memory */
3152 /* Free the ring memory as well */
3153 ixv_dma_free(adapter, &rxr->rxdma);
3156 free(adapter->rx_rings, M_DEVBUF);
3160 /*********************************************************************
3162 * Free receive ring data structures
3164 **********************************************************************/
3166 ixv_free_receive_buffers(struct rx_ring *rxr)
3168 struct adapter *adapter = rxr->adapter;
3169 struct ixv_rx_buf *rxbuf;
3171 INIT_DEBUGOUT("free_receive_structures: begin");
3173 /* Cleanup any existing buffers */
3174 if (rxr->rx_buffers != NULL) {
3175 for (int i = 0; i < adapter->num_rx_desc; i++) {
3176 rxbuf = &rxr->rx_buffers[i];
3177 if (rxbuf->m_head != NULL) {
3178 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3179 BUS_DMASYNC_POSTREAD);
3180 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3181 rxbuf->m_head->m_flags |= M_PKTHDR;
3182 m_freem(rxbuf->m_head);
3184 if (rxbuf->m_pack != NULL) {
3185 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3186 BUS_DMASYNC_POSTREAD);
3187 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3188 rxbuf->m_pack->m_flags |= M_PKTHDR;
3189 m_freem(rxbuf->m_pack);
3191 rxbuf->m_head = NULL;
3192 rxbuf->m_pack = NULL;
3193 if (rxbuf->hmap != NULL) {
3194 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
3197 if (rxbuf->pmap != NULL) {
3198 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
3202 if (rxr->rx_buffers != NULL) {
3203 free(rxr->rx_buffers, M_DEVBUF);
3204 rxr->rx_buffers = NULL;
3208 if (rxr->htag != NULL) {
3209 bus_dma_tag_destroy(rxr->htag);
3212 if (rxr->ptag != NULL) {
3213 bus_dma_tag_destroy(rxr->ptag);
3220 static __inline void
3221 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
3225 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
3226 * should be computed by hardware. Also it should not have VLAN tag in
3229 if (rxr->lro_enabled &&
3230 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3231 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3232 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
3233 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
3234 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3235 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
3237 * Send to the stack if:
3238 ** - LRO not enabled, or
3239 ** - no LRO resources, or
3240 ** - lro enqueue fails
3242 if (rxr->lro.lro_cnt != 0)
3243 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
3247 (*ifp->if_input)(ifp, m);
3251 static __inline void
3252 ixv_rx_discard(struct rx_ring *rxr, int i)
3254 struct ixv_rx_buf *rbuf;
3256 rbuf = &rxr->rx_buffers[i];
3258 if (rbuf->fmp != NULL) {/* Partial chain ? */
3259 rbuf->fmp->m_flags |= M_PKTHDR;
3265 ** With advanced descriptors the writeback
3266 ** clobbers the buffer addrs, so its easier
3267 ** to just free the existing mbufs and take
3268 ** the normal refresh path to get new buffers
3272 m_free(rbuf->m_head);
3273 rbuf->m_head = NULL;
3277 m_free(rbuf->m_pack);
3278 rbuf->m_pack = NULL;
3285 /*********************************************************************
3287 * This routine executes in interrupt context. It replenishes
3288 * the mbufs in the descriptor and sends data which has been
3289 * dma'ed into host memory to upper layer.
3291 * We loop at most count times if count is > 0, or until done if
3294 * Return TRUE for more work, FALSE for all clean.
3295 *********************************************************************/
3297 ixv_rxeof(struct ix_queue *que, int count)
3299 struct adapter *adapter = que->adapter;
3300 struct rx_ring *rxr = que->rxr;
3301 struct ifnet *ifp = adapter->ifp;
3302 struct lro_ctrl *lro = &rxr->lro;
3303 struct lro_entry *queued;
3304 int i, nextp, processed = 0;
3306 union ixgbe_adv_rx_desc *cur;
3307 struct ixv_rx_buf *rbuf, *nbuf;
3311 for (i = rxr->next_to_check; count != 0;) {
3312 struct mbuf *sendmp, *mh, *mp;
3314 u16 hlen, plen, hdr, vtag;
3317 /* Sync the ring. */
3318 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3319 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3321 cur = &rxr->rx_base[i];
3322 staterr = le32toh(cur->wb.upper.status_error);
3324 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
3326 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3333 cur->wb.upper.status_error = 0;
3334 rbuf = &rxr->rx_buffers[i];
3338 plen = le16toh(cur->wb.upper.length);
3339 ptype = le32toh(cur->wb.lower.lo_dword.data) &
3340 IXGBE_RXDADV_PKTTYPE_MASK;
3341 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
3342 vtag = le16toh(cur->wb.upper.vlan);
3343 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3345 /* Make sure all parts of a bad packet are discarded */
3346 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
3349 rxr->rx_discarded++;
3351 rxr->discard = TRUE;
3353 rxr->discard = FALSE;
3354 ixv_rx_discard(rxr, i);
3360 if (nextp == adapter->num_rx_desc)
3362 nbuf = &rxr->rx_buffers[nextp];
3366 ** The header mbuf is ONLY used when header
3367 ** split is enabled, otherwise we get normal
3368 ** behavior, ie, both header and payload
3369 ** are DMA'd into the payload buffer.
3371 ** Rather than using the fmp/lmp global pointers
3372 ** we now keep the head of a packet chain in the
3373 ** buffer struct and pass this along from one
3374 ** descriptor to the next, until we get EOP.
3376 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
3377 /* This must be an initial descriptor */
3378 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3379 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3380 if (hlen > IXV_RX_HDR)
3383 mh->m_flags |= M_PKTHDR;
3385 mh->m_pkthdr.len = mh->m_len;
3386 /* Null buf pointer so it is refreshed */
3387 rbuf->m_head = NULL;
3389 ** Check the payload length, this
3390 ** could be zero if its a small
3396 mp->m_flags &= ~M_PKTHDR;
3398 mh->m_pkthdr.len += mp->m_len;
3399 /* Null buf pointer so it is refreshed */
3400 rbuf->m_pack = NULL;
3401 rxr->rx_split_packets++;
3404 ** Now create the forward
3405 ** chain so when complete
3409 /* stash the chain head */
3411 /* Make forward chain */
3413 mp->m_next = nbuf->m_pack;
3415 mh->m_next = nbuf->m_pack;
3417 /* Singlet, prepare to send */
3419 if ((adapter->num_vlans) &&
3420 (staterr & IXGBE_RXD_STAT_VP)) {
3421 sendmp->m_pkthdr.ether_vtag = vtag;
3422 sendmp->m_flags |= M_VLANTAG;
3427 ** Either no header split, or a
3428 ** secondary piece of a fragmented
3433 ** See if there is a stored head
3434 ** that determines what we are
3437 rbuf->m_pack = rbuf->fmp = NULL;
3439 if (sendmp != NULL) /* secondary frag */
3440 sendmp->m_pkthdr.len += mp->m_len;
3442 /* first desc of a non-ps chain */
3444 sendmp->m_flags |= M_PKTHDR;
3445 sendmp->m_pkthdr.len = mp->m_len;
3446 if (staterr & IXGBE_RXD_STAT_VP) {
3447 sendmp->m_pkthdr.ether_vtag = vtag;
3448 sendmp->m_flags |= M_VLANTAG;
3451 /* Pass the head pointer on */
3455 mp->m_next = nbuf->m_pack;
3459 /* Sending this frame? */
3461 sendmp->m_pkthdr.rcvif = ifp;
3464 /* capture data for AIM */
3465 rxr->bytes += sendmp->m_pkthdr.len;
3466 rxr->rx_bytes += sendmp->m_pkthdr.len;
3467 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3468 ixv_rx_checksum(staterr, sendmp, ptype);
3469 #if __FreeBSD_version >= 800000
3470 sendmp->m_pkthdr.flowid = que->msix;
3471 sendmp->m_flags |= M_FLOWID;
3475 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3476 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3478 /* Advance our pointers to the next descriptor. */
3479 if (++i == adapter->num_rx_desc)
3482 /* Now send to the stack or do LRO */
3484 ixv_rx_input(rxr, ifp, sendmp, ptype);
3486 /* Every 8 descriptors we go to refresh mbufs */
3487 if (processed == 8) {
3488 ixv_refresh_mbufs(rxr, i);
3493 /* Refresh any remaining buf structs */
3494 if (ixv_rx_unrefreshed(rxr))
3495 ixv_refresh_mbufs(rxr, i);
3497 rxr->next_to_check = i;
3500 * Flush any outstanding LRO work
3502 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
3503 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3504 tcp_lro_flush(lro, queued);
3510 ** We still have cleaning to do?
3511 ** Schedule another interrupt if so.
3513 if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
3514 ixv_rearm_queues(adapter, (u64)(1 << que->msix));
3522 /*********************************************************************
3524 * Verify that the hardware indicated that the checksum is valid.
3525 * Inform the stack about the status of checksum so that stack
3526 * doesn't spend time verifying the checksum.
3528 *********************************************************************/
3530 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
3532 u16 status = (u16) staterr;
3533 u8 errors = (u8) (staterr >> 24);
3536 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3537 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
3540 if (status & IXGBE_RXD_STAT_IPCS) {
3541 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3542 /* IP Checksum Good */
3543 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3544 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3547 mp->m_pkthdr.csum_flags = 0;
3549 if (status & IXGBE_RXD_STAT_L4CS) {
3550 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3551 #if __FreeBSD_version >= 800000
3553 type = CSUM_SCTP_VALID;
3555 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3556 mp->m_pkthdr.csum_flags |= type;
3558 mp->m_pkthdr.csum_data = htons(0xffff);
3565 ixv_setup_vlan_support(struct adapter *adapter)
3567 struct ixgbe_hw *hw = &adapter->hw;
3568 u32 ctrl, vid, vfta, retry;
3572 ** We get here thru init_locked, meaning
3573 ** a soft reset, this has already cleared
3574 ** the VFTA and other state, so if there
3575 ** have been no vlan's registered do nothing.
3577 if (adapter->num_vlans == 0)
3580 /* Enable the queues */
3581 for (int i = 0; i < adapter->num_queues; i++) {
3582 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3583 ctrl |= IXGBE_RXDCTL_VME;
3584 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
3588 ** A soft reset zero's out the VFTA, so
3589 ** we need to repopulate it now.
3591 for (int i = 0; i < VFTA_SIZE; i++) {
3592 if (ixv_shadow_vfta[i] == 0)
3594 vfta = ixv_shadow_vfta[i];
3596 ** Reconstruct the vlan id's
3597 ** based on the bits set in each
3598 ** of the array ints.
3600 for ( int j = 0; j < 32; j++) {
3602 if ((vfta & (1 << j)) == 0)
3605 /* Call the shared code mailbox routine */
3606 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
3615 ** This routine is run via an vlan config EVENT,
3616 ** it enables us to use the HW Filter table since
3617 ** we can get the vlan id. This just creates the
3618 ** entry in the soft version of the VFTA, init will
3619 ** repopulate the real table.
3622 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3624 struct adapter *adapter = ifp->if_softc;
3627 if (ifp->if_softc != arg) /* Not our event */
3630 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3633 IXV_CORE_LOCK(adapter);
3634 index = (vtag >> 5) & 0x7F;
3636 ixv_shadow_vfta[index] |= (1 << bit);
3637 ++adapter->num_vlans;
3638 /* Re-init to load the changes */
3639 ixv_init_locked(adapter);
3640 IXV_CORE_UNLOCK(adapter);
3644 ** This routine is run via an vlan
3645 ** unconfig EVENT, remove our entry
3646 ** in the soft vfta.
3649 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3651 struct adapter *adapter = ifp->if_softc;
3654 if (ifp->if_softc != arg)
3657 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3660 IXV_CORE_LOCK(adapter);
3661 index = (vtag >> 5) & 0x7F;
3663 ixv_shadow_vfta[index] &= ~(1 << bit);
3664 --adapter->num_vlans;
3665 /* Re-init to load the changes */
3666 ixv_init_locked(adapter);
3667 IXV_CORE_UNLOCK(adapter);
3671 ixv_enable_intr(struct adapter *adapter)
3673 struct ixgbe_hw *hw = &adapter->hw;
3674 struct ix_queue *que = adapter->queues;
3675 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3678 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
3680 mask = IXGBE_EIMS_ENABLE_MASK;
3681 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3682 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
3684 for (int i = 0; i < adapter->num_queues; i++, que++)
3685 ixv_enable_queue(adapter, que->msix);
3687 IXGBE_WRITE_FLUSH(hw);
3693 ixv_disable_intr(struct adapter *adapter)
3695 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
3696 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
3697 IXGBE_WRITE_FLUSH(&adapter->hw);
3702 ** Setup the correct IVAR register for a particular MSIX interrupt
3703 ** - entry is the register array entry
3704 ** - vector is the MSIX vector for this queue
3705 ** - type is RX/TX/MISC
3708 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3710 struct ixgbe_hw *hw = &adapter->hw;
3713 vector |= IXGBE_IVAR_ALLOC_VAL;
3715 if (type == -1) { /* MISC IVAR */
3716 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
3719 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
3720 } else { /* RX/TX IVARS */
3721 index = (16 * (entry & 1)) + (8 * type);
3722 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
3723 ivar &= ~(0xFF << index);
3724 ivar |= (vector << index);
3725 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
3730 ixv_configure_ivars(struct adapter *adapter)
3732 struct ix_queue *que = adapter->queues;
3734 for (int i = 0; i < adapter->num_queues; i++, que++) {
3735 /* First the RX queue entry */
3736 ixv_set_ivar(adapter, i, que->msix, 0);
3737 /* ... and the TX */
3738 ixv_set_ivar(adapter, i, que->msix, 1);
3739 /* Set an initial value in EITR */
3740 IXGBE_WRITE_REG(&adapter->hw,
3741 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
3744 /* For the Link interrupt */
3745 ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
3750 ** Tasklet handler for MSIX MBX interrupts
3751 ** - do outside interrupt since it might sleep
3754 ixv_handle_mbx(void *context, int pending)
3756 struct adapter *adapter = context;
3758 ixgbe_check_link(&adapter->hw,
3759 &adapter->link_speed, &adapter->link_up, 0);
3760 ixv_update_link_status(adapter);
3764 ** The VF stats registers never have a truely virgin
3765 ** starting point, so this routine tries to make an
3766 ** artificial one, marking ground zero on attach as
3770 ixv_save_stats(struct adapter *adapter)
3772 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
3773 adapter->stats.saved_reset_vfgprc +=
3774 adapter->stats.vfgprc - adapter->stats.base_vfgprc;
3775 adapter->stats.saved_reset_vfgptc +=
3776 adapter->stats.vfgptc - adapter->stats.base_vfgptc;
3777 adapter->stats.saved_reset_vfgorc +=
3778 adapter->stats.vfgorc - adapter->stats.base_vfgorc;
3779 adapter->stats.saved_reset_vfgotc +=
3780 adapter->stats.vfgotc - adapter->stats.base_vfgotc;
3781 adapter->stats.saved_reset_vfmprc +=
3782 adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3787 ixv_init_stats(struct adapter *adapter)
3789 struct ixgbe_hw *hw = &adapter->hw;
3791 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
3792 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
3793 adapter->stats.last_vfgorc |=
3794 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
3796 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
3797 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
3798 adapter->stats.last_vfgotc |=
3799 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
3801 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
3803 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
3804 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
3805 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
3806 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
3807 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
3810 #define UPDATE_STAT_32(reg, last, count) \
3812 u32 current = IXGBE_READ_REG(hw, reg); \
3813 if (current < last) \
3814 count += 0x100000000LL; \
3816 count &= 0xFFFFFFFF00000000LL; \
3820 #define UPDATE_STAT_36(lsb, msb, last, count) \
3822 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
3823 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
3824 u64 current = ((cur_msb << 32) | cur_lsb); \
3825 if (current < last) \
3826 count += 0x1000000000LL; \
3828 count &= 0xFFFFFFF000000000LL; \
3833 ** ixv_update_stats - Update the board statistics counters.
3836 ixv_update_stats(struct adapter *adapter)
3838 struct ixgbe_hw *hw = &adapter->hw;
3840 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3841 adapter->stats.vfgprc);
3842 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3843 adapter->stats.vfgptc);
3844 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3845 adapter->stats.last_vfgorc, adapter->stats.vfgorc);
3846 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3847 adapter->stats.last_vfgotc, adapter->stats.vfgotc);
3848 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3849 adapter->stats.vfmprc);
3852 /**********************************************************************
3854 * This routine is called only when ixgbe_display_debug_stats is enabled.
3855 * This routine provides a way to take a look at important statistics
3856 * maintained by the driver and hardware.
3858 **********************************************************************/
3860 ixv_print_hw_stats(struct adapter * adapter)
3862 device_t dev = adapter->dev;
3864 device_printf(dev,"Std Mbuf Failed = %lu\n",
3865 adapter->mbuf_defrag_failed);
3866 device_printf(dev,"Driver dropped packets = %lu\n",
3867 adapter->dropped_pkts);
3868 device_printf(dev, "watchdog timeouts = %ld\n",
3869 adapter->watchdog_events);
3871 device_printf(dev,"Good Packets Rcvd = %llu\n",
3872 (long long)adapter->stats.vfgprc);
3873 device_printf(dev,"Good Packets Xmtd = %llu\n",
3874 (long long)adapter->stats.vfgptc);
3875 device_printf(dev,"TSO Transmissions = %lu\n",
3880 /**********************************************************************
3882 * This routine is called only when em_display_debug_stats is enabled.
3883 * This routine provides a way to take a look at important statistics
3884 * maintained by the driver and hardware.
3886 **********************************************************************/
3888 ixv_print_debug_info(struct adapter *adapter)
3890 device_t dev = adapter->dev;
3891 struct ixgbe_hw *hw = &adapter->hw;
3892 struct ix_queue *que = adapter->queues;
3893 struct rx_ring *rxr;
3894 struct tx_ring *txr;
3895 struct lro_ctrl *lro;
3897 device_printf(dev,"Error Byte Count = %u \n",
3898 IXGBE_READ_REG(hw, IXGBE_ERRBC));
3900 for (int i = 0; i < adapter->num_queues; i++, que++) {
3904 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
3905 que->msix, (long)que->irqs);
3906 device_printf(dev,"RX(%d) Packets Received: %lld\n",
3907 rxr->me, (long long)rxr->rx_packets);
3908 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
3909 rxr->me, (long long)rxr->rx_split_packets);
3910 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
3911 rxr->me, (long)rxr->rx_bytes);
3912 device_printf(dev,"RX(%d) LRO Queued= %d\n",
3913 rxr->me, lro->lro_queued);
3914 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
3915 rxr->me, lro->lro_flushed);
3916 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
3917 txr->me, (long)txr->total_packets);
3918 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
3919 txr->me, (long)txr->no_desc_avail);
3922 device_printf(dev,"MBX IRQ Handled: %lu\n",
3923 (long)adapter->mbx_irq);
3928 ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
3932 struct adapter *adapter;
3935 error = sysctl_handle_int(oidp, &result, 0, req);
3937 if (error || !req->newptr)
3941 adapter = (struct adapter *) arg1;
3942 ixv_print_hw_stats(adapter);
3948 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
3951 struct adapter *adapter;
3954 error = sysctl_handle_int(oidp, &result, 0, req);
3956 if (error || !req->newptr)
3960 adapter = (struct adapter *) arg1;
3961 ixv_print_debug_info(adapter);
3967 ** Set flow control using sysctl:
3968 ** Flow control values:
3975 ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
3978 struct adapter *adapter;
3980 error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
3985 adapter = (struct adapter *) arg1;
3986 switch (ixv_flow_control) {
3987 case ixgbe_fc_rx_pause:
3988 case ixgbe_fc_tx_pause:
3990 adapter->hw.fc.requested_mode = ixv_flow_control;
3994 adapter->hw.fc.requested_mode = ixgbe_fc_none;
3997 ixgbe_fc_enable(&adapter->hw, 0);
4002 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
4003 const char *description, int *limit, int value)
4006 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4007 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4008 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);