* };
*
* struct device devices[] = {
- * #define RTE_PCI_DEV_ID_DECL(vend, dev) {vend, dev},
+ * #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {vend, dev},
* #include <rte_pci_dev_ids.h>
* };
* @endcode
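+ *
+ * Because any RTE_PCI_DEV_ID_DECL_* macro left undefined expands to
+ * nothing, several device families can also be selected at once. A
+ * sketch (the table name is illustrative only):
+ *
+ * @code
+ * struct device ixgbe_all_devs[] = {
+ * #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {vend, dev},
+ * #define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {vend, dev},
+ * #include <rte_pci_dev_ids.h>
+ * };
+ * @endcode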
#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev)
#endif
+#ifndef RTE_PCI_DEV_ID_DECL_IXGBE
+#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_IXGBEVF
+#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev)
+#endif
+
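+/*
+ * Each #ifndef guard above supplies an empty fallback definition, so the
+ * device entries of any family the includer did not opt into vanish at
+ * preprocessing time.
+ */
+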
#ifndef PCI_VENDOR_ID_INTEL
/** Vendor ID used by Intel devices */
#define PCI_VENDOR_ID_INTEL 0x8086
RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SFP)
/****************** Physical IXGBE devices from ixgbe_type.h ******************/
-#ifdef RTE_LIBRTE_IXGBE_PMD
#define IXGBE_DEV_ID_82598 0x10B6
#define IXGBE_DEV_ID_82598_BX 0x1508
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_X540T 0x1528
+#define IXGBE_DEV_ID_X540T1 0x1560
+
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
+ IXGBE_DEV_ID_82598AF_SINGLE_PORT)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT2)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
+ IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KR)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
+ IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
+ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_CX4)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_SFP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_RNDC)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_560FLR)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_ECNA_DP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_FCOE)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_EM)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_SF2)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599EN_SFP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T1)
+
+/****************** Virtual IGB devices from e1000_hw.h ******************/
+
+#define E1000_DEV_ID_82576_VF 0x10CA
+#define E1000_DEV_ID_82576_VF_HV 0x152D
+#define E1000_DEV_ID_I350_VF 0x1520
+#define E1000_DEV_ID_I350_VF_HV 0x152F
+
+RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_VF)
+RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_VF_HV)
+RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_VF)
+RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_VF_HV)
+
+/****************** Virtual IXGBE devices from ixgbe_type.h ******************/
+
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
+#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT2)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KR)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_CX4)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_SFP)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_FCOE)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_EM)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599EN_SFP)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM)
-RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T)
-
-#endif /* RTE_LIBRTE_IXGBE_PMD */
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_VF)
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_VF_HV)
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540_VF)
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540_VF_HV)
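+
+/*
+ * A minimal consumer sketch for the VF entries above (the table name and
+ * the rte_pci_id initializer layout are assumptions, not defined here):
+ *
+ *   static struct rte_pci_id pci_id_ixgbevf_map[] = {
+ *   #define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) \
+ *       {.vendor_id = (vend), .device_id = (dev)},
+ *   #include <rte_pci_dev_ids.h>
+ *       {.vendor_id = 0},  zero vendor_id terminates the table
+ *   };
+ */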
/*
 * Undef all RTE_PCI_DEV_ID_DECL_* here.
 */
#undef RTE_PCI_DEV_ID_DECL_EM
#undef RTE_PCI_DEV_ID_DECL_IGB
#undef RTE_PCI_DEV_ID_DECL_IGBVF
+#undef RTE_PCI_DEV_ID_DECL_IXGBE
+#undef RTE_PCI_DEV_ID_DECL_IXGBEVF
#
LIB = librte_pmd_ixgbe.a
-CFLAGS += -O3
+CFLAGS += -O3 -Wno-deprecated
CFLAGS += $(WERROR_FLAGS)
+VPATH += $(RTE_SDK)/lib/librte_pmd_ixgbe/ixgbe
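+# With the base-driver directory on VPATH, the shared-code sources below
+# can be listed by file name alone and make locates them under ixgbe/.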
+
#
# all source are stored in SRCS-y
#
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_common.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_82598.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_82599.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_x540.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_phy.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_api.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_vf.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82598.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x540.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_mbx.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
Intel® IXGBE driver
===================
-This directory contains code from the Intel® Network Adapter Driver for PCI-E
-10 Gigabit Network Connections under FreeBSD, version 2.4.4, dated 10/25/2011.
-This code is available from
-`http://downloadmirror.intel.com/14688/eng/ixgbe-2.4.4.tar.gz`
-
+This directory contains the source code of the FreeBSD ixgbe driver,
+version cid-10g-shared-code.2012.11.09, released by Intel's LAN Access
+Division (LAD). The lad/ sub-directory contains the original source
+package.
This driver is valid for the product(s) listed below
* Intel® 10 Gigabit AF DA Dual Port Server Adapter
+++ /dev/null
-/******************************************************************************
-
- Copyright (c) 2001-2010, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifdef HAVE_KERNEL_OPTION_HEADERS
-#include "opt_inet.h"
-#include "opt_inet6.h"
-#endif
-
-#include "ixgbe.h"
-
-/*********************************************************************
- * Set this to one to display debug statistics
- *********************************************************************/
-int ixgbe_display_debug_stats = 0;
-
-/*********************************************************************
- * Driver version
- *********************************************************************/
-char ixgbe_driver_version[] = "2.4.4";
-
-/*********************************************************************
- * PCI Device ID Table
- *
- * Used by probe to select devices to load on
- * Last field stores an index into ixgbe_strings
- * Last entry must be all 0s
- *
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
- *********************************************************************/
-
-static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
-{
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
- /* required last entry */
- {0, 0, 0, 0, 0}
-};
-
-/*********************************************************************
- * Table of branding strings
- *********************************************************************/
-
-static char *ixgbe_strings[] = {
- "Intel(R) PRO/10GbE PCI-Express Network Driver"
-};
-
-/*********************************************************************
- * Function prototypes
- *********************************************************************/
-static int ixgbe_probe(device_t);
-static int ixgbe_attach(device_t);
-static int ixgbe_detach(device_t);
-static int ixgbe_shutdown(device_t);
-static void ixgbe_start(struct ifnet *);
-static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
-#if __FreeBSD_version >= 800000
-static int ixgbe_mq_start(struct ifnet *, struct mbuf *);
-static int ixgbe_mq_start_locked(struct ifnet *,
- struct tx_ring *, struct mbuf *);
-static void ixgbe_qflush(struct ifnet *);
-#endif
-static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
-static void ixgbe_init(void *);
-static void ixgbe_init_locked(struct adapter *);
-static void ixgbe_stop(void *);
-static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
-static int ixgbe_media_change(struct ifnet *);
-static void ixgbe_identify_hardware(struct adapter *);
-static int ixgbe_allocate_pci_resources(struct adapter *);
-static int ixgbe_allocate_msix(struct adapter *);
-static int ixgbe_allocate_legacy(struct adapter *);
-static int ixgbe_allocate_queues(struct adapter *);
-static int ixgbe_setup_msix(struct adapter *);
-static void ixgbe_free_pci_resources(struct adapter *);
-static void ixgbe_local_timer(void *);
-static int ixgbe_setup_interface(device_t, struct adapter *);
-static void ixgbe_config_link(struct adapter *);
-
-static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
-static int ixgbe_setup_transmit_structures(struct adapter *);
-static void ixgbe_setup_transmit_ring(struct tx_ring *);
-static void ixgbe_initialize_transmit_units(struct adapter *);
-static void ixgbe_free_transmit_structures(struct adapter *);
-static void ixgbe_free_transmit_buffers(struct tx_ring *);
-
-static int ixgbe_allocate_receive_buffers(struct rx_ring *);
-static int ixgbe_setup_receive_structures(struct adapter *);
-static int ixgbe_setup_receive_ring(struct rx_ring *);
-static void ixgbe_initialize_receive_units(struct adapter *);
-static void ixgbe_free_receive_structures(struct adapter *);
-static void ixgbe_free_receive_buffers(struct rx_ring *);
-static void ixgbe_setup_hw_rsc(struct rx_ring *);
-
-static void ixgbe_enable_intr(struct adapter *);
-static void ixgbe_disable_intr(struct adapter *);
-static void ixgbe_update_stats_counters(struct adapter *);
-static bool ixgbe_txeof(struct tx_ring *);
-static bool ixgbe_rxeof(struct ix_queue *, int);
-static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
-static void ixgbe_set_promisc(struct adapter *);
-static void ixgbe_set_multi(struct adapter *);
-static void ixgbe_update_link_status(struct adapter *);
-static void ixgbe_refresh_mbufs(struct rx_ring *, int);
-static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
-static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
-static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
-static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
- struct ixgbe_dma_alloc *, int);
-static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
-static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
- const char *, int *, int);
-static bool ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
-static bool ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
-static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
-static void ixgbe_configure_ivars(struct adapter *);
-static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
-
-static void ixgbe_setup_vlan_hw_support(struct adapter *);
-static void ixgbe_register_vlan(void *, struct ifnet *, u16);
-static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
-
-static void ixgbe_add_hw_stats(struct adapter *adapter);
-
-static __inline void ixgbe_rx_discard(struct rx_ring *, int);
-static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
- struct mbuf *, u32);
-
-/* Support for pluggable optic modules */
-static bool ixgbe_sfp_probe(struct adapter *);
-static void ixgbe_setup_optics(struct adapter *);
-
-/* Legacy (single vector interrupt handler */
-static void ixgbe_legacy_irq(void *);
-
-/* The MSI/X Interrupt handlers */
-static void ixgbe_msix_que(void *);
-static void ixgbe_msix_link(void *);
-
-/* Deferred interrupt tasklets */
-static void ixgbe_handle_que(void *, int);
-static void ixgbe_handle_link(void *, int);
-static void ixgbe_handle_msf(void *, int);
-static void ixgbe_handle_mod(void *, int);
-
-#ifdef IXGBE_FDIR
-static void ixgbe_atr(struct tx_ring *, struct mbuf *);
-static void ixgbe_reinit_fdir(void *, int);
-#endif
-
-/*********************************************************************
- * FreeBSD Device Interface Entry Points
- *********************************************************************/
-
-static device_method_t ixgbe_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, ixgbe_probe),
- DEVMETHOD(device_attach, ixgbe_attach),
- DEVMETHOD(device_detach, ixgbe_detach),
- DEVMETHOD(device_shutdown, ixgbe_shutdown),
- {0, 0}
-};
-
-static driver_t ixgbe_driver = {
- "ix", ixgbe_methods, sizeof(struct adapter),
-};
-
-devclass_t ixgbe_devclass;
-DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
-
-MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
-MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
-
-/*
-** TUNEABLE PARAMETERS:
-*/
-
-/*
-** AIM: Adaptive Interrupt Moderation
-** which means that the interrupt rate
-** is varied over time based on the
-** traffic for that interrupt vector
-*/
-static int ixgbe_enable_aim = TRUE;
-TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
-
-static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
-TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
-
-/* How many packets rxeof tries to clean at a time */
-static int ixgbe_rx_process_limit = 128;
-TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
-
-/*
-** Smart speed setting, default to on
-** this only works as a compile option
-** right now as its during attach, set
-** this to 'ixgbe_smart_speed_off' to
-** disable.
-*/
-static int ixgbe_smart_speed = ixgbe_smart_speed_on;
-
-/*
- * MSIX should be the default for best performance,
- * but this allows it to be forced off for testing.
- */
-static int ixgbe_enable_msix = 1;
-TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
-
-/*
- * Header split: this causes the hardware to DMA
- * the header into a separate mbuf from the payload,
- * it can be a performance win in some workloads, but
- * in others it actually hurts, its off by default.
- */
-static bool ixgbe_header_split = FALSE;
-TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
-
-/*
- * Number of Queues, can be set to 0,
- * it then autoconfigures based on the
- * number of cpus with a max of 8. This
- * can be overriden manually here.
- */
-static int ixgbe_num_queues = 0;
-TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
-
-/*
-** Number of TX descriptors per ring,
-** setting higher than RX as this seems
-** the better performing choice.
-*/
-static int ixgbe_txd = PERFORM_TXD;
-TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
-
-/* Number of RX descriptors per ring */
-static int ixgbe_rxd = PERFORM_RXD;
-TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
-
-/* Keep running tab on them for sanity check */
-static int ixgbe_total_ports;
-
-#ifdef IXGBE_FDIR
-/*
-** For Flow Director: this is the
-** number of TX packets we sample
-** for the filter pool, this means
-** every 20th packet will be probed.
-**
-** This feature can be disabled by
-** setting this to 0.
-*/
-static int atr_sample_rate = 20;
-/*
-** Flow Director actually 'steals'
-** part of the packet buffer as its
-** filter pool, this variable controls
-** how much it uses:
-** 0 = 64K, 1 = 128K, 2 = 256K
-*/
-static int fdir_pballoc = 1;
-#endif
-
-/*********************************************************************
- * Device identification routine
- *
- * ixgbe_probe determines if the driver should be loaded on
- * adapter based on PCI vendor/device id of the adapter.
- *
- * return BUS_PROBE_DEFAULT on success, positive on failure
- *********************************************************************/
-
-static int
-ixgbe_probe(device_t dev)
-{
- ixgbe_vendor_info_t *ent;
-
- u16 pci_vendor_id = 0;
- u16 pci_device_id = 0;
- u16 pci_subvendor_id = 0;
- u16 pci_subdevice_id = 0;
- char adapter_name[256];
-
- INIT_DEBUGOUT("ixgbe_probe: begin");
-
- pci_vendor_id = pci_get_vendor(dev);
- if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
- return (ENXIO);
-
- pci_device_id = pci_get_device(dev);
- pci_subvendor_id = pci_get_subvendor(dev);
- pci_subdevice_id = pci_get_subdevice(dev);
-
- ent = ixgbe_vendor_info_array;
- while (ent->vendor_id != 0) {
- if ((pci_vendor_id == ent->vendor_id) &&
- (pci_device_id == ent->device_id) &&
-
- ((pci_subvendor_id == ent->subvendor_id) ||
- (ent->subvendor_id == 0)) &&
-
- ((pci_subdevice_id == ent->subdevice_id) ||
- (ent->subdevice_id == 0))) {
- sprintf(adapter_name, "%s, Version - %s",
- ixgbe_strings[ent->index],
- ixgbe_driver_version);
- device_set_desc_copy(dev, adapter_name);
- ++ixgbe_total_ports;
- return (BUS_PROBE_DEFAULT);
- }
- ent++;
- }
- return (ENXIO);
-}
-
-/*********************************************************************
- * Device initialization routine
- *
- * The attach entry point is called when the driver is being loaded.
- * This routine identifies the type of hardware, allocates all resources
- * and initializes the hardware.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-ixgbe_attach(device_t dev)
-{
- struct adapter *adapter;
- struct ixgbe_hw *hw;
- int error = 0;
- u16 csum;
- u32 ctrl_ext;
-
- INIT_DEBUGOUT("ixgbe_attach: begin");
-
- if (resource_disabled("ixgbe", device_get_unit(dev))) {
- device_printf(dev, "Disabled by device hint\n");
- return (ENXIO);
- }
-
- /* Allocate, clear, and link in our adapter structure */
- adapter = device_get_softc(dev);
- adapter->dev = adapter->osdep.dev = dev;
- hw = &adapter->hw;
-
- /* Core Lock Init*/
- IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
-
- /* SYSCTL APIs */
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
- &ixgbe_enable_aim, 1, "Interrupt Moderation");
-
- /*
- ** Allow a kind of speed control by forcing the autoneg
- ** advertised speed list to only a certain value, this
- ** supports 1G on 82599 devices, and 100Mb on x540.
- */
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixgbe_set_advertise, "I", "Link Speed");
-
-
- /* Set up the timer callout */
- callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
-
- /* Determine hardware revision */
- ixgbe_identify_hardware(adapter);
-
- /* Do base PCI setup - map BAR0 */
- if (ixgbe_allocate_pci_resources(adapter)) {
- device_printf(dev, "Allocation of PCI resources failed\n");
- error = ENXIO;
- goto err_out;
- }
-
- /* Do descriptor calc and sanity checks */
- if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
- ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
- device_printf(dev, "TXD config issue, using default!\n");
- adapter->num_tx_desc = DEFAULT_TXD;
- } else
- adapter->num_tx_desc = ixgbe_txd;
-
- /*
- ** With many RX rings it is easy to exceed the
- ** system mbuf allocation. Tuning nmbclusters
- ** can alleviate this.
- */
- if (nmbclusters > 0 ) {
- int s;
- s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
- if (s > nmbclusters) {
- device_printf(dev, "RX Descriptors exceed "
- "system mbuf max, using default instead!\n");
- ixgbe_rxd = DEFAULT_RXD;
- }
- }
-
- if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
- ixgbe_rxd < MIN_TXD || ixgbe_rxd > MAX_TXD) {
- device_printf(dev, "RXD config issue, using default!\n");
- adapter->num_rx_desc = DEFAULT_RXD;
- } else
- adapter->num_rx_desc = ixgbe_rxd;
-
- /* Allocate our TX/RX Queues */
- if (ixgbe_allocate_queues(adapter)) {
- error = ENOMEM;
- goto err_out;
- }
-
- /* Allocate multicast array memory. */
- adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
- MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
- if (adapter->mta == NULL) {
- device_printf(dev, "Can not allocate multicast setup array\n");
- error = ENOMEM;
- goto err_late;
- }
-
- /* Initialize the shared code */
- error = ixgbe_init_shared_code(hw);
- if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
- /*
- ** No optics in this port, set up
- ** so the timer routine will probe
- ** for later insertion.
- */
- adapter->sfp_probe = TRUE;
- error = 0;
- } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- device_printf(dev,"Unsupported SFP+ module detected!\n");
- error = EIO;
- goto err_late;
- } else if (error) {
- device_printf(dev,"Unable to initialize the shared code\n");
- error = EIO;
- goto err_late;
- }
-
- /* Make sure we have a good EEPROM before we read from it */
- if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
- device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
- error = EIO;
- goto err_late;
- }
-
- /* Get Hardware Flow Control setting */
- hw->fc.requested_mode = ixgbe_fc_full;
- adapter->fc = hw->fc.requested_mode;
- hw->fc.pause_time = IXGBE_FC_PAUSE;
- hw->fc.low_water = IXGBE_FC_LO;
- hw->fc.high_water[0] = IXGBE_FC_HI;
- hw->fc.send_xon = TRUE;
-
- error = ixgbe_init_hw(hw);
- if (error == IXGBE_ERR_EEPROM_VERSION) {
- device_printf(dev, "This device is a pre-production adapter/"
- "LOM. Please be aware there may be issues associated "
- "with your hardware.\n If you are experiencing problems "
- "please contact your Intel or hardware representative "
- "who provided you with this hardware.\n");
- } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
- device_printf(dev,"Unsupported SFP+ Module\n");
-
- if (error) {
- error = EIO;
- device_printf(dev,"Hardware Initialization Failure\n");
- goto err_late;
- }
-
- /* Detect and set physical type */
- ixgbe_setup_optics(adapter);
-
- if ((adapter->msix > 1) && (ixgbe_enable_msix))
- error = ixgbe_allocate_msix(adapter);
- else
- error = ixgbe_allocate_legacy(adapter);
- if (error)
- goto err_late;
-
- /* Setup OS specific network interface */
- if (ixgbe_setup_interface(dev, adapter) != 0)
- goto err_late;
-
- /* Sysctl for limiting the amount of work done in the taskqueue */
- ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
- "max number of rx packets to process", &adapter->rx_process_limit,
- ixgbe_rx_process_limit);
-
- /* Initialize statistics */
- ixgbe_update_stats_counters(adapter);
-
- /* Register for VLAN events */
- adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
- ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
- adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
- ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
-
- /* Print PCIE bus type/speed/width info */
- ixgbe_get_bus_info(hw);
- device_printf(dev,"PCI Express Bus: Speed %s %s\n",
- ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
- (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
- (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
- (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
- ("Unknown"));
-
- if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
- (hw->bus.speed == ixgbe_bus_speed_2500)) {
- device_printf(dev, "PCI-Express bandwidth available"
- " for this card\n is not sufficient for"
- " optimal performance.\n");
- device_printf(dev, "For optimal performance a x8 "
- "PCIE, or x4 PCIE 2 slot is required.\n");
- }
-
- /* let hardware know driver is loaded */
- ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
- ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
-
- ixgbe_add_hw_stats(adapter);
-
- INIT_DEBUGOUT("ixgbe_attach: end");
- return (0);
-err_late:
- ixgbe_free_transmit_structures(adapter);
- ixgbe_free_receive_structures(adapter);
-err_out:
- if (adapter->ifp != NULL)
- if_free(adapter->ifp);
- ixgbe_free_pci_resources(adapter);
- free(adapter->mta, M_DEVBUF);
- return (error);
-
-}
-
-/*********************************************************************
- * Device removal routine
- *
- * The detach entry point is called when the driver is being removed.
- * This routine stops the adapter and deallocates all the resources
- * that were allocated for driver operation.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-ixgbe_detach(device_t dev)
-{
- struct adapter *adapter = device_get_softc(dev);
- struct ix_queue *que = adapter->queues;
- u32 ctrl_ext;
-
- INIT_DEBUGOUT("ixgbe_detach: begin");
-
- /* Make sure VLANS are not using driver */
- if (adapter->ifp->if_vlantrunk != NULL) {
- device_printf(dev,"Vlan in use, detach first\n");
- return (EBUSY);
- }
-
- IXGBE_CORE_LOCK(adapter);
- ixgbe_stop(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- if (que->tq) {
- taskqueue_drain(que->tq, &que->que_task);
- taskqueue_free(que->tq);
- }
- }
-
- /* Drain the Link queue */
- if (adapter->tq) {
- taskqueue_drain(adapter->tq, &adapter->link_task);
- taskqueue_drain(adapter->tq, &adapter->mod_task);
- taskqueue_drain(adapter->tq, &adapter->msf_task);
-#ifdef IXGBE_FDIR
- taskqueue_drain(adapter->tq, &adapter->fdir_task);
-#endif
- taskqueue_free(adapter->tq);
- }
-
- /* let hardware know driver is unloading */
- ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
- ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
-
- /* Unregister VLAN events */
- if (adapter->vlan_attach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
- if (adapter->vlan_detach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
-
- ether_ifdetach(adapter->ifp);
- callout_drain(&adapter->timer);
- ixgbe_free_pci_resources(adapter);
- bus_generic_detach(dev);
- if_free(adapter->ifp);
-
- ixgbe_free_transmit_structures(adapter);
- ixgbe_free_receive_structures(adapter);
- free(adapter->mta, M_DEVBUF);
-
- IXGBE_CORE_LOCK_DESTROY(adapter);
- return (0);
-}
-
-/*********************************************************************
- *
- * Shutdown entry point
- *
- **********************************************************************/
-
-static int
-ixgbe_shutdown(device_t dev)
-{
- struct adapter *adapter = device_get_softc(dev);
- IXGBE_CORE_LOCK(adapter);
- ixgbe_stop(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- return (0);
-}
-
-
-/*********************************************************************
- * Transmit entry point
- *
- * ixgbe_start is called by the stack to initiate a transmit.
- * The driver will remain in this routine as long as there are
- * packets to transmit and transmit resources are available.
- * In case resources are not available stack is notified and
- * the packet is requeued.
- **********************************************************************/
-
-static void
-ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
-{
- struct mbuf *m_head;
- struct adapter *adapter = txr->adapter;
-
- IXGBE_TX_LOCK_ASSERT(txr);
-
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING)
- return;
- if (!adapter->link_active)
- return;
-
- while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
-
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
- if (m_head == NULL)
- break;
-
- if (ixgbe_xmit(txr, &m_head)) {
- if (m_head == NULL)
- break;
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
- break;
- }
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, m_head);
-
- /* Set watchdog on */
- txr->watchdog_time = ticks;
- txr->queue_status = IXGBE_QUEUE_WORKING;
-
- }
- return;
-}
-
-/*
- * Legacy TX start - called by the stack, this
- * always uses the first tx ring, and should
- * not be used with multiqueue tx enabled.
- */
-static void
-ixgbe_start(struct ifnet *ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = adapter->tx_rings;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXGBE_TX_LOCK(txr);
- ixgbe_start_locked(txr, ifp);
- IXGBE_TX_UNLOCK(txr);
- }
- return;
-}
-
-#if __FreeBSD_version >= 800000
-/*
-** Multiqueue Transmit driver
-**
-*/
-static int
-ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ix_queue *que;
- struct tx_ring *txr;
- int i = 0, err = 0;
-
- /* Which queue to use */
- if ((m->m_flags & M_FLOWID) != 0)
- i = m->m_pkthdr.flowid % adapter->num_queues;
-
- txr = &adapter->tx_rings[i];
- que = &adapter->queues[i];
-
- if (IXGBE_TX_TRYLOCK(txr)) {
- err = ixgbe_mq_start_locked(ifp, txr, m);
- IXGBE_TX_UNLOCK(txr);
- } else {
- err = drbr_enqueue(ifp, txr->br, m);
- taskqueue_enqueue(que->tq, &que->que_task);
- }
-
- return (err);
-}
-
-static int
-ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
-{
- struct adapter *adapter = txr->adapter;
- struct mbuf *next;
- int enqueued, err = 0;
-
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING || adapter->link_active == 0) {
- if (m != NULL)
- err = drbr_enqueue(ifp, txr->br, m);
- return (err);
- }
-
- enqueued = 0;
- if (m == NULL) {
- next = drbr_dequeue(ifp, txr->br);
- } else if (drbr_needs_enqueue(ifp, txr->br)) {
- if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
- return (err);
- next = drbr_dequeue(ifp, txr->br);
- } else
- next = m;
-
- /* Process the queue */
- while (next != NULL) {
- if ((err = ixgbe_xmit(txr, &next)) != 0) {
- if (next != NULL)
- err = drbr_enqueue(ifp, txr->br, next);
- break;
- }
- enqueued++;
- drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, next);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
- if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
- ixgbe_txeof(txr);
- if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
- next = drbr_dequeue(ifp, txr->br);
- }
-
- if (enqueued > 0) {
- /* Set watchdog on */
- txr->queue_status = IXGBE_QUEUE_WORKING;
- txr->watchdog_time = ticks;
- }
-
- return (err);
-}
-
-/*
-** Flush all ring buffers
-*/
-static void
-ixgbe_qflush(struct ifnet *ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = adapter->tx_rings;
- struct mbuf *m;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXGBE_TX_LOCK(txr);
- while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
- m_freem(m);
- IXGBE_TX_UNLOCK(txr);
- }
- if_qflush(ifp);
-}
-#endif /* __FreeBSD_version >= 800000 */
-
-/*********************************************************************
- * Ioctl entry point
- *
- * ixgbe_ioctl is called when the user wants to configure the
- * interface.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-
-static int
-ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ifreq *ifr = (struct ifreq *) data;
-#if defined(INET) || defined(INET6)
- struct ifaddr *ifa = (struct ifaddr *)data;
- bool avoid_reset = FALSE;
-#endif
- int error = 0;
-
- switch (command) {
-
- case SIOCSIFADDR:
-#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET)
- avoid_reset = TRUE;
-#endif
-#ifdef INET6
- if (ifa->ifa_addr->sa_family == AF_INET6)
- avoid_reset = TRUE;
-#endif
-#if defined(INET) || defined(INET6)
- /*
- ** Calling init results in link renegotiation,
- ** so we avoid doing it when possible.
- */
- if (avoid_reset) {
- ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ixgbe_init(adapter);
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
- } else
- error = ether_ioctl(ifp, command, data);
- break;
-#endif
- case SIOCSIFMTU:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
- if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
- error = EINVAL;
- } else {
- IXGBE_CORE_LOCK(adapter);
- ifp->if_mtu = ifr->ifr_mtu;
- adapter->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
- ixgbe_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- }
- break;
- case SIOCSIFFLAGS:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
- IXGBE_CORE_LOCK(adapter);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- if ((ifp->if_flags ^ adapter->if_flags) &
- (IFF_PROMISC | IFF_ALLMULTI)) {
- ixgbe_set_promisc(adapter);
- }
- } else
- ixgbe_init_locked(adapter);
- } else
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixgbe_stop(adapter);
- adapter->if_flags = ifp->if_flags;
- IXGBE_CORE_UNLOCK(adapter);
- break;
- case SIOCADDMULTI:
- case SIOCDELMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXGBE_CORE_LOCK(adapter);
- ixgbe_disable_intr(adapter);
- ixgbe_set_multi(adapter);
- ixgbe_enable_intr(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- }
- break;
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
- IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
- error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
- break;
- case SIOCSIFCAP:
- {
- int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
- IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
- if (mask & IFCAP_HWCSUM)
- ifp->if_capenable ^= IFCAP_HWCSUM;
- if (mask & IFCAP_TSO4)
- ifp->if_capenable ^= IFCAP_TSO4;
- if (mask & IFCAP_LRO)
- ifp->if_capenable ^= IFCAP_LRO;
- if (mask & IFCAP_VLAN_HWTAGGING)
- ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
- if (mask & IFCAP_VLAN_HWFILTER)
- ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
- if (mask & IFCAP_VLAN_HWTSO)
- ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXGBE_CORE_LOCK(adapter);
- ixgbe_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- }
- VLAN_CAPABILITIES(ifp);
- break;
- }
-
- default:
- IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
- error = ether_ioctl(ifp, command, data);
- break;
- }
-
- return (error);
-}
-
-/*********************************************************************
- * Init entry point
- *
- * This routine is used in two ways. It is used by the stack as
- * init entry point in network interface structure. It is also used
- * by the driver as a hw/sw initialization routine to get to a
- * consistent state.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-#define IXGBE_MHADD_MFS_SHIFT 16
-
-static void
-ixgbe_init_locked(struct adapter *adapter)
-{
- struct ifnet *ifp = adapter->ifp;
- device_t dev = adapter->dev;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 k, txdctl, mhadd, gpie;
- u32 rxdctl, rxctrl;
-
- mtx_assert(&adapter->core_mtx, MA_OWNED);
- INIT_DEBUGOUT("ixgbe_init: begin");
- hw->adapter_stopped = FALSE;
- ixgbe_stop_adapter(hw);
- callout_stop(&adapter->timer);
-
- /* reprogram the RAR[0] in case user changed it. */
- ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
-
- /* Get the latest mac address, User can use a LAA */
- bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
- IXGBE_ETH_LENGTH_OF_ADDRESS);
- ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
- hw->addr_ctrl.rar_used_count = 1;
-
- /* Set the various hardware offload abilities */
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO4)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM) {
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
-#if __FreeBSD_version >= 800000
- if (hw->mac.type != ixgbe_mac_82598EB)
- ifp->if_hwassist |= CSUM_SCTP;
-#endif
- }
-
- /* Prepare transmit descriptors and buffers */
- if (ixgbe_setup_transmit_structures(adapter)) {
- device_printf(dev,"Could not setup transmit structures\n");
- ixgbe_stop(adapter);
- return;
- }
-
- ixgbe_init_hw(hw);
- ixgbe_initialize_transmit_units(adapter);
-
- /* Setup Multicast table */
- ixgbe_set_multi(adapter);
-
- /*
- ** Determine the correct mbuf pool
- ** for doing jumbo/headersplit
- */
- if (adapter->max_frame_size <= 2048)
- adapter->rx_mbuf_sz = MCLBYTES;
- else if (adapter->max_frame_size <= 4096)
- adapter->rx_mbuf_sz = MJUMPAGESIZE;
- else if (adapter->max_frame_size <= 9216)
- adapter->rx_mbuf_sz = MJUM9BYTES;
- else
- adapter->rx_mbuf_sz = MJUM16BYTES;
-
- /* Prepare receive descriptors and buffers */
- if (ixgbe_setup_receive_structures(adapter)) {
- device_printf(dev,"Could not setup receive structures\n");
- ixgbe_stop(adapter);
- return;
- }
-
- /* Configure RX settings */
- ixgbe_initialize_receive_units(adapter);
-
- gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
-
- /* Enable Fan Failure Interrupt */
- gpie |= IXGBE_SDP1_GPIEN;
-
- /* Add for Module detection */
- if (hw->mac.type == ixgbe_mac_82599EB)
- gpie |= IXGBE_SDP2_GPIEN;
-
- /* Thermal Failure Detection */
- if (hw->mac.type == ixgbe_mac_X540)
- gpie |= IXGBE_SDP0_GPIEN;
-
- if (adapter->msix > 1) {
- /* Enable Enhanced MSIX mode */
- gpie |= IXGBE_GPIE_MSIX_MODE;
- gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
- IXGBE_GPIE_OCD;
- }
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
- /* Set MTU size */
- if (ifp->if_mtu > ETHERMTU) {
- mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
- mhadd &= ~IXGBE_MHADD_MFS_MASK;
- mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
- }
-
- /* Now enable all the queues */
-
- for (int i = 0; i < adapter->num_queues; i++) {
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- /* Set WTHRESH to 8, burst writeback */
- txdctl |= (8 << 16);
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
- }
-
- for (int i = 0; i < adapter->num_queues; i++) {
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
- if (hw->mac.type == ixgbe_mac_82598EB) {
- /*
- ** PTHRESH = 21
- ** HTHRESH = 4
- ** WTHRESH = 8
- */
- rxdctl &= ~0x3FFFFF;
- rxdctl |= 0x080420;
- }
- rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
- for (k = 0; k < 10; k++) {
- if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
- IXGBE_RXDCTL_ENABLE)
- break;
- else
- msec_delay(1);
- }
- wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
- }
-
- /* Set up VLAN support and filter */
- ixgbe_setup_vlan_hw_support(adapter);
-
- /* Enable Receive engine */
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- if (hw->mac.type == ixgbe_mac_82598EB)
- rxctrl |= IXGBE_RXCTRL_DMBYPS;
- rxctrl |= IXGBE_RXCTRL_RXEN;
- ixgbe_enable_rx_dma(hw, rxctrl);
-
- callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
-
- /* Set up MSI/X routing */
- if (ixgbe_enable_msix) {
- ixgbe_configure_ivars(adapter);
- /* Set up auto-mask */
- if (hw->mac.type == ixgbe_mac_82598EB)
- IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
- else {
- IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
- IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
- }
- } else { /* Simple settings for Legacy/MSI */
- ixgbe_set_ivar(adapter, 0, 0, 0);
- ixgbe_set_ivar(adapter, 0, 0, 1);
- IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
- }
-
-#ifdef IXGBE_FDIR
- /* Init Flow director */
- if (hw->mac.type != ixgbe_mac_82598EB) {
- u32 hdrm = 64 << fdir_pballoc;
-
- hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
- ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
- }
-#endif
-
- /*
- ** Check on any SFP devices that
- ** need to be kick-started
- */
- if (hw->phy.type == ixgbe_phy_none) {
- int err = hw->phy.ops.identify(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- device_printf(dev,
- "Unsupported SFP+ module type was detected.\n");
- return;
- }
- }
-
- /* Set moderation on the Link interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
-
- /* Config/Enable Link */
- ixgbe_config_link(adapter);
-
- /* And now turn on interrupts */
- ixgbe_enable_intr(adapter);
-
- /* Now inform the stack we're ready */
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
- return;
-}
-
-static void
-ixgbe_init(void *arg)
-{
- struct adapter *adapter = arg;
-
- IXGBE_CORE_LOCK(adapter);
- ixgbe_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- return;
-}
-
-
-/*
-**
-** MSIX Interrupt Handlers and Tasklets
-**
-*/
-
-static inline void
-ixgbe_enable_queue(struct adapter *adapter, u32 vector)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u64 queue = (u64)(1 << vector);
- u32 mask;
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- mask = (IXGBE_EIMS_RTX_QUEUE & queue);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
- } else {
- mask = (queue & 0xFFFFFFFF);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
- mask = (queue >> 32);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
- }
-}
-
-static inline void
-ixgbe_disable_queue(struct adapter *adapter, u32 vector)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u64 queue = (u64)(1 << vector);
- u32 mask;
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- mask = (IXGBE_EIMS_RTX_QUEUE & queue);
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
- } else {
- mask = (queue & 0xFFFFFFFF);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
- mask = (queue >> 32);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
- }
-}
-
-static inline void
-ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
-{
- u32 mask;
-
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- mask = (IXGBE_EIMS_RTX_QUEUE & queues);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
- } else {
- mask = (queues & 0xFFFFFFFF);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
- mask = (queues >> 32);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
- }
-}
-
-
-static void
-ixgbe_handle_que(void *context, int pending)
-{
- struct ix_queue *que = context;
- struct adapter *adapter = que->adapter;
- struct tx_ring *txr = que->txr;
- struct ifnet *ifp = adapter->ifp;
- bool more;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixgbe_rxeof(que, adapter->rx_process_limit);
- IXGBE_TX_LOCK(txr);
- ixgbe_txeof(txr);
-#if __FreeBSD_version >= 800000
- if (!drbr_empty(ifp, txr->br))
- ixgbe_mq_start_locked(ifp, txr, NULL);
-#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- ixgbe_start_locked(txr, ifp);
-#endif
- IXGBE_TX_UNLOCK(txr);
- if (more) {
- taskqueue_enqueue(que->tq, &que->que_task);
- return;
- }
- }
-
- /* Reenable this interrupt */
- ixgbe_enable_queue(adapter, que->msix);
- return;
-}
-
-
-/*********************************************************************
- *
- * Legacy Interrupt Service routine
- *
- **********************************************************************/
-
-static void
-ixgbe_legacy_irq(void *arg)
-{
- struct ix_queue *que = arg;
- struct adapter *adapter = que->adapter;
- struct ixgbe_hw *hw = &adapter->hw;
- struct tx_ring *txr = adapter->tx_rings;
- bool more_tx, more_rx;
- u32 reg_eicr, loop = MAX_LOOP;
-
-
- reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-
- ++que->irqs;
- if (reg_eicr == 0) {
- ixgbe_enable_intr(adapter);
- return;
- }
-
- more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
-
- IXGBE_TX_LOCK(txr);
- do {
- more_tx = ixgbe_txeof(txr);
- } while (loop-- && more_tx);
- IXGBE_TX_UNLOCK(txr);
-
- if (more_rx || more_tx)
- taskqueue_enqueue(que->tq, &que->que_task);
-
- /* Check for fan failure */
- if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
- device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
- "REPLACE IMMEDIATELY!!\n");
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
- }
-
- /* Link status change */
- if (reg_eicr & IXGBE_EICR_LSC)
- taskqueue_enqueue(adapter->tq, &adapter->link_task);
-
- ixgbe_enable_intr(adapter);
- return;
-}
-
-
-/*********************************************************************
- *
- * MSIX Queue Interrupt Service routine
- *
- **********************************************************************/
-void
-ixgbe_msix_que(void *arg)
-{
- struct ix_queue *que = arg;
- struct adapter *adapter = que->adapter;
- struct tx_ring *txr = que->txr;
- struct rx_ring *rxr = que->rxr;
- bool more_tx, more_rx;
- u32 newitr = 0;
-
- ++que->irqs;
-
- more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
-
- IXGBE_TX_LOCK(txr);
- more_tx = ixgbe_txeof(txr);
- /*
- ** Make certain that if the stack
- ** has anything queued the task gets
- ** scheduled to handle it.
- */
-#if __FreeBSD_version < 800000
- if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
-#else
- if (!drbr_empty(adapter->ifp, txr->br))
-#endif
- more_tx = 1;
- IXGBE_TX_UNLOCK(txr);
-
- /* Do AIM now? */
-
- if (ixgbe_enable_aim == FALSE)
- goto no_calc;
- /*
- ** Do Adaptive Interrupt Moderation:
- ** - Write out last calculated setting
- ** - Calculate based on average size over
- ** the last interval.
- */
- if (que->eitr_setting)
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_EITR(que->msix), que->eitr_setting);
-
- que->eitr_setting = 0;
-
- /* Idle, do nothing */
- if ((txr->bytes == 0) && (rxr->bytes == 0))
- goto no_calc;
-
- if ((txr->bytes) && (txr->packets))
- newitr = txr->bytes/txr->packets;
- if ((rxr->bytes) && (rxr->packets))
- newitr = max(newitr,
- (rxr->bytes / rxr->packets));
- newitr += 24; /* account for hardware frame, crc */
-
- /* set an upper boundary */
- newitr = min(newitr, 3000);
-
- /* Be nice to the mid range */
- if ((newitr > 300) && (newitr < 1200))
- newitr = (newitr / 3);
- else
- newitr = (newitr / 2);
-
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- newitr |= newitr << 16;
- else
- newitr |= IXGBE_EITR_CNT_WDIS;
-
- /* save for next interrupt */
- que->eitr_setting = newitr;
-
- /* Reset state */
- txr->bytes = 0;
- txr->packets = 0;
- rxr->bytes = 0;
- rxr->packets = 0;
-
-no_calc:
- if (more_tx || more_rx)
- taskqueue_enqueue(que->tq, &que->que_task);
- else /* Reenable this interrupt */
- ixgbe_enable_queue(adapter, que->msix);
- return;
-}
-
-
-static void
-ixgbe_msix_link(void *arg)
-{
- struct adapter *adapter = arg;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 reg_eicr;
-
- ++adapter->link_irq;
-
- /* First get the cause */
- reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
- /* Clear interrupt with write */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
-
- /* Link status change */
- if (reg_eicr & IXGBE_EICR_LSC)
- taskqueue_enqueue(adapter->tq, &adapter->link_task);
-
- if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
-#ifdef IXGBE_FDIR
- if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
- /* This is probably overkill :) */
- if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
- return;
- /* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
- /* Turn off the interface */
- adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
- } else
-#endif
- if (reg_eicr & IXGBE_EICR_ECC) {
- device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
- "Please Reboot!!\n");
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
- } else
-
- if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
- /* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
- taskqueue_enqueue(adapter->tq, &adapter->msf_task);
- } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
- /* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
- taskqueue_enqueue(adapter->tq, &adapter->mod_task);
- }
- }
-
- /* Check for fan failure */
- if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
- (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
- device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
- "REPLACE IMMEDIATELY!!\n");
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
- }
-
- /* Check for over temp condition */
- if ((hw->mac.type == ixgbe_mac_X540) &&
- (reg_eicr & IXGBE_EICR_GPI_SDP0)) {
- device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
- "PHY IS SHUT DOWN!!\n");
- device_printf(adapter->dev, "System shutdown required\n");
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
- }
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
- return;
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called whenever the user queries the status of
- * the interface using ifconfig.
- *
- **********************************************************************/
-static void
-ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
-{
- struct adapter *adapter = ifp->if_softc;
-
- INIT_DEBUGOUT("ixgbe_media_status: begin");
- IXGBE_CORE_LOCK(adapter);
- ixgbe_update_link_status(adapter);
-
- ifmr->ifm_status = IFM_AVALID;
- ifmr->ifm_active = IFM_ETHER;
-
- if (!adapter->link_active) {
- IXGBE_CORE_UNLOCK(adapter);
- return;
- }
-
- ifmr->ifm_status |= IFM_ACTIVE;
-
- switch (adapter->link_speed) {
- case IXGBE_LINK_SPEED_100_FULL:
- ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
- break;
- case IXGBE_LINK_SPEED_10GB_FULL:
- ifmr->ifm_active |= adapter->optics | IFM_FDX;
- break;
- }
-
- IXGBE_CORE_UNLOCK(adapter);
-
- return;
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called when the user changes speed/duplex using
- * media/mediopt option with ifconfig.
- *
- **********************************************************************/
-static int
-ixgbe_media_change(struct ifnet * ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ifmedia *ifm = &adapter->media;
-
- INIT_DEBUGOUT("ixgbe_media_change: begin");
-
- if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
- return (EINVAL);
-
- switch (IFM_SUBTYPE(ifm->ifm_media)) {
- case IFM_AUTO:
- adapter->hw.phy.autoneg_advertised =
- IXGBE_LINK_SPEED_100_FULL |
- IXGBE_LINK_SPEED_1GB_FULL |
- IXGBE_LINK_SPEED_10GB_FULL;
- break;
- default:
- device_printf(adapter->dev, "Only auto media type\n");
- return (EINVAL);
- }
-
- return (0);
-}
-
-/*********************************************************************
- *
- * This routine maps the mbufs to tx descriptors, allowing the
- * TX engine to transmit the packets.
- * - return 0 on success, positive on failure
- *
- **********************************************************************/
-
-static int
-ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
-{
- struct adapter *adapter = txr->adapter;
- u32 olinfo_status = 0, cmd_type_len;
- u32 paylen = 0;
- int i, j, error, nsegs;
- int first, last = 0;
- struct mbuf *m_head;
- bus_dma_segment_t segs[adapter->num_segs];
- bus_dmamap_t map;
- struct ixgbe_tx_buf *txbuf;
- union ixgbe_adv_tx_desc *txd = NULL;
-
- m_head = *m_headp;
-
- /* Basic descriptor defines */
- cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
-
- if (m_head->m_flags & M_VLANTAG)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
-
- /*
- * Important to capture the first descriptor
- * used because it will contain the index of
- * the one we tell the hardware to report back
- */
- first = txr->next_avail_desc;
- txbuf = &txr->tx_buffers[first];
- map = txbuf->map;
-
- /*
- * Map the packet for DMA.
- */
- error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (error == EFBIG) {
- struct mbuf *m;
-
- m = m_defrag(*m_headp, M_DONTWAIT);
- if (m == NULL) {
- adapter->mbuf_defrag_failed++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (ENOBUFS);
- }
- *m_headp = m;
-
- /* Try it again */
- error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (error == ENOMEM) {
- adapter->no_tx_dma_setup++;
- return (error);
- } else if (error != 0) {
- adapter->no_tx_dma_setup++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
- } else if (error == ENOMEM) {
- adapter->no_tx_dma_setup++;
- return (error);
- } else if (error != 0) {
- adapter->no_tx_dma_setup++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
-
- /* Make certain there are enough descriptors */
- if (nsegs > txr->tx_avail - 2) {
- txr->no_desc_avail++;
- error = ENOBUFS;
- goto xmit_fail;
- }
- m_head = *m_headp;
-
- /*
- ** Set up the appropriate offload context
- ** this becomes the first descriptor of
- ** a packet.
- */
- if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
- if (ixgbe_tso_setup(txr, m_head, &paylen)) {
- cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
- olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
- olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
- olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
- ++adapter->tso_tx;
- } else
- return (ENXIO);
- } else if (ixgbe_tx_ctx_setup(txr, m_head))
- olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
-
-#ifdef IXGBE_IEEE1588
-	/*
-	** Hedged placeholder: detection will move to an mbuf tag.
-	** MTAG_IXGBE/MTAG_IXGBE_TSTAMP are illustrative, undefined names.
-	*/
-	if (m_tag_locate(m_head, MTAG_IXGBE, MTAG_IXGBE_TSTAMP, NULL))
-		cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
-#endif
-
-#ifdef IXGBE_FDIR
- /* Do the flow director magic */
- if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
- ++txr->atr_count;
- if (txr->atr_count >= atr_sample_rate) {
- ixgbe_atr(txr, m_head);
- txr->atr_count = 0;
- }
- }
-#endif
- /* Record payload length */
- if (paylen == 0)
- olinfo_status |= m_head->m_pkthdr.len <<
- IXGBE_ADVTXD_PAYLEN_SHIFT;
-
- i = txr->next_avail_desc;
- for (j = 0; j < nsegs; j++) {
- bus_size_t seglen;
- bus_addr_t segaddr;
-
- txbuf = &txr->tx_buffers[i];
- txd = &txr->tx_base[i];
- seglen = segs[j].ds_len;
- segaddr = htole64(segs[j].ds_addr);
-
- txd->read.buffer_addr = segaddr;
- txd->read.cmd_type_len = htole32(txr->txd_cmd |
- cmd_type_len |seglen);
- txd->read.olinfo_status = htole32(olinfo_status);
- last = i; /* descriptor that will get completion IRQ */
-
- if (++i == adapter->num_tx_desc)
- i = 0;
-
- txbuf->m_head = NULL;
- txbuf->eop_index = -1;
- }
-
- txd->read.cmd_type_len |=
- htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
- txr->tx_avail -= nsegs;
- txr->next_avail_desc = i;
-
- txbuf->m_head = m_head;
- /* Swap the dma map between the first and last descriptor */
- txr->tx_buffers[first].map = txbuf->map;
- txbuf->map = map;
- bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
-
- /* Set the index of the descriptor that will be marked done */
- txbuf = &txr->tx_buffers[first];
- txbuf->eop_index = last;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /*
- * Advance the Transmit Descriptor Tail (Tdt), this tells the
- * hardware that this frame is available to transmit.
- */
- ++txr->total_packets;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
-
- return (0);
-
-xmit_fail:
- bus_dmamap_unload(txr->txtag, txbuf->map);
- return (error);
-
-}
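-
-/*
-** Illustrative sketch (editorial note, not driver code): the dmamap
-** swap near the end of ixgbe_xmit() keeps the loaded map with the
-** buffer that owns the mbuf:
-**
-**	map = tx_buffers[first].map;	 loaded for this mbuf
-**	... write descriptors; 'last' is the EOP slot ...
-**	tx_buffers[first].map = tx_buffers[last].map;
-**	tx_buffers[last].map = map;
-**
-** ixgbe_txeof() unloads the map found on the EOP slot when the
-** packet completes, so every load is paired with one unload.
-*/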
-
-static void
-ixgbe_set_promisc(struct adapter *adapter)
-{
- u_int32_t reg_rctl;
- struct ifnet *ifp = adapter->ifp;
-
- reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
- reg_rctl &= (~IXGBE_FCTRL_UPE);
- reg_rctl &= (~IXGBE_FCTRL_MPE);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
-
- if (ifp->if_flags & IFF_PROMISC) {
- reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
- } else if (ifp->if_flags & IFF_ALLMULTI) {
- reg_rctl |= IXGBE_FCTRL_MPE;
- reg_rctl &= ~IXGBE_FCTRL_UPE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
- }
- return;
-}
-
-
-/*********************************************************************
- * Multicast Update
- *
- * This routine is called whenever multicast address list is updated.
- *
- **********************************************************************/
-#define IXGBE_RAR_ENTRIES 16
-
-static void
-ixgbe_set_multi(struct adapter *adapter)
-{
- u32 fctrl;
- u8 *mta;
- u8 *update_ptr;
- struct ifmultiaddr *ifma;
- int mcnt = 0;
- struct ifnet *ifp = adapter->ifp;
-
- IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
-
- mta = adapter->mta;
- bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
- MAX_NUM_MULTICAST_ADDRESSES);
-
- fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
- if (ifp->if_flags & IFF_PROMISC)
- fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- else if (ifp->if_flags & IFF_ALLMULTI) {
- fctrl |= IXGBE_FCTRL_MPE;
- fctrl &= ~IXGBE_FCTRL_UPE;
- } else
- fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
-
-#if __FreeBSD_version < 800000
- IF_ADDR_LOCK(ifp);
-#else
- if_maddr_rlock(ifp);
-#endif
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
- &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
- IXGBE_ETH_LENGTH_OF_ADDRESS);
- mcnt++;
- }
-#if __FreeBSD_version < 800000
- IF_ADDR_UNLOCK(ifp);
-#else
- if_maddr_runlock(ifp);
-#endif
-
- update_ptr = mta;
- ixgbe_update_mc_addr_list(&adapter->hw,
- update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
-
- return;
-}
-
-/*
- * This is an iterator function needed by the multicast
- * shared code. It feeds the shared-code routine the
- * addresses from ixgbe_set_multi()'s array one at a time.
- */
-static u8 *
-ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
-{
- u8 *addr = *update_ptr;
- u8 *newptr;
- *vmdq = 0;
-
- newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
- *update_ptr = newptr;
- return addr;
-}
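-
-/*
-** Illustrative sketch (assumed shared-code behavior, not taken from
-** this driver): ixgbe_update_mc_addr_list() presumably walks the
-** flat address array by calling the iterator once per entry:
-**
-**	for (i = 0; i < mc_addr_count; i++) {
-**		addr = next_mc_addr(hw, &update_ptr, &vmdq);
-**		(program addr into the MTA)
-**	}
-**
-** next_mc_addr is a placeholder name for the function pointer passed
-** in; the iterator only returns the current address and advances
-** update_ptr by one MAC address length.
-*/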
-
-
-/*********************************************************************
- * Timer routine
- *
- * This routine checks for link status, updates statistics,
- * and runs the watchdog check.
- *
- **********************************************************************/
-
-static void
-ixgbe_local_timer(void *arg)
-{
- struct adapter *adapter = arg;
- device_t dev = adapter->dev;
- struct tx_ring *txr = adapter->tx_rings;
-
- mtx_assert(&adapter->core_mtx, MA_OWNED);
-
- /* Check for pluggable optics */
- if (adapter->sfp_probe)
- if (!ixgbe_sfp_probe(adapter))
- goto out; /* Nothing to do */
-
- ixgbe_update_link_status(adapter);
- ixgbe_update_stats_counters(adapter);
-
- /*
- * If the interface has been paused
- * then don't do the watchdog check
- */
- if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
- goto out;
-
- /*
- ** Check status on the TX queues for a hang
- */
- for (int i = 0; i < adapter->num_queues; i++, txr++)
- if (txr->queue_status == IXGBE_QUEUE_HUNG)
- goto hung;
-
-out:
- ixgbe_rearm_queues(adapter, adapter->que_mask);
- callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
- return;
-
-hung:
- device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
- device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
- IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
- IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
-	device_printf(dev,"TX(%d) desc avail = %d, "
-	    "Next TX to Clean = %d\n",
- txr->me, txr->tx_avail, txr->next_to_clean);
- adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- adapter->watchdog_events++;
- ixgbe_init_locked(adapter);
-}
-
-/*
-** Note: this routine updates the OS on the link state
-** the real check of the hardware only happens with
-** a link interrupt.
-*/
-static void
-ixgbe_update_link_status(struct adapter *adapter)
-{
- struct ifnet *ifp = adapter->ifp;
- struct tx_ring *txr = adapter->tx_rings;
- device_t dev = adapter->dev;
-
-
- if (adapter->link_up){
- if (adapter->link_active == FALSE) {
- if (bootverbose)
- device_printf(dev,"Link is up %d Gbps %s \n",
- ((adapter->link_speed == 128)? 10:1),
- "Full Duplex");
- adapter->link_active = TRUE;
- if_link_state_change(ifp, LINK_STATE_UP);
- }
- } else { /* Link down */
- if (adapter->link_active == TRUE) {
- if (bootverbose)
- device_printf(dev,"Link is Down\n");
- if_link_state_change(ifp, LINK_STATE_DOWN);
- adapter->link_active = FALSE;
- for (int i = 0; i < adapter->num_queues;
- i++, txr++)
- txr->queue_status = IXGBE_QUEUE_IDLE;
- }
- }
-
- return;
-}
-
-
-/*********************************************************************
- *
- * This routine disables all traffic on the adapter by issuing a
- * global reset on the MAC and deallocates TX/RX buffers.
- *
- **********************************************************************/
-
-static void
-ixgbe_stop(void *arg)
-{
- struct ifnet *ifp;
- struct adapter *adapter = arg;
- struct ixgbe_hw *hw = &adapter->hw;
- ifp = adapter->ifp;
-
- mtx_assert(&adapter->core_mtx, MA_OWNED);
-
- INIT_DEBUGOUT("ixgbe_stop: begin\n");
- ixgbe_disable_intr(adapter);
-
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-
- ixgbe_reset_hw(hw);
- hw->adapter_stopped = FALSE;
- ixgbe_stop_adapter(hw);
- /* Turn off the laser */
- if (hw->phy.multispeed_fiber)
- ixgbe_disable_tx_laser(hw);
- callout_stop(&adapter->timer);
-
- /* reprogram the RAR[0] in case user changed it. */
- ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
-
- return;
-}
-
-
-/*********************************************************************
- *
- * Determine hardware revision.
- *
- **********************************************************************/
-static void
-ixgbe_identify_hardware(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ixgbe_hw *hw = &adapter->hw;
-
- /* Save off the information about this board */
- hw->vendor_id = pci_get_vendor(dev);
- hw->device_id = pci_get_device(dev);
- hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
- hw->subsystem_vendor_id =
- pci_read_config(dev, PCIR_SUBVEND_0, 2);
- hw->subsystem_device_id =
- pci_read_config(dev, PCIR_SUBDEV_0, 2);
-
- /* We need this here to set the num_segs below */
- ixgbe_set_mac_type(hw);
-
- /* Pick up the 82599 and VF settings */
- if (hw->mac.type != ixgbe_mac_82598EB) {
- hw->phy.smart_speed = ixgbe_smart_speed;
- adapter->num_segs = IXGBE_82599_SCATTER;
- } else
- adapter->num_segs = IXGBE_82598_SCATTER;
-
- return;
-}
-
-/*********************************************************************
- *
- * Determine optic type
- *
- **********************************************************************/
-static void
-ixgbe_setup_optics(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int layer;
-
- layer = ixgbe_get_supported_physical_layer(hw);
-
- if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
- adapter->optics = IFM_10G_T;
- return;
- }
-
- if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
- adapter->optics = IFM_1000_T;
- return;
- }
-
- if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
- IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
- adapter->optics = IFM_10G_LR;
- return;
- }
-
- if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
- adapter->optics = IFM_10G_SR;
- return;
- }
-
- if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
- adapter->optics = IFM_10G_TWINAX;
- return;
- }
-
- if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
- IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
- adapter->optics = IFM_10G_CX4;
- return;
- }
-
- /* If we get here just set the default */
- adapter->optics = IFM_ETHER | IFM_AUTO;
- return;
-}
-
-/*********************************************************************
- *
- * Setup the Legacy or MSI Interrupt handler
- *
- **********************************************************************/
-static int
-ixgbe_allocate_legacy(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que = adapter->queues;
- int error, rid = 0;
-
- /* MSI RID at 1 */
- if (adapter->msix == 1)
- rid = 1;
-
- /* We allocate a single interrupt resource */
- adapter->res = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
- if (adapter->res == NULL) {
- device_printf(dev, "Unable to allocate bus resource: "
- "interrupt\n");
- return (ENXIO);
- }
-
- /*
- * Try allocating a fast interrupt and the associated deferred
- * processing contexts.
- */
- TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
- que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
- device_get_nameunit(adapter->dev));
-
- /* Tasklets for Link, SFP and Multispeed Fiber */
- TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
- TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
- TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
-#ifdef IXGBE_FDIR
- TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
-#endif
- adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
- taskqueue_thread_enqueue, &adapter->tq);
- taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
- device_get_nameunit(adapter->dev));
-
- if ((error = bus_setup_intr(dev, adapter->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
- que, &adapter->tag)) != 0) {
- device_printf(dev, "Failed to register fast interrupt "
- "handler: %d\n", error);
- taskqueue_free(que->tq);
- taskqueue_free(adapter->tq);
- que->tq = NULL;
- adapter->tq = NULL;
- return (error);
- }
- /* For simplicity in the handlers */
- adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
-
- return (0);
-}
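-
-/*
-** Illustrative sketch (hedged; ixgbe_legacy_irq's body is not shown
-** here): with the taskqueues above in place, the fast handler can
-** stay minimal and defer the real work, roughly:
-**
-**	ixgbe_legacy_irq(void *arg)
-**	{
-**		... read and mask the interrupt cause ...
-**		taskqueue_enqueue(que->tq, &que->que_task);
-**	}
-**
-** ixgbe_handle_que() then runs in the "%s ixq" thread started above.
-*/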
-
-
-/*********************************************************************
- *
- * Setup MSIX Interrupt resources and handlers
- *
- **********************************************************************/
-static int
-ixgbe_allocate_msix(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que = adapter->queues;
- int error, rid, vector = 0;
-
- for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
- rid = vector + 1;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (que->res == NULL) {
- device_printf(dev,"Unable to allocate"
- " bus resource: que interrupt [%d]\n", vector);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixgbe_msix_que, que, &que->tag);
- if (error) {
- que->res = NULL;
- device_printf(dev, "Failed to register QUE handler");
- return (error);
- }
-#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, que->res, que->tag, "que %d", i);
-#endif
- que->msix = vector;
- adapter->que_mask |= (u64)(1 << que->msix);
- /*
- ** Bind the msix vector, and thus the
- ** ring to the corresponding cpu.
- */
- if (adapter->num_queues > 1)
- bus_bind_intr(dev, que->res, i);
-
- TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
- que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(adapter->dev));
- }
-
- /* and Link */
- rid = vector + 1;
- adapter->res = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
- if (!adapter->res) {
- device_printf(dev,"Unable to allocate"
- " bus resource: Link interrupt [%d]\n", rid);
- return (ENXIO);
- }
- /* Set the link handler function */
- error = bus_setup_intr(dev, adapter->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixgbe_msix_link, adapter, &adapter->tag);
- if (error) {
- adapter->res = NULL;
- device_printf(dev, "Failed to register LINK handler");
- return (error);
- }
-#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, adapter->res, adapter->tag, "link");
-#endif
- adapter->linkvec = vector;
- /* Tasklets for Link, SFP and Multispeed Fiber */
- TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
- TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
- TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
-#ifdef IXGBE_FDIR
- TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
-#endif
- adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
- taskqueue_thread_enqueue, &adapter->tq);
- taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
- device_get_nameunit(adapter->dev));
-
- return (0);
-}
-
-/*
- * Setup either MSIX or MSI
- */
-static int
-ixgbe_setup_msix(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- int rid, want, queues, msgs;
-
- /* Override by tuneable */
- if (ixgbe_enable_msix == 0)
- goto msi;
-
- /* First try MSI/X */
- rid = PCIR_BAR(MSIX_82598_BAR);
- adapter->msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (!adapter->msix_mem) {
- rid += 4; /* 82599 maps in higher BAR */
- adapter->msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &rid, RF_ACTIVE);
- }
- if (!adapter->msix_mem) {
- /* May not be enabled */
- device_printf(adapter->dev,
-		    "Unable to map MSIX table\n");
- goto msi;
- }
-
- msgs = pci_msix_count(dev);
- if (msgs == 0) { /* system has msix disabled */
- bus_release_resource(dev, SYS_RES_MEMORY,
- rid, adapter->msix_mem);
- adapter->msix_mem = NULL;
- goto msi;
- }
-
- /* Figure out a reasonable auto config value */
- queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
-
-	if (ixgbe_num_queues != 0)
-		queues = ixgbe_num_queues;
-	/* Cap at 8 queues when autoconfiguring */
-	else if (queues > 8)
-		queues = 8;
-
- /*
- ** Want one vector (RX/TX pair) per queue
- ** plus an additional for Link.
- */
- want = queues + 1;
- if (msgs >= want)
- msgs = want;
- else {
- device_printf(adapter->dev,
- "MSIX Configuration Problem, "
-		    "%d vectors available but %d wanted!\n",
- msgs, want);
- return (0); /* Will go to Legacy setup */
- }
- if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
- device_printf(adapter->dev,
- "Using MSIX interrupts with %d vectors\n", msgs);
- adapter->num_queues = queues;
- return (msgs);
- }
-msi:
- msgs = pci_msi_count(dev);
- if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
- device_printf(adapter->dev,"Using an MSI interrupt\n");
- else
- device_printf(adapter->dev,"Using a Legacy interrupt\n");
- return (msgs);
-}
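-
-/*
-** Worked example of the vector accounting above: on an 8-cpu system
-** with 16 MSIX messages, queues = min(8, 16 - 1) = 8 (also the
-** autoconfig cap), want = 8 + 1 = 9, and pci_alloc_msix() is asked
-** for 9 vectors: one RX/TX pair per queue plus the link vector.
-*/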
-
-
-static int
-ixgbe_allocate_pci_resources(struct adapter *adapter)
-{
- int rid;
- device_t dev = adapter->dev;
-
- rid = PCIR_BAR(0);
- adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
-
- if (!(adapter->pci_mem)) {
- device_printf(dev,"Unable to allocate bus resource: memory\n");
- return (ENXIO);
- }
-
- adapter->osdep.mem_bus_space_tag =
- rman_get_bustag(adapter->pci_mem);
- adapter->osdep.mem_bus_space_handle =
- rman_get_bushandle(adapter->pci_mem);
- adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
-
- /* Legacy defaults */
- adapter->num_queues = 1;
- adapter->hw.back = &adapter->osdep;
-
- /*
- ** Now setup MSI or MSI/X, should
- ** return us the number of supported
- ** vectors. (Will be 1 for MSI)
- */
- adapter->msix = ixgbe_setup_msix(adapter);
- return (0);
-}
-
-static void
-ixgbe_free_pci_resources(struct adapter * adapter)
-{
- struct ix_queue *que = adapter->queues;
- device_t dev = adapter->dev;
- int rid, memrid;
-
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- memrid = PCIR_BAR(MSIX_82598_BAR);
- else
- memrid = PCIR_BAR(MSIX_82599_BAR);
-
- /*
-	** There is a slight possibility of a failure mode
-	** in attach that will result in entering this function
-	** before interrupt resources have been initialized, and
-	** in that case we do not want to execute the loops below.
-	** We can detect this reliably by the state of the adapter's
-	** res pointer.
- */
- if (adapter->res == NULL)
- goto mem;
-
- /*
- ** Release all msix queue resources:
- */
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
- }
- if (que->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- }
-
-
- /* Clean the Legacy or Link interrupt last */
- if (adapter->linkvec) /* we are doing MSIX */
- rid = adapter->linkvec + 1;
- else
-		rid = (adapter->msix != 0) ? 1 : 0;
-
- if (adapter->tag != NULL) {
- bus_teardown_intr(dev, adapter->res, adapter->tag);
- adapter->tag = NULL;
- }
- if (adapter->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
-
-mem:
- if (adapter->msix)
- pci_release_msi(dev);
-
- if (adapter->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- memrid, adapter->msix_mem);
-
- if (adapter->pci_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(0), adapter->pci_mem);
-
- return;
-}
-
-/*********************************************************************
- *
- * Setup networking device structure and register an interface.
- *
- **********************************************************************/
-static int
-ixgbe_setup_interface(device_t dev, struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ifnet *ifp;
-
- INIT_DEBUGOUT("ixgbe_setup_interface: begin");
-
- ifp = adapter->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
- return (-1);
- }
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_mtu = ETHERMTU;
- ifp->if_baudrate = 1000000000;
- ifp->if_init = ixgbe_init;
- ifp->if_softc = adapter;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixgbe_ioctl;
- ifp->if_start = ixgbe_start;
-#if __FreeBSD_version >= 800000
- ifp->if_transmit = ixgbe_mq_start;
- ifp->if_qflush = ixgbe_qflush;
-#endif
- ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
-
- ether_ifattach(ifp, adapter->hw.mac.addr);
-
- adapter->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-
- /*
- * Tell the upper layer(s) we support long frames.
- */
- ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
-
- ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU;
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
- | IFCAP_VLAN_HWTSO
- | IFCAP_VLAN_MTU;
- ifp->if_capenable = ifp->if_capabilities;
-
-	/* Advertise LRO capability, but leave it disabled by default */
- ifp->if_capabilities |= IFCAP_LRO;
-
- /*
- ** Don't turn this on by default, if vlans are
- ** created on another pseudo device (eg. lagg)
- ** then vlan events are not passed thru, breaking
- ** operation, but with HW FILTER off it works. If
- ** using vlans directly on the ixgbe driver you can
- ** enable this and get full hardware tag filtering.
- */
- ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
-
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
- ixgbe_media_status);
- ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
- ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
- if (hw->device_id == IXGBE_DEV_ID_82598AT) {
- ifmedia_add(&adapter->media,
- IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
- ifmedia_add(&adapter->media,
- IFM_ETHER | IFM_1000_T, 0, NULL);
- }
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
-
- return (0);
-}
-
-static void
-ixgbe_config_link(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 autoneg, err = 0;
-	bool sfp, negotiate = FALSE;
-
- sfp = ixgbe_is_sfp(hw);
-
- if (sfp) {
- if (hw->phy.multispeed_fiber) {
- hw->mac.ops.setup_sfp(hw);
- ixgbe_enable_tx_laser(hw);
- taskqueue_enqueue(adapter->tq, &adapter->msf_task);
- } else
- taskqueue_enqueue(adapter->tq, &adapter->mod_task);
- } else {
- if (hw->mac.ops.check_link)
- err = ixgbe_check_link(hw, &autoneg,
- &adapter->link_up, FALSE);
- if (err)
- goto out;
- autoneg = hw->phy.autoneg_advertised;
- if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
- err = hw->mac.ops.get_link_capabilities(hw,
- &autoneg, &negotiate);
- if (err)
- goto out;
- if (hw->mac.ops.setup_link)
- err = hw->mac.ops.setup_link(hw, autoneg,
- negotiate, adapter->link_up);
- }
-out:
- return;
-}
-
-/********************************************************************
- * Manage DMA'able memory.
- *******************************************************************/
-static void
-ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
-{
- if (error)
- return;
- *(bus_addr_t *) arg = segs->ds_addr;
- return;
-}
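-
-/*
-** Note on the callback contract above: bus_dmamap_load() invokes the
-** callback once the mapping is resolved, handing over the segment
-** list; because the tag used by ixgbe_dma_malloc() below fixes
-** nsegments at 1, segs[0].ds_addr alone is the physical base of the
-** allocation, stored through the bus_addr_t pointer passed as 'arg'.
-*/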
-
-static int
-ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
- struct ixgbe_dma_alloc *dma, int mapflags)
-{
- device_t dev = adapter->dev;
- int r;
-
- r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
- DBA_ALIGN, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- size, /* maxsize */
- 1, /* nsegments */
- size, /* maxsegsize */
- BUS_DMA_ALLOCNOW, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &dma->dma_tag);
- if (r != 0) {
- device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
- "error %u\n", r);
- goto fail_0;
- }
- r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
- BUS_DMA_NOWAIT, &dma->dma_map);
- if (r != 0) {
- device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
- "error %u\n", r);
- goto fail_1;
- }
- r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
- size,
- ixgbe_dmamap_cb,
- &dma->dma_paddr,
- mapflags | BUS_DMA_NOWAIT);
- if (r != 0) {
- device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
- "error %u\n", r);
- goto fail_2;
- }
- dma->dma_size = size;
- return (0);
-fail_2:
- bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
-fail_1:
- bus_dma_tag_destroy(dma->dma_tag);
-fail_0:
- dma->dma_map = NULL;
- dma->dma_tag = NULL;
- return (r);
-}
-
-static void
-ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
-{
- bus_dmamap_sync(dma->dma_tag, dma->dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(dma->dma_tag, dma->dma_map);
- bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
- bus_dma_tag_destroy(dma->dma_tag);
-}
-
-
-/*********************************************************************
- *
- * Allocate memory for the transmit and receive rings, and then
- * the descriptors associated with each, called only once at attach.
- *
- **********************************************************************/
-static int
-ixgbe_allocate_queues(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize, error = IXGBE_SUCCESS;
- int txconf = 0, rxconf = 0;
-
- /* First allocate the top level queue structs */
- if (!(adapter->queues =
- (struct ix_queue *) malloc(sizeof(struct ix_queue) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* First allocate the TX ring struct memory */
- if (!(adapter->tx_rings =
- (struct tx_ring *) malloc(sizeof(struct tx_ring) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate TX ring memory\n");
- error = ENOMEM;
- goto tx_fail;
- }
-
- /* Next allocate the RX */
- if (!(adapter->rx_rings =
- (struct rx_ring *) malloc(sizeof(struct rx_ring) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate RX ring memory\n");
- error = ENOMEM;
- goto rx_fail;
- }
-
- /* For the ring itself */
- tsize = roundup2(adapter->num_tx_desc *
- sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
-
- /*
- * Now set up the TX queues, txconf is needed to handle the
- * possibility that things fail midcourse and we need to
- * undo memory gracefully
- */
- for (int i = 0; i < adapter->num_queues; i++, txconf++) {
- /* Set up some basics */
- txr = &adapter->tx_rings[i];
- txr->adapter = adapter;
- txr->me = i;
-
- /* Initialize the TX side lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), txr->me);
- mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
-
- if (ixgbe_dma_malloc(adapter, tsize,
- &txr->txdma, BUS_DMA_NOWAIT)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
- txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
- bzero((void *)txr->tx_base, tsize);
-
- /* Now allocate transmit buffers for the ring */
- if (ixgbe_allocate_transmit_buffers(txr)) {
- device_printf(dev,
- "Critical Failure setting up transmit buffers\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
-#if __FreeBSD_version >= 800000
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
- M_WAITOK, &txr->tx_mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up buf ring\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
-#endif
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
- for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
- rxr = &adapter->rx_rings[i];
- /* Set up some basics */
- rxr->adapter = adapter;
- rxr->me = i;
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), rxr->me);
- mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (ixgbe_dma_malloc(adapter, rsize,
- &rxr->rxdma, BUS_DMA_NOWAIT)) {
- device_printf(dev,
- "Unable to allocate RxDescriptor memory\n");
- error = ENOMEM;
- goto err_rx_desc;
- }
- rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
- bzero((void *)rxr->rx_base, rsize);
-
-		/* Allocate receive buffers for the ring */
- if (ixgbe_allocate_receive_buffers(rxr)) {
- device_printf(dev,
- "Critical Failure setting up receive buffers\n");
- error = ENOMEM;
- goto err_rx_desc;
- }
- }
-
- /*
- ** Finally set up the queue holding structs
- */
- for (int i = 0; i < adapter->num_queues; i++) {
- que = &adapter->queues[i];
- que->adapter = adapter;
- que->txr = &adapter->tx_rings[i];
- que->rxr = &adapter->rx_rings[i];
- }
-
- return (0);
-
-err_rx_desc:
- for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
- ixgbe_dma_free(adapter, &rxr->rxdma);
-err_tx_desc:
- for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
- ixgbe_dma_free(adapter, &txr->txdma);
- free(adapter->rx_rings, M_DEVBUF);
-rx_fail:
- free(adapter->tx_rings, M_DEVBUF);
-tx_fail:
- free(adapter->queues, M_DEVBUF);
-fail:
- return (error);
-}
-
-/*********************************************************************
- *
- * Allocate memory for tx_buffer structures. The tx_buffer stores all
- * the information needed to transmit a packet on the wire. This is
- * called only once at attach, setup is done every reset.
- *
- **********************************************************************/
-static int
-ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- device_t dev = adapter->dev;
- struct ixgbe_tx_buf *txbuf;
- int error, i;
-
- /*
- * Setup DMA descriptor areas.
- */
-	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- IXGBE_TSO_SIZE, /* maxsize */
- adapter->num_segs, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &txr->txtag))) {
- device_printf(dev,"Unable to allocate TX DMA tag\n");
- goto fail;
- }
-
- if (!(txr->tx_buffers =
- (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
- adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* Create the descriptor buffer dma maps */
- txbuf = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
- error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
- if (error != 0) {
- device_printf(dev, "Unable to create TX DMA map\n");
- goto fail;
- }
- }
-
- return 0;
-fail:
-	/* We free all; this handles the case where we failed mid-setup */
- ixgbe_free_transmit_structures(adapter);
- return (error);
-}
-
-/*********************************************************************
- *
- * Initialize a transmit ring.
- *
- **********************************************************************/
-static void
-ixgbe_setup_transmit_ring(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_tx_buf *txbuf;
- int i;
-
- /* Clear the old ring contents */
- IXGBE_TX_LOCK(txr);
- bzero((void *)txr->tx_base,
- (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
- /* Reset indices */
- txr->next_avail_desc = 0;
- txr->next_to_clean = 0;
-
- /* Free any existing tx buffers. */
- txbuf = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
- if (txbuf->m_head != NULL) {
- bus_dmamap_sync(txr->txtag, txbuf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag, txbuf->map);
- m_freem(txbuf->m_head);
- txbuf->m_head = NULL;
- }
- /* Clear the EOP index */
- txbuf->eop_index = -1;
- }
-
-#ifdef IXGBE_FDIR
- /* Set the rate at which we sample packets */
- if (adapter->hw.mac.type != ixgbe_mac_82598EB)
- txr->atr_sample = atr_sample_rate;
-#endif
-
- /* Set number of descriptors available */
- txr->tx_avail = adapter->num_tx_desc;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- IXGBE_TX_UNLOCK(txr);
-}
-
-/*********************************************************************
- *
- * Initialize all transmit rings.
- *
- **********************************************************************/
-static int
-ixgbe_setup_transmit_structures(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++)
- ixgbe_setup_transmit_ring(txr);
-
- return (0);
-}
-
-/*********************************************************************
- *
- * Enable transmit unit.
- *
- **********************************************************************/
-static void
-ixgbe_initialize_transmit_units(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
- struct ixgbe_hw *hw = &adapter->hw;
-
- /* Setup the Base and Length of the Tx Descriptor Ring */
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- u64 tdba = txr->txdma.dma_paddr;
- u32 txctrl;
-
- IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
- (tdba & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
- adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
-
- /* Setup the HW Tx Head and Tail descriptor pointers */
- IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
-
- /* Setup Transmit Descriptor Cmd Settings */
- txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
- txr->queue_status = IXGBE_QUEUE_IDLE;
-
- /* Disable Head Writeback */
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- default:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- break;
- }
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- default:
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
- break;
- }
-
- }
-
- if (hw->mac.type != ixgbe_mac_82598EB) {
- u32 dmatxctl, rttdcs;
- dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
- dmatxctl |= IXGBE_DMATXCTL_TE;
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
- /* Disable arbiter to set MTQC */
- rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
- rttdcs |= IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
- rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
- }
-
- return;
-}
-
-/*********************************************************************
- *
- * Free all transmit rings.
- *
- **********************************************************************/
-static void
-ixgbe_free_transmit_structures(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXGBE_TX_LOCK(txr);
- ixgbe_free_transmit_buffers(txr);
- ixgbe_dma_free(adapter, &txr->txdma);
- IXGBE_TX_UNLOCK(txr);
- IXGBE_TX_LOCK_DESTROY(txr);
- }
- free(adapter->tx_rings, M_DEVBUF);
-}
-
-/*********************************************************************
- *
- * Free transmit ring related data structures.
- *
- **********************************************************************/
-static void
-ixgbe_free_transmit_buffers(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_tx_buf *tx_buffer;
- int i;
-
- INIT_DEBUGOUT("free_transmit_ring: begin");
-
- if (txr->tx_buffers == NULL)
- return;
-
- tx_buffer = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
- if (tx_buffer->m_head != NULL) {
- bus_dmamap_sync(txr->txtag, tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- if (tx_buffer->map != NULL) {
- bus_dmamap_destroy(txr->txtag,
- tx_buffer->map);
- tx_buffer->map = NULL;
- }
- } else if (tx_buffer->map != NULL) {
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- bus_dmamap_destroy(txr->txtag,
- tx_buffer->map);
- tx_buffer->map = NULL;
- }
- }
-#if __FreeBSD_version >= 800000
- if (txr->br != NULL)
- buf_ring_free(txr->br, M_DEVBUF);
-#endif
- if (txr->tx_buffers != NULL) {
- free(txr->tx_buffers, M_DEVBUF);
- txr->tx_buffers = NULL;
- }
- if (txr->txtag != NULL) {
- bus_dma_tag_destroy(txr->txtag);
- txr->txtag = NULL;
- }
- return;
-}
-
-/*********************************************************************
- *
- * Advanced Context Descriptor setup for VLAN or CSUM
- *
- **********************************************************************/
-
-static boolean_t
-ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_adv_tx_context_desc *TXD;
- struct ixgbe_tx_buf *tx_buffer;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- struct ether_vlan_header *eh;
- struct ip *ip;
- struct ip6_hdr *ip6;
- int ehdrlen, ip_hlen = 0;
- u16 etype;
- u8 ipproto = 0;
- bool offload = TRUE;
- int ctxd = txr->next_avail_desc;
- u16 vtag = 0;
-
-
- if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
- offload = FALSE;
-
- tx_buffer = &txr->tx_buffers[ctxd];
- TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
- /*
- ** In advanced descriptors the vlan tag must
- ** be placed into the descriptor itself.
- */
- if (mp->m_flags & M_VLANTAG) {
- vtag = htole16(mp->m_pkthdr.ether_vtag);
- vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- } else if (offload == FALSE)
- return FALSE;
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- } else {
- etype = ntohs(eh->evl_encap_proto);
- ehdrlen = ETHER_HDR_LEN;
- }
-
- /* Set the ether header length */
- vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
-
- switch (etype) {
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + ehdrlen);
- ip_hlen = ip->ip_hl << 2;
- ipproto = ip->ip_p;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- break;
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- ip_hlen = sizeof(struct ip6_hdr);
- ipproto = ip6->ip6_nxt;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
- break;
- default:
- offload = FALSE;
- break;
- }
-
- vlan_macip_lens |= ip_hlen;
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
-
- switch (ipproto) {
- case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & CSUM_TCP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
-
- case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags & CSUM_UDP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
- break;
-
-#if __FreeBSD_version >= 800000
- case IPPROTO_SCTP:
- if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- break;
-#endif
- default:
- offload = FALSE;
- break;
- }
-
- /* Now copy bits into descriptor */
- TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
- TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
- TXD->seqnum_seed = htole32(0);
- TXD->mss_l4len_idx = htole32(0);
-
- tx_buffer->m_head = NULL;
- tx_buffer->eop_index = -1;
-
- /* We've consumed the first desc, adjust counters */
- if (++ctxd == adapter->num_tx_desc)
- ctxd = 0;
- txr->next_avail_desc = ctxd;
- --txr->tx_avail;
-
- return (offload);
-}
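-
-/*
-** Worked example of the field packing above (assuming the customary
-** shift values: VLAN_SHIFT 16, MACLEN_SHIFT 9): an untagged IPv4/TCP
-** frame with a 14-byte Ethernet header and 20-byte IP header yields
-**
-**	vlan_macip_lens = (0 << 16) | (14 << 9) | 20
-**
-** which tells the hardware where the L3 and L4 headers begin for
-** checksum insertion.
-*/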
-
-/**********************************************************************
- *
- * Setup work for hardware segmentation offload (TSO) on
- * adapters using advanced tx descriptors
- *
- **********************************************************************/
-static boolean_t
-ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_adv_tx_context_desc *TXD;
- struct ixgbe_tx_buf *tx_buffer;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- u32 mss_l4len_idx = 0;
- u16 vtag = 0;
- int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
- struct ether_vlan_header *eh;
- struct ip *ip;
- struct tcphdr *th;
-
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- else
- ehdrlen = ETHER_HDR_LEN;
-
- /* Ensure we have at least the IP+TCP header in the first mbuf. */
- if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
- return FALSE;
-
- ctxd = txr->next_avail_desc;
- tx_buffer = &txr->tx_buffers[ctxd];
- TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
- ip = (struct ip *)(mp->m_data + ehdrlen);
- if (ip->ip_p != IPPROTO_TCP)
- return FALSE; /* 0 */
- ip->ip_sum = 0;
- ip_hlen = ip->ip_hl << 2;
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
- tcp_hlen = th->th_off << 2;
- hdrlen = ehdrlen + ip_hlen + tcp_hlen;
-
- /* This is used in the transmit desc in encap */
- *paylen = mp->m_pkthdr.len - hdrlen;
-
- /* VLAN MACLEN IPLEN */
- if (mp->m_flags & M_VLANTAG) {
- vtag = htole16(mp->m_pkthdr.ether_vtag);
- vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- }
-
- vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= ip_hlen;
- TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
-
- /* ADV DTYPE TUCMD */
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
-
-
- /* MSS L4LEN IDX */
- mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
- TXD->mss_l4len_idx = htole32(mss_l4len_idx);
-
- TXD->seqnum_seed = htole32(0);
- tx_buffer->m_head = NULL;
- tx_buffer->eop_index = -1;
-
- if (++ctxd == adapter->num_tx_desc)
- ctxd = 0;
-
- txr->tx_avail--;
- txr->next_avail_desc = ctxd;
- return TRUE;
-}
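-
-/*
-** Worked example of the header arithmetic above: with a 14-byte
-** Ethernet, 20-byte IP and 20-byte TCP header, hdrlen = 54 and
-** *paylen = m_pkthdr.len - 54. The pseudo-header sum seeded into
-** th_sum deliberately omits the TCP length, since the hardware
-** recomputes the checksum per generated segment.
-*/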
-
-#ifdef IXGBE_FDIR
-/*
-** This routine parses packet headers so that Flow
-** Director can make a hashed filter table entry
-** allowing traffic flows to be identified and kept
-** on the same cpu. Parsing every packet would be a
-** performance hit, so we only sample one in every
-** IXGBE_FDIR_RATE packets.
-*/
-static void
-ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
-{
- struct adapter *adapter = txr->adapter;
- struct ix_queue *que;
- struct ip *ip;
- struct tcphdr *th;
- struct udphdr *uh;
- struct ether_vlan_header *eh;
- union ixgbe_atr_hash_dword input = {.dword = 0};
- union ixgbe_atr_hash_dword common = {.dword = 0};
- int ehdrlen, ip_hlen;
- u16 etype;
-
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- etype = eh->evl_proto;
- } else {
- ehdrlen = ETHER_HDR_LEN;
- etype = eh->evl_encap_proto;
- }
-
- /* Only handling IPv4 */
- if (etype != htons(ETHERTYPE_IP))
- return;
-
- ip = (struct ip *)(mp->m_data + ehdrlen);
- ip_hlen = ip->ip_hl << 2;
-
- /* check if we're UDP or TCP */
- switch (ip->ip_p) {
- case IPPROTO_TCP:
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- /* src and dst are inverted */
- common.port.dst ^= th->th_sport;
- common.port.src ^= th->th_dport;
- input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
- break;
- case IPPROTO_UDP:
- uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
- /* src and dst are inverted */
- common.port.dst ^= uh->uh_sport;
- common.port.src ^= uh->uh_dport;
- input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
- break;
- default:
- return;
- }
-
- input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
- if (mp->m_pkthdr.ether_vtag)
- common.flex_bytes ^= htons(ETHERTYPE_VLAN);
- else
- common.flex_bytes ^= etype;
- common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
-
- que = &adapter->queues[txr->me];
- /*
- ** This assumes the Rx queue and Tx
- ** queue are bound to the same CPU
- */
- ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
- input, common, que->msix);
-}
-#endif /* IXGBE_FDIR */
-
-/**********************************************************************
- *
- * Examine each tx_buffer in the used queue. If the hardware is done
- * processing the packet then free associated resources. The
- * tx_buffer is put back on the free queue.
- *
- **********************************************************************/
-static boolean_t
-ixgbe_txeof(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ifnet *ifp = adapter->ifp;
- u32 first, last, done, processed;
- struct ixgbe_tx_buf *tx_buffer;
- struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
-
- mtx_assert(&txr->tx_mtx, MA_OWNED);
-
- if (txr->tx_avail == adapter->num_tx_desc) {
- txr->queue_status = IXGBE_QUEUE_IDLE;
- return FALSE;
- }
-
- processed = 0;
- first = txr->next_to_clean;
- tx_buffer = &txr->tx_buffers[first];
- /* For cleanup we just use legacy struct */
- tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
- last = tx_buffer->eop_index;
- if (last == -1)
- return FALSE;
- eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
-
- /*
- ** Get the index of the first descriptor
- ** BEYOND the EOP and call that 'done'.
- ** I do this so the comparison in the
- ** inner while loop below can be simple
- */
- if (++last == adapter->num_tx_desc) last = 0;
- done = last;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
- /*
- ** Only the EOP descriptor of a packet now has the DD
- ** bit set, this is what we look for...
- */
- while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
- /* We clean the range of the packet */
- while (first != done) {
- tx_desc->upper.data = 0;
- tx_desc->lower.data = 0;
- tx_desc->buffer_addr = 0;
- ++txr->tx_avail;
- ++processed;
-
- if (tx_buffer->m_head) {
- txr->bytes +=
- tx_buffer->m_head->m_pkthdr.len;
- bus_dmamap_sync(txr->txtag,
- tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- tx_buffer->map = NULL;
- }
- tx_buffer->eop_index = -1;
- txr->watchdog_time = ticks;
-
- if (++first == adapter->num_tx_desc)
- first = 0;
-
- tx_buffer = &txr->tx_buffers[first];
- tx_desc =
- (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
- }
- ++txr->packets;
- ++ifp->if_opackets;
- /* See if there is more work now */
- last = tx_buffer->eop_index;
- if (last != -1) {
- eop_desc =
- (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
- /* Get next done point */
- if (++last == adapter->num_tx_desc) last = 0;
- done = last;
- } else
- break;
- }
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- txr->next_to_clean = first;
-
- /*
-	** Watchdog calculation: we know there's
-	** work outstanding or the first return
-	** would have been taken, so no progress
-	** for too long indicates a hang.
- */
- if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
- txr->queue_status = IXGBE_QUEUE_HUNG;
-
- /*
- * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
- * it is OK to send packets. If there are no pending descriptors,
- * clear the timeout. Otherwise, if some descriptors have been freed,
- * restart the timeout.
- */
- if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- if (txr->tx_avail == adapter->num_tx_desc) {
- txr->queue_status = IXGBE_QUEUE_IDLE;
- return FALSE;
- }
- }
-
- return TRUE;
-}
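-
-/*
-** Worked example of the 'done' bookkeeping above: with a 1024-entry
-** ring, next_to_clean = 1020 and an EOP at index 2, 'done' becomes 3
-** and the inner loop frees slots 1020..1023 and 0..2, wrapping via
-** the ++first test, before looking for the next completed EOP.
-*/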
-
-/*********************************************************************
- *
- * Refresh mbuf buffers for RX descriptor rings
- *   - now keeps its own state, so discards due to resource
- *     exhaustion are unnecessary: if an mbuf cannot be obtained
- *     the routine simply returns, keeping its placeholder, and
- *     can be called again later to retry.
- *
- **********************************************************************/
-static void
-ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
-{
- struct adapter *adapter = rxr->adapter;
- bus_dma_segment_t hseg[1];
- bus_dma_segment_t pseg[1];
- struct ixgbe_rx_buf *rxbuf;
- struct mbuf *mh, *mp;
- int i, j, nsegs, error;
- bool refreshed = FALSE;
-
- i = j = rxr->next_to_refresh;
- /* Control the loop with one beyond */
- if (++j == adapter->num_rx_desc)
- j = 0;
-
- while (j != limit) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxr->hdr_split == FALSE)
- goto no_split;
-
- if (rxbuf->m_head == NULL) {
- mh = m_gethdr(M_DONTWAIT, MT_DATA);
- if (mh == NULL)
- goto update;
- } else
- mh = rxbuf->m_head;
-
-		mh->m_pkthdr.len = mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("Refresh mbufs: hdr dmamap load"
- " failure - %d\n", error);
- m_free(mh);
- rxbuf->m_head = NULL;
- goto update;
- }
- rxbuf->m_head = mh;
- bus_dmamap_sync(rxr->htag, rxbuf->hmap,
- BUS_DMASYNC_PREREAD);
- rxr->rx_base[i].read.hdr_addr =
- htole64(hseg[0].ds_addr);
-
-no_split:
- if (rxbuf->m_pack == NULL) {
- mp = m_getjcl(M_DONTWAIT, MT_DATA,
- M_PKTHDR, adapter->rx_mbuf_sz);
- if (mp == NULL)
- goto update;
- } else
- mp = rxbuf->m_pack;
-
- mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("Refresh mbufs: payload dmamap load"
- " failure - %d\n", error);
- m_free(mp);
- rxbuf->m_pack = NULL;
- goto update;
- }
- rxbuf->m_pack = mp;
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_PREREAD);
- rxr->rx_base[i].read.pkt_addr =
- htole64(pseg[0].ds_addr);
-
- refreshed = TRUE;
- /* Next is precalculated */
- i = j;
- rxr->next_to_refresh = i;
- if (++j == adapter->num_rx_desc)
- j = 0;
- }
-update:
- if (refreshed) /* Update hardware tail index */
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_RDT(rxr->me), rxr->next_to_refresh);
- return;
-}
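-
-/*
-** Worked example of the one-beyond loop control above: with a
-** 512-entry ring, next_to_refresh = 510 and limit = 1, the loop
-** refreshes slots 510 and 511, wraps, and exits once j reaches
-** limit, leaving next_to_refresh = 0 to be written into RDT.
-*/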
-
-/*********************************************************************
- *
- * Allocate memory for rx_buffer structures. Since we use one
- * rx_buffer per received packet, the maximum number of rx_buffer's
- * that we'll need is equal to the number of receive descriptors
- * that we've allocated.
- *
- **********************************************************************/
-static int
-ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- device_t dev = adapter->dev;
- struct ixgbe_rx_buf *rxbuf;
- int i, bsize, error;
-
- bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
- if (!(rxr->rx_buffers =
- (struct ixgbe_rx_buf *) malloc(bsize,
- M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate rx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MSIZE, /* maxsize */
- 1, /* nsegments */
- MSIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->htag))) {
- device_printf(dev, "Unable to create RX DMA tag\n");
- goto fail;
- }
-
- if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MJUM16BYTES, /* maxsize */
- 1, /* nsegments */
- MJUM16BYTES, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->ptag))) {
- device_printf(dev, "Unable to create RX DMA tag\n");
- goto fail;
- }
-
-	for (i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- error = bus_dmamap_create(rxr->htag,
- BUS_DMA_NOWAIT, &rxbuf->hmap);
- if (error) {
- device_printf(dev, "Unable to create RX head map\n");
- goto fail;
- }
- error = bus_dmamap_create(rxr->ptag,
- BUS_DMA_NOWAIT, &rxbuf->pmap);
- if (error) {
- device_printf(dev, "Unable to create RX pkt map\n");
- goto fail;
- }
- }
-
- return (0);
-
-fail:
- /* Frees all, but can handle partial completion */
- ixgbe_free_receive_structures(adapter);
- return (error);
-}
-
-/*
-** Used to detect a descriptor that has
-** been merged by Hardware RSC.
-*/
-static inline u32
-ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
-{
- return (le32toh(rx->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
-}
-
-/*********************************************************************
- *
- * Initialize Hardware RSC (LRO) feature on 82599
- * for an RX ring, this is toggled by the LRO capability
- * even though it is transparent to the stack.
- *
- **********************************************************************/
-static void
-ixgbe_setup_hw_rsc(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 rscctrl, rdrxctl;
-
- rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
- rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
-
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
- rscctrl |= IXGBE_RSCCTL_RSCEN;
-
- /*
- ** Limit the total number of descriptors that
- ** can be combined, so it does not exceed 64K
- */
- if (adapter->rx_mbuf_sz == MCLBYTES)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
- else if (adapter->rx_mbuf_sz == MJUM9BYTES)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
- else /* Using 16K cluster */
- rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
-
- IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
-
- /* Enable TCP header recognition */
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
- (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
- IXGBE_PSRTYPE_TCPHDR));
-
- /* Disable RSC for ACK packets */
- IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
- (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
-
- rxr->hw_rsc = TRUE;
-}
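-
-/*
-** Worked arithmetic for the MAXDESC caps above, using the standard
-** cluster sizes: 16 x MCLBYTES (2048) = 32KB, 8 x MJUMPAGESIZE
-** (4096 on most platforms) = 32KB, 4 x MJUM9BYTES (9216) = 36KB,
-** and a single 16KB cluster, all under the 64KB a merged RSC
-** descriptor may describe.
-*/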
-
-
-static void
-ixgbe_free_receive_ring(struct rx_ring *rxr)
-{
- struct adapter *adapter;
- struct ixgbe_rx_buf *rxbuf;
- int i;
-
- adapter = rxr->adapter;
- for (i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, rxbuf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, rxbuf->hmap);
- rxbuf->m_head->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_head);
- }
- if (rxbuf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
- rxbuf->m_pack->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_pack);
- }
- rxbuf->m_head = NULL;
- rxbuf->m_pack = NULL;
- }
-}
-
-
-/*********************************************************************
- *
- * Initialize a receive ring and its buffers.
- *
- **********************************************************************/
-static int
-ixgbe_setup_receive_ring(struct rx_ring *rxr)
-{
- struct adapter *adapter;
- struct ifnet *ifp;
- device_t dev;
- struct ixgbe_rx_buf *rxbuf;
- bus_dma_segment_t pseg[1], hseg[1];
- struct lro_ctrl *lro = &rxr->lro;
- int rsize, nsegs, error = 0;
-
- adapter = rxr->adapter;
- ifp = adapter->ifp;
- dev = adapter->dev;
-
- /* Clear the ring contents */
- IXGBE_RX_LOCK(rxr);
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
- bzero((void *)rxr->rx_base, rsize);
-
- /* Free current RX buffer structs and their mbufs */
- ixgbe_free_receive_ring(rxr);
-
- /* Configure header split? */
- if (ixgbe_header_split)
- rxr->hdr_split = TRUE;
-
- /* Now replenish the mbufs */
- for (int j = 0; j != adapter->num_rx_desc; ++j) {
- struct mbuf *mh, *mp;
-
- rxbuf = &rxr->rx_buffers[j];
-		/*
-		** Don't allocate mbufs if we're not
-		** doing header split; it's wasteful
-		*/
- if (rxr->hdr_split == FALSE)
- goto skip_head;
-
- /* First the header */
- rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
- if (rxbuf->m_head == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- m_adj(rxbuf->m_head, ETHER_ALIGN);
- mh = rxbuf->m_head;
- mh->m_len = mh->m_pkthdr.len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- rxbuf->hmap, rxbuf->m_head, hseg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) /* Nothing elegant to do here */
- goto fail;
- bus_dmamap_sync(rxr->htag,
- rxbuf->hmap, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
-
-skip_head:
- /* Now the payload cluster */
- rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
- M_PKTHDR, adapter->rx_mbuf_sz);
- if (rxbuf->m_pack == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- mp = rxbuf->m_pack;
- mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- rxbuf->pmap, mp, pseg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0)
- goto fail;
- bus_dmamap_sync(rxr->ptag,
- rxbuf->pmap, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
- }
-
-
- /* Setup our descriptor indices */
- rxr->next_to_check = 0;
- rxr->next_to_refresh = 0;
- rxr->lro_enabled = FALSE;
- rxr->rx_split_packets = 0;
- rxr->rx_bytes = 0;
- rxr->discard = FALSE;
-
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /*
- ** Now set up the LRO interface:
-	** 82598 uses software LRO; the
- ** 82599 and X540 use a hardware assist.
- */
- if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
- (ifp->if_capenable & IFCAP_RXCSUM) &&
- (ifp->if_capenable & IFCAP_LRO))
- ixgbe_setup_hw_rsc(rxr);
- else if (ifp->if_capenable & IFCAP_LRO) {
- int err = tcp_lro_init(lro);
- if (err) {
- device_printf(dev, "LRO Initialization failed!\n");
- goto fail;
- }
- INIT_DEBUGOUT("RX Soft LRO Initialized\n");
- rxr->lro_enabled = TRUE;
- lro->ifp = adapter->ifp;
- }
-
- IXGBE_RX_UNLOCK(rxr);
- return (0);
-
-fail:
- ixgbe_free_receive_ring(rxr);
- IXGBE_RX_UNLOCK(rxr);
- return (error);
-}
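
For reference, the rx_base[j].read.hdr_addr / read.pkt_addr stores above fill in the read (driver-to-hardware) format of the advanced RX descriptor. A minimal sketch of that layout, with field names matching the usage here; the real definition is union ixgbe_adv_rx_desc in the shared Intel header (ixgbe_type.h in this codebase), which also carries the writeback format:

/* Sketch only -- see union ixgbe_adv_rx_desc for the real layout. */
union sketch_adv_rx_desc {
	struct {
		u64 pkt_addr;	/* DMA address of the payload buffer */
		u64 hdr_addr;	/* DMA address of the header buffer */
	} read;
	/* writeback format (wb.*) elided */
};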
-
-/*********************************************************************
- *
- * Initialize all receive rings.
- *
- **********************************************************************/
-static int
-ixgbe_setup_receive_structures(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
- int j;
-
- for (j = 0; j < adapter->num_queues; j++, rxr++)
- if (ixgbe_setup_receive_ring(rxr))
- goto fail;
-
- return (0);
-fail:
-	/*
-	 * Free RX buffers allocated so far; we will only handle
-	 * the rings that completed. The failing case will have
-	 * cleaned up for itself. 'j' failed, so it's the terminus.
-	 */
- for (int i = 0; i < j; ++i) {
- rxr = &adapter->rx_rings[i];
- ixgbe_free_receive_ring(rxr);
- }
-
- return (ENOBUFS);
-}
-
-/*********************************************************************
- *
- * Setup receive registers and features.
- *
- **********************************************************************/
-#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
-
-static void
-ixgbe_initialize_receive_units(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ifnet *ifp = adapter->ifp;
- u32 bufsz, rxctrl, fctrl, srrctl, rxcsum;
- u32 reta, mrqc = 0, hlreg, random[10];
-
-
- /*
- * Make sure receives are disabled while
- * setting up the descriptor ring
- */
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
- rxctrl & ~IXGBE_RXCTRL_RXEN);
-
- /* Enable broadcasts */
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_BAM;
- fctrl |= IXGBE_FCTRL_DPF;
- fctrl |= IXGBE_FCTRL_PMCF;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-
- /* Set for Jumbo Frames? */
- hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (ifp->if_mtu > ETHERMTU)
- hlreg |= IXGBE_HLREG0_JUMBOEN;
- else
- hlreg &= ~IXGBE_HLREG0_JUMBOEN;
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
-
- bufsz = adapter->rx_mbuf_sz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- u64 rdba = rxr->rxdma.dma_paddr;
-
- /* Setup the Base and Length of the Rx Descriptor Ring */
- IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
- (rdba & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
- adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
-
- /* Set up the SRRCTL register */
- srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
- srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
- srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
- srrctl |= bufsz;
- if (rxr->hdr_split) {
- /* Use a standard mbuf for the header */
- srrctl |= ((IXGBE_RX_HDR <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
- & IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
- srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
-
- /* Setup the HW Rx Head and Tail Descriptor Pointers */
- IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
- }
-
- if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
- }
-
- rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
-
- /* Setup RSS */
- if (adapter->num_queues > 1) {
- int i, j;
- reta = 0;
-
- /* set up random bits */
- arc4rand(&random, sizeof(random), 0);
-
- /* Set up the redirection table */
- for (i = 0, j = 0; i < 128; i++, j++) {
- if (j == adapter->num_queues) j = 0;
- reta = (reta << 8) | (j * 0x11);
- if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
- }
-
- /* Now fill our hash function seeds */
- for (int i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
-
- /* Perform hash on these packet types */
- mrqc = IXGBE_MRQC_RSSEN
- | IXGBE_MRQC_RSS_FIELD_IPV4
- | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
- | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV6_EX
- | IXGBE_MRQC_RSS_FIELD_IPV6
- | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
- | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
-
- /* RSS and RX IPP Checksum are mutually exclusive */
- rxcsum |= IXGBE_RXCSUM_PCSD;
- }
-
- if (ifp->if_capenable & IFCAP_RXCSUM)
- rxcsum |= IXGBE_RXCSUM_PCSD;
-
- if (!(rxcsum & IXGBE_RXCSUM_PCSD))
- rxcsum |= IXGBE_RXCSUM_IPPCSE;
-
- IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
-
- return;
-}
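
A note on the RETA fill in the RSS block above: the table has 128 byte-wide entries, packed four to a 32-bit register, so a write is issued on every fourth iteration; the 0x11 multiplier appears to replicate the queue index into both nibbles of each entry. A standalone sketch of the packing, with a hypothetical write_reta() standing in for the IXGBE_WRITE_REG(hw, IXGBE_RETA(n), val) call:

/* Sketch of the RETA packing; write_reta() is hypothetical. */
static void write_reta(int n, u32 val);	/* hypothetical stand-in */

static void
fill_reta_sketch(int num_queues)
{
	u32 reta = 0;

	for (int i = 0, j = 0; i < 128; i++, j++) {
		if (j == num_queues)
			j = 0;
		/* shift earlier entries up, append this one */
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3)	/* four byte-entries buffered */
			write_reta(i >> 2, reta);
	}
}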
-
-/*********************************************************************
- *
- * Free all receive rings.
- *
- **********************************************************************/
-static void
-ixgbe_free_receive_structures(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- struct lro_ctrl *lro = &rxr->lro;
- ixgbe_free_receive_buffers(rxr);
- /* Free LRO memory */
- tcp_lro_free(lro);
- /* Free the ring memory as well */
- ixgbe_dma_free(adapter, &rxr->rxdma);
- }
-
- free(adapter->rx_rings, M_DEVBUF);
-}
-
-
-/*********************************************************************
- *
- * Free receive ring data structures
- *
- **********************************************************************/
-static void
-ixgbe_free_receive_buffers(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- struct ixgbe_rx_buf *rxbuf;
-
- INIT_DEBUGOUT("free_receive_structures: begin");
-
- /* Cleanup any existing buffers */
- if (rxr->rx_buffers != NULL) {
- for (int i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, rxbuf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, rxbuf->hmap);
- rxbuf->m_head->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_head);
- }
- if (rxbuf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
- rxbuf->m_pack->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_pack);
- }
- rxbuf->m_head = NULL;
- rxbuf->m_pack = NULL;
- if (rxbuf->hmap != NULL) {
- bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
- rxbuf->hmap = NULL;
- }
- if (rxbuf->pmap != NULL) {
- bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
- rxbuf->pmap = NULL;
- }
- }
- if (rxr->rx_buffers != NULL) {
- free(rxr->rx_buffers, M_DEVBUF);
- rxr->rx_buffers = NULL;
- }
- }
-
- if (rxr->htag != NULL) {
- bus_dma_tag_destroy(rxr->htag);
- rxr->htag = NULL;
- }
- if (rxr->ptag != NULL) {
- bus_dma_tag_destroy(rxr->ptag);
- rxr->ptag = NULL;
- }
-
- return;
-}
-
-static __inline void
-ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
-{
-
-	/*
-	 * At the moment, LRO is only for IPv4/TCP packets, and the TCP
-	 * checksum of the packet should be computed by hardware. Also it
-	 * should not have a VLAN tag in its ethernet header.
-	 */
- if (rxr->lro_enabled &&
- (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
- (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
- (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
- (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
- (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
- (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
-		/*
-		** Send to the stack if:
-		**  - LRO not enabled, or
-		**  - no LRO resources, or
-		**  - lro enqueue fails
-		*/
- if (rxr->lro.lro_cnt != 0)
- if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
- return;
- }
- IXGBE_RX_UNLOCK(rxr);
- (*ifp->if_input)(ifp, m);
- IXGBE_RX_LOCK(rxr);
-}
-
-static __inline void
-ixgbe_rx_discard(struct rx_ring *rxr, int i)
-{
- struct ixgbe_rx_buf *rbuf;
-
- rbuf = &rxr->rx_buffers[i];
-
- if (rbuf->fmp != NULL) {/* Partial chain ? */
- rbuf->fmp->m_flags |= M_PKTHDR;
- m_freem(rbuf->fmp);
- rbuf->fmp = NULL;
- }
-
- /*
- ** With advanced descriptors the writeback
-	** clobbers the buffer addrs, so it's easier
- ** to just free the existing mbufs and take
- ** the normal refresh path to get new buffers
- ** and mapping.
- */
- if (rbuf->m_head) {
- m_free(rbuf->m_head);
- rbuf->m_head = NULL;
- }
-
- if (rbuf->m_pack) {
- m_free(rbuf->m_pack);
- rbuf->m_pack = NULL;
- }
-
- return;
-}
-
-
-/*********************************************************************
- *
- *  This routine executes in interrupt context. It replenishes
- *  the mbufs in the descriptor ring and sends data which has been
- *  DMA'd into host memory to the upper layer.
- *
- * We loop at most count times if count is > 0, or until done if
- * count < 0.
- *
- * Return TRUE for more work, FALSE for all clean.
- *********************************************************************/
-static bool
-ixgbe_rxeof(struct ix_queue *que, int count)
-{
- struct adapter *adapter = que->adapter;
- struct rx_ring *rxr = que->rxr;
- struct ifnet *ifp = adapter->ifp;
- struct lro_ctrl *lro = &rxr->lro;
- struct lro_entry *queued;
- int i, nextp, processed = 0;
- u32 staterr = 0;
- union ixgbe_adv_rx_desc *cur;
- struct ixgbe_rx_buf *rbuf, *nbuf;
-
- IXGBE_RX_LOCK(rxr);
-
- for (i = rxr->next_to_check; count != 0;) {
- struct mbuf *sendmp, *mh, *mp;
- u32 rsc, ptype;
- u16 hlen, plen, hdr, vtag;
- bool eop;
-
- /* Sync the ring. */
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- cur = &rxr->rx_base[i];
- staterr = le32toh(cur->wb.upper.status_error);
-
- if ((staterr & IXGBE_RXD_STAT_DD) == 0)
- break;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
-
- count--;
- sendmp = NULL;
- nbuf = NULL;
- rsc = 0;
- cur->wb.upper.status_error = 0;
- rbuf = &rxr->rx_buffers[i];
- mh = rbuf->m_head;
- mp = rbuf->m_pack;
-
- plen = le16toh(cur->wb.upper.length);
- ptype = le32toh(cur->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_PKTTYPE_MASK;
- hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
- vtag = le16toh(cur->wb.upper.vlan);
- eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
-
- /* Make sure bad packets are discarded */
- if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
- (rxr->discard)) {
- ifp->if_ierrors++;
- rxr->rx_discarded++;
- if (eop)
- rxr->discard = FALSE;
- else
- rxr->discard = TRUE;
- ixgbe_rx_discard(rxr, i);
- goto next_desc;
- }
-
-		/*
-		** On the 82599, which supports a hardware
-		** LRO (called HW RSC), packets need
-		** not be fragmented across sequential
-		** descriptors; rather, the next descriptor
-		** is indicated in bits of the descriptor.
-		** This also means that we might process
-		** more than one packet at a time, something
-		** that has never been true before; it
-		** required eliminating global chain pointers
-		** in favor of what we are doing here. -jfv
-		*/
- if (!eop) {
- /*
- ** Figure out the next descriptor
- ** of this frame.
- */
- if (rxr->hw_rsc == TRUE) {
- rsc = ixgbe_rsc_count(cur);
- rxr->rsc_num += (rsc - 1);
- }
- if (rsc) { /* Get hardware index */
- nextp = ((staterr &
- IXGBE_RXDADV_NEXTP_MASK) >>
- IXGBE_RXDADV_NEXTP_SHIFT);
- } else { /* Just sequential */
- nextp = i + 1;
- if (nextp == adapter->num_rx_desc)
- nextp = 0;
- }
- nbuf = &rxr->rx_buffers[nextp];
- prefetch(nbuf);
- }
- /*
- ** The header mbuf is ONLY used when header
- ** split is enabled, otherwise we get normal
-		** behavior, i.e., both header and payload
- ** are DMA'd into the payload buffer.
- **
- ** Rather than using the fmp/lmp global pointers
- ** we now keep the head of a packet chain in the
- ** buffer struct and pass this along from one
- ** descriptor to the next, until we get EOP.
- */
- if (rxr->hdr_split && (rbuf->fmp == NULL)) {
- /* This must be an initial descriptor */
- hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > IXGBE_RX_HDR)
- hlen = IXGBE_RX_HDR;
- mh->m_len = hlen;
- mh->m_flags |= M_PKTHDR;
- mh->m_next = NULL;
- mh->m_pkthdr.len = mh->m_len;
- /* Null buf pointer so it is refreshed */
- rbuf->m_head = NULL;
-			/*
-			** Check the payload length; this
-			** could be zero if it's a small
-			** packet.
-			*/
- if (plen > 0) {
- mp->m_len = plen;
- mp->m_next = NULL;
- mp->m_flags &= ~M_PKTHDR;
- mh->m_next = mp;
- mh->m_pkthdr.len += mp->m_len;
- /* Null buf pointer so it is refreshed */
- rbuf->m_pack = NULL;
- rxr->rx_split_packets++;
- }
-			/*
-			** Now create the forward
-			** chain so when complete
-			** we won't have to.
-			*/
- if (eop == 0) {
- /* stash the chain head */
- nbuf->fmp = mh;
- /* Make forward chain */
- if (plen)
- mp->m_next = nbuf->m_pack;
- else
- mh->m_next = nbuf->m_pack;
- } else {
- /* Singlet, prepare to send */
- sendmp = mh;
- if ((adapter->num_vlans) &&
- (staterr & IXGBE_RXD_STAT_VP)) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
- } else {
- /*
- ** Either no header split, or a
- ** secondary piece of a fragmented
- ** split packet.
- */
- mp->m_len = plen;
- /*
- ** See if there is a stored head
- ** that determines what we are
- */
- sendmp = rbuf->fmp;
- rbuf->m_pack = rbuf->fmp = NULL;
-
- if (sendmp != NULL) /* secondary frag */
- sendmp->m_pkthdr.len += mp->m_len;
- else {
- /* first desc of a non-ps chain */
- sendmp = mp;
- sendmp->m_flags |= M_PKTHDR;
- sendmp->m_pkthdr.len = mp->m_len;
- if (staterr & IXGBE_RXD_STAT_VP) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
- /* Pass the head pointer on */
- if (eop == 0) {
- nbuf->fmp = sendmp;
- sendmp = NULL;
- mp->m_next = nbuf->m_pack;
- }
- }
- ++processed;
- /* Sending this frame? */
- if (eop) {
- sendmp->m_pkthdr.rcvif = ifp;
- ifp->if_ipackets++;
- rxr->rx_packets++;
- /* capture data for AIM */
- rxr->bytes += sendmp->m_pkthdr.len;
- rxr->rx_bytes += sendmp->m_pkthdr.len;
- if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
- ixgbe_rx_checksum(staterr, sendmp, ptype);
-#if __FreeBSD_version >= 800000
- sendmp->m_pkthdr.flowid = que->msix;
- sendmp->m_flags |= M_FLOWID;
-#endif
- }
-next_desc:
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* Advance our pointers to the next descriptor. */
- if (++i == adapter->num_rx_desc)
- i = 0;
-
- /* Now send to the stack or do LRO */
- if (sendmp != NULL) {
- rxr->next_to_check = i;
- ixgbe_rx_input(rxr, ifp, sendmp, ptype);
- i = rxr->next_to_check;
- }
-
- /* Every 8 descriptors we go to refresh mbufs */
- if (processed == 8) {
- ixgbe_refresh_mbufs(rxr, i);
- processed = 0;
- }
- }
-
- /* Refresh any remaining buf structs */
- if (ixgbe_rx_unrefreshed(rxr))
- ixgbe_refresh_mbufs(rxr, i);
-
- rxr->next_to_check = i;
-
- /*
- * Flush any outstanding LRO work
- */
- while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
- SLIST_REMOVE_HEAD(&lro->lro_active, next);
- tcp_lro_flush(lro, queued);
- }
-
- IXGBE_RX_UNLOCK(rxr);
-
-	/*
-	** Do we still have cleaning to do?
-	** If so, schedule another interrupt.
-	*/
- if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
- ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
- return (TRUE);
- }
-
- return (FALSE);
-}
-
-
-/*********************************************************************
- *
- * Verify that the hardware indicated that the checksum is valid.
- *  Inform the stack about the status of the checksum so that the
- *  stack doesn't spend time verifying it.
- *
- *********************************************************************/
-static void
-ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
-{
- u16 status = (u16) staterr;
- u8 errors = (u8) (staterr >> 24);
- bool sctp = FALSE;
-
- if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
- (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
- sctp = TRUE;
-
- if (status & IXGBE_RXD_STAT_IPCS) {
- if (!(errors & IXGBE_RXD_ERR_IPE)) {
- /* IP Checksum Good */
- mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
- mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
-
- } else
- mp->m_pkthdr.csum_flags = 0;
- }
- if (status & IXGBE_RXD_STAT_L4CS) {
- u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
-#if __FreeBSD_version >= 800000
- if (sctp)
- type = CSUM_SCTP_VALID;
-#endif
- if (!(errors & IXGBE_RXD_ERR_TCPE)) {
- mp->m_pkthdr.csum_flags |= type;
- if (!sctp)
- mp->m_pkthdr.csum_data = htons(0xffff);
- }
- }
- return;
-}
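
The casts at the top of ixgbe_rx_checksum() encode how the combined word is split: the low 16 bits are treated as the status flags and the top eight bits as the error byte. In comment form:

/*
 * Split of staterr as used above (sketch):
 *
 *	bits  0..15 -> status (IXGBE_RXD_STAT_IPCS, _L4CS, ...)
 *	bits 24..31 -> errors (IXGBE_RXD_ERR_IPE, _TCPE, ...)
 *
 * hence the (u16) cast for status and (staterr >> 24) for errors.
 */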
-
-
-/*
-** This routine is run via a vlan config EVENT;
-** it enables us to use the HW Filter table since
-** we can get the vlan id. This just creates the
-** entry in the soft version of the VFTA; init will
-** repopulate the real table.
-*/
-static void
-ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct adapter *adapter = ifp->if_softc;
- u16 index, bit;
-
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXGBE_CORE_LOCK(adapter);
- index = (vtag >> 5) & 0x7F;
- bit = vtag & 0x1F;
- adapter->shadow_vfta[index] |= (1 << bit);
- ++adapter->num_vlans;
- ixgbe_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-}
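
The index/bit arithmetic above treats shadow_vfta as a 4096-bit bitmap: 128 32-bit words, one bit per possible VLAN ID. A worked example for vtag = 100:

/*
 * Worked example of the VFTA indexing above, for vtag = 100:
 *
 *	index = (100 >> 5) & 0x7F = 3	-- word 3 of shadow_vfta[]
 *	bit   =  100 & 0x1F       = 4	-- bit 4 within that word
 *
 * 128 words x 32 bits = 4096 bits, covering VLAN IDs 0-4095.
 */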
-
-/*
-** This routine is run via a vlan
-** unconfig EVENT; it removes our entry
-** from the soft vfta.
-*/
-static void
-ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct adapter *adapter = ifp->if_softc;
- u16 index, bit;
-
- if (ifp->if_softc != arg)
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXGBE_CORE_LOCK(adapter);
- index = (vtag >> 5) & 0x7F;
- bit = vtag & 0x1F;
- adapter->shadow_vfta[index] &= ~(1 << bit);
- --adapter->num_vlans;
- /* Re-init to load the changes */
- ixgbe_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-}
-
-static void
-ixgbe_setup_vlan_hw_support(struct adapter *adapter)
-{
- struct ifnet *ifp = adapter->ifp;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 ctrl;
-
-
-	/*
-	** We get here through init_locked, meaning
-	** a soft reset; this has already cleared
-	** the VFTA and other state, so if no
-	** VLANs have been registered, do nothing.
-	*/
- if (adapter->num_vlans == 0)
- return;
-
-	/*
-	** A soft reset zeroes out the VFTA, so
-	** we need to repopulate it now.
-	*/
- for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
- if (adapter->shadow_vfta[i] != 0)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
- adapter->shadow_vfta[i]);
-
- ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- /* Enable the Filter Table if enabled */
- if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- ctrl |= IXGBE_VLNCTRL_VFE;
- }
- if (hw->mac.type == ixgbe_mac_82598EB)
- ctrl |= IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-
-	/* On 82599 the VLAN enable is per-queue in RXDCTL */
- if (hw->mac.type != ixgbe_mac_82598EB)
- for (int i = 0; i < adapter->num_queues; i++) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
- }
-}
-
-static void
-ixgbe_enable_intr(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ix_queue *que = adapter->queues;
- u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
-
-
- /* Enable Fan Failure detection */
- if (hw->device_id == IXGBE_DEV_ID_82598AT)
- mask |= IXGBE_EIMS_GPI_SDP1;
- else {
- mask |= IXGBE_EIMS_ECC;
- mask |= IXGBE_EIMS_GPI_SDP0;
- mask |= IXGBE_EIMS_GPI_SDP1;
- mask |= IXGBE_EIMS_GPI_SDP2;
-#ifdef IXGBE_FDIR
- mask |= IXGBE_EIMS_FLOW_DIR;
-#endif
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
-
- /* With RSS we use auto clear */
- if (adapter->msix_mem) {
- mask = IXGBE_EIMS_ENABLE_MASK;
- /* Don't autoclear Link */
- mask &= ~IXGBE_EIMS_OTHER;
- mask &= ~IXGBE_EIMS_LSC;
- IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
- }
-
- /*
-	** Now enable all queues; this is done separately to
-	** allow for handling the extended (beyond 32) MSIX
-	** vectors that can be used by the 82599
- */
- for (int i = 0; i < adapter->num_queues; i++, que++)
- ixgbe_enable_queue(adapter, que->msix);
-
- IXGBE_WRITE_FLUSH(hw);
-
- return;
-}
-
-static void
-ixgbe_disable_intr(struct adapter *adapter)
-{
- if (adapter->msix_mem)
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
- } else {
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
- }
- IXGBE_WRITE_FLUSH(&adapter->hw);
- return;
-}
-
-u16
-ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
-{
- u16 value;
-
- value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
- reg, 2);
-
- return (value);
-}
-
-void
-ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
-{
- pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
- reg, value, 2);
-
- return;
-}
-
-/*
-** Setup the correct IVAR register for a particular MSIX interrupt
-** (yes this is all very magic and confusing :)
-** - entry is the register array entry
-** - vector is the MSIX vector for this queue
-** - type is RX/TX/MISC
-*/
-static void
-ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 ivar, index;
-
- vector |= IXGBE_IVAR_ALLOC_VAL;
-
- switch (hw->mac.type) {
-
- case ixgbe_mac_82598EB:
- if (type == -1)
- entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
- else
- entry += (type * 64);
- index = (entry >> 2) & 0x1F;
- ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
- ivar &= ~(0xFF << (8 * (entry & 0x3)));
- ivar |= (vector << (8 * (entry & 0x3)));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
- break;
-
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- if (type == -1) { /* MISC IVAR */
- index = (entry & 1) * 8;
- ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
- ivar &= ~(0xFF << index);
- ivar |= (vector << index);
- IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
- } else { /* RX/TX IVARS */
- index = (16 * (entry & 1)) + (8 * type);
- ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
- ivar &= ~(0xFF << index);
- ivar |= (vector << index);
- IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
-		}
-		break;
-
-	default:
- break;
- }
-}
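
A worked example may help demystify the 82599/X540 RX/TX branch above. Each 32-bit IVAR register covers two queue entries, with an 8-bit vector field for RX and TX of each; say entry 5, TX (type 1), MSIX vector 7:

/*
 * Worked example for the 82599/X540 RX/TX branch (entry 5, type 1,
 * vector 7):
 *
 *	register : IVAR(entry >> 1)         = IVAR(2)
 *	index    : (16 * (5 & 1)) + (8 * 1) = 24  (bits 24-31)
 *	field    : (7 | IXGBE_IVAR_ALLOC_VAL) written into those bits
 *
 * So each IVAR register holds four 8-bit fields: RX and TX for an
 * even entry in the low half, RX and TX for the next odd entry above.
 */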
-
-static void
-ixgbe_configure_ivars(struct adapter *adapter)
-{
- struct ix_queue *que = adapter->queues;
- u32 newitr;
-
- if (ixgbe_max_interrupt_rate > 0)
- newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
- else
- newitr = 0;
-
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- /* First the RX queue entry */
- ixgbe_set_ivar(adapter, i, que->msix, 0);
- /* ... and the TX */
- ixgbe_set_ivar(adapter, i, que->msix, 1);
- /* Set an Initial EITR value */
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_EITR(que->msix), newitr);
- }
-
- /* For the Link interrupt */
- ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
-}
-
-/*
-** ixgbe_sfp_probe - called in the local timer to
-** determine if a port has had optics inserted.
-*/
-static bool
-ixgbe_sfp_probe(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- device_t dev = adapter->dev;
- bool result = FALSE;
-
- if ((hw->phy.type == ixgbe_phy_nl) &&
- (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
- s32 ret = hw->phy.ops.identify_sfp(hw);
- if (ret)
- goto out;
- ret = hw->phy.ops.reset(hw);
- if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-			device_printf(dev, "Unsupported SFP+ module detected!");
- printf(" Reload driver with supported module.\n");
- adapter->sfp_probe = FALSE;
- goto out;
- } else
-			device_printf(dev, "SFP+ module detected!\n");
- /* We now have supported optics */
- adapter->sfp_probe = FALSE;
- /* Set the optics type so system reports correctly */
- ixgbe_setup_optics(adapter);
- result = TRUE;
- }
-out:
- return (result);
-}
-
-/*
-** Tasklet handler for MSIX Link interrupts
-**  - run outside interrupt context since it might sleep
-*/
-static void
-ixgbe_handle_link(void *context, int pending)
-{
- struct adapter *adapter = context;
-
- ixgbe_check_link(&adapter->hw,
- &adapter->link_speed, &adapter->link_up, 0);
- ixgbe_update_link_status(adapter);
-}
-
-/*
-** Tasklet for handling SFP module interrupts
-*/
-static void
-ixgbe_handle_mod(void *context, int pending)
-{
- struct adapter *adapter = context;
- struct ixgbe_hw *hw = &adapter->hw;
- device_t dev = adapter->dev;
- u32 err;
-
- err = hw->phy.ops.identify_sfp(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- device_printf(dev,
- "Unsupported SFP+ module type was detected.\n");
- return;
- }
- err = hw->mac.ops.setup_sfp(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- device_printf(dev,
- "Setup failure - unsupported SFP+ module type.\n");
- return;
- }
- taskqueue_enqueue(adapter->tq, &adapter->msf_task);
- return;
-}
-
-
-/*
-** Tasklet for handling MSF (multispeed fiber) interrupts
-*/
-static void
-ixgbe_handle_msf(void *context, int pending)
-{
- struct adapter *adapter = context;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 autoneg;
- bool negotiate;
-
- autoneg = hw->phy.autoneg_advertised;
- if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
- hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
- if (hw->mac.ops.setup_link)
- hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
- return;
-}
-
-#ifdef IXGBE_FDIR
-/*
-** Tasklet for reinitializing the Flow Director filter table
-*/
-static void
-ixgbe_reinit_fdir(void *context, int pending)
-{
- struct adapter *adapter = context;
- struct ifnet *ifp = adapter->ifp;
-
- if (adapter->fdir_reinit != 1) /* Shouldn't happen */
- return;
- ixgbe_reinit_fdir_tables_82599(&adapter->hw);
- adapter->fdir_reinit = 0;
- /* Restart the interface */
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- return;
-}
-#endif
-
-/**********************************************************************
- *
- * Update the board statistics counters.
- *
- **********************************************************************/
-static void
-ixgbe_update_stats_counters(struct adapter *adapter)
-{
- struct ifnet *ifp = adapter->ifp;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 missed_rx = 0, bprc, lxon, lxoff, total;
- u64 total_missed_rx = 0;
-
- adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
- adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
- adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
- adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
-
- for (int i = 0; i < 8; i++) {
- u32 mp;
- mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
- /* missed_rx tallies misses for the gprc workaround */
- missed_rx += mp;
- /* global total per queue */
- adapter->stats.mpc[i] += mp;
- /* Running comprehensive total for stats display */
- total_missed_rx += adapter->stats.mpc[i];
- if (hw->mac.type == ixgbe_mac_82598EB)
- adapter->stats.rnbc[i] +=
- IXGBE_READ_REG(hw, IXGBE_RNBC(i));
- adapter->stats.pxontxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- adapter->stats.pxonrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
- adapter->stats.pxofftxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
- adapter->stats.pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
- adapter->stats.pxon2offc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
- }
- for (int i = 0; i < 16; i++) {
- adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
- adapter->stats.qbrc[i] +=
- ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
- adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
- adapter->stats.qbtc[i] +=
- ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
- adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- }
- adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
- adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
- adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
-
- /* Hardware workaround, gprc counts missed packets */
- adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
- adapter->stats.gprc -= missed_rx;
-
- if (hw->mac.type != ixgbe_mac_82598EB) {
- adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
- ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
- adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
- ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
- adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
- ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
- adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
- adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
- } else {
- adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- /* 82598 only has a counter in the high register */
- adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
- adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
- }
-
- /*
- * Workaround: mprc hardware is incorrectly counting
- * broadcasts, so for now we subtract those.
- */
- bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
- adapter->stats.bprc += bprc;
- adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
- if (hw->mac.type == ixgbe_mac_82598EB)
- adapter->stats.mprc -= bprc;
-
- adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
- adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
- adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
- adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
- adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
- adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
-
- lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
- adapter->stats.lxontxc += lxon;
- lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
- adapter->stats.lxofftxc += lxoff;
- total = lxon + lxoff;
-
- adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
- adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
- adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
- adapter->stats.gptc -= total;
- adapter->stats.mptc -= total;
- adapter->stats.ptc64 -= total;
- adapter->stats.gotc -= total * ETHER_MIN_LEN;
-
- adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
- adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
- adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
- adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
- adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
- adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
- adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
- adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
- adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
- adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
- adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
- adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
- adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
- adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
- adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
- adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
- adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
- adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
- /* Only read FCOE on 82599 */
- if (hw->mac.type != ixgbe_mac_82598EB) {
- adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
- adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
- adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
- adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
- adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
- }
-
- /* Fill out the OS statistics structure */
- ifp->if_ipackets = adapter->stats.gprc;
- ifp->if_opackets = adapter->stats.gptc;
- ifp->if_ibytes = adapter->stats.gorc;
- ifp->if_obytes = adapter->stats.gotc;
- ifp->if_imcasts = adapter->stats.mprc;
- ifp->if_collisions = 0;
-
- /* Rx Errors */
- ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
- adapter->stats.rlec;
-}
-
-/** ixgbe_sysctl_tdh_handler - Handler function
- * Retrieves the TDH value from the hardware
- */
-static int
-ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
-{
- int error;
-
- struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
- if (!txr) return 0;
-
- unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return 0;
-}
-
-/** ixgbe_sysctl_tdt_handler - Handler function
- * Retrieves the TDT value from the hardware
- */
-static int
-ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
-{
- int error;
-
- struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
- if (!txr) return 0;
-
- unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return 0;
-}
-
-/** ixgbe_sysctl_rdh_handler - Handler function
- * Retrieves the RDH value from the hardware
- */
-static int
-ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
-{
- int error;
-
- struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
- if (!rxr) return 0;
-
- unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return 0;
-}
-
-/** ixgbe_sysctl_rdt_handler - Handler function
- * Retrieves the RDT value from the hardware
- */
-static int
-ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
-{
- int error;
-
- struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
- if (!rxr) return 0;
-
- unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return 0;
-}
-
-static int
-ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
-{
- int error;
- struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
- unsigned int reg, usec, rate;
-
- reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
- usec = ((reg & 0x0FF8) >> 3);
- if (usec > 0)
- rate = 1000000 / usec;
- else
- rate = 0;
- error = sysctl_handle_int(oidp, &rate, 0, req);
- if (error || !req->newptr)
- return error;
- return 0;
-}
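
This handler is the read-back inverse of the newitr computation in ixgbe_configure_ivars(); the EITR interval field sits in bits 3-11, hence the & 0x0FF8 mask and the shift. A numeric round trip as a sanity check:

/*
 * EITR round trip (sketch), for ixgbe_max_interrupt_rate = 31250:
 *
 *	write: newitr = (8000000 / 31250) & 0x0FF8 = 256 (0x100)
 *	read : usec   = (0x100 & 0x0FF8) >> 3      = 32
 *	       rate   = 1000000 / 32               = 31250 irq/s
 */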
-
-/*
- * Add sysctl variables, one per statistic, to the system.
- */
-static void
-ixgbe_add_hw_stats(struct adapter *adapter)
-{
-
- device_t dev = adapter->dev;
-
- struct tx_ring *txr = adapter->tx_rings;
- struct rx_ring *rxr = adapter->rx_rings;
-
- struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
- struct sysctl_oid *tree = device_get_sysctl_tree(dev);
- struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct ixgbe_hw_stats *stats = &adapter->stats;
-
- struct sysctl_oid *stat_node, *queue_node;
- struct sysctl_oid_list *stat_list, *queue_list;
-
-#define QUEUE_NAME_LEN 32
- char namebuf[QUEUE_NAME_LEN];
-
- /* Driver Statistics */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
- CTLFLAG_RD, &adapter->dropped_pkts,
- "Driver dropped packets");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
- CTLFLAG_RD, &adapter->mbuf_defrag_failed,
- "m_defrag() failed");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_dma_setup",
- CTLFLAG_RD, &adapter->no_tx_dma_setup,
- "Driver tx dma failure in xmit");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
- CTLFLAG_RD, &adapter->watchdog_events,
- "Watchdog timeouts");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tso_tx",
- CTLFLAG_RD, &adapter->tso_tx,
- "TSO");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
- CTLFLAG_RD, &adapter->link_irq,
- "Link MSIX IRQ Handled");
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
- queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
- CTLFLAG_RD, NULL, "Queue Name");
- queue_list = SYSCTL_CHILDREN(queue_node);
-
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
- CTLTYPE_UINT | CTLFLAG_RD, &adapter->queues[i],
- sizeof(&adapter->queues[i]),
- ixgbe_sysctl_interrupt_rate_handler, "IU",
- "Interrupt Rate");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
- CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
- ixgbe_sysctl_tdh_handler, "IU",
- "Transmit Descriptor Head");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
- CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
- ixgbe_sysctl_tdt_handler, "IU",
- "Transmit Descriptor Tail");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
- CTLFLAG_RD, &txr->no_desc_avail,
- "Queue No Descriptor Available");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
- CTLFLAG_RD, &txr->total_packets,
- "Queue Packets Transmitted");
- }
-
-	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
-		struct lro_ctrl *lro = &rxr->lro;
-
-		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
-		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
-					    CTLFLAG_RD, NULL, "Queue Name");
-		queue_list = SYSCTL_CHILDREN(queue_node);
-
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
- CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
- ixgbe_sysctl_rdh_handler, "IU",
- "Receive Descriptor Head");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
- CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
- ixgbe_sysctl_rdt_handler, "IU",
- "Receive Descriptor Tail");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
- CTLFLAG_RD, &rxr->rx_packets,
- "Queue Packets Received");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
- CTLFLAG_RD, &rxr->rx_bytes,
- "Queue Bytes Received");
- SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
- CTLFLAG_RD, &lro->lro_queued, 0,
- "LRO Queued");
- SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
- CTLFLAG_RD, &lro->lro_flushed, 0,
- "LRO Flushed");
- }
-
-	/* MAC stats get their own sub node */
-
- stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
- CTLFLAG_RD, NULL, "MAC Statistics");
- stat_list = SYSCTL_CHILDREN(stat_node);
-
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
- CTLFLAG_RD, &stats->crcerrs,
- "CRC Errors");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
- CTLFLAG_RD, &stats->illerrc,
- "Illegal Byte Errors");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
- CTLFLAG_RD, &stats->errbc,
- "Byte Errors");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
- CTLFLAG_RD, &stats->mspdc,
- "MAC Short Packets Discarded");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
- CTLFLAG_RD, &stats->mlfc,
- "MAC Local Faults");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
- CTLFLAG_RD, &stats->mrfc,
- "MAC Remote Faults");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
- CTLFLAG_RD, &stats->rlec,
- "Receive Length Errors");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_txd",
- CTLFLAG_RD, &stats->lxontxc,
- "Link XON Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_rcvd",
- CTLFLAG_RD, &stats->lxonrxc,
- "Link XON Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_txd",
- CTLFLAG_RD, &stats->lxofftxc,
- "Link XOFF Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_rcvd",
- CTLFLAG_RD, &stats->lxoffrxc,
- "Link XOFF Received");
-
- /* Packet Reception Stats */
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
- CTLFLAG_RD, &stats->tor,
- "Total Octets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
- CTLFLAG_RD, &stats->gorc,
- "Good Octets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
- CTLFLAG_RD, &stats->tpr,
- "Total Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
- CTLFLAG_RD, &stats->gprc,
- "Good Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
- CTLFLAG_RD, &stats->mprc,
- "Multicast Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
- CTLFLAG_RD, &stats->bprc,
- "Broadcast Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
- CTLFLAG_RD, &stats->prc64,
-			"64 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
- CTLFLAG_RD, &stats->prc127,
- "65-127 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
- CTLFLAG_RD, &stats->prc255,
- "128-255 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
- CTLFLAG_RD, &stats->prc511,
- "256-511 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
- CTLFLAG_RD, &stats->prc1023,
- "512-1023 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
- CTLFLAG_RD, &stats->prc1522,
-			"1024-1522 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
- CTLFLAG_RD, &stats->ruc,
- "Receive Undersized");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
- CTLFLAG_RD, &stats->rfc,
-			"Fragmented Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
- CTLFLAG_RD, &stats->roc,
- "Oversized Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
- CTLFLAG_RD, &stats->rjc,
- "Received Jabber");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
- CTLFLAG_RD, &stats->mngprc,
- "Management Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
-			CTLFLAG_RD, &stats->mngpdc,
- "Management Packets Dropped");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
- CTLFLAG_RD, &stats->xec,
- "Checksum Errors");
-
- /* Packet Transmission Stats */
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
- CTLFLAG_RD, &stats->gotc,
- "Good Octets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
- CTLFLAG_RD, &stats->tpt,
- "Total Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
- CTLFLAG_RD, &stats->gptc,
- "Good Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
- CTLFLAG_RD, &stats->bptc,
- "Broadcast Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
- CTLFLAG_RD, &stats->mptc,
- "Multicast Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
- CTLFLAG_RD, &stats->mngptc,
- "Management Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
- CTLFLAG_RD, &stats->ptc64,
-			"64 byte frames transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
- CTLFLAG_RD, &stats->ptc127,
- "65-127 byte frames transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
- CTLFLAG_RD, &stats->ptc255,
- "128-255 byte frames transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
- CTLFLAG_RD, &stats->ptc511,
- "256-511 byte frames transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
- CTLFLAG_RD, &stats->ptc1023,
- "512-1023 byte frames transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
- CTLFLAG_RD, &stats->ptc1522,
- "1024-1522 byte frames transmitted");
-
- /* FC Stats */
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_crc",
- CTLFLAG_RD, &stats->fccrc,
- "FC CRC Errors");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_last",
- CTLFLAG_RD, &stats->fclast,
- "FC Last Error");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_drpd",
- CTLFLAG_RD, &stats->fcoerpdc,
- "FCoE Packets Dropped");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_rcvd",
- CTLFLAG_RD, &stats->fcoeprc,
- "FCoE Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_txd",
- CTLFLAG_RD, &stats->fcoeptc,
- "FCoE Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_rcvd",
- CTLFLAG_RD, &stats->fcoedwrc,
- "FCoE DWords Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_txd",
- CTLFLAG_RD, &stats->fcoedwtc,
- "FCoE DWords Transmitted");
-}
-
-/*
-** Set flow control using sysctl:
-** Flow control values:
-** 0 - off
-** 1 - rx pause
-** 2 - tx pause
-** 3 - full
-*/
-static int
-ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
-{
- int error, last;
- struct adapter *adapter = (struct adapter *) arg1;
-
- last = adapter->fc;
- error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
-
- /* Don't bother if it's not changed */
- if (adapter->fc == last)
- return (0);
-
- switch (adapter->fc) {
- case ixgbe_fc_rx_pause:
- case ixgbe_fc_tx_pause:
- case ixgbe_fc_full:
- adapter->hw.fc.requested_mode = adapter->fc;
- break;
- case ixgbe_fc_none:
- default:
- adapter->hw.fc.requested_mode = ixgbe_fc_none;
- }
-
- ixgbe_fc_enable(&adapter->hw, 0);
- return error;
-}
-
-static void
-ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
- const char *description, int *limit, int value)
-{
- *limit = value;
- SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
- OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
-}
-
-/*
-** Control link advertise speed:
-** 0 - normal
-** 1 - advertise only 1G
-** 2 - advertise 100Mb
-*/
-static int
-ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
-{
- int error = 0;
- struct adapter *adapter;
- device_t dev;
- struct ixgbe_hw *hw;
- ixgbe_link_speed speed, last;
-
- adapter = (struct adapter *) arg1;
- dev = adapter->dev;
- hw = &adapter->hw;
- last = hw->phy.autoneg_advertised;
-
- error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
-
- if ((error) || (adapter->advertise == -1))
- return (error);
-
- if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.multispeed_fiber)))
- return (error);
-
- if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) {
- device_printf(dev, "Set Advertise: 100Mb on X540 only\n");
- return (error);
- }
-
- if (adapter->advertise == 1)
- speed = IXGBE_LINK_SPEED_1GB_FULL;
- else if (adapter->advertise == 2)
- speed = IXGBE_LINK_SPEED_100_FULL;
- else
- speed = IXGBE_LINK_SPEED_1GB_FULL |
- IXGBE_LINK_SPEED_10GB_FULL;
-
- if (speed == last) /* no change */
- return (error);
-
- hw->mac.autotry_restart = TRUE;
- hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
-
- return (error);
-}
-
+++ /dev/null
-/******************************************************************************
-
- Copyright (c) 2001-2010, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-
-#ifndef _IXGBE_H_
-#define _IXGBE_H_
-
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#if __FreeBSD_version >= 800000
-#include <sys/buf_ring.h>
-#endif
-#include <sys/mbuf.h>
-#include <sys/protosw.h>
-#include <sys/socket.h>
-#include <sys/malloc.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/sockio.h>
-
-#include <net/if.h>
-#include <net/if_arp.h>
-#include <net/bpf.h>
-#include <net/ethernet.h>
-#include <net/if_dl.h>
-#include <net/if_media.h>
-
-#include <net/if_types.h>
-#include <net/if_vlan_var.h>
-
-#include <netinet/in_systm.h>
-#include <netinet/in.h>
-#include <netinet/if_ether.h>
-#include <netinet/ip.h>
-#include <netinet/ip6.h>
-#include <netinet/tcp.h>
-#include <netinet/tcp_lro.h>
-#include <netinet/udp.h>
-
-#include <machine/in_cksum.h>
-
-#include <sys/bus.h>
-#include <machine/bus.h>
-#include <sys/rman.h>
-#include <machine/resource.h>
-#include <vm/vm.h>
-#include <vm/pmap.h>
-#include <machine/clock.h>
-#include <dev/pci/pcivar.h>
-#include <dev/pci/pcireg.h>
-#include <sys/proc.h>
-#include <sys/sysctl.h>
-#include <sys/endian.h>
-#include <sys/taskqueue.h>
-#include <sys/pcpu.h>
-#include <sys/smp.h>
-#include <machine/smp.h>
-
-#ifdef IXGBE_IEEE1588
-#include <sys/ieee1588.h>
-#endif
-
-#include "ixgbe_api.h"
-
-/* Tunables */
-
-/*
- * TxDescriptors Valid Range: 64-4096. Default Value: 1024. This value is
- * the number of transmit descriptors allocated by the driver. Increasing
- * this value allows the driver to queue more transmits. Each descriptor is
- * 16 bytes. Performance tests have shown the 2K value to be optimal for top
- * performance.
- */
-#define DEFAULT_TXD 1024
-#define PERFORM_TXD 2048
-#define MAX_TXD 4096
-#define MIN_TXD 64
-
-/*
- * RxDescriptors Valid Range: 64-4096. Default Value: 1024. This value is
- * the number of receive descriptors allocated for each RX queue. Increasing
- * this value allows the driver to buffer more incoming packets. Each
- * descriptor is 16 bytes. A receive buffer is also allocated for each
- * descriptor.
- *
- * Note: with 8 rings and a dual port card, it is possible to bump up
- *	against the system mbuf pool limit; you can tune nmbclusters
- *	to adjust for this.
- */
-#define DEFAULT_RXD 1024
-#define PERFORM_RXD 2048
-#define MAX_RXD 4096
-#define MIN_RXD 64
-
-/* Alignment for rings */
-#define DBA_ALIGN 128
-
-/*
- * This parameter controls the maximum number of times the driver will loop
- * in the ISR. Minimum Value = 1
- */
-#define MAX_LOOP 10
-
-/*
- * This is the max watchdog interval, i.e. the time that can
- * pass between any two TX clean operations; such cleaning only
- * happens when the TX hardware is functioning.
- */
-#define IXGBE_WATCHDOG (10 * hz)
-
-/*
- * These parameters control when the driver calls the routine to reclaim
- * transmit descriptors.
- */
-#define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
-#define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
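
With the default of 1024 TX descriptors these thresholds work out as follows:

/*
 * Example, num_tx_desc = 1024 (DEFAULT_TXD):
 *
 *	IXGBE_TX_CLEANUP_THRESHOLD = 1024 / 8  = 128 descriptors
 *	IXGBE_TX_OP_THRESHOLD      = 1024 / 32 =  32 descriptors
 */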
-
-#define IXGBE_MAX_FRAME_SIZE 0x3F00
-
-/* Flow control constants */
-#define IXGBE_FC_PAUSE 0xFFFF
-#define IXGBE_FC_HI 0x20000
-#define IXGBE_FC_LO 0x10000
-
-/* Keep older OS drivers building... */
-#if !defined(SYSCTL_ADD_UQUAD)
-#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
-#endif
-
-/* Defines for printing debug information */
-#define DEBUG_INIT 0
-#define DEBUG_IOCTL 0
-#define DEBUG_HW 0
-
-#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
-#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
-#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
-#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
-#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
-#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
-#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
-#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
-#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
-
-#define MAX_NUM_MULTICAST_ADDRESSES 128
-#define IXGBE_82598_SCATTER 100
-#define IXGBE_82599_SCATTER 32
-#define MSIX_82598_BAR 3
-#define MSIX_82599_BAR 4
-#define IXGBE_TSO_SIZE 65535
-#define IXGBE_TX_BUFFER_SIZE ((u32) 1514)
-#define IXGBE_RX_HDR 128
-#define IXGBE_VFTA_SIZE 128
-#define IXGBE_BR_SIZE 4096
-#define IXGBE_QUEUE_IDLE 0
-#define IXGBE_QUEUE_WORKING 1
-#define IXGBE_QUEUE_HUNG 2
-
-/* Offload bits in mbuf flag */
-#if __FreeBSD_version >= 800000
-#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
-#else
-#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
-#endif
-
-/* For 6.X code compatibility */
-#if !defined(ETHER_BPF_MTAP)
-#define ETHER_BPF_MTAP BPF_MTAP
-#endif
-
-#if __FreeBSD_version < 700000
-#define CSUM_TSO 0
-#define IFCAP_TSO4 0
-#endif
-
-/*
- * Interrupt Moderation parameters
- */
-#define IXGBE_LOW_LATENCY 128
-#define IXGBE_AVE_LATENCY 400
-#define IXGBE_BULK_LATENCY 1200
-#define IXGBE_LINK_ITR 2000
-
-/*
- *****************************************************************************
- * vendor_info_array
- *
- * This array contains the list of Subvendor/Subdevice IDs on which the driver
- * should load.
- *
- *****************************************************************************
- */
-typedef struct _ixgbe_vendor_info_t {
- unsigned int vendor_id;
- unsigned int device_id;
- unsigned int subvendor_id;
- unsigned int subdevice_id;
- unsigned int index;
-} ixgbe_vendor_info_t;
-
-
-struct ixgbe_tx_buf {
- u32 eop_index;
- struct mbuf *m_head;
- bus_dmamap_t map;
-};
-
-struct ixgbe_rx_buf {
- struct mbuf *m_head;
- struct mbuf *m_pack;
- struct mbuf *fmp;
- bus_dmamap_t hmap;
- bus_dmamap_t pmap;
-};
-
-/*
- * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
- */
-struct ixgbe_dma_alloc {
- bus_addr_t dma_paddr;
- caddr_t dma_vaddr;
- bus_dma_tag_t dma_tag;
- bus_dmamap_t dma_map;
- bus_dma_segment_t dma_seg;
- bus_size_t dma_size;
- int dma_nseg;
-};
-
-/*
-** Driver queue struct: this is the interrupt container
-** for the associated tx and rx ring.
-*/
-struct ix_queue {
- struct adapter *adapter;
- u32 msix; /* This queue's MSIX vector */
- u32 eims; /* This queue's EIMS bit */
- u32 eitr_setting;
- struct resource *res;
- void *tag;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- struct task que_task;
- struct taskqueue *tq;
- u64 irqs;
-};
-
-/*
- * The transmit ring, one per queue
- */
-struct tx_ring {
- struct adapter *adapter;
- struct mtx tx_mtx;
- u32 me;
- int queue_status;
- int watchdog_time;
- union ixgbe_adv_tx_desc *tx_base;
- struct ixgbe_dma_alloc txdma;
- u32 next_avail_desc;
- u32 next_to_clean;
- struct ixgbe_tx_buf *tx_buffers;
- volatile u16 tx_avail;
- u32 txd_cmd;
- bus_dma_tag_t txtag;
- char mtx_name[16];
-#if __FreeBSD_version >= 800000
- struct buf_ring *br;
-#endif
-#ifdef IXGBE_FDIR
- u16 atr_sample;
- u16 atr_count;
-#endif
- u32 bytes; /* used for AIM */
- u32 packets;
- /* Soft Stats */
- u64 no_desc_avail;
- u64 total_packets;
-};
-
-
-/*
- * The Receive ring, one per rx queue
- */
-struct rx_ring {
- struct adapter *adapter;
- struct mtx rx_mtx;
- u32 me;
- union ixgbe_adv_rx_desc *rx_base;
- struct ixgbe_dma_alloc rxdma;
- struct lro_ctrl lro;
- bool lro_enabled;
- bool hdr_split;
- bool hw_rsc;
- bool discard;
- u32 next_to_refresh;
- u32 next_to_check;
- char mtx_name[16];
- struct ixgbe_rx_buf *rx_buffers;
- bus_dma_tag_t htag;
- bus_dma_tag_t ptag;
-
- u32 bytes; /* Used for AIM calc */
- u32 packets;
-
- /* Soft stats */
- u64 rx_irq;
- u64 rx_split_packets;
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_discarded;
- u64 rsc_num;
-#ifdef IXGBE_FDIR
- u64 flm;
-#endif
-};
-
-/* Our adapter structure */
-struct adapter {
- struct ifnet *ifp;
- struct ixgbe_hw hw;
-
- struct ixgbe_osdep osdep;
- struct device *dev;
-
- struct resource *pci_mem;
- struct resource *msix_mem;
-
- /*
- * Interrupt resources: this set is
- * either used for legacy, or for Link
- * when doing MSIX
- */
- void *tag;
- struct resource *res;
-
- struct ifmedia media;
- struct callout timer;
- int msix;
- int if_flags;
-
- struct mtx core_mtx;
-
- eventhandler_tag vlan_attach;
- eventhandler_tag vlan_detach;
-
- u16 num_vlans;
- u16 num_queues;
-
- /*
-** Shadow VFTA table; this is needed because
- ** the real vlan filter table gets cleared during
- ** a soft reset and the driver needs to be able
- ** to repopulate it.
- */
- u32 shadow_vfta[IXGBE_VFTA_SIZE];
-
- /* Info about the interface */
- u32 optics;
- u32 fc; /* local flow ctrl setting */
- int advertise; /* link speeds */
- bool link_active;
- u16 max_frame_size;
- u16 num_segs;
- u32 link_speed;
- bool link_up;
- u32 linkvec;
-
- /* Mbuf cluster size */
- u32 rx_mbuf_sz;
-
- /* Support for pluggable optics */
- bool sfp_probe;
- struct task link_task; /* Link tasklet */
- struct task mod_task; /* SFP tasklet */
- struct task msf_task; /* Multispeed Fiber */
-#ifdef IXGBE_FDIR
- int fdir_reinit;
- struct task fdir_task;
-#endif
- struct taskqueue *tq;
-
- /*
- ** Queues:
-** This is the irq holder; it has
-** an RX/TX pair of rings associated
- ** with it.
- */
- struct ix_queue *queues;
-
- /*
- * Transmit rings:
- * Allocated at run time, an array of rings.
- */
- struct tx_ring *tx_rings;
- int num_tx_desc;
-
- /*
- * Receive rings:
- * Allocated at run time, an array of rings.
- */
- struct rx_ring *rx_rings;
- int num_rx_desc;
- u64 que_mask;
- u32 rx_process_limit;
-
- /* Multicast array memory */
- u8 *mta;
-
- /* Misc stats maintained by the driver */
- unsigned long dropped_pkts;
- unsigned long mbuf_defrag_failed;
- unsigned long mbuf_header_failed;
- unsigned long mbuf_packet_failed;
- unsigned long no_tx_map_avail;
- unsigned long no_tx_dma_setup;
- unsigned long watchdog_events;
- unsigned long tso_tx;
- unsigned long link_irq;
-
- struct ixgbe_hw_stats stats;
-};
-
-/* Precision Time Sync (IEEE 1588) defines */
-#define ETHERTYPE_IEEE1588 0x88F7
-#define PICOSECS_PER_TICK 20833
-#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
-#define IXGBE_ADVTXD_TSTAMP 0x00080000
-
-
-#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
- mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF)
-#define IXGBE_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
-#define IXGBE_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
-#define IXGBE_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
-#define IXGBE_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
-#define IXGBE_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
-#define IXGBE_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
-#define IXGBE_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
-#define IXGBE_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
-#define IXGBE_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
-#define IXGBE_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
-#define IXGBE_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
-#define IXGBE_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
-
-
-static inline bool
-ixgbe_is_sfp(struct ixgbe_hw *hw)
-{
- switch (hw->phy.type) {
- case ixgbe_phy_sfp_avago:
- case ixgbe_phy_sfp_ftl:
- case ixgbe_phy_sfp_intel:
- case ixgbe_phy_sfp_unknown:
- case ixgbe_phy_sfp_passive_tyco:
- case ixgbe_phy_sfp_passive_unknown:
- return TRUE;
- default:
- return FALSE;
- }
-}
-
-/* Workaround to make 8.0 buildable */
-#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
-static __inline int
-drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
-{
-#ifdef ALTQ
- if (ALTQ_IS_ENABLED(&ifp->if_snd))
- return (1);
-#endif
- return (!buf_ring_empty(br));
-}
-#endif
-
-/*
-** Find the number of unrefreshed RX descriptors
-*/
-static inline u16
-ixgbe_rx_unrefreshed(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
-
- if (rxr->next_to_check > rxr->next_to_refresh)
- return (rxr->next_to_check - rxr->next_to_refresh - 1);
- else
- return ((adapter->num_rx_desc + rxr->next_to_check) -
- rxr->next_to_refresh - 1);
-}
-
-#endif /* _IXGBE_H_ */
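For reference, the removed ixgbe_rx_unrefreshed() helper above counts RX
descriptors the driver has consumed but not yet refreshed, treating the
descriptor ring as circular. A minimal standalone sketch of the same
arithmetic (the ring size and index values below are illustrative, not
driver defaults):

#include <stdio.h>

/* Mirrors the removed helper: number of consumed-but-unrefreshed
 * descriptors in a circular ring, leaving one slot of slack. */
static unsigned int rx_unrefreshed(unsigned int num_rx_desc,
                                   unsigned int next_to_check,
                                   unsigned int next_to_refresh)
{
	if (next_to_check > next_to_refresh)
		return next_to_check - next_to_refresh - 1;
	return num_rx_desc + next_to_check - next_to_refresh - 1;
}

int main(void)
{
	/* check index has wrapped past the end of a 1024-entry ring */
	printf("%u\n", rx_unrefreshed(1024, 10, 1000)); /* prints 33 */
	return 0;
}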
***************************************************************************/
#include "ixgbe_type.h"
+#include "ixgbe_82598.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+#ident "$Id: ixgbe_82598.c,v 1.194 2012/03/28 00:54:08 jtkirshe Exp $"
-u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
-s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
-s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
bool autoneg,
bool autoneg_wait_to_complete);
STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
-void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
-s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
-s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
-void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
-void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
-/**
- * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
- * @hw: pointer to hardware structure
- *
- * Read PCIe configuration space, and get the MSI-X vector count from
- * the capabilities table.
- **/
-u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
-{
- u32 msix_count = 18;
-
- DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
-
- if (hw->mac.msix_vectors_from_pcie) {
- msix_count = IXGBE_READ_PCIE_WORD(hw,
- IXGBE_PCIE_MSIX_82598_CAPS);
- msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
- /* MSI-X count is zero-based in HW, so increment to give
- * proper value */
- msix_count++;
- }
- return msix_count;
-}
-
/**
* ixgbe_init_ops_82598 - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+ mac->ops.set_vlvf = NULL;
mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
/* Flow Control */
mac->rx_pb_size = 512;
mac->max_tx_queues = 32;
mac->max_rx_queues = 64;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/* SFP+ Module */
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
/**
* ixgbe_fc_enable_82598 - Enable flow control
* @hw: pointer to hardware structure
- * @packetbuf_num: packet buffer number (0-7)
*
* Enable flow control according to the current settings.
**/
-s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
+ u32 fcrtl, fcrth;
u32 link_speed = 0;
+ int i;
bool link_up;
DEBUGFUNC("ixgbe_fc_enable_82598");
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ DEBUGOUT("Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/*
* On 82598 having Rx FC on causes resets while doing 1G
* so if it's on turn it off once we know link_speed. For
}
/* Negotiate the fc mode to use */
- ret_val = ixgbe_fc_autoneg(hw);
- if (ret_val == IXGBE_ERR_FLOW_CONTROL)
- goto out;
+ ixgbe_fc_autoneg(hw);
/* Disable any previous flow control settings */
fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
- if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- reg = hw->fc.low_water << 6;
- if (hw->fc.send_xon)
- reg |= IXGBE_FCRTL_XONE;
-
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
-
- reg = hw->fc.high_water[packetbuf_num] << 6;
- reg |= IXGBE_FCRTH_FCEN;
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+ }
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
}
/* Configure pause time (2 TCs per register) */
- reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
- if ((packetbuf_num & 1) == 0)
- reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
- else
- reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
- IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
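+	/* Note: pause_time is 16 bits wide and x * 0x00010001 == (x << 16) | x,
+	 * so the multiply below replicates the pause time into both halves of
+	 * each 32-bit FCTTV register, i.e. one 16-bit slot per traffic class.
+	 */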
+ reg = hw->fc.pause_time * 0x00010001;
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
- IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
out:
return ret_val;
(ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
*link_up = false;
- /* if link is down, zero out the current_mode */
- if (*link_up == FALSE) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = FALSE;
- }
out:
return IXGBE_SUCCESS;
}
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
- UNREFERENCED_1PARAMETER(vmdq);
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
{
u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
- UNREFERENCED_1PARAMETER(headroom);
if (!num_pb)
return;
--- /dev/null
+/*******************************************************************************
+
+Copyright (c) 2001-2012, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_82598_H_
+#define _IXGBE_82598_H_
+#ident "$Id: ixgbe_82598.h,v 1.3 2012/03/27 22:16:51 jtkirshe Exp $"
+
+u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+#endif /* _IXGBE_82598_H_ */
***************************************************************************/
#include "ixgbe_type.h"
+#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+#ident "$Id: ixgbe_82599.c,v 1.301 2012/11/08 11:33:27 jtkirshe Exp $"
-s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
-s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
-enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
-void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
-s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
-void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
-s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
u16 offset, u16 *data);
STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
- u32 reg_anlp1 = 0;
- u32 i = 0;
u16 list_offset, data_offset, data_value;
+ bool got_lock = false;
DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
/* Delay obtaining semaphore again to allow FW access */
msec_delay(hw->eeprom.semaphore_delay);
- /* Now restart DSP by setting Restart_AN and clearing LMS */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
- IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
- IXGBE_AUTOC_AN_RESTART));
-
- /* Wait for AN to leave state 0 */
- for (i = 0; i < 10; i++) {
- msec_delay(4);
- reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
- break;
+	/* Need SW/FW semaphore around AUTOC writes if LESM is on;
+	 * likewise reset_pipeline requires the lock as it also writes
+ * AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto setup_sfp_out;
+ }
+
+ got_lock = true;
+ }
+
+ /* Restart DSP and set SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL));
+
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock) {
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ got_lock = false;
}
- if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+
+ if (ret_val) {
DEBUGOUT("sfp module setup not complete\n");
ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
goto setup_sfp_out;
}
- /* Restart DSP by setting Restart_AN and return to SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
- IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
- IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
mac->ops.get_media_type = &ixgbe_get_media_type_82599;
mac->ops.get_supported_physical_layer =
&ixgbe_get_supported_physical_layer_82599;
+ mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
/* Check if 1G SFP module. */
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = true;
goto out;
case IXGBE_DEV_ID_82599_SFP:
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
media_type = ixgbe_media_type_fiber;
break;
u32 links_reg;
u32 i;
s32 status = IXGBE_SUCCESS;
+ bool got_lock = false;
DEBUGFUNC("ixgbe_start_mac_link_82599");
+ /* reset_pipeline requires us to hold this lock as it writes to
+ * AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ got_lock = true;
+ }
+
/* Restart link */
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR ||
(autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
/* Add delay to filter out noises during initial link setup */
msec_delay(50);
+out:
return status;
}
u32 links_reg;
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ bool got_lock = false;
DEBUGFUNC("ixgbe_setup_mac_link_82599");
/* Check to see if speed passed in is supported. */
status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
- if (status != IXGBE_SUCCESS)
+ if (status)
goto out;
speed &= link_capabilities;
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
autoc |= IXGBE_AUTOC_KX4_SUPP;
if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
(hw->phy.smart_speed_active == false))
autoc |= IXGBE_AUTOC_KR_SUPP;
+ }
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
}
if (autoc != start_autoc) {
+ /* Need SW/FW semaphore around AUTOC writes if LESM is on,
+ * likewise reset_pipeline requires us to hold this lock as
+ * it also writes to AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != IXGBE_SUCCESS) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ got_lock = true;
+ }
+
/* Restart link */
- autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock) {
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ got_lock = false;
+ }
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
*/
autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- if (hw->mac.orig_link_settings_stored == FALSE) {
+
+ /* Enable link if disabled in NVM */
+ if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+ autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ if (hw->mac.orig_link_settings_stored == false) {
hw->mac.orig_autoc = autoc;
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = true;
} else {
- if (autoc != hw->mac.orig_autoc)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
- IXGBE_AUTOC_AN_RESTART));
+ if (autoc != hw->mac.orig_autoc) {
+ /* Need SW/FW semaphore around AUTOC writes if LESM is
+ * on, likewise reset_pipeline requires us to hold
+ * this lock as it also writes to AUTOC.
+ */
+ bool got_lock = false;
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != IXGBE_SUCCESS) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto reset_hw_out;
+ }
+
+ got_lock = true;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ }
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
IXGBE_FDIRCTRL_INIT_DONE)
break;
- usec_delay(10);
+ msec_delay(1);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
DEBUGOUT("Flow Director Signature poll time exceeded!\n");
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
goto out;
else
- status = ixgbe_identify_sfp_module_generic(hw);
+ status = ixgbe_identify_module_generic(hw);
}
/* Set PHY type none if no PHY detected */
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
break;
default:
break;
**/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
-#define IXGBE_MAX_SECRX_POLL 30
- int i;
- int secrxreg;
DEBUGFUNC("ixgbe_enable_rx_dma_82599");
* the Rx DMA unit. Therefore, make sure the security engine is
* completely disabled prior to enabling the Rx unit.
*/
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
- secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
- for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
- if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
- break;
- else
- /* Use interrupt-safe sleep just in case */
- usec_delay(10);
- }
- /* For informational purposes only */
- if (i >= IXGBE_MAX_SECRX_POLL)
- DEBUGOUT("Rx unit being enabled before security "
- "path fully disabled. Continuing with init.\n");
+ hw->mac.ops.disable_sec_rx_path(hw);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
- secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
- IXGBE_WRITE_FLUSH(hw);
+
+ hw->mac.ops.enable_sec_rx_path(hw);
return IXGBE_SUCCESS;
}
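The open-coded security-path handling deleted above moves behind the new
disable_sec_rx_path/enable_sec_rx_path MAC ops. A sketch of what the
disable half does, reconstructed from the removed lines (the generic
helper's real body lives outside this patch and may differ; the sketch
assumes the driver's register macros and usec_delay() from the
surrounding code):

/* Reconstruction of the removed polling logic. */
#define EXAMPLE_MAX_SECRX_POLL 30 /* mirrors the removed IXGBE_MAX_SECRX_POLL */

static s32 example_disable_sec_rx_path(struct ixgbe_hw *hw)
{
	u32 secrxreg;
	int i;

	/* Ask the security engine to stop accepting Rx traffic */
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);

	/* Bounded wait for the engine to report it is quiesced */
	for (i = 0; i < EXAMPLE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		usec_delay(10);
	}
	return IXGBE_SUCCESS;
}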
* Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
* if the FW version is not supported.
**/
-STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM_VERSION;
u16 fw_offset, fw_ptp_cfg_offset;
return ret_val;
}
+
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+ s32 i, autoc_reg, autoc2_reg, ret_val;
+ s32 anlp1_reg = 0;
+
+ /* Enable link if disabled in NVM */
+ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+ autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
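+	/* Since (x ^ m) ^ m == x, XOR-ing AUTOC with the LMS_1G_AN mask
+	 * flips only that link-mode bit; the epilogue below rewrites
+	 * autoc_reg, restoring the original LMS field once AN restarts.
+	 */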
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ msec_delay(4);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+
+ if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+ DEBUGOUT("auto negotiation not completed\n");
+ ret_val = IXGBE_ERR_RESET_FAILED;
+ goto reset_pipeline_out;
+ }
+
+ ret_val = IXGBE_SUCCESS;
+
+reset_pipeline_out:
+ /* Write AUTOC register with original LMS field and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
+
+
--- /dev/null
+/*******************************************************************************
+
+Copyright (c) 2001-2012, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_82599_H_
+#define _IXGBE_82599_H_
+#ident "$Id: ixgbe_82599.h,v 1.7 2012/10/03 07:10:29 jtkirshe Exp $"
+
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
+#endif /* _IXGBE_82599_H_ */
#include "ixgbe_api.h"
#include "ixgbe_common.h"
-
-extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+#ident "$Id: ixgbe_api.c,v 1.187 2012/11/08 10:11:52 jtkirshe Exp $"
/**
* ixgbe_init_shared_code - Initialize the shared code
DEBUGFUNC("ixgbe_set_mac_type\n");
- if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
switch (hw->device_id) {
case IXGBE_DEV_ID_82598:
case IXGBE_DEV_ID_82598_BX:
case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
case IXGBE_DEV_ID_82599_VF:
+ case IXGBE_DEV_ID_82599_VF_HV:
hw->mac.type = ixgbe_mac_82599_vf;
break;
case IXGBE_DEV_ID_X540_VF:
+ case IXGBE_DEV_ID_X540_VF_HV:
hw->mac.type = ixgbe_mac_X540_vf;
break;
case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
hw->mac.type = ixgbe_mac_X540;
break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
}
- } else {
- ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
- }
DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
hw->mac.type, ret_val);
{
return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
IXGBE_NOT_IMPLEMENTED);
+
+}
+
+/**
+ * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address
+ * @hw: pointer to hardware structure
+ * @vmdq: VMDq default pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac,
+ (hw, vmdq), IXGBE_NOT_IMPLEMENTED);
}
/**
vlan_on), IXGBE_NOT_IMPLEMENTED);
}
+/**
+ * ixgbe_set_vlvf - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ * should be changed
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ bool *vfta_changed)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
+ vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
+}
+
/**
* ixgbe_fc_enable - Enable flow control
* @hw: pointer to hardware structure
- * @packetbuf_num: packet buffer number (0-7)
*
* Configures the flow control settings based on SW configuration.
**/
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
{
- return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw, packetbuf_num),
+ return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw),
IXGBE_NOT_IMPLEMENTED);
}
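With packetbuf_num gone from the API, callers now describe the whole flow
control configuration through the hw->fc structure before the call. A
hedged caller sketch (the numeric thresholds are placeholders, not
recommended values; it assumes the driver headers that define struct
ixgbe_hw and IXGBE_DCB_MAX_TRAFFIC_CLASS):

static s32 example_configure_fc(struct ixgbe_hw *hw)
{
	int tc;

	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = 0x680;	/* placeholder pause quanta */
	hw->fc.disable_fc_autoneg = false;

	/* Per-TC water marks: when Tx pause is in effect, low must be
	 * non-zero and strictly below high, or the enable paths above
	 * reject the configuration. */
	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
		hw->fc.low_water[tc] = 0x40;	/* placeholder */
		hw->fc.high_water[tc] = 0x80;	/* placeholder */
	}

	return ixgbe_fc_enable(hw);
}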
(hw, regval), IXGBE_NOT_IMPLEMENTED);
}
+/**
+ * ixgbe_disable_sec_rx_path - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path.
+ **/
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path,
+ (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_sec_rx_path - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path,
+ (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
/**
* ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
* @hw: pointer to hardware structure
#define _IXGBE_API_H_
#include "ixgbe_type.h"
+#ident "$Id: ixgbe_api.h,v 1.115 2012/08/23 23:30:15 jtkirshe Exp $"
s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
s32 ixgbe_init_hw(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
u32 enable_addr);
s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on);
-
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed);
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
u8 ver);
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
union ixgbe_atr_input *mask);
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 *data);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#include "ixgbe_api.h"
+#ident "$Id: ixgbe_common.c,v 1.349 2012/11/05 23:08:30 jtkirshe Exp $"
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
u16 *san_mac_offset);
-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
- u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
-
/**
* ixgbe_init_ops_generic - Inits function ptrs
* @hw: pointer to the hardware structure
mac->ops.disable_mc = &ixgbe_disable_mc_generic;
mac->ops.clear_vfta = NULL;
mac->ops.set_vfta = NULL;
+ mac->ops.set_vlvf = NULL;
mac->ops.init_uta_tables = NULL;
/* Flow Control */
return IXGBE_SUCCESS;
}
+/**
+ * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ * control
+ * @hw: pointer to hardware structure
+ *
+ * There are several phys that do not support autoneg flow control. This
+ * function checks the device id to see if the associated phy supports
+ * autoneg flow control.
+ **/
+s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+
+ DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ return IXGBE_SUCCESS;
+ default:
+ return IXGBE_ERR_FC_NOT_SUPPORTED;
+ }
+}
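+/* Note: despite the boolean-sounding name, this returns an s32 status;
+ * callers compare against IXGBE_SUCCESS rather than testing truthiness,
+ * as the copper branch of ixgbe_setup_fc() below does:
+ *	if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS) ...
+ */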
+
+/**
+ * ixgbe_setup_fc - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 reg = 0, reg_bp = 0;
+ u16 reg_cu = 0;
+ bool got_lock = false;
+
+ DEBUGFUNC("ixgbe_setup_fc");
+
+ /*
+ * Validate the requested mode. Strict IEEE mode does not allow
+ * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
+ */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /*
+ * Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do fc autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_backplane:
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ break;
+ case ixgbe_media_type_copper:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= IXGBE_PCS1GANA_ASM_PAUSE;
+ reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
+ reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
+ } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+ reg_cu |= IXGBE_TAF_ASM_PAUSE;
+ reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
+ }
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE;
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ if (hw->mac.type != ixgbe_mac_X540) {
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+ /* Disable AN timeout */
+ if (hw->fc.strict_ieee)
+ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+ DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ }
+
+ /*
+ * AUTOC restart handles negotiation of 1G and 10G on backplane
+ * and copper. There is no need to set the PCS1GCTL register.
+ *
+ */
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+		 * LESM is on; likewise reset_pipeline requires the lock as
+ * it also writes AUTOC.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+ got_lock = true;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+ (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
+ }
+
+ DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+out:
+ return ret_val;
+}
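+/* Advertisement summary for the switch above, where SYM/ASM stand for
+ * the SYM_PAUSE and ASM_PAUSE bits of whichever register family matches
+ * the media type (PCS1GANA, AUTOC, or the copper MDIO advertisement):
+ *	ixgbe_fc_none     -> SYM=0 ASM=0  no pause advertised
+ *	ixgbe_fc_tx_pause -> SYM=0 ASM=1  asymmetric only
+ *	ixgbe_fc_rx_pause -> advertises as full (SYM=1 ASM=1); Tx pause
+ *	                     is suppressed later, after negotiation
+ *	ixgbe_fc_full     -> SYM=1 ASM=1  symmetric and asymmetric
+ */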
+
/**
* ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
**/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
+ s32 ret_val;
u32 ctrl_ext;
DEBUGFUNC("ixgbe_start_hw_generic");
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ixgbe_setup_fc(hw, 0);
+ ret_val = ixgbe_setup_fc(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
- return IXGBE_SUCCESS;
+out:
+ return ret_val;
}
/**
/* Disable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
}
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
if (hw->mac.type == ixgbe_mac_X540) {
if (hw->phy.id == 0)
ixgbe_identify_phy(hw);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
}
return IXGBE_SUCCESS;
return IXGBE_SUCCESS;
}
+/**
+ * ixgbe_read_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @max_pba_block_size: PBA block size limit
+ * @pba: pointer to output PBA structure
+ *
+ * Reads PBA from EEPROM image when eeprom_buf is not NULL.
+ * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct ixgbe_pba *pba)
+{
+ s32 ret_val;
+ u16 pba_block_size;
+
+ if (pba == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+ pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return IXGBE_ERR_PARAM;
+
+ ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
+ eeprom_buf_size,
+ &pba_block_size);
+ if (ret_val)
+ return ret_val;
+
+ if (pba_block_size > max_pba_block_size)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
+ pba_block_size,
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba->pba_block[0])) {
+ memcpy(pba->pba_block,
+ &eeprom_buf[pba->word[1]],
+ pba_block_size * sizeof(u16));
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba: pointer to PBA structure
+ *
+ * Writes PBA to EEPROM image when eeprom_buf is not NULL.
+ * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct ixgbe_pba *pba)
+{
+ s32 ret_val;
+
+ if (pba == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
+ eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
+ pba->pba_block[0],
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba->pba_block[0])) {
+ memcpy(&eeprom_buf[pba->word[1]],
+ pba->pba_block,
+ pba->pba_block[0] * sizeof(u16));
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_pba_block_size
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba_block_size: pointer to output variable
+ *
+ * Returns the size of the PBA block in words. Function operates on EEPROM
+ * image if the eeprom_buf pointer is not NULL; otherwise it accesses the
+ * physical EEPROM device.
+ *
+ **/
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size)
+{
+ s32 ret_val;
+ u16 pba_word[2];
+ u16 length;
+
+ DEBUGFUNC("ixgbe_get_pba_block_size");
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba_word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+ pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
+ &length);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > pba_word[1])
+ length = eeprom_buf[pba_word[1] + 0];
+ else
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (length == 0xFFFF || length == 0)
+ return IXGBE_ERR_PBA_SECTION;
+ } else {
+		/* PBA number in legacy format; there is no PBA block. */
+ length = 0;
+ }
+
+ if (pba_block_size != NULL)
+ *pba_block_size = length;
+
+ return IXGBE_SUCCESS;
+}
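+/* Usage sketch for the PBA helpers above, reading from the physical
+ * EEPROM (eeprom_buf == NULL); the 32-word cap is a placeholder, and
+ * struct ixgbe_pba is assumed to expose only the word[] and pba_block
+ * members used above:
+ *
+ *	u16 block[32];
+ *	struct ixgbe_pba pba = { .pba_block = block };
+ *	s32 err = ixgbe_read_pba_raw(hw, NULL, 0, 32, &pba);
+ *
+ * For pointer-format PBAs (word[0] == IXGBE_PBANUM_PTR_GUARD) the block
+ * length is read first and checked against the cap before the copy.
+ */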
+
/**
* ixgbe_get_mac_addr_generic - Generic get MAC address
* @hw: pointer to hardware structure
case IXGBE_PCI_LINK_SPEED_5000:
hw->bus.speed = ixgbe_bus_speed_5000;
break;
+ case IXGBE_PCI_LINK_SPEED_8000:
+ hw->bus.speed = ixgbe_bus_speed_8000;
+ break;
default:
hw->bus.speed = ixgbe_bus_speed_unknown;
break;
/**
* ixgbe_fc_enable_generic - Enable flow control
* @hw: pointer to hardware structure
- * @packetbuf_num: packet buffer number (0-7)
*
* Enable flow control according to the current settings.
**/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
u32 mflcn_reg, fccfg_reg;
u32 reg;
u32 fcrtl, fcrth;
+ int i;
DEBUGFUNC("ixgbe_fc_enable_generic");
- /* Negotiate the fc mode to use */
- ret_val = ixgbe_fc_autoneg(hw);
- if (ret_val == IXGBE_ERR_FLOW_CONTROL)
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ DEBUGOUT("Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
+ /* Negotiate the fc mode to use */
+ ixgbe_fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
- /* Disable any previous flow control settings */
- mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
- mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
-
fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- fcrth = hw->fc.high_water[packetbuf_num] << 10;
- fcrtl = hw->fc.low_water << 10;
- if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- fcrth |= IXGBE_FCRTH_FCEN;
- if (hw->fc.send_xon)
- fcrtl |= IXGBE_FCRTL_XONE;
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+ fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the maximum FCRTH value. This allows the Tx
+ * switch to function even under heavy Rx workloads.
+ */
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
}
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+ }
/* Configure pause time (2 TCs per register) */
- reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
- if ((packetbuf_num & 1) == 0)
- reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
- else
- reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
- IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+ reg = hw->fc.pause_time * 0x00010001;
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
- IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
out:
return ret_val;
}
/**
- * ixgbe_fc_autoneg - Configure flow control
+ * ixgbe_negotiate_fc - Negotiate flow control
* @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
*
- * Compares our advertised flow control capabilities to those advertised by
- * our link partner, and determines the proper flow control mode to use.
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings.
**/
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
- s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
- ixgbe_link_speed speed;
- bool link_up;
-
- DEBUGFUNC("ixgbe_fc_autoneg");
-
- if (hw->fc.disable_fc_autoneg)
- goto out;
+ if ((!(adv_reg)) || (!(lp_reg)))
+ return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
/*
- * AN should have completed when the cable was plugged in.
- * Look for reasons to bail out. Bail out if:
- * - FC autoneg is disabled, or if
- * - link is not up.
- *
- * Since we're being called from an LSC, link is already known to be up.
- * So use link_up_wait_to_complete=FALSE.
+ * Now we need to check if the user selected Rx ONLY
+ * for pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
*/
- hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
- if (!link_up) {
- ret_val = IXGBE_ERR_FLOW_CONTROL;
- goto out;
- }
-
- switch (hw->phy.media_type) {
- /* Autoneg flow control on fiber adapters */
- case ixgbe_media_type_fiber:
- if (speed == IXGBE_LINK_SPEED_1GB_FULL)
- ret_val = ixgbe_fc_autoneg_fiber(hw);
- break;
-
- /* Autoneg flow control on backplane adapters */
- case ixgbe_media_type_backplane:
- ret_val = ixgbe_fc_autoneg_backplane(hw);
- break;
-
- /* Autoneg flow control on copper adapters */
- case ixgbe_media_type_copper:
- if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
- ret_val = ixgbe_fc_autoneg_copper(hw);
- break;
-
- default:
- break;
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control=RX PAUSE frames only\n");
}
-
-out:
- if (ret_val == IXGBE_SUCCESS) {
- hw->fc.fc_was_autonegged = TRUE;
+ } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+ } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
} else {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
+ hw->fc.current_mode = ixgbe_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
}
- return ret_val;
+ return IXGBE_SUCCESS;
}
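
The branch ladder above implements the IEEE 802.3 pause resolution.
Summarized (local vs. link partner PAUSE/ASM_DIR bits), under the
convention that FULL is advertised whenever Rx-only was requested:

  local sym  local asym  lp sym  lp asym  resolved current_mode
      1          x          1       x     full (rx_pause if only Rx requested)
      0          1          1       1     tx_pause
      1          1          0       1     rx_pause
      any other combination               none
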
/**
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
- s32 ret_val;
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
/*
* On multispeed fiber at 1g, bail out if
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
- (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
- ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
goto out;
- }
pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
u32 links2, anlp1_reg, autoc_reg, links;
- s32 ret_val;
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
/*
* On backplane, bail out if
* - we are 82599 and link partner is not AN enabled
*/
links = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
- ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
goto out;
- }
if (hw->mac.type == ixgbe_mac_82599EB) {
links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
- if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
- ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
goto out;
}
- }
/*
* Read the 10g AN autoc and LP ability registers and resolve
* local flow control settings accordingly
}
/**
- * ixgbe_negotiate_fc - Negotiate flow control
- * @hw: pointer to hardware structure
- * @adv_reg: flow control advertised settings
- * @lp_reg: link partner's flow control settings
- * @adv_sym: symmetric pause bit in advertisement
- * @adv_asm: asymmetric pause bit in advertisement
- * @lp_sym: symmetric pause bit in link partner advertisement
- * @lp_asm: asymmetric pause bit in link partner advertisement
- *
- * Find the intersection between advertised settings and link partner's
- * advertised settings
- **/
-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
- u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
-{
- if ((!(adv_reg)) || (!(lp_reg)))
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
-
- if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
- /*
- * Now we need to check if the user selected Rx ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == ixgbe_fc_full) {
- hw->fc.current_mode = ixgbe_fc_full;
- DEBUGOUT("Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control=RX PAUSE frames only\n");
- }
- } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
- (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
- hw->fc.current_mode = ixgbe_fc_tx_pause;
- DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
- } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
- !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_none;
- DEBUGOUT("Flow Control = NONE.\n");
- }
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_setup_fc - Set up flow control
+ * ixgbe_fc_autoneg - Configure flow control
* @hw: pointer to hardware structure
*
- * Called at init time to set up flow control.
+ * Compares our advertised flow control capabilities to those advertised by
+ * our link partner, and determines the proper flow control mode to use.
**/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
- s32 ret_val = IXGBE_SUCCESS;
- u32 reg = 0, reg_bp = 0;
- u16 reg_cu = 0;
-
- DEBUGFUNC("ixgbe_setup_fc");
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
+ bool link_up;
- /* Validate the packetbuf configuration */
- if (packetbuf_num < 0 || packetbuf_num > 7) {
- DEBUGOUT1("Invalid packet buffer number [%d], expected range "
- "is 0-7\n", packetbuf_num);
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
- goto out;
- }
+ DEBUGFUNC("ixgbe_fc_autoneg");
/*
- * Validate the water mark configuration. Zero water marks are invalid
- * because it causes the controller to just blast out fc packets.
+ * AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
*/
- if (!hw->fc.low_water ||
- !hw->fc.high_water[packetbuf_num] ||
- !hw->fc.pause_time) {
- DEBUGOUT("Invalid water mark configuration\n");
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ if (hw->fc.disable_fc_autoneg)
goto out;
- }
- /*
- * Validate the requested mode. Strict IEEE mode does not allow
- * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
- */
- if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
- DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up)
goto out;
- }
-
- /*
- * 10gig parts do not have a word in the EEPROM to determine the
- * default flow control setting, so we explicitly set it to full.
- */
- if (hw->fc.requested_mode == ixgbe_fc_default)
- hw->fc.requested_mode = ixgbe_fc_full;
-
- /*
- * Set up the 1G and 10G flow control advertisement registers so the
- * HW will be able to do fc autoneg once the cable is plugged in. If
- * we link at 10G, the 1G advertisement is harmless and vice versa.
- */
switch (hw->phy.media_type) {
+ /* Autoneg flow control on fiber adapters */
case ixgbe_media_type_fiber:
+ if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+ ret_val = ixgbe_fc_autoneg_fiber(hw);
+ break;
+
+ /* Autoneg flow control on backplane adapters */
case ixgbe_media_type_backplane:
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ ret_val = ixgbe_fc_autoneg_backplane(hw);
break;
+ /* Autoneg flow control on copper adapters */
case ixgbe_media_type_copper:
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
+ if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
+ ret_val = ixgbe_fc_autoneg_copper(hw);
break;
default:
- ;
- }
-
- /*
- * The possible values of fc.requested_mode are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
- * we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- * other: Invalid.
- */
- switch (hw->fc.requested_mode) {
- case ixgbe_fc_none:
- /* Flow control completely disabled by software override. */
- reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- if (hw->phy.media_type == ixgbe_media_type_backplane)
- reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
- IXGBE_AUTOC_ASM_PAUSE);
- else if (hw->phy.media_type == ixgbe_media_type_copper)
- reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
- break;
- case ixgbe_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is
- * disabled by software override. Since there really
- * isn't a way to advertise that we are capable of RX
- * Pause ONLY, we will advertise that we support both
- * symmetric and asymmetric Rx PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- if (hw->phy.media_type == ixgbe_media_type_backplane)
- reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
- IXGBE_AUTOC_ASM_PAUSE);
- else if (hw->phy.media_type == ixgbe_media_type_copper)
- reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
- case ixgbe_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
- * disabled by software override.
- */
- reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
- reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
- reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
- reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
- } else if (hw->phy.media_type == ixgbe_media_type_copper) {
- reg_cu |= (IXGBE_TAF_ASM_PAUSE);
- reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
- }
- break;
- case ixgbe_fc_full:
- /* Flow control (both Rx and Tx) is enabled by SW override. */
- reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- if (hw->phy.media_type == ixgbe_media_type_backplane)
- reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
- IXGBE_AUTOC_ASM_PAUSE);
- else if (hw->phy.media_type == ixgbe_media_type_copper)
- reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
- ret_val = IXGBE_ERR_CONFIG;
- goto out;
- break;
- }
-
- if (hw->mac.type != ixgbe_mac_X540) {
- /*
- * Enable auto-negotiation between the MAC & PHY;
- * the MAC will advertise clause 37 flow control.
- */
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
- /* Disable AN timeout */
- if (hw->fc.strict_ieee)
- reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
-
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
- DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
}
- /*
- * AUTOC restart handles negotiation of 1G and 10G on backplane
- * and copper. There is no need to set the PCS1GCTL register.
- *
- */
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
- reg_bp |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
- } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
- }
-
- DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
out:
- return ret_val;
+ if (ret_val == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
}
/**
ixgbe_release_eeprom_semaphore(hw);
}
+/**
+ * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECRX_POLL 40
+
+ int i;
+ int secrxreg;
+
+ DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+ if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+ break;
+ else
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(1000);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECRX_POLL)
+ DEBUGOUT("Rx unit being enabled before security "
+ "path fully disabled. Continuing with init.\n");
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+ int secrxreg;
+
+ DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
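
A hedged usage sketch: the pair is meant to bracket Rx-side
reconfiguration through the ops pointers this patch adds, so the
security block drains before registers change:

	hw->mac.ops.disable_sec_rx_path(hw);
	/* ... safely reprogram Rx registers here ... */
	hw->mac.ops.enable_sec_rx_path(hw);
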
+
/**
* ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
* @hw: pointer to hardware structure
bool link_up = 0;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_blink_led_start_generic");
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (!link_up) {
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on.
+ */
+ bool got_lock = false;
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+ got_lock = true;
+ }
+
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
IXGBE_WRITE_FLUSH(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
msec_delay(10);
}
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
- return IXGBE_SUCCESS;
+out:
+ return ret_val;
}
/**
{
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = IXGBE_SUCCESS;
+ bool got_lock = false;
DEBUGFUNC("ixgbe_blink_led_stop_generic");
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+ got_lock = true;
+ }
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
- return IXGBE_SUCCESS;
+out:
+ return ret_val;
}
/**
* Read PCIe configuration space, and get the MSI-X vector count from
* the capabilities table.
**/
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
- u32 msix_count = 64;
+ u16 msix_count = 1;
+ u16 max_msix_count;
+ u16 pcie_offset;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
+ default:
+ return msix_count;
+ }
DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
- if (hw->mac.msix_vectors_from_pcie) {
- msix_count = IXGBE_READ_PCIE_WORD(hw,
- IXGBE_PCIE_MSIX_82599_CAPS);
+ msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
- /* MSI-X count is zero-based in HW, so increment to give
- * proper value */
+ /* MSI-X count is zero-based in HW */
msix_count++;
- }
+
+ if (msix_count > max_msix_count)
+ msix_count = max_msix_count;
return msix_count;
}
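
Because the table-size field is zero-based, a raw value of 0x3F reads back
as 0x3F + 1 = 64 vectors, which the new clamp caps at
IXGBE_MAX_MSIX_VECTORS_82599 (0x40 = 64); an 82598 is likewise capped at
0x13 = 19 vectors, and unknown MAC types fall back to the single-vector
default.
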
return IXGBE_SUCCESS;
}
+/**
+ * ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with a Rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
+ * This function should only be used in IOV mode. In IOV mode the default
+ * pool is the next pool after the number of VFs advertised, not pool 0.
+ * The MPSAR table entry for the SAN MAC RAR [hw->mac.san_mac_rar_index]
+ * needs to be updated accordingly.
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+ u32 rar = hw->mac.san_mac_rar_index;
+
+ DEBUGFUNC("ixgbe_set_vmdq_san_mac");
+
+ if (vmdq < 32) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ }
+
+ return IXGBE_SUCCESS;
+}
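
Worked example: vmdq = 40 falls in the upper word, so the function writes
IXGBE_MPSAR_LO(rar) = 0 and IXGBE_MPSAR_HI(rar) = 1 << (40 - 32) = 0x100,
mapping the SAN MAC RAR to pool 40 only.
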
+
/**
* ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
s32 regindex;
u32 bitindex;
u32 vfta;
- u32 bits;
- u32 vt;
u32 targetbit;
- bool vfta_changed = FALSE;
+ s32 ret_val = IXGBE_SUCCESS;
+ bool vfta_changed = false;
DEBUGFUNC("ixgbe_set_vfta_generic");
if (vlan_on) {
if (!(vfta & targetbit)) {
vfta |= targetbit;
- vfta_changed = TRUE;
+ vfta_changed = true;
}
} else {
if ((vfta & targetbit)) {
vfta &= ~targetbit;
- vfta_changed = TRUE;
+ vfta_changed = true;
}
}
/* Part 2
- * If VT Mode is set
+ * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
+ */
+ ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
+ &vfta_changed);
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ if (vfta_changed)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ * should be changed
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed)
+{
+ u32 vt;
+
+ DEBUGFUNC("ixgbe_set_vlvf_generic");
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /* If VT Mode is set
* Either vlan_on
* make sure the vlan is in VLVF
* set the vind bit in the matching VLVFB
vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (vt & IXGBE_VT_CTL_VT_ENABLE) {
s32 vlvf_index;
+ u32 bits;
vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
if (vlvf_index < 0)
if (bits) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
(IXGBE_VLVF_VIEN | vlan));
- if (!vlan_on) {
+ if ((!vlan_on) && (vfta_changed != NULL)) {
/* someone wants to clear the vfta entry
* but some pools/VFs are still using it.
* Ignore it. */
- vfta_changed = FALSE;
- }
+ *vfta_changed = false;
}
- else
+ } else
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
}
- if (vfta_changed)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
-
return IXGBE_SUCCESS;
}
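
A minimal caller sketch, assuming VLAN 100 on pool 3 (both values
illustrative); passing a real vfta_changed pointer lets the helper veto
the VFTA clear while other pools still reference the VLAN:

	bool vfta_changed = true;

	if (ixgbe_set_vlvf_generic(hw, 100, 3, false, &vfta_changed) ==
	    IXGBE_SUCCESS && vfta_changed) {
		/* no other pool uses VLAN 100; its VFTA bit may be cleared */
	}
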
else
*speed = IXGBE_LINK_SPEED_UNKNOWN;
- /* if link is down, zero out the current_mode */
- if (*link_up == FALSE) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = FALSE;
- }
-
return IXGBE_SUCCESS;
}
return status;
}
-/**
- * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
- * control
- * @hw: pointer to hardware structure
- *
- * There are several phys that do not support autoneg flow control. This
- * function check the device id to see if the associated phy supports
- * autoneg flow control.
- **/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
-{
-
- DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X540T:
- return IXGBE_SUCCESS;
- case IXGBE_DEV_ID_82599_T3_LOM:
- return IXGBE_SUCCESS;
- default:
- return IXGBE_ERR_FC_NOT_SUPPORTED;
- }
-}
-
/**
* ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
* @hw: pointer to hardware structure
* PFVFSPOOF register array is size 8 with 8 bits assigned to
* MAC anti-spoof enables in each register array element.
*/
- for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ for (j = 0; j < pf_target_reg; j++)
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
- /* If not enabling anti-spoofing then done */
- if (!enable)
- return;
-
/*
* The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Reset the bit assigned to the PF
+ * emulation mode NICs. Do not set the bits assigned to the PF.
*/
- pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
- pfvfspoof ^= (1 << pf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+ pfvfspoof &= (1 << pf_target_shift) - 1;
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+ /*
+ * Remaining pools belong to the PF so they do not need to have
+ * anti-spoofing enabled.
+ */
+ for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
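
Worked example, assuming the PF's pool lands at bit 4 of register j
(pf_target_shift == 4): (1 << 4) - 1 = 0x0F, so pfvfspoof keeps
anti-spoofing enabled only for the VF pools in bits 0-3 and leaves the
PF's own bit, and everything above it, clear.
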
/**
/* Enable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
}
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
* Communicates with the manageability block. On success return IXGBE_SUCCESS
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
+static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length)
{
- u32 hicr, i;
+ u32 hicr, i, bi;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
u8 buf_len, dword_len;
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, *((u32 *)buffer + i));
+ i, IXGBE_CPU_TO_LE32(buffer[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
dword_len = hdr_size >> 2;
/* first pull in the header so we know the buffer length */
- for (i = 0; i < dword_len; i++)
- *((u32 *)buffer + i) =
- IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+ for (bi = 0; bi < dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ }
/* If there is anything in the data position, pull it in */
buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
goto out;
}
- /* Calculate length in DWORDs, add one for odd lengths */
- dword_len = (buf_len + 1) >> 2;
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
- /* Pull in the rest of the buffer (i is where we left off)*/
- for (; i < buf_len; i++)
- *((u32 *)buffer + i) =
- IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+ /* Pull in the rest of the buffer (bi is where we left off)*/
+ for (; bi <= dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ }
out:
return ret_val;
fw_cmd.pad2 = 0;
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd,
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
sizeof(fw_cmd));
if (ret_val != IXGBE_SUCCESS)
continue;
* buffers requested using supplied strategy.
*/
switch (strategy) {
- case (PBA_STRATEGY_WEIGHTED):
- /* pba_80_48 strategy weight first half of packet buffer with
- * 5/8 of the packet buffer space.
+ case PBA_STRATEGY_WEIGHTED:
+ /* The ixgbe_dcb_pba_80_48 strategy weights the first half of the
+ * packet buffers with 5/8 of the packet buffer space.
*/
- rxpktsize = (pbsize * 5 * 2) / (num_pb * 8);
+ rxpktsize = (pbsize * 5) / (num_pb * 4);
pbsize -= rxpktsize * (num_pb / 2);
rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
for (; i < (num_pb / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
/* Fall through to configure remaining packet buffers */
- case (PBA_STRATEGY_EQUAL):
+ case PBA_STRATEGY_EQUAL:
rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
for (; i < num_pb; i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
#define _IXGBE_COMMON_H_
#include "ixgbe_type.h"
+#ident "$Id: ixgbe_common.h,v 1.133 2012/11/05 23:08:30 jtkirshe Exp $"
#define IXGBE_WRITE_REG64(hw, reg, value) \
do { \
IXGBE_WRITE_REG(hw, reg, (u32) value); \
IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
} while (0)
+struct ixgbe_pba {
+ u16 word[2];
+ u16 *pba_block;
+};
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct ixgbe_pba *pba);
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct ixgbe_pba *pba);
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num);
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+
#endif /* IXGBE_COMMON */
{
s32 ret_val = IXGBE_ERR_MBX;
- UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_msg_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
{
s32 ret_val = IXGBE_ERR_MBX;
- UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_ack_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
{
s32 ret_val = IXGBE_ERR_MBX;
- UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_rst_vf");
if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
s32 ret_val;
u16 i;
- UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_write_mbx_vf");
u16 i;
DEBUGFUNC("ixgbe_read_mbx_vf");
- UNREFERENCED_1PARAMETER(mbx_id);
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_vf(hw);
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+ ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ /* This value should always be last */
+ ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+/* mailbox API, version 2.0 VF requests */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */
+#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */
+#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */
+#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */
+#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */
+#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */
+
+/* mailbox API, version 2.0 PF requests */
+#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
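
A hedged sketch of the handshake these IDs enable, following the posted
write/read pattern used elsewhere in this patch (the reply check is an
assumption, not code from this change):

	u32 msg[3] = { IXGBE_VF_API_NEGOTIATE, ixgbe_mbox_api_11, 0 };

	if (!hw->mbx.ops.write_posted(hw, msg, 3, 0) &&
	    !hw->mbx.ops.read_posted(hw, msg, 3, 0) &&
	    (msg[0] & IXGBE_VT_MSGTYPE_ACK))
		hw->api_version = ixgbe_mbox_api_11;
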
#pragma warning(disable:1419) /* External declaration in primary source file */
#pragma warning(disable:111) /* Statement is unreachable */
#pragma warning(disable:981) /* Operands are evaluated in unspecified order */
+#pragma warning(disable:593) /* Variable was set but never used */
+#pragma warning(disable:174) /* expression has no effect */
#else
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wformat"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#pragma GCC diagnostic ignored "-Wunused-value"
+#pragma GCC diagnostic ignored "-Wformat-extra-args"
+#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 6))
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#endif
#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 7))
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
#define FALSE 0
#define TRUE 1
+#define false 0
+#define true 1
+#define min(a,b) RTE_MIN(a,b)
+
+#define EWARN(hw, S, args...) DEBUGOUT1(S, ##args)
+
/* Bunch of defines for shared code bogosity */
#define UNREFERENCED_PARAMETER(_p)
#define UNREFERENCED_1PARAMETER(_p)
#define UNREFERENCED_3PARAMETER(_p, _q, _r)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
-
#define STATIC static
#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i)
#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i)
+#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i)
+#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
typedef uint8_t u8;
typedef int8_t s8;
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+#ident "$Id: ixgbe_phy.c,v 1.139 2012/05/24 23:36:12 jtkirshe Exp $"
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(u32 *i2cctl);
-void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
/**
* ixgbe_init_phy_ops_generic - Inits PHY function ptrs
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
- phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
+ phy->ops.identify_sfp = &ixgbe_identify_module_generic;
phy->sfp_type = ixgbe_sfp_type_unknown;
phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
return IXGBE_SUCCESS;
&autoneg_reg);
autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
break;
}
- }
if (time_out == max_time_out) {
status = IXGBE_ERR_LINK_SETUP;
bool autoneg,
bool autoneg_wait_to_complete)
{
- UNREFERENCED_2PARAMETER(autoneg, autoneg_wait_to_complete);
DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
&autoneg_reg);
autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
break;
}
- }
if (time_out == max_time_out) {
status = IXGBE_ERR_LINK_SETUP;
return ret_val;
}
+/**
+ * ixgbe_identify_module_generic - Identifies module type
+ * @hw: pointer to hardware structure
+ *
+ * Determines HW type and calls appropriate function.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+ DEBUGFUNC("ixgbe_identify_module_generic");
+
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ status = ixgbe_identify_sfp_module_generic(hw);
+ break;
+
+ default:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ break;
+ }
+
+ return status;
+}
+
/**
* ixgbe_identify_sfp_module_generic - Identifies SFP modules
* @hw: pointer to hardware structure
* 8 SFP_act_lmt_DA_CORE1 - 82599-specific
* 9 SFP_1g_cu_CORE0 - 82599-specific
* 10 SFP_1g_cu_CORE1 - 82599-specific
+ * 11 SFP_1g_sx_CORE0 - 82599-specific
+ * 12 SFP_1g_sx_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_cu_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
/* Verify supported 1G SFP modules */
if (comp_codes_10g == 0 &&
!(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
ixgbe_get_device_caps(hw, &enforce_sfp);
if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
!((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = IXGBE_SUCCESS;
} else {
+ if (hw->allow_unsupported_sfp == true) {
+ EWARN(hw, "WARNING: Intel (R) Network "
+ "Connections are quality tested "
+ "using Intel (R) Ethernet Optics."
+ " Using untested modules is not "
+ "supported and may cause unstable"
+ " operation or damage to the "
+ "module or the adapter. Intel "
+ "Corporation is not responsible "
+ "for any harm caused by using "
+ "untested modules.\n", status);
+ status = IXGBE_SUCCESS;
+ } else {
DEBUGOUT("SFP+ module not supported\n");
hw->phy.type =
ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
}
+ }
} else {
status = IXGBE_SUCCESS;
}
* SR modules
*/
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core0)
+ sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core1)
+ sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
**/
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
+ u32 i = 0;
+ u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
+ u32 i2cctl_r = 0;
+
DEBUGFUNC("ixgbe_raise_i2c_clk");
+ for (i = 0; i < timeout; i++) {
*i2cctl |= IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
IXGBE_WRITE_FLUSH(hw);
-
/* SCL rise time (1000ns) */
usec_delay(IXGBE_I2C_T_RISE);
+
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ if (i2cctl_r & IXGBE_I2C_CLK_IN)
+ break;
+ }
}
/**
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+#ident "$Id: ixgbe_phy.h,v 1.48 2012/01/04 01:49:02 jtkirshe Exp $"
+
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u8 *eeprom_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
#endif /* _IXGBE_PHY_H_ */
#include "ixgbe_osdep.h"
-
-/* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID 0x8086
+#ident "$Id: ixgbe_type.h,v 1.552 2012/11/08 11:33:27 jtkirshe Exp $"
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_82599_VF 0x10ED
-#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
#define IXGBE_DEV_ID_X540T 0x1528
+#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X540T1 0x1560
/* General Registers */
#define IXGBE_CTRL 0x00000
#define IXGBE_I2C_CLK_OUT 0x00000002
#define IXGBE_I2C_DATA_IN 0x00000004
#define IXGBE_I2C_DATA_OUT 0x00000008
-#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
+
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
/* Receive DMA Registers */
#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
- (0x0D000 + ((_i - 64) * 0x40)))
+ (0x0D000 + (((_i) - 64) * 0x40)))
#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
- (0x0D004 + ((_i - 64) * 0x40)))
+ (0x0D004 + (((_i) - 64) * 0x40)))
#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
- (0x0D008 + ((_i - 64) * 0x40)))
+ (0x0D008 + (((_i) - 64) * 0x40)))
#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
- (0x0D010 + ((_i - 64) * 0x40)))
+ (0x0D010 + (((_i) - 64) * 0x40)))
#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
- (0x0D018 + ((_i - 64) * 0x40)))
+ (0x0D018 + (((_i) - 64) * 0x40)))
#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
- (0x0D028 + ((_i - 64) * 0x40)))
+ (0x0D028 + (((_i) - 64) * 0x40)))
#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
- (0x0D02C + ((_i - 64) * 0x40)))
+ (0x0D02C + (((_i) - 64) * 0x40)))
#define IXGBE_RSCDBU 0x03028
#define IXGBE_RDDCC 0x02F20
#define IXGBE_RXMEMWRAP 0x03190
*/
#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
(((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
- (0x0D014 + ((_i - 64) * 0x40))))
+ (0x0D014 + (((_i) - 64) * 0x40))))
/*
* Rx DCA Control Register:
* 00-15 : 0x02200 + n*4
*/
#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
(((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
- (0x0D00C + ((_i - 64) * 0x40))))
+ (0x0D00C + (((_i) - 64) * 0x40))))
#define IXGBE_RDRXCTL 0x02F00
#define IXGBE_RDRXCTL_RSC_PUSH 0x80
/* 8 of these 0x03C00 - 0x03C1C */
#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
/* DCB registers */
-#define MAX_TRAFFIC_CLASS 8
+#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8
#define IXGBE_RMCS 0x03D00
#define IXGBE_DPMCS 0x07F40
#define IXGBE_PDPMCS 0x0CD00
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
IXGBE_GCR_EXT_VT_MODE_64)
+#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */
+#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */
#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
/* MSCA Bit Masks */
#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
+#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_SHIFT 20
#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
-#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */
+#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP2 IO direction */
+#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */
+#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */
#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */
+#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */
+#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */
+#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
+
/* LEDCTL Bit Masks */
#define IXGBE_LED_IVRT_BASE 0x00000040
#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
#define IXGBE_MACC_FLU 0x00000001
#define IXGBE_MACC_FSV_10G 0x00030000
#define IXGBE_FW_PTR 0x0F
#define IXGBE_PBANUM0_PTR 0x15
#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_ALT_MAC_ADDR_PTR 0x37
#define IXGBE_FREE_SPACE_PTR 0X3E
+
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
-#define IXGBE_DEVICE_CAPS_EXT_THERMAL_SENSOR 0x10
#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
+#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
+#define IXGBE_MAX_MSIX_VECTORS_82598 0x13
/* MSI-X capability fields masks */
#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
#define IXGBE_PCI_LINK_SPEED 0xF
#define IXGBE_PCI_LINK_SPEED_2500 0x1
#define IXGBE_PCI_LINK_SPEED_5000 0x2
+#define IXGBE_PCI_LINK_SPEED_8000 0x3
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+#define IXGBE_TSAUXC_EN_CLK 0x00000004
+#define IXGBE_TSAUXC_SYNCLK 0x00000008
+#define IXGBE_TSAUXC_SDP0_INT 0x00000040
+
#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */
#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */
/* Multiple Receive Queue Control */
#define IXGBE_QDE_ENABLE 0x00000001
#define IXGBE_QDE_IDX_MASK 0x00007F00
#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_WRITE 0x00010000
+#define IXGBE_QDE_READ 0x00020000
#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
/* SR-IOV specific macros */
#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
-#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
+#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
-#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
+#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
/* Little Endian defines */
#ifndef __le16
#define IXGBE_FDIR_DROP_QUEUE 127
+#define IXGBE_STATUS_OVERHEATING_BIT 20 /* STATUS overtemp bit num */
/* Manageability Host Interface defines */
#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
*/
/* BitTimes (BT) conversion */
-#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024))
+#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
#define IXGBE_B2BT(BT) (BT * 8)
/* Calculate Delay to respond to PFC */
#define IXGBE_PCI_DELAY 10000
/* Calculate X540 delay value in bit times */
-#define IXGBE_FILL_RATE (36 / 25)
-
-#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
- (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
+#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
(2 * IXGBE_CABLE_DC) + \
(2 * IXGBE_ID_X540) + \
- IXGBE_HD + IXGBE_B2BT(TC)))
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate 82599, 82598 delay value in bit times */
-#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
- (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
- IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate low threshold delay values */
-#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
- (IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
-#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC))
+#define IXGBE_LOW_DV_X540(_max_frame_tc) \
+ (2 * IXGBE_B2BT(_max_frame_tc) + \
+ (36 * IXGBE_PCI_DELAY / 25) + 1)
+#define IXGBE_LOW_DV(_max_frame_tc) \
+ (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
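
Worked example using only the conversion macros above: for a 9728-byte
frame, IXGBE_B2BT(9728) = 9728 * 8 = 77824 bit times, and
IXGBE_BT2KB(77824) = (77824 + 8191) / 8192 = 10, i.e. the delay rounds up
to 10 KB of buffer. The old (BT + 1023) divisor truncated this to 9; the
fix makes IXGBE_BT2KB a true ceiling division by 8 * 1024.
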
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_1g_cu_core0 = 9,
ixgbe_sfp_type_1g_cu_core1 = 10,
+ ixgbe_sfp_type_1g_sx_core0 = 11,
+ ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
ixgbe_bus_speed_133 = 133,
ixgbe_bus_speed_2500 = 2500,
ixgbe_bus_speed_5000 = 5000,
+ ixgbe_bus_speed_8000 = 8000,
ixgbe_bus_speed_reserved
};
/* Flow control parameters */
struct ixgbe_fc_info {
- u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
- u32 low_water; /* Flow Control Low-water */
+ u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */
+ u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
s32 (*setup_sfp)(struct ixgbe_hw *);
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
+ s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
s32 (*clear_rar)(struct ixgbe_hw *, u32);
s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
s32 (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* Flow Control */
- s32 (*fc_enable)(struct ixgbe_hw *, s32);
+ s32 (*fc_enable)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
u32 rx_pb_size;
u32 max_tx_queues;
u32 max_rx_queues;
- u32 max_msix_vectors;
- bool msix_vectors_from_pcie;
u32 orig_autoc;
- bool arc_subsystem_valid;
+ u8 san_mac_rar_index;
u32 orig_autoc2;
+ u16 max_msix_vectors;
+ bool arc_subsystem_valid;
bool orig_link_settings_stored;
bool autotry_restart;
u8 flags;
u16 subsystem_vendor_id;
u8 revision_id;
bool adapter_stopped;
+ int api_version;
bool force_full_reset;
+ bool allow_unsupported_sfp;
};
#define ixgbe_call_func(hw, func, params, error) \
#define IXGBE_ERR_OVERTEMP -26
#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
#define IXGBE_ERR_FC_NOT_SUPPORTED -28
-#define IXGBE_ERR_FLOW_CONTROL -29
#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
#define IXGBE_ERR_PBA_SECTION -31
#define IXGBE_ERR_INVALID_ARGUMENT -32
#include "ixgbe_api.h"
#include "ixgbe_type.h"
#include "ixgbe_vf.h"
-
-s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
-s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
-s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool autoneg_wait_to_complete);
-s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
-s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
-s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr,
- bool clear);
-s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+#ident "$Id: ixgbe_vf.c,v 1.58 2012/08/09 20:24:53 cmwyborn Exp $"
#ifndef IXGBE_VFWRITE_REG
#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
+	/* Reset the negotiated API version to its 1.0 default */
+ hw->api_version = ixgbe_mbox_api_10;
+
DEBUGOUT("Issuing a function level reset to MAC\n");
ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST;
return vector;
}
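+/**
+ * ixgbevf_write_msg_read_ack - post a mailbox message and consume the reply
+ * @hw: pointer to the HW structure
+ * @msg: message buffer to post
+ * @size: message size, in 32-bit words
+ *
+ * The reply is read only to release the mailbox; its contents are discarded.
+ **/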
+static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
+ u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 retmsg[IXGBE_VFMAILBOX_SIZE];
+ s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
+
+ if (!retval)
+ mbx->ops.read_posted(hw, retmsg, size, 0);
+}
+
/**
* ixgbe_set_rar_vf - set device MAC address
* @hw: pointer to hardware structure
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
- UNREFERENCED_3PARAMETER(vmdq, enable_addr, index);
memset(msgbuf, 0, 12);
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
u32 cnt, i;
u32 vmdq;
- UNREFERENCED_1PARAMETER(clear);
DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
- * @vlan_on: if TRUE then set bit, else clear bit
+ * @vlan_on: if true then set bit, else clear bit
**/
s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
- UNREFERENCED_1PARAMETER(vind);
+ s32 ret_val;
msgbuf[0] = IXGBE_VF_SET_VLAN;
msgbuf[1] = vlan;
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
- return(mbx->ops.write_posted(hw, msgbuf, 2, 0));
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 2, 0);
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 1, 0);
+
+ if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_SUCCESS;
+
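+	/* Surface a NACK from the PF as a non-zero return to the caller */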
+ return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK);
}
/**
**/
u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
{
- UNREFERENCED_1PARAMETER(hw);
return IXGBE_VF_MAX_TX_QUEUES;
}
**/
u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
{
- UNREFERENCED_1PARAMETER(hw);
return IXGBE_VF_MAX_RX_QUEUES;
}
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete)
{
- UNREFERENCED_4PARAMETER(hw, speed, autoneg, autoneg_wait_to_complete);
return IXGBE_SUCCESS;
}
bool *link_up, bool autoneg_wait_to_complete)
{
u32 links_reg;
- UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
if (!(hw->mbx.ops.check_for_rst(hw, 0))) {
*link_up = false;
links_reg = IXGBE_VFREAD_REG(hw, IXGBE_VFLINKS);
if (links_reg & IXGBE_LINKS_UP)
- *link_up = TRUE;
+ *link_up = true;
else
- *link_up = FALSE;
+ *link_up = false;
- if ((links_reg & IXGBE_LINKS_SPEED_10G_82599) ==
- IXGBE_LINKS_SPEED_10G_82599)
+ switch (links_reg & IXGBE_LINKS_SPEED_10G_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- else
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ }
return IXGBE_SUCCESS;
}
+/**
+ * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_LPE;
+ msgbuf[1] = max_size;
+ ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
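+/*
+ * Illustrative use only (the caller below is not part of this patch):
+ * a VF driver enabling jumbo frames would push the new limit to the PF
+ * before re-enabling Rx, e.g.
+ *
+ *	ixgbevf_rlpml_set_vf(hw, (u16)new_max_frame_size);
+ *
+ * where new_max_frame_size is whatever frame size the stack configured.
+ */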
+
+/**
+ * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+ int err;
+ u32 msg[3];
+
+ /* Negotiate the mailbox API version */
+ msg[0] = IXGBE_VF_API_NEGOTIATE;
+ msg[1] = api;
+ msg[2] = 0;
+ err = hw->mbx.ops.write_posted(hw, msg, 3, 0);
+
+ if (!err)
+ err = hw->mbx.ops.read_posted(hw, msg, 3, 0);
+
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* Store value and return 0 on success */
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ hw->api_version = api;
+ return 0;
+ }
+
+ err = IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ return err;
+}
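+/*
+ * Illustrative negotiation sequence (not part of this patch): a driver
+ * would typically try the newest mailbox API first and fall back, e.g.
+ *
+ *	int api[] = { ixgbe_mbox_api_11, ixgbe_mbox_api_10, -1 };
+ *	int i = 0;
+ *
+ *	while (api[i] != -1) {
+ *		if (!ixgbevf_negotiate_api_version(hw, api[i]))
+ *			break;
+ *		i++;
+ *	}
+ *
+ * On failure hw->api_version keeps the reset default (ixgbe_mbox_api_10).
+ */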
+
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc)
+{
+ int err;
+ u32 msg[5];
+
+ /* do nothing if API doesn't support ixgbevf_get_queues */
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_11:
+ break;
+ default:
+ return 0;
+ }
+
+ /* Fetch queue configuration from the PF */
+ msg[0] = IXGBE_VF_GET_QUEUES;
+ msg[1] = msg[2] = msg[3] = msg[4] = 0;
+ err = hw->mbx.ops.write_posted(hw, msg, 5, 0);
+
+ if (!err)
+ err = hw->mbx.ops.read_posted(hw, msg, 5, 0);
+
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /*
+	 * if we didn't get an ACK there must have been
+ * some sort of mailbox error so we should treat it
+ * as such
+ */
+ if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_MBX;
+
+ /* record and validate values from message */
+ hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+ if (hw->mac.max_tx_queues == 0 ||
+ hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+ hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+ hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+ if (hw->mac.max_rx_queues == 0 ||
+ hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+ hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > hw->mac.max_rx_queues)
+ *num_tcs = 1;
+
+ *default_tc = msg[IXGBE_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= hw->mac.max_tx_queues)
+ *default_tc = 0;
+ }
+
+ return err;
+}
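+/*
+ * Illustrative follow-up (not part of this patch): once api_11 has been
+ * negotiated, a driver refreshes its queue limits from the PF, e.g.
+ *
+ *	unsigned int num_tcs, default_tc;
+ *
+ *	if (!ixgbevf_get_queues(hw, &num_tcs, &default_tc))
+ *		... hw->mac.max_tx_queues and max_rx_queues now hold the
+ *		    PF-provided values, clamped to the VF maximums ...
+ */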
+
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
+#ident "$Id: ixgbe_vf.h,v 1.34 2012/08/09 17:04:18 cmwyborn Exp $"
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
+
#define IXGBE_VFCTRL 0x00000
#define IXGBE_VFSTATUS 0x00008
#define IXGBE_VFLINKS 0x00010
#define IXGBE_VTEIMC 0x0010C
#define IXGBE_VTEIAC 0x00110
#define IXGBE_VTEIAM 0x00114
-#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
-#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
#define IXGBE_VTIVAR_MISC 0x00140
-#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
/* define IXGBE_VFPBACL still says TBD in EAS */
-#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
-#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
-#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
-#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
-#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
-#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
-#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
-#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
#define IXGBE_VFPSRTYPE 0x00300
-#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
-#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
-#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
-#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
-#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
-#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
-#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
-#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
-#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
-#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
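+/*
+ * Note on the added parentheses above: without "(x)" an expression
+ * argument expands incorrectly, e.g. (illustrative)
+ *
+ *	IXGBE_VTEITR(i + 1)
+ *
+ * would become (0x00820 + (4 * i + 1)) instead of the intended
+ * (0x00820 + (4 * (i + 1))).
+ */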
#define IXGBE_VFGPRC 0x0101C
#define IXGBE_VFGPTC 0x0201C
#define IXGBE_VFGORC_LSB 0x01020
u64 saved_reset_vfmprc;
};
-#endif /* __IXGBE_VF_H__ */
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool autoneg_wait_to_complete);
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr,
+ bool clear);
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc);
+#endif /* __IXGBE_VF_H__ */
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
-s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
-enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg, bool link_up_wait_to_complete);
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
-
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
-s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
- u16 offset, u16 words, u16 *data);
-s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
- u16 offset, u16 words, u16 *data);
-s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
-u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
-
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
-
STATIC s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
* ixgbe_init_ops_X540 - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
*
- * Initialize the function pointers and assign the MAC type for 82599.
+ * Initialize the function pointers and assign the MAC type for X540.
* Does not touch the hardware.
**/
s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
+ mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
**/
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
- UNREFERENCED_1PARAMETER(hw);
return ixgbe_media_type_copper;
}
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
{
u32 macc_reg;
u32 ledctl_reg;
+ ixgbe_link_speed speed;
+ bool link_up;
DEBUGFUNC("ixgbe_blink_led_start_X540");
/*
- * In order for the blink bit in the LED control register
- * to work, link and speed must be forced in the MAC. We
- * will reverse this when we stop the blinking.
+ * Link should be up in order for the blink bit in the LED control
+ * register to work. Force link and speed in the MAC if link is down.
+ * This will be reversed when we stop the blinking.
*/
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (link_up == false) {
macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
-
+ }
/* Set the LED to LINK_UP + BLINK. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
#define _IXGBE_X540_H_
#include "ixgbe_type.h"
+#ident "$Id: ixgbe_x540.h,v 1.6 2012/08/09 20:43:58 cmwyborn Exp $"
+
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool link_up_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
+u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+++ /dev/null
-/******************************************************************************
-
- Copyright (c) 2001-2010, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifdef HAVE_KERNEL_OPTION_HEADERS
-#include "opt_inet.h"
-#include "opt_inet6.h"
-#endif
-
-#include "ixv.h"
-
-/*********************************************************************
- * Driver version
- *********************************************************************/
-char ixv_driver_version[] = "1.1.2";
-
-/*********************************************************************
- * PCI Device ID Table
- *
- * Used by probe to select devices to load on
- * Last field stores an index into ixv_strings
- * Last entry must be all 0s
- *
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
- *********************************************************************/
-
-static ixv_vendor_info_t ixv_vendor_info_array[] =
-{
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
- /* required last entry */
- {0, 0, 0, 0, 0}
-};
-
-/*********************************************************************
- * Table of branding strings
- *********************************************************************/
-
-static char *ixv_strings[] = {
- "Intel(R) PRO/10GbE Virtual Function Network Driver"
-};
-
-/*********************************************************************
- * Function prototypes
- *********************************************************************/
-static int ixv_probe(device_t);
-static int ixv_attach(device_t);
-static int ixv_detach(device_t);
-static int ixv_shutdown(device_t);
-#if __FreeBSD_version < 800000
-static void ixv_start(struct ifnet *);
-static void ixv_start_locked(struct tx_ring *, struct ifnet *);
-#else
-static int ixv_mq_start(struct ifnet *, struct mbuf *);
-static int ixv_mq_start_locked(struct ifnet *,
- struct tx_ring *, struct mbuf *);
-static void ixv_qflush(struct ifnet *);
-#endif
-static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
-static void ixv_init(void *);
-static void ixv_init_locked(struct adapter *);
-static void ixv_stop(void *);
-static void ixv_media_status(struct ifnet *, struct ifmediareq *);
-static int ixv_media_change(struct ifnet *);
-static void ixv_identify_hardware(struct adapter *);
-static int ixv_allocate_pci_resources(struct adapter *);
-static int ixv_allocate_msix(struct adapter *);
-static int ixv_allocate_queues(struct adapter *);
-static int ixv_setup_msix(struct adapter *);
-static void ixv_free_pci_resources(struct adapter *);
-static void ixv_local_timer(void *);
-static void ixv_setup_interface(device_t, struct adapter *);
-static void ixv_config_link(struct adapter *);
-
-static int ixv_allocate_transmit_buffers(struct tx_ring *);
-static int ixv_setup_transmit_structures(struct adapter *);
-static void ixv_setup_transmit_ring(struct tx_ring *);
-static void ixv_initialize_transmit_units(struct adapter *);
-static void ixv_free_transmit_structures(struct adapter *);
-static void ixv_free_transmit_buffers(struct tx_ring *);
-
-static int ixv_allocate_receive_buffers(struct rx_ring *);
-static int ixv_setup_receive_structures(struct adapter *);
-static int ixv_setup_receive_ring(struct rx_ring *);
-static void ixv_initialize_receive_units(struct adapter *);
-static void ixv_free_receive_structures(struct adapter *);
-static void ixv_free_receive_buffers(struct rx_ring *);
-
-static void ixv_enable_intr(struct adapter *);
-static void ixv_disable_intr(struct adapter *);
-static bool ixv_txeof(struct tx_ring *);
-static bool ixv_rxeof(struct ix_queue *, int);
-static void ixv_rx_checksum(u32, struct mbuf *, u32);
-static void ixv_set_multi(struct adapter *);
-static void ixv_update_link_status(struct adapter *);
-static void ixv_refresh_mbufs(struct rx_ring *, int);
-static int ixv_xmit(struct tx_ring *, struct mbuf **);
-static int ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
-static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
-static int ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
-static int ixv_dma_malloc(struct adapter *, bus_size_t,
- struct ixv_dma_alloc *, int);
-static void ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
-static void ixv_add_rx_process_limit(struct adapter *, const char *,
- const char *, int *, int);
-static bool ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
-static bool ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
-static void ixv_set_ivar(struct adapter *, u8, u8, s8);
-static void ixv_configure_ivars(struct adapter *);
-static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
-
-static void ixv_setup_vlan_support(struct adapter *);
-static void ixv_register_vlan(void *, struct ifnet *, u16);
-static void ixv_unregister_vlan(void *, struct ifnet *, u16);
-
-static void ixv_save_stats(struct adapter *);
-static void ixv_init_stats(struct adapter *);
-static void ixv_update_stats(struct adapter *);
-
-static __inline void ixv_rx_discard(struct rx_ring *, int);
-static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
- struct mbuf *, u32);
-
-/* The MSI/X Interrupt handlers */
-static void ixv_msix_que(void *);
-static void ixv_msix_mbx(void *);
-
-/* Deferred interrupt tasklets */
-static void ixv_handle_que(void *, int);
-static void ixv_handle_mbx(void *, int);
-
-/*********************************************************************
- * FreeBSD Device Interface Entry Points
- *********************************************************************/
-
-static device_method_t ixv_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, ixv_probe),
- DEVMETHOD(device_attach, ixv_attach),
- DEVMETHOD(device_detach, ixv_detach),
- DEVMETHOD(device_shutdown, ixv_shutdown),
- {0, 0}
-};
-
-static driver_t ixv_driver = {
- "ix", ixv_methods, sizeof(struct adapter),
-};
-
-extern devclass_t ixgbe_devclass;
-DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
-MODULE_DEPEND(ixv, pci, 1, 1, 1);
-MODULE_DEPEND(ixv, ether, 1, 1, 1);
-
-/*
-** TUNEABLE PARAMETERS:
-*/
-
-/*
-** AIM: Adaptive Interrupt Moderation
-** which means that the interrupt rate
-** is varied over time based on the
-** traffic for that interrupt vector
-*/
-static int ixv_enable_aim = FALSE;
-TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
-
-/* How many packets rxeof tries to clean at a time */
-static int ixv_rx_process_limit = 128;
-TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
-
-/* Flow control setting, default to full */
-static int ixv_flow_control = ixgbe_fc_full;
-TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
-
-/*
- * Header split: this causes the hardware to DMA
- * the header into a seperate mbuf from the payload,
- * it can be a performance win in some workloads, but
- * in others it actually hurts, its off by default.
- */
-static bool ixv_header_split = FALSE;
-TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
-
-/*
-** Number of TX descriptors per ring,
-** setting higher than RX as this seems
-** the better performing choice.
-*/
-static int ixv_txd = DEFAULT_TXD;
-TUNABLE_INT("hw.ixv.txd", &ixv_txd);
-
-/* Number of RX descriptors per ring */
-static int ixv_rxd = DEFAULT_RXD;
-TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
-
-/*
-** Shadow VFTA table, this is needed because
-** the real filter table gets cleared during
-** a soft reset and we need to repopulate it.
-*/
-static u32 ixv_shadow_vfta[VFTA_SIZE];
-
-/*********************************************************************
- * Device identification routine
- *
- * ixv_probe determines if the driver should be loaded on
- * adapter based on PCI vendor/device id of the adapter.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-ixv_probe(device_t dev)
-{
- ixv_vendor_info_t *ent;
-
- u16 pci_vendor_id = 0;
- u16 pci_device_id = 0;
- u16 pci_subvendor_id = 0;
- u16 pci_subdevice_id = 0;
- char adapter_name[256];
-
-
- pci_vendor_id = pci_get_vendor(dev);
- if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
- return (ENXIO);
-
- pci_device_id = pci_get_device(dev);
- pci_subvendor_id = pci_get_subvendor(dev);
- pci_subdevice_id = pci_get_subdevice(dev);
-
- ent = ixv_vendor_info_array;
- while (ent->vendor_id != 0) {
- if ((pci_vendor_id == ent->vendor_id) &&
- (pci_device_id == ent->device_id) &&
-
- ((pci_subvendor_id == ent->subvendor_id) ||
- (ent->subvendor_id == 0)) &&
-
- ((pci_subdevice_id == ent->subdevice_id) ||
- (ent->subdevice_id == 0))) {
- sprintf(adapter_name, "%s, Version - %s",
- ixv_strings[ent->index],
- ixv_driver_version);
- device_set_desc_copy(dev, adapter_name);
- return (0);
- }
- ent++;
- }
- return (ENXIO);
-}
-
-/*********************************************************************
- * Device initialization routine
- *
- * The attach entry point is called when the driver is being loaded.
- * This routine identifies the type of hardware, allocates all resources
- * and initializes the hardware.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-ixv_attach(device_t dev)
-{
- struct adapter *adapter;
- struct ixgbe_hw *hw;
- int error = 0;
-
- INIT_DEBUGOUT("ixv_attach: begin");
-
- /* Allocate, clear, and link in our adapter structure */
- adapter = device_get_softc(dev);
- adapter->dev = adapter->osdep.dev = dev;
- hw = &adapter->hw;
-
- /* Core Lock Init*/
- IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
-
- /* SYSCTL APIs */
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixv_sysctl_stats, "I", "Statistics");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixv_set_flowcntl, "I", "Flow Control");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
- &ixv_enable_aim, 1, "Interrupt Moderation");
-
- /* Set up the timer callout */
- callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
-
- /* Determine hardware revision */
- ixv_identify_hardware(adapter);
-
- /* Do base PCI setup - map BAR0 */
- if (ixv_allocate_pci_resources(adapter)) {
- device_printf(dev, "Allocation of PCI resources failed\n");
- error = ENXIO;
- goto err_out;
- }
-
- /* Do descriptor calc and sanity checks */
- if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
- ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
- device_printf(dev, "TXD config issue, using default!\n");
- adapter->num_tx_desc = DEFAULT_TXD;
- } else
- adapter->num_tx_desc = ixv_txd;
-
- if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
- ixv_rxd < MIN_TXD || ixv_rxd > MAX_TXD) {
- device_printf(dev, "RXD config issue, using default!\n");
- adapter->num_rx_desc = DEFAULT_RXD;
- } else
- adapter->num_rx_desc = ixv_rxd;
-
- /* Allocate our TX/RX Queues */
- if (ixv_allocate_queues(adapter)) {
- error = ENOMEM;
- goto err_out;
- }
-
- /*
- ** Initialize the shared code: its
- ** at this point the mac type is set.
- */
- error = ixgbe_init_shared_code(hw);
- if (error) {
- device_printf(dev,"Shared Code Initialization Failure\n");
- error = EIO;
- goto err_late;
- }
-
- /* Setup the mailbox */
- ixgbe_init_mbx_params_vf(hw);
-
- ixgbe_reset_hw(hw);
-
- /* Get Hardware Flow Control setting */
- hw->fc.requested_mode = ixgbe_fc_full;
- hw->fc.pause_time = IXV_FC_PAUSE;
- hw->fc.low_water = IXV_FC_LO;
- hw->fc.high_water[0] = IXV_FC_HI;
- hw->fc.send_xon = TRUE;
-
- error = ixgbe_init_hw(hw);
- if (error) {
- device_printf(dev,"Hardware Initialization Failure\n");
- error = EIO;
- goto err_late;
- }
-
- error = ixv_allocate_msix(adapter);
- if (error)
- goto err_late;
-
- /* Setup OS specific network interface */
- ixv_setup_interface(dev, adapter);
-
- /* Sysctl for limiting the amount of work done in the taskqueue */
- ixv_add_rx_process_limit(adapter, "rx_processing_limit",
- "max number of rx packets to process", &adapter->rx_process_limit,
- ixv_rx_process_limit);
-
- /* Do the stats setup */
- ixv_save_stats(adapter);
- ixv_init_stats(adapter);
-
- /* Register for VLAN events */
- adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
- ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
- adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
- ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
-
- INIT_DEBUGOUT("ixv_attach: end");
- return (0);
-
-err_late:
- ixv_free_transmit_structures(adapter);
- ixv_free_receive_structures(adapter);
-err_out:
- ixv_free_pci_resources(adapter);
- return (error);
-
-}
-
-/*********************************************************************
- * Device removal routine
- *
- * The detach entry point is called when the driver is being removed.
- * This routine stops the adapter and deallocates all the resources
- * that were allocated for driver operation.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-ixv_detach(device_t dev)
-{
- struct adapter *adapter = device_get_softc(dev);
- struct ix_queue *que = adapter->queues;
-
- INIT_DEBUGOUT("ixv_detach: begin");
-
- /* Make sure VLANS are not using driver */
- if (adapter->ifp->if_vlantrunk != NULL) {
- device_printf(dev,"Vlan in use, detach first\n");
- return (EBUSY);
- }
-
- IXV_CORE_LOCK(adapter);
- ixv_stop(adapter);
- IXV_CORE_UNLOCK(adapter);
-
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- if (que->tq) {
- taskqueue_drain(que->tq, &que->que_task);
- taskqueue_free(que->tq);
- }
- }
-
- /* Drain the Link queue */
- if (adapter->tq) {
- taskqueue_drain(adapter->tq, &adapter->mbx_task);
- taskqueue_free(adapter->tq);
- }
-
- /* Unregister VLAN events */
- if (adapter->vlan_attach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
- if (adapter->vlan_detach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
-
- ether_ifdetach(adapter->ifp);
- callout_drain(&adapter->timer);
- ixv_free_pci_resources(adapter);
- bus_generic_detach(dev);
- if_free(adapter->ifp);
-
- ixv_free_transmit_structures(adapter);
- ixv_free_receive_structures(adapter);
-
- IXV_CORE_LOCK_DESTROY(adapter);
- return (0);
-}
-
-/*********************************************************************
- *
- * Shutdown entry point
- *
- **********************************************************************/
-static int
-ixv_shutdown(device_t dev)
-{
- struct adapter *adapter = device_get_softc(dev);
- IXV_CORE_LOCK(adapter);
- ixv_stop(adapter);
- IXV_CORE_UNLOCK(adapter);
- return (0);
-}
-
-#if __FreeBSD_version < 800000
-/*********************************************************************
- * Transmit entry point
- *
- * ixv_start is called by the stack to initiate a transmit.
- * The driver will remain in this routine as long as there are
- * packets to transmit and transmit resources are available.
- * In case resources are not available stack is notified and
- * the packet is requeued.
- **********************************************************************/
-static void
-ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
-{
- struct mbuf *m_head;
- struct adapter *adapter = txr->adapter;
-
- IXV_TX_LOCK_ASSERT(txr);
-
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING)
- return;
- if (!adapter->link_active)
- return;
-
- while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
-
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
- if (m_head == NULL)
- break;
-
- if (ixv_xmit(txr, &m_head)) {
- if (m_head == NULL)
- break;
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
- break;
- }
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, m_head);
-
- /* Set watchdog on */
- txr->watchdog_check = TRUE;
- txr->watchdog_time = ticks;
-
- }
- return;
-}
-
-/*
- * Legacy TX start - called by the stack, this
- * always uses the first tx ring, and should
- * not be used with multiqueue tx enabled.
- */
-static void
-ixv_start(struct ifnet *ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = adapter->tx_rings;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXV_TX_LOCK(txr);
- ixv_start_locked(txr, ifp);
- IXV_TX_UNLOCK(txr);
- }
- return;
-}
-
-#else
-
-/*
-** Multiqueue Transmit driver
-**
-*/
-static int
-ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ix_queue *que;
- struct tx_ring *txr;
- int i = 0, err = 0;
-
- /* Which queue to use */
- if ((m->m_flags & M_FLOWID) != 0)
- i = m->m_pkthdr.flowid % adapter->num_queues;
-
- txr = &adapter->tx_rings[i];
- que = &adapter->queues[i];
-
- if (IXV_TX_TRYLOCK(txr)) {
- err = ixv_mq_start_locked(ifp, txr, m);
- IXV_TX_UNLOCK(txr);
- } else {
- err = drbr_enqueue(ifp, txr->br, m);
- taskqueue_enqueue(que->tq, &que->que_task);
- }
-
- return (err);
-}
-
-static int
-ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
-{
- struct adapter *adapter = txr->adapter;
- struct mbuf *next;
- int enqueued, err = 0;
-
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING || adapter->link_active == 0) {
- if (m != NULL)
- err = drbr_enqueue(ifp, txr->br, m);
- return (err);
- }
-
- /* Do a clean if descriptors are low */
- if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
- ixv_txeof(txr);
-
- enqueued = 0;
- if (m == NULL) {
- next = drbr_dequeue(ifp, txr->br);
- } else if (drbr_needs_enqueue(ifp, txr->br)) {
- if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
- return (err);
- next = drbr_dequeue(ifp, txr->br);
- } else
- next = m;
-
- /* Process the queue */
- while (next != NULL) {
- if ((err = ixv_xmit(txr, &next)) != 0) {
- if (next != NULL)
- err = drbr_enqueue(ifp, txr->br, next);
- break;
- }
- enqueued++;
- drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, next);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
- if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
- next = drbr_dequeue(ifp, txr->br);
- }
-
- if (enqueued > 0) {
- /* Set watchdog on */
- txr->watchdog_check = TRUE;
- txr->watchdog_time = ticks;
- }
-
- return (err);
-}
-
-/*
-** Flush all ring buffers
-*/
-static void
-ixv_qflush(struct ifnet *ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = adapter->tx_rings;
- struct mbuf *m;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXV_TX_LOCK(txr);
- while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
- m_freem(m);
- IXV_TX_UNLOCK(txr);
- }
- if_qflush(ifp);
-}
-
-#endif
-
-/*********************************************************************
- * Ioctl entry point
- *
- * ixv_ioctl is called when the user wants to configure the
- * interface.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-
-static int
-ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ifreq *ifr = (struct ifreq *) data;
-#if defined(INET) || defined(INET6)
- struct ifaddr *ifa = (struct ifaddr *)data;
-#endif
- int error = 0;
- bool avoid_reset = FALSE;
-
- switch (command) {
-
- case SIOCSIFADDR:
-#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET)
- avoid_reset = TRUE;
-#endif
-#ifdef INET6
- if (ifa->ifa_addr->sa_family == AF_INET6)
- avoid_reset = TRUE;
-#endif
- /*
- ** Calling init results in link renegotiation,
- ** so we avoid doing it when possible.
- */
- if (avoid_reset) {
- ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ixv_init(adapter);
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
- } else
- error = ether_ioctl(ifp, command, data);
- break;
-
- case SIOCSIFMTU:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
- if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
- error = EINVAL;
- } else {
- IXV_CORE_LOCK(adapter);
- ifp->if_mtu = ifr->ifr_mtu;
- adapter->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
- ixv_init_locked(adapter);
- IXV_CORE_UNLOCK(adapter);
- }
- break;
- case SIOCSIFFLAGS:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
- IXV_CORE_LOCK(adapter);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- ixv_init_locked(adapter);
- } else
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixv_stop(adapter);
- adapter->if_flags = ifp->if_flags;
- IXV_CORE_UNLOCK(adapter);
- break;
- case SIOCADDMULTI:
- case SIOCDELMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXV_CORE_LOCK(adapter);
- ixv_disable_intr(adapter);
- ixv_set_multi(adapter);
- ixv_enable_intr(adapter);
- IXV_CORE_UNLOCK(adapter);
- }
- break;
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
- IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
- error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
- break;
- case SIOCSIFCAP:
- {
- int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
- IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
- if (mask & IFCAP_HWCSUM)
- ifp->if_capenable ^= IFCAP_HWCSUM;
- if (mask & IFCAP_TSO4)
- ifp->if_capenable ^= IFCAP_TSO4;
- if (mask & IFCAP_LRO)
- ifp->if_capenable ^= IFCAP_LRO;
- if (mask & IFCAP_VLAN_HWTAGGING)
- ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXV_CORE_LOCK(adapter);
- ixv_init_locked(adapter);
- IXV_CORE_UNLOCK(adapter);
- }
- VLAN_CAPABILITIES(ifp);
- break;
- }
-
- default:
- IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
- error = ether_ioctl(ifp, command, data);
- break;
- }
-
- return (error);
-}
-
-/*********************************************************************
- * Init entry point
- *
- * This routine is used in two ways. It is used by the stack as
- * init entry point in network interface structure. It is also used
- * by the driver as a hw/sw initialization routine to get to a
- * consistent state.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-#define IXGBE_MHADD_MFS_SHIFT 16
-
-static void
-ixv_init_locked(struct adapter *adapter)
-{
- struct ifnet *ifp = adapter->ifp;
- device_t dev = adapter->dev;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 mhadd, gpie;
-
- INIT_DEBUGOUT("ixv_init: begin");
- mtx_assert(&adapter->core_mtx, MA_OWNED);
- hw->adapter_stopped = FALSE;
- ixgbe_stop_adapter(hw);
- callout_stop(&adapter->timer);
-
- /* reprogram the RAR[0] in case user changed it. */
- ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
- /* Get the latest mac address, User can use a LAA */
- bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
- IXGBE_ETH_LENGTH_OF_ADDRESS);
- ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
- hw->addr_ctrl.rar_used_count = 1;
-
- /* Prepare transmit descriptors and buffers */
- if (ixv_setup_transmit_structures(adapter)) {
- device_printf(dev,"Could not setup transmit structures\n");
- ixv_stop(adapter);
- return;
- }
-
- ixgbe_reset_hw(hw);
- ixv_initialize_transmit_units(adapter);
-
- /* Setup Multicast table */
- ixv_set_multi(adapter);
-
- /*
- ** Determine the correct mbuf pool
- ** for doing jumbo/headersplit
- */
- if (ifp->if_mtu > ETHERMTU)
- adapter->rx_mbuf_sz = MJUMPAGESIZE;
- else
- adapter->rx_mbuf_sz = MCLBYTES;
-
- /* Prepare receive descriptors and buffers */
- if (ixv_setup_receive_structures(adapter)) {
- device_printf(dev,"Could not setup receive structures\n");
- ixv_stop(adapter);
- return;
- }
-
- /* Configure RX settings */
- ixv_initialize_receive_units(adapter);
-
- /* Enable Enhanced MSIX mode */
- gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
- gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
- gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
- /* Set the various hardware offload abilities */
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO4)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM) {
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
-#if __FreeBSD_version >= 800000
- ifp->if_hwassist |= CSUM_SCTP;
-#endif
- }
-
- /* Set MTU size */
- if (ifp->if_mtu > ETHERMTU) {
- mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
- mhadd &= ~IXGBE_MHADD_MFS_MASK;
- mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
- }
-
- /* Set up VLAN offload and filter */
- ixv_setup_vlan_support(adapter);
-
- callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
-
- /* Set up MSI/X routing */
- ixv_configure_ivars(adapter);
-
- /* Set up auto-mask */
- IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
-
- /* Set moderation on the Link interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
-
- /* Stats init */
- ixv_init_stats(adapter);
-
- /* Config/Enable Link */
- ixv_config_link(adapter);
-
- /* And now turn on interrupts */
- ixv_enable_intr(adapter);
-
- /* Now inform the stack we're ready */
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
- return;
-}
-
-static void
-ixv_init(void *arg)
-{
- struct adapter *adapter = arg;
-
- IXV_CORE_LOCK(adapter);
- ixv_init_locked(adapter);
- IXV_CORE_UNLOCK(adapter);
- return;
-}
-
-
-/*
-**
-** MSIX Interrupt Handlers and Tasklets
-**
-*/
-
-static inline void
-ixv_enable_queue(struct adapter *adapter, u32 vector)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 queue = 1 << vector;
- u32 mask;
-
- mask = (IXGBE_EIMS_RTX_QUEUE & queue);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-}
-
-static inline void
-ixv_disable_queue(struct adapter *adapter, u32 vector)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u64 queue = (u64)(1 << vector);
- u32 mask;
-
- mask = (IXGBE_EIMS_RTX_QUEUE & queue);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
-}
-
-static inline void
-ixv_rearm_queues(struct adapter *adapter, u64 queues)
-{
- u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
-}
-
-
-static void
-ixv_handle_que(void *context, int pending)
-{
- struct ix_queue *que = context;
- struct adapter *adapter = que->adapter;
- struct tx_ring *txr = que->txr;
- struct ifnet *ifp = adapter->ifp;
- bool more;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixv_rxeof(que, adapter->rx_process_limit);
- IXV_TX_LOCK(txr);
- ixv_txeof(txr);
-#if __FreeBSD_version >= 800000
- if (!drbr_empty(ifp, txr->br))
- ixv_mq_start_locked(ifp, txr, NULL);
-#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- ixv_start_locked(txr, ifp);
-#endif
- IXV_TX_UNLOCK(txr);
- if (more) {
- taskqueue_enqueue(que->tq, &que->que_task);
- return;
- }
- }
-
- /* Reenable this interrupt */
- ixv_enable_queue(adapter, que->msix);
- return;
-}
-
-/*********************************************************************
- *
- * MSI Queue Interrupt Service routine
- *
- **********************************************************************/
-void
-ixv_msix_que(void *arg)
-{
- struct ix_queue *que = arg;
- struct adapter *adapter = que->adapter;
- struct tx_ring *txr = que->txr;
- struct rx_ring *rxr = que->rxr;
- bool more_tx, more_rx;
- u32 newitr = 0;
-
- ixv_disable_queue(adapter, que->msix);
- ++que->irqs;
-
- more_rx = ixv_rxeof(que, adapter->rx_process_limit);
-
- IXV_TX_LOCK(txr);
- more_tx = ixv_txeof(txr);
- /*
- ** Make certain that if the stack
- ** has anything queued the task gets
- ** scheduled to handle it.
- */
-#if __FreeBSD_version < 800000
- if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
-#else
- if (!drbr_empty(adapter->ifp, txr->br))
-#endif
- more_tx = 1;
- IXV_TX_UNLOCK(txr);
-
- more_rx = ixv_rxeof(que, adapter->rx_process_limit);
-
- /* Do AIM now? */
-
- if (ixv_enable_aim == FALSE)
- goto no_calc;
- /*
- ** Do Adaptive Interrupt Moderation:
- ** - Write out last calculated setting
- ** - Calculate based on average size over
- ** the last interval.
- */
- if (que->eitr_setting)
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_VTEITR(que->msix),
- que->eitr_setting);
-
- que->eitr_setting = 0;
-
- /* Idle, do nothing */
- if ((txr->bytes == 0) && (rxr->bytes == 0))
- goto no_calc;
-
- if ((txr->bytes) && (txr->packets))
- newitr = txr->bytes/txr->packets;
- if ((rxr->bytes) && (rxr->packets))
- newitr = max(newitr,
- (rxr->bytes / rxr->packets));
- newitr += 24; /* account for hardware frame, crc */
-
- /* set an upper boundary */
- newitr = min(newitr, 3000);
-
- /* Be nice to the mid range */
- if ((newitr > 300) && (newitr < 1200))
- newitr = (newitr / 3);
- else
- newitr = (newitr / 2);
-
- newitr |= newitr << 16;
-
- /* save for next interrupt */
- que->eitr_setting = newitr;
-
- /* Reset state */
- txr->bytes = 0;
- txr->packets = 0;
- rxr->bytes = 0;
- rxr->packets = 0;
-
-no_calc:
- if (more_tx || more_rx)
- taskqueue_enqueue(que->tq, &que->que_task);
- else /* Reenable this interrupt */
- ixv_enable_queue(adapter, que->msix);
- return;
-}
-
-static void
-ixv_msix_mbx(void *arg)
-{
- struct adapter *adapter = arg;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 reg;
-
- ++adapter->mbx_irq;
-
- /* First get the cause */
- reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
- /* Clear interrupt with write */
- IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
-
- /* Link status change */
- if (reg & IXGBE_EICR_LSC)
- taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
-
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
- return;
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called whenever the user queries the status of
- * the interface using ifconfig.
- *
- **********************************************************************/
-static void
-ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
-{
- struct adapter *adapter = ifp->if_softc;
-
- INIT_DEBUGOUT("ixv_media_status: begin");
- IXV_CORE_LOCK(adapter);
- ixv_update_link_status(adapter);
-
- ifmr->ifm_status = IFM_AVALID;
- ifmr->ifm_active = IFM_ETHER;
-
- if (!adapter->link_active) {
- IXV_CORE_UNLOCK(adapter);
- return;
- }
-
- ifmr->ifm_status |= IFM_ACTIVE;
-
- switch (adapter->link_speed) {
- case IXGBE_LINK_SPEED_1GB_FULL:
- ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
- break;
- case IXGBE_LINK_SPEED_10GB_FULL:
- ifmr->ifm_active |= IFM_FDX;
- break;
- }
-
- IXV_CORE_UNLOCK(adapter);
-
- return;
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called when the user changes speed/duplex using
- * media/mediopt option with ifconfig.
- *
- **********************************************************************/
-static int
-ixv_media_change(struct ifnet * ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ifmedia *ifm = &adapter->media;
-
- INIT_DEBUGOUT("ixv_media_change: begin");
-
- if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
- return (EINVAL);
-
- switch (IFM_SUBTYPE(ifm->ifm_media)) {
- case IFM_AUTO:
- break;
- default:
- device_printf(adapter->dev, "Only auto media type\n");
- return (EINVAL);
- }
-
- return (0);
-}
-
-/*********************************************************************
- *
- * This routine maps the mbufs to tx descriptors, allowing the
- * TX engine to transmit the packets.
- * - return 0 on success, positive on failure
- *
- **********************************************************************/
-
-static int
-ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
-{
- struct adapter *adapter = txr->adapter;
- u32 olinfo_status = 0, cmd_type_len;
- u32 paylen = 0;
- int i, j, error, nsegs;
- int first, last = 0;
- struct mbuf *m_head;
- bus_dma_segment_t segs[32];
- bus_dmamap_t map;
- struct ixv_tx_buf *txbuf, *txbuf_mapped;
- union ixgbe_adv_tx_desc *txd = NULL;
-
- m_head = *m_headp;
-
- /* Basic descriptor defines */
- cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
-
- if (m_head->m_flags & M_VLANTAG)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
-
- /*
- * Important to capture the first descriptor
- * used because it will contain the index of
- * the one we tell the hardware to report back
- */
- first = txr->next_avail_desc;
- txbuf = &txr->tx_buffers[first];
- txbuf_mapped = txbuf;
- map = txbuf->map;
-
- /*
- * Map the packet for DMA.
- */
- error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (error == EFBIG) {
- struct mbuf *m;
-
- m = m_defrag(*m_headp, M_DONTWAIT);
- if (m == NULL) {
- adapter->mbuf_defrag_failed++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (ENOBUFS);
- }
- *m_headp = m;
-
- /* Try it again */
- error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (error == ENOMEM) {
- adapter->no_tx_dma_setup++;
- return (error);
- } else if (error != 0) {
- adapter->no_tx_dma_setup++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
- } else if (error == ENOMEM) {
- adapter->no_tx_dma_setup++;
- return (error);
- } else if (error != 0) {
- adapter->no_tx_dma_setup++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
-
- /* Make certain there are enough descriptors */
- if (nsegs > txr->tx_avail - 2) {
- txr->no_desc_avail++;
- error = ENOBUFS;
- goto xmit_fail;
- }
- m_head = *m_headp;
-
- /*
- ** Set up the appropriate offload context
- ** this becomes the first descriptor of
- ** a packet.
- */
- if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
- if (ixv_tso_setup(txr, m_head, &paylen)) {
- cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
- olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
- olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
- olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
- ++adapter->tso_tx;
- } else
- return (ENXIO);
- } else if (ixv_tx_ctx_setup(txr, m_head))
- olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
-
- /* Record payload length */
- if (paylen == 0)
- olinfo_status |= m_head->m_pkthdr.len <<
- IXGBE_ADVTXD_PAYLEN_SHIFT;
-
- i = txr->next_avail_desc;
- for (j = 0; j < nsegs; j++) {
- bus_size_t seglen;
- bus_addr_t segaddr;
-
- txbuf = &txr->tx_buffers[i];
- txd = &txr->tx_base[i];
- seglen = segs[j].ds_len;
- segaddr = htole64(segs[j].ds_addr);
-
- txd->read.buffer_addr = segaddr;
- txd->read.cmd_type_len = htole32(txr->txd_cmd |
- cmd_type_len |seglen);
- txd->read.olinfo_status = htole32(olinfo_status);
- last = i; /* descriptor that will get completion IRQ */
-
- if (++i == adapter->num_tx_desc)
- i = 0;
-
- txbuf->m_head = NULL;
- txbuf->eop_index = -1;
- }
-
- txd->read.cmd_type_len |=
- htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
- txr->tx_avail -= nsegs;
- txr->next_avail_desc = i;
-
- txbuf->m_head = m_head;
- txbuf->map = map;
- bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
-
- /* Set the index of the descriptor that will be marked done */
- txbuf = &txr->tx_buffers[first];
- txbuf->eop_index = last;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /*
- * Advance the Transmit Descriptor Tail (Tdt), this tells the
- * hardware that this frame is available to transmit.
- */
- ++txr->total_packets;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
-
- return (0);
-
-xmit_fail:
- bus_dmamap_unload(txr->txtag, txbuf->map);
- return (error);
-
-}
-
-
-/*********************************************************************
- * Multicast Update
- *
- * This routine is called whenever multicast address list is updated.
- *
- **********************************************************************/
-#define IXGBE_RAR_ENTRIES 16
-
-static void
-ixv_set_multi(struct adapter *adapter)
-{
- u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 *update_ptr;
- struct ifmultiaddr *ifma;
- int mcnt = 0;
- struct ifnet *ifp = adapter->ifp;
-
- IOCTL_DEBUGOUT("ixv_set_multi: begin");
-
-#if __FreeBSD_version < 800000
- IF_ADDR_LOCK(ifp);
-#else
- if_maddr_rlock(ifp);
-#endif
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
- &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
- IXGBE_ETH_LENGTH_OF_ADDRESS);
- mcnt++;
- }
-#if __FreeBSD_version < 800000
- IF_ADDR_UNLOCK(ifp);
-#else
- if_maddr_runlock(ifp);
-#endif
-
- update_ptr = mta;
-
- ixgbe_update_mc_addr_list(&adapter->hw,
- update_ptr, mcnt, ixv_mc_array_itr, TRUE);
-
- return;
-}
-
-/*
- * This is an iterator function now needed by the multicast
- * shared code. It simply feeds the shared code routine the
- * addresses in the array of ixv_set_multi() one by one.
- */
-static u8 *
-ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
-{
- u8 *addr = *update_ptr;
- u8 *newptr;
- *vmdq = 0;
-
- newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
- *update_ptr = newptr;
- return addr;
-}
-
-/*********************************************************************
- * Timer routine
- *
- * This routine checks for link status,updates statistics,
- * and runs the watchdog check.
- *
- **********************************************************************/
-
-static void
-ixv_local_timer(void *arg)
-{
- struct adapter *adapter = arg;
- device_t dev = adapter->dev;
- struct tx_ring *txr = adapter->tx_rings;
- int i;
-
- mtx_assert(&adapter->core_mtx, MA_OWNED);
-
- ixv_update_link_status(adapter);
-
- /* Stats Update */
- ixv_update_stats(adapter);
-
- /*
- * If the interface has been paused
- * then don't do the watchdog check
- */
- if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
- goto out;
- /*
- ** Check for time since any descriptor was cleaned
- */
- for (i = 0; i < adapter->num_queues; i++, txr++) {
- IXV_TX_LOCK(txr);
- if (txr->watchdog_check == FALSE) {
- IXV_TX_UNLOCK(txr);
- continue;
- }
- if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
- goto hung;
- IXV_TX_UNLOCK(txr);
- }
-out:
- ixv_rearm_queues(adapter, adapter->que_mask);
- callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
- return;
-
-hung:
- device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
- device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
- IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
- IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
- device_printf(dev,"TX(%d) desc avail = %d,"
- "Next TX to Clean = %d\n",
- txr->me, txr->tx_avail, txr->next_to_clean);
- adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- adapter->watchdog_events++;
- IXV_TX_UNLOCK(txr);
- ixv_init_locked(adapter);
-}
-
-/*
-** Note: this routine updates the OS on the link state
-** the real check of the hardware only happens with
-** a link interrupt.
-*/
-static void
-ixv_update_link_status(struct adapter *adapter)
-{
- struct ifnet *ifp = adapter->ifp;
- struct tx_ring *txr = adapter->tx_rings;
- device_t dev = adapter->dev;
-
-
- if (adapter->link_up){
- if (adapter->link_active == FALSE) {
- if (bootverbose)
- device_printf(dev,"Link is up %d Gbps %s \n",
- ((adapter->link_speed == 128)? 10:1),
- "Full Duplex");
- adapter->link_active = TRUE;
- if_link_state_change(ifp, LINK_STATE_UP);
- }
- } else { /* Link down */
- if (adapter->link_active == TRUE) {
- if (bootverbose)
- device_printf(dev,"Link is Down\n");
- if_link_state_change(ifp, LINK_STATE_DOWN);
- adapter->link_active = FALSE;
- for (int i = 0; i < adapter->num_queues;
- i++, txr++)
- txr->watchdog_check = FALSE;
- }
- }
-
- return;
-}
-
-
-/*********************************************************************
- *
- * This routine disables all traffic on the adapter by issuing a
- * global reset on the MAC and deallocates TX/RX buffers.
- *
- **********************************************************************/
-
-static void
-ixv_stop(void *arg)
-{
- struct ifnet *ifp;
- struct adapter *adapter = arg;
- struct ixgbe_hw *hw = &adapter->hw;
- ifp = adapter->ifp;
-
- mtx_assert(&adapter->core_mtx, MA_OWNED);
-
- INIT_DEBUGOUT("ixv_stop: begin\n");
- ixv_disable_intr(adapter);
-
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-
- ixgbe_reset_hw(hw);
- adapter->hw.adapter_stopped = FALSE;
- ixgbe_stop_adapter(hw);
- callout_stop(&adapter->timer);
-
- /* reprogram the RAR[0] in case user changed it. */
- ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
- return;
-}
-
-
-/*********************************************************************
- *
- * Determine hardware revision.
- *
- **********************************************************************/
-static void
-ixv_identify_hardware(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- u16 pci_cmd_word;
-
- /*
-	** Make sure BUSMASTER is set; on a VM under
-	** KVM it may not be, and that will break things.
- */
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
- (pci_cmd_word & PCIM_CMD_MEMEN))) {
- INIT_DEBUGOUT("Memory Access and/or Bus Master "
- "bits were not set!\n");
- pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
- }
-
- /* Save off the information about this board */
- adapter->hw.vendor_id = pci_get_vendor(dev);
- adapter->hw.device_id = pci_get_device(dev);
- adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
- adapter->hw.subsystem_vendor_id =
- pci_read_config(dev, PCIR_SUBVEND_0, 2);
- adapter->hw.subsystem_device_id =
- pci_read_config(dev, PCIR_SUBDEV_0, 2);
-
- return;
-}
-
-/*********************************************************************
- *
- * Setup MSIX Interrupt resources and handlers
- *
- **********************************************************************/
-static int
-ixv_allocate_msix(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que = adapter->queues;
- int error, rid, vector = 0;
-
- for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
- rid = vector + 1;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (que->res == NULL) {
- device_printf(dev,"Unable to allocate"
- " bus resource: que interrupt [%d]\n", vector);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixv_msix_que, que, &que->tag);
- if (error) {
- que->res = NULL;
- device_printf(dev, "Failed to register QUE handler");
- return (error);
- }
-#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, que->res, que->tag, "que %d", i);
-#endif
- que->msix = vector;
- adapter->que_mask |= (u64)(1 << que->msix);
- /*
- ** Bind the msix vector, and thus the
- ** ring to the corresponding cpu.
- */
- if (adapter->num_queues > 1)
- bus_bind_intr(dev, que->res, i);
-
- TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
- que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(adapter->dev));
- }
-
- /* and Mailbox */
- rid = vector + 1;
- adapter->res = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
- if (!adapter->res) {
- device_printf(dev,"Unable to allocate"
- " bus resource: MBX interrupt [%d]\n", rid);
- return (ENXIO);
- }
- /* Set the mbx handler function */
- error = bus_setup_intr(dev, adapter->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixv_msix_mbx, adapter, &adapter->tag);
- if (error) {
- adapter->res = NULL;
- device_printf(dev, "Failed to register LINK handler");
- return (error);
- }
-#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
-#endif
- adapter->mbxvec = vector;
- /* Tasklets for Mailbox */
- TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
- adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
- taskqueue_thread_enqueue, &adapter->tq);
- taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
- device_get_nameunit(adapter->dev));
- /*
- ** XXX - remove this when KVM/QEMU fix gets in...
-	** Due to a broken design, QEMU will fail to properly
- ** enable the guest for MSIX unless the vectors in
- ** the table are all set up, so we must rewrite the
- ** ENABLE in the MSIX control register again at this
- ** point to cause it to successfully initialize us.
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
- int msix_ctrl;
- pci_find_extcap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
-
- return (0);
-}
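-
-/*
- * For reference, the vector layout this produces with num_queues == 1
- * (the value set in ixv_allocate_pci_resources): MSIX vector 0 (rid 1)
- * services the queue and vector 1 (rid 2) services the mailbox, so
- * que->msix == 0 and adapter->mbxvec == 1.
- */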
-
-/*
- * Setup MSIX resources, note that the VF
- * device MUST use MSIX, there is no fallback.
- */
-static int
-ixv_setup_msix(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- int rid, vectors, want = 2;
-
-
- /* First try MSI/X */
- rid = PCIR_BAR(3);
- adapter->msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (!adapter->msix_mem) {
- device_printf(adapter->dev,
- "Unable to map MSIX table \n");
- goto out;
- }
-
- vectors = pci_msix_count(dev);
- if (vectors < 2) {
- bus_release_resource(dev, SYS_RES_MEMORY,
- rid, adapter->msix_mem);
- adapter->msix_mem = NULL;
- goto out;
- }
-
- /*
- ** Want two vectors: one for a queue,
-	** plus an additional one for the mailbox.
- */
- if (pci_alloc_msix(dev, &want) == 0) {
- device_printf(adapter->dev,
- "Using MSIX interrupts with %d vectors\n", want);
- return (want);
- }
-out:
- device_printf(adapter->dev,"MSIX config error\n");
- return (ENXIO);
-}
-
-
-static int
-ixv_allocate_pci_resources(struct adapter *adapter)
-{
- int rid;
- device_t dev = adapter->dev;
-
- rid = PCIR_BAR(0);
- adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
-
- if (!(adapter->pci_mem)) {
- device_printf(dev,"Unable to allocate bus resource: memory\n");
- return (ENXIO);
- }
-
- adapter->osdep.mem_bus_space_tag =
- rman_get_bustag(adapter->pci_mem);
- adapter->osdep.mem_bus_space_handle =
- rman_get_bushandle(adapter->pci_mem);
- adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
-
- adapter->num_queues = 1;
- adapter->hw.back = &adapter->osdep;
-
- /*
-	** Now set up MSI/X, which should
-	** return the number of
-	** configured vectors.
- */
- adapter->msix = ixv_setup_msix(adapter);
- if (adapter->msix == ENXIO)
- return (ENXIO);
- else
- return (0);
-}
-
-static void
-ixv_free_pci_resources(struct adapter * adapter)
-{
- struct ix_queue *que = adapter->queues;
- device_t dev = adapter->dev;
- int rid, memrid;
-
- memrid = PCIR_BAR(MSIX_BAR);
-
- /*
- ** There is a slight possibility of a failure mode
- ** in attach that will result in entering this function
- ** before interrupt resources have been initialized, and
- ** in that case we do not want to execute the loops below
- ** We can detect this reliably by the state of the adapter
- ** res pointer.
- */
- if (adapter->res == NULL)
- goto mem;
-
- /*
- ** Release all msix queue resources:
- */
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
- }
- if (que->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- }
-
-
- /* Clean the Legacy or Link interrupt last */
- if (adapter->mbxvec) /* we are doing MSIX */
- rid = adapter->mbxvec + 1;
- else
-		rid = (adapter->msix != 0) ? 1 : 0;
-
- if (adapter->tag != NULL) {
- bus_teardown_intr(dev, adapter->res, adapter->tag);
- adapter->tag = NULL;
- }
- if (adapter->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
-
-mem:
- if (adapter->msix)
- pci_release_msi(dev);
-
- if (adapter->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- memrid, adapter->msix_mem);
-
- if (adapter->pci_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(0), adapter->pci_mem);
-
- return;
-}
-
-/*********************************************************************
- *
- * Setup networking device structure and register an interface.
- *
- **********************************************************************/
-static void
-ixv_setup_interface(device_t dev, struct adapter *adapter)
-{
- struct ifnet *ifp;
-
- INIT_DEBUGOUT("ixv_setup_interface: begin");
-
- ifp = adapter->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_mtu = ETHERMTU;
- ifp->if_baudrate = 1000000000;
- ifp->if_init = ixv_init;
- ifp->if_softc = adapter;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixv_ioctl;
-#if __FreeBSD_version >= 800000
- ifp->if_transmit = ixv_mq_start;
- ifp->if_qflush = ixv_qflush;
-#else
- ifp->if_start = ixv_start;
-#endif
- ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
-
- ether_ifattach(ifp, adapter->hw.mac.addr);
-
- adapter->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-
- /*
- * Tell the upper layer(s) we support long frames.
- */
- ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
-
- ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
-
- ifp->if_capenable = ifp->if_capabilities;
-
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
- ixv_media_status);
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
-
- return;
-}
-
-static void
-ixv_config_link(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 autoneg, err = 0;
- bool negotiate = TRUE;
-
- if (hw->mac.ops.check_link)
- err = hw->mac.ops.check_link(hw, &autoneg,
- &adapter->link_up, FALSE);
- if (err)
- goto out;
-
- if (hw->mac.ops.setup_link)
- err = hw->mac.ops.setup_link(hw, autoneg,
- negotiate, adapter->link_up);
-out:
- return;
-}
-
-/********************************************************************
- * Manage DMA'able memory.
- *******************************************************************/
-static void
-ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
-{
- if (error)
- return;
- *(bus_addr_t *) arg = segs->ds_addr;
- return;
-}
-
-static int
-ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
- struct ixv_dma_alloc *dma, int mapflags)
-{
- device_t dev = adapter->dev;
- int r;
-
- r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
- DBA_ALIGN, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- size, /* maxsize */
- 1, /* nsegments */
- size, /* maxsegsize */
- BUS_DMA_ALLOCNOW, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &dma->dma_tag);
- if (r != 0) {
- device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
- "error %u\n", r);
- goto fail_0;
- }
- r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
- BUS_DMA_NOWAIT, &dma->dma_map);
- if (r != 0) {
- device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
- "error %u\n", r);
- goto fail_1;
- }
- r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
- size,
- ixv_dmamap_cb,
- &dma->dma_paddr,
- mapflags | BUS_DMA_NOWAIT);
- if (r != 0) {
- device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
- "error %u\n", r);
- goto fail_2;
- }
- dma->dma_size = size;
- return (0);
-fail_2:
- bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
-fail_1:
- bus_dma_tag_destroy(dma->dma_tag);
-fail_0:
- dma->dma_map = NULL;
- dma->dma_tag = NULL;
- return (r);
-}
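-
-/*
- * Typical usage sketch (hypothetical caller, for illustration only):
- *
- *	struct ixv_dma_alloc dma;
- *
- *	if (ixv_dma_malloc(adapter, 4096, &dma, BUS_DMA_NOWAIT) == 0) {
- *		// dma.dma_vaddr is the CPU mapping, dma.dma_paddr is the
- *		// bus address to hand to the hardware
- *		ixv_dma_free(adapter, &dma);
- *	}
- */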
-
-static void
-ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
-{
- bus_dmamap_sync(dma->dma_tag, dma->dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(dma->dma_tag, dma->dma_map);
- bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
- bus_dma_tag_destroy(dma->dma_tag);
-}
-
-
-/*********************************************************************
- *
- * Allocate memory for the transmit and receive rings, and then
- * the descriptors associated with each, called only once at attach.
- *
- **********************************************************************/
-static int
-ixv_allocate_queues(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize, error = 0;
- int txconf = 0, rxconf = 0;
-
- /* First allocate the top level queue structs */
- if (!(adapter->queues =
- (struct ix_queue *) malloc(sizeof(struct ix_queue) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* First allocate the TX ring struct memory */
- if (!(adapter->tx_rings =
- (struct tx_ring *) malloc(sizeof(struct tx_ring) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate TX ring memory\n");
- error = ENOMEM;
- goto tx_fail;
- }
-
- /* Next allocate the RX */
- if (!(adapter->rx_rings =
- (struct rx_ring *) malloc(sizeof(struct rx_ring) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate RX ring memory\n");
- error = ENOMEM;
- goto rx_fail;
- }
-
- /* For the ring itself */
- tsize = roundup2(adapter->num_tx_desc *
- sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
-
- /*
- * Now set up the TX queues, txconf is needed to handle the
- * possibility that things fail midcourse and we need to
- * undo memory gracefully
- */
- for (int i = 0; i < adapter->num_queues; i++, txconf++) {
- /* Set up some basics */
- txr = &adapter->tx_rings[i];
- txr->adapter = adapter;
- txr->me = i;
-
- /* Initialize the TX side lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), txr->me);
- mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
-
- if (ixv_dma_malloc(adapter, tsize,
- &txr->txdma, BUS_DMA_NOWAIT)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
- txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
- bzero((void *)txr->tx_base, tsize);
-
- /* Now allocate transmit buffers for the ring */
- if (ixv_allocate_transmit_buffers(txr)) {
- device_printf(dev,
- "Critical Failure setting up transmit buffers\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
-#if __FreeBSD_version >= 800000
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
- M_WAITOK, &txr->tx_mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up buf ring\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
-#endif
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
- for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
- rxr = &adapter->rx_rings[i];
- /* Set up some basics */
- rxr->adapter = adapter;
- rxr->me = i;
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), rxr->me);
- mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (ixv_dma_malloc(adapter, rsize,
- &rxr->rxdma, BUS_DMA_NOWAIT)) {
- device_printf(dev,
- "Unable to allocate RxDescriptor memory\n");
- error = ENOMEM;
- goto err_rx_desc;
- }
- rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
- bzero((void *)rxr->rx_base, rsize);
-
- /* Allocate receive buffers for the ring*/
- if (ixv_allocate_receive_buffers(rxr)) {
- device_printf(dev,
- "Critical Failure setting up receive buffers\n");
- error = ENOMEM;
- goto err_rx_desc;
- }
- }
-
- /*
- ** Finally set up the queue holding structs
- */
- for (int i = 0; i < adapter->num_queues; i++) {
- que = &adapter->queues[i];
- que->adapter = adapter;
- que->txr = &adapter->tx_rings[i];
- que->rxr = &adapter->rx_rings[i];
- }
-
- return (0);
-
-err_rx_desc:
- for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
- ixv_dma_free(adapter, &rxr->rxdma);
-err_tx_desc:
- for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
- ixv_dma_free(adapter, &txr->txdma);
- free(adapter->rx_rings, M_DEVBUF);
-rx_fail:
- free(adapter->tx_rings, M_DEVBUF);
-tx_fail:
- free(adapter->queues, M_DEVBUF);
-fail:
- return (error);
-}
-
-
-/*********************************************************************
- *
- * Allocate memory for tx_buffer structures. The tx_buffer stores all
- * the information needed to transmit a packet on the wire. This is
- * called only once at attach, setup is done every reset.
- *
- **********************************************************************/
-static int
-ixv_allocate_transmit_buffers(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- device_t dev = adapter->dev;
- struct ixv_tx_buf *txbuf;
- int error, i;
-
- /*
- * Setup DMA descriptor areas.
- */
- if ((error = bus_dma_tag_create(NULL, /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- IXV_TSO_SIZE, /* maxsize */
- 32, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &txr->txtag))) {
- device_printf(dev,"Unable to allocate TX DMA tag\n");
- goto fail;
- }
-
- if (!(txr->tx_buffers =
- (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
- adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* Create the descriptor buffer dma maps */
- txbuf = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
- error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
- if (error != 0) {
- device_printf(dev, "Unable to create TX DMA map\n");
- goto fail;
- }
- }
-
- return 0;
-fail:
- /* We free all, it handles case where we are in the middle */
- ixv_free_transmit_structures(adapter);
- return (error);
-}
-
-/*********************************************************************
- *
- * Initialize a transmit ring.
- *
- **********************************************************************/
-static void
-ixv_setup_transmit_ring(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixv_tx_buf *txbuf;
- int i;
-
- /* Clear the old ring contents */
- IXV_TX_LOCK(txr);
- bzero((void *)txr->tx_base,
- (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
- /* Reset indices */
- txr->next_avail_desc = 0;
- txr->next_to_clean = 0;
-
- /* Free any existing tx buffers. */
- txbuf = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
- if (txbuf->m_head != NULL) {
- bus_dmamap_sync(txr->txtag, txbuf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag, txbuf->map);
- m_freem(txbuf->m_head);
- txbuf->m_head = NULL;
- }
- /* Clear the EOP index */
- txbuf->eop_index = -1;
- }
-
- /* Set number of descriptors available */
- txr->tx_avail = adapter->num_tx_desc;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- IXV_TX_UNLOCK(txr);
-}
-
-/*********************************************************************
- *
- * Initialize all transmit rings.
- *
- **********************************************************************/
-static int
-ixv_setup_transmit_structures(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++)
- ixv_setup_transmit_ring(txr);
-
- return (0);
-}
-
-/*********************************************************************
- *
- * Enable transmit unit.
- *
- **********************************************************************/
-static void
-ixv_initialize_transmit_units(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
- struct ixgbe_hw *hw = &adapter->hw;
-
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- u64 tdba = txr->txdma.dma_paddr;
- u32 txctrl, txdctl;
-
- /* Set WTHRESH to 8, burst writeback */
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
- txdctl |= (8 << 16);
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
- /* Now enable */
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
-
- /* Set the HW Tx Head and Tail indices */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
-
- /* Setup Transmit Descriptor Cmd Settings */
- txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
- txr->watchdog_check = FALSE;
-
- /* Set Ring parameters */
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
- (tdba & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
- adapter->num_tx_desc *
- sizeof(struct ixgbe_legacy_tx_desc));
- txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
- break;
- }
-
- return;
-}
-
-/*********************************************************************
- *
- * Free all transmit rings.
- *
- **********************************************************************/
-static void
-ixv_free_transmit_structures(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXV_TX_LOCK(txr);
- ixv_free_transmit_buffers(txr);
- ixv_dma_free(adapter, &txr->txdma);
- IXV_TX_UNLOCK(txr);
- IXV_TX_LOCK_DESTROY(txr);
- }
- free(adapter->tx_rings, M_DEVBUF);
-}
-
-/*********************************************************************
- *
- * Free transmit ring related data structures.
- *
- **********************************************************************/
-static void
-ixv_free_transmit_buffers(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixv_tx_buf *tx_buffer;
- int i;
-
- INIT_DEBUGOUT("free_transmit_ring: begin");
-
- if (txr->tx_buffers == NULL)
- return;
-
- tx_buffer = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
- if (tx_buffer->m_head != NULL) {
- bus_dmamap_sync(txr->txtag, tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- if (tx_buffer->map != NULL) {
- bus_dmamap_destroy(txr->txtag,
- tx_buffer->map);
- tx_buffer->map = NULL;
- }
- } else if (tx_buffer->map != NULL) {
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- bus_dmamap_destroy(txr->txtag,
- tx_buffer->map);
- tx_buffer->map = NULL;
- }
- }
-#if __FreeBSD_version >= 800000
- if (txr->br != NULL)
- buf_ring_free(txr->br, M_DEVBUF);
-#endif
- if (txr->tx_buffers != NULL) {
- free(txr->tx_buffers, M_DEVBUF);
- txr->tx_buffers = NULL;
- }
- if (txr->txtag != NULL) {
- bus_dma_tag_destroy(txr->txtag);
- txr->txtag = NULL;
- }
- return;
-}
-
-/*********************************************************************
- *
- * Advanced Context Descriptor setup for VLAN or CSUM
- *
- **********************************************************************/
-
-static boolean_t
-ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_adv_tx_context_desc *TXD;
- struct ixv_tx_buf *tx_buffer;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- struct ether_vlan_header *eh;
- struct ip *ip;
- struct ip6_hdr *ip6;
- int ehdrlen, ip_hlen = 0;
- u16 etype;
- u8 ipproto = 0;
- bool offload = TRUE;
- int ctxd = txr->next_avail_desc;
- u16 vtag = 0;
-
-
- if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
- offload = FALSE;
-
-
- tx_buffer = &txr->tx_buffers[ctxd];
- TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
- /*
- ** In advanced descriptors the vlan tag must
- ** be placed into the descriptor itself.
- */
- if (mp->m_flags & M_VLANTAG) {
- vtag = htole16(mp->m_pkthdr.ether_vtag);
- vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- } else if (offload == FALSE)
- return FALSE;
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- } else {
- etype = ntohs(eh->evl_encap_proto);
- ehdrlen = ETHER_HDR_LEN;
- }
-
- /* Set the ether header length */
- vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
-
- switch (etype) {
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + ehdrlen);
- ip_hlen = ip->ip_hl << 2;
- if (mp->m_len < ehdrlen + ip_hlen)
- return (FALSE);
- ipproto = ip->ip_p;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- break;
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- ip_hlen = sizeof(struct ip6_hdr);
- if (mp->m_len < ehdrlen + ip_hlen)
- return (FALSE);
- ipproto = ip6->ip6_nxt;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
- break;
- default:
- offload = FALSE;
- break;
- }
-
- vlan_macip_lens |= ip_hlen;
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
-
- switch (ipproto) {
- case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & CSUM_TCP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
-
- case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags & CSUM_UDP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
- break;
-
-#if __FreeBSD_version >= 800000
- case IPPROTO_SCTP:
- if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- break;
-#endif
- default:
- offload = FALSE;
- break;
- }
-
- /* Now copy bits into descriptor */
- TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
- TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
- TXD->seqnum_seed = htole32(0);
- TXD->mss_l4len_idx = htole32(0);
-
- tx_buffer->m_head = NULL;
- tx_buffer->eop_index = -1;
-
- /* We've consumed the first desc, adjust counters */
- if (++ctxd == adapter->num_tx_desc)
- ctxd = 0;
- txr->next_avail_desc = ctxd;
- --txr->tx_avail;
-
- return (offload);
-}
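-
-/*
- * Worked example of the packing above (values illustrative): an
- * untagged IPv4/TCP frame with no IP options has ehdrlen = 14 and
- * ip_hlen = 20, so vlan_macip_lens carries 20 in its low IPLEN bits
- * and 14 above IXGBE_ADVTXD_MACLEN_SHIFT; a VLAN tag, when present,
- * occupies the bits above IXGBE_ADVTXD_VLAN_SHIFT.
- */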
-
-/**********************************************************************
- *
- * Setup work for hardware segmentation offload (TSO) on
- * adapters using advanced tx descriptors
- *
- **********************************************************************/
-static boolean_t
-ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_adv_tx_context_desc *TXD;
- struct ixv_tx_buf *tx_buffer;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- u32 mss_l4len_idx = 0;
- u16 vtag = 0;
- int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
- struct ether_vlan_header *eh;
- struct ip *ip;
- struct tcphdr *th;
-
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- else
- ehdrlen = ETHER_HDR_LEN;
-
- /* Ensure we have at least the IP+TCP header in the first mbuf. */
- if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
- return FALSE;
-
- ctxd = txr->next_avail_desc;
- tx_buffer = &txr->tx_buffers[ctxd];
- TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
- ip = (struct ip *)(mp->m_data + ehdrlen);
- if (ip->ip_p != IPPROTO_TCP)
- return FALSE; /* 0 */
- ip->ip_sum = 0;
- ip_hlen = ip->ip_hl << 2;
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
- tcp_hlen = th->th_off << 2;
- hdrlen = ehdrlen + ip_hlen + tcp_hlen;
-
- /* This is used in the transmit desc in encap */
- *paylen = mp->m_pkthdr.len - hdrlen;
-
- /* VLAN MACLEN IPLEN */
- if (mp->m_flags & M_VLANTAG) {
- vtag = htole16(mp->m_pkthdr.ether_vtag);
- vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- }
-
- vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= ip_hlen;
- TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
-
- /* ADV DTYPE TUCMD */
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
-
-
- /* MSS L4LEN IDX */
- mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
- TXD->mss_l4len_idx = htole32(mss_l4len_idx);
-
- TXD->seqnum_seed = htole32(0);
- tx_buffer->m_head = NULL;
- tx_buffer->eop_index = -1;
-
- if (++ctxd == adapter->num_tx_desc)
- ctxd = 0;
-
- txr->tx_avail--;
- txr->next_avail_desc = ctxd;
- return TRUE;
-}
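-
-/*
- * A note on the checksum seeding above (the usual TSO convention, as
- * far as this driver is concerned): th_sum is preset to the IPv4
- * pseudo-header sum *without* the length field, since the hardware
- * inserts the per-segment length itself when it carves the payload
- * into MSS-sized segments.
- */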
-
-
-/**********************************************************************
- *
- * Examine each tx_buffer in the used queue. If the hardware is done
- * processing the packet then free associated resources. The
- * tx_buffer is put back on the free queue.
- *
- **********************************************************************/
-static boolean_t
-ixv_txeof(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ifnet *ifp = adapter->ifp;
- u32 first, last, done;
- struct ixv_tx_buf *tx_buffer;
- struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
-
- mtx_assert(&txr->tx_mtx, MA_OWNED);
-
- if (txr->tx_avail == adapter->num_tx_desc)
- return FALSE;
-
- first = txr->next_to_clean;
- tx_buffer = &txr->tx_buffers[first];
- /* For cleanup we just use legacy struct */
- tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
- last = tx_buffer->eop_index;
- if (last == -1)
- return FALSE;
- eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
-
- /*
- ** Get the index of the first descriptor
- ** BEYOND the EOP and call that 'done'.
- ** I do this so the comparison in the
- ** inner while loop below can be simple
- */
- if (++last == adapter->num_tx_desc) last = 0;
- done = last;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
- /*
- ** Only the EOP descriptor of a packet now has the DD
- ** bit set, this is what we look for...
- */
- while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
- /* We clean the range of the packet */
- while (first != done) {
- tx_desc->upper.data = 0;
- tx_desc->lower.data = 0;
- tx_desc->buffer_addr = 0;
- ++txr->tx_avail;
-
- if (tx_buffer->m_head) {
- bus_dmamap_sync(txr->txtag,
- tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- tx_buffer->map = NULL;
- }
- tx_buffer->eop_index = -1;
- txr->watchdog_time = ticks;
-
- if (++first == adapter->num_tx_desc)
- first = 0;
-
- tx_buffer = &txr->tx_buffers[first];
- tx_desc =
- (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
- }
- ++ifp->if_opackets;
- /* See if there is more work now */
- last = tx_buffer->eop_index;
- if (last != -1) {
- eop_desc =
- (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
- /* Get next done point */
- if (++last == adapter->num_tx_desc) last = 0;
- done = last;
- } else
- break;
- }
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- txr->next_to_clean = first;
-
- /*
- * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
- * it is OK to send packets. If there are no pending descriptors,
- * clear the timeout. Otherwise, if some descriptors have been freed,
- * restart the timeout.
- */
- if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- if (txr->tx_avail == adapter->num_tx_desc) {
- txr->watchdog_check = FALSE;
- return FALSE;
- }
- }
-
- return TRUE;
-}
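-
-/*
- * Worked example of the 'done' bookkeeping above (illustrative): with
- * num_tx_desc = 8 and a packet occupying descriptors 2..4, eop_index
- * is 4, so 'done' becomes 5 and the inner loop cleans 2, 3 and 4
- * before looking for another completed EOP descriptor.
- */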
-
-/*********************************************************************
- *
- * Refresh mbuf buffers for RX descriptor rings
- * - now keeps its own state so discards due to resource
- * exhaustion are unnecessary; if an mbuf cannot be obtained
- * it just returns, keeping its placeholder, so it can simply
- * be called again to retry.
- *
- **********************************************************************/
-static void
-ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
-{
- struct adapter *adapter = rxr->adapter;
- bus_dma_segment_t hseg[1];
- bus_dma_segment_t pseg[1];
- struct ixv_rx_buf *rxbuf;
- struct mbuf *mh, *mp;
- int i, j, nsegs, error;
- bool refreshed = FALSE;
-
- i = j = rxr->next_to_refresh;
- /* Get the control variable, one beyond refresh point */
- if (++j == adapter->num_rx_desc)
- j = 0;
- while (j != limit) {
- rxbuf = &rxr->rx_buffers[i];
- if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
- mh = m_gethdr(M_DONTWAIT, MT_DATA);
- if (mh == NULL)
- goto update;
- mh->m_pkthdr.len = mh->m_len = MHLEN;
- mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- m_adj(mh, ETHER_ALIGN);
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("GET BUF: dmamap load"
- " failure - %d\n", error);
- m_free(mh);
- goto update;
- }
- rxbuf->m_head = mh;
- bus_dmamap_sync(rxr->htag, rxbuf->hmap,
- BUS_DMASYNC_PREREAD);
- rxr->rx_base[i].read.hdr_addr =
- htole64(hseg[0].ds_addr);
- }
-
- if (rxbuf->m_pack == NULL) {
- mp = m_getjcl(M_DONTWAIT, MT_DATA,
- M_PKTHDR, adapter->rx_mbuf_sz);
- if (mp == NULL)
- goto update;
- } else
- mp = rxbuf->m_pack;
-
- mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("GET BUF: dmamap load"
- " failure - %d\n", error);
- m_free(mp);
- rxbuf->m_pack = NULL;
- goto update;
- }
- rxbuf->m_pack = mp;
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_PREREAD);
- rxr->rx_base[i].read.pkt_addr =
- htole64(pseg[0].ds_addr);
-
- refreshed = TRUE;
- rxr->next_to_refresh = i = j;
- /* Calculate next index */
- if (++j == adapter->num_rx_desc)
- j = 0;
- }
-update:
- if (refreshed) /* update tail index */
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
- return;
-}
-
-/*********************************************************************
- *
- * Allocate memory for rx_buffer structures. Since we use one
- * rx_buffer per received packet, the maximum number of rx_buffers
- * that we'll need is equal to the number of receive descriptors
- * that we've allocated.
- *
- **********************************************************************/
-static int
-ixv_allocate_receive_buffers(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- device_t dev = adapter->dev;
- struct ixv_rx_buf *rxbuf;
- int i, bsize, error;
-
- bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
- if (!(rxr->rx_buffers =
- (struct ixv_rx_buf *) malloc(bsize,
- M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate rx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MSIZE, /* maxsize */
- 1, /* nsegments */
- MSIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->htag))) {
- device_printf(dev, "Unable to create RX DMA tag\n");
- goto fail;
- }
-
- if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MJUMPAGESIZE, /* maxsize */
- 1, /* nsegments */
- MJUMPAGESIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->ptag))) {
- device_printf(dev, "Unable to create RX DMA tag\n");
- goto fail;
- }
-
- for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
- rxbuf = &rxr->rx_buffers[i];
- error = bus_dmamap_create(rxr->htag,
- BUS_DMA_NOWAIT, &rxbuf->hmap);
- if (error) {
- device_printf(dev, "Unable to create RX head map\n");
- goto fail;
- }
- error = bus_dmamap_create(rxr->ptag,
- BUS_DMA_NOWAIT, &rxbuf->pmap);
- if (error) {
- device_printf(dev, "Unable to create RX pkt map\n");
- goto fail;
- }
- }
-
- return (0);
-
-fail:
- /* Frees all, but can handle partial completion */
- ixv_free_receive_structures(adapter);
- return (error);
-}
-
-static void
-ixv_free_receive_ring(struct rx_ring *rxr)
-{
- struct adapter *adapter;
- struct ixv_rx_buf *rxbuf;
- int i;
-
- adapter = rxr->adapter;
- for (i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, rxbuf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, rxbuf->hmap);
- rxbuf->m_head->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_head);
- }
- if (rxbuf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
- rxbuf->m_pack->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_pack);
- }
- rxbuf->m_head = NULL;
- rxbuf->m_pack = NULL;
- }
-}
-
-
-/*********************************************************************
- *
- * Initialize a receive ring and its buffers.
- *
- **********************************************************************/
-static int
-ixv_setup_receive_ring(struct rx_ring *rxr)
-{
- struct adapter *adapter;
- struct ifnet *ifp;
- device_t dev;
- struct ixv_rx_buf *rxbuf;
- bus_dma_segment_t pseg[1], hseg[1];
- struct lro_ctrl *lro = &rxr->lro;
- int rsize, nsegs, error = 0;
-
- adapter = rxr->adapter;
- ifp = adapter->ifp;
- dev = adapter->dev;
-
- /* Clear the ring contents */
- IXV_RX_LOCK(rxr);
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
- bzero((void *)rxr->rx_base, rsize);
-
- /* Free current RX buffer structs and their mbufs */
- ixv_free_receive_ring(rxr);
-
- /* Configure header split? */
- if (ixv_header_split)
- rxr->hdr_split = TRUE;
-
- /* Now replenish the mbufs */
- for (int j = 0; j != adapter->num_rx_desc; ++j) {
- struct mbuf *mh, *mp;
-
- rxbuf = &rxr->rx_buffers[j];
- /*
-		** Don't allocate mbufs if we're not
-		** doing header split; it's wasteful
- */
- if (rxr->hdr_split == FALSE)
- goto skip_head;
-
- /* First the header */
- rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
- if (rxbuf->m_head == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- m_adj(rxbuf->m_head, ETHER_ALIGN);
- mh = rxbuf->m_head;
- mh->m_len = mh->m_pkthdr.len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- rxbuf->hmap, rxbuf->m_head, hseg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) /* Nothing elegant to do here */
- goto fail;
- bus_dmamap_sync(rxr->htag,
- rxbuf->hmap, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
-
-skip_head:
- /* Now the payload cluster */
- rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
- M_PKTHDR, adapter->rx_mbuf_sz);
- if (rxbuf->m_pack == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- mp = rxbuf->m_pack;
- mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- rxbuf->pmap, mp, pseg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0)
- goto fail;
- bus_dmamap_sync(rxr->ptag,
- rxbuf->pmap, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
- }
-
-
- /* Setup our descriptor indices */
- rxr->next_to_check = 0;
- rxr->next_to_refresh = 0;
- rxr->lro_enabled = FALSE;
- rxr->rx_split_packets = 0;
- rxr->rx_bytes = 0;
- rxr->discard = FALSE;
-
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /*
- ** Now set up the LRO interface:
- */
- if (ifp->if_capenable & IFCAP_LRO) {
- int err = tcp_lro_init(lro);
- if (err) {
- device_printf(dev, "LRO Initialization failed!\n");
- goto fail;
- }
- INIT_DEBUGOUT("RX Soft LRO Initialized\n");
- rxr->lro_enabled = TRUE;
- lro->ifp = adapter->ifp;
- }
-
- IXV_RX_UNLOCK(rxr);
- return (0);
-
-fail:
- ixv_free_receive_ring(rxr);
- IXV_RX_UNLOCK(rxr);
- return (error);
-}
-
-/*********************************************************************
- *
- * Initialize all receive rings.
- *
- **********************************************************************/
-static int
-ixv_setup_receive_structures(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
- int j;
-
- for (j = 0; j < adapter->num_queues; j++, rxr++)
- if (ixv_setup_receive_ring(rxr))
- goto fail;
-
- return (0);
-fail:
- /*
-	 * Free RX buffers allocated so far; we will only handle
-	 * the rings that completed, since the failing case will have
-	 * cleaned up for itself. 'j' failed, so it's the terminus.
- */
- for (int i = 0; i < j; ++i) {
- rxr = &adapter->rx_rings[i];
- ixv_free_receive_ring(rxr);
- }
-
- return (ENOBUFS);
-}
-
-/*********************************************************************
- *
- * Setup receive registers and features.
- *
- **********************************************************************/
-#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
-
-static void
-ixv_initialize_receive_units(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ifnet *ifp = adapter->ifp;
- u32 bufsz, fctrl, rxcsum, hlreg;
-
-
- /* Enable broadcasts */
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_BAM;
- fctrl |= IXGBE_FCTRL_DPF;
- fctrl |= IXGBE_FCTRL_PMCF;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-
- /* Set for Jumbo Frames? */
- hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (ifp->if_mtu > ETHERMTU) {
- hlreg |= IXGBE_HLREG0_JUMBOEN;
- bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- } else {
- hlreg &= ~IXGBE_HLREG0_JUMBOEN;
- bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- }
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
-
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- u64 rdba = rxr->rxdma.dma_paddr;
- u32 reg, rxdctl;
-
- /* Do the queue enabling first */
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
- rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
- for (int k = 0; k < 10; k++) {
- if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
- IXGBE_RXDCTL_ENABLE)
- break;
- else
- msec_delay(1);
- }
- wmb();
-
- /* Setup the Base and Length of the Rx Descriptor Ring */
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
- (rdba & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
- (rdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
- adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
-
- /* Set up the SRRCTL register */
- reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
- reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
- reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
- reg |= bufsz;
- if (rxr->hdr_split) {
- /* Use a standard mbuf for the header */
- reg |= ((IXV_RX_HDR <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
- & IXGBE_SRRCTL_BSIZEHDR_MASK);
- reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
- reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
-
- /* Setup the HW Rx Head and Tail Descriptor Pointers */
- IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
- adapter->num_rx_desc - 1);
- }
-
- rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
-
- if (ifp->if_capenable & IFCAP_RXCSUM)
- rxcsum |= IXGBE_RXCSUM_PCSD;
-
- if (!(rxcsum & IXGBE_RXCSUM_PCSD))
- rxcsum |= IXGBE_RXCSUM_IPPCSE;
-
- IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
-
- return;
-}
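-
-/*
- * For reference, the bufsz arithmetic above (assuming the shared
- * code's IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10): SRRCTL expresses packet
- * buffer size in 1 KB units, so 2048 encodes as 2 and the 4096-byte
- * jumbo buffer as 4.
- */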
-
-/*********************************************************************
- *
- * Free all receive rings.
- *
- **********************************************************************/
-static void
-ixv_free_receive_structures(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- struct lro_ctrl *lro = &rxr->lro;
- ixv_free_receive_buffers(rxr);
- /* Free LRO memory */
- tcp_lro_free(lro);
- /* Free the ring memory as well */
- ixv_dma_free(adapter, &rxr->rxdma);
- }
-
- free(adapter->rx_rings, M_DEVBUF);
-}
-
-
-/*********************************************************************
- *
- * Free receive ring data structures
- *
- **********************************************************************/
-static void
-ixv_free_receive_buffers(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- struct ixv_rx_buf *rxbuf;
-
- INIT_DEBUGOUT("free_receive_structures: begin");
-
- /* Cleanup any existing buffers */
- if (rxr->rx_buffers != NULL) {
- for (int i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, rxbuf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, rxbuf->hmap);
- rxbuf->m_head->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_head);
- }
- if (rxbuf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
- rxbuf->m_pack->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_pack);
- }
- rxbuf->m_head = NULL;
- rxbuf->m_pack = NULL;
- if (rxbuf->hmap != NULL) {
- bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
- rxbuf->hmap = NULL;
- }
- if (rxbuf->pmap != NULL) {
- bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
- rxbuf->pmap = NULL;
- }
- }
- if (rxr->rx_buffers != NULL) {
- free(rxr->rx_buffers, M_DEVBUF);
- rxr->rx_buffers = NULL;
- }
- }
-
- if (rxr->htag != NULL) {
- bus_dma_tag_destroy(rxr->htag);
- rxr->htag = NULL;
- }
- if (rxr->ptag != NULL) {
- bus_dma_tag_destroy(rxr->ptag);
- rxr->ptag = NULL;
- }
-
- return;
-}
-
-static __inline void
-ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
-{
-
- /*
-	 * At the moment, LRO is only for IPv4/TCP packets, and the TCP
-	 * checksum of the packet should be computed by hardware. Also,
-	 * the packet should not have a VLAN tag in its ethernet header.
- */
- if (rxr->lro_enabled &&
- (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
- (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
- (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
- (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
- (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
- (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
- /*
-	 * Send to the stack if:
-	 *  - LRO not enabled, or
-	 *  - no LRO resources, or
-	 *  - lro enqueue fails
- */
- if (rxr->lro.lro_cnt != 0)
- if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
- return;
- }
- IXV_RX_UNLOCK(rxr);
- (*ifp->if_input)(ifp, m);
- IXV_RX_LOCK(rxr);
-}
-
-static __inline void
-ixv_rx_discard(struct rx_ring *rxr, int i)
-{
- struct ixv_rx_buf *rbuf;
-
- rbuf = &rxr->rx_buffers[i];
-
- if (rbuf->fmp != NULL) {/* Partial chain ? */
- rbuf->fmp->m_flags |= M_PKTHDR;
- m_freem(rbuf->fmp);
- rbuf->fmp = NULL;
- }
-
- /*
- ** With advanced descriptors the writeback
-	** clobbers the buffer addrs, so it's easier
- ** to just free the existing mbufs and take
- ** the normal refresh path to get new buffers
- ** and mapping.
- */
- if (rbuf->m_head) {
- m_free(rbuf->m_head);
- rbuf->m_head = NULL;
- }
-
- if (rbuf->m_pack) {
- m_free(rbuf->m_pack);
- rbuf->m_pack = NULL;
- }
-
- return;
-}
-
-
-/*********************************************************************
- *
- * This routine executes in interrupt context. It replenishes
- * the mbufs in the descriptor and sends data which has been
- * dma'ed into host memory to upper layer.
- *
- * We loop at most count times if count is > 0, or until done if
- * count < 0.
- *
- * Return TRUE for more work, FALSE for all clean.
- *********************************************************************/
-static bool
-ixv_rxeof(struct ix_queue *que, int count)
-{
- struct adapter *adapter = que->adapter;
- struct rx_ring *rxr = que->rxr;
- struct ifnet *ifp = adapter->ifp;
- struct lro_ctrl *lro = &rxr->lro;
- struct lro_entry *queued;
- int i, nextp, processed = 0;
- u32 staterr = 0;
- union ixgbe_adv_rx_desc *cur;
- struct ixv_rx_buf *rbuf, *nbuf;
-
- IXV_RX_LOCK(rxr);
-
- for (i = rxr->next_to_check; count != 0;) {
- struct mbuf *sendmp, *mh, *mp;
- u32 rsc, ptype;
- u16 hlen, plen, hdr, vtag;
- bool eop;
-
- /* Sync the ring. */
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- cur = &rxr->rx_base[i];
- staterr = le32toh(cur->wb.upper.status_error);
-
- if ((staterr & IXGBE_RXD_STAT_DD) == 0)
- break;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
-
- count--;
- sendmp = NULL;
- nbuf = NULL;
- rsc = 0;
- cur->wb.upper.status_error = 0;
- rbuf = &rxr->rx_buffers[i];
- mh = rbuf->m_head;
- mp = rbuf->m_pack;
-
- plen = le16toh(cur->wb.upper.length);
- ptype = le32toh(cur->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_PKTTYPE_MASK;
- hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
- vtag = le16toh(cur->wb.upper.vlan);
- eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
-
- /* Make sure all parts of a bad packet are discarded */
- if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
- (rxr->discard)) {
- ifp->if_ierrors++;
- rxr->rx_discarded++;
- if (!eop)
- rxr->discard = TRUE;
- else
- rxr->discard = FALSE;
- ixv_rx_discard(rxr, i);
- goto next_desc;
- }
-
- if (!eop) {
- nextp = i + 1;
- if (nextp == adapter->num_rx_desc)
- nextp = 0;
- nbuf = &rxr->rx_buffers[nextp];
- prefetch(nbuf);
- }
- /*
- ** The header mbuf is ONLY used when header
- ** split is enabled, otherwise we get normal
-	** behavior, i.e., both header and payload
- ** are DMA'd into the payload buffer.
- **
- ** Rather than using the fmp/lmp global pointers
- ** we now keep the head of a packet chain in the
- ** buffer struct and pass this along from one
- ** descriptor to the next, until we get EOP.
- */
- if (rxr->hdr_split && (rbuf->fmp == NULL)) {
- /* This must be an initial descriptor */
- hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > IXV_RX_HDR)
- hlen = IXV_RX_HDR;
- mh->m_len = hlen;
- mh->m_flags |= M_PKTHDR;
- mh->m_next = NULL;
- mh->m_pkthdr.len = mh->m_len;
- /* Null buf pointer so it is refreshed */
- rbuf->m_head = NULL;
- /*
- ** Check the payload length, this
-			** could be zero if it's a small
- ** packet.
- */
- if (plen > 0) {
- mp->m_len = plen;
- mp->m_next = NULL;
- mp->m_flags &= ~M_PKTHDR;
- mh->m_next = mp;
- mh->m_pkthdr.len += mp->m_len;
- /* Null buf pointer so it is refreshed */
- rbuf->m_pack = NULL;
- rxr->rx_split_packets++;
- }
- /*
- ** Now create the forward
- ** chain so when complete
-			** we won't have to.
- */
- if (eop == 0) {
- /* stash the chain head */
- nbuf->fmp = mh;
- /* Make forward chain */
- if (plen)
- mp->m_next = nbuf->m_pack;
- else
- mh->m_next = nbuf->m_pack;
- } else {
- /* Singlet, prepare to send */
- sendmp = mh;
- if ((adapter->num_vlans) &&
- (staterr & IXGBE_RXD_STAT_VP)) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
- } else {
- /*
- ** Either no header split, or a
- ** secondary piece of a fragmented
- ** split packet.
- */
- mp->m_len = plen;
- /*
- ** See if there is a stored head
- ** that determines what we are
- */
- sendmp = rbuf->fmp;
- rbuf->m_pack = rbuf->fmp = NULL;
-
- if (sendmp != NULL) /* secondary frag */
- sendmp->m_pkthdr.len += mp->m_len;
- else {
- /* first desc of a non-ps chain */
- sendmp = mp;
- sendmp->m_flags |= M_PKTHDR;
- sendmp->m_pkthdr.len = mp->m_len;
- if (staterr & IXGBE_RXD_STAT_VP) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
- /* Pass the head pointer on */
- if (eop == 0) {
- nbuf->fmp = sendmp;
- sendmp = NULL;
- mp->m_next = nbuf->m_pack;
- }
- }
- ++processed;
- /* Sending this frame? */
- if (eop) {
- sendmp->m_pkthdr.rcvif = ifp;
- ifp->if_ipackets++;
- rxr->rx_packets++;
- /* capture data for AIM */
- rxr->bytes += sendmp->m_pkthdr.len;
- rxr->rx_bytes += sendmp->m_pkthdr.len;
- if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
- ixv_rx_checksum(staterr, sendmp, ptype);
-#if __FreeBSD_version >= 800000
- sendmp->m_pkthdr.flowid = que->msix;
- sendmp->m_flags |= M_FLOWID;
-#endif
- }
-next_desc:
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* Advance our pointers to the next descriptor. */
- if (++i == adapter->num_rx_desc)
- i = 0;
-
- /* Now send to the stack or do LRO */
- if (sendmp != NULL)
- ixv_rx_input(rxr, ifp, sendmp, ptype);
-
- /* Every 8 descriptors we go to refresh mbufs */
- if (processed == 8) {
- ixv_refresh_mbufs(rxr, i);
- processed = 0;
- }
- }
-
- /* Refresh any remaining buf structs */
- if (ixv_rx_unrefreshed(rxr))
- ixv_refresh_mbufs(rxr, i);
-
- rxr->next_to_check = i;
-
- /*
- * Flush any outstanding LRO work
- */
- while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
- SLIST_REMOVE_HEAD(&lro->lro_active, next);
- tcp_lro_flush(lro, queued);
- }
-
- IXV_RX_UNLOCK(rxr);
-
- /*
- ** We still have cleaning to do?
- ** Schedule another interrupt if so.
- */
- if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
- ixv_rearm_queues(adapter, (u64)(1 << que->msix));
- return (TRUE);
- }
-
- return (FALSE);
-}
-
-
-/*********************************************************************
- *
- * Verify that the hardware indicated that the checksum is valid.
- * Inform the stack about the status of checksum so that stack
- * doesn't spend time verifying the checksum.
- *
- *********************************************************************/
-static void
-ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
-{
- u16 status = (u16) staterr;
- u8 errors = (u8) (staterr >> 24);
- bool sctp = FALSE;
-
- if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
- (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
- sctp = TRUE;
-
- if (status & IXGBE_RXD_STAT_IPCS) {
- if (!(errors & IXGBE_RXD_ERR_IPE)) {
- /* IP Checksum Good */
- mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
- mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
-
- } else
- mp->m_pkthdr.csum_flags = 0;
- }
- if (status & IXGBE_RXD_STAT_L4CS) {
- u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
-#if __FreeBSD_version >= 800000
- if (sctp)
- type = CSUM_SCTP_VALID;
-#endif
- if (!(errors & IXGBE_RXD_ERR_TCPE)) {
- mp->m_pkthdr.csum_flags |= type;
- if (!sctp)
- mp->m_pkthdr.csum_data = htons(0xffff);
- }
- }
- return;
-}
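-
-/*
- * For reference, the staterr split above: the (u16) cast keeps the
- * low-order status flags (IXGBE_RXD_STAT_IPCS, IXGBE_RXD_STAT_L4CS)
- * while the >> 24 shift isolates the error byte tested against
- * IXGBE_RXD_ERR_IPE and IXGBE_RXD_ERR_TCPE.
- */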
-
-static void
-ixv_setup_vlan_support(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 ctrl, vid, vfta, retry;
-
-
- /*
- ** We get here thru init_locked, meaning
- ** a soft reset, this has already cleared
- ** the VFTA and other state, so if there
- ** have been no vlan's registered do nothing.
- */
- if (adapter->num_vlans == 0)
- return;
-
- /* Enable the queues */
- for (int i = 0; i < adapter->num_queues; i++) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
- }
-
- /*
- ** A soft reset zero's out the VFTA, so
- ** we need to repopulate it now.
- */
- for (int i = 0; i < VFTA_SIZE; i++) {
- if (ixv_shadow_vfta[i] == 0)
- continue;
- vfta = ixv_shadow_vfta[i];
- /*
- ** Reconstruct the vlan id's
- ** based on the bits set in each
- ** of the array ints.
- */
-		for (int j = 0; j < 32; j++) {
- retry = 0;
- if ((vfta & (1 << j)) == 0)
- continue;
- vid = (i * 32) + j;
- /* Call the shared code mailbox routine */
- while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
- if (++retry > 5)
- break;
- }
- }
- }
-}
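-
-/*
- * Worked example of the reconstruction above (illustrative): if
- * ixv_shadow_vfta[2] has bit 5 set, the loops recover
- * vid = (2 * 32) + 5 = 69 and re-register it via the mailbox.
- */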
-
-/*
-** This routine is run via a vlan config EVENT,
-** it enables us to use the HW Filter table since
-** we can get the vlan id. This just creates the
-** entry in the soft version of the VFTA, init will
-** repopulate the real table.
-*/
-static void
-ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct adapter *adapter = ifp->if_softc;
- u16 index, bit;
-
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXV_CORE_LOCK(adapter);
- index = (vtag >> 5) & 0x7F;
- bit = vtag & 0x1F;
- ixv_shadow_vfta[index] |= (1 << bit);
- ++adapter->num_vlans;
- /* Re-init to load the changes */
- ixv_init_locked(adapter);
- IXV_CORE_UNLOCK(adapter);
-}
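-
-/*
- * Worked example of the index/bit math above (illustrative): vtag 100
- * gives index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4, so the
- * tag is recorded as bit 4 of ixv_shadow_vfta[3].
- */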
-
-/*
-** This routine is run via a vlan
-** unconfig EVENT, remove our entry
-** in the soft vfta.
-*/
-static void
-ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct adapter *adapter = ifp->if_softc;
- u16 index, bit;
-
- if (ifp->if_softc != arg)
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXV_CORE_LOCK(adapter);
- index = (vtag >> 5) & 0x7F;
- bit = vtag & 0x1F;
- ixv_shadow_vfta[index] &= ~(1 << bit);
- --adapter->num_vlans;
- /* Re-init to load the changes */
- ixv_init_locked(adapter);
- IXV_CORE_UNLOCK(adapter);
-}
-
-static void
-ixv_enable_intr(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ix_queue *que = adapter->queues;
- u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
-
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-
- mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
-
- for (int i = 0; i < adapter->num_queues; i++, que++)
- ixv_enable_queue(adapter, que->msix);
-
- IXGBE_WRITE_FLUSH(hw);
-
- return;
-}
-
-static void
-ixv_disable_intr(struct adapter *adapter)
-{
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
- IXGBE_WRITE_FLUSH(&adapter->hw);
- return;
-}
-
-/*
-** Set up the correct IVAR register for a particular MSIX interrupt:
-** - entry is the register array entry
-** - vector is the MSIX vector for this queue
-** - type is RX/TX/MISC
-*/
-static void
-ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 ivar, index;
-
- vector |= IXGBE_IVAR_ALLOC_VAL;
-
- if (type == -1) { /* MISC IVAR */
- ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
- ivar &= ~0xFF;
- ivar |= vector;
- IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
- } else { /* RX/TX IVARS */
- index = (16 * (entry & 1)) + (8 * type);
- ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
- ivar &= ~(0xFF << index);
- ivar |= (vector << index);
- IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
- }
-}
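-
-/*
-** Worked example of the IVAR arithmetic above: each VTIVAR register
-** holds four 8-bit vector entries, two queue entries per register,
-** with RX in the low byte and TX in the high byte of each 16-bit
-** half. For entry 3, TX (type == 1):
-**
-**   index    = (16 * (3 & 1)) + (8 * 1) = 24
-**   register = VTIVAR(3 >> 1)           = VTIVAR(1)
-**
-** so the vector (with IXGBE_IVAR_ALLOC_VAL set) lands in bits 31:24
-** of VTIVAR(1).
-*/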
-
-static void
-ixv_configure_ivars(struct adapter *adapter)
-{
- struct ix_queue *que = adapter->queues;
-
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- /* First the RX queue entry */
- ixv_set_ivar(adapter, i, que->msix, 0);
- /* ... and the TX */
- ixv_set_ivar(adapter, i, que->msix, 1);
- /* Set an initial value in EITR */
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
- }
-
- /* For the Link interrupt */
- ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
-}
-
-
-/*
-** Tasklet handler for MSIX MBX interrupts
-** - run outside interrupt context since it might sleep
-*/
-static void
-ixv_handle_mbx(void *context, int pending)
-{
- struct adapter *adapter = context;
-
- ixgbe_check_link(&adapter->hw,
- &adapter->link_speed, &adapter->link_up, 0);
- ixv_update_link_status(adapter);
-}
-
-/*
-** The VF stats registers never have a truly virgin
-** starting point, so this routine tries to make an
-** artificial one, marking ground zero on attach as
-** it were.
-*/
-static void
-ixv_save_stats(struct adapter *adapter)
-{
- if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
- adapter->stats.saved_reset_vfgprc +=
- adapter->stats.vfgprc - adapter->stats.base_vfgprc;
- adapter->stats.saved_reset_vfgptc +=
- adapter->stats.vfgptc - adapter->stats.base_vfgptc;
- adapter->stats.saved_reset_vfgorc +=
- adapter->stats.vfgorc - adapter->stats.base_vfgorc;
- adapter->stats.saved_reset_vfgotc +=
- adapter->stats.vfgotc - adapter->stats.base_vfgotc;
- adapter->stats.saved_reset_vfmprc +=
- adapter->stats.vfmprc - adapter->stats.base_vfmprc;
- }
-}
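-
-/*
-** Numeric sketch of the baseline logic above: if vfgprc stood at 500
-** at the last baseline (base_vfgprc) and has reached 700 when a
-** reset occurs, saved_reset_vfgprc grows by 200, so the packets
-** counted in that interval survive the re-baselining later done by
-** ixv_init_stats().
-*/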
-
-static void
-ixv_init_stats(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
- adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
- adapter->stats.last_vfgorc |=
- (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
-
- adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
- adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
- adapter->stats.last_vfgotc |=
- (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
-
- adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
-
- adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
- adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
- adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
- adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
- adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
-}
-
-#define UPDATE_STAT_32(reg, last, count) \
-{ \
- u32 current = IXGBE_READ_REG(hw, reg); \
- if (current < last) \
- count += 0x100000000LL; \
- last = current; \
- count &= 0xFFFFFFFF00000000LL; \
- count |= current; \
-}
-
-#define UPDATE_STAT_36(lsb, msb, last, count) \
-{ \
- u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
- u64 cur_msb = IXGBE_READ_REG(hw, msb); \
- u64 current = ((cur_msb << 32) | cur_lsb); \
- if (current < last) \
- count += 0x1000000000LL; \
- last = current; \
- count &= 0xFFFFFFF000000000LL; \
- count |= current; \
-}
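-
-/*
-** Worked example of the wrap handling in UPDATE_STAT_32: suppose the
-** 32-bit register last read 0xFFFFFFF0 and now reads 0x00000010.
-** "current < last" detects the wrap, so the macro does:
-**
-**   count += 0x100000000            (credit one full 2^32 wrap)
-**   count &= 0xFFFFFFFF00000000     (drop the stale low word)
-**   count |= 0x00000010             (splice in the new reading)
-**
-** UPDATE_STAT_36 applies the same idea to the 36-bit LSB/MSB counter
-** pairs, crediting 2^36 per wrap and masking the low 36 bits instead.
-*/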
-
-/*
-** ixv_update_stats - Update the board statistics counters.
-*/
-void
-ixv_update_stats(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
- adapter->stats.vfgprc);
- UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
- adapter->stats.vfgptc);
- UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
- adapter->stats.last_vfgorc, adapter->stats.vfgorc);
- UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
- adapter->stats.last_vfgotc, adapter->stats.vfgotc);
- UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
- adapter->stats.vfmprc);
-}
-
-/**********************************************************************
- *
- * This routine is reached via the stats sysctl (ixv_sysctl_stats).
- * It provides a way to examine important statistics
- * maintained by the driver and hardware.
- *
- **********************************************************************/
-static void
-ixv_print_hw_stats(struct adapter * adapter)
-{
- device_t dev = adapter->dev;
-
- device_printf(dev,"Std Mbuf Failed = %lu\n",
- adapter->mbuf_defrag_failed);
- device_printf(dev,"Driver dropped packets = %lu\n",
- adapter->dropped_pkts);
- device_printf(dev, "watchdog timeouts = %ld\n",
- adapter->watchdog_events);
-
- device_printf(dev,"Good Packets Rcvd = %llu\n",
- (long long)adapter->stats.vfgprc);
- device_printf(dev,"Good Packets Xmtd = %llu\n",
- (long long)adapter->stats.vfgptc);
- device_printf(dev,"TSO Transmissions = %lu\n",
- adapter->tso_tx);
-
-}
-
-/**********************************************************************
- *
- * This routine is reached via the debug sysctl (ixv_sysctl_debug).
- * It provides a way to examine important debug information
- * maintained by the driver and hardware.
- *
- **********************************************************************/
-static void
-ixv_print_debug_info(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ix_queue *que = adapter->queues;
- struct rx_ring *rxr;
- struct tx_ring *txr;
- struct lro_ctrl *lro;
-
- device_printf(dev,"Error Byte Count = %u \n",
- IXGBE_READ_REG(hw, IXGBE_ERRBC));
-
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- txr = que->txr;
- rxr = que->rxr;
- lro = &rxr->lro;
- device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
- que->msix, (long)que->irqs);
- device_printf(dev,"RX(%d) Packets Received: %lld\n",
- rxr->me, (long long)rxr->rx_packets);
- device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
- rxr->me, (long long)rxr->rx_split_packets);
- device_printf(dev,"RX(%d) Bytes Received: %lu\n",
- rxr->me, (long)rxr->rx_bytes);
- device_printf(dev,"RX(%d) LRO Queued= %d\n",
- rxr->me, lro->lro_queued);
- device_printf(dev,"RX(%d) LRO Flushed= %d\n",
- rxr->me, lro->lro_flushed);
- device_printf(dev,"TX(%d) Packets Sent: %lu\n",
- txr->me, (long)txr->total_packets);
- device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
- txr->me, (long)txr->no_desc_avail);
- }
-
- device_printf(dev,"MBX IRQ Handled: %lu\n",
- (long)adapter->mbx_irq);
- return;
-}
-
-static int
-ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
-{
- int error;
- int result;
- struct adapter *adapter;
-
- result = -1;
- error = sysctl_handle_int(oidp, &result, 0, req);
-
- if (error || !req->newptr)
- return (error);
-
- if (result == 1) {
- adapter = (struct adapter *) arg1;
- ixv_print_hw_stats(adapter);
- }
-	return (error);
-}
-
-static int
-ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
-{
- int error, result;
- struct adapter *adapter;
-
- result = -1;
- error = sysctl_handle_int(oidp, &result, 0, req);
-
- if (error || !req->newptr)
- return (error);
-
- if (result == 1) {
- adapter = (struct adapter *) arg1;
- ixv_print_debug_info(adapter);
- }
-	return (error);
-}
-
-/*
-** Set flow control using sysctl:
-** Flow control values:
-** 0 - off
-** 1 - rx pause
-** 2 - tx pause
-** 3 - full
-*/
-static int
-ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
-{
- int error;
- struct adapter *adapter;
-
- error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
-
- if (error)
- return (error);
-
- adapter = (struct adapter *) arg1;
- switch (ixv_flow_control) {
- case ixgbe_fc_rx_pause:
- case ixgbe_fc_tx_pause:
- case ixgbe_fc_full:
- adapter->hw.fc.requested_mode = ixv_flow_control;
- break;
- case ixgbe_fc_none:
- default:
- adapter->hw.fc.requested_mode = ixgbe_fc_none;
- }
-
- ixgbe_fc_enable(&adapter->hw, 0);
-	return (error);
-}
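-
-/*
-** Usage sketch (the exact sysctl node name depends on where the
-** driver registers this handler; the values mirror the table in the
-** comment above):
-**
-**   sysctl dev.ixv.0.flow_control=3   # request full RX/TX pause
-**
-** Any value other than 1, 2 or 3 falls back to ixgbe_fc_none.
-*/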
-
-static void
-ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
- const char *description, int *limit, int value)
-{
- *limit = value;
- SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
- OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
-}
-
+++ /dev/null
-/******************************************************************************
-
- Copyright (c) 2001-2010, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-
-#ifndef _IXV_H_
-#define _IXV_H_
-
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/mbuf.h>
-#include <sys/protosw.h>
-#include <sys/socket.h>
-#include <sys/malloc.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/sockio.h>
-
-#include <net/if.h>
-#include <net/if_arp.h>
-#include <net/bpf.h>
-#include <net/ethernet.h>
-#include <net/if_dl.h>
-#include <net/if_media.h>
-
-#include <net/bpf.h>
-#include <net/if_types.h>
-#include <net/if_vlan_var.h>
-
-#include <netinet/in_systm.h>
-#include <netinet/in.h>
-#include <netinet/if_ether.h>
-#include <netinet/ip.h>
-#include <netinet/ip6.h>
-#include <netinet/tcp.h>
-#include <netinet/tcp_lro.h>
-#include <netinet/udp.h>
-
-#include <machine/in_cksum.h>
-
-#include <sys/bus.h>
-#include <machine/bus.h>
-#include <sys/rman.h>
-#include <machine/resource.h>
-#include <vm/vm.h>
-#include <vm/pmap.h>
-#include <machine/clock.h>
-#include <dev/pci/pcivar.h>
-#include <dev/pci/pcireg.h>
-#include <sys/proc.h>
-#include <sys/sysctl.h>
-#include <sys/endian.h>
-#include <sys/taskqueue.h>
-#include <sys/pcpu.h>
-#include <sys/smp.h>
-#include <machine/smp.h>
-
-#include "ixgbe_api.h"
-#include "ixgbe_vf.h"
-
-/* Tunables */
-
-/*
 - * TxDescriptors (Valid Range: 64-4096; Default Value: 1024). This is
- * the number of transmit descriptors allocated by the driver.
- * Increasing this value allows the driver to queue more transmits.
- * Each descriptor is 16 bytes. Performance tests have shown the 2K
- * value to be optimal for top performance.
- */
-#define DEFAULT_TXD 1024
-#define PERFORM_TXD 2048
-#define MAX_TXD 4096
-#define MIN_TXD 64
-
-/*
- * RxDescriptors (Valid Range: 64-4096; Default Value: 1024). This is
- * the number of receive descriptors allocated for each RX queue.
- * Increasing this value allows the driver to buffer more incoming
- * packets. Each descriptor is 16 bytes. A receive buffer is also
- * allocated for each descriptor.
- *
- * Note: with 8 rings and a dual port card, it is possible to bump up
- * against the system mbuf pool limit; you can tune nmbclusters to
- * adjust for this.
- */
-#define DEFAULT_RXD 1024
-#define PERFORM_RXD 2048
-#define MAX_RXD 4096
-#define MIN_RXD 64
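-
-/*
- * Sizing example from the figures above: descriptors are 16 bytes,
- * so a 2048-entry ring (PERFORM_TXD/PERFORM_RXD) occupies 32 KB of
- * DMA memory; each RX descriptor additionally pins a receive buffer.
- */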
-
-/* Alignment for rings */
-#define DBA_ALIGN 128
-
-/*
- * This parameter controls the maximum number of times the driver will
- * loop in the ISR. Minimum Value = 1.
- */
-#define MAX_LOOP 10
-
-/*
- * This is the max watchdog interval, i.e. the time that can
- * pass between any two TX clean operations; such cleaning only
- * happens while the TX hardware is functioning.
- */
-#define IXV_WATCHDOG (10 * hz)
-
-/*
- * These parameters control when the driver calls the routine to reclaim
- * transmit descriptors.
- */
-#define IXV_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
-#define IXV_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
-
-#define IXV_MAX_FRAME_SIZE 0x3F00
-
-/* Flow control constants */
-#define IXV_FC_PAUSE 0xFFFF
-#define IXV_FC_HI 0x20000
-#define IXV_FC_LO 0x10000
-
-/* Defines for printing debug information */
-#define DEBUG_INIT 0
-#define DEBUG_IOCTL 0
-#define DEBUG_HW 0
-
-#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
-#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
-#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
-#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
-#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
-#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
-#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
-#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
-#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
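-
-/*
- * Usage sketch for the macros above (msix here is a hypothetical
- * local variable):
- *
- *   INIT_DEBUGOUT1("ixv_attach: %d MSIX vectors", msix);
- *
- * prints with a trailing newline while DEBUG_INIT is 1; at 0 the
- * dead "if (0)" body is optimized away by the compiler.
- */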
-
-#define MAX_NUM_MULTICAST_ADDRESSES 128
-#define IXV_EITR_DEFAULT 128
-#define IXV_SCATTER 32
-#define IXV_RX_HDR 128
-#define MSIX_BAR 3
-#define IXV_TSO_SIZE 65535
-#define IXV_BR_SIZE 4096
-#define IXV_LINK_ITR 2000
-#define TX_BUFFER_SIZE ((u32) 1514)
-#define VFTA_SIZE 128
-
-/* Offload bits in mbuf flag */
-#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
-
-/*
- *****************************************************************************
- * vendor_info_array
- *
- * This array contains the list of Subvendor/Subdevice IDs on which the driver
- * should load.
- *
- *****************************************************************************
- */
-typedef struct _ixv_vendor_info_t {
- unsigned int vendor_id;
- unsigned int device_id;
- unsigned int subvendor_id;
- unsigned int subdevice_id;
- unsigned int index;
-} ixv_vendor_info_t;
-
-
-struct ixv_tx_buf {
- u32 eop_index;
- struct mbuf *m_head;
- bus_dmamap_t map;
-};
-
-struct ixv_rx_buf {
- struct mbuf *m_head;
- struct mbuf *m_pack;
- struct mbuf *fmp;
- bus_dmamap_t hmap;
- bus_dmamap_t pmap;
-};
-
-/*
- * Bus dma allocation structure used by ixv_dma_malloc and ixv_dma_free.
- */
-struct ixv_dma_alloc {
- bus_addr_t dma_paddr;
- caddr_t dma_vaddr;
- bus_dma_tag_t dma_tag;
- bus_dmamap_t dma_map;
- bus_dma_segment_t dma_seg;
- bus_size_t dma_size;
- int dma_nseg;
-};
-
-/*
-** Driver queue struct: this is the interrupt container
-** for the associated TX and RX rings.
-*/
-struct ix_queue {
- struct adapter *adapter;
- u32 msix; /* This queue's MSIX vector */
- u32 eims; /* This queue's EIMS bit */
- u32 eitr_setting;
- u32 eitr; /* cached reg */
- struct resource *res;
- void *tag;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- struct task que_task;
- struct taskqueue *tq;
- u64 irqs;
-};
-
-/*
- * The transmit ring, one per queue
- */
-struct tx_ring {
- struct adapter *adapter;
- struct mtx tx_mtx;
- u32 me;
- bool watchdog_check;
- int watchdog_time;
- union ixgbe_adv_tx_desc *tx_base;
- struct ixv_dma_alloc txdma;
- u32 next_avail_desc;
- u32 next_to_clean;
- struct ixv_tx_buf *tx_buffers;
- volatile u16 tx_avail;
- u32 txd_cmd;
- bus_dma_tag_t txtag;
- char mtx_name[16];
- struct buf_ring *br;
- /* Soft Stats */
- u32 bytes;
- u32 packets;
- u64 no_desc_avail;
- u64 total_packets;
-};
-
-
-/*
- * The Receive ring, one per rx queue
- */
-struct rx_ring {
- struct adapter *adapter;
- struct mtx rx_mtx;
- u32 me;
- union ixgbe_adv_rx_desc *rx_base;
- struct ixv_dma_alloc rxdma;
- struct lro_ctrl lro;
- bool lro_enabled;
- bool hdr_split;
- bool discard;
- u32 next_to_refresh;
- u32 next_to_check;
- char mtx_name[16];
- struct ixv_rx_buf *rx_buffers;
- bus_dma_tag_t htag;
- bus_dma_tag_t ptag;
-
- u32 bytes; /* Used for AIM calc */
- u32 packets;
-
- /* Soft stats */
- u64 rx_irq;
- u64 rx_split_packets;
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_discarded;
-};
-
-/* Our adapter structure */
-struct adapter {
- struct ifnet *ifp;
- struct ixgbe_hw hw;
-
- struct ixgbe_osdep osdep;
- struct device *dev;
-
- struct resource *pci_mem;
- struct resource *msix_mem;
-
- /*
- * Interrupt resources: this set is
-	 * either used for legacy interrupts, or
-	 * for the Link interrupt when doing MSIX
- */
- void *tag;
- struct resource *res;
-
- struct ifmedia media;
- struct callout timer;
- int msix;
- int if_flags;
-
- struct mtx core_mtx;
-
- eventhandler_tag vlan_attach;
- eventhandler_tag vlan_detach;
-
- u16 num_vlans;
- u16 num_queues;
-
- /* Info about the board itself */
- bool link_active;
- u16 max_frame_size;
- u32 link_speed;
- bool link_up;
- u32 mbxvec;
-
- /* Mbuf cluster size */
- u32 rx_mbuf_sz;
-
-	/* Mailbox event handling */
- struct task mbx_task; /* Mailbox tasklet */
- struct taskqueue *tq;
-
- /*
- ** Queues:
-	** This is the irq holder; it has
-	** an RX/TX pair of rings associated
- ** with it.
- */
- struct ix_queue *queues;
-
- /*
- * Transmit rings:
- * Allocated at run time, an array of rings.
- */
- struct tx_ring *tx_rings;
- int num_tx_desc;
-
- /*
- * Receive rings:
- * Allocated at run time, an array of rings.
- */
- struct rx_ring *rx_rings;
- int num_rx_desc;
- u64 que_mask;
- u32 rx_process_limit;
-
- /* Misc stats maintained by the driver */
- unsigned long dropped_pkts;
- unsigned long mbuf_defrag_failed;
- unsigned long mbuf_header_failed;
- unsigned long mbuf_packet_failed;
- unsigned long no_tx_map_avail;
- unsigned long no_tx_dma_setup;
- unsigned long watchdog_events;
- unsigned long tso_tx;
- unsigned long mbx_irq;
-
- struct ixgbevf_hw_stats stats;
-};
-
-
-#define IXV_CORE_LOCK_INIT(_sc, _name) \
- mtx_init(&(_sc)->core_mtx, _name, "IXV Core Lock", MTX_DEF)
-#define IXV_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
-#define IXV_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
-#define IXV_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
-#define IXV_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
-#define IXV_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
-#define IXV_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
-#define IXV_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
-#define IXV_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
-#define IXV_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
-#define IXV_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
-#define IXV_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
-#define IXV_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
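-
-/*
- * Typical pairing, as in the VLAN event handlers:
- *
- *   IXV_CORE_LOCK(adapter);
- *   ... mutate shared adapter state ...
- *   IXV_CORE_UNLOCK(adapter);
- *
- * with the _ASSERT variants used by helpers that require the caller
- * to already hold the lock.
- */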
-
-/* Workaround to make 8.0 buildable */
-#if __FreeBSD_version < 800504
-static __inline int
-drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
-{
-#ifdef ALTQ
- if (ALTQ_IS_ENABLED(&ifp->if_snd))
- return (1);
-#endif
- return (!buf_ring_empty(br));
-}
-#endif
-
-/*
-** Find the number of unrefreshed RX descriptors
-*/
-static inline u16
-ixv_rx_unrefreshed(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
-
- if (rxr->next_to_check > rxr->next_to_refresh)
- return (rxr->next_to_check - rxr->next_to_refresh - 1);
- else
- return ((adapter->num_rx_desc + rxr->next_to_check) -
- rxr->next_to_refresh - 1);
-}
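-
-/*
-** Worked example of the arithmetic above: with num_rx_desc = 1024,
-** next_to_check = 10 and next_to_refresh = 1000, the else branch
-** gives (1024 + 10) - 1000 - 1 = 33 unrefreshed descriptors. The
-** "- 1" keeps refresh from fully catching up to check, so a full
-** ring is never mistaken for an empty one.
-*/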
-
-#endif /* _IXV_H_ */