/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
* are met:
*
- * * Redistributions of source code must retain the above copyright
+ * * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <sys/queue.h>
#include "ixgbe/ixgbe_vf.h"
#include "ixgbe/ixgbe_common.h"
#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass.h"
/*
* High threshold controlling when to start sending XOFF frames. Must be at
#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
+#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */
+
+
+#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
int on);
static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
-static void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
-static void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-static void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_pfc_conf *pfc_conf);
+static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta *reta_conf);
+static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta *reta_conf);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+/* For Eth VMDQ APIs support */
+static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
+		struct ether_addr *mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
+static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
+		uint16_t rx_mask, uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool,
+		uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool,
+		uint8_t on);
+static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+		uint64_t pool_mask, uint8_t vlan_on);
+static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on);
+static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
+ uint8_t rule_id);
/*
* Define VF Stats MACRO for Non "cleared on read" register
.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
+ .rx_queue_count = ixgbe_dev_rx_queue_count,
+ .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
.mac_addr_add = ixgbe_add_rar,
.mac_addr_remove = ixgbe_remove_rar,
+ .uc_hash_table_set = ixgbe_uc_hash_table_set,
+ .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
+ .mirror_rule_set = ixgbe_mirror_rule_set,
+ .mirror_rule_reset = ixgbe_mirror_rule_reset,
+ .set_vf_rx_mode = ixgbe_set_pool_rx_mode,
+ .set_vf_rx = ixgbe_set_pool_rx,
+ .set_vf_tx = ixgbe_set_pool_tx,
+ .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter,
.fdir_add_signature_filter = ixgbe_fdir_add_signature_filter,
.fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
.fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
.fdir_update_perfect_filter = ixgbe_fdir_update_perfect_filter,
.fdir_remove_perfect_filter = ixgbe_fdir_remove_perfect_filter,
.fdir_set_masks = ixgbe_fdir_set_masks,
+ .reta_update = ixgbe_dev_rss_reta_update,
+ .reta_query = ixgbe_dev_rss_reta_query,
+#ifdef RTE_NIC_BYPASS
+ .bypass_init = ixgbe_bypass_init,
+ .bypass_state_set = ixgbe_bypass_state_store,
+ .bypass_state_show = ixgbe_bypass_state_show,
+ .bypass_event_set = ixgbe_bypass_event_store,
+ .bypass_event_show = ixgbe_bypass_event_show,
+ .bypass_wd_timeout_set = ixgbe_bypass_wd_timeout_store,
+ .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
+ .bypass_ver_show = ixgbe_bypass_ver_show,
+ .bypass_wd_reset = ixgbe_bypass_wd_reset,
+#endif /* RTE_NIC_BYPASS */
};
/*
}
}
+static inline int32_t
+ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
+{
+ uint32_t ctrl_ext;
+ int32_t status;
+
+ status = ixgbe_reset_hw(hw);
+
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+static inline void
+ixgbe_enable_intr(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
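+	/* writing 1s to EIMS unmasks (enables) the corresponding intr causes */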
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
/*
* This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
*/
PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
(int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
- n = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;
+ n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
return -EIO;
}
- offset = queue_id % NB_QMAP_FIELDS_PER_QSM_REG;
+ offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
/* Now clear any previous stat_idx set */
clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
{
uint8_t i;
struct ixgbe_dcb_tc_config *tc;
- int dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
dcb_config->num_tcs.pg_tcs = dcb_max_tc;
dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
for (i = 0; i < dcb_max_tc; i++) {
tc = &dcb_config->tc_config[i];
tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
- tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100/dcb_max_tc + (i & 1);
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100/dcb_max_tc + (i & 1));
tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
- tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100/dcb_max_tc + (i & 1);
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100/dcb_max_tc + (i & 1));
tc->pfc = ixgbe_dcb_pfc_disabled;
}
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
- hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+#ifdef RTE_LIBRTE_IXGBE_ALLOW_UNSUPPORTED_SFP
+ hw->allow_unsupported_sfp = 1;
+#endif
/* Initialize the shared code */
+#ifdef RTE_NIC_BYPASS
+ diag = ixgbe_bypass_init_shared_code(hw);
+#else
diag = ixgbe_init_shared_code(hw);
+#endif /* RTE_NIC_BYPASS */
+
if (diag != IXGBE_SUCCESS) {
PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
return -EIO;
}
hw->fc.send_xon = 1;
- ixgbe_disable_intr(hw);
-
/* Make sure we have a good EEPROM before we read from it */
diag = ixgbe_validate_eeprom_checksum(hw, &csum);
if (diag != IXGBE_SUCCESS) {
return -EIO;
}
+#ifdef RTE_NIC_BYPASS
+ diag = ixgbe_bypass_init_hw(hw);
+#else
diag = ixgbe_init_hw(hw);
+#endif /* RTE_NIC_BYPASS */
/*
* Devices with copper phys will fail to initialise if ixgbe_init_hw()
return -EIO;
}
+ /* disable interrupt */
+ ixgbe_disable_intr(hw);
+
/* pick up the PCI bus settings for reporting later */
ixgbe_get_bus_info(hw);
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
ð_dev->data->mac_addrs[0]);
+
+ /* Allocate memory for storing hash filter MAC addresses */
+ eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+ IXGBE_VMDQ_NUM_UC_MAC, 0);
+ if (eth_dev->data->hash_mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ return -ENOMEM;
+ }
/* initialize the vfta */
memset(shadow_vfta, 0, sizeof(*shadow_vfta));
/* initialize the hw strip bitmap*/
memset(hwstrip, 0, sizeof(*hwstrip));
- /* let hardware know driver is loaded */
+ /* initialize PF if max_vfs not zero */
+ ixgbe_pf_host_init(eth_dev);
+
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ /* let hardware know driver is loaded */
ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
PMD_INIT_LOG(DEBUG,
rte_intr_callback_register(&(pci_dev->intr_handle),
ixgbe_dev_interrupt_handler, (void *)eth_dev);
+ /* enable uio intr after callback register */
+ rte_intr_enable(&(pci_dev->intr_handle));
+
+ /* enable support intr */
+ ixgbe_enable_intr(eth_dev);
+
return 0;
}
+static void
+ixgbevf_get_queue_num(struct ixgbe_hw *hw)
+{
+	/* Traffic classes are not supported for now */
+	unsigned int tcs, tc;
+
+	/*
+	 * The PF must be told that this VF is at mailbox API version 1.1;
+	 * otherwise the PF won't answer properly.
+	 * If the PF fails to provide the Rx/Tx queue counts,
+	 * max_tx_queues and max_rx_queues remain at 1.
+ */
+ if (!ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11))
+ ixgbevf_get_queues(hw, &tcs, &tc);
+}
+
/*
* Virtual Function device init
*/
PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ return 0;
+ }
+
pci_dev = eth_dev->pci_dev;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
- hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
/* initialize the vfta */
memset(shadow_vfta, 0, sizeof(*shadow_vfta));
hw->mac.num_rar_entries = hw->mac.max_rx_queues;
diag = hw->mac.ops.reset_hw(hw);
+
if (diag != IXGBE_SUCCESS) {
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ RTE_LOG(ERR, PMD, "\tThe MAC address is not valid.\n"
+ "\tThe most likely cause of this error is that the VM host\n"
+ "\thas not assigned a valid MAC address to this VF device.\n"
+ "\tPlease consult the DPDK Release Notes (FAQ section) for\n"
+ "\ta possible solution to this problem.\n");
return (diag);
}
+ /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
+ ixgbevf_get_queue_num(hw);
+
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
+
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
ð_dev->data->mac_addrs[0]);
{
.name = "rte_ixgbe_pmd",
.id_table = pci_id_ixgbe_map,
+#ifdef RTE_EAL_UNBIND_PORTS
.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+#endif
},
.eth_dev_init = eth_ixgbe_dev_init,
.dev_private_size = sizeof(struct ixgbe_adapter),
{
.name = "rte_ixgbevf_pmd",
.id_table = pci_id_ixgbevf_map,
+#ifdef RTE_EAL_UNBIND_PORTS
.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+#endif
},
.eth_dev_init = eth_ixgbevf_dev_init,
.dev_private_size = sizeof(struct ixgbe_adapter),
IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
}
-static void
+void
ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}
-static void
+void
ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
-static void
+void
ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
}
}
-static void
+void
ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
}
}
+static void
+ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+}
+
static int
ixgbe_dev_configure(struct rte_eth_dev *dev)
{
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
int mask = 0;
+ int status;
PMD_INIT_FUNC_TRACE();
/* IXGBE devices don't support half duplex */
if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
- PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n",
+ PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
dev->data->dev_conf.link_duplex,
dev->data->port_id);
return -EINVAL;
/* reinitialize adapter
* this calls reset and start */
- ixgbe_init_hw(hw);
+ status = ixgbe_pf_reset_hw(hw);
+ if (status != 0)
+ return -1;
+ hw->mac.ops.start_hw(hw);
+
+ /* configure PF module if SRIOV enabled */
+ ixgbe_pf_host_configure(dev);
/* initialize transmission unit */
ixgbe_dev_tx_init(dev);
-
+
/* This can fail when allocating mbufs for descriptor rings */
err = ixgbe_dev_rx_init(dev);
if (err) {
}
/* Turn on the laser */
- if (hw->phy.multispeed_fiber)
- ixgbe_enable_tx_laser(hw);
+ ixgbe_enable_tx_laser(hw);
+
+ /* Skip link setup if loopback mode is enabled for 82599. */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ goto skip_link_setup;
err = ixgbe_check_link(hw, &speed, &link_up, 0);
if (err)
speed = IXGBE_LINK_SPEED_10GB_FULL;
break;
default:
- PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
- dev->data->dev_conf.link_speed, dev->data->port_id);
+ PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
+ dev->data->dev_conf.link_speed,
+ dev->data->port_id);
goto error;
}
if (err)
goto error;
+skip_link_setup:
+
/* check if lsc interrupt is enabled */
- if (dev->data->dev_conf.intr_conf.lsc != 0) {
- err = ixgbe_dev_interrupt_setup(dev);
- if (err)
- goto error;
- }
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ ixgbe_dev_lsc_interrupt_setup(dev);
+
+ /* resume enabled intr since hw reset */
+ ixgbe_enable_intr(dev);
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
ETH_VLAN_EXTEND_MASK;
ixgbe_vlan_offload_set(dev, mask);
-
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ /* Enable vlan filtering for VMDq */
+ ixgbe_vmdq_vlan_hw_filter_enable(dev);
+ }
+
/* Configure DCB hw */
ixgbe_configure_dcb(dev);
struct rte_eth_link link;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ int vf;
PMD_INIT_FUNC_TRACE();
ixgbe_disable_intr(hw);
/* reset the NIC */
- ixgbe_reset_hw(hw);
+ ixgbe_pf_reset_hw(hw);
hw->adapter_stopped = FALSE;
/* stop adapter */
ixgbe_stop_adapter(hw);
+ for (vf = 0; vfinfo != NULL &&
+ vf < dev->pci_dev->max_vfs; vf++)
+ vfinfo[vf].clear_to_send = false;
+
/* Turn off the laser */
- if (hw->phy.multispeed_fiber)
- ixgbe_disable_tx_laser(hw);
+ ixgbe_disable_tx_laser(hw);
ixgbe_dev_clear_queues(dev);
PMD_INIT_FUNC_TRACE();
- ixgbe_reset_hw(hw);
-
+ ixgbe_pf_reset_hw(hw);
ixgbe_dev_stop(dev);
hw->adapter_stopped = 1;
hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
if (hw->mac.type != ixgbe_mac_82598EB) {
- hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
- ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
- hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
- ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
- hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
- ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+ hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+ hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+ hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+ hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+ hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
} else {
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->max_rx_queues = hw->mac.max_rx_queues;
- dev_info->max_tx_queues = hw->mac.max_tx_queues;
+ dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+ dev_info->max_vfs = dev->pci_dev->max_vfs;
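+	/* 82598 supports up to 16 VMDq pools; 82599 and newer support 64 */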
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ else
+ dev_info->max_vmdq_pools = ETH_64_POOLS;
}
/* return 0 means link status changed, -1 means not changed */
* - On failure, a negative value.
*/
static int
-ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
ixgbe_dev_link_status_print(dev);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
- IXGBE_WRITE_FLUSH(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ intr->mask |= IXGBE_EICR_LSC;
return 0;
}
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
- IXGBE_WRITE_FLUSH(hw);
+ /* clear all cause mask */
+ ixgbe_disable_intr(hw);
/* read-on-clear nic registers here */
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
- PMD_INIT_LOG(INFO, "eicr %x", eicr);
+ PMD_DRV_LOG(INFO, "eicr %x", eicr);
+
+ intr->flags = 0;
if (eicr & IXGBE_EICR_LSC) {
/* set flag for async link update */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
}
+ if (eicr & IXGBE_EICR_MAILBOX)
+ intr->flags |= IXGBE_FLAG_MAILBOX;
+
return 0;
}
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ int64_t timeout;
+ struct rte_eth_link link;
+ int intr_enable_delay = false;
- if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
- return -1;
+ PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+
+ if (intr->flags & IXGBE_FLAG_MAILBOX) {
+ ixgbe_pf_mbx_process(dev);
+ intr->flags &= ~IXGBE_FLAG_MAILBOX;
+ }
+
+ if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+		/* snapshot the link status before updating, to predict the change */
+ memset(&link, 0, sizeof(link));
+ rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+ ixgbe_dev_link_update(dev, 0);
+
+		/* likely to come up */
+		if (!link.link_status)
+			/* handle it 1 sec later, waiting for it to stabilize */
+			timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
+		/* likely to go down */
+		else
+			/* handle it 4 sec later, waiting for it to stabilize */
+			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+ ixgbe_dev_link_status_print(dev);
+
+ intr_enable_delay = true;
+ }
+
+ if (intr_enable_delay) {
+ if (rte_eal_alarm_set(timeout * 1000,
+			ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
+ PMD_DRV_LOG(ERR, "Error setting alarm");
+ } else {
+ PMD_DRV_LOG(DEBUG, "enable intr immediately");
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
}
- ixgbe_dev_link_update(dev, 0);
+
return 0;
}
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t eicr;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_pf_mbx_process(dev);
- IXGBE_READ_REG(hw, IXGBE_EICR);
- ixgbe_dev_interrupt_action(dev);
if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ ixgbe_dev_link_update(dev, 0);
intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
- rte_intr_enable(&(dev->pci_dev->intr_handle));
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
- IXGBE_WRITE_FLUSH(hw);
ixgbe_dev_link_status_print(dev);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
}
+
+ PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
}
/**
* void
*/
static void
-ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
+ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
{
- int64_t timeout;
- struct rte_eth_link link;
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
- struct ixgbe_interrupt *intr =
- IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
- /* get the link status before link update, for predicting later */
- memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
ixgbe_dev_interrupt_get_status(dev);
ixgbe_dev_interrupt_action(dev);
-
- if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
- return;
-
- /* likely to up */
- if (!link.link_status)
- /* handle it 1 sec later, wait it being stable */
- timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
- /* likely to down */
- else
- /* handle it 4 sec later, wait it being stable */
- timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
-
- ixgbe_dev_link_status_print(dev);
- if (rte_eal_alarm_set(timeout * 1000,
- ixgbe_dev_interrupt_delayed_handler, param) < 0)
- PMD_INIT_LOG(ERR, "Error setting alarm");
}
static int
int err;
uint32_t rx_buf_size;
uint32_t max_high_water;
+ uint32_t mflcn;
enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
ixgbe_fc_none,
ixgbe_fc_rx_pause,
hw->fc.send_xon = fc_conf->send_xon;
err = ixgbe_fc_enable(hw);
+
/* Not negotiated is not an error case */
if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
+
+ /* check if we want to forward MAC frames - driver doesn't have native
+ * capability to do that, so we'll write the registers ourselves */
+
+ mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+ /* set or clear MFLCN.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ mflcn |= IXGBE_MFLCN_PMCF;
+ else
+ mflcn &= ~IXGBE_MFLCN_PMCF;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
+ IXGBE_WRITE_FLUSH(hw);
+
return 0;
}
return -EIO;
}
+static int
+ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta *reta_conf)
+{
+	uint8_t i, j, mask;
+ uint32_t reta;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ /*
+	 * Update Redirection Table RETA[n], n=0..31. The table holds 128
+	 * 8-bit entries in 32 registers, four entries per register; each
+	 * group of four is guarded by a nibble of mask_lo (entries 0-63)
+	 * or mask_hi (entries 64-127).
+ */
+	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
+ if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
+ mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
+ else
+ mask = (uint8_t)((reta_conf->mask_hi >>
+ (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
+ if (mask != 0) {
+ reta = 0;
+ if (mask != 0xF)
+				reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+
+			for (j = 0; j < 4; j++) {
+				if (mask & (0x1 << j)) {
+					if (mask != 0xF)
+						reta &= ~(0xFF << 8 * j);
+					reta |= reta_conf->reta[i + j] << 8 * j;
+				}
+			}
+			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ }
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta *reta_conf)
+{
+	uint8_t i, j, mask;
+ uint32_t reta;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ /*
+	 * Read Redirection Table RETA[n], n=0..31. The table holds 128
+	 * 8-bit entries in 32 registers, four entries per register.
+ */
+	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
+ if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
+ mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
+ else
+ mask = (uint8_t)((reta_conf->mask_hi >>
+ (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
+
+ if (mask != 0) {
+			reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+ for (j = 0; j < 4; j++) {
+ if (mask & (0x1 << j))
+ reta_conf->reta[i + j] =
+ (uint8_t)((reta >> 8 * j) & 0xFF);
+ }
+ }
+ }
+
+ return 0;
+}
+
static void
ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool)
static int
ixgbevf_dev_start(struct rte_eth_dev *dev)
{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int err, mask = 0;
PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
+ hw->mac.ops.reset_hw(hw);
+
ixgbevf_dev_tx_init(dev);
/* This can fail when allocating mbufs for descriptor rings */
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t i, on = 0;
+ uint16_t i;
+ int on = 0;
/* VF function only support hw strip feature, others are not support */
if(mask & ETH_VLAN_STRIP_MASK){
}
}
+static int
+ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
+{
+ uint32_t reg_val;
+
+ /* we only need to do this if VMDq is enabled */
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
+ PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
+ return (-1);
+ }
+
+ return 0;
+}
+
+static uint32_t
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+{
+ uint32_t vector = 0;
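+	/* mc_filter_type selects which 12 bits of the 48-bit MAC feed the hash */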
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 4) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 3) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 2) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((uc_addr->addr_bytes[4]) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+	/* the vector is only 12 bits wide; mask it to stay inside the table */
+ vector &= 0xFFF;
+ return vector;
+}
+
+static int
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint8_t on)
+{
+ uint32_t vector;
+ uint32_t uta_idx;
+ uint32_t reg_val;
+ uint32_t uta_shift;
+ uint32_t rc;
+ const uint32_t ixgbe_uta_idx_mask = 0x7F;
+ const uint32_t ixgbe_uta_bit_shift = 5;
+ const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
+ const uint32_t bit1 = 0x1;
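+	/* 128 registers x 32 bits = 4096 UTA buckets, one per 12-bit hash */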
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_uta_info *uta_info =
+ IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return (-ENOTSUP);
+
+	vector = ixgbe_uta_vector(hw, mac_addr);
+ uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
+ uta_shift = vector & ixgbe_uta_bit_mask;
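+	/* vector[11:5] selects one of 128 UTA registers, vector[4:0] the bit */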
+
+ rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
+	if (rc == on)
+ return 0;
+
+ reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
+ if (on) {
+ uta_info->uta_in_use++;
+ reg_val |= (bit1 << uta_shift);
+ uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
+ } else {
+ uta_info->uta_in_use--;
+ reg_val &= ~(bit1 << uta_shift);
+ uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
+
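+	/* keep MCSTCTRL.MFE set while any UTA bit is still in use */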
+ if (uta_info->uta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+ else
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ return 0;
+}
+
+static int
+ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
+{
+ int i;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_uta_info *uta_info =
+ IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return (-ENOTSUP);
+
+	if (on) {
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = ~0;
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
+ }
+ } else {
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+ }
+ }
+ return 0;
+}
+
+static int
+ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
+ uint16_t rx_mask, uint8_t on)
+{
+ int val = 0;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+		PMD_INIT_LOG(ERR, "setting VF receive mode is only supported"
+			" on 82599 hardware and newer\n");
+ return (-ENOTSUP);
+ }
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
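+	/* translate the generic rx_mask flags into VMOLR bits for this pool */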
+	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+		val |= IXGBE_VMOLR_AUPE;
+	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ val |= IXGBE_VMOLR_ROMPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ val |= IXGBE_VMOLR_ROPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ val |= IXGBE_VMOLR_BAM;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ val |= IXGBE_VMOLR_MPE;
+
+ if (on)
+ vmolr |= val;
+ else
+ vmolr &= ~val;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+
+ return 0;
+}
+
+static int
+ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+	uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
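+	/* VFRE is split into two 32-pool registers: pick the half, then the bit */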
+ addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+ reg = IXGBE_READ_REG(hw, addr);
+	val = bit1 << (pool & 0x1F);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+	IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+static int
+ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+	uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
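+	/* VFTE is likewise split into two 32-pool registers */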
+ addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+ reg = IXGBE_READ_REG(hw, addr);
+	val = bit1 << (pool & 0x1F);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+	IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+static int
+ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+ uint64_t pool_mask, uint8_t vlan_on)
+{
+ int ret = 0;
+ uint16_t pool_idx;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
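+	/* program the VLAN filter of every pool selected in pool_mask */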
+	for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
+		if (pool_mask & (1ULL << pool_idx)) {
+			ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+ return ret;
+}
+
+static int
+ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
+{
+	uint32_t mr_ctl, vlvf;
+ uint32_t mp_lsb = 0;
+ uint32_t mv_msb = 0;
+ uint32_t mv_lsb = 0;
+ uint32_t mp_msb = 0;
+ uint8_t i = 0;
+ int reg_index = 0;
+ uint64_t vlan_mask = 0;
+
+ const uint8_t pool_mask_offset = 32;
+ const uint8_t vlan_mask_offset = 32;
+ const uint8_t dst_pool_offset = 8;
+ const uint8_t rule_mr_offset = 4;
+	const uint8_t mirror_rule_mask = 0x0F;
+
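+	/*
+	 * MRCTL[3:0] carries the rule type; the destination pool is shifted
+	 * in at bit 8. The 64-bit pool/VLAN masks are written as {LSB,MSB}
+	 * register pairs spaced rule_mr_offset (4) registers apart.
+	 */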
+ struct ixgbe_mirror_info *mr_info =
+ (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ /* Check if vlan mask is valid */
+ if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
+ if (mirror_conf->vlan.vlan_mask == 0)
+ return (-EINVAL);
+ }
+
+	/* Check if vlan id is valid; find corresponding VLAN ID index in VLVF */
+	if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+		for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
+			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+				/* search vlan id related pool vlan filter index */
+				reg_index = ixgbe_find_vlvf_slot(hw,
+						mirror_conf->vlan.vlan_id[i]);
+				if (reg_index < 0)
+ return (-EINVAL);
+ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
+ if ((vlvf & IXGBE_VLVF_VIEN) &&
+ ((vlvf & IXGBE_VLVF_VLANID_MASK)
+ == mirror_conf->vlan.vlan_id[i]))
+ vlan_mask |= (1ULL << reg_index);
+ else
+ return (-EINVAL);
+ }
+ }
+
+ if (on) {
+ mv_lsb = vlan_mask & 0xFFFFFFFF;
+ mv_msb = vlan_mask >> vlan_mask_offset;
+
+ mr_info->mr_conf[rule_id].vlan.vlan_mask =
+ mirror_conf->vlan.vlan_mask;
+			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+				if (mirror_conf->vlan.vlan_mask & (1ULL << i))
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+ mirror_conf->vlan.vlan_id[i];
+ }
+ } else {
+ mv_lsb = 0;
+ mv_msb = 0;
+ mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+ }
+ }
+
+	/*
+	 * If pool mirroring is enabled, write the related pool mask register;
+	 * if it is disabled, clear the PFMRVM register.
+	 */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+ if (on) {
+ mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
+ mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+ mr_info->mr_conf[rule_id].pool_mask =
+ mirror_conf->pool_mask;
+
+ } else {
+ mp_lsb = 0;
+ mp_msb = 0;
+ mr_info->mr_conf[rule_id].pool_mask = 0;
+ }
+ }
+
+ /* read mirror control register and recalculate it */
+	mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
+
+ if (on) {
+ mr_ctl |= mirror_conf->rule_type_mask;
+ mr_ctl &= mirror_rule_mask;
+ mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+ } else
+ mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);
+
+ mr_info->mr_conf[rule_id].rule_type_mask = (uint8_t)(mr_ctl & mirror_rule_mask);
+ mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+	/* write mirror control register */
+ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+	/* write pool mirror control register */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
+ mp_msb);
+ }
+	/* write VLAN mirror control register */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
+ mv_msb);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
+{
+ int mr_ctl = 0;
+ uint32_t lsb_val = 0;
+ uint32_t msb_val = 0;
+ const uint8_t rule_mr_offset = 4;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_mirror_info *mr_info =
+ (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ memset(&mr_info->mr_conf[rule_id], 0,
+ sizeof(struct rte_eth_vmdq_mirror_conf));
+
+ /* clear PFVMCTL register */
+ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+ /* clear pool mask register */
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
+
+ /* clear vlan mask register */
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
+
+ return 0;
+}