#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_alarm.h>
#include "igc_logs.h"
#include "igc_txrx.h"
+#include "igc_filter.h"
+#include "igc_flow.h"
#define IGC_INTEL_VENDOR_ID 0x8086
-/*
- * The overhead from MTU to max frame size.
- * Considering VLAN so tag needs to be counted.
- */
-#define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \
- RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
-
#define IGC_FC_PAUSE_TIME 0x0680
#define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
#define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
/* External VLAN Enable bit mask */
#define IGC_CTRL_EXT_EXT_VLAN (1u << 26)
+/* Speed select */
+#define IGC_CTRL_SPEED_MASK (7u << 8)
+#define IGC_CTRL_SPEED_2500 (6u << 8)
+
+/* External VLAN Ether Type bit mask and shift */
+#define IGC_VET_EXT 0xFFFF0000
+#define IGC_VET_EXT_SHIFT 16
+
+/* Force EEE Auto-negotiation */
+#define IGC_EEER_EEE_FRC_AN (1u << 28)
+
/* Per Queue Good Packets Received Count */
#define IGC_PQGPRC(idx) (0x10010 + 0x100 * (idx))
/* Per Queue Good Octets Received Count */
static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-static void eth_igc_stop(struct rte_eth_dev *dev);
+static int eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static int eth_igc_set_link_up(struct rte_eth_dev *dev);
static int eth_igc_set_link_down(struct rte_eth_dev *dev);
-static void eth_igc_close(struct rte_eth_dev *dev);
+static int eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
struct rte_eth_xstat_name *xstats_names,
unsigned int size);
static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
unsigned int limit);
static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int
+eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
+static int
+eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
+static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int
+eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type, uint16_t tpid);
static const struct eth_dev_ops eth_igc_ops = {
.dev_configure = eth_igc_configure,
.rx_queue_setup = eth_igc_rx_queue_setup,
.rx_queue_release = eth_igc_rx_queue_release,
- .rx_queue_count = eth_igc_rx_queue_count,
- .rx_descriptor_done = eth_igc_rx_descriptor_done,
- .rx_descriptor_status = eth_igc_rx_descriptor_status,
- .tx_descriptor_status = eth_igc_tx_descriptor_status,
.tx_queue_setup = eth_igc_tx_queue_setup,
.tx_queue_release = eth_igc_tx_queue_release,
.tx_done_cleanup = eth_igc_tx_done_cleanup,
.queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
.rx_queue_intr_enable = eth_igc_rx_queue_intr_enable,
.rx_queue_intr_disable = eth_igc_rx_queue_intr_disable,
+ .flow_ctrl_get = eth_igc_flow_ctrl_get,
+ .flow_ctrl_set = eth_igc_flow_ctrl_set,
+ .reta_update = eth_igc_rss_reta_update,
+ .reta_query = eth_igc_rss_reta_query,
+ .rss_hash_update = eth_igc_rss_hash_update,
+ .rss_hash_conf_get = eth_igc_rss_hash_conf_get,
+ .vlan_filter_set = eth_igc_vlan_filter_set,
+ .vlan_offload_set = eth_igc_vlan_offload_set,
+ .vlan_tpid_set = eth_igc_vlan_tpid_set,
+ .vlan_strip_queue_set = eth_igc_vlan_strip_queue_set,
+ .flow_ops_get = eth_igc_flow_ops_get,
};
/*
return -EINVAL;
}
- if (rx_mq_mode != ETH_MQ_RX_NONE &&
- rx_mq_mode != ETH_MQ_RX_RSS) {
+ if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
/* RSS together with VMDq is not supported */
PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
rx_mq_mode);
/* To avoid breaking software that sets an invalid mode, only
 * display a warning if an invalid mode is used.
 */
- if (tx_mq_mode != ETH_MQ_TX_NONE)
+ if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
PMD_INIT_LOG(WARNING,
"TX mode %d is not supported. Due to meaningless in this driver, just ignore",
tx_mq_mode);
PMD_INIT_FUNC_TRACE();
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
ret = igc_check_mq_mode(dev);
if (ret != 0)
return ret;
{
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
if (rte_intr_allow_others(intr_handle) &&
dev->data->dev_conf.intr_conf.lsc) {
struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
if (rte_intr_allow_others(intr_handle) &&
dev->data->dev_conf.intr_conf.lsc) {
uint16_t duplex, speed;
hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
link.link_duplex = (duplex == FULL_DUPLEX) ?
- ETH_LINK_FULL_DUPLEX :
- ETH_LINK_HALF_DUPLEX;
+ RTE_ETH_LINK_FULL_DUPLEX :
+ RTE_ETH_LINK_HALF_DUPLEX;
link.link_speed = speed;
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
if (speed == SPEED_2500) {
uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
}
} else {
link.link_speed = 0;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
}
return rte_eth_linkstatus_set(dev, &link);
" Port %d: Link Up - speed %u Mbps - %s",
dev->data->port_id,
(unsigned int)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
else
PMD_DRV_LOG(INFO, " Port %d: Link Down",
pci_dev->addr.bus,
pci_dev->addr.devid,
pci_dev->addr.function);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
}
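With the switch to the public rte_eth_dev_callback_process(), applications receive these link-state-change events through the standard ethdev callback API. A minimal consumer-side sketch (hypothetical helper names; assumes the port was configured with intr_conf.lsc = 1):

#include <stdio.h>
#include <rte_ethdev.h>

static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	(void)event;
	(void)cb_arg;
	(void)ret_param;
	/* read the new state without waiting for renegotiation */
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u: link %s\n", port_id,
		link.link_status == RTE_ETH_LINK_UP ? "up" : "down");
	return 0;
}

/* call once before rte_eth_dev_start(port_id) */
static void
register_lsc_cb(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
		lsc_event_cb, NULL);
}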
* This routine disables all traffic on the adapter by issuing a
* global reset on the MAC.
*/
-static void
+static int
eth_igc_stop(struct rte_eth_dev *dev)
{
struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct rte_eth_link link;
+ dev->data->dev_started = 0;
adapter->stopped = 1;
/* disable receive and transmit */
/* disable all wake up */
IGC_WRITE_REG(hw, IGC_WUC, 0);
+ /* disable checking EEE operation in MAC loopback mode */
+ igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
+
/* Set bit for Go Link disconnect */
igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
IGC_82580_PM_GO_LINKD);
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
- if (intr_handle->intr_vec != NULL) {
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
- }
+ rte_intr_vec_list_free(intr_handle);
+
+ return 0;
}
/*
{
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
uint32_t intr_mask;
uint32_t vec = IGC_MISC_VEC_ID;
IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
IGC_GPIE_PBA | IGC_GPIE_EIAME |
IGC_GPIE_NSICR);
- intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
- misc_shift;
+ intr_mask = RTE_LEN2MASK(rte_intr_nb_efd_get(intr_handle),
+ uint32_t) << misc_shift;
if (dev->data->dev_conf.intr_conf.lsc)
intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
igc_write_ivar(hw, i, 0, vec);
- intr_handle->intr_vec[i] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
+ rte_intr_vec_list_index_set(intr_handle, i, vec);
+ if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
vec++;
}
uint32_t mask;
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
/* won't configure msix register if no mapping is done
if (!rte_intr_dp_is_en(intr_handle))
return;
- mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift;
+ mask = RTE_LEN2MASK(rte_intr_nb_efd_get(intr_handle), uint32_t)
+ << misc_shift;
IGC_WRITE_REG(hw, IGC_EIMS, mask);
}
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
uint32_t *speeds;
int ret;
return -1;
}
- if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
- intr_handle->intr_vec = rte_zmalloc("intr_vec",
- dev->data->nb_rx_queues * sizeof(int), 0);
- if (intr_handle->intr_vec == NULL) {
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+ dev->data->nb_rx_queues)) {
PMD_DRV_LOG(ERR,
"Failed to allocate %d rx_queues intr_vec",
dev->data->nb_rx_queues);
igc_clear_hw_cntrs_base_generic(hw);
+ /* VLAN Offload Settings */
+ eth_igc_vlan_offload_set(dev,
+ RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
+
/* Setup link speed and duplex */
speeds = &dev->data->dev_conf.link_speeds;
- if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
hw->mac.autoneg = 1;
} else {
int num_speeds = 0;
- bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
- /* Reset */
+ if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
+ PMD_DRV_LOG(ERR,
+ "Force speed mode currently not supported");
+ igc_dev_clear_queues(dev);
+ return -EINVAL;
+ }
+
hw->phy.autoneg_advertised = 0;
+ hw->mac.autoneg = 1;
- if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
- ETH_LINK_SPEED_FIXED)) {
+ if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
num_speeds = -1;
goto error_invalid_config;
}
- if (*speeds & ETH_LINK_SPEED_10M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_10M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M) {
hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M) {
hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_1G) {
+ if (*speeds & RTE_ETH_LINK_SPEED_1G) {
hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_2_5G) {
+ if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
num_speeds++;
}
- if (num_speeds == 0 || (!autoneg && num_speeds > 1))
+ if (num_speeds == 0)
goto error_invalid_config;
-
- /* Set/reset the mac.autoneg based on the link speed,
- * fixed or not
- */
- if (!autoneg) {
- hw->mac.autoneg = 0;
- hw->mac.forced_speed_duplex =
- hw->phy.autoneg_advertised;
- } else {
- hw->mac.autoneg = 1;
- }
}
igc_setup_link(hw);
eth_igc_rxtx_control(dev, true);
eth_igc_link_update(dev, 0);
+ /* configure MAC-loopback mode */
+ if (dev->data->dev_conf.lpbk_mode == 1) {
+ uint32_t reg_val;
+
+ reg_val = IGC_READ_REG(hw, IGC_CTRL);
+ reg_val &= ~IGC_CTRL_SPEED_MASK;
+ reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
+ IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
+ IGC_WRITE_REG(hw, IGC_CTRL, reg_val);
+
+ igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
+ }
+
return 0;
error_invalid_config:
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- eth_igc_rx_queue_release(dev->data->rx_queues[i]);
+ eth_igc_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- eth_igc_tx_queue_release(dev->data->tx_queues[i]);
+ eth_igc_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
}
dev->data->nb_tx_queues = 0;
}
-static void
+static int
eth_igc_close(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
int retry = 0;
+ int ret = 0;
PMD_INIT_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
if (!adapter->stopped)
- eth_igc_stop(dev);
+ ret = eth_igc_stop(dev);
+
+ igc_flow_flush(dev, NULL);
+ igc_clear_all_filter(dev);
igc_intr_other_disable(dev);
do {
/* Reset any pending lock */
igc_reset_swfw_lock(hw);
+
+ return ret;
}
static void
PMD_INIT_FUNC_TRACE();
dev->dev_ops = ð_igc_ops;
+ dev->rx_queue_count = eth_igc_rx_queue_count;
+ dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
+ dev->tx_descriptor_status = eth_igc_tx_descriptor_status;
/*
* for secondary processes, we don't initialize any further as primary
return 0;
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->back = pci_dev;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
goto err_late;
}
- /* Pass the information to the rte_eth_dev_close() that it should also
- * release the private port resources.
- */
- dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
hw->mac.get_link_status = 1;
igc->stopped = 0;
dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
- rte_intr_callback_register(&pci_dev->intr_handle,
+ rte_intr_callback_register(pci_dev->intr_handle,
eth_igc_interrupt_handler, (void *)dev);
/* enable uio/vfio intr/eventfd mapping */
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_intr_enable(pci_dev->intr_handle);
/* enable support intr */
igc_intr_other_enable(dev);
igc->rxq_stats_map[i] = -1;
}
+ igc_flow_init(dev);
+ igc_clear_all_filter(dev);
return 0;
err_late:
eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
{
PMD_INIT_FUNC_TRACE();
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
eth_igc_close(eth_dev);
return 0;
}
fw.eep_build);
}
}
+ if (ret < 0)
+ return -EINVAL;
ret += 1; /* add the size of '\0' */
- if (fw_size < (u32)ret)
+ if (fw_size < (size_t)ret)
return ret;
else
return 0;
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
+ dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
dev_info->max_vmdq_pools = 0;
dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
frame_size += VLAN_TAG_SIZE;
- /* check that mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU ||
- frame_size > MAX_RX_JUMBO_FRAME_SIZE)
- return -EINVAL;
-
/*
- * refuse mtu that requires the support of scattered packets when
- * this feature has not been enabled before.
+ * If the device is started, refuse an MTU that requires scattered
+ * packet support when this feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
- frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ if (dev->data->dev_started && !dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ PMD_INIT_LOG(ERR, "Stop port first.");
return -EINVAL;
+ }
rctl = IGC_READ_REG(hw, IGC_RCTL);
-
- /* switch to jumbo mode if needed */
- if (mtu > RTE_ETHER_MTU) {
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (mtu > RTE_ETHER_MTU)
rctl |= IGC_RCTL_LPE;
- } else {
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
rctl &= ~IGC_RCTL_LPE;
- }
IGC_WRITE_REG(hw, IGC_RCTL, rctl);
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
- IGC_WRITE_REG(hw, IGC_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
return 0;
}
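Since the jumbo-frame offload flag and rxmode.max_rx_pkt_len are gone, the MTU handler above is the single entry point for frame-size changes: the driver adds IGC_ETH_OVERHEAD itself and programs RLPML and RCTL.LPE. A minimal caller-side sketch (port_id is assumed; the port must be stopped first if the new size would require scattered Rx):

#include <stdio.h>
#include <rte_ethdev.h>

static void
set_jumbo_mtu(uint16_t port_id)
{
	/* 9000-byte MTU; the PMD adds L2 overhead and sets RCTL.LPE */
	int ret = rte_eth_dev_set_mtu(port_id, 9000);

	if (ret != 0)
		printf("rte_eth_dev_set_mtu failed: %d\n", ret);
}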
/* Rx Errors */
rte_stats->imissed = stats->mpc;
- rte_stats->ierrors = stats->crcerrs +
- stats->rlec + stats->ruc + stats->roc +
+ rte_stats->ierrors = stats->crcerrs + stats->rlec +
stats->rxerrc + stats->algnerrc;
/* Tx Errors */
static int
eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
unsigned int limit)
{
unsigned int i;
{
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
uint32_t vec = IGC_MISC_VEC_ID;
if (rte_intr_allow_others(intr_handle))
{
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
uint32_t vec = IGC_MISC_VEC_ID;
if (rte_intr_allow_others(intr_handle))
return 0;
}
+static int
+eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t ctrl;
+ int tx_pause;
+ int rx_pause;
+
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water;
+ fc_conf->low_water = hw->fc.low_water;
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = hw->mac.autoneg;
+
+ /*
+ * Return rx_pause and tx_pause status according to actual setting of
+ * the TFCE and RFCE bits in the CTRL register.
+ */
+ ctrl = IGC_READ_REG(hw, IGC_CTRL);
+ if (ctrl & IGC_CTRL_TFCE)
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (ctrl & IGC_CTRL_RFCE)
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_ETH_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_ETH_FC_NONE;
+
+ return 0;
+}
+
+static int
+eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint32_t rctl;
+ int err;
+
+ if (fc_conf->autoneg != hw->mac.autoneg)
+ return -ENOTSUP;
+
+ rx_buf_size = igc_get_rx_buffer_size(hw);
+ PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+ /* At least reserve one Ethernet frame for watermark */
+ max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
+ if (fc_conf->high_water > max_high_water ||
+ fc_conf->high_water < fc_conf->low_water) {
+ PMD_DRV_LOG(ERR,
+ "Incorrect high(%u)/low(%u) water value, max is %u",
+ fc_conf->high_water, fc_conf->low_water,
+ max_high_water);
+ return -EINVAL;
+ }
+
+ switch (fc_conf->mode) {
+ case RTE_ETH_FC_NONE:
+ hw->fc.requested_mode = igc_fc_none;
+ break;
+ case RTE_ETH_FC_RX_PAUSE:
+ hw->fc.requested_mode = igc_fc_rx_pause;
+ break;
+ case RTE_ETH_FC_TX_PAUSE:
+ hw->fc.requested_mode = igc_fc_tx_pause;
+ break;
+ case RTE_ETH_FC_FULL:
+ hw->fc.requested_mode = igc_fc_full;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
+ return -EINVAL;
+ }
+
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water = fc_conf->high_water;
+ hw->fc.low_water = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+
+ err = igc_setup_link_generic(hw);
+ if (err == IGC_SUCCESS) {
+ /*
+ * Check whether MAC control frames should be forwarded - the
+ * driver has no native capability to do that, so write the
+ * registers directly.
+ */
+ rctl = IGC_READ_REG(hw, IGC_RCTL);
+
+ /* set or clear MFLCN.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ rctl |= IGC_RCTL_PMCF;
+ else
+ rctl &= ~IGC_RCTL_PMCF;
+
+ IGC_WRITE_REG(hw, IGC_RCTL, rctl);
+ IGC_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+
+ PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
+ return -EIO;
+}
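The two handlers above are reached through rte_eth_dev_flow_ctrl_get()/rte_eth_dev_flow_ctrl_set(). A minimal get-modify-set sketch; the pause time is illustrative, and autoneg must match the MAC's current setting or the driver returns -ENOTSUP:

#include <stdio.h>
#include <rte_ethdev.h>

static void
enable_full_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc;

	if (rte_eth_dev_flow_ctrl_get(port_id, &fc) != 0)
		return;
	fc.mode = RTE_ETH_FC_FULL;	/* pause in both directions */
	fc.pause_time = 0x0680;		/* same value as IGC_FC_PAUSE_TIME */
	if (rte_eth_dev_flow_ctrl_set(port_id, &fc) != 0)
		printf("flow_ctrl_set failed on port %u\n", port_id);
}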
+
+static int
+eth_igc_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint16_t i;
+
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR,
+ "The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
+ reta_size, RTE_ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+
+ /* set redirection table */
+ for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+ union igc_rss_reta_reg reta, reg;
+ uint16_t idx, shift;
+ uint8_t j, mask;
+
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IGC_RSS_RDT_REG_SIZE_MASK);
+
+ /* if no need to update the register */
+ if (!mask ||
+ shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+ continue;
+
+ /* if all bytes will be updated, there is no need to read the register first */
+ if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
+ reg.dword = 0;
+ else
+ reg.dword = IGC_READ_REG_LE_VALUE(hw,
+ IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
+
+ /* update the register */
+ RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
+ for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
+ if (mask & (1u << j))
+ reta.bytes[j] =
+ (uint8_t)reta_conf[idx].reta[shift + j];
+ else
+ reta.bytes[j] = reg.bytes[j];
+ }
+ IGC_WRITE_REG_LE_VALUE(hw,
+ IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
+ }
+
+ return 0;
+}
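The idx/shift/mask arithmetic above mirrors how callers populate struct rte_eth_rss_reta_entry64: each 64-entry group carries a validity mask plus the target queue indices. A minimal sketch spreading the 128 RETA entries round-robin over nb_rxq queues (port_id and nb_rxq are assumed to be set up elsewhere):

#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t nb_rxq)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
						  RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;	/* entry is valid */
		reta_conf[idx].reta[shift] = i % nb_rxq;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   RTE_ETH_RSS_RETA_SIZE_128);
}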
+
+static int
+eth_igc_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint16_t i;
+
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR,
+ "The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
+ reta_size, RTE_ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+
+ /* read redirection table */
+ for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+ union igc_rss_reta_reg reta;
+ uint16_t idx, shift;
+ uint8_t j, mask;
+
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IGC_RSS_RDT_REG_SIZE_MASK);
+
+ /* if no need to read register */
+ if (!mask ||
+ shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+ continue;
+
+ /* read register and get the queue index */
+ RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
+ reta.dword = IGC_READ_REG_LE_VALUE(hw,
+ IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
+ for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
+ if (mask & (1u << j))
+ reta_conf[idx].reta[shift + j] = reta.bytes[j];
+ }
+ }
+
+ return 0;
+}
+
+static int
+eth_igc_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ igc_hw_rss_hash_set(hw, rss_conf);
+ return 0;
+}
+
+static int
+eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
+ uint32_t mrqc;
+ uint64_t rss_hf;
+
+ if (hash_key != NULL) {
+ int i;
+
+ /* the supplied buffer must match the hardware hash key size */
+ if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
+ rss_conf->rss_key_len, IGC_HKEY_SIZE);
+ return -EINVAL;
+ }
+
+ /* read RSS key from register */
+ for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
+ hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
+ }
+
+ /* get RSS functions configured in MRQC register */
+ mrqc = IGC_READ_REG(hw, IGC_MRQC);
+ if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
+ return 0;
+
+ rss_hf = 0;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
+ rss_hf |= RTE_ETH_RSS_IPV4;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
+ rss_hf |= RTE_ETH_RSS_IPV6;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
+ rss_hf |= RTE_ETH_RSS_IPV6_EX;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
+ rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
+ rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
+
+ rss_conf->rss_hf |= rss_hf;
+ return 0;
+}
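Callers reach this through rte_eth_dev_rss_hash_conf_get(); passing a key buffer is optional, but if one is given its length must equal the hardware key size (IGC_HKEY_MAX_INDEX 32-bit registers, i.e. 40 bytes - an assumption about IGC_HKEY_SIZE inferred from the check above). A minimal sketch:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
dump_rss_conf(uint16_t port_id)
{
	uint8_t key[40];	/* assumed IGC_HKEY_SIZE */
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
	};

	if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) == 0)
		printf("rss_hf: 0x%" PRIx64 "\n", rss_conf.rss_hf);
}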
+
+static int
+eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
+ vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
+ vfta = shadow_vfta->vfta[vid_idx];
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
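The VFTA is an array of 32-bit registers, so the handler derives the register index from the upper bits of the VLAN ID and the bit position from the lower five. Applications drive it through rte_eth_dev_vlan_filter(); a minimal sketch (assumes RTE_ETH_RX_OFFLOAD_VLAN_FILTER was enabled at configure time):

#include <stdio.h>
#include <rte_ethdev.h>

static void
accept_vlan_100(uint16_t port_id)
{
	/* on = 1 adds the VLAN to the filter table, 0 removes it */
	if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
		printf("failed to add VLAN 100 on port %u\n", port_id);
}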
+
+static void
+igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ igc_read_reg_check_clear_bits(hw, IGC_RCTL,
+ IGC_RCTL_CFIEN | IGC_RCTL_VFE);
+}
+
+static void
+igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
+ uint32_t reg_val;
+ int i;
+
+ /* Filter Table Enable, CFI not used for packet acceptance */
+ reg_val = IGC_READ_REG(hw, IGC_RCTL);
+ reg_val &= ~IGC_RCTL_CFIEN;
+ reg_val |= IGC_RCTL_VFE;
+ IGC_WRITE_REG(hw, IGC_RCTL, reg_val);
+
+ /* restore VFTA table */
+ for (i = 0; i < IGC_VFTA_SIZE; i++)
+ IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
+}
+
+static void
+igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+ igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
+}
+
+static void
+igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+ igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
+}
+
+static int
+igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
+ uint32_t ctrl_ext;
+
+ ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
+
+ /* if extended VLAN hasn't been enabled, nothing to do */
+ if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
+ return 0;
+
+ /* Update maximum packet length */
+ if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
+ PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
+ frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
+ return -EINVAL;
+ }
+ IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);
+
+ IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
+ return 0;
+}
+
+static int
+igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
+ uint32_t ctrl_ext;
+
+ ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
+
+ /* if extended VLAN is already enabled, nothing to do */
+ if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
+ return 0;
+
+ /* Update maximum packet length */
+ if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
+ PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
+ frame_size, MAX_RX_JUMBO_FRAME_SIZE);
+ return -EINVAL;
+ }
+ IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
+
+ IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
+ return 0;
+}
+
+static int
+eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ igc_vlan_hw_strip_enable(dev);
+ else
+ igc_vlan_hw_strip_disable(dev);
+ }
+
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ igc_vlan_hw_filter_enable(dev);
+ else
+ igc_vlan_hw_filter_disable(dev);
+ }
+
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+ return igc_vlan_hw_extend_enable(dev);
+ else
+ return igc_vlan_hw_extend_disable(dev);
+ }
+
+ return 0;
+}
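This mask-based dispatcher is invoked by rte_eth_dev_set_vlan_offload(), which compares the requested flags against the current rxmode offloads and passes only the changed groups down. A minimal get-modify-set sketch enabling Rx VLAN stripping:

#include <stdio.h>
#include <rte_ethdev.h>

static void
enable_vlan_strip(uint16_t port_id)
{
	int flags = rte_eth_dev_get_vlan_offload(port_id);

	flags |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	if (rte_eth_dev_set_vlan_offload(port_id, flags) != 0)
		printf("failed to enable VLAN stripping\n");
}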
+
+static int
+eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t reg_val;
+
+ /* only outer TPID of double VLAN can be configured */
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
+ reg_val = IGC_READ_REG(hw, IGC_VET);
+ reg_val = (reg_val & (~IGC_VET_EXT)) |
+ ((uint32_t)tpid << IGC_VET_EXT_SHIFT);
+ IGC_WRITE_REG(hw, IGC_VET, reg_val);
+
+ return 0;
+ }
+
+ /* all other TPID values are read-only */
+ PMD_DRV_LOG(ERR, "Not supported");
+ return -ENOTSUP;
+}
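Only the outer TPID is writable because the hardware keeps it in the upper 16 bits of the VET register (see IGC_VET_EXT above). A minimal sketch selecting 802.1ad (QinQ, 0x88A8) as the outer tag type:

#include <stdio.h>
#include <rte_ethdev.h>

static void
set_outer_tpid_qinq(uint16_t port_id)
{
	if (rte_eth_dev_set_vlan_ether_type(port_id, RTE_ETH_VLAN_TYPE_OUTER,
			RTE_ETHER_TYPE_QINQ) != 0)
		printf("failed to set outer TPID on port %u\n", port_id);
}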
+
static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)