*/
#define IXGBE_FC_LO 0x40
+/* Default minimum inter-interrupt interval for EITR configuration */
+#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E
+
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
| IXGBE_TIMINCA_INCVALUE)
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
+static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *xstats, unsigned n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
uint16_t queue_id,
uint8_t stat_idx,
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
uint16_t queue, int on);
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
+static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector);
+static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
uint8_t rule_id);
+static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector);
+static void ixgbe_configure_msix(struct rte_eth_dev *dev);
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate);
static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
+static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
/*
* Define VF Stats MACRO for Non "cleared on read" register
*/
-#define UPDATE_VF_STAT(reg, last, cur) \
+#define UPDATE_VF_STAT(reg, last, cur) \
{ \
- u32 latest = IXGBE_READ_REG(hw, reg); \
- cur += latest - last; \
+ uint32_t latest = IXGBE_READ_REG(hw, reg); \
+ cur += (latest - last) & UINT_MAX; \
last = latest; \
}
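+
+/*
+ * Example (illustrative): VFGPRC is a 32-bit counter that is not
+ * cleared on read; uint32_t modular arithmetic keeps the delta correct
+ * even when the hardware counter wraps past 2^32:
+ *
+ *   UPDATE_VF_STAT(IXGBE_VFGPRC, hw_stats->last_vfgprc, hw_stats->vfgprc);
+ */
+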
.allmulticast_disable = ixgbe_dev_allmulticast_disable,
.link_update = ixgbe_dev_link_update,
.stats_get = ixgbe_dev_stats_get,
+ .xstats_get = ixgbe_dev_xstats_get,
.stats_reset = ixgbe_dev_stats_reset,
+ .xstats_reset = ixgbe_dev_xstats_reset,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
.dev_infos_get = ixgbe_dev_info_get,
.mtu_set = ixgbe_dev_mtu_set,
.tx_queue_start = ixgbe_dev_tx_queue_start,
.tx_queue_stop = ixgbe_dev_tx_queue_stop,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
+ .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_queue_count = ixgbe_dev_rx_queue_count,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
.get_eeprom_length = ixgbe_get_eeprom_length,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
+ .get_dcb_info = ixgbe_dev_get_dcb_info,
};
/*
.vlan_offload_set = ixgbevf_vlan_offload_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
+ .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
+ .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
.mac_addr_set = ixgbevf_set_default_mac_addr,
.get_reg_length = ixgbevf_get_reg_length,
.get_reg = ixgbevf_get_regs,
+ .reta_update = ixgbe_dev_rss_reta_update,
+ .reta_query = ixgbe_dev_rss_reta_query,
+ .rss_hash_update = ixgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
};
+/* Store statistics names and their offsets in the stats structure. */
+struct rte_ixgbe_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
+ {"rx_illegal_byte_err", offsetof(struct ixgbe_hw_stats, errbc)},
+ {"rx_len_err", offsetof(struct ixgbe_hw_stats, rlec)},
+ {"rx_undersize_count", offsetof(struct ixgbe_hw_stats, ruc)},
+ {"rx_oversize_count", offsetof(struct ixgbe_hw_stats, roc)},
+ {"rx_fragment_count", offsetof(struct ixgbe_hw_stats, rfc)},
+ {"rx_jabber_count", offsetof(struct ixgbe_hw_stats, rjc)},
+ {"l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
+ {"mac_local_fault", offsetof(struct ixgbe_hw_stats, mlfc)},
+ {"mac_remote_fault", offsetof(struct ixgbe_hw_stats, mrfc)},
+ {"mac_short_pkt_discard", offsetof(struct ixgbe_hw_stats, mspdc)},
+ {"fccrc_error", offsetof(struct ixgbe_hw_stats, fccrc)},
+ {"fcoe_drop", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
+ {"fc_last_error", offsetof(struct ixgbe_hw_stats, fclast)},
+ {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
+ {"rx_phy_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
+ {"mgmt_pkts_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
+ {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
+ {"fdir_match", offsetof(struct ixgbe_hw_stats, fdirmatch)},
+ {"fdir_miss", offsetof(struct ixgbe_hw_stats, fdirmiss)},
+ {"tx_flow_control_xon", offsetof(struct ixgbe_hw_stats, lxontxc)},
+ {"rx_flow_control_xon", offsetof(struct ixgbe_hw_stats, lxonrxc)},
+ {"tx_flow_control_xoff", offsetof(struct ixgbe_hw_stats, lxofftxc)},
+ {"rx_flow_control_xoff", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
+};
+
+#define IXGBE_NB_XSTATS (sizeof(rte_ixgbe_stats_strings) / \
+ sizeof(rte_ixgbe_stats_strings[0]))
+
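+/*
+ * Usage sketch (illustrative, not part of this patch): an application
+ * queries the count first, then fetches the name/value pairs:
+ *
+ *   int nb = rte_eth_xstats_get(port_id, NULL, 0);
+ *   struct rte_eth_xstats *xs = calloc(nb, sizeof(*xs));
+ *   rte_eth_xstats_get(port_id, xs, nb);
+ */
+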
/**
* Atomically reads the link status information from global
* structure rte_eth_dev.
(hw->mac.type != ixgbe_mac_X550EM_x))
return -ENOSYS;
- PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+ PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
(int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
queue_id, stat_idx);
else
stat_mappings->rqsmr[n] |= qsmr_mask;
- PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+ PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
(int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
queue_id, stat_idx);
- PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+ PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
/* Now write the mapping in the appropriate register */
if (is_rx) {
- PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
+ PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
stat_mappings->rqsmr[n], n);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
}
else {
- PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
+ PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
stat_mappings->tqsm[n], n);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
}
ixgbe_set_tx_function(eth_dev, txq);
} else {
/* Use default TX function if we get here */
- PMD_INIT_LOG(INFO, "No TX queues configured yet. "
- "Using default TX function.");
+ PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
+ "Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
- rte_intr_callback_register(&(pci_dev->intr_handle),
- ixgbe_dev_interrupt_handler, (void *)eth_dev);
-
- /* enable uio intr after callback register */
- rte_intr_enable(&(pci_dev->intr_handle));
-
/* enable support intr */
ixgbe_enable_intr(eth_dev);
return 0;
}
+static int
+eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ pci_dev = eth_dev->pci_dev;
+
+ if (hw->adapter_stopped == 0)
+ ixgbe_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ /* Unlock any pending hardware semaphore */
+ ixgbe_swfw_lock_reset(hw);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&(pci_dev->intr_handle));
+ rte_intr_callback_unregister(&(pci_dev->intr_handle),
+ ixgbe_dev_interrupt_handler, (void *)eth_dev);
+
+ /* uninitialize PF if max_vfs not zero */
+ ixgbe_pf_host_uninit(eth_dev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ rte_free(eth_dev->data->hash_mac_addrs);
+ eth_dev->data->hash_mac_addrs = NULL;
+
+ return 0;
+}
/*
* Negotiate mailbox API version with the PF.
return 0;
}
+/* Virtual Function device uninit */
+
+static int
+eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw *hw;
+ unsigned i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ if (hw->adapter_stopped == 0)
+ ixgbevf_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ /* Disable the interrupts for VF */
+ ixgbevf_intr_disable(hw);
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ eth_dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ eth_dev->data->nb_tx_queues = 0;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
static struct eth_driver rte_ixgbe_pmd = {
.pci_drv = {
.name = "rte_ixgbe_pmd",
.id_table = pci_id_ixgbe_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = eth_ixgbe_dev_init,
+ .eth_dev_uninit = eth_ixgbe_dev_uninit,
.dev_private_size = sizeof(struct ixgbe_adapter),
};
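+
+/*
+ * Usage sketch (illustrative): with RTE_PCI_DRV_DETACHABLE set, port
+ * hotplug can tear the port down at runtime, which ends up invoking
+ * eth_ixgbe_dev_uninit():
+ *
+ *   char name[RTE_ETH_NAME_MAX_LEN];
+ *   rte_eth_dev_detach(port_id, name);
+ */
+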
.pci_drv = {
.name = "rte_ixgbevf_pmd",
.id_table = pci_id_ixgbevf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = eth_ixgbevf_dev_init,
+ .eth_dev_uninit = eth_ixgbevf_dev_uninit,
.dev_private_size = sizeof(struct ixgbe_adapter),
};
if (hw->mac.type == ixgbe_mac_82598EB) {
/* No queue level support */
- PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
+ PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
else {
if (hw->mac.type == ixgbe_mac_82598EB) {
/* No queue level supported */
- PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
+ PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
else {
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
+static int
+ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
+{
+ switch (nb_rx_q) {
+ case 1:
+ case 2:
+ RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+ break;
+ case 4:
+ RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
+
+ return 0;
+}
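+
+/*
+ * Worked example (assuming max_vfs = 31): nb_rx_q = 2 selects
+ * ETH_64_POOLS with 2 queues per pool, and the PF default pool
+ * starts at queue index 31 * 2 = 62.
+ */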
+
+static int
+ixgbe_check_mq_mode(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+ /* check multi-queue mode */
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ /* DCB/RSS VMDQ in SR-IOV mode is not implemented yet */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " unsupported mq_mode rx %d.",
+ dev_conf->rxmode.mq_mode);
+ return -EINVAL;
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
+ if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " invalid queue number"
+ " for VMDQ RSS, allowed"
+ " value are 1, 2 or 4.");
+ return -EINVAL;
+ }
+ break;
+ case ETH_MQ_RX_VMDQ_ONLY:
+ case ETH_MQ_RX_NONE:
+ /* if no mq mode is configured, use the default scheme */
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+ break;
+ default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
+ /* SRIOV only works in VMDq enable mode */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " wrong mq_mode rx %d.",
+ dev_conf->rxmode.mq_mode);
+ return -EINVAL;
+ }
+
+ switch (dev_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ /* DCB VMDQ in SR-IOV mode is not implemented yet */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " unsupported VMDQ mq_mode tx %d.",
+ dev_conf->txmode.mq_mode);
+ return -EINVAL;
+ default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+ break;
+ }
+
+ /* check valid queue number */
+ if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+ (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " queue number must less equal to %d.",
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+ return -EINVAL;
+ }
+ } else {
+ /* check configuration for vmdq+dcb mode */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_conf *conf;
+
+ if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
+ IXGBE_VMDQ_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+ " nb_queue_pools must be %d or %d.",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return -EINVAL;
+ }
+ }
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+ if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
+ IXGBE_VMDQ_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+ " nb_queue_pools != %d and"
+ " nb_queue_pools != %d.",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return -EINVAL;
+ }
+ }
+
+ /* For DCB mode check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ const struct rte_eth_dcb_rx_conf *conf;
+
+ if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
+ IXGBE_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+ " and nb_tcs != %d.",
+ ETH_4_TCS, ETH_8_TCS);
+ return -EINVAL;
+ }
+ }
+
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ const struct rte_eth_dcb_tx_conf *conf;
+
+ if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
+ IXGBE_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+ " and nb_tcs != %d.",
+ ETH_4_TCS, ETH_8_TCS);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
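+/*
+ * Configuration sketch (illustrative): a setup that passes the SR-IOV
+ * checks above, assuming the PF was started with max_vfs > 0; the
+ * ETH_MQ_TX_NONE default is rewritten to ETH_MQ_TX_VMDQ_ONLY:
+ *
+ *   struct rte_eth_conf conf = { 0 };
+ *   conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ *   rte_eth_dev_configure(port_id, 2, 2, &conf);
+ */
+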
static int
ixgbe_dev_configure(struct rte_eth_dev *dev)
{
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ int ret;
PMD_INIT_FUNC_TRACE();
+ /* multiple queue mode checking */
+ ret = ixgbe_check_mq_mode(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
+ ret);
+ return ret;
+ }
/* set flag to update link status after init */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
int mask = 0;
}
/* stop adapter */
- hw->adapter_stopped = FALSE;
+ hw->adapter_stopped = 0;
ixgbe_stop_adapter(hw);
/* reinitialize adapter
/* configure PF module if SRIOV enabled */
ixgbe_pf_host_configure(dev);
+ /* check and configure queue intr-vector mapping */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ intr_vector = dev->data->nb_rx_queues;
+
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int),
+ 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* configure msix for sleeping until rx interrupt arrives */
+ ixgbe_configure_msix(dev);
+
/* initialize transmission unit */
ixgbe_dev_tx_init(dev);
skip_link_setup:
/* check if lsc interrupt is enabled */
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- ixgbe_dev_lsc_interrupt_setup(dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ if (rte_intr_allow_others(intr_handle)) {
+ rte_intr_callback_register(intr_handle,
+ ixgbe_dev_interrupt_handler,
+ (void *)dev);
+ ixgbe_dev_lsc_interrupt_setup(dev);
+ } else
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex\n");
+ }
+
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ ixgbe_dev_rxq_interrupt_setup(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
int vf;
PMD_INIT_FUNC_TRACE();
/* disable interrupts */
ixgbe_disable_intr(hw);
+ /* disable intr eventfd mapping */
+ rte_intr_disable(intr_handle);
+
/* reset the NIC */
ixgbe_pf_reset_hw(hw);
- hw->adapter_stopped = FALSE;
+ hw->adapter_stopped = 0;
/* stop adapter */
ixgbe_stop_adapter(hw);
memset(filter_info->fivetuple_mask, 0,
sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
}
/*
ixgbe_dev_stop(dev);
hw->adapter_stopped = 1;
+ ixgbe_dev_free_queues(dev);
+
ixgbe_disable_pcie_master(hw);
/* reprogram the RAR[0] in case user changed it. */
ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats
*hw_stats, uint64_t *total_missed_rx,
uint64_t *total_qbrc, uint64_t *total_qprc,
- uint64_t *rxnfgpc, uint64_t *txdgpc,
uint64_t *total_qprdc)
{
uint32_t bprc, lxon, lxoff, total;
hw_stats->mpc[i] += mp;
/* Running comprehensive total for stats display */
*total_missed_rx += hw_stats->mpc[i];
- if (hw->mac.type == ixgbe_mac_82598EB)
+ if (hw->mac.type == ixgbe_mac_82598EB) {
hw_stats->rnbc[i] +=
IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ hw_stats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ hw_stats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ } else {
+ hw_stats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ hw_stats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ hw_stats->pxon2offc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+ }
hw_stats->pxontxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- hw_stats->pxonrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
hw_stats->pxofftxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
- hw_stats->pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
- hw_stats->pxon2offc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
}
for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
/* Note that gprc counts missed packets */
hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
- *rxnfgpc += IXGBE_READ_REG(hw, IXGBE_RXNFGPC);
- *txdgpc += IXGBE_READ_REG(hw, IXGBE_TXDGPC);
if (hw->mac.type != ixgbe_mac_82598EB) {
hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
struct ixgbe_hw_stats *hw_stats =
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
- uint64_t rxnfgpc, txdgpc;
unsigned i;
total_missed_rx = 0;
total_qbrc = 0;
total_qprc = 0;
total_qprdc = 0;
- rxnfgpc = 0;
- txdgpc = 0;
ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
- &total_qprc, &rxnfgpc, &txdgpc, &total_qprdc);
+ &total_qprc, &total_qprdc);
if (stats == NULL)
return;
stats->ibytes = total_qbrc;
stats->opackets = hw_stats->gptc;
stats->obytes = hw_stats->gotc;
- stats->imcasts = hw_stats->mprc;
for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
stats->q_ipackets[i] = hw_stats->qprc[i];
}
/* Rx Errors */
- stats->ibadcrc = hw_stats->crcerrs;
- stats->ibadlen = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
- stats->imissed = total_missed_rx;
- stats->ierrors = stats->ibadcrc +
- stats->ibadlen +
- stats->imissed +
- hw_stats->illerrc + hw_stats->errbc;
+ stats->ierrors = hw_stats->crcerrs +
+ hw_stats->mspdc +
+ hw_stats->rlec +
+ hw_stats->ruc +
+ hw_stats->roc +
+ total_missed_rx +
+ hw_stats->illerrc +
+ hw_stats->errbc +
+ hw_stats->xec +
+ hw_stats->mlfc +
+ hw_stats->mrfc +
+ hw_stats->rfc +
+ hw_stats->fccrc +
+ hw_stats->fclast;
/* Tx Errors */
- /*txdgpc: packets that are DMA'ed*/
- /*gptc: packets that are sent*/
- stats->oerrors = txdgpc - hw_stats->gptc;
-
- /* XON/XOFF pause frames */
- stats->tx_pause_xon = hw_stats->lxontxc;
- stats->rx_pause_xon = hw_stats->lxonrxc;
- stats->tx_pause_xoff = hw_stats->lxofftxc;
- stats->rx_pause_xoff = hw_stats->lxoffrxc;
-
- /* Flow Director Stats registers */
- stats->fdirmatch = hw_stats->fdirmatch;
- stats->fdirmiss = hw_stats->fdirmiss;
+ stats->oerrors = 0;
}
static void
memset(stats, 0, sizeof(*stats));
}
+static int
+ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_stats *hw_stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+ unsigned i, count = IXGBE_NB_XSTATS;
+
+ if (n < count)
+ return count;
+
+ total_missed_rx = 0;
+ total_qbrc = 0;
+ total_qprc = 0;
+ total_qprdc = 0;
+
+ ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
+ &total_qprc, &total_qprdc);
+
+ /* If this is a reset, xstats is NULL and we have already cleared
+ * the registers by reading them.
+ */
+ if (!xstats)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IXGBE_NB_XSTATS; i++) {
+ snprintf(xstats[i].name, sizeof(xstats[i].name),
+ "%s", rte_ixgbe_stats_strings[i].name);
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_stats_strings[i].offset);
+ }
+
+ return count;
+}
+
+static void
+ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw_stats *stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* HW registers are cleared on read */
+ ixgbe_dev_xstats_get(dev, NULL, IXGBE_NB_XSTATS);
+
+ /* Reset software totals */
+ memset(stats, 0, sizeof(*stats));
+}
+
static void
ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
stats->opackets = hw_stats->vfgptc;
stats->obytes = hw_stats->vfgotc;
stats->imcasts = hw_stats->vfmprc;
+ /* stats->imcasts should be removed as imcasts is deprecated */
}
static void
ETH_TXQ_FLAGS_NOOFFLOADS,
};
dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
}
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM;
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
return 0;
}
+/**
+ * It adds the Rx queue interrupt causes to the interrupt mask.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= IXGBE_EICR_RTX_QUEUE;
+
+ return 0;
+}
+
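+/*
+ * Note: IXGBE_EICR_RTX_QUEUE covers the 16 low-order RTxQ cause bits of
+ * EICR, so this arms queue vectors 0-15; vectors 16-63 are unmasked on
+ * demand via EIMS_EX in ixgbe_dev_rx_queue_intr_enable() below.
+ */
+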
/*
* It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
*
/* read-on-clear nic registers here */
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
- PMD_DRV_LOG(INFO, "eicr %x", eicr);
+ PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
intr->flags = 0;
- if (eicr & IXGBE_EICR_LSC) {
- /* set flag for async link update */
+
+ /* set flag for async link update */
+ if (eicr & IXGBE_EICR_LSC)
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
- }
if (eicr & IXGBE_EICR_MAILBOX)
intr->flags |= IXGBE_FLAG_MAILBOX;
return 0;
}
+static int
+ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ /* clear all cause mask */
+ ixgbevf_intr_disable(hw);
+
+ /* read-on-clear nic registers here */
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ PMD_DRV_LOG(INFO, "eicr %x", eicr);
+
+ intr->flags = 0;
+
+ /* set flag for async link update */
+ if (eicr & IXGBE_EICR_LSC)
+ intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+
+ return 0;
+}
+
/**
* It gets and then prints the link status.
*
PMD_INIT_LOG(INFO, " Port %d: Link Down",
(int)(dev->data->port_id));
}
- PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+ PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
dev->pci_dev->addr.domain,
dev->pci_dev->addr.bus,
dev->pci_dev->addr.devid,
return 0;
}
+static int
+ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_DRV_LOG(DEBUG, "enable intr immediately");
+ ixgbevf_intr_enable(hw);
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+ return 0;
+}
+
/**
* Interrupt handler which shall be registered for alarm callback for delayed
* handling specific interrupt to wait for the stable nic state. As the
*/
static void
ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+ void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
ixgbe_dev_interrupt_get_status(dev);
ixgbe_dev_interrupt_action(dev);
}
+static void
+ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ ixgbevf_dev_interrupt_get_status(dev);
+ ixgbevf_dev_interrupt_action(dev);
+}
+
static int
ixgbe_dev_led_on(struct rte_eth_dev *dev)
{
uint32_t reta, r;
uint16_t idx, shift;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+
+ if (!ixgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+ if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)\n", reta_size, sp_reta_size);
return -EINVAL;
}
IXGBE_4_BIT_MASK);
if (!mask)
continue;
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
if (mask == IXGBE_4_BIT_MASK)
r = 0;
else
- r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+ r = IXGBE_READ_REG(hw, reta_reg);
for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
if (mask & (0x1 << j))
reta |= reta_conf[idx].reta[shift + j] <<
reta |= r & (IXGBE_8_BIT_MASK <<
(CHAR_BIT * j));
}
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ IXGBE_WRITE_REG(hw, reta_reg, reta);
}
return 0;
uint32_t reta;
uint16_t idx, shift;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+ if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)\n", reta_size, sp_reta_size);
return -EINVAL;
}
- for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
+ for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
if (!mask)
continue;
- reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+ reta = IXGBE_READ_REG(hw, reta_reg);
for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
if (mask & (0x1 << j))
reta_conf[idx].reta[shift + j] =
IXGBE_WRITE_FLUSH(hw);
}
+static void
+ixgbevf_intr_enable(struct ixgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* VF enable interrupt autoclean */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
if (!conf->rxmode.hw_strip_crc) {
- PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
+ PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
conf->rxmode.hw_strip_crc = 1;
}
#else
if (conf->rxmode.hw_strip_crc) {
- PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
+ PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
conf->rxmode.hw_strip_crc = 0;
}
#endif
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t intr_vector = 0;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
int err, mask = 0;
PMD_INIT_FUNC_TRACE();
ixgbevf_dev_rxtx_start(dev);
+ /* check and configure queue intr-vector mapping */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ intr_vector = dev->data->nb_rx_queues;
+
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+ ixgbevf_configure_msix(dev);
+
+ if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ if (rte_intr_allow_others(intr_handle))
+ rte_intr_callback_register(intr_handle,
+ ixgbevf_dev_interrupt_handler,
+ (void *)dev);
+ else
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex\n");
+ }
+
+ rte_intr_enable(intr_handle);
+
+ /* Re-enable interrupt for VF */
+ ixgbevf_intr_enable(hw);
+
return 0;
}
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
- hw->adapter_stopped = TRUE;
+ hw->adapter_stopped = 1;
ixgbe_stop_adapter(hw);
/*
dev->data->scattered_rx = 0;
ixgbe_dev_clear_queues(dev);
+
+ /* disable intr eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
}
static void
ixgbevf_dev_close(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev;
PMD_INIT_FUNC_TRACE();
ixgbevf_dev_stop(dev);
+ ixgbe_dev_free_queues(dev);
+
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ pci_dev = dev->pci_dev;
+ if (pci_dev->intr_handle.intr_vec) {
+ rte_free(pci_dev->intr_handle.intr_vec);
+ pci_dev->intr_handle.intr_vec = NULL;
+ }
}
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
return 0;
}
+static int
+ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ mask |= (1 << queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ mask &= ~(1 << queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ return 0;
+}
+
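+/*
+ * Usage sketch (illustrative): after registering the queue's event fd
+ * with rte_eth_dev_rx_intr_ctl_q(), a polling thread parks on it until
+ * traffic arrives:
+ *
+ *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
+ *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
+ *   rte_eth_dev_rx_intr_disable(port_id, queue_id);
+ */
+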
+static int
+ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (queue_id < 16) {
+ ixgbe_disable_intr(hw);
+ intr->mask |= (1 << queue_id);
+ ixgbe_enable_intr(dev);
+ } else if (queue_id < 32) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+ mask &= (1 << queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ } else if (queue_id < 64) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+ mask &= (1 << (queue_id - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ }
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (queue_id < 16) {
+ ixgbe_disable_intr(hw);
+ intr->mask &= ~(1 << queue_id);
+ ixgbe_enable_intr(dev);
+ } else if (queue_id < 32) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+ mask &= ~(1 << queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ } else if (queue_id < 64) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+ mask &= ~(1 << (queue_id - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ }
+
+ return 0;
+}
+
+static void
+ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ tmp &= ~0xFF;
+ tmp |= msix_vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
+ } else {
+ /* rx or tx cause */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
+ }
+}
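+
+/*
+ * Worked example: mapping Rx queue 3 (direction 0) lands in VTIVAR(1)
+ * at bit offset 16 * (3 & 1) + 8 * 0 = 16, i.e. bits 23:16 of the
+ * register, with IXGBE_IVAR_ALLOC_VAL set in the written byte.
+ */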
+
+/**
+ * set the IVAR registers, mapping interrupt causes to vectors
+ * @param hw
+ * pointer to ixgbe_hw struct
+ * @param direction
+ * 0 for Rx, 1 for Tx, -1 for other causes
+ * @param queue
+ * queue to map the corresponding interrupt to
+ * @param msix_vector
+ * the vector to map to the corresponding queue
+ */
+static void
+ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (direction == -1)
+ direction = 0;
+ idx = (((direction * 64) + queue) >> 2) & 0x1F;
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
+ tmp &= ~(0xFF << (8 * (queue & 0x3)));
+ tmp |= (msix_vector << (8 * (queue & 0x3)));
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
+ } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
+ (hw->mac.type == ixgbe_mac_X540)) {
+ if (direction == -1) {
+ /* other causes */
+ idx = ((queue & 1) * 8);
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
+ } else {
+ /* rx or tx causes */
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
+ }
+ }
+}
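+
+/*
+ * Worked example (82599/X540): Tx queue 4 (direction 1) selects IVAR(2)
+ * at bit offset 16 * (4 & 1) + 8 * 1 = 8, so the vector is written into
+ * bits 15:8 together with IXGBE_IVAR_ALLOC_VAL.
+ */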
+
+static void
+ixgbevf_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t q_idx;
+ uint32_t vector_idx = 0;
+
+ /* don't configure the msix register if no mapping has been done
+ * between intr vectors and event fds.
+ */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ /* Configure all RX queues of VF */
+ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
+ /* Force all queues to use vector 0,
+ * as IXGBE_VF_MAXMSIVECTOR = 1
+ */
+ ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
+ intr_handle->intr_vec[q_idx] = vector_idx;
+ }
+
+ /* Configure VF other-cause (mailbox) ivar */
+ ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+}
+
+/**
+ * Sets up the hardware to properly generate MSI-X interrupts
+ * @param dev
+ * pointer to rte_eth_dev struct
+ */
+static void
+ixgbe_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t queue_id, vec = 0;
+ uint32_t mask;
+ uint32_t gpie;
+
+ /* don't configure the msix register if no mapping has been done
+ * between intr vectors and event fds
+ */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ /* setup GPIE for MSI-x mode */
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
+ /* auto clearing and auto setting corresponding bits in EIMS
+ * when MSI-X interrupt is triggered
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ /* by default, 1:1 mapping */
+ ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
+ intr_handle->max_intr - 1);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_set_ivar_map(hw, -1, 1, intr_handle->max_intr - 1);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id),
+ IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+
+ IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+}
+
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate)
{
first = in_eeprom->offset >> 1;
length = in_eeprom->length >> 1;
- if ((first >= hw->eeprom.word_size) ||
- ((first + length) >= hw->eeprom.word_size))
+ if ((first > hw->eeprom.word_size) ||
+ ((first + length) > hw->eeprom.word_size))
return -EINVAL;
in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
first = in_eeprom->offset >> 1;
length = in_eeprom->length >> 1;
- if ((first >= hw->eeprom.word_size) ||
- ((first + length) >= hw->eeprom.word_size))
+ if ((first > hw->eeprom.word_size) ||
+ ((first + length) > hw->eeprom.word_size))
return -EINVAL;
in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
return eeprom->ops.write_buffer(hw, first, length, data);
}
+uint16_t
+ixgbe_reta_size_get(enum ixgbe_mac_type mac_type)
+{
+ switch (mac_type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ return ETH_RSS_RETA_SIZE_512;
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return ETH_RSS_RETA_SIZE_64;
+ default:
+ return ETH_RSS_RETA_SIZE_128;
+ }
+}
+
+uint32_t
+ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx)
+{
+ switch (mac_type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ if (reta_idx < ETH_RSS_RETA_SIZE_128)
+ return IXGBE_RETA(reta_idx >> 2);
+ else
+ return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return IXGBE_VFRETA(reta_idx >> 2);
+ default:
+ return IXGBE_RETA(reta_idx >> 2);
+ }
+}
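+
+/*
+ * Worked example: on X550 the 512-entry table spans two register files;
+ * reta_idx 130 resolves to ERETA((130 - 128) >> 2) = ERETA(0), while
+ * reta_idx 96 stays in RETA(96 >> 2) = RETA(24).
+ */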
+
+uint32_t
+ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type)
+{
+ switch (mac_type) {
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return IXGBE_VFMRQC;
+ default:
+ return IXGBE_MRQC;
+ }
+}
+
+uint32_t
+ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i)
+{
+ switch (mac_type) {
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return IXGBE_VFRSSRK(i);
+ default:
+ return IXGBE_RSSRK(i);
+ }
+}
+
+bool
+ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type)
+{
+ switch (mac_type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ return 0;
+ default:
+ return 1;
+ }
+}
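+
+/*
+ * Note: 82599 and X540 VFs have no RETA registers of their own, so RSS
+ * redirection-table updates are rejected for them (see the -ENOTSUP path
+ * in ixgbe_dev_rss_reta_update()).
+ */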
+
+static int
+ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct ixgbe_dcb_config *dcb_config =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+ else
+ dcb_info->nb_tcs = 1;
+
+ if (dcb_config->vt_mode) { /* vt is enabled */
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+ for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+ for (j = 0; j < dcb_info->nb_tcs; j++) {
+ dcb_info->tc_queue.tc_rxq[i][j].base =
+ i * dcb_info->nb_tcs + j;
+ dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
+ dcb_info->tc_queue.tc_txq[i][j].base =
+ i * dcb_info->nb_tcs + j;
+ dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+ }
+ }
+ } else { /* vt is disabled */
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+ if (dcb_info->nb_tcs == ETH_4_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 64;
+ dcb_info->tc_queue.tc_txq[0][2].base = 96;
+ dcb_info->tc_queue.tc_txq[0][3].base = 112;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 32;
+ dcb_info->tc_queue.tc_txq[0][2].base = 64;
+ dcb_info->tc_queue.tc_txq[0][3].base = 80;
+ dcb_info->tc_queue.tc_txq[0][4].base = 96;
+ dcb_info->tc_queue.tc_txq[0][5].base = 104;
+ dcb_info->tc_queue.tc_txq[0][6].base = 112;
+ dcb_info->tc_queue.tc_txq[0][7].base = 120;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+ }
+ }
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
+ }
+ return 0;
+}
+
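+/*
+ * Usage sketch (illustrative): applications reach ixgbe_dev_get_dcb_info()
+ * through the generic ethdev API:
+ *
+ *   struct rte_eth_dcb_info info;
+ *   rte_eth_dev_get_dcb_info(port_id, &info);
+ */
+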
static struct rte_driver rte_ixgbe_driver = {
.type = PMD_PDEV,
.init = rte_ixgbe_pmd_init,