struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstats *xstats, unsigned n);
+static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *xstats, unsigned n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
-static int ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
uint16_t queue, int on);
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
-static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
+static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
};
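+/* Descriptor ring size limits, reported to applications through
+ * dev_infos_get(); intended so queue setup can validate a requested
+ * ring size against nb_min/nb_max/nb_align.
+ */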
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = IXGBE_MAX_RING_DESC,
+ .nb_min = IXGBE_MIN_RING_DESC,
+ .nb_align = IXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = IXGBE_MAX_RING_DESC,
+ .nb_min = IXGBE_MIN_RING_DESC,
+ .nb_align = IXGBE_TXD_ALIGN,
+};
+
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.dev_configure = ixgbe_dev_configure,
.dev_start = ixgbe_dev_start,
.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
.filter_ctrl = ixgbe_dev_filter_ctrl,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+ .rxq_info_get = ixgbe_rxq_info_get,
+ .txq_info_get = ixgbe_txq_info_get,
.timesync_enable = ixgbe_timesync_enable,
.timesync_disable = ixgbe_timesync_disable,
.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
.get_eeprom_length = ixgbe_get_eeprom_length,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
+ .get_dcb_info = ixgbe_dev_get_dcb_info,
};
/*
.dev_stop = ixgbevf_dev_stop,
.link_update = ixgbe_dev_link_update,
.stats_get = ixgbevf_dev_stats_get,
+ .xstats_get = ixgbevf_dev_xstats_get,
.stats_reset = ixgbevf_dev_stats_reset,
+ .xstats_reset = ixgbevf_dev_stats_reset,
.dev_close = ixgbevf_dev_close,
.dev_infos_get = ixgbevf_dev_info_get,
.mtu_set = ixgbevf_dev_set_mtu,
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+ .rxq_info_get = ixgbe_rxq_info_get,
+ .txq_info_get = ixgbe_txq_info_get,
.mac_addr_set = ixgbevf_set_default_mac_addr,
.get_reg_length = ixgbevf_get_reg_length,
.get_reg = ixgbevf_get_regs,
+ .reta_update = ixgbe_dev_rss_reta_update,
+ .reta_query = ixgbe_dev_rss_reta_query,
+ .rss_hash_update = ixgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
};
/* Store statistics names and their offsets in the stats structure */
};
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
- {"rx_illegal_byte_err", offsetof(struct ixgbe_hw_stats, errbc)},
- {"rx_len_err", offsetof(struct ixgbe_hw_stats, rlec)},
- {"rx_undersize_count", offsetof(struct ixgbe_hw_stats, ruc)},
- {"rx_oversize_count", offsetof(struct ixgbe_hw_stats, roc)},
- {"rx_fragment_count", offsetof(struct ixgbe_hw_stats, rfc)},
- {"rx_jabber_count", offsetof(struct ixgbe_hw_stats, rjc)},
- {"l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
- {"mac_local_fault", offsetof(struct ixgbe_hw_stats, mlfc)},
- {"mac_remote_fault", offsetof(struct ixgbe_hw_stats, mrfc)},
- {"mac_short_pkt_discard", offsetof(struct ixgbe_hw_stats, mspdc)},
- {"fccrc_error", offsetof(struct ixgbe_hw_stats, fccrc)},
- {"fcoe_drop", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
- {"fc_last_error", offsetof(struct ixgbe_hw_stats, fclast)},
- {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
- {"rx_phy_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
- {"mgmt_pkts_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
- {"fdir_match", offsetof(struct ixgbe_hw_stats, fdirmatch)},
- {"fdir_miss", offsetof(struct ixgbe_hw_stats, fdirmiss)},
- {"tx_flow_control_xon", offsetof(struct ixgbe_hw_stats, lxontxc)},
- {"rx_flow_control_xon", offsetof(struct ixgbe_hw_stats, lxonrxc)},
- {"tx_flow_control_xoff", offsetof(struct ixgbe_hw_stats, lxofftxc)},
- {"rx_flow_control_xoff", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
+ {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
+ {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
+ {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
+ {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
+ {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
+ {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
+ {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
+ {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
+ {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
+ {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
+ {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
+ {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
+ {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
+ {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
+ prc1023)},
+ {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
+ prc1522)},
+ {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
+ {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
+ {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
+ {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
+ {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
+ {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
+ {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
+ {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
+ {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
+ {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
+ {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
+ {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
+ {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
+ {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
+ {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
+ {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
+ {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
+ ptc1023)},
+ {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
+ ptc1522)},
+ {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
+ {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
+ {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
+ {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
+
+ {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
+ fdirustat_add)},
+ {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
+ fdirustat_remove)},
+ {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
+ fdirfstat_fadd)},
+ {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
+ fdirfstat_fremove)},
+ {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
+ fdirmatch)},
+ {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
+ fdirmiss)},
+
+ {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
+ {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
+ {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
+ fclast)},
+ {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
+ {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
+ {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
+ {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
+ {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
+ fcoe_noddp)},
+ {"rx_fcoe_no_direct_data_placement_ext_buff",
+ offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
+
+ {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
+ lxontxc)},
+ {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
+ lxonrxc)},
+ {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
+ lxofftxc)},
+ {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
+ lxoffrxc)},
+ {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
+};
+
+#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
+ sizeof(rte_ixgbe_stats_strings[0]))
+
+/* Per-queue statistics: 7 counters exist for the first 8 queues/priorities
+ * (rnbc, mpc, pxonrxc, pxontxc, pxoffrxc, pxofftxc, pxon2offc) and 5 more
+ * for the first 16 queues (qprc, qbrc, qptc, qbtc, qprdc).
+ */
+#define IXGBE_NB_8_PER_Q_STATS (8 * 7)
+#define IXGBE_NB_16_PER_Q_STATS (16 * 5)
+#define IXGBE_NB_Q_STATS (IXGBE_NB_8_PER_Q_STATS + IXGBE_NB_16_PER_Q_STATS)
+
+#define IXGBE_NB_XSTATS (IXGBE_NB_HW_STATS + IXGBE_NB_Q_STATS)
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
+ {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};
-#define IXGBE_NB_XSTATS (sizeof(rte_ixgbe_stats_strings) / \
- sizeof(rte_ixgbe_stats_strings[0]))
+#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
+ sizeof(rte_ixgbevf_stats_strings[0]))
/**
* Atomically reads the link status information from global
}
pci_dev = eth_dev->pci_dev;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ ixgbe_dev_interrupt_handler,
+ (void *)eth_dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(&pci_dev->intr_handle);
+
/* enable support intr */
ixgbe_enable_intr(eth_dev);
pci_dev = eth_dev->pci_dev;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
+static int
+ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
+{
+ switch (nb_rx_q) {
+ case 1:
+ case 2:
+ RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+ break;
+ case 4:
+ RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
+
+ return 0;
+}
+
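+/* Worked example for ixgbe_check_vf_rss_rxq_num(): with max_vfs = 31 and
+ * nb_rx_q = 2, the device runs in 64-pool mode, each pool owns 2 queues,
+ * and the PF's default pool starts at queue index 31 * 2 = 62.
+ */
+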
+static int
+ixgbe_check_mq_mode(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+ /* check multi-queue mode */
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ /* DCB/RSS VMDQ in SRIOV mode is not implemented yet */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " unsupported mq_mode rx %d.",
+ dev_conf->rxmode.mq_mode);
+ return -EINVAL;
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
+ if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " invalid queue number"
+ " for VMDQ RSS, allowed"
+ " value are 1, 2 or 4.");
+ return -EINVAL;
+ }
+ break;
+ case ETH_MQ_RX_VMDQ_ONLY:
+ case ETH_MQ_RX_NONE:
+ /* if no mq mode was configured, use the default scheme */
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+ break;
+ default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+ /* SRIOV only works with VMDq enabled */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " wrong mq_mode rx %d.",
+ dev_conf->rxmode.mq_mode);
+ return -EINVAL;
+ }
+
+ switch (dev_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ /* DCB VMDQ in SRIOV mode is not implemented yet */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " unsupported VMDQ mq_mode tx %d.",
+ dev_conf->txmode.mq_mode);
+ return -EINVAL;
+ default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+ break;
+ }
+
+ /* check valid queue number */
+ if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+ (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " queue number must less equal to %d.",
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+ return -EINVAL;
+ }
+ } else {
+ /* check configuration for VMDq+DCB mode */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_conf *conf;
+
+ if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
+ IXGBE_VMDQ_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+ " nb_queue_pools must be %d or %d.",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return -EINVAL;
+ }
+ }
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+ if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
+ IXGBE_VMDQ_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+ " nb_queue_pools != %d and"
+ " nb_queue_pools != %d.",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return -EINVAL;
+ }
+ }
+
+ /* For DCB mode check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ const struct rte_eth_dcb_rx_conf *conf;
+
+ if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
+ IXGBE_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+ " and nb_tcs != %d.",
+ ETH_4_TCS, ETH_8_TCS);
+ return -EINVAL;
+ }
+ }
+
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ const struct rte_eth_dcb_tx_conf *conf;
+
+ if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
+ IXGBE_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+ " and nb_tcs != %d.",
+ ETH_4_TCS, ETH_8_TCS);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
static int
ixgbe_dev_configure(struct rte_eth_dev *dev)
{
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ int ret;
PMD_INIT_FUNC_TRACE();
+ /* multiple queue mode checking */
+ ret = ixgbe_check_mq_mode(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
+ ret);
+ return ret;
+ }
/* set flag to update link status after init */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
ixgbe_pf_host_configure(dev);
/* check and configure queue intr-vector mapping */
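+ /* When SR-IOV is active, per-queue event fds are only set up if the
+ * interrupt handle supports multiple vectors; otherwise the single
+ * vector must stay dedicated to mailbox/link events.
+ */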
- if (dev->data->dev_conf.intr_conf.rxq != 0)
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = dev->data->nb_rx_queues;
-
- if (rte_intr_efd_enable(intr_handle, intr_vector))
- return -1;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
intr_handle->intr_vec =
rte_zmalloc("intr_vec",
- dev->data->nb_rx_queues * sizeof(int),
- 0);
+ dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec\n", dev->data->nb_rx_queues);
skip_link_setup:
- /* check if lsc interrupt is enabled */
- if (dev->data->dev_conf.intr_conf.lsc != 0) {
- if (rte_intr_allow_others(intr_handle)) {
- rte_intr_callback_register(intr_handle,
- ixgbe_dev_interrupt_handler,
- (void *)dev);
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
ixgbe_dev_lsc_interrupt_setup(dev);
- } else
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler,
+ (void *)dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't be enabled because"
" no intr multiplex is available\n");
}
/* check if rxq interrupt is enabled */
- if (dev->data->dev_conf.intr_conf.rxq != 0)
+ if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+ rte_intr_dp_is_en(intr_handle))
ixgbe_dev_rxq_interrupt_setup(dev);
/* enable uio/vfio intr/eventfd mapping */
memset(filter_info->fivetuple_mask, 0,
sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+ if (!rte_intr_allow_others(intr_handle))
+ /* fall back to the default interrupt handler */
+ rte_intr_callback_register(intr_handle,
+ ixgbe_dev_interrupt_handler,
+ (void *)dev);
+
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
if (intr_handle->intr_vec != NULL) {
hw_stats->illerrc +
hw_stats->errbc +
hw_stats->xec +
- hw_stats->mlfc +
- hw_stats->mrfc +
hw_stats->rfc +
hw_stats->fccrc +
hw_stats->fclast;
total_qprdc = 0;
ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
- &total_qprc, &total_qprdc);
+ &total_qprc, &total_qprdc);
/* If this is a reset, xstats is NULL and we have cleared the
* registers by reading them.
if (!xstats)
return 0;
- /* Extended stats */
- for (i = 0; i < IXGBE_NB_XSTATS; i++) {
- snprintf(xstats[i].name, sizeof(xstats[i].name),
- "%s", rte_ixgbe_stats_strings[i].name);
- xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
- rte_ixgbe_stats_strings[i].offset);
+ /* Extended stats from ixgbe_hw_stats */
+ count = 0;
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
+ rte_ixgbe_stats_strings[i].name);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_stats_strings[i].offset);
+ count++;
+ }
+
+ /* Per-queue stats: these registers exist only for the first 8
+ * queues/priorities */
+ for (i = 0; i < 8; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_q%u_mbuf_allocation_errors", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, rnbc[i]));
+ count++;
+
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_q%u_missed_packets", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, mpc[i]));
+ count++;
+
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_q%u_xon_priority_packets", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, pxonrxc[i]));
+ count++;
+
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "tx_q%u_xon_priority_packets", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, pxontxc[i]));
+ count++;
+
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_q%u_xoff_priority_packets", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, pxoffrxc[i]));
+ count++;
+
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "tx_q%u_xoff_priority_packets", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, pxofftxc[i]));
+ count++;
+
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "xx_q%u_xon_to_xoff_priority_packets", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, pxon2offc[i]));
+ count++;
+ }
+
+ for (i = 0; i < 16; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_q%u_packets", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, qprc[i]));
+ count++;
+
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_q%u_bytes", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, qbrc[i]));
+ count++;
+
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "tx_q%u_packets", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, qptc[i]));
+ count++;
+
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "tx_q%u_bytes", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, qbtc[i]));
+ count++;
+
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_q%u_dropped", i);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ offsetof(struct ixgbe_hw_stats, qprdc[i]));
+ count++;
}
return count;
}
static void
-ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+ixgbevf_update_stats(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
/* Rx Multicast Packet */
UPDATE_VF_STAT(IXGBE_VFMPRC,
hw_stats->last_vfmprc, hw_stats->vfmprc);
+}
+
+static int
+ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ unsigned i;
+
+ if (n < IXGBEVF_NB_XSTATS)
+ return IXGBEVF_NB_XSTATS;
+
+ ixgbevf_update_stats(dev);
+
+ if (!xstats)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
+ snprintf(xstats[i].name, sizeof(xstats[i].name),
+ "%s", rte_ixgbevf_stats_strings[i].name);
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbevf_stats_strings[i].offset);
+ }
+
+ return IXGBEVF_NB_XSTATS;
+}
+
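+/* Typical caller pattern for the xstats API (a sketch; error handling
+ * omitted): query the required size with a NULL array, then fetch.
+ *
+ *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
+ *	struct rte_eth_xstats *xs = malloc(nb * sizeof(*xs));
+ *	rte_eth_xstats_get(port_id, xs, nb);
+ */
+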
+static void
+ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ ixgbevf_update_stats(dev);
if (stats == NULL)
return;
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
}
/* return 0 means link status changed, -1 means not changed */
return 0;
}
-static int
-ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
-{
- uint32_t eicr;
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_interrupt *intr =
- IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
- /* clear all cause mask */
- ixgbevf_intr_disable(hw);
-
- /* read-on-clear nic registers here */
- eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
- PMD_DRV_LOG(INFO, "eicr %x", eicr);
-
- intr->flags = 0;
-
- /* set flag for async link update */
- if (eicr & IXGBE_EICR_LSC)
- intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
-
- return 0;
-}
-
/**
* It gets and then prints the link status.
*
return 0;
}
-static int
-ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
-{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- PMD_DRV_LOG(DEBUG, "enable intr immediately");
- ixgbevf_intr_enable(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
- return 0;
-}
-
/**
* Interrupt handler which shall be registered for alarm callback for delayed
* handling specific interrupt to wait for the stable nic state. As the
ixgbe_dev_interrupt_action(dev);
}
-static void
-ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
-{
- struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-
- ixgbevf_dev_interrupt_get_status(dev);
- ixgbevf_dev_interrupt_action(dev);
-}
-
static int
ixgbe_dev_led_on(struct rte_eth_dev *dev)
{
uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
+
+ if (!ixgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+
sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
ixgbevf_dev_rxtx_start(dev);
/* check and configure queue intr-vector mapping */
- if (dev->data->dev_conf.intr_conf.rxq != 0)
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = dev->data->nb_rx_queues;
-
- if (rte_intr_efd_enable(intr_handle, intr_vector))
- return -1;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
intr_handle->intr_vec =
}
ixgbevf_configure_msix(dev);
- if (dev->data->dev_conf.intr_conf.lsc != 0) {
- if (rte_intr_allow_others(intr_handle))
- rte_intr_callback_register(intr_handle,
- ixgbevf_dev_interrupt_handler,
- (void *)dev);
- else
- PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
- }
-
rte_intr_enable(intr_handle);
/* Re-enable interrupt for VF */
ixgbevf_dev_close(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev;
PMD_INIT_FUNC_TRACE();
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
- pci_dev = dev->pci_dev;
- if (pci_dev->intr_handle.intr_vec) {
- rte_free(pci_dev->intr_handle.intr_vec);
- pci_dev->intr_handle.intr_vec = NULL;
- }
}
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
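+ /* All VF queue interrupts are routed to the misc MSI-X vector, so
+ * enable/disable toggles that vector's VTEIMS bit; queue_id is
+ * intentionally unused.
+ */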
mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
- mask |= (1 << queue_id);
+ mask |= (1 << IXGBE_MISC_VEC_ID);
+ RTE_SET_USED(queue_id);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
rte_intr_enable(&dev->pci_dev->intr_handle);
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
- mask &= ~(1 << queue_id);
+ mask &= ~(1 << IXGBE_MISC_VEC_ID);
+ RTE_SET_USED(queue_id);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
return 0;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t q_idx;
- uint32_t vector_idx = 0;
+ uint32_t vector_idx = IXGBE_MISC_VEC_ID;
/* won't configure msix register if no mapping is done
* between intr vector and event fd.
intr_handle->intr_vec[q_idx] = vector_idx;
}
- /* Configure VF Rx queue ivar */
+ /* Configure the VF misc (other causes) ivar */
ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
}
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t queue_id, vec = 0;
+ uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
+ uint32_t vec = IXGBE_MISC_VEC_ID;
uint32_t mask;
uint32_t gpie;
if (!rte_intr_dp_is_en(intr_handle))
return;
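+ /* If "other" interrupts can use a separate vector, vector 0 stays
+ * dedicated to misc/link events and Rx queue vectors start at
+ * IXGBE_RX_VEC_START.
+ */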
+ if (rte_intr_allow_others(intr_handle))
+ vec = base = IXGBE_RX_VEC_START;
+
/* setup GPIE for MSI-x mode */
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
/* by default, 1:1 mapping */
ixgbe_set_ivar_map(hw, 0, queue_id, vec);
intr_handle->intr_vec[queue_id] = vec;
- if (vec < intr_handle->nb_efd - 1)
+ if (vec < base + intr_handle->nb_efd - 1)
vec++;
}
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
- intr_handle->max_intr - 1);
+ IXGBE_MISC_VEC_ID);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- ixgbe_set_ivar_map(hw, -1, 1, intr_handle->max_intr - 1);
+ ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
break;
default:
break;
}
- IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id),
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
/* set up to autoclear timer, and the vectors */
}
}
+uint32_t
+ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type)
+{
+ switch (mac_type) {
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return IXGBE_VFMRQC;
+ default:
+ return IXGBE_MRQC;
+ }
+}
+
+uint32_t
+ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i)
+{
+ switch (mac_type) {
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return IXGBE_VFRSSRK(i);
+ default:
+ return IXGBE_RSSRK(i);
+ }
+}
+
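+/* RETA/RSS-key updates are rejected on 82599 and X540 VFs, which expose
+ * no VF-writable RSS registers (X550 VFs have VFMRQC/VFRSSRK above).
+ */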
+bool
+ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type)
+{
+ switch (mac_type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+static int
+ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct ixgbe_dcb_config *dcb_config =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+ else
+ dcb_info->nb_tcs = 1;
+
+ if (dcb_config->vt_mode) { /* VT is enabled */
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+ for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+ for (j = 0; j < dcb_info->nb_tcs; j++) {
+ dcb_info->tc_queue.tc_rxq[i][j].base =
+ i * dcb_info->nb_tcs + j;
+ dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
+ dcb_info->tc_queue.tc_txq[i][j].base =
+ i * dcb_info->nb_tcs + j;
+ dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+ }
+ }
+ } else { /* VT is disabled */
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
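+ /* Fixed Tx queue allocation for DCB without VT, per the 82599
+ * queue layout: 4 TCs -> 64/32/16/16 queues,
+ * 8 TCs -> 32/32/16/16/8/8/8/8 queues.
+ */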
+ if (dcb_info->nb_tcs == ETH_4_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 64;
+ dcb_info->tc_queue.tc_txq[0][2].base = 96;
+ dcb_info->tc_queue.tc_txq[0][3].base = 112;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 32;
+ dcb_info->tc_queue.tc_txq[0][2].base = 64;
+ dcb_info->tc_queue.tc_txq[0][3].base = 80;
+ dcb_info->tc_queue.tc_txq[0][4].base = 96;
+ dcb_info->tc_queue.tc_txq[0][5].base = 104;
+ dcb_info->tc_queue.tc_txq[0][6].base = 112;
+ dcb_info->tc_queue.tc_txq[0][7].base = 120;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+ }
+ }
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
+ }
+ return 0;
+}
+
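+/* Applications retrieve this through the ethdev API (a sketch, assuming
+ * the rte_eth_dev_get_dcb_info() wrapper added alongside this op):
+ *
+ *	struct rte_eth_dcb_info info;
+ *	if (rte_eth_dev_get_dcb_info(port_id, &info) == 0)
+ *		printf("%u TCs\n", info.nb_tcs);
+ */
+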
static struct rte_driver rte_ixgbe_driver = {
.type = PMD_PDEV,
.init = rte_ixgbe_pmd_init,