+/*
+ * Report the names of the VF extended statistics.
+ *
+ * @xstats_names: caller-supplied array, or NULL for a pure size query.
+ * @limit: number of entries in @xstats_names.
+ *
+ * Returns TXGBEVF_NB_XSTATS (the number of xstats available), or
+ * -ENOMEM when a non-NULL array smaller than that count is supplied.
+ */
+static int txgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned int limit)
+{
+ unsigned int i;
+
+ /* A non-NULL buffer must be large enough to hold every name. */
+ if (limit < TXGBEVF_NB_XSTATS && xstats_names != NULL)
+ return -ENOMEM;
+
+ /* Copy names from the static table; skip when only counting. */
+ if (xstats_names != NULL)
+ for (i = 0; i < TXGBEVF_NB_XSTATS; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_txgbevf_stats_strings[i].name);
+ return TXGBEVF_NB_XSTATS;
+}
+
+/*
+ * Accumulate the per-queue VF hardware counters into the soft stats.
+ *
+ * The TXGBE_UPDCNT32/TXGBE_UPDCNT36 macros presumably read the 32-bit
+ * packet / 36-bit octet queue registers and fold the delta since the
+ * stored last_* snapshot into the running total -- TODO confirm against
+ * the macro definitions.
+ */
+static void
+txgbevf_update_stats(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
+ TXGBE_DEV_STATS(dev);
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ /* Good Rx packet, include VF loopback */
+ TXGBE_UPDCNT32(TXGBE_QPRXPKT(i),
+ hw_stats->qp[i].last_vfgprc, hw_stats->qp[i].vfgprc);
+
+ /* Good Rx octets, include VF loopback */
+ TXGBE_UPDCNT36(TXGBE_QPRXOCTL(i),
+ hw_stats->qp[i].last_vfgorc, hw_stats->qp[i].vfgorc);
+
+ /* Rx Multicst Packet */
+ TXGBE_UPDCNT32(TXGBE_QPRXMPKT(i),
+ hw_stats->qp[i].last_vfmprc, hw_stats->qp[i].vfmprc);
+ }
+ /* NOTE(review): rx_loaded looks like a "Rx counters need (re)load"
+ * flag cleared once the Rx side has been folded in -- confirm.
+ */
+ hw->rx_loaded = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ /* Good Tx packet, include VF loopback */
+ TXGBE_UPDCNT32(TXGBE_QPTXPKT(i),
+ hw_stats->qp[i].last_vfgptc, hw_stats->qp[i].vfgptc);
+
+ /* Good Tx octets, include VF loopback */
+ TXGBE_UPDCNT36(TXGBE_QPTXOCTL(i),
+ hw_stats->qp[i].last_vfgotc, hw_stats->qp[i].vfgotc);
+ }
+ /* NOTE(review): same assumption for offset_loaded on the Tx side. */
+ hw->offset_loaded = 0;
+}
+
+/*
+ * Return every VF extended statistic, refreshing the HW counters first.
+ *
+ * @xstats: output array, or NULL for a refresh-only call.
+ * @n: capacity of @xstats.
+ *
+ * Returns TXGBEVF_NB_XSTATS when stats were written (or when @n is too
+ * small, so the caller can resize), 0 after a NULL refresh-only call.
+ */
+static int
+txgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
+ TXGBE_DEV_STATS(dev);
+ const char *base = (const char *)hw_stats;
+ unsigned int idx;
+
+ /* Undersized array: report the required size without touching it. */
+ if (n < TXGBEVF_NB_XSTATS)
+ return TXGBEVF_NB_XSTATS;
+
+ txgbevf_update_stats(dev);
+
+ /* Refresh-only invocation. */
+ if (xstats == NULL)
+ return 0;
+
+ /* Each entry is a u64 located at a fixed offset into hw_stats. */
+ for (idx = 0; idx < TXGBEVF_NB_XSTATS; idx++) {
+ xstats[idx].id = idx;
+ xstats[idx].value = *(const uint64_t *)
+ (base + rte_txgbevf_stats_strings[idx].offset);
+ }
+
+ return TXGBEVF_NB_XSTATS;
+}
+
+/*
+ * Fill the basic rte_eth_stats from the VF per-queue soft counters.
+ *
+ * Returns 0 on success, -EINVAL when @stats is NULL (the HW counters
+ * are still refreshed in that case -- stats_reset depends on it).
+ */
+static int
+txgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
+ TXGBE_DEV_STATS(dev);
+ uint64_t rx_pkts = 0, rx_bytes = 0, tx_pkts = 0, tx_bytes = 0;
+ uint32_t qp;
+
+ /* Refresh before the NULL check: a NULL call is a pure latch. */
+ txgbevf_update_stats(dev);
+
+ if (stats == NULL)
+ return -EINVAL;
+
+ /* Sum over the 8 queue pairs a VF may own. */
+ for (qp = 0; qp < 8; qp++) {
+ rx_pkts += hw_stats->qp[qp].vfgprc;
+ rx_bytes += hw_stats->qp[qp].vfgorc;
+ tx_pkts += hw_stats->qp[qp].vfgptc;
+ tx_bytes += hw_stats->qp[qp].vfgotc;
+ }
+
+ stats->ipackets = rx_pkts;
+ stats->ibytes = rx_bytes;
+ stats->opackets = tx_pkts;
+ stats->obytes = tx_bytes;
+
+ return 0;
+}
+
+/*
+ * Reset the VF basic statistics.
+ *
+ * Latches the HW registers into the soft counters first (NULL stats
+ * pointer means "refresh only"), then clears just the accumulated
+ * totals -- the last_* shadow values must survive so the next update
+ * computes correct deltas.  Always returns 0.
+ */
+static int
+txgbevf_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
+ TXGBE_DEV_STATS(dev);
+ uint32_t qp;
+
+ /* Sync HW registers to the last stats snapshot. */
+ txgbevf_dev_stats_get(dev, NULL);
+
+ /* Zero only the running totals for the 8 VF queue pairs. */
+ for (qp = 0; qp < 8; qp++) {
+ hw_stats->qp[qp].vfgprc = 0;
+ hw_stats->qp[qp].vfgorc = 0;
+ hw_stats->qp[qp].vfgptc = 0;
+ hw_stats->qp[qp].vfgotc = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Report the VF port capabilities to the ethdev layer.
+ *
+ * Queue/MAC limits come from the shared hw struct, offload capability
+ * flags from the txgbe offload helpers shared with the PF driver.
+ * Unnamed fields of the default rx/tx config compound literals are
+ * zero-initialized per C semantics.  Always returns 0.
+ */
+static int
+txgbevf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+ dev_info->min_rx_bufsize = 1024;
+ dev_info->max_rx_pktlen = TXGBE_FRAME_SIZE_MAX;
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
+ dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->max_vmdq_pools = ETH_64_POOLS;
+ /* Per-queue Rx offloads are a subset of the port-level capability. */
+ dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
+ dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
+
+ /* Defaults applied when the application passes a NULL rxconf. */
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
+ .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
+ .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ /* Defaults applied when the application passes a NULL txconf. */
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
+ .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
+ .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ return 0;
+}
+
+/*
+ * Link status query: defer to the implementation shared with the PF
+ * driver.
+ *
+ * @wait_to_complete: passed through unchanged; non-zero presumably
+ * means block until the link state settles -- confirm in
+ * txgbe_dev_link_update_share().
+ */
+static int
+txgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return txgbe_dev_link_update_share(dev, wait_to_complete);
+}
+