ethdev: add device flag to bypass auto-filled queue xstats
author     Ferruh Yigit <ferruh.yigit@intel.com>
           Wed, 14 Oct 2020 02:26:47 +0000 (03:26 +0100)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Fri, 16 Oct 2020 21:27:15 +0000 (23:27 +0200)
Queue stats are stored in 'struct rte_eth_stats' as fixed-size arrays whose
size is defined by the 'RTE_ETHDEV_QUEUE_STAT_CNTRS' compile time flag.
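
(For reference, the queue related fields are the arrays below, paraphrased
from 'rte_ethdev.h'; only the per-queue members are shown.)

    struct rte_eth_stats {
            /* ... aggregate counters (ipackets, opackets, ...) ... */
            uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /* per-queue Rx packets */
            uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /* per-queue Tx packets */
            uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /* per-queue Rx bytes */
            uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /* per-queue Tx bytes */
            uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /* per-queue Rx dropped packets */
    };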

As a result of a technical board discussion, it was decided to remove the
queue statistics from 'struct rte_eth_stats' in the long term.

Instead, PMDs should expose the queue statistics via xstats, which gives
more flexibility in the number of queues supported.

Currently the queue stats in the xstats are filled by the ethdev layer from
the basic stats; once the queue stats are removed from the basic stats, the
responsibility to fill the relevant xstats will be pushed to the PMDs.
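
As a rough sketch of that direction (not part of this patch; the
'rx_qN_packets' name format, the callback body and the helper below are
illustrative only), a PMD would report its per-queue counters from its own
xstats callbacks:

    /* Sketch only; assumes <stdio.h> and the ethdev driver header. */
    static int
    pmd_xstats_get_names(struct rte_eth_dev *dev,
                         struct rte_eth_xstat_name *names, unsigned int n)
    {
            unsigned int i, count = dev->data->nb_rx_queues;

            if (names == NULL || n < count)
                    return count; /* tell the caller how many entries are needed */
            for (i = 0; i < count; i++)
                    snprintf(names[i].name, sizeof(names[i].name),
                             "rx_q%u_packets", i);
            return count;
    }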

For the transition period, a temporary 'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS'
device flag is created. Initially all PMDs using xstats set this flag;
PMDs that implement the queue stats in their xstats should clear the flag,
as sketched below.
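
Concretely (the set line below is what this patch adds to each driver; the
clear line is only a sketch of the follow-up step a converted PMD would take):

    /* PMD init, today: keep relying on the ethdev-layer autofill. */
    eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

    /* PMD init, once the driver reports queue stats via its own xstats
     * callbacks: stop setting the flag, or clear it if it was inherited.
     */
    eth_dev->data->dev_flags &= ~RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;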

When all PMDs have switched to xstats for the queue stats, the queue stats
related fields will be removed from 'struct rte_eth_stats', together with
the 'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS' flag.
Later the 'RTE_ETHDEV_QUEUE_STAT_CNTRS' compile time flag can also be
removed.
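
Applications are not affected by the switch as long as they read per-queue
counters through the xstats API; a minimal sketch (includes and error
handling omitted):

    /* Query the number of xstats, then fetch names and values. */
    int nb = rte_eth_xstats_get(port_id, NULL, 0);
    struct rte_eth_xstat *xstats = malloc(nb * sizeof(*xstats));
    struct rte_eth_xstat_name *names = malloc(nb * sizeof(*names));

    rte_eth_xstats_get_names(port_id, names, nb);
    rte_eth_xstats_get(port_id, xstats, nb);
    /* names[i].name / xstats[i].value now include the per-queue counters. */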

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
Acked-by: Xiao Wang <xiao.w.wang@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
60 files changed:
drivers/net/af_packet/rte_eth_af_packet.c
drivers/net/af_xdp/rte_eth_af_xdp.c
drivers/net/ark/ark_ethdev.c
drivers/net/atlantic/atl_ethdev.c
drivers/net/avp/avp_ethdev.c
drivers/net/axgbe/axgbe_ethdev.c
drivers/net/bnx2x/bnx2x_ethdev.c
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_reps.c
drivers/net/bonding/rte_eth_bond_pmd.c
drivers/net/cxgbe/cxgbe_ethdev.c
drivers/net/dpaa/dpaa_ethdev.c
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/e1000/em_ethdev.c
drivers/net/e1000/igb_ethdev.c
drivers/net/ena/ena_ethdev.c
drivers/net/enetc/enetc_ethdev.c
drivers/net/enic/enic_ethdev.c
drivers/net/enic/enic_vf_representor.c
drivers/net/failsafe/failsafe.c
drivers/net/fm10k/fm10k_ethdev.c
drivers/net/hinic/hinic_pmd_ethdev.c
drivers/net/hns3/hns3_ethdev.c
drivers/net/hns3/hns3_ethdev_vf.c
drivers/net/i40e/i40e_ethdev.c
drivers/net/i40e/i40e_ethdev_vf.c
drivers/net/i40e/i40e_vf_representor.c
drivers/net/iavf/iavf_ethdev.c
drivers/net/ice/ice_dcf_ethdev.c
drivers/net/ice/ice_ethdev.c
drivers/net/igc/igc_ethdev.c
drivers/net/ionic/ionic_ethdev.c
drivers/net/ipn3ke/ipn3ke_representor.c
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/kni/rte_eth_kni.c
drivers/net/liquidio/lio_ethdev.c
drivers/net/memif/rte_eth_memif.c
drivers/net/mlx4/mlx4.c
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mvneta/mvneta_ethdev.c
drivers/net/mvpp2/mrvl_ethdev.c
drivers/net/netvsc/hn_ethdev.c
drivers/net/nfb/nfb_ethdev.c
drivers/net/nfp/nfp_net.c
drivers/net/null/rte_eth_null.c
drivers/net/octeontx/octeontx_ethdev.c
drivers/net/octeontx2/otx2_ethdev.c
drivers/net/pcap/rte_eth_pcap.c
drivers/net/pfe/pfe_ethdev.c
drivers/net/qede/qede_ethdev.c
drivers/net/ring/rte_eth_ring.c
drivers/net/sfc/sfc_ethdev.c
drivers/net/szedata2/rte_eth_szedata2.c
drivers/net/tap/rte_eth_tap.c
drivers/net/thunderx/nicvf_ethdev.c
drivers/net/vhost/rte_eth_vhost.c
drivers/net/virtio/virtio_ethdev.c
drivers/net/vmxnet3/vmxnet3_ethdev.c
lib/librte_ethdev/rte_ethdev.c
lib/librte_ethdev/rte_ethdev.h

index cb1c39b..671ee87 100644 (file)
@@ -860,6 +860,7 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
        data->nb_tx_queues = (uint16_t)nb_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &(*internals)->eth_addr;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        (*eth_dev)->dev_ops = &ops;
 
index 1c1e3ca..df2767b 100644 (file)
@@ -1562,6 +1562,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
        eth_dev->data->dev_private = internals;
        eth_dev->data->dev_link = pmd_link;
        eth_dev->data->mac_addrs = &internals->eth_addr;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        eth_dev->dev_ops = &ops;
        eth_dev->rx_pkt_burst = eth_af_xdp_rx;
        eth_dev->tx_pkt_burst = eth_af_xdp_tx;
index 1dcc059..168b365 100644 (file)
@@ -256,6 +256,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
                return ret;
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        rte_eth_copy_pci_info(dev, pci_dev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Use dummy function until setup */
        dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
@@ -383,6 +384,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
                eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;
 
                rte_eth_copy_pci_info(eth_dev, pci_dev);
+               eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
                eth_dev->data->mac_addrs = rte_zmalloc(name,
                                                RTE_ETHER_ADDR_LEN, 0);
index 419ee47..b071677 100644 (file)
@@ -380,6 +380,8 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
index 67f1662..5f8187b 100644 (file)
@@ -974,6 +974,7 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Check current migration status */
        if (avp_dev_migration_pending(eth_dev)) {
index 6e9722a..cfe6aba 100644 (file)
@@ -1972,6 +1972,8 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        pdata = eth_dev->data->dev_private;
        /* initial state */
        rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
index 28f8aae..d35c75a 100644 (file)
@@ -648,6 +648,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        sc->pcie_bus    = pci_dev->addr.bus;
        sc->pcie_device = pci_dev->addr.devid;
index 7bc610b..6c12369 100644 (file)
@@ -5997,6 +5997,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        bp = eth_dev->data->dev_private;
 
index 935f2c0..b4566c9 100644 (file)
@@ -184,7 +184,8 @@ int bnxt_representor_init(struct rte_eth_dev *eth_dev, void *params)
        vf_rep_bp->rep_fc_r2f = rep_params->rep_fc_r2f;
        vf_rep_bp->rep_fc_f2r = rep_params->rep_fc_f2r;
 
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        eth_dev->data->representor_id = rep_params->vf_id;
 
        rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
index 9d5eef5..057b1ad 100644 (file)
@@ -3234,7 +3234,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
        }
 
        eth_dev->dev_ops = &default_dev_ops;
-       eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+       eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rte_spinlock_init(&internals->lock);
        rte_spinlock_init(&internals->lsc_lock);
index a3a0d6d..98d0362 100644 (file)
@@ -1262,6 +1262,8 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
        adapter = rte_zmalloc(name, sizeof(*adapter), 0);
        if (!adapter)
index c915b7b..f00279e 100644 (file)
@@ -2219,6 +2219,8 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
        if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Invoke PMD device initialization function */
        diag = dpaa_dev_init(eth_dev);
        if (diag == 0) {
index f9d9a93..04e60c5 100644 (file)
@@ -2794,6 +2794,8 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
        if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Invoke PMD device initialization function */
        diag = dpaa2_dev_init(eth_dev);
        if (diag == 0) {
index 8e02868..8ee9422 100644 (file)
@@ -265,6 +265,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;
index 43181d3..ac4b8f1 100644 (file)
@@ -765,6 +765,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
 
@@ -959,6 +960,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
 
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
index d2e7b80..c513faf 100644 (file)
@@ -1779,6 +1779,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        memset(adapter, 0, sizeof(struct ena_adapter));
        ena_dev = &adapter->ena_dev;
 
index 910e40f..6ff3022 100644 (file)
@@ -885,6 +885,8 @@ enetc_dev_init(struct rte_eth_dev *eth_dev)
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Retrieving and storing the HW base address of device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;
index 4a34c0e..8d0054f 100644 (file)
@@ -1300,6 +1300,7 @@ static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
 
        pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pdev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        enic->pdev = pdev;
        addr = &pdev->addr;
 
index 984a754..c2c03c0 100644 (file)
@@ -672,7 +672,8 @@ int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
 
        eth_dev->device->driver = pf->rte_dev->device->driver;
        eth_dev->dev_ops = &enic_vf_representor_dev_ops;
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        eth_dev->data->representor_id = vf->vf_id;
        eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
                sizeof(struct rte_ether_addr) *
index b921e10..2e9a9c7 100644 (file)
@@ -264,7 +264,8 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
                mac->addr_bytes[0], mac->addr_bytes[1],
                mac->addr_bytes[2], mac->addr_bytes[3],
                mac->addr_bytes[4], mac->addr_bytes[5]);
-       dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+       dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC |
+                               RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        PRIV(dev)->intr_handle = (struct rte_intr_handle){
                .fd = -1,
                .type = RTE_INTR_HANDLE_EXT,
index 93c6d8c..dc2979b 100644 (file)
@@ -3076,6 +3076,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
        }
 
        rte_eth_copy_pci_info(dev, pdev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        memset(macvlan, 0, sizeof(*macvlan));
index 5974368..b694fd8 100644 (file)
@@ -3108,6 +3108,8 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
        memset(nic_dev, 0, sizeof(*nic_dev));
 
index 6869c8e..5a234e2 100644 (file)
@@ -6106,6 +6106,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        ret = hns3_mp_init_primary();
        if (ret) {
                PMD_INIT_LOG(ERR,
index 2b1de8d..d1c3fb8 100644 (file)
@@ -2753,6 +2753,8 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        ret = hns3_mp_init_primary();
        if (ret) {
                PMD_INIT_LOG(ERR,
index a1aaa20..4778aaf 100644 (file)
@@ -1465,6 +1465,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
        intr_handle = &pci_dev->intr_handle;
 
        rte_eth_copy_pci_info(dev, pci_dev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->adapter->eth_dev = dev;
index 7659b18..53154c3 100644 (file)
@@ -1575,6 +1575,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
        }
        i40e_set_default_ptype_table(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
index 791c050..9e40406 100644 (file)
@@ -508,7 +508,8 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
                return -ENODEV;
        }
 
-       ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+       ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        ethdev->data->representor_id = representor->vf_id;
 
        /* Setting the number queues allocated to the VF */
index 3671213..0ef023c 100644 (file)
@@ -1434,6 +1434,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
index 0cc80a2..b0b2ecb 100644 (file)
@@ -906,6 +906,8 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
        if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
index e63e423..c65125f 100644 (file)
@@ -2137,6 +2137,8 @@ ice_dev_init(struct rte_eth_dev *dev)
                return 0;
        }
 
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        ice_set_default_ptype_table(dev);
        pci_dev = RTE_DEV_TO_PCI(dev->device);
        intr_handle = &pci_dev->intr_handle;
index 1bcfb63..802212f 100644 (file)
@@ -1244,6 +1244,7 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
                return 0;
 
        rte_eth_copy_pci_info(dev, pci_dev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->back = pci_dev;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
index ff1971b..600333e 100644 (file)
@@ -1003,6 +1003,7 @@ eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        lif->index = adapter->nlifs;
        lif->eth_dev = eth_dev;
index f15ee07..8a53602 100644 (file)
@@ -2966,7 +2966,8 @@ ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params)
                return -ENODEV;
        }
 
-       ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+       ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
        TAILQ_INSERT_TAIL(&ipn3ke_rpst_list, rpst, next);
index 6f9a1f7..14a254a 100644 (file)
@@ -1118,6 +1118,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 
        rte_atomic32_clear(&ad->link_thread_running);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
@@ -1596,6 +1597,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
                              pci_dev->device.devargs);
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
index f544d54..1696787 100644 (file)
@@ -392,6 +392,7 @@ eth_kni_create(struct rte_vdev_device *vdev,
        data->mac_addrs = &internals->eth_addr;
        data->promiscuous = 1;
        data->all_multicast = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rte_eth_random_addr(internals->eth_addr.addr_bytes);
 
index bd62b75..d4dd376 100644 (file)
@@ -2094,6 +2094,7 @@ lio_eth_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pdev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        if (pdev->mem_resource[0].addr) {
                lio_dev->hw_addr = pdev->mem_resource[0].addr;
index fe72a01..f7ae55f 100644 (file)
@@ -1539,6 +1539,7 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
        data->dev_link = pmd_link;
        data->mac_addrs = ether_addr;
        data->promiscuous = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        eth_dev->dev_ops = &ops;
        eth_dev->device = &vdev->device;
index 0857f5e..34c2bbb 100644 (file)
@@ -1035,6 +1035,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                eth_dev->data->mac_addrs = priv->mac;
                eth_dev->device = &pci_dev->device;
                rte_eth_copy_pci_info(eth_dev, pci_dev);
+               eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
                /* Initialize local interrupt handle for current port. */
                memset(&priv->intr_handle, 0, sizeof(struct rte_intr_handle));
                priv->intr_handle.fd = -1;
index 09d0944..10f6370 100644 (file)
@@ -1277,6 +1277,7 @@ err_secondary:
        priv->dev_data = eth_dev->data;
        eth_dev->data->mac_addrs = priv->mac;
        eth_dev->device = dpdk_dev;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        /* Configure the first MAC address by default. */
        if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
                DRV_LOG(ERR,
index 5a8c5aa..2cd7391 100644 (file)
@@ -840,6 +840,7 @@ mvneta_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
        eth_dev->rx_pkt_burst = mvneta_rx_pkt_burst;
        mvneta_set_tx_function(eth_dev);
        eth_dev->dev_ops = &mvneta_ops;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rte_eth_dev_probing_finish(eth_dev);
        return 0;
index 68f7648..3c7c9d8 100644 (file)
@@ -2865,6 +2865,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
        eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
        mrvl_set_tx_function(eth_dev);
        eth_dev->dev_ops = &mrvl_ops;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rte_eth_dev_probing_finish(eth_dev);
        return 0;
index d5c4252..4a01f1d 100644 (file)
@@ -950,6 +950,8 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Since Hyper-V only supports one MAC address */
        eth_dev->data->mac_addrs = rte_calloc("hv_mac", HN_MAX_MAC_ADDRS,
                                              sizeof(struct rte_ether_addr), 0);
index f63c86f..c55bcdf 100644 (file)
@@ -516,6 +516,8 @@ nfb_eth_dev_init(struct rte_eth_dev *dev)
        data->all_multicast = nfb_eth_allmulticast_get(dev);
        internals->rx_filter_original = data->promiscuous;
 
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        RTE_LOG(INFO, PMD, "NFB device ("
                PCI_PRI_FMT ") successfully initialized\n",
                pci_addr->domain, pci_addr->bus, pci_addr->devid,
index 3820377..1608bf5 100644 (file)
@@ -2992,6 +2992,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
                eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
                     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
index 9ed88f1..49ee8da 100644 (file)
@@ -550,6 +550,7 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
        data->mac_addrs = &internals->eth_addr;
        data->promiscuous = 1;
        data->all_multicast = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        eth_dev->dev_ops = &ops;
 
index 1c21d00..3ee7b04 100644 (file)
@@ -1375,6 +1375,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
        data->promiscuous = 0;
        data->all_multicast = 0;
        data->scattered_rx = 0;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Get maximum number of supported MAC entries */
        max_entries = octeontx_bgx_port_mac_entries_get(nic->port_id);
index e52e195..cfb733a 100644 (file)
@@ -2424,6 +2424,7 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
        memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
index 5954745..34e8231 100644 (file)
@@ -1158,6 +1158,7 @@ pmd_init_internals(struct rte_vdev_device *vdev,
        data->mac_addrs = &(*internals)->eth_addr;
        data->promiscuous = 1;
        data->all_multicast = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /*
         * NOTE: we'll replace the data element, of originally allocated
index 32b7959..3b07969 100644 (file)
@@ -855,6 +855,8 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
        eth_dev->data->nb_rx_queues = 1;
        eth_dev->data->nb_tx_queues = 1;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* For link status, open the PFE CDEV; Error from this function
         * is silently ignored; In case of error, the link status will not
         * be available.
index c26d4b0..5490135 100644 (file)
@@ -2545,6 +2545,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* @DPDK */
        edev->vendor_id = pci_dev->id.vendor_id;
index 9511a87..d6115ab 100644 (file)
@@ -361,6 +361,7 @@ do_eth_dev_ring_create(const char *name,
        data->mac_addrs = &internals->address;
        data->promiscuous = 1;
        data->all_multicast = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        eth_dev->dev_ops = &ops;
        data->numa_node = numa_node;
index 105afc2..c067208 100644 (file)
@@ -2217,6 +2217,7 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)
 
        /* Copy PCI device info to the dev->data */
        rte_eth_copy_pci_info(dev, pci_dev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rc = sfc_kvargs_parse(sa);
        if (rc != 0)
index 35dcc2c..7874c4f 100644 (file)
@@ -1548,6 +1548,8 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev, struct port_info *pi)
 
        rte_ether_addr_copy(&eth_addr, data->mac_addrs);
 
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        PMD_INIT_LOG(INFO, "%s device %s successfully initialized",
                        RTE_STR(RTE_SZEDATA2_DRIVER_NAME), data->name);
 
index d1e8279..81c6884 100644 (file)
@@ -1922,7 +1922,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
        /* Setup some default values */
        data = dev->data;
        data->dev_private = pmd;
-       data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+       data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+                               RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        data->numa_node = numa_node;
 
        data->dev_link = pmd_link;
index ac87fbc..f0bd20a 100644 (file)
@@ -2155,6 +2155,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        nic->device_id = pci_dev->id.device_id;
        nic->vendor_id = pci_dev->id.vendor_id;
index 23ca115..f271203 100644 (file)
@@ -1447,7 +1447,8 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        internal->flags = flags;
        internal->disable_flags = disable_flags;
        data->dev_link = pmd_link;
-       data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+       data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+                               RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        data->promiscuous = 1;
        data->all_multicast = 1;
 
index 661c2aa..516c277 100644 (file)
@@ -1718,6 +1718,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
        else
                eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Setting up rx_header size for the device */
        if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
            vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
index a685102..6920ab5 100644 (file)
@@ -250,6 +250,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
index ee2bc51..a0508ac 100644 (file)
@@ -2698,8 +2698,10 @@ get_xstats_basic_count(struct rte_eth_dev *dev)
        nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
        count = RTE_NB_STATS;
-       count += nb_rxqs * RTE_NB_RXQ_STATS;
-       count += nb_txqs * RTE_NB_TXQ_STATS;
+       if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
+               count += nb_rxqs * RTE_NB_RXQ_STATS;
+               count += nb_txqs * RTE_NB_TXQ_STATS;
+       }
 
        return count;
 }
@@ -2790,6 +2792,10 @@ rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
                        sizeof(xstats_names[0].name));
                cnt_used_entries++;
        }
+
+       if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
+               return cnt_used_entries;
+
        num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
@@ -2988,6 +2994,9 @@ rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
                xstats[count++].value = val;
        }
 
+       if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
+               return count;
+
        /* per-rxq stats */
        for (q = 0; q < nb_rxqs; q++) {
                for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
@@ -3123,8 +3132,9 @@ rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
        nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
        /* Return generic statistics */
-       count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
-               (nb_txqs * RTE_NB_TXQ_STATS);
+       count = RTE_NB_STATS;
+       if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
+               count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
 
        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL) {
index 77f9df7..001cf3f 100644 (file)
@@ -1802,6 +1802,11 @@ struct rte_eth_dev_owner {
 #define RTE_ETH_DEV_REPRESENTOR  0x0010
 /** Device does not support MAC change after started */
 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR  0x0020
+/**
+ * Queue xstats filled automatically by ethdev layer.
+ * PMDs filling the queue xstats themselves should not set this flag
+ */
+#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040
 
 /**
  * Iterates over valid ethdev ports owned by a specific owner.