git.droids-corp.org - dpdk.git/commitdiff
ethdev: add device flag to bypass auto-filled queue xstats
author    Ferruh Yigit <ferruh.yigit@intel.com>
Wed, 14 Oct 2020 02:26:47 +0000 (03:26 +0100)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 16 Oct 2020 21:27:15 +0000 (23:27 +0200)
Queue stats are stored in 'struct rte_eth_stats' as an array, and the array
size is defined by the 'RTE_ETHDEV_QUEUE_STAT_CNTRS' compile-time flag.
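
For reference, the per-queue counters in question are the fixed-size arrays
in 'struct rte_eth_stats' (an abridged sketch for illustration only; see
rte_ethdev.h for the full definition):

        struct rte_eth_stats {
                ...
                /* Per-queue counters, limited to RTE_ETHDEV_QUEUE_STAT_CNTRS queues. */
                uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
                uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
                uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
                uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
                uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
        };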

As a result of the technical board discussion, it was decided to remove the
queue statistics from 'struct rte_eth_stats' in the long term.

Instead, PMDs should expose the queue statistics via xstats; this gives more
flexibility in the number of queues supported.

Currently, the queue stats in the xstats are filled by the ethdev layer from
the basic stats. Once the queue stats are removed from the basic stats, the
responsibility to fill the relevant xstats will be pushed to the PMDs.
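
A simplified sketch of that auto-fill behaviour (not the exact ethdev code;
the helper name 'fill_rx_queue_xstats_sketch' is hypothetical, used only to
show where the values come from):

        /* Auto-filled Rx queue xstats are derived from the basic stats arrays;
         * names follow the "rx_qN_packets" / "rx_qN_bytes" / "rx_qN_errors"
         * pattern, and Tx queues are handled the same way from q_opackets etc.
         */
        static void
        fill_rx_queue_xstats_sketch(const struct rte_eth_stats *stats,
                        uint16_t nb_rxqs, struct rte_eth_xstat_name *names,
                        uint64_t *values)
        {
                uint16_t q;

                for (q = 0; q < nb_rxqs; q++) {
                        snprintf(names[q].name, sizeof(names[q].name),
                                 "rx_q%u_packets", q);
                        values[q] = stats->q_ipackets[q];
                }
        }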

During the transition period, a temporary 'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS'
device flag is created. Initially, all PMDs using xstats set this flag.
PMDs that implement the queue stats in their own xstats should clear the flag.
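
A minimal sketch of the intended usage in a PMD init path (the set case
mirrors the driver changes below; the clear case is only an illustration,
since a converted PMD would normally just stop setting the flag):

        /* PMD still relying on the ethdev auto-filled queue xstats. */
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* PMD providing its own per-queue xstats: leave the flag unset
         * (or clear it) so the ethdev layer does not add duplicates.
         */
        eth_dev->data->dev_flags &= ~RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;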

When all PMDs have switched to xstats for the queue stats, the queue stats
related fields will be removed from 'struct rte_eth_stats', along with the
'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS' flag.
Later, the 'RTE_ETHDEV_QUEUE_STAT_CNTRS' compile-time flag can also be
removed.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
Acked-by: Xiao Wang <xiao.w.wang@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
60 files changed:
drivers/net/af_packet/rte_eth_af_packet.c
drivers/net/af_xdp/rte_eth_af_xdp.c
drivers/net/ark/ark_ethdev.c
drivers/net/atlantic/atl_ethdev.c
drivers/net/avp/avp_ethdev.c
drivers/net/axgbe/axgbe_ethdev.c
drivers/net/bnx2x/bnx2x_ethdev.c
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_reps.c
drivers/net/bonding/rte_eth_bond_pmd.c
drivers/net/cxgbe/cxgbe_ethdev.c
drivers/net/dpaa/dpaa_ethdev.c
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/e1000/em_ethdev.c
drivers/net/e1000/igb_ethdev.c
drivers/net/ena/ena_ethdev.c
drivers/net/enetc/enetc_ethdev.c
drivers/net/enic/enic_ethdev.c
drivers/net/enic/enic_vf_representor.c
drivers/net/failsafe/failsafe.c
drivers/net/fm10k/fm10k_ethdev.c
drivers/net/hinic/hinic_pmd_ethdev.c
drivers/net/hns3/hns3_ethdev.c
drivers/net/hns3/hns3_ethdev_vf.c
drivers/net/i40e/i40e_ethdev.c
drivers/net/i40e/i40e_ethdev_vf.c
drivers/net/i40e/i40e_vf_representor.c
drivers/net/iavf/iavf_ethdev.c
drivers/net/ice/ice_dcf_ethdev.c
drivers/net/ice/ice_ethdev.c
drivers/net/igc/igc_ethdev.c
drivers/net/ionic/ionic_ethdev.c
drivers/net/ipn3ke/ipn3ke_representor.c
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/kni/rte_eth_kni.c
drivers/net/liquidio/lio_ethdev.c
drivers/net/memif/rte_eth_memif.c
drivers/net/mlx4/mlx4.c
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mvneta/mvneta_ethdev.c
drivers/net/mvpp2/mrvl_ethdev.c
drivers/net/netvsc/hn_ethdev.c
drivers/net/nfb/nfb_ethdev.c
drivers/net/nfp/nfp_net.c
drivers/net/null/rte_eth_null.c
drivers/net/octeontx/octeontx_ethdev.c
drivers/net/octeontx2/otx2_ethdev.c
drivers/net/pcap/rte_eth_pcap.c
drivers/net/pfe/pfe_ethdev.c
drivers/net/qede/qede_ethdev.c
drivers/net/ring/rte_eth_ring.c
drivers/net/sfc/sfc_ethdev.c
drivers/net/szedata2/rte_eth_szedata2.c
drivers/net/tap/rte_eth_tap.c
drivers/net/thunderx/nicvf_ethdev.c
drivers/net/vhost/rte_eth_vhost.c
drivers/net/virtio/virtio_ethdev.c
drivers/net/vmxnet3/vmxnet3_ethdev.c
lib/librte_ethdev/rte_ethdev.c
lib/librte_ethdev/rte_ethdev.h

index cb1c39b027d5c04de0b519e46b3b06ceb40e0f47..671ee87ae2f1fb732e21dd24112231693e2bdfe3 100644 (file)
@@ -860,6 +860,7 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
        data->nb_tx_queues = (uint16_t)nb_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &(*internals)->eth_addr;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        (*eth_dev)->dev_ops = &ops;
 
index 1c1e3cadd65e738479042db7ebc77a50339ad679..df2767b81c6677f91528c1ea36c646cb2f0aaf8b 100644 (file)
@@ -1562,6 +1562,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
        eth_dev->data->dev_private = internals;
        eth_dev->data->dev_link = pmd_link;
        eth_dev->data->mac_addrs = &internals->eth_addr;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        eth_dev->dev_ops = &ops;
        eth_dev->rx_pkt_burst = eth_af_xdp_rx;
        eth_dev->tx_pkt_burst = eth_af_xdp_tx;
index 1dcc059276cb58a05f2ed2dd4e0702b7ac366862..168b3659d6646e052349b92eab90bac3be976b19 100644 (file)
@@ -256,6 +256,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
                return ret;
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        rte_eth_copy_pci_info(dev, pci_dev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Use dummy function until setup */
        dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
@@ -383,6 +384,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
                eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;
 
                rte_eth_copy_pci_info(eth_dev, pci_dev);
+               eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
                eth_dev->data->mac_addrs = rte_zmalloc(name,
                                                RTE_ETHER_ADDR_LEN, 0);
index 419ee477afce1a81ab727b905954a8d8de51d1cb..b0716773addb6d3c53edce0d56c4da50dce40c81 100644 (file)
@@ -380,6 +380,8 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
index 67f16623c5cd5a4b098183709550c2771cbfaab1..5f8187b905545cf4f9c99ff8e899dc814c80139a 100644 (file)
@@ -974,6 +974,7 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Check current migration status */
        if (avp_dev_migration_pending(eth_dev)) {
index 6e9722a8bfa4e301da72776e6de6dafb9fdd351e..cfe6aba73a223c55c728ac488458d80064917a34 100644 (file)
@@ -1972,6 +1972,8 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        pdata = eth_dev->data->dev_private;
        /* initial state */
        rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
index 28f8aaeb4d40d17c41614ac8a273b0a382acfe96..d35c75a2e211a082f96daa6cbf28fbcb19d512b6 100644 (file)
@@ -648,6 +648,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        sc->pcie_bus    = pci_dev->addr.bus;
        sc->pcie_device = pci_dev->addr.devid;
index 7bc610b73c0e9d06172900861329c306fa3cae95..6c1236953a722552bba8ee0ff0ce0971018ffaf3 100644 (file)
@@ -5997,6 +5997,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        bp = eth_dev->data->dev_private;
 
index 935f2c08b310e81ba38f93b76e92af258a905e3e..b4566c926a1737ae64efc4d12c5256e06b88f178 100644 (file)
@@ -184,7 +184,8 @@ int bnxt_representor_init(struct rte_eth_dev *eth_dev, void *params)
        vf_rep_bp->rep_fc_r2f = rep_params->rep_fc_r2f;
        vf_rep_bp->rep_fc_f2r = rep_params->rep_fc_f2r;
 
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        eth_dev->data->representor_id = rep_params->vf_id;
 
        rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
index 9d5eef5f5123b5c517deef6b1d52f4c90757ba21..057b1ada54c7d72d1a143ce5b1fb1b44a24e740a 100644 (file)
@@ -3234,7 +3234,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
        }
 
        eth_dev->dev_ops = &default_dev_ops;
-       eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+       eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rte_spinlock_init(&internals->lock);
        rte_spinlock_init(&internals->lsc_lock);
index a3a0d6d7579e9825ec33d288e5d8c1cd449d8f17..98d0362fa3a7102be9abb23256893188c09b445f 100644 (file)
@@ -1262,6 +1262,8 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
        adapter = rte_zmalloc(name, sizeof(*adapter), 0);
        if (!adapter)
index c915b7ba1b1e4af9bf22998cbf485c2ad76c6547..f00279e004b185f7933ce7b3afdab9cb0de5e2ea 100644 (file)
@@ -2219,6 +2219,8 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
        if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Invoke PMD device initialization function */
        diag = dpaa_dev_init(eth_dev);
        if (diag == 0) {
index f9d9a93fc14f2e8b7b5615605d43a55b25e4db87..04e60c56f22b299d38e64bc22f50579c08cb6d95 100644 (file)
@@ -2794,6 +2794,8 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
        if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Invoke PMD device initialization function */
        diag = dpaa2_dev_init(eth_dev);
        if (diag == 0) {
index 8e028689e4635d4855206f9744278caab987b390..8ee9422bf4d3879f72f5fad97954614223264806 100644 (file)
@@ -265,6 +265,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;
index 43181d3f598a1164fb62086d6498a0b3fd831279..ac4b8f1123f8e14402a5bff3010f0dea8d8a90be 100644 (file)
@@ -765,6 +765,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
 
@@ -959,6 +960,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
 
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
index d2e7b80b84a33c4abc733f30381757eb04f95098..c513faf202d657a5fdf3d34f1f2bf30e1057375a 100644 (file)
@@ -1779,6 +1779,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        memset(adapter, 0, sizeof(struct ena_adapter));
        ena_dev = &adapter->ena_dev;
 
index 910e40fb70b37bdbd6163cfac2891ea2f7a11c69..6ff3022874528a261c75833348fe0cb00d3747e9 100644 (file)
@@ -885,6 +885,8 @@ enetc_dev_init(struct rte_eth_dev *eth_dev)
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Retrieving and storing the HW base address of device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;
index 4a34c0ef833bdab582c184e901e7465a499596b0..8d0054ffec88067dc0f561f4909a93102f4614b9 100644 (file)
@@ -1300,6 +1300,7 @@ static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
 
        pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pdev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        enic->pdev = pdev;
        addr = &pdev->addr;
 
index 984a754e356b703317998e270bd933bfa29c65af..c2c03c02816692db5235b87a8a30cb540390e447 100644 (file)
@@ -672,7 +672,8 @@ int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
 
        eth_dev->device->driver = pf->rte_dev->device->driver;
        eth_dev->dev_ops = &enic_vf_representor_dev_ops;
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        eth_dev->data->representor_id = vf->vf_id;
        eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
                sizeof(struct rte_ether_addr) *
index b921e101e69df866b4d2fd65ace3e89ed1983a5a..2e9a9c7d2107e0f2528b6e80227c09af03c22cfd 100644 (file)
@@ -264,7 +264,8 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
                mac->addr_bytes[0], mac->addr_bytes[1],
                mac->addr_bytes[2], mac->addr_bytes[3],
                mac->addr_bytes[4], mac->addr_bytes[5]);
-       dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+       dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC |
+                               RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        PRIV(dev)->intr_handle = (struct rte_intr_handle){
                .fd = -1,
                .type = RTE_INTR_HANDLE_EXT,
index 93c6d8c1e7ef877bd9bca70a3d9b836eeb481b3d..dc2979bdd2db08466fcc1f1844d2bf52311ab277 100644 (file)
@@ -3076,6 +3076,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
        }
 
        rte_eth_copy_pci_info(dev, pdev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        memset(macvlan, 0, sizeof(*macvlan));
index 59743686a3744ae460914b2ccc3bb4eb312bacfa..b694fd83cf777301413ae341b9be523bc046e0e4 100644 (file)
@@ -3108,6 +3108,8 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
        memset(nic_dev, 0, sizeof(*nic_dev));
 
index 6869c8e56c9152dda8028b52050eb260ccb49981..5a234e2955e80bb50e64d6818917f9c78c72dd89 100644 (file)
@@ -6106,6 +6106,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        ret = hns3_mp_init_primary();
        if (ret) {
                PMD_INIT_LOG(ERR,
index 2b1de8d325de407f59062f559b3ef6d92f128e61..d1c3fb81ceb373b6fec7c2ff2ead5c4745189632 100644 (file)
@@ -2753,6 +2753,8 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        ret = hns3_mp_init_primary();
        if (ret) {
                PMD_INIT_LOG(ERR,
index a1aaa205a1b2604c1c33e9d5ebd7027c57610405..4778aaf2995d5c0ec560f67f5cb22935984a0fea 100644 (file)
@@ -1465,6 +1465,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
        intr_handle = &pci_dev->intr_handle;
 
        rte_eth_copy_pci_info(dev, pci_dev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->adapter->eth_dev = dev;
index 7659b18c35beabf23e844f6fb95fe969bd87e542..53154c3ef34346181d52a43f40d5397ef7c20bdc 100644 (file)
@@ -1575,6 +1575,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
        }
        i40e_set_default_ptype_table(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
index 791c050b8c83cc51dd5cb7b8f86114df1df6488c..9e40406a3de7d09c8af3b2ad9a5a1621a9dba86f 100644 (file)
@@ -508,7 +508,8 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
                return -ENODEV;
        }
 
-       ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+       ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        ethdev->data->representor_id = representor->vf_id;
 
        /* Setting the number queues allocated to the VF */
index 36712133b2007be87dd65ab126aacc71f2571f5e..0ef023c0aee4548cfc24b719750c354aa8f01381 100644 (file)
@@ -1434,6 +1434,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
index 0cc80a208a76fbf905a895364c0b6ef9f7496fef..b0b2ecb0d6729145dc314b3c2412a47c7e0ad08c 100644 (file)
@@ -906,6 +906,8 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
        if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
index e63e423215ecbef0eca64a5838f3c426fdc6535e..c65125ff3260a891e948e13f725db8bf038a2193 100644 (file)
@@ -2137,6 +2137,8 @@ ice_dev_init(struct rte_eth_dev *dev)
                return 0;
        }
 
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        ice_set_default_ptype_table(dev);
        pci_dev = RTE_DEV_TO_PCI(dev->device);
        intr_handle = &pci_dev->intr_handle;
index 1bcfb63d34ac4dd5d2928e63c73174a778afb012..802212fc5784b2ccb382500ae91cdfee331fb799 100644 (file)
@@ -1244,6 +1244,7 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
                return 0;
 
        rte_eth_copy_pci_info(dev, pci_dev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->back = pci_dev;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
index ff1971b1cac58acabadc5a57da0cb8257e3417ff..600333e20fd56bb14ee02e6355a164ac75e9a441 100644 (file)
@@ -1003,6 +1003,7 @@ eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        lif->index = adapter->nlifs;
        lif->eth_dev = eth_dev;
index f15ee0728a79ffdcc65dd198417fad1d2db11a0f..8a536025769429f8344bbe09149ff53edfbdf757 100644 (file)
@@ -2966,7 +2966,8 @@ ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params)
                return -ENODEV;
        }
 
-       ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+       ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
        TAILQ_INSERT_TAIL(&ipn3ke_rpst_list, rpst, next);
index 6f9a1f7d2f55c39e287550dce302e32842a6017c..14a254ab74369ed387946ae1c48b93b22577c727 100644 (file)
@@ -1118,6 +1118,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 
        rte_atomic32_clear(&ad->link_thread_running);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
@@ -1596,6 +1597,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
                              pci_dev->device.devargs);
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
index f544d54771d7e3ac6297b16b2334b4010d9ee51b..1696787a9b361e31d3b124323f9a53376bec7f4b 100644 (file)
@@ -392,6 +392,7 @@ eth_kni_create(struct rte_vdev_device *vdev,
        data->mac_addrs = &internals->eth_addr;
        data->promiscuous = 1;
        data->all_multicast = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rte_eth_random_addr(internals->eth_addr.addr_bytes);
 
index bd62b75a17604bbdebcc3cf67b14c788f6fa246c..d4dd3768cdb68ea9d2b90f23ff67f197258c6b07 100644 (file)
@@ -2094,6 +2094,7 @@ lio_eth_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pdev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        if (pdev->mem_resource[0].addr) {
                lio_dev->hw_addr = pdev->mem_resource[0].addr;
index fe72a0161251bea71e13b03b68ca8221ec05e51e..f7ae55fbc701cea7a8f4e667b34131c9bf1ad605 100644 (file)
@@ -1539,6 +1539,7 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
        data->dev_link = pmd_link;
        data->mac_addrs = ether_addr;
        data->promiscuous = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        eth_dev->dev_ops = &ops;
        eth_dev->device = &vdev->device;
index 0857f5e1e3e5a2ac7d27d760e5caee50eaceb046..34c2bbb3735d108eb66d5461aac1edf4bdc5ef4a 100644 (file)
@@ -1035,6 +1035,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                eth_dev->data->mac_addrs = priv->mac;
                eth_dev->device = &pci_dev->device;
                rte_eth_copy_pci_info(eth_dev, pci_dev);
+               eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
                /* Initialize local interrupt handle for current port. */
                memset(&priv->intr_handle, 0, sizeof(struct rte_intr_handle));
                priv->intr_handle.fd = -1;
index 09d0944f45e41d4dd8e8bb7c0042a0655b1fc189..10f6370c2ffb5a327e0b42659f7c27089254b27a 100644 (file)
@@ -1277,6 +1277,7 @@ err_secondary:
        priv->dev_data = eth_dev->data;
        eth_dev->data->mac_addrs = priv->mac;
        eth_dev->device = dpdk_dev;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        /* Configure the first MAC address by default. */
        if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
                DRV_LOG(ERR,
index 5a8c5aad29d6cf5eafab79264f06edff50951eca..2cd73919ce6eb62cb002e295d5285281af5209f1 100644 (file)
@@ -840,6 +840,7 @@ mvneta_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
        eth_dev->rx_pkt_burst = mvneta_rx_pkt_burst;
        mvneta_set_tx_function(eth_dev);
        eth_dev->dev_ops = &mvneta_ops;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rte_eth_dev_probing_finish(eth_dev);
        return 0;
index 68f7648c802095243b2919db391908712499bf3a..3c7c9d87ed487345bf5222c7948de668f5b2cd7a 100644 (file)
@@ -2865,6 +2865,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
        eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
        mrvl_set_tx_function(eth_dev);
        eth_dev->dev_ops = &mrvl_ops;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rte_eth_dev_probing_finish(eth_dev);
        return 0;
index d5c4252f0a489829a5840ee75ba9de2d9a397a92..4a01f1d464b88f9b79c403784cfb7a9c4fd5cf45 100644 (file)
@@ -950,6 +950,8 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Since Hyper-V only supports one MAC address */
        eth_dev->data->mac_addrs = rte_calloc("hv_mac", HN_MAX_MAC_ADDRS,
                                              sizeof(struct rte_ether_addr), 0);
index f63c86f60b5e031ed9f11f8bf5fb65a7d0e6f2a7..c55bcdf1efdcbe76906a4f62b94d21bea6af89ac 100644 (file)
@@ -516,6 +516,8 @@ nfb_eth_dev_init(struct rte_eth_dev *dev)
        data->all_multicast = nfb_eth_allmulticast_get(dev);
        internals->rx_filter_original = data->promiscuous;
 
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        RTE_LOG(INFO, PMD, "NFB device ("
                PCI_PRI_FMT ") successfully initialized\n",
                pci_addr->domain, pci_addr->bus, pci_addr->devid,
index 38203777a3e390c871fc288be45b69edd51a9256..1608bf5ea12beb0bce2fbb4fb379c47156eb5ff6 100644 (file)
@@ -2992,6 +2992,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
                eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
                     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
index 9ed88f110f54b9d717b2d15abf03793a22304bce..49ee8da8e4db1bd5761a17d40e75f76308b7210c 100644 (file)
@@ -550,6 +550,7 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
        data->mac_addrs = &internals->eth_addr;
        data->promiscuous = 1;
        data->all_multicast = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        eth_dev->dev_ops = &ops;
 
index 1c21d00a3f800282fe6032c9e4d8dd88df0aa4cc..3ee7b043fd86e2d92204325480e4e9f8531af854 100644 (file)
@@ -1375,6 +1375,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
        data->promiscuous = 0;
        data->all_multicast = 0;
        data->scattered_rx = 0;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Get maximum number of supported MAC entries */
        max_entries = octeontx_bgx_port_mac_entries_get(nic->port_id);
index e52e1952baa5245faed3cf7293f30a3ef10ac260..cfb733a4b557f56c958b0bfda893b0490f2167db 100644 (file)
@@ -2424,6 +2424,7 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
        memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
index 595474558b623b8d3b6931ef58594270ca2f09a1..34e82317b1f06f6406263af89e8f4bcd124e2b94 100644 (file)
@@ -1158,6 +1158,7 @@ pmd_init_internals(struct rte_vdev_device *vdev,
        data->mac_addrs = &(*internals)->eth_addr;
        data->promiscuous = 1;
        data->all_multicast = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /*
         * NOTE: we'll replace the data element, of originally allocated
index 32b79590fe290ae86c4362fd807dd57ce166a9a1..3b079693fb8ecab8a5ae95e2e273ab61502cbd3a 100644 (file)
@@ -855,6 +855,8 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
        eth_dev->data->nb_rx_queues = 1;
        eth_dev->data->nb_tx_queues = 1;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* For link status, open the PFE CDEV; Error from this function
         * is silently ignored; In case of error, the link status will not
         * be available.
index c26d4b08f96a6462468833d539c9e85646c4c879..549013557c3a05323367cb60b780253ad0f47a74 100644 (file)
@@ -2545,6 +2545,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* @DPDK */
        edev->vendor_id = pci_dev->id.vendor_id;
index 9511a87fa11b05ae55e9e3c23901e37f639e67bc..d6115abdd50884030b056e76c5af924aa79afacb 100644 (file)
@@ -361,6 +361,7 @@ do_eth_dev_ring_create(const char *name,
        data->mac_addrs = &internals->address;
        data->promiscuous = 1;
        data->all_multicast = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        eth_dev->dev_ops = &ops;
        data->numa_node = numa_node;
index 105afc21980d584994ca42e9b138e43dd1134280..c0672083ec32442c940a13737b00df24ea470a1c 100644 (file)
@@ -2217,6 +2217,7 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)
 
        /* Copy PCI device info to the dev->data */
        rte_eth_copy_pci_info(dev, pci_dev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        rc = sfc_kvargs_parse(sa);
        if (rc != 0)
index 35dcc2c77576cd8f2e7cb706ff64ae02b64b757f..7874c4f4f832ef6768784b5d94293740ddb840a8 100644 (file)
@@ -1548,6 +1548,8 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev, struct port_info *pi)
 
        rte_ether_addr_copy(&eth_addr, data->mac_addrs);
 
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        PMD_INIT_LOG(INFO, "%s device %s successfully initialized",
                        RTE_STR(RTE_SZEDATA2_DRIVER_NAME), data->name);
 
index d1e82799f581f48e512c8a9af1860c33f37ae7cb..81c688471d3f5d0777dbb2b404490db8fd6e6a61 100644 (file)
@@ -1922,7 +1922,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
        /* Setup some default values */
        data = dev->data;
        data->dev_private = pmd;
-       data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+       data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+                               RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        data->numa_node = numa_node;
 
        data->dev_link = pmd_link;
index ac87fbcc3c1f7fde16ca55da94925471de725996..f0bd20a22b6a6f4aa32b35c6f8290fdd5622999a 100644 (file)
@@ -2155,6 +2155,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        nic->device_id = pci_dev->id.device_id;
        nic->vendor_id = pci_dev->id.vendor_id;
index 23ca1158776baab271d2741fdc774592278acd98..f2712036fc779572288f9db0743449c7ccd48bde 100644 (file)
@@ -1447,7 +1447,8 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        internal->flags = flags;
        internal->disable_flags = disable_flags;
        data->dev_link = pmd_link;
-       data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+       data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+                               RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        data->promiscuous = 1;
        data->all_multicast = 1;
 
index 661c2aa9e49ce738d0de4e6c0beb448720ccef94..516c277f9cb407956c024822a2a177d535033791 100644 (file)
@@ -1718,6 +1718,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
        else
                eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        /* Setting up rx_header size for the device */
        if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
            vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
index a685102b9392f7432d112720f22c5651ca77520f..6920ab568c62e19452936516994e66cd5f20cb87 100644 (file)
@@ -250,6 +250,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
index ee2bc51671bf8c3efd7378344e36b62244108edf..a0508acb0ec67b3c022e4612f386394d763a1f10 100644 (file)
@@ -2698,8 +2698,10 @@ get_xstats_basic_count(struct rte_eth_dev *dev)
        nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
        count = RTE_NB_STATS;
-       count += nb_rxqs * RTE_NB_RXQ_STATS;
-       count += nb_txqs * RTE_NB_TXQ_STATS;
+       if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
+               count += nb_rxqs * RTE_NB_RXQ_STATS;
+               count += nb_txqs * RTE_NB_TXQ_STATS;
+       }
 
        return count;
 }
@@ -2790,6 +2792,10 @@ rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
                        sizeof(xstats_names[0].name));
                cnt_used_entries++;
        }
+
+       if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
+               return cnt_used_entries;
+
        num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
@@ -2988,6 +2994,9 @@ rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
                xstats[count++].value = val;
        }
 
+       if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
+               return count;
+
        /* per-rxq stats */
        for (q = 0; q < nb_rxqs; q++) {
                for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
@@ -3123,8 +3132,9 @@ rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
        nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
        /* Return generic statistics */
-       count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
-               (nb_txqs * RTE_NB_TXQ_STATS);
+       count = RTE_NB_STATS;
+       if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
+               count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
 
        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL) {
index 77f9df7e5af1d5f004d8be49dad1bfeb40d801ac..001cf3f8d4336b990b6a049235755f603349cd8f 100644 (file)
@@ -1802,6 +1802,11 @@ struct rte_eth_dev_owner {
 #define RTE_ETH_DEV_REPRESENTOR  0x0010
 /** Device does not support MAC change after started */
 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR  0x0020
+/**
+ * Queue xstats filled automatically by ethdev layer.
+ * PMDs filling the queue xstats themselves should not set this flag
+ */
+#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040
 
 /**
  * Iterates over valid ethdev ports owned by a specific owner.