data->nb_tx_queues = (uint16_t)nb_queues;
data->dev_link = pmd_link;
data->mac_addrs = &(*internals)->eth_addr;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
(*eth_dev)->dev_ops = &ops;
eth_dev->data->dev_private = internals;
eth_dev->data->dev_link = pmd_link;
eth_dev->data->mac_addrs = &internals->eth_addr;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->dev_ops = &ops;
eth_dev->rx_pkt_burst = eth_af_xdp_rx;
eth_dev->tx_pkt_burst = eth_af_xdp_tx;
return ret;
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Use dummy function until setup */
dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->data->mac_addrs = rte_zmalloc(name,
RTE_ETHER_ADDR_LEN, 0);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Check current migration status */
if (avp_dev_migration_pending(eth_dev)) {
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
pdata = eth_dev->data->dev_private;
/* initial state */
rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
sc->pcie_bus = pci_dev->addr.bus;
sc->pcie_device = pci_dev->addr.devid;
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
bp = eth_dev->data->dev_private;
vf_rep_bp->rep_fc_r2f = rep_params->rep_fc_r2f;
vf_rep_bp->rep_fc_f2r = rep_params->rep_fc_f2r;
- eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->data->representor_id = rep_params->vf_id;
rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
}
eth_dev->dev_ops = &default_dev_ops;
- eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
rte_spinlock_init(&internals->lock);
rte_spinlock_init(&internals->lsc_lock);
return 0;
}
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
adapter = rte_zmalloc(name, sizeof(*adapter), 0);
if (!adapter)
if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Invoke PMD device initialization function */
diag = dpaa_dev_init(eth_dev);
if (diag == 0) {
if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Invoke PMD device initialization function */
diag = dpaa2_dev_init(eth_dev);
if (diag == 0) {
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->device_id = pci_dev->id.device_id;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
memset(adapter, 0, sizeof(struct ena_adapter));
ena_dev = &adapter->ena_dev;
eth_dev->rx_pkt_burst = &enetc_recv_pkts;
eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Retrieving and storing the HW base address of device */
hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
hw->device_id = pci_dev->id.device_id;
pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pdev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
enic->pdev = pdev;
addr = &pdev->addr;
eth_dev->device->driver = pf->rte_dev->device->driver;
eth_dev->dev_ops = &enic_vf_representor_dev_ops;
- eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->data->representor_id = vf->vf_id;
eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
sizeof(struct rte_ether_addr) *
mac->addr_bytes[0], mac->addr_bytes[1],
mac->addr_bytes[2], mac->addr_bytes[3],
mac->addr_bytes[4], mac->addr_bytes[5]);
- dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
PRIV(dev)->intr_handle = (struct rte_intr_handle){
.fd = -1,
.type = RTE_INTR_HANDLE_EXT,
}
rte_eth_copy_pci_info(dev, pdev);
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
memset(macvlan, 0, sizeof(*macvlan));
return 0;
}
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
memset(nic_dev, 0, sizeof(*nic_dev));
return 0;
}
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
ret = hns3_mp_init_primary();
if (ret) {
PMD_INIT_LOG(ERR,
return 0;
}
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
ret = hns3_mp_init_primary();
if (ret) {
PMD_INIT_LOG(ERR,
intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->adapter->eth_dev = dev;
}
i40e_set_default_ptype_table(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
return -ENODEV;
}
- ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
ethdev->data->representor_id = representor->vf_id;
/* Setting the number queues allocated to the VF */
return 0;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
return 0;
}
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
ice_set_default_ptype_table(dev);
pci_dev = RTE_DEV_TO_PCI(dev->device);
intr_handle = &pci_dev->intr_handle;
return 0;
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->back = pci_dev;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
lif->index = adapter->nlifs;
lif->eth_dev = eth_dev;
return -ENODEV;
}
- ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
TAILQ_INSERT_TAIL(&ipn3ke_rpst_list, rpst, next);
rte_atomic32_clear(&ad->link_thread_running);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
pci_dev->device.devargs);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
data->mac_addrs = &internals->eth_addr;
data->promiscuous = 1;
data->all_multicast = 1;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
rte_eth_random_addr(internals->eth_addr.addr_bytes);
return 0;
rte_eth_copy_pci_info(eth_dev, pdev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
if (pdev->mem_resource[0].addr) {
lio_dev->hw_addr = pdev->mem_resource[0].addr;
data->dev_link = pmd_link;
data->mac_addrs = ether_addr;
data->promiscuous = 1;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->dev_ops = &ops;
eth_dev->device = &vdev->device;
eth_dev->data->mac_addrs = priv->mac;
eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Initialize local interrupt handle for current port. */
memset(&priv->intr_handle, 0, sizeof(struct rte_intr_handle));
priv->intr_handle.fd = -1;
priv->dev_data = eth_dev->data;
eth_dev->data->mac_addrs = priv->mac;
eth_dev->device = dpdk_dev;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Configure the first MAC address by default. */
if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
DRV_LOG(ERR,
eth_dev->rx_pkt_burst = mvneta_rx_pkt_burst;
mvneta_set_tx_function(eth_dev);
eth_dev->dev_ops = &mvneta_ops;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
rte_eth_dev_probing_finish(eth_dev);
return 0;
eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
mrvl_set_tx_function(eth_dev);
eth_dev->dev_ops = &mrvl_ops;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
rte_eth_dev_probing_finish(eth_dev);
return 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Since Hyper-V only supports one MAC address */
eth_dev->data->mac_addrs = rte_calloc("hv_mac", HN_MAX_MAC_ADDRS,
sizeof(struct rte_ether_addr), 0);
data->all_multicast = nfb_eth_allmulticast_get(dev);
internals->rx_filter_original = data->promiscuous;
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
RTE_LOG(INFO, PMD, "NFB device ("
PCI_PRI_FMT ") successfully initialized\n",
pci_addr->domain, pci_addr->bus, pci_addr->devid,
if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
"mac=%02x:%02x:%02x:%02x:%02x:%02x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
data->mac_addrs = &internals->eth_addr;
data->promiscuous = 1;
data->all_multicast = 1;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->dev_ops = &ops;
data->promiscuous = 0;
data->all_multicast = 0;
data->scattered_rx = 0;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Get maximum number of supported MAC entries */
max_entries = octeontx_bgx_port_mac_entries_get(nic->port_id);
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Zero out everything after OTX2_DEV to allow proper dev_reset() */
memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
data->mac_addrs = &(*internals)->eth_addr;
data->promiscuous = 1;
data->all_multicast = 1;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/*
* NOTE: we'll replace the data element, of originally allocated
eth_dev->data->nb_rx_queues = 1;
eth_dev->data->nb_tx_queues = 1;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* For link status, open the PFE CDEV; Error from this function
* is silently ignored; In case of error, the link status will not
* be available.
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* @DPDK */
edev->vendor_id = pci_dev->id.vendor_id;
data->mac_addrs = &internals->address;
data->promiscuous = 1;
data->all_multicast = 1;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->dev_ops = &ops;
data->numa_node = numa_node;
/* Copy PCI device info to the dev->data */
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
rc = sfc_kvargs_parse(sa);
if (rc != 0)
rte_ether_addr_copy(&eth_addr, data->mac_addrs);
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
PMD_INIT_LOG(INFO, "%s device %s successfully initialized",
RTE_STR(RTE_SZEDATA2_DRIVER_NAME), data->name);
/* Setup some default values */
data = dev->data;
data->dev_private = pmd;
- data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
data->numa_node = numa_node;
data->dev_link = pmd_link;
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
nic->device_id = pci_dev->id.device_id;
nic->vendor_id = pci_dev->id.vendor_id;
internal->flags = flags;
internal->disable_flags = disable_flags;
data->dev_link = pmd_link;
- data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
data->promiscuous = 1;
data->all_multicast = 1;
else
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
count = RTE_NB_STATS;
- count += nb_rxqs * RTE_NB_RXQ_STATS;
- count += nb_txqs * RTE_NB_TXQ_STATS;
+ if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
+ count += nb_rxqs * RTE_NB_RXQ_STATS;
+ count += nb_txqs * RTE_NB_TXQ_STATS;
+ }
return count;
}
sizeof(xstats_names[0].name));
cnt_used_entries++;
}
+
+ if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
+ return cnt_used_entries;
+
num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (id_queue = 0; id_queue < num_q; id_queue++) {
for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
xstats[count++].value = val;
}
+ if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
+ return count;
+
/* per-rxq stats */
for (q = 0; q < nb_rxqs; q++) {
for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
/* Return generic statistics */
- count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
- (nb_txqs * RTE_NB_TXQ_STATS);
+ count = RTE_NB_STATS;
+ if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
+ count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
/* implemented by the driver */
if (dev->dev_ops->xstats_get != NULL) {
#define RTE_ETH_DEV_REPRESENTOR 0x0010
/** Device does not support MAC change after started */
#define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
+/**
+ * Queue xstats filled automatically by the ethdev layer.
+ * PMDs filling the queue xstats themselves should not set this flag.
+ */
+#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040
/**
* Iterates over valid ethdev ports owned by a specific owner.