static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstats *xstats, unsigned n);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
-static void i40e_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
uint16_t queue_id,
uint8_t stat_idx,
.stats_get = i40e_dev_stats_get,
.xstats_get = i40e_dev_xstats_get,
.stats_reset = i40e_dev_stats_reset,
- .xstats_reset = i40e_dev_xstats_reset,
+ .xstats_reset = i40e_dev_stats_reset,
.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
.dev_infos_get = i40e_dev_info_get,
.vlan_filter_set = i40e_vlan_filter_set,
PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
}
+/* Reset the statistics */
static void
-i40e_dev_xstats_reset(struct rte_eth_dev *dev)
+i40e_dev_stats_reset(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_hw_port_stats *hw_stats = &pf->stats;
- /* The hw registers are cleared on read */
+ /* Mark PF and VSI stats to update the offset, aka "reset" */
pf->offset_loaded = false;
- i40e_read_stats_registers(pf, hw);
+ if (pf->main_vsi)
+ pf->main_vsi->offset_loaded = false;
- /* reset software counters */
- memset(hw_stats, 0, sizeof(*hw_stats));
+ /* read the stats; the current register values are captured as the new offset */
+ i40e_read_stats_registers(pf, hw);
}
static int
return I40E_NB_XSTATS;
}
-/* Reset the statistics */
-static void
-i40e_dev_stats_reset(struct rte_eth_dev *dev)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
- /* It results in reloading the start point of each counter */
- pf->offset_loaded = false;
-}
-
static int
i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
__rte_unused uint16_t queue_id,
u64 size,
u32 alignment)
{
- static uint64_t id = 0;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
if (!mem)
return I40E_ERR_PARAM;
- id++;
- snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
+ snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
#ifdef RTE_LIBRTE_XEN_DOM0
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
alignment, RTE_PGSIZE_2M);
if (!mz)
return I40E_ERR_NO_MEMORY;
- mem->id = id;
mem->size = size;
mem->va = mz->addr;
#ifdef RTE_LIBRTE_XEN_DOM0
#else
mem->pa = mz->phys_addr;
#endif
+ mem->zone = (const void *)mz;
+ PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
+ "%"PRIu64, mz->name, mem->pa);
return I40E_SUCCESS;
}
i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
struct i40e_dma_mem *mem)
{
- if (!mem || !mem->va)
+ if (!mem)
return I40E_ERR_PARAM;
+ PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
+ "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
+ mem->pa);
+ rte_memzone_free((const struct rte_memzone *)mem->zone);
+ mem->zone = NULL;
mem->va = NULL;
mem->pa = (u64)0;
/* VMDq queue/VSI allocation */
pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
+ pf->vmdq_nb_qps = 0;
+ pf->max_nb_vmdq_vsi = 0;
if (hw->func_caps.vmdq) {
- pf->flags |= I40E_FLAG_VMDQ;
- pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
- pf->max_nb_vmdq_vsi = 1;
- PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues per VMDQ VSI, "
- "in total %u queues", pf->max_nb_vmdq_vsi,
- pf->vmdq_nb_qps,
- pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
- } else {
- pf->vmdq_nb_qps = 0;
- pf->max_nb_vmdq_vsi = 0;
+ if (qp_count < hw->func_caps.num_tx_qp) {
+ pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
+ qp_count) / pf->vmdq_nb_qp_max;
+
+ /* Limit the maximum number of VMDq vsi to the maximum
+ * ethdev can support
+ */
+ pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+ ETH_64_POOLS);
+ if (pf->max_nb_vmdq_vsi) {
+ pf->flags |= I40E_FLAG_VMDQ;
+ pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
+ PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
+ "per VMDQ VSI, in total %u queues",
+ pf->max_nb_vmdq_vsi,
+ pf->vmdq_nb_qps, pf->vmdq_nb_qps *
+ pf->max_nb_vmdq_vsi);
+ } else {
+ PMD_DRV_LOG(INFO, "No enough queues left for "
+ "VMDq");
+ }
+ } else {
+ PMD_DRV_LOG(INFO, "No queue left for VMDq");
+ }
}
qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
vsi_count += pf->max_nb_vmdq_vsi;