volatile void *mem_bar;
};
+/**
+ * Get Tx datapath-specific device info.
+ *
+ * @param dev_info Device info to be adjusted
+ */
+typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
+
/**
* Get size of transmit and event queue rings by the number of Tx
* descriptors.
#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8
#define SFC_DP_TX_FEAT_MULTI_POOL 0x10
#define SFC_DP_TX_FEAT_REFCNT 0x20
+ sfc_dp_tx_get_dev_info_t *get_dev_info;
sfc_dp_tx_qsize_up_rings_t *qsize_up_rings;
sfc_dp_tx_qcreate_t *qcreate;
sfc_dp_tx_qdestroy_t *qdestroy;
return pktp - &tx_pkts[0];
}
+static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
+static void
+sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
+{
+ /*
+ * The number of descriptors only defines the maximum number of
+ * pushed descriptors (fill level); the datapath rounds the ring
+ * size up to a power of two itself, so no minimum or alignment
+ * constraints need to be imposed here.
+ */
+ dev_info->tx_desc_lim.nb_min = 1;
+ dev_info->tx_desc_lim.nb_align = 1;
+}
static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
static int
unsigned int *evq_entries,
unsigned int *txq_max_fill_level)
{
- *txq_entries = nb_tx_desc;
- *evq_entries = nb_tx_desc;
- *txq_max_fill_level = SFC_EF10_TXQ_LIMIT(*txq_entries);
+ /*
+ * The rte_ethdev API guarantees that the number of descriptors
+ * requested by the application meets the min, max and alignment
+ * requirements reported in device info.
+ */
+ if (nb_tx_desc <= EFX_TXQ_MINNDESCS)
+ *txq_entries = EFX_TXQ_MINNDESCS;
+ else
+ *txq_entries = rte_align32pow2(nb_tx_desc);
+
+ *evq_entries = *txq_entries;
+
+ *txq_max_fill_level = RTE_MIN(nb_tx_desc,
+ SFC_EF10_TXQ_LIMIT(*evq_entries));
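+
+ /*
+ * Worked example: nb_tx_desc = 1000 yields txq_entries =
+ * rte_align32pow2(1000) = 1024 ring entries, while the fill level
+ * stays capped at the requested 1000 (or at
+ * SFC_EF10_TXQ_LIMIT(1024) if that happens to be smaller).
+ */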
return 0;
}
SFC_DP_TX_FEAT_MULTI_POOL |
SFC_DP_TX_FEAT_REFCNT |
SFC_DP_TX_FEAT_MULTI_PROCESS,
+ .get_dev_info = sfc_ef10_get_dev_info,
.qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
.type = SFC_DP_TX,
},
.features = SFC_DP_TX_FEAT_MULTI_PROCESS,
+ .get_dev_info = sfc_ef10_get_dev_info,
.qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
*/
dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
+ /* Initialize to hardware limits */
dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
/*
if (sa->dp_rx->get_dev_info != NULL)
sa->dp_rx->get_dev_info(dev_info);
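+
+ /*
+ * Let the Tx datapath adjust the limits as well; e.g. the EF10
+ * native datapath relaxes nb_min and nb_align to 1 since it rounds
+ * the ring size up internally.
+ */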
+ if (sa->dp_tx->get_dev_info != NULL)
+ sa->dp_tx->get_dev_info(dev_info);
}
static const uint32_t *
&txq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
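+
+ /*
+ * Sanity-check the sizes returned by the datapath: the ring must be
+ * large enough for the requested descriptors and within hardware
+ * limits, and the fill level must not exceed the application request.
+ */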
+ SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS);
+ SFC_ASSERT(txq_entries <= sa->txq_max_entries);
+ SFC_ASSERT(txq_entries >= nb_tx_desc);
+ SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
if (rc != 0)
SFC_ASSERT(sw_index < sa->txq_count);
txq_info = &sa->txq_info[sw_index];
- SFC_ASSERT(txq_entries <= sa->txq_max_entries);
txq_info->entries = txq_entries;
rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,