sa->txq_max_entries = encp->enc_txq_max_ndescs;
SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
+ sa->txq_min_entries = encp->enc_txq_min_ndescs;
+ SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries));
+
rc = sfc_intr_attach(sa);
if (rc != 0)
goto fail_intr_attach;
unsigned int txq_max;
unsigned int txq_max_entries;
+ unsigned int txq_min_entries;
uint32_t evq_flags;
unsigned int evq_count;
struct sfc_dp_queue dpq;
};
+/** Datapath transmit queue descriptor number limitations */
+struct sfc_dp_tx_hw_limits {
+ unsigned int txq_max_entries;
+ unsigned int txq_min_entries;
+};
+
/**
 * Get size of transmit and event queue rings by the number of Tx
 * descriptors.
*
* @return 0 or positive errno.
*/
typedef int (sfc_dp_tx_qsize_up_rings_t)(uint16_t nb_tx_desc,
+ struct sfc_dp_tx_hw_limits *limits,
unsigned int *txq_entries,
unsigned int *evq_entries,
unsigned int *txq_max_fill_level);
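
As a reading aid (not part of the patch), a minimal sketch of a datapath callback that honours both fields of sfc_dp_tx_hw_limits; the function name is hypothetical, returning EINVAL for oversized requests is an assumed policy, and rte_align32pow2() comes from <rte_common.h>:

/*
 * Illustrative only: clamp the request to the hardware minimum, round it
 * up to a power of 2 and reject anything above the hardware maximum.
 */
static int
sfc_example_tx_qsize_up_rings(uint16_t nb_tx_desc,
			      struct sfc_dp_tx_hw_limits *limits,
			      unsigned int *txq_entries,
			      unsigned int *evq_entries,
			      unsigned int *txq_max_fill_level)
{
	unsigned int entries;

	if (nb_tx_desc <= limits->txq_min_entries)
		entries = limits->txq_min_entries;
	else
		entries = rte_align32pow2(nb_tx_desc);

	if (entries > limits->txq_max_entries)
		return EINVAL;	/* positive errno, as documented above */

	*txq_entries = entries;
	*evq_entries = entries;
	*txq_max_fill_level = nb_tx_desc;
	return 0;
}
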
static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
static int
sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
+ struct sfc_dp_tx_hw_limits *limits,
unsigned int *txq_entries,
unsigned int *evq_entries,
unsigned int *txq_max_fill_level)
* rte_ethdev API guarantees that the number meets min, max and
* alignment requirements.
*/
- if (nb_tx_desc <= EFX_TXQ_MINNDESCS)
- *txq_entries = EFX_TXQ_MINNDESCS;
+ if (nb_tx_desc <= limits->txq_min_entries)
+ *txq_entries = limits->txq_min_entries;
else
*txq_entries = rte_align32pow2(nb_tx_desc);
/* Initialize to hardware limits */
dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
- dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
+ dev_info->tx_desc_lim.nb_min = sa->txq_min_entries;
/*
* The TXQ hardware requires that the descriptor count is a power
* of 2, but tx_desc_lim cannot properly describe that constraint
*/
- dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
+ dev_info->tx_desc_lim.nb_align = sa->txq_min_entries;
if (sap->dp_rx->get_dev_info != NULL)
sap->dp_rx->get_dev_info(dev_info);
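
From the application side these limits surface through rte_eth_dev_info_get() as tx_desc_lim; a minimal usage sketch (port id, queue id, requested ring sizes and the helper name are illustrative, error handling is trimmed, and the port is assumed to be already configured):

#include <stdint.h>
#include <stdio.h>

#include <rte_ethdev.h>

static int
setup_one_txq(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	uint16_t nb_rxd = 1024;		/* illustrative request */
	uint16_t nb_txd = 1024;		/* illustrative request */
	int rc;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* nb_max/nb_min/nb_align now reflect the adapter's TxQ limits */
	printf("TxQ descs: min %u max %u align %u\n",
	       dev_info.tx_desc_lim.nb_min,
	       dev_info.tx_desc_lim.nb_max,
	       dev_info.tx_desc_lim.nb_align);

	/* Clamp the requested ring sizes to the reported limits */
	rc = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (rc != 0)
		return rc;

	return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
				      rte_eth_dev_socket_id(port_id), NULL);
}
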
int rc = 0;
struct sfc_dp_tx_qcreate_info info;
uint64_t offloads;
+ struct sfc_dp_tx_hw_limits hw_limits;
sfc_log_init(sa, "TxQ = %u", sw_index);
- rc = sa->priv.dp_tx->qsize_up_rings(nb_tx_desc, &txq_entries,
- &evq_entries, &txq_max_fill_level);
+ memset(&hw_limits, 0, sizeof(hw_limits));
+ hw_limits.txq_max_entries = sa->txq_max_entries;
+ hw_limits.txq_min_entries = sa->txq_min_entries;
+
+ rc = sa->priv.dp_tx->qsize_up_rings(nb_tx_desc, &hw_limits,
+ &txq_entries, &evq_entries,
+ &txq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
- SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS);
+ SFC_ASSERT(txq_entries >= sa->txq_min_entries);
SFC_ASSERT(txq_entries <= sa->txq_max_entries);
SFC_ASSERT(txq_entries >= nb_tx_desc);
SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
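
To see why these assertions hold, a standalone check that mirrors the EF10-style sizing above with assumed limits (512 min, 4096 max; the driver reads the real values from the NIC config) and one sample request:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for rte_align32pow2(): round up to the next power of 2 */
static uint32_t
align32pow2(uint32_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1;
}

int
main(void)
{
	const unsigned int txq_min_entries = 512;	/* assumed */
	const unsigned int txq_max_entries = 4096;	/* assumed */
	const uint16_t nb_tx_desc = 1000;		/* sample request */
	unsigned int txq_entries;

	if (nb_tx_desc <= txq_min_entries)
		txq_entries = txq_min_entries;
	else
		txq_entries = align32pow2(nb_tx_desc);	/* 1000 -> 1024 */

	/* Mirrors the SFC_ASSERT() checks in sfc_tx_qinit() above */
	assert(txq_entries >= txq_min_entries);
	assert(txq_entries <= txq_max_entries);
	assert(txq_entries >= nb_tx_desc);

	printf("requested %u -> ring size %u\n", nb_tx_desc, txq_entries);
	return 0;
}
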
static sfc_dp_tx_qsize_up_rings_t sfc_efx_tx_qsize_up_rings;
static int
sfc_efx_tx_qsize_up_rings(uint16_t nb_tx_desc,
+ __rte_unused struct sfc_dp_tx_hw_limits *limits,
unsigned int *txq_entries,
unsigned int *evq_entries,
unsigned int *txq_max_fill_level)