if (rc != 0)
goto fail_estimate_rsrc_limits;
+ sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
+ SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));
+
+ sa->rxq_min_entries = encp->enc_rxq_min_ndescs;
+ SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries));
+
sa->txq_max_entries = encp->enc_txq_max_ndescs;
SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
unsigned int rxq_max;
unsigned int txq_max;
+ unsigned int rxq_max_entries;
+ unsigned int rxq_min_entries;
+
unsigned int txq_max_entries;
unsigned int txq_min_entries;
struct sfc_dp_queue dpq;
};
+/** Datapath receive queue descriptor number limitations */
+struct sfc_dp_rx_hw_limits {
+	/* Maximum number of Rx descriptors the HW supports per queue */
+	unsigned int rxq_max_entries;
+	/* Minimum number of Rx descriptors the HW supports per queue */
+	unsigned int rxq_min_entries;
+};
+
/**
* Datapath receive queue creation information.
*
* @return 0 or positive errno.
*/
typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
+ struct sfc_dp_rx_hw_limits *limits,
struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
static int
sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ struct sfc_dp_rx_hw_limits *limits,
struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
mp_info.contig_block_size),
SFC_EF10_RX_WPTR_ALIGN + 1);
- if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) {
- *rxq_entries = EFX_RXQ_MINNDESCS;
+ if (nb_hw_rx_desc <= limits->rxq_min_entries) {
+ *rxq_entries = limits->rxq_min_entries;
} else {
*rxq_entries = rte_align32pow2(nb_hw_rx_desc);
- if (*rxq_entries > EFX_RXQ_MAXNDESCS)
+ if (*rxq_entries > limits->rxq_max_entries)
return EINVAL;
}
static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
static int
sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ struct sfc_dp_rx_hw_limits *limits,
__rte_unused struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
* rte_ethdev API guarantees that the number meets min, max and
* alignment requirements.
*/
- if (nb_rx_desc <= EFX_RXQ_MINNDESCS)
- *rxq_entries = EFX_RXQ_MINNDESCS;
+ if (nb_rx_desc <= limits->rxq_min_entries)
+ *rxq_entries = limits->rxq_min_entries;
else
*rxq_entries = rte_align32pow2(nb_rx_desc);
}
/* Initialize to hardware limits */
- dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
- dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
+ dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries;
+ dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries;
/* The RXQ hardware requires that the descriptor count is a power
* of 2, but rx_desc_lim cannot properly describe that constraint.
*/
- dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
+ dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries;
/* Initialize to hardware limits */
dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ __rte_unused struct sfc_dp_rx_hw_limits *limits,
__rte_unused struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
struct sfc_evq *evq;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
+ struct sfc_dp_rx_hw_limits hw_limits;
- rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
- &evq_entries, &rxq_max_fill_level);
+ memset(&hw_limits, 0, sizeof(hw_limits));
+ hw_limits.rxq_max_entries = sa->rxq_max_entries;
+ hw_limits.rxq_min_entries = sa->rxq_min_entries;
+
+ rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, &hw_limits, mb_pool,
+ &rxq_entries, &evq_entries,
+ &rxq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
- SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
- SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
+ SFC_ASSERT(rxq_entries >= sa->rxq_min_entries);
+ SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
offloads = rx_conf->offloads |
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
unsigned int max_entries;
- max_entries = EFX_RXQ_MAXNDESCS;
+ max_entries = encp->enc_rxq_max_ndescs;
SFC_ASSERT(rte_is_power_of_2(max_entries));
rxq_info->max_entries = max_entries;