if (rc != 0)
goto fail_estimate_rsrc_limits;
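+ /* EVQ size limits reported by the NIC config; asserted to be powers of 2 */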
+ sa->evq_max_entries = encp->enc_evq_max_nevs;
+ SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries));
+
+ sa->evq_min_entries = encp->enc_evq_min_nevs;
+ SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries));
+
sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));
unsigned int txq_max_entries;
unsigned int txq_min_entries;
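+ /* Minimum and maximum event queue sizes supported by the NIC */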
+ unsigned int evq_max_entries;
+ unsigned int evq_min_entries;
+
uint32_t evq_flags;
unsigned int evq_count;
struct sfc_dp_rx_hw_limits {
unsigned int rxq_max_entries;
unsigned int rxq_min_entries;
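+ /* EVQ size limits used when sizing the event queue for an Rx queue */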
+ unsigned int evq_max_entries;
+ unsigned int evq_min_entries;
};
/**
1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;
*evq_entries = rte_align32pow2(max_events);
- *evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS);
- *evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS);
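+ /* Clamp the EVQ size to the limits reported by the NIC */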
+ *evq_entries = RTE_MAX(*evq_entries, limits->evq_min_entries);
+ *evq_entries = RTE_MIN(*evq_entries, limits->evq_max_entries);
/*
* May be even maximum event queue size is insufficient to handle
sa->mgmt_evq_index = 0;
rte_spinlock_init(&sa->mgmt_evq_lock);
- rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
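+ /* Size the management EVQ at the minimum supported by the NIC */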
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, sa->evq_min_entries,
sa->socket_id, &sa->mgmt_evq);
if (rc != 0)
goto fail_mgmt_evq_init;
extern "C" {
#endif
-/* Number of entries in the management event queue */
-#define SFC_MGMT_EVQ_ENTRIES (EFX_EVQ_MINNEVS)
-
struct sfc_adapter;
struct sfc_dp_rxq;
struct sfc_dp_txq;
memset(&hw_limits, 0, sizeof(hw_limits));
hw_limits.rxq_max_entries = sa->rxq_max_entries;
hw_limits.rxq_min_entries = sa->rxq_min_entries;
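+ /* Pass EVQ size limits so qsize_up_rings() can size the event queue */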
+ hw_limits.evq_max_entries = sa->evq_max_entries;
+ hw_limits.evq_min_entries = sa->evq_min_entries;
rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, &hw_limits, mb_pool,
&rxq_entries, &evq_entries,