{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_rxq_info *rxq_info;
-	struct sfc_rxq *rxq;
	sfc_adapter_lock(sa);
	SFC_ASSERT(rx_queue_id < sa->rxq_count);
	rxq_info = &sa->rxq_info[rx_queue_id];
-	rxq = rxq_info->rxq;
-	SFC_ASSERT(rxq != NULL);
-	qinfo->mp = rxq->refill_mb_pool;
-	qinfo->conf.rx_free_thresh = rxq->refill_threshold;
+	qinfo->mp = rxq_info->refill_mb_pool;
+	qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
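/*
 * Note on the hunk above: the Rx queue info callback now reports the refill
 * mempool and Rx free threshold from the shared per-queue info, so the
 * control-path sfc_rxq object (and the assert that it exists) is no longer
 * needed here.  Below is an illustrative sketch, not part of the patch, of
 * how an application observes these values through the generic ethdev API;
 * the port/queue ids are arbitrary assumptions.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void
print_rxq_refill_params(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	/* Invokes the driver's rxq_info_get callback shown above */
	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;

	printf("rxq %u: mempool %s, rx_free_thresh %u\n", queue_id,
	       qinfo.mp->name, qinfo.conf.rx_free_thresh);
}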
			rxq_info->type_flags, evq->common, &rxq->common);
		break;
	case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
-		struct rte_mempool *mp = rxq->refill_mb_pool;
+		struct rte_mempool *mp = rxq_info->refill_mb_pool;
		struct rte_mempool_info mp_info;
		rc = rte_mempool_ops_get_info(mp, &mp_info);
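/*
 * Note on the hunk above: on the queue start path, the
 * EFX_RXQ_TYPE_ES_SUPER_BUFFER (equal stride super-buffer) case likewise
 * takes the refill mempool from the shared rxq_info before querying the
 * mempool driver with rte_mempool_ops_get_info().  A minimal sketch of such
 * a query follows; it assumes the point of the lookup is to verify
 * contiguous-block dequeue support and is not the driver's actual check.
 */
#include <rte_mempool.h>

static int
check_contig_block_pool(const struct rte_mempool *mp)
{
	struct rte_mempool_info mp_info;

	if (rte_mempool_ops_get_info(mp, &mp_info) != 0)
		return -1;

	/* A zero contig_block_size means the mempool ops cannot dequeue
	 * contiguous blocks of objects. */
	return mp_info.contig_block_size > 0 ? 0 : -1;
}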
	rxq->evq = evq;
	rxq->hw_index = sw_index;
-	rxq->refill_threshold =
+	rxq_info->refill_threshold =
		RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
-	rxq->refill_mb_pool = mb_pool;
+	rxq_info->refill_mb_pool = mb_pool;
	rxq->buf_size = buf_size;
	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
	if (rc != 0)
		goto fail_dma_alloc;
	memset(&info, 0, sizeof(info));
-	info.refill_mb_pool = rxq->refill_mb_pool;
+	info.refill_mb_pool = rxq_info->refill_mb_pool;
	info.max_fill_level = rxq_max_fill_level;
-	info.refill_threshold = rxq->refill_threshold;
+	info.refill_threshold = rxq_info->refill_threshold;
	info.buf_size = buf_size;
	info.batch_max = encp->enc_rx_batch_max;
	info.prefix_size = encp->enc_rx_prefix_size;
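/*
 * Note on the hunk above: queue setup becomes the single writer of the
 * refill parameters.  It stores them in the shared rxq_info entry rather
 * than in the control-path rxq, and the datapath creation info ("info",
 * presumably a struct sfc_dp_rx_qcreate_info) is populated from that shared
 * copy, so the fast-path queue still receives the same values.
 */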
	efx_rxq_t *common;
	efsys_mem_t mem;
	unsigned int hw_index;
-	unsigned int refill_threshold;
-	struct rte_mempool *refill_mb_pool;
	uint16_t buf_size;
	struct sfc_dp_rxq *dp;
	unsigned int state;
	struct sfc_rxq *rxq;
	boolean_t deferred_start;
	boolean_t deferred_started;
+	unsigned int refill_threshold;
+	struct rte_mempool *refill_mb_pool;
};
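/*
 * Net effect of the header hunks above: refill_threshold and refill_mb_pool
 * leave the control-path queue structure (struct sfc_rxq, whose remaining
 * fields appear first) and move into the per-queue info array indexed as
 * sa->rxq_info[] (struct sfc_rxq_info, whose trailing fields appear second),
 * matching the rxq_info-> accesses in the hunks above.  A minimal sketch of
 * the resulting access pattern; the helper name is hypothetical and not part
 * of the patch:
 */
static inline struct rte_mempool *
sfc_rxq_info_refill_mb_pool(struct sfc_adapter *sa, unsigned int sw_index)
{
	/* Only the shared per-queue info is needed to find the mempool now */
	return sa->rxq_info[sw_index].refill_mb_pool;
}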
int sfc_rx_configure(struct sfc_adapter *sa);