rte_mempool_in_use_count(rxq->mb_pool));
return -ENOMEM;
}
- rxq->sw_rx_ring[idx].mbuf = new_mb;
- rxq->sw_rx_ring[idx].page_offset = 0;
+ rxq->sw_rx_ring[idx] = new_mb;
mapping = rte_mbuf_data_iova_default(new_mb);
/* Advance PROD and get BD pointer */
rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
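This hunk and the ones below all rest on a data-structure change the excerpt itself does not show: sw_rx_ring stops being an array of qede_rx_entry wrappers and becomes a plain array of mbuf pointers. A minimal before/after sketch, assuming the wrapper held only the two fields the removed lines touch (field types assumed; note page_offset was only ever written as zero):

    /* Before: each ring slot wrapped the mbuf pointer. */
    struct qede_rx_entry {
            struct rte_mbuf *mbuf;
            uint16_t page_offset;           /* only ever set to 0 */
    };
    struct qede_rx_entry *sw_rx_ring;       /* in struct qede_rx_queue */

    /* After: the slot is the mbuf pointer itself, so a bulk mempool
     * get can write straight into the ring.
     */
    struct rte_mbuf **sw_rx_ring;           /* in struct qede_rx_queue */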
static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
{
- void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
struct rte_mbuf *mbuf = NULL;
struct eth_rx_bd *rx_bd;
dma_addr_t mapping;
int i, ret = 0;
uint16_t idx;
+ uint16_t mask = NUM_RX_BDS(rxq);
if (count > QEDE_MAX_BULK_ALLOC_COUNT)
count = QEDE_MAX_BULK_ALLOC_COUNT;
- ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
+ idx = rxq->sw_rx_prod & mask;
+
+ if (count > mask - idx + 1)
+ count = mask - idx + 1;
+
+ ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)&rxq->sw_rx_ring[idx],
+ count);
+
if (unlikely(ret)) {
PMD_RX_LOG(ERR, rxq,
"Failed to allocate %d rx buffers "
}
for (i = 0; i < count; i++) {
- mbuf = obj_p[i];
- if (likely(i < count - 1))
- rte_prefetch0(obj_p[i + 1]);
+ rte_prefetch0(rxq->sw_rx_ring[(idx + 1) & mask]);
+ mbuf = rxq->sw_rx_ring[idx & mask];
- idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
- rxq->sw_rx_ring[idx].mbuf = mbuf;
- rxq->sw_rx_ring[idx].page_offset = 0;
mapping = rte_mbuf_data_iova_default(mbuf);
rx_bd = (struct eth_rx_bd *)
ecore_chain_produce(&rxq->rx_bd_ring);
rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- rxq->sw_rx_prod++;
+ idx++;
}
+ rxq->sw_rx_prod = idx;
return 0;
}
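The two new clamps above are what make the direct bulk fill safe: count is first capped at QEDE_MAX_BULK_ALLOC_COUNT, then at the number of slots left before sw_rx_ring wraps, because rte_mempool_get_bulk() writes the pointers linearly starting at sw_rx_ring[idx]. A standalone sketch of that wraparound clamp, using hypothetical names and assuming a power-of-two ring of mask + 1 slots:

    #include <stdint.h>

    /* Clamp a bulk request so a linear fill starting at prod stays
     * inside a power-of-two ring of (mask + 1) slots. Hypothetical
     * helper, not from the driver.
     */
    static uint16_t ring_linear_room(uint16_t prod, uint16_t mask,
                                     uint16_t want)
    {
            uint16_t idx = prod & mask;      /* first slot to fill */
            uint16_t room = mask - idx + 1;  /* slots up to the wrap */

            return want > room ? room : want;
    }

For example, with mask = 7 (an 8-slot ring) and prod = 6, a request for 5 buffers is clamped to 2: only slots 6 and 7 can be filled linearly, and the next call starts over at slot 0.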
if (rxq->sw_rx_ring) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
- if (rxq->sw_rx_ring[i].mbuf) {
- rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
- rxq->sw_rx_ring[i].mbuf = NULL;
+ if (rxq->sw_rx_ring[i]) {
+ rte_pktmbuf_free(rxq->sw_rx_ring[i]);
+ rxq->sw_rx_ring[i] = NULL;
}
}
}
static inline void
qede_reuse_page(__rte_unused struct qede_dev *qdev,
- struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
+ struct qede_rx_queue *rxq, struct rte_mbuf *curr_cons)
{
struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
- struct qede_rx_entry *curr_prod;
dma_addr_t new_mapping;
- curr_prod = &rxq->sw_rx_ring[idx];
- *curr_prod = *curr_cons;
+ rxq->sw_rx_ring[idx] = curr_cons;
- new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
- curr_prod->page_offset;
+ new_mapping = rte_mbuf_data_iova_default(curr_cons);
rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
struct qede_dev *qdev, uint8_t count)
{
- struct qede_rx_entry *curr_cons;
+ struct rte_mbuf *curr_cons;
for (; count > 0; count--) {
- curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
+ curr_cons = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
qede_reuse_page(qdev, rxq, curr_cons);
qede_rx_bd_ring_consume(rxq);
}
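With the wrapper gone, re-posting a consumed buffer becomes a single pointer store at the producer slot instead of the old struct copy (*curr_prod = *curr_cons), and the DMA address comes straight from the mbuf. A reduced model of that recycle step, with hypothetical names and the hardware BD write elided:

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Hypothetical model of the recycle step, not the driver code:
     * store the consumed mbuf back at the producer slot and advance.
     */
    static void ring_recycle_one(struct rte_mbuf **ring, uint16_t mask,
                                 uint16_t *prod, struct rte_mbuf *mb)
    {
            ring[*prod & mask] = mb;  /* one store vs. a struct copy */
            (*prod)++;
    }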
if (rte_le_to_cpu_16(len)) {
tpa_info = &rxq->tpa_info[agg_index];
cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
- curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+ curr_frag = rxq->sw_rx_ring[cons_idx];
assert(curr_frag);
curr_frag->nb_segs = 1;
curr_frag->pkt_len = rte_le_to_cpu_16(len);
return -EINVAL;
}
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
- seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ seg2 = rxq->sw_rx_ring[sw_rx_index];
qede_rx_bd_ring_consume(rxq);
pkt_len -= cur_size;
seg2->data_len = cur_size;
/* Get the data from the SW ring */
sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
- rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ rx_mb = rxq->sw_rx_ring[sw_rx_index];
assert(rx_mb != NULL);
parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
/* Prefetch next mbuf while processing current one. */
preload_idx = rxq->sw_rx_cons & num_rx_bds;
- rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+ rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
/* Update rest of the MBUF fields */
rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
/* Get the data from the SW ring */
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
- rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ rx_mb = rxq->sw_rx_ring[sw_rx_index];
assert(rx_mb != NULL);
/* Handle regular CQE or TPA start CQE */
/* Prefetch next mbuf while processing current one. */
preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
- rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+ rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
/* Update rest of the MBUF fields */
rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
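The remaining hunks are the mechanical half of the conversion: every consumer-side read of rxq->sw_rx_ring[i].mbuf becomes rxq->sw_rx_ring[i], including the prefetch of the next mbuf. A condensed model of the consume pattern these hunks share, with hypothetical names:

    #include <stdint.h>
    #include <rte_mbuf.h>
    #include <rte_prefetch.h>

    /* Hypothetical model of the consume step, not the driver code:
     * fetch the current slot's mbuf, prefetch the next one while the
     * caller processes the current packet, then advance the consumer.
     */
    static struct rte_mbuf *ring_consume_one(struct rte_mbuf **ring,
                                             uint16_t mask, uint16_t *cons)
    {
            struct rte_mbuf *mb = ring[*cons & mask];

            rte_prefetch0(ring[(*cons + 1) & mask]);
            (*cons)++;
            return mb;
    }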