        if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) ==
                      MAX_RCQ_ENTRIES(rxq)))
                rx_cq_cons_sb++;
+
+       PMD_RX_LOG(DEBUG, "hw CQ cons = %d, sw CQ cons = %d",
+                  rx_cq_cons_sb, rxq->rx_cq_head);
+
        return rxq->rx_cq_head != rx_cq_cons_sb;
}
        uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
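+       /* Rx CQ/BD indices below are read and updated under the per-queue Rx lock */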
+       rte_spinlock_lock(&(fp)->rx_mtx);
+
        rxq = sc->rx_queues[fp->index];
        if (!rxq) {
                PMD_RX_LOG(ERR, "RX queue %d is NULL", fp->index);
+               rte_spinlock_unlock(&(fp)->rx_mtx);
                return 0;
        }
        rxq->rx_cq_head = sw_cq_cons;
        rxq->rx_cq_tail = sw_cq_prod;
+       PMD_RX_LOG(DEBUG, "BD prod = %d, sw CQ prod = %d",
+                  bd_prod_fw, sw_cq_prod);
+
        /* Update producers */
        bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod);
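+       /* Producer updates are complete; release the Rx lock */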
+       rte_spinlock_unlock(&(fp)->rx_mtx);
+
        return sw_cq_cons != hw_cq_cons;
}
        uint16_t len, pad;
        struct rte_mbuf *rx_mb = NULL;
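+       /* Hold the Rx lock while completion queue entries are examined and drained */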
+       rte_spinlock_lock(&(fp)->rx_mtx);
+
        hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
        if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
             USABLE_RCQ_ENTRIES_PER_PAGE) {
        sw_cq_cons = rxq->rx_cq_head;
        sw_cq_prod = rxq->rx_cq_tail;
-       if (sw_cq_cons == hw_cq_cons)
+       if (sw_cq_cons == hw_cq_cons) {
+               rte_spinlock_unlock(&(fp)->rx_mtx);
                return 0;
+       }
        while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {
        bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);
+       rte_spinlock_unlock(&(fp)->rx_mtx);
+
        return nb_rx;
}