struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
struct bnxt_rx_queue {
+ rte_spinlock_t lock; /* Synchronize between rx_queue_stop
+ * and fast path
+ */
struct rte_mempool *mb_pool; /* mbuf pool for RX ring */
struct rte_mbuf *pkt_first_seg; /* 1st seg of pkt */
struct rte_mbuf *pkt_last_seg; /* Last seg of pkt */
uint16_t nb_rx_hold; /* num held free RX desc */
uint16_t rx_free_thresh; /* max free RX desc to hold */
uint16_t queue_id; /* RX queue index */
+#ifdef RTE_ARCH_X86
+ uint16_t rxrearm_nb; /* number of descs to reinit. */
+ uint16_t rxrearm_start; /* next desc index to reinit. */
+#endif
uint16_t reg_idx; /* RX queue register index */
uint16_t port_id; /* Device port identifier */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
uint32_t rx_buf_use_size; /* useable size */
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
+ struct bnxt_cp_ring_info *nq_ring;
rte_atomic64_t rx_mbuf_alloc_fail;
const struct rte_memzone *mz;
};
uint16_t rx_queue_id);
/*
 * Stop the RX queue identified by rx_queue_id on device dev.
 * NOTE(review): presumably backs the ethdev rx_queue_stop op and returns
 * 0 on success / negative errno on failure per DPDK convention — confirm
 * against the implementation.
 */
int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
/*
 * Release the mbufs held by rxq's rings back to their mempool.
 * NOTE(review): contract inferred from the name — confirm against the
 * implementation in the corresponding .c file.
 */
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq);
#endif