/*
 * Compressed CQE context.
 *
 * Bookkeeping for iterating over a compressed CQE session: indices into
 * the mini-CQE array plus the CQ consumer index and session CQE count.
 * NOTE(review): the exact ai/ca/na index semantics are presumed from the
 * names -- confirm against the CQE decompression routine.
 */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};
-
/*
 * Multi-Packet RQ buffer header.
 *
 * The stride data lives immediately after this header in the same
 * allocation; see mlx5_mprq_buf_addr().
 */
struct mlx5_mprq_buf {
	struct rte_mempool *mp; /* Mempool the buffer was allocated from. */
	rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
} __rte_cache_aligned;
-
/*
 * Get pointer to the first stride.
 *
 * (ptr) + 1 on a struct mlx5_mprq_buf * steps over the entire header
 * (including the headroom padding), yielding the address of the data
 * that follows it in the same allocation.
 */
#define mlx5_mprq_buf_addr(ptr) ((ptr) + 1)
-
/* Rx queue error recovery state; stored in mlx5_rxq_data.err_state. */
enum mlx5_rxq_err_state {
	MLX5_RXQ_ERR_STATE_NO_ERROR = 0, /* Queue operating normally. */
	MLX5_RXQ_ERR_STATE_NEED_RESET, /* Error detected; queue needs reset. */
	MLX5_RXQ_ERR_STATE_NEED_READY, /* Reset done; queue must be readied. */
};
-
/*
 * RX queue descriptor (data-path structure).
 *
 * The leading bitfields pack exactly 32 bits (28 used + 4 reserved).
 * NOTE(review): several field comments below are inferred from names
 * (marked "presumed") -- confirm against the Rx burst implementation.
 */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int strd_num_n:5; /* Log 2 of the number of stride. */
	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
	unsigned int :4; /* Remaining bits. */
	volatile uint32_t *rq_db; /* RQ doorbell (presumed; confirm). */
	volatile uint32_t *cq_db; /* CQ doorbell (presumed; confirm). */
	uint16_t port_id; /* DPDK port ID (presumed; confirm). */
	uint32_t rq_ci; /* RQ consumer index (presumed; confirm). */
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint32_t rq_pi; /* RQ producer index (presumed; confirm). */
	uint32_t cq_ci; /* CQ consumer index (presumed; confirm). */
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	union {
		struct rxq_zip zip; /* Compressed context. */
		uint16_t decompressed;
		/* Number of ready mbufs decompressed from the CQ. */
	};
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile void *wqes; /* Work queue entries ring. */
	volatile struct mlx5_cqe(*cqes)[]; /* Completion queue entries ring. */
	RTE_STD_C11
	union {
		struct rte_mbuf *(*elts)[]; /* Mbuf ring (SG mode). */
		struct mlx5_mprq_buf *(*mprq_bufs)[]; /* Multi-Packet RQ bufs. */
	};
	struct rte_mempool *mp; /* Mempool for mbuf allocation. */
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_stats stats; /* Rx queue counters. */
	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock_cq;
	/* CQ (UAR) access lock required for 32bit implementations */
#endif
	uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;
-
/* Backend type of an Rx queue object; selects the union member in
 * struct mlx5_rxq_obj. */
enum mlx5_rxq_obj_type {
	MLX5_RXQ_OBJ_TYPE_IBV, /* mlx5_rxq_obj with ibv_wq. */
	MLX5_RXQ_OBJ_TYPE_DEVX_RQ, /* mlx5_rxq_obj with mlx5_devx_rq. */
};
-
/*
 * Verbs/DevX Rx queue elements.
 *
 * The union member in use is determined by 'type'
 * (enum mlx5_rxq_obj_type).
 */
struct mlx5_rxq_obj {
	LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
	struct ibv_cq *cq; /* Completion Queue. */
	enum mlx5_rxq_obj_type type; /* Discriminator for the union below. */
	RTE_STD_C11
	union {
		struct ibv_wq *wq; /* Work Queue. */
		struct mlx5_devx_obj *rq; /* DevX object for Rx Queue. */
	};
	struct ibv_comp_channel *channel; /* Completion event channel. */
};
-
/*
 * RX queue control descriptor.
 *
 * Control-plane wrapper around the data-path struct mlx5_rxq_data.
 */
struct mlx5_rxq_ctrl {
	struct mlx5_rxq_data rxq; /* Data path structure. */
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
	uint32_t wqn; /* WQ number. */
	uint16_t dump_file_n; /* Number of dump files. */
};
-
/* Backend type of an indirection table; selects the union member in
 * struct mlx5_ind_table_obj. */
enum mlx5_ind_tbl_type {
	MLX5_IND_TBL_TYPE_IBV, /* Verbs indirection table. */
	MLX5_IND_TBL_TYPE_DEVX, /* DevX RQT object. */
};
-
/*
 * Indirection table.
 *
 * Variable-length: the trailing flexible array 'queues' holds
 * 'queues_n' entries, so allocations must size it accordingly.
 */
struct mlx5_ind_table_obj {
	LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	enum mlx5_ind_tbl_type type; /* Discriminator for the union below. */
	RTE_STD_C11
	union {
		struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
		struct mlx5_devx_obj *rqt; /* DevX RQT object. */
	};
	uint32_t queues_n; /**< Number of queues in the list. */
	uint16_t queues[]; /**< Queue list. */
};
-
/*
 * Hash Rx queue.
 *
 * Variable-length: the trailing flexible array 'rss_key' holds
 * 'rss_key_len' bytes, so allocations must size it accordingly.
 */
struct mlx5_hrxq {
	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
	struct ibv_qp *qp; /* Verbs queue pair. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	void *action; /* DV QP action pointer. */
#endif
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint32_t rss_key_len; /* Hash key length in bytes. */
	uint8_t rss_key[]; /* Hash key. */
};
-