};
/* TX queue descriptor. */
-RTE_STD_C11
+__extension__
struct txq {
- uint16_t elts_head; /* Current index in (*elts)[]. */
- uint16_t elts_tail; /* First element awaiting completion. */
+ uint16_t elts_head; /* Current counter in (*elts)[]. */
+ uint16_t elts_tail; /* Counter of first element awaiting completion. */
uint16_t elts_comp; /* Counter since last completion request. */
uint16_t mpw_comp; /* WQ index since last completion request. */
uint16_t cq_ci; /* Consumer index for completion queue. */
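The reworded comments are the substance of this hunk: elts_head and elts_tail are free-running uint16_t counters rather than wrapped array indexes. Occupancy then falls out of plain unsigned subtraction, which stays correct across 16-bit wraparound, and a counter only becomes an index into (*elts)[] after masking. A minimal sketch of that arithmetic, assuming the ring size is a power of two (the helper names are illustrative, not taken from the driver):

    #include <stdint.h>

    /* Elements in flight; uint16_t subtraction remains correct even
     * after elts_head has wrapped past 65535 while elts_tail has not. */
    static inline uint16_t
    txq_used(uint16_t elts_head, uint16_t elts_tail)
    {
            return (uint16_t)(elts_head - elts_tail);
    }

    /* A free-running counter becomes an index into (*elts)[] only after
     * masking; elts_n must be a power of two for the mask to be valid. */
    static inline uint16_t
    txq_idx(uint16_t counter, uint16_t elts_n)
    {
            return counter & (uint16_t)(elts_n - 1);
    }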
/* mlx5_rxq.c */
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
-int priv_intr_efd_enable(struct priv *priv);
-void priv_intr_efd_disable(struct priv *priv);
-int priv_create_intr_vec(struct priv *priv);
-void priv_destroy_intr_vec(struct priv *priv);
void rxq_cleanup(struct rxq_ctrl *);
int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
uint16_t mlx5_rx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
+int priv_rx_intr_vec_enable(struct priv *priv);
+void priv_rx_intr_vec_disable(struct priv *priv);
+#ifdef HAVE_UPDATE_CQ_CI
+int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+#endif /* HAVE_UPDATE_CQ_CI */
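The four priv_intr_efd_*() and priv_*_intr_vec() prototypes removed above collapse into this pair: priv_rx_intr_vec_enable()/priv_rx_intr_vec_disable() manage the Rx interrupt vector as a whole (typically at device start/stop), while mlx5_rx_intr_enable()/mlx5_rx_intr_disable() back the per-queue rx_queue_intr_enable/rx_queue_intr_disable ethdev callbacks. The HAVE_UPDATE_CQ_CI guard presumably tracks whether the Verbs library can update the CQ consumer index, without which a queue interrupt cannot be re-armed. A hedged sketch of the application-side flow these callbacks ultimately serve (port, queue, and timeout values are illustrative):

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>

    static int
    wait_for_rx(uint16_t port_id, uint16_t queue_id)
    {
            struct rte_epoll_event event;
            int ret;

            /* Assumes the queue's event fd was bound once at init to this
             * thread's epoll instance with:
             * rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
             *     RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL); */

            /* Arm the queue interrupt; for mlx5 this lands in
             * mlx5_rx_intr_enable(). */
            ret = rte_eth_dev_rx_intr_enable(port_id, queue_id);
            if (ret)
                    return ret;
            /* Sleep until the queue signals traffic, up to 1000 ms. */
            ret = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, 1000);
            /* Disarm before resuming polling; lands in
             * mlx5_rx_intr_disable(). */
            rte_eth_dev_rx_intr_disable(port_id, queue_id);
            return ret;
    }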
/* mlx5_txq.c */

/* mlx5_rxtx.c */
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);
-int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
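These two prototypes are not dropped but relocated: they reappear above under the HAVE_UPDATE_CQ_CI guard, so they are only declared when they can actually be implemented. The neighboring mlx5_rx_descriptor_status()/mlx5_tx_descriptor_status() entries implement the generic descriptor-status callbacks; a brief sketch of how an application reaches them through the ethdev wrappers (values are illustrative):

    #include <rte_ethdev.h>

    /* Nonzero when at least offset + 1 packets are waiting on the queue:
     * the descriptor "offset" entries ahead of the next one to be read
     * has already been written back by the NIC. */
    static int
    rx_backlog_at_least(uint16_t port_id, uint16_t queue_id,
                        uint16_t offset)
    {
            return rte_eth_rx_descriptor_status(port_id, queue_id,
                                                offset) ==
                   RTE_ETH_RX_DESC_DONE;
    }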
/* mlx5_mr.c */