ethdev: change input parameters for Rx queue count
author     Konstantin Ananyev <konstantin.ananyev@intel.com>
           Wed, 13 Oct 2021 13:37:00 +0000 (14:37 +0100)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Wed, 13 Oct 2021 20:14:58 +0000 (22:14 +0200)
Currently, the majority of fast-path ethdev ops take pointers to
internal queue data structures as an input parameter, while
eth_rx_queue_count() takes a pointer to rte_eth_dev and a queue index.
For future work to hide rte_eth_devices[] and friends, it is desirable
to unify the parameter lists of all fast-path ethdev ops.
This patch changes eth_rx_queue_count() to accept a pointer to the
internal queue data as its input parameter.
While this change is transparent to the user, it still counts as an
ABI change, as eth_rx_queue_count_t is used by the public ethdev
inline function rte_eth_rx_queue_count().
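In short, the op type changes as sketched below. rte_eth_rx_queue_count()
now passes dev->data->rx_queues[queue_id] to the callback (see the
lib/ethdev hunks), and each PMD casts the received pointer back to its
own queue type. The my_pmd_* names in the sketch are illustrative only
and are not part of this patch:

  /* before */
  typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
                                           uint16_t rx_queue_id);

  /* after */
  typedef uint32_t (*eth_rx_queue_count_t)(void *rxq);

  /* typical driver adaptation (illustrative only) */
  static uint32_t
  my_pmd_rx_queue_count(void *rx_queue)
  {
          struct my_pmd_rx_queue *rxq = rx_queue;

          /* report the number of used descriptors, same as before */
          return rxq->nb_used;
  }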

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Tested-by: Feifei Wang <feifei.wang2@arm.com>
43 files changed:
doc/guides/rel_notes/release_21_11.rst
drivers/net/ark/ark_ethdev_rx.c
drivers/net/ark/ark_ethdev_rx.h
drivers/net/atlantic/atl_ethdev.h
drivers/net/atlantic/atl_rxtx.c
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/dpaa/dpaa_ethdev.c
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/e1000/e1000_ethdev.h
drivers/net/e1000/em_rxtx.c
drivers/net/e1000/igb_rxtx.c
drivers/net/enic/enic_ethdev.c
drivers/net/fm10k/fm10k.h
drivers/net/fm10k/fm10k_rxtx.c
drivers/net/hns3/hns3_rxtx.c
drivers/net/hns3/hns3_rxtx.h
drivers/net/i40e/i40e_rxtx.c
drivers/net/i40e/i40e_rxtx.h
drivers/net/iavf/iavf_rxtx.c
drivers/net/iavf/iavf_rxtx.h
drivers/net/ice/ice_rxtx.c
drivers/net/ice/ice_rxtx.h
drivers/net/igc/igc_txrx.c
drivers/net/igc/igc_txrx.h
drivers/net/ixgbe/ixgbe_ethdev.h
drivers/net/ixgbe/ixgbe_rxtx.c
drivers/net/mlx5/mlx5_rx.c
drivers/net/mlx5/mlx5_rx.h
drivers/net/netvsc/hn_rxtx.c
drivers/net/netvsc/hn_var.h
drivers/net/nfp/nfp_rxtx.c
drivers/net/nfp/nfp_rxtx.h
drivers/net/octeontx2/otx2_ethdev.h
drivers/net/octeontx2/otx2_ethdev_ops.c
drivers/net/sfc/sfc_ethdev.c
drivers/net/thunderx/nicvf_ethdev.c
drivers/net/thunderx/nicvf_rxtx.c
drivers/net/thunderx/nicvf_rxtx.h
drivers/net/txgbe/txgbe_ethdev.h
drivers/net/txgbe/txgbe_rxtx.c
drivers/net/vhost/rte_eth_vhost.c
lib/ethdev/rte_ethdev.h
lib/ethdev/rte_ethdev_core.h

doc/guides/rel_notes/release_21_11.rst
index 7ecb76c..a86f67d 100644 (file)
@@ -329,6 +329,12 @@ ABI Changes
    Also, make sure to start the actual text at the margin.
    =======================================================
 
+* ethdev: Input parameters for ``eth_rx_queue_count_t`` were changed.
+  Instead of a pointer to ``rte_eth_dev`` and a queue index, it now accepts
+  a pointer to the internal queue data as its input parameter. While this
+  change is transparent to the user, it still counts as an ABI change, as
+  the public inline function ``rte_eth_rx_queue_count`` uses this type.
+
 * security: ``rte_security_set_pkt_metadata`` and ``rte_security_get_userdata``
   routines used by inline outbound and inline inbound security processing were
   made inline and enhanced to do simple 64-bit set/get for PMDs that do not
drivers/net/ark/ark_ethdev_rx.c
index d255f01..98658ce 100644 (file)
@@ -388,11 +388,11 @@ eth_ark_rx_queue_drain(struct ark_rx_queue *queue)
 }
 
 uint32_t
-eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+eth_ark_dev_rx_queue_count(void *rx_queue)
 {
        struct ark_rx_queue *queue;
 
-       queue = dev->data->rx_queues[queue_id];
+       queue = rx_queue;
        return (queue->prod_index - queue->cons_index); /* mod arith */
 }
 
drivers/net/ark/ark_ethdev_rx.h
index c8dc340..859fcf1 100644 (file)
@@ -17,8 +17,7 @@ int eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
                               unsigned int socket_id,
                               const struct rte_eth_rxconf *rx_conf,
                               struct rte_mempool *mp);
-uint32_t eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev,
-                                   uint16_t rx_queue_id);
+uint32_t eth_ark_dev_rx_queue_count(void *rx_queue);
 int eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id);
 int eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id);
 uint16_t eth_ark_recv_pkts_noop(void *rx_queue, struct rte_mbuf **rx_pkts,
drivers/net/atlantic/atl_ethdev.h
index a2d1d43..fbc9917 100644 (file)
@@ -66,7 +66,7 @@ int atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);
 
-uint32_t atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t atl_rx_queue_count(void *rx_queue);
 
 int atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
drivers/net/atlantic/atl_rxtx.c
index fca682d..0d34603 100644 (file)
@@ -689,18 +689,13 @@ atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 /* Return Rx queue avail count */
 
 uint32_t
-atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+atl_rx_queue_count(void *rx_queue)
 {
        struct atl_rx_queue *rxq;
 
        PMD_INIT_FUNC_TRACE();
 
-       if (rx_queue_id >= dev->data->nb_rx_queues) {
-               PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
-               return 0;
-       }
-
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
 
        if (rxq == NULL)
                return 0;
drivers/net/bnxt/bnxt_ethdev.c
index a98f93a..ebda74d 100644 (file)
@@ -3154,20 +3154,22 @@ bnxt_dev_led_off_op(struct rte_eth_dev *dev)
 }
 
 static uint32_t
-bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+bnxt_rx_queue_count_op(void *rx_queue)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp;
        struct bnxt_cp_ring_info *cpr;
        uint32_t desc = 0, raw_cons, cp_ring_size;
        struct bnxt_rx_queue *rxq;
        struct rx_pkt_cmpl *rxcmp;
        int rc;
 
+       rxq = rx_queue;
+       bp = rxq->bp;
+
        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
        cpr = rxq->cp_ring;
        raw_cons = cpr->cp_raw_cons;
        cp_ring_size = cpr->cp_ring_struct->ring_size;
drivers/net/dpaa/dpaa_ethdev.c
index 59f4a93..c087ce6 100644 (file)
@@ -1267,17 +1267,16 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 }
 
 static uint32_t
-dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+dpaa_dev_rx_queue_count(void *rx_queue)
 {
-       struct dpaa_if *dpaa_intf = dev->data->dev_private;
-       struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
+       struct qman_fq *rxq = rx_queue;
        u32 frm_cnt = 0;
 
        PMD_INIT_FUNC_TRACE();
 
        if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
-               DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
-                              rx_queue_id, frm_cnt);
+               DPAA_PMD_DEBUG("RX frame count for q(%p) is %u",
+                              rx_queue, frm_cnt);
        }
        return frm_cnt;
 }
drivers/net/dpaa2/dpaa2_ethdev.c
index ff8ae89..f2519f0 100644 (file)
@@ -1007,10 +1007,9 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 }
 
 static uint32_t
-dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+dpaa2_dev_rx_queue_count(void *rx_queue)
 {
        int32_t ret;
-       struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q;
        struct qbman_swp *swp;
        struct qbman_fq_query_np_rslt state;
@@ -1027,12 +1026,12 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        }
        swp = DPAA2_PER_LCORE_PORTAL;
 
-       dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
+       dpaa2_q = rx_queue;
 
        if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
                frame_cnt = qbman_fq_state_frame_count(&state);
-               DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u",
-                               rx_queue_id, frame_cnt);
+               DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u",
+                               rx_queue, frame_cnt);
        }
        return frame_cnt;
 }
drivers/net/e1000/e1000_ethdev.h
index dafd586..050852b 100644 (file)
@@ -399,8 +399,7 @@ int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);
 
-uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
-               uint16_t rx_queue_id);
+uint32_t eth_igb_rx_queue_count(void *rx_queue);
 
 int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
@@ -474,8 +473,7 @@ int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);
 
-uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
-               uint16_t rx_queue_id);
+uint32_t eth_em_rx_queue_count(void *rx_queue);
 
 int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
drivers/net/e1000/em_rxtx.c
index 9994703..506b415 100644 (file)
@@ -1495,14 +1495,14 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 uint32_t
-eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+eth_em_rx_queue_count(void *rx_queue)
 {
 #define EM_RXQ_SCAN_INTERVAL 4
        volatile struct e1000_rx_desc *rxdp;
        struct em_rx_queue *rxq;
        uint32_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);
 
        while ((desc < rxq->nb_rx_desc) &&
drivers/net/e1000/igb_rxtx.c
index 7b2a6b0..e04c2b4 100644 (file)
@@ -1776,14 +1776,14 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 uint32_t
-eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+eth_igb_rx_queue_count(void *rx_queue)
 {
 #define IGB_RXQ_SCAN_INTERVAL 4
        volatile union e1000_adv_rx_desc *rxdp;
        struct igb_rx_queue *rxq;
        uint32_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);
 
        while ((desc < rxq->nb_rx_desc) &&
drivers/net/enic/enic_ethdev.c
index b03e56b..b94332c 100644 (file)
@@ -237,18 +237,18 @@ static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
        enic_free_rq(rxq);
 }
 
-static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
-                                          uint16_t rx_queue_id)
+static uint32_t enicpmd_dev_rx_queue_count(void *rx_queue)
 {
-       struct enic *enic = pmd_priv(dev);
+       struct enic *enic;
+       struct vnic_rq *sop_rq;
        uint32_t queue_count = 0;
        struct vnic_cq *cq;
        uint32_t cq_tail;
        uint16_t cq_idx;
-       int rq_num;
 
-       rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
-       cq = &enic->cq[enic_cq_rq(enic, rq_num)];
+       sop_rq = rx_queue;
+       enic = vnic_dev_priv(sop_rq->vdev);
+       cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
        cq_idx = cq->to_clean;
 
        cq_tail = ioread32(&cq->ctrl->cq_tail);
drivers/net/fm10k/fm10k.h
index 2e47ada..17c73c4 100644 (file)
@@ -324,7 +324,7 @@ uint16_t fm10k_recv_scattered_pkts(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 uint32_t
-fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+fm10k_dev_rx_queue_count(void *rx_queue);
 
 int
 fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
drivers/net/fm10k/fm10k_rxtx.c
index d983350..b3515ae 100644 (file)
@@ -367,14 +367,14 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 uint32_t
-fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+fm10k_dev_rx_queue_count(void *rx_queue)
 {
 #define FM10K_RXQ_SCAN_INTERVAL 4
        volatile union fm10k_rx_desc *rxdp;
        struct fm10k_rx_queue *rxq;
        uint16_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &rxq->hw_ring[rxq->next_dd];
        while ((desc < rxq->nb_desc) &&
                rxdp->w.status & rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)) {
drivers/net/hns3/hns3_rxtx.c
index 70de0d2..02040b8 100644 (file)
@@ -4685,7 +4685,7 @@ hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 uint32_t
-hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+hns3_rx_queue_count(void *rx_queue)
 {
        /*
         * Number of BDs that have been processed by the driver
@@ -4693,9 +4693,12 @@ hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
         */
        uint32_t driver_hold_bd_num;
        struct hns3_rx_queue *rxq;
+       const struct rte_eth_dev *dev;
        uint32_t fbd_num;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
+       dev = &rte_eth_devices[rxq->port_id];
+
        fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
        if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
            dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
drivers/net/hns3/hns3_rxtx.h
index bb309d3..c8229e9 100644 (file)
@@ -696,7 +696,7 @@ int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                        struct rte_mempool *mp);
 int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                        unsigned int socket, const struct rte_eth_txconf *conf);
-uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t hns3_rx_queue_count(void *rx_queue);
 int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
drivers/net/i40e/i40e_rxtx.c
index ab77ec0..3df4e3d 100644 (file)
@@ -2121,14 +2121,14 @@ i40e_rx_queue_release(void *rxq)
 }
 
 uint32_t
-i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+i40e_dev_rx_queue_count(void *rx_queue)
 {
 #define I40E_RXQ_SCAN_INTERVAL 4
        volatile union i40e_rx_desc *rxdp;
        struct i40e_rx_queue *rxq;
        uint16_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);
        while ((desc < rxq->nb_rx_desc) &&
                ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
drivers/net/i40e/i40e_rxtx.h
index 7a24dd6..2301e63 100644 (file)
@@ -229,8 +229,7 @@ int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
 void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
 
-uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
-                                uint16_t rx_queue_id);
+uint32_t i40e_dev_rx_queue_count(void *rx_queue);
 int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
drivers/net/iavf/iavf_rxtx.c
index 88661e5..88bbd40 100644 (file)
@@ -2799,14 +2799,14 @@ iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
 /* Get the number of used descriptors of a rx queue */
 uint32_t
-iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
+iavf_dev_rxq_count(void *rx_queue)
 {
 #define IAVF_RXQ_SCAN_INTERVAL 4
        volatile union iavf_rx_desc *rxdp;
        struct iavf_rx_queue *rxq;
        uint16_t desc = 0;
 
-       rxq = dev->data->rx_queues[queue_id];
+       rxq = rx_queue;
        rxdp = &rxq->rx_ring[rxq->rx_tail];
 
        while ((desc < rxq->nb_rx_desc) &&
drivers/net/iavf/iavf_rxtx.h
index 9591e45..f4ae2fd 100644 (file)
@@ -453,7 +453,7 @@ void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                          struct rte_eth_rxq_info *qinfo);
 void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                          struct rte_eth_txq_info *qinfo);
-uint32_t iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
+uint32_t iavf_dev_rxq_count(void *rx_queue);
 int iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
 int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
 
drivers/net/ice/ice_rxtx.c
index 7a2220d..b06c2f1 100644 (file)
@@ -1460,14 +1460,14 @@ ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 }
 
 uint32_t
-ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ice_rx_queue_count(void *rx_queue)
 {
 #define ICE_RXQ_SCAN_INTERVAL 4
        volatile union ice_rx_flex_desc *rxdp;
        struct ice_rx_queue *rxq;
        uint16_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &rxq->rx_ring[rxq->rx_tail];
        while ((desc < rxq->nb_rx_desc) &&
               rte_le_to_cpu_16(rxdp->wb.status_error0) &
drivers/net/ice/ice_rxtx.h
index c5ec6b7..e1c644f 100644 (file)
@@ -230,7 +230,7 @@ uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 void ice_set_tx_function_flag(struct rte_eth_dev *dev,
                              struct ice_tx_queue *txq);
 void ice_set_tx_function(struct rte_eth_dev *dev);
-uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t ice_rx_queue_count(void *rx_queue);
 void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                      struct rte_eth_rxq_info *qinfo);
 void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
drivers/net/igc/igc_txrx.c
index a66ce1d..383bf83 100644 (file)
@@ -722,8 +722,7 @@ void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
                igc_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
-uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
-               uint16_t rx_queue_id)
+uint32_t eth_igc_rx_queue_count(void *rx_queue)
 {
        /**
         * Check the DD bit of a rx descriptor of each 4 in a group,
@@ -736,7 +735,7 @@ uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
        struct igc_rx_queue *rxq;
        uint16_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &rxq->rx_ring[rxq->rx_tail];
 
        while (desc < rxq->nb_rx_desc - rxq->rx_tail) {
drivers/net/igc/igc_txrx.h
index bb04036..535108a 100644 (file)
@@ -22,8 +22,7 @@ int eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);
 
-uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
-               uint16_t rx_queue_id);
+uint32_t eth_igc_rx_queue_count(void *rx_queue);
 
 int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset);
 
drivers/net/ixgbe/ixgbe_ethdev.h
index c01a74d..950fb2d 100644 (file)
@@ -594,8 +594,7 @@ int  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);
 
-uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
-               uint16_t rx_queue_id);
+uint32_t ixgbe_dev_rx_queue_count(void *rx_queue);
 
 int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
drivers/net/ixgbe/ixgbe_rxtx.c
index 0ac89cb..4d3d30b 100644 (file)
@@ -3262,14 +3262,14 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 uint32_t
-ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ixgbe_dev_rx_queue_count(void *rx_queue)
 {
 #define IXGBE_RXQ_SCAN_INTERVAL 4
        volatile union ixgbe_adv_rx_desc *rxdp;
        struct ixgbe_rx_queue *rxq;
        uint32_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);
 
        while ((desc < rxq->nb_rx_desc) &&
drivers/net/mlx5/mlx5_rx.c
index e3b1051..1a9eb35 100644 (file)
@@ -240,32 +240,32 @@ mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
 /**
  * DPDK callback to get the number of used descriptors in a RX queue.
  *
- * @param dev
- *   Pointer to the device structure.
- *
- * @param rx_queue_id
- *   The Rx queue.
+ * @param rx_queue
+ *   The Rx queue pointer.
  *
  * @return
  *   The number of used rx descriptor.
  *   -EINVAL if the queue is invalid
  */
 uint32_t
-mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+mlx5_rx_queue_count(void *rx_queue)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_data *rxq;
+       struct mlx5_rxq_data *rxq = rx_queue;
+       struct rte_eth_dev *dev;
+
+       if (!rxq) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
+
+       dev = &rte_eth_devices[rxq->port_id];
 
        if (dev->rx_pkt_burst == NULL ||
            dev->rx_pkt_burst == removed_rx_burst) {
                rte_errno = ENOTSUP;
                return -rte_errno;
        }
-       rxq = (*priv->rxqs)[rx_queue_id];
-       if (!rxq) {
-               rte_errno = EINVAL;
-               return -rte_errno;
-       }
+
        return rx_queue_count(rxq);
 }
 
drivers/net/mlx5/mlx5_rx.h
index 1b00076..a90cb49 100644 (file)
@@ -260,7 +260,7 @@ uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
 uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
                          uint16_t pkts_n);
 int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
-uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t mlx5_rx_queue_count(void *rx_queue);
 void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                       struct rte_eth_rxq_info *qinfo);
 int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
drivers/net/netvsc/hn_rxtx.c
index e880dc2..f8fff1b 100644 (file)
@@ -1018,9 +1018,9 @@ hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
  * For this device that means how many packets are pending in the ring.
  */
 uint32_t
-hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+hn_dev_rx_queue_count(void *rx_queue)
 {
-       struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];
+       struct hn_rx_queue *rxq = rx_queue;
 
        return rte_ring_count(rxq->rx_ring);
 }
drivers/net/netvsc/hn_var.h
index 2cd1f8a..18703f9 100644 (file)
@@ -215,7 +215,7 @@ int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
 void   hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
                             struct rte_eth_rxq_info *qinfo);
 void   hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
-uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
+uint32_t hn_dev_rx_queue_count(void *rx_queue);
 int    hn_dev_rx_queue_status(void *rxq, uint16_t offset);
 void   hn_dev_free_queues(struct rte_eth_dev *dev);
 
drivers/net/nfp/nfp_rxtx.c
index feeacb5..733f81e 100644 (file)
@@ -97,14 +97,14 @@ nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
 }
 
 uint32_t
-nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+nfp_net_rx_queue_count(void *rx_queue)
 {
        struct nfp_net_rxq *rxq;
        struct nfp_net_rx_desc *rxds;
        uint32_t idx;
        uint32_t count;
 
-       rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
+       rxq = rx_queue;
 
        idx = rxq->rd_p;
 
drivers/net/nfp/nfp_rxtx.h
index ab49898..1a813de 100644 (file)
@@ -275,8 +275,7 @@ struct nfp_net_rxq {
 } __rte_aligned(64);
 
 int nfp_net_rx_freelist_setup(struct rte_eth_dev *dev);
-uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
-                                      uint16_t queue_idx);
+uint32_t nfp_net_rx_queue_count(void *rx_queue);
 uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts);
 void nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
drivers/net/octeontx2/otx2_ethdev.h
index 0d73013..7a8d19d 100644 (file)
@@ -431,7 +431,7 @@ int otx2_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode);
 int otx2_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode);
-uint32_t otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t qidx);
+uint32_t otx2_nix_rx_queue_count(void *rx_queue);
 int otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset);
drivers/net/octeontx2/otx2_ethdev_ops.c
index 5cb3905..3a763f6 100644 (file)
@@ -342,13 +342,13 @@ nix_rx_head_tail_get(struct otx2_eth_dev *dev,
 }
 
 uint32_t
-otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t queue_idx)
+otx2_nix_rx_queue_count(void *rx_queue)
 {
-       struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[queue_idx];
-       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct otx2_eth_rxq *rxq = rx_queue;
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(rxq->eth_dev);
        uint32_t head, tail;
 
-       nix_rx_head_tail_get(dev, &head, &tail, queue_idx);
+       nix_rx_head_tail_get(dev, &head, &tail, rxq->rq);
        return (tail - head) % rxq->qlen;
 }
 
drivers/net/sfc/sfc_ethdev.c
index e127b18..9195694 100644 (file)
@@ -1357,19 +1357,19 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
  * use any process-local pointers from the adapter data.
  */
 static uint32_t
-sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
+sfc_rx_queue_count(void *rx_queue)
 {
-       const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
-       struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
-       sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+       struct sfc_dp_rxq *dp_rxq = rx_queue;
+       const struct sfc_dp_rx *dp_rx;
        struct sfc_rxq_info *rxq_info;
 
-       rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+       dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
+       rxq_info = sfc_rxq_info_by_dp_rxq(dp_rxq);
 
        if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
                return 0;
 
-       return sap->dp_rx->qdesc_npending(rxq_info->dp);
+       return dp_rx->qdesc_npending(dp_rxq);
 }
 
 /*
drivers/net/thunderx/nicvf_ethdev.c
index 7e07d38..b08701b 100644 (file)
@@ -1060,8 +1060,7 @@ nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
        if (dev->rx_pkt_burst == NULL)
                return;
 
-       while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
-                               nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
+       while ((rxq_cnt = nicvf_dev_rx_queue_count(rxq))) {
                nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
                                        NICVF_MAX_RX_FREE_THRESH);
                PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
drivers/net/thunderx/nicvf_rxtx.c
index 91e09ff..0d4f4ae 100644 (file)
@@ -649,11 +649,11 @@ nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
 }
 
 uint32_t
-nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+nicvf_dev_rx_queue_count(void *rx_queue)
 {
        struct nicvf_rxq *rxq;
 
-       rxq = dev->data->rx_queues[queue_idx];
+       rxq = rx_queue;
        return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
 }
 
drivers/net/thunderx/nicvf_rxtx.h
index d6ed660..271f329 100644 (file)
@@ -83,7 +83,7 @@ nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
        *(uint64_t *)(&pkt->rearm_data) = init.value;
 }
 
-uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
+uint32_t nicvf_dev_rx_queue_count(void *rx_queue);
 uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
 
 uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
drivers/net/txgbe/txgbe_ethdev.h
index 112567e..528f114 100644 (file)
@@ -438,8 +438,7 @@ int  txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);
 
-uint32_t txgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
-               uint16_t rx_queue_id);
+uint32_t txgbe_dev_rx_queue_count(void *rx_queue);
 
 int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
drivers/net/txgbe/txgbe_rxtx.c
index b6339fe..4849fb3 100644 (file)
@@ -2688,14 +2688,14 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 uint32_t
-txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+txgbe_dev_rx_queue_count(void *rx_queue)
 {
 #define TXGBE_RXQ_SCAN_INTERVAL 4
        volatile struct txgbe_rx_desc *rxdp;
        struct txgbe_rx_queue *rxq;
        uint32_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &rxq->rx_ring[rxq->rx_tail];
 
        while ((desc < rxq->nb_rx_desc) &&
drivers/net/vhost/rte_eth_vhost.c
index 2e24e5f..a7935a7 100644 (file)
@@ -1375,11 +1375,11 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
 }
 
 static uint32_t
-eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+eth_rx_queue_count(void *rx_queue)
 {
        struct vhost_queue *vq;
 
-       vq = dev->data->rx_queues[rx_queue_id];
+       vq = rx_queue;
        if (vq == NULL)
                return 0;
 
lib/ethdev/rte_ethdev.h
index cb847a2..4007bd0 100644 (file)
@@ -5040,7 +5040,7 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
            dev->data->rx_queues[queue_id] == NULL)
                return -EINVAL;
 
-       return (int)(*dev->rx_queue_count)(dev, queue_id);
+       return (int)(*dev->rx_queue_count)(dev->data->rx_queues[queue_id]);
 }
 
 /**@{@name Rx hardware descriptor states
lib/ethdev/rte_ethdev_core.h
index 8aae713..af824ef 100644 (file)
@@ -41,8 +41,7 @@ typedef uint16_t (*eth_tx_prep_t)(void *txq,
 /**< @internal Prepare output packets on a transmit queue of an Ethernet device. */
 
 
-typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
-                                        uint16_t rx_queue_id);
+typedef uint32_t (*eth_rx_queue_count_t)(void *rxq);
 /**< @internal Get number of used descriptors on a receive queue. */
 
 typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);