Also, make sure to start the actual text at the margin.
=======================================================
+* ethdev: Input parameters for ``eth_rx_queue_count_t`` were changed.
+ Instead of a pointer to ``rte_eth_dev`` and a queue index, it now accepts
+ a pointer to the internal queue data as its input parameter. While this
+ change is transparent to the user, it still counts as an ABI change, as
+ ``eth_rx_queue_count_t`` is used by the public inline function
+ ``rte_eth_rx_queue_count``.
+
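To illustrate the conversion that each driver hunk below performs, here is a
minimal sketch for a hypothetical PMD (``mypmd`` and ``struct
mypmd_rx_queue`` are illustrative names, not part of this patch): the
callback no longer looks up ``dev->data->rx_queues[]`` itself, because the
ethdev layer now resolves the queue pointer before invoking it.

    #include <stdint.h>

    /* Hypothetical per-queue structure of an imaginary PMD. */
    struct mypmd_rx_queue {
            uint32_t prod_index;    /* descriptors produced by HW */
            uint32_t cons_index;    /* descriptors consumed by SW */
    };

    /* Old style (for comparison): the callback had to do the lookup itself.
     *
     * uint32_t
     * mypmd_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
     * {
     *         struct mypmd_rx_queue *q = dev->data->rx_queues[rx_queue_id];
     *
     *         return q->prod_index - q->cons_index;
     * }
     */

    /* New style: ethdev passes the queue data pointer directly. */
    uint32_t
    mypmd_rx_queue_count(void *rx_queue)
    {
            struct mypmd_rx_queue *q = rx_queue;

            return q->prod_index - q->cons_index;
    }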
* security: ``rte_security_set_pkt_metadata`` and ``rte_security_get_userdata``
routines used by inline outbound and inline inbound security processing were
made inline and enhanced to do simple 64-bit set/get for PMDs that do not
}
uint32_t
-eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+eth_ark_dev_rx_queue_count(void *rx_queue)
{
struct ark_rx_queue *queue;
- queue = dev->data->rx_queues[queue_id];
+ queue = rx_queue;
return (queue->prod_index - queue->cons_index); /* mod arith */
}
unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
-uint32_t eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+uint32_t eth_ark_dev_rx_queue_count(void *rx_queue);
int eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id);
int eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id);
uint16_t eth_ark_recv_pkts_noop(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-uint32_t atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t atl_rx_queue_count(void *rx_queue);
int atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
/* Return Rx queue avail count */
uint32_t
-atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+atl_rx_queue_count(void *rx_queue)
{
struct atl_rx_queue *rxq;
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
if (rxq == NULL)
return 0;
}
static uint32_t
-bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+bnxt_rx_queue_count_op(void *rx_queue)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp;
struct bnxt_cp_ring_info *cpr;
uint32_t desc = 0, raw_cons, cp_ring_size;
struct bnxt_rx_queue *rxq;
struct rx_pkt_cmpl *rxcmp;
int rc;
+ rxq = rx_queue;
+ bp = rxq->bp;
+
rc = is_bnxt_in_error(bp);
if (rc)
return rc;
- rxq = dev->data->rx_queues[rx_queue_id];
cpr = rxq->cp_ring;
raw_cons = cpr->cp_raw_cons;
cp_ring_size = cpr->cp_ring_struct->ring_size;
}
static uint32_t
-dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+dpaa_dev_rx_queue_count(void *rx_queue)
{
- struct dpaa_if *dpaa_intf = dev->data->dev_private;
- struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
+ struct qman_fq *rxq = rx_queue;
u32 frm_cnt = 0;
PMD_INIT_FUNC_TRACE();
if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
- DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
- rx_queue_id, frm_cnt);
+ DPAA_PMD_DEBUG("RX frame count for q(%p) is %u",
+ rx_queue, frm_cnt);
}
return frm_cnt;
}
}
static uint32_t
-dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+dpaa2_dev_rx_queue_count(void *rx_queue)
{
int32_t ret;
- struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_queue *dpaa2_q;
struct qbman_swp *swp;
struct qbman_fq_query_np_rslt state;
}
swp = DPAA2_PER_LCORE_PORTAL;
- dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
+ dpaa2_q = rx_queue;
if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
frame_cnt = qbman_fq_state_frame_count(&state);
- DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u",
- rx_queue_id, frame_cnt);
+ DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u",
+ rx_queue, frame_cnt);
}
return frame_cnt;
}
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
-uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+uint32_t eth_igb_rx_queue_count(void *rx_queue);
int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
-uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+uint32_t eth_em_rx_queue_count(void *rx_queue);
int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
}
uint32_t
-eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+eth_em_rx_queue_count(void *rx_queue)
{
#define EM_RXQ_SCAN_INTERVAL 4
volatile struct e1000_rx_desc *rxdp;
struct em_rx_queue *rxq;
uint32_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
}
uint32_t
-eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+eth_igb_rx_queue_count(void *rx_queue)
{
#define IGB_RXQ_SCAN_INTERVAL 4
volatile union e1000_adv_rx_desc *rxdp;
struct igb_rx_queue *rxq;
uint32_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
enic_free_rq(rxq);
}
-static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id)
+static uint32_t enicpmd_dev_rx_queue_count(void *rx_queue)
{
- struct enic *enic = pmd_priv(dev);
+ struct enic *enic;
+ struct vnic_rq *sop_rq;
uint32_t queue_count = 0;
struct vnic_cq *cq;
uint32_t cq_tail;
uint16_t cq_idx;
- int rq_num;
- rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
- cq = &enic->cq[enic_cq_rq(enic, rq_num)];
+ sop_rq = rx_queue;
+ enic = vnic_dev_priv(sop_rq->vdev);
+ cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
cq_idx = cq->to_clean;
cq_tail = ioread32(&cq->ctrl->cq_tail);
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint32_t
-fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+fm10k_dev_rx_queue_count(void *rx_queue);
int
fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
}
uint32_t
-fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+fm10k_dev_rx_queue_count(void *rx_queue)
{
#define FM10K_RXQ_SCAN_INTERVAL 4
volatile union fm10k_rx_desc *rxdp;
struct fm10k_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &rxq->hw_ring[rxq->next_dd];
while ((desc < rxq->nb_desc) &&
rxdp->w.status & rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)) {
}
uint32_t
-hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+hns3_rx_queue_count(void *rx_queue)
{
/*
* Number of BDs that have been processed by the driver
*/
uint32_t driver_hold_bd_num;
struct hns3_rx_queue *rxq;
+ const struct rte_eth_dev *dev;
uint32_t fbd_num;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
+ dev = &rte_eth_devices[rxq->port_id];
+
fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
unsigned int socket, const struct rte_eth_txconf *conf);
-uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t hns3_rx_queue_count(void *rx_queue);
int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
}
uint32_t
-i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+i40e_dev_rx_queue_count(void *rx_queue)
{
#define I40E_RXQ_SCAN_INTERVAL 4
volatile union i40e_rx_desc *rxdp;
struct i40e_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
-uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+uint32_t i40e_dev_rx_queue_count(void *rx_queue);
int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
/* Get the number of used descriptors of a rx queue */
uint32_t
-iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
+iavf_dev_rxq_count(void *rx_queue)
{
#define IAVF_RXQ_SCAN_INTERVAL 4
volatile union iavf_rx_desc *rxdp;
struct iavf_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[queue_id];
+ rxq = rx_queue;
rxdp = &rxq->rx_ring[rxq->rx_tail];
while ((desc < rxq->nb_rx_desc) &&
struct rte_eth_rxq_info *qinfo);
void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
-uint32_t iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
+uint32_t iavf_dev_rxq_count(void *rx_queue);
int iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
}
uint32_t
-ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ice_rx_queue_count(void *rx_queue)
{
#define ICE_RXQ_SCAN_INTERVAL 4
volatile union ice_rx_flex_desc *rxdp;
struct ice_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &rxq->rx_ring[rxq->rx_tail];
while ((desc < rxq->nb_rx_desc) &&
rte_le_to_cpu_16(rxdp->wb.status_error0) &
void ice_set_tx_function_flag(struct rte_eth_dev *dev,
struct ice_tx_queue *txq);
void ice_set_tx_function(struct rte_eth_dev *dev);
-uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t ice_rx_queue_count(void *rx_queue);
void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
igc_rx_queue_release(dev->data->rx_queues[qid]);
}
-uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id)
+uint32_t eth_igc_rx_queue_count(void *rx_queue)
{
/**
* Check the DD bit of a rx descriptor of each 4 in a group,
struct igc_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &rxq->rx_ring[rxq->rx_tail];
while (desc < rxq->nb_rx_desc - rxq->rx_tail) {
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
-uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+uint32_t eth_igc_rx_queue_count(void *rx_queue);
int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset);
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+uint32_t ixgbe_dev_rx_queue_count(void *rx_queue);
int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
}
uint32_t
-ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ixgbe_dev_rx_queue_count(void *rx_queue)
{
#define IXGBE_RXQ_SCAN_INTERVAL 4
volatile union ixgbe_adv_rx_desc *rxdp;
struct ixgbe_rx_queue *rxq;
uint32_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
/**
* DPDK callback to get the number of used descriptors in a RX queue.
*
- * @param dev
- * Pointer to the device structure.
- *
- * @param rx_queue_id
- * The Rx queue.
+ * @param rx_queue
+ * The Rx queue pointer.
*
* @return
 * The number of used Rx descriptors.
* -EINVAL if the queue is invalid
*/
uint32_t
-mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+mlx5_rx_queue_count(void *rx_queue)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq;
+ struct mlx5_rxq_data *rxq = rx_queue;
+ struct rte_eth_dev *dev;
+
+ if (!rxq) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+
+ dev = &rte_eth_devices[rxq->port_id];
if (dev->rx_pkt_burst == NULL ||
dev->rx_pkt_burst == removed_rx_burst) {
rte_errno = ENOTSUP;
return -rte_errno;
}
- rxq = (*priv->rxqs)[rx_queue_id];
- if (!rxq) {
- rte_errno = EINVAL;
- return -rte_errno;
- }
+
return rx_queue_count(rxq);
}
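For callers, the error convention documented in the mlx5 comment above
surfaces through the public ``rte_eth_rx_queue_count()``, which returns the
descriptor count or a negative errno. A minimal usage sketch, assuming an
already initialized port (``pending_rx_descs`` is an illustrative helper,
not part of this patch):

    #include <rte_ethdev.h>

    /* Poll how many Rx descriptors are pending on the given queue.
     * A negative return from rte_eth_rx_queue_count() is a negative
     * errno (e.g. -ENOTSUP, -EINVAL); treat it as "none" here. */
    static uint32_t
    pending_rx_descs(uint16_t port_id, uint16_t queue_id)
    {
            int n = rte_eth_rx_queue_count(port_id, queue_id);

            if (n < 0)
                    return 0;
            return (uint32_t)n;
    }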
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
-uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t mlx5_rx_queue_count(void *rx_queue);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
* For this device that means how many packets are pending in the ring.
*/
uint32_t
-hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+hn_dev_rx_queue_count(void *rx_queue)
{
- struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];
+ struct hn_rx_queue *rxq = rx_queue;
return rte_ring_count(rxq->rx_ring);
}
void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
-uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
+uint32_t hn_dev_rx_queue_count(void *rx_queue);
int hn_dev_rx_queue_status(void *rxq, uint16_t offset);
void hn_dev_free_queues(struct rte_eth_dev *dev);
}
uint32_t
-nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+nfp_net_rx_queue_count(void *rx_queue)
{
struct nfp_net_rxq *rxq;
struct nfp_net_rx_desc *rxds;
uint32_t idx;
uint32_t count;
- rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
+ rxq = rx_queue;
idx = rxq->rd_p;
} __rte_aligned(64);
int nfp_net_rx_freelist_setup(struct rte_eth_dev *dev);
-uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t queue_idx);
+uint32_t nfp_net_rx_queue_count(void *rx_queue);
uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
void nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
struct rte_eth_burst_mode *mode);
int otx2_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_burst_mode *mode);
-uint32_t otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t qidx);
+uint32_t otx2_nix_rx_queue_count(void *rx_queue);
int otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
int otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset);
int otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset);
}
uint32_t
-otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t queue_idx)
+otx2_nix_rx_queue_count(void *rx_queue)
{
- struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[queue_idx];
- struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_eth_rxq *rxq = rx_queue;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(rxq->eth_dev);
uint32_t head, tail;
- nix_rx_head_tail_get(dev, &head, &tail, queue_idx);
+ nix_rx_head_tail_get(dev, &head, &tail, rxq->rq);
return (tail - head) % rxq->qlen;
}
* use any process-local pointers from the adapter data.
*/
static uint32_t
-sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
+sfc_rx_queue_count(void *rx_queue)
{
- const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
- struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
- sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_dp_rxq *dp_rxq = rx_queue;
+ const struct sfc_dp_rx *dp_rx;
struct sfc_rxq_info *rxq_info;
- rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+ dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
+ rxq_info = sfc_rxq_info_by_dp_rxq(dp_rxq);
if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
return 0;
- return sap->dp_rx->qdesc_npending(rxq_info->dp);
+ return dp_rx->qdesc_npending(dp_rxq);
}
/*
if (dev->rx_pkt_burst == NULL)
return;
- while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
- nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
+ while ((rxq_cnt = nicvf_dev_rx_queue_count(rxq))) {
nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
NICVF_MAX_RX_FREE_THRESH);
PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
}
uint32_t
-nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+nicvf_dev_rx_queue_count(void *rx_queue)
{
struct nicvf_rxq *rxq;
- rxq = dev->data->rx_queues[queue_idx];
+ rxq = rx_queue;
return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
}
*(uint64_t *)(&pkt->rearm_data) = init.value;
}
-uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
+uint32_t nicvf_dev_rx_queue_count(void *rx_queue);
uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-uint32_t txgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+uint32_t txgbe_dev_rx_queue_count(void *rx_queue);
int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
}
uint32_t
-txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+txgbe_dev_rx_queue_count(void *rx_queue)
{
#define TXGBE_RXQ_SCAN_INTERVAL 4
volatile struct txgbe_rx_desc *rxdp;
struct txgbe_rx_queue *rxq;
uint32_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &rxq->rx_ring[rxq->rx_tail];
while ((desc < rxq->nb_rx_desc) &&
}
static uint32_t
-eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+eth_rx_queue_count(void *rx_queue)
{
struct vhost_queue *vq;
- vq = dev->data->rx_queues[rx_queue_id];
+ vq = rx_queue;
if (vq == NULL)
return 0;
dev->data->rx_queues[queue_id] == NULL)
return -EINVAL;
- return (int)(*dev->rx_queue_count)(dev, queue_id);
+ return (int)(*dev->rx_queue_count)(dev->data->rx_queues[queue_id]);
}
/**@{@name Rx hardware descriptor states
/**< @internal Prepare output packets on a transmit queue of an Ethernet device. */
-typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+typedef uint32_t (*eth_rx_queue_count_t)(void *rxq);
/**< @internal Get number of used descriptors on a receive queue. */
typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);
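A driver advertises its implementation by assigning it to the
``rx_queue_count`` fast-path slot of ``struct rte_eth_dev`` at probe time.
A hedged sketch, reusing the hypothetical ``mypmd_rx_queue_count`` from the
earlier example (the init function name is likewise illustrative):

    #include <ethdev_driver.h>

    uint32_t mypmd_rx_queue_count(void *rx_queue);

    /* In the hypothetical PMD's probe/init path: wire up the fast-path
     * callback matching the new eth_rx_queue_count_t signature. */
    static int
    mypmd_eth_dev_init(struct rte_eth_dev *eth_dev)
    {
            eth_dev->rx_queue_count = mypmd_rx_queue_count;
            /* ... other fast-path and dev_ops assignments ... */
            return 0;
    }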